before send to remote
0  env/lib/python3.8/site-packages/django/db/backends/postgresql/__init__.py  vendored  Normal file
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/__init__.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/base.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/client.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/creation.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/features.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/introspection.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/operations.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
BIN  env/lib/python3.8/site-packages/django/db/backends/postgresql/__pycache__/schema.cpython-38.pyc  vendored  Normal file  (Binary file not shown.)
381  env/lib/python3.8/site-packages/django/db/backends/postgresql/base.py  vendored  Normal file
@@ -0,0 +1,381 @@
"""
PostgreSQL database backend for Django.

Requires psycopg 2: https://www.psycopg.org/
"""

import asyncio
import threading
import warnings
from contextlib import contextmanager

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DatabaseError as WrappedDatabaseError
from django.db import connections
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorDebugWrapper as BaseCursorDebugWrapper
from django.utils.asyncio import async_unsafe
from django.utils.functional import cached_property
from django.utils.safestring import SafeString
from django.utils.version import get_version_tuple

try:
    import psycopg2 as Database
    import psycopg2.extensions
    import psycopg2.extras
except ImportError as e:
    raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)


def psycopg2_version():
    version = psycopg2.__version__.split(" ", 1)[0]
    return get_version_tuple(version)


PSYCOPG2_VERSION = psycopg2_version()

if PSYCOPG2_VERSION < (2, 8, 4):
    raise ImproperlyConfigured(
        "psycopg2 version 2.8.4 or newer is required; you have %s"
        % psycopg2.__version__
    )


# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient  # NOQA
from .creation import DatabaseCreation  # NOQA
from .features import DatabaseFeatures  # NOQA
from .introspection import DatabaseIntrospection  # NOQA
from .operations import DatabaseOperations  # NOQA
from .schema import DatabaseSchemaEditor  # NOQA

psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()

# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
    (INETARRAY_OID,),
    "INETARRAY",
    psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)


class DatabaseWrapper(BaseDatabaseWrapper):
    vendor = "postgresql"
    display_name = "PostgreSQL"
    # This dictionary maps Field objects to their associated PostgreSQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        "AutoField": "integer",
        "BigAutoField": "bigint",
        "BinaryField": "bytea",
        "BooleanField": "boolean",
        "CharField": "varchar(%(max_length)s)",
        "DateField": "date",
        "DateTimeField": "timestamp with time zone",
        "DecimalField": "numeric(%(max_digits)s, %(decimal_places)s)",
        "DurationField": "interval",
        "FileField": "varchar(%(max_length)s)",
        "FilePathField": "varchar(%(max_length)s)",
        "FloatField": "double precision",
        "IntegerField": "integer",
        "BigIntegerField": "bigint",
        "IPAddressField": "inet",
        "GenericIPAddressField": "inet",
        "JSONField": "jsonb",
        "OneToOneField": "integer",
        "PositiveBigIntegerField": "bigint",
        "PositiveIntegerField": "integer",
        "PositiveSmallIntegerField": "smallint",
        "SlugField": "varchar(%(max_length)s)",
        "SmallAutoField": "smallint",
        "SmallIntegerField": "smallint",
        "TextField": "text",
        "TimeField": "time",
        "UUIDField": "uuid",
    }
    data_type_check_constraints = {
        "PositiveBigIntegerField": '"%(column)s" >= 0',
        "PositiveIntegerField": '"%(column)s" >= 0',
        "PositiveSmallIntegerField": '"%(column)s" >= 0',
    }
    data_types_suffix = {
        "AutoField": "GENERATED BY DEFAULT AS IDENTITY",
        "BigAutoField": "GENERATED BY DEFAULT AS IDENTITY",
        "SmallAutoField": "GENERATED BY DEFAULT AS IDENTITY",
    }
    operators = {
        "exact": "= %s",
        "iexact": "= UPPER(%s)",
        "contains": "LIKE %s",
        "icontains": "LIKE UPPER(%s)",
        "regex": "~ %s",
        "iregex": "~* %s",
        "gt": "> %s",
        "gte": ">= %s",
        "lt": "< %s",
        "lte": "<= %s",
        "startswith": "LIKE %s",
        "endswith": "LIKE %s",
        "istartswith": "LIKE UPPER(%s)",
        "iendswith": "LIKE UPPER(%s)",
    }

    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = (
        r"REPLACE(REPLACE(REPLACE({}, E'\\', E'\\\\'), E'%%', E'\\%%'), E'_', E'\\_')"
    )
    pattern_ops = {
        "contains": "LIKE '%%' || {} || '%%'",
        "icontains": "LIKE '%%' || UPPER({}) || '%%'",
        "startswith": "LIKE {} || '%%'",
        "istartswith": "LIKE UPPER({}) || '%%'",
        "endswith": "LIKE '%%' || {}",
        "iendswith": "LIKE '%%' || UPPER({})",
    }

    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations
    # PostgreSQL backend-specific attributes.
    _named_cursor_idx = 0

    def get_database_version(self):
        """
        Return a tuple of the database's version.
        E.g. for pg_version 120004, return (12, 4).
        """
        return divmod(self.pg_version, 10000)

    def get_connection_params(self):
        settings_dict = self.settings_dict
        # None may be used to connect to the default 'postgres' db
        if settings_dict["NAME"] == "" and not settings_dict.get("OPTIONS", {}).get(
            "service"
        ):
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME or OPTIONS['service'] value."
            )
        if len(settings_dict["NAME"] or "") > self.ops.max_name_length():
            raise ImproperlyConfigured(
                "The database name '%s' (%d characters) is longer than "
                "PostgreSQL's limit of %d characters. Supply a shorter NAME "
                "in settings.DATABASES."
                % (
                    settings_dict["NAME"],
                    len(settings_dict["NAME"]),
                    self.ops.max_name_length(),
                )
            )
        conn_params = {}
        if settings_dict["NAME"]:
            conn_params = {
                "database": settings_dict["NAME"],
                **settings_dict["OPTIONS"],
            }
        elif settings_dict["NAME"] is None:
            # Connect to the default 'postgres' db.
            settings_dict.get("OPTIONS", {}).pop("service", None)
            conn_params = {"database": "postgres", **settings_dict["OPTIONS"]}
        else:
            conn_params = {**settings_dict["OPTIONS"]}

        conn_params.pop("isolation_level", None)
        if settings_dict["USER"]:
            conn_params["user"] = settings_dict["USER"]
        if settings_dict["PASSWORD"]:
            conn_params["password"] = settings_dict["PASSWORD"]
        if settings_dict["HOST"]:
            conn_params["host"] = settings_dict["HOST"]
        if settings_dict["PORT"]:
            conn_params["port"] = settings_dict["PORT"]
        return conn_params

    @async_unsafe
    def get_new_connection(self, conn_params):
        connection = Database.connect(**conn_params)

        # self.isolation_level must be set:
        # - after connecting to the database in order to obtain the database's
        #   default when no value is explicitly specified in options.
        # - before calling _set_autocommit() because if autocommit is on, that
        #   will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
        options = self.settings_dict["OPTIONS"]
        try:
            self.isolation_level = options["isolation_level"]
        except KeyError:
            self.isolation_level = connection.isolation_level
        else:
            # Set the isolation level to the value from OPTIONS.
            if self.isolation_level != connection.isolation_level:
                connection.set_session(isolation_level=self.isolation_level)
        # Register dummy loads() to avoid a round trip from psycopg2's decode
        # to json.dumps() to json.loads(), when using a custom decoder in
        # JSONField.
        psycopg2.extras.register_default_jsonb(
            conn_or_curs=connection, loads=lambda x: x
        )
        return connection

    def ensure_timezone(self):
        if self.connection is None:
            return False
        conn_timezone_name = self.connection.get_parameter_status("TimeZone")
        timezone_name = self.timezone_name
        if timezone_name and conn_timezone_name != timezone_name:
            with self.connection.cursor() as cursor:
                cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
            return True
        return False

    def init_connection_state(self):
        super().init_connection_state()
        self.connection.set_client_encoding("UTF8")

        timezone_changed = self.ensure_timezone()
        if timezone_changed:
            # Commit after setting the time zone (see #17062)
            if not self.get_autocommit():
                self.connection.commit()

    @async_unsafe
    def create_cursor(self, name=None):
        if name:
            # In autocommit mode, the cursor will be used outside of a
            # transaction, hence use a holdable cursor.
            cursor = self.connection.cursor(
                name, scrollable=False, withhold=self.connection.autocommit
            )
        else:
            cursor = self.connection.cursor()
        cursor.tzinfo_factory = self.tzinfo_factory if settings.USE_TZ else None
        return cursor

    def tzinfo_factory(self, offset):
        return self.timezone

    @async_unsafe
    def chunked_cursor(self):
        self._named_cursor_idx += 1
        # Get the current async task
        # Note that right now this is behind @async_unsafe, so this is
        # unreachable, but in future we'll start loosening this restriction.
        # For now, it's here so that every use of "threading" is
        # also async-compatible.
        try:
            current_task = asyncio.current_task()
        except RuntimeError:
            current_task = None
        # Current task can be none even if the current_task call didn't error
        if current_task:
            task_ident = str(id(current_task))
        else:
            task_ident = "sync"
        # Use that and the thread ident to get a unique name
        return self._cursor(
            name="_django_curs_%d_%s_%d"
            % (
                # Avoid reusing name in other threads / tasks
                threading.current_thread().ident,
                task_ident,
                self._named_cursor_idx,
            )
        )

    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit = autocommit

    def check_constraints(self, table_names=None):
        """
        Check constraints by setting them to immediate. Return them to deferred
        afterward.
        """
        with self.cursor() as cursor:
            cursor.execute("SET CONSTRAINTS ALL IMMEDIATE")
            cursor.execute("SET CONSTRAINTS ALL DEFERRED")

    def is_usable(self):
        try:
            # Use a psycopg cursor directly, bypassing Django's utilities.
            with self.connection.cursor() as cursor:
                cursor.execute("SELECT 1")
        except Database.Error:
            return False
        else:
            return True

    @contextmanager
    def _nodb_cursor(self):
        cursor = None
        try:
            with super()._nodb_cursor() as cursor:
                yield cursor
        except (Database.DatabaseError, WrappedDatabaseError):
            if cursor is not None:
                raise
            warnings.warn(
                "Normally Django will use a connection to the 'postgres' database "
                "to avoid running initialization queries against the production "
                "database when it's not needed (for example, when running tests). "
                "Django was unable to create a connection to the 'postgres' database "
                "and will use the first PostgreSQL database instead.",
                RuntimeWarning,
            )
            for connection in connections.all():
                if (
                    connection.vendor == "postgresql"
                    and connection.settings_dict["NAME"] != "postgres"
                ):
                    conn = self.__class__(
                        {
                            **self.settings_dict,
                            "NAME": connection.settings_dict["NAME"],
                        },
                        alias=self.alias,
                    )
                    try:
                        with conn.cursor() as cursor:
                            yield cursor
                    finally:
                        conn.close()
                    break
            else:
                raise

    @cached_property
    def pg_version(self):
        with self.temporary_connection():
            return self.connection.server_version

    def make_debug_cursor(self, cursor):
        return CursorDebugWrapper(cursor, self)


class CursorDebugWrapper(BaseCursorDebugWrapper):
    def copy_expert(self, sql, file, *args):
        with self.debug_sql(sql):
            return self.cursor.copy_expert(sql, file, *args)

    def copy_to(self, file, table, *args, **kwargs):
        with self.debug_sql(sql="COPY %s TO STDOUT" % table):
            return self.cursor.copy_to(file, table, *args, **kwargs)
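Note: get_connection_params() above is what maps a settings.DATABASES entry onto psycopg2.connect() keyword arguments. A minimal sketch of such an entry follows; the database name and credentials are placeholders, not part of this commit.

import psycopg2.extensions

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "mydb",        # hypothetical; becomes conn_params["database"]
        "USER": "myuser",      # becomes conn_params["user"]
        "PASSWORD": "secret",  # becomes conn_params["password"]
        "HOST": "localhost",   # becomes conn_params["host"]
        "PORT": "5432",        # becomes conn_params["port"]
        "OPTIONS": {
            # Popped out of conn_params and applied via connection.set_session()
            # in get_new_connection() instead of being passed to connect().
            "isolation_level": psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE,
        },
    },
}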
64  env/lib/python3.8/site-packages/django/db/backends/postgresql/client.py  vendored  Normal file
@@ -0,0 +1,64 @@
import signal

from django.db.backends.base.client import BaseDatabaseClient


class DatabaseClient(BaseDatabaseClient):
    executable_name = "psql"

    @classmethod
    def settings_to_cmd_args_env(cls, settings_dict, parameters):
        args = [cls.executable_name]
        options = settings_dict.get("OPTIONS", {})

        host = settings_dict.get("HOST")
        port = settings_dict.get("PORT")
        dbname = settings_dict.get("NAME")
        user = settings_dict.get("USER")
        passwd = settings_dict.get("PASSWORD")
        passfile = options.get("passfile")
        service = options.get("service")
        sslmode = options.get("sslmode")
        sslrootcert = options.get("sslrootcert")
        sslcert = options.get("sslcert")
        sslkey = options.get("sslkey")

        if not dbname and not service:
            # Connect to the default 'postgres' db.
            dbname = "postgres"
        if user:
            args += ["-U", user]
        if host:
            args += ["-h", host]
        if port:
            args += ["-p", str(port)]
        if dbname:
            args += [dbname]
        args.extend(parameters)

        env = {}
        if passwd:
            env["PGPASSWORD"] = str(passwd)
        if service:
            env["PGSERVICE"] = str(service)
        if sslmode:
            env["PGSSLMODE"] = str(sslmode)
        if sslrootcert:
            env["PGSSLROOTCERT"] = str(sslrootcert)
        if sslcert:
            env["PGSSLCERT"] = str(sslcert)
        if sslkey:
            env["PGSSLKEY"] = str(sslkey)
        if passfile:
            env["PGPASSFILE"] = str(passfile)
        return args, (env or None)

    def runshell(self, parameters):
        sigint_handler = signal.getsignal(signal.SIGINT)
        try:
            # Allow SIGINT to pass to psql to abort queries.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            super().runshell(parameters)
        finally:
            # Restore the original SIGINT handler.
            signal.signal(signal.SIGINT, sigint_handler)
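Note: a small sketch of what settings_to_cmd_args_env() above produces for `manage.py dbshell`; the connection values are hypothetical.

from django.db.backends.postgresql.client import DatabaseClient

settings_dict = {
    "NAME": "mydb",
    "USER": "myuser",
    "PASSWORD": "secret",
    "HOST": "localhost",
    "PORT": "5432",
    "OPTIONS": {"sslmode": "require"},
}
args, env = DatabaseClient.settings_to_cmd_args_env(settings_dict, [])
# args == ["psql", "-U", "myuser", "-h", "localhost", "-p", "5432", "mydb"]
# env  == {"PGPASSWORD": "secret", "PGSSLMODE": "require"}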
87  env/lib/python3.8/site-packages/django/db/backends/postgresql/creation.py  vendored  Normal file
@@ -0,0 +1,87 @@
import sys

from psycopg2 import errorcodes

from django.core.exceptions import ImproperlyConfigured
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.backends.utils import strip_quotes


class DatabaseCreation(BaseDatabaseCreation):
    def _quote_name(self, name):
        return self.connection.ops.quote_name(name)

    def _get_database_create_suffix(self, encoding=None, template=None):
        suffix = ""
        if encoding:
            suffix += " ENCODING '{}'".format(encoding)
        if template:
            suffix += " TEMPLATE {}".format(self._quote_name(template))
        return suffix and "WITH" + suffix

    def sql_table_creation_suffix(self):
        test_settings = self.connection.settings_dict["TEST"]
        if test_settings.get("COLLATION") is not None:
            raise ImproperlyConfigured(
                "PostgreSQL does not support collation setting at database "
                "creation time."
            )
        return self._get_database_create_suffix(
            encoding=test_settings["CHARSET"],
            template=test_settings.get("TEMPLATE"),
        )

    def _database_exists(self, cursor, database_name):
        cursor.execute(
            "SELECT 1 FROM pg_catalog.pg_database WHERE datname = %s",
            [strip_quotes(database_name)],
        )
        return cursor.fetchone() is not None

    def _execute_create_test_db(self, cursor, parameters, keepdb=False):
        try:
            if keepdb and self._database_exists(cursor, parameters["dbname"]):
                # If the database should be kept and it already exists, don't
                # try to create a new one.
                return
            super()._execute_create_test_db(cursor, parameters, keepdb)
        except Exception as e:
            if getattr(e.__cause__, "pgcode", "") != errorcodes.DUPLICATE_DATABASE:
                # All errors except "database already exists" cancel tests.
                self.log("Got an error creating the test database: %s" % e)
                sys.exit(2)
            elif not keepdb:
                # If the database should be kept, ignore "database already
                # exists".
                raise

    def _clone_test_db(self, suffix, verbosity, keepdb=False):
        # CREATE DATABASE ... WITH TEMPLATE ... requires closing connections
        # to the template database.
        self.connection.close()

        source_database_name = self.connection.settings_dict["NAME"]
        target_database_name = self.get_test_db_clone_settings(suffix)["NAME"]
        test_db_params = {
            "dbname": self._quote_name(target_database_name),
            "suffix": self._get_database_create_suffix(template=source_database_name),
        }
        with self._nodb_cursor() as cursor:
            try:
                self._execute_create_test_db(cursor, test_db_params, keepdb)
            except Exception:
                try:
                    if verbosity >= 1:
                        self.log(
                            "Destroying old test database for alias %s..."
                            % (
                                self._get_database_display_str(
                                    verbosity, target_database_name
                                ),
                            )
                        )
                    cursor.execute("DROP DATABASE %(dbname)s" % test_db_params)
                    self._execute_create_test_db(cursor, test_db_params, keepdb)
                except Exception as e:
                    self.log("Got an error cloning the test database: %s" % e)
                    sys.exit(2)
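Note: sql_table_creation_suffix() above reads the TEST sub-dict of the database settings when building the test database. A sketch with hypothetical values:

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "mydb",
        "TEST": {
            "CHARSET": "UTF8",        # -> " ENCODING 'UTF8'"
            "TEMPLATE": "template0",  # -> ' TEMPLATE "template0"'
        },
    },
}
# _get_database_create_suffix() then returns:
#   WITH ENCODING 'UTF8' TEMPLATE "template0"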
103  env/lib/python3.8/site-packages/django/db/backends/postgresql/features.py  vendored  Normal file
@@ -0,0 +1,103 @@
|
||||
import operator
|
||||
|
||||
from django.db import InterfaceError
|
||||
from django.db.backends.base.features import BaseDatabaseFeatures
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
|
||||
class DatabaseFeatures(BaseDatabaseFeatures):
|
||||
minimum_database_version = (11,)
|
||||
allows_group_by_selected_pks = True
|
||||
can_return_columns_from_insert = True
|
||||
can_return_rows_from_bulk_insert = True
|
||||
has_real_datatype = True
|
||||
has_native_uuid_field = True
|
||||
has_native_duration_field = True
|
||||
has_native_json_field = True
|
||||
can_defer_constraint_checks = True
|
||||
has_select_for_update = True
|
||||
has_select_for_update_nowait = True
|
||||
has_select_for_update_of = True
|
||||
has_select_for_update_skip_locked = True
|
||||
has_select_for_no_key_update = True
|
||||
can_release_savepoints = True
|
||||
supports_tablespaces = True
|
||||
supports_transactions = True
|
||||
can_introspect_materialized_views = True
|
||||
can_distinct_on_fields = True
|
||||
can_rollback_ddl = True
|
||||
supports_combined_alters = True
|
||||
nulls_order_largest = True
|
||||
closed_cursor_error_class = InterfaceError
|
||||
greatest_least_ignores_nulls = True
|
||||
can_clone_databases = True
|
||||
supports_temporal_subtraction = True
|
||||
supports_slicing_ordering_in_compound = True
|
||||
create_test_procedure_without_params_sql = """
|
||||
CREATE FUNCTION test_procedure () RETURNS void AS $$
|
||||
DECLARE
|
||||
V_I INTEGER;
|
||||
BEGIN
|
||||
V_I := 1;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;"""
|
||||
create_test_procedure_with_int_param_sql = """
|
||||
CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$
|
||||
DECLARE
|
||||
V_I INTEGER;
|
||||
BEGIN
|
||||
V_I := P_I;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;"""
|
||||
requires_casted_case_in_updates = True
|
||||
supports_over_clause = True
|
||||
only_supports_unbounded_with_preceding_and_following = True
|
||||
supports_aggregate_filter_clause = True
|
||||
supported_explain_formats = {"JSON", "TEXT", "XML", "YAML"}
|
||||
supports_deferrable_unique_constraints = True
|
||||
has_json_operators = True
|
||||
json_key_contains_list_matching_requires_list = True
|
||||
supports_update_conflicts = True
|
||||
supports_update_conflicts_with_target = True
|
||||
supports_covering_indexes = True
|
||||
can_rename_index = True
|
||||
test_collations = {
|
||||
"non_default": "sv-x-icu",
|
||||
"swedish_ci": "sv-x-icu",
|
||||
}
|
||||
test_now_utc_template = "STATEMENT_TIMESTAMP() AT TIME ZONE 'UTC'"
|
||||
|
||||
django_test_skips = {
|
||||
"opclasses are PostgreSQL only.": {
|
||||
"indexes.tests.SchemaIndexesNotPostgreSQLTests."
|
||||
"test_create_index_ignores_opclasses",
|
||||
},
|
||||
}
|
||||
|
||||
@cached_property
|
||||
def introspected_field_types(self):
|
||||
return {
|
||||
**super().introspected_field_types,
|
||||
"PositiveBigIntegerField": "BigIntegerField",
|
||||
"PositiveIntegerField": "IntegerField",
|
||||
"PositiveSmallIntegerField": "SmallIntegerField",
|
||||
}
|
||||
|
||||
@cached_property
|
||||
def is_postgresql_12(self):
|
||||
return self.connection.pg_version >= 120000
|
||||
|
||||
@cached_property
|
||||
def is_postgresql_13(self):
|
||||
return self.connection.pg_version >= 130000
|
||||
|
||||
@cached_property
|
||||
def is_postgresql_14(self):
|
||||
return self.connection.pg_version >= 140000
|
||||
|
||||
has_bit_xor = property(operator.attrgetter("is_postgresql_14"))
|
||||
supports_covering_gist_indexes = property(operator.attrgetter("is_postgresql_12"))
|
||||
supports_covering_spgist_indexes = property(operator.attrgetter("is_postgresql_14"))
|
||||
supports_non_deterministic_collations = property(
|
||||
operator.attrgetter("is_postgresql_12")
|
||||
)
|
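Note: these flags are read at runtime through connection.features; the version-gated ones resolve lazily from pg_version. A quick sketch, assuming a configured PostgreSQL "default" connection:

from django.db import connection

def server_supports_xor_and_covering():
    # supports_covering_indexes is a plain class attribute (always True here);
    # has_bit_xor is derived from is_postgresql_14, i.e. server_version >= 140000.
    features = connection.features
    return features.supports_covering_indexes and features.has_bit_xor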
295  env/lib/python3.8/site-packages/django/db/backends/postgresql/introspection.py  vendored  Normal file
@@ -0,0 +1,295 @@
from collections import namedtuple

from django.db.backends.base.introspection import BaseDatabaseIntrospection
from django.db.backends.base.introspection import FieldInfo as BaseFieldInfo
from django.db.backends.base.introspection import TableInfo
from django.db.models import Index

FieldInfo = namedtuple("FieldInfo", BaseFieldInfo._fields + ("is_autofield",))


class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Maps type codes to Django Field types.
    data_types_reverse = {
        16: "BooleanField",
        17: "BinaryField",
        20: "BigIntegerField",
        21: "SmallIntegerField",
        23: "IntegerField",
        25: "TextField",
        700: "FloatField",
        701: "FloatField",
        869: "GenericIPAddressField",
        1042: "CharField",  # blank-padded
        1043: "CharField",
        1082: "DateField",
        1083: "TimeField",
        1114: "DateTimeField",
        1184: "DateTimeField",
        1186: "DurationField",
        1266: "TimeField",
        1700: "DecimalField",
        2950: "UUIDField",
        3802: "JSONField",
    }
    # A hook for subclasses.
    index_default_access_method = "btree"

    ignored_tables = []

    def get_field_type(self, data_type, description):
        field_type = super().get_field_type(data_type, description)
        if description.is_autofield or (
            # Required for pre-Django 4.1 serial columns.
            description.default
            and "nextval" in description.default
        ):
            if field_type == "IntegerField":
                return "AutoField"
            elif field_type == "BigIntegerField":
                return "BigAutoField"
            elif field_type == "SmallIntegerField":
                return "SmallAutoField"
        return field_type

    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        cursor.execute(
            """
            SELECT
                c.relname,
                CASE
                    WHEN c.relispartition THEN 'p'
                    WHEN c.relkind IN ('m', 'v') THEN 'v'
                    ELSE 't'
                END
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v')
                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
                AND pg_catalog.pg_table_is_visible(c.oid)
        """
        )
        return [
            TableInfo(*row)
            for row in cursor.fetchall()
            if row[0] not in self.ignored_tables
        ]

    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # Query the pg_catalog tables as cursor.description does not reliably
        # return the nullable property and information_schema.columns does not
        # contain details of materialized views.
        cursor.execute(
            """
            SELECT
                a.attname AS column_name,
                NOT (a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)) AS is_nullable,
                pg_get_expr(ad.adbin, ad.adrelid) AS column_default,
                CASE WHEN collname = 'default' THEN NULL ELSE collname END AS collation,
                a.attidentity != '' AS is_autofield
            FROM pg_attribute a
            LEFT JOIN pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum
            LEFT JOIN pg_collation co ON a.attcollation = co.oid
            JOIN pg_type t ON a.atttypid = t.oid
            JOIN pg_class c ON a.attrelid = c.oid
            JOIN pg_namespace n ON c.relnamespace = n.oid
            WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v')
                AND c.relname = %s
                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
                AND pg_catalog.pg_table_is_visible(c.oid)
        """,
            [table_name],
        )
        field_map = {line[0]: line[1:] for line in cursor.fetchall()}
        cursor.execute(
            "SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)
        )
        return [
            FieldInfo(
                line.name,
                line.type_code,
                line.display_size,
                line.internal_size,
                line.precision,
                line.scale,
                *field_map[line.name],
            )
            for line in cursor.description
        ]

    def get_sequences(self, cursor, table_name, table_fields=()):
        cursor.execute(
            """
            SELECT
                s.relname AS sequence_name,
                a.attname AS colname
            FROM
                pg_class s
                JOIN pg_depend d ON d.objid = s.oid
                    AND d.classid = 'pg_class'::regclass
                    AND d.refclassid = 'pg_class'::regclass
                JOIN pg_attribute a ON d.refobjid = a.attrelid
                    AND d.refobjsubid = a.attnum
                JOIN pg_class tbl ON tbl.oid = d.refobjid
                    AND tbl.relname = %s
                    AND pg_catalog.pg_table_is_visible(tbl.oid)
            WHERE
                s.relkind = 'S';
        """,
            [table_name],
        )
        return [
            {"name": row[0], "table": table_name, "column": row[1]}
            for row in cursor.fetchall()
        ]

    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all foreign keys in the given table.
        """
        cursor.execute(
            """
            SELECT a1.attname, c2.relname, a2.attname
            FROM pg_constraint con
            LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
            LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
            LEFT JOIN
                pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
            LEFT JOIN
                pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
            WHERE
                c1.relname = %s AND
                con.contype = 'f' AND
                c1.relnamespace = c2.relnamespace AND
                pg_catalog.pg_table_is_visible(c1.oid)
        """,
            [table_name],
        )
        return {row[0]: (row[2], row[1]) for row in cursor.fetchall()}

    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns. Also retrieve the definition of expression-based
        indexes.
        """
        constraints = {}
        # Loop over the key table, collecting things as constraints. The column
        # array must return column names in the same order in which they were
        # created.
        cursor.execute(
            """
            SELECT
                c.conname,
                array(
                    SELECT attname
                    FROM unnest(c.conkey) WITH ORDINALITY cols(colid, arridx)
                    JOIN pg_attribute AS ca ON cols.colid = ca.attnum
                    WHERE ca.attrelid = c.conrelid
                    ORDER BY cols.arridx
                ),
                c.contype,
                (SELECT fkc.relname || '.' || fka.attname
                FROM pg_attribute AS fka
                JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
                WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
                cl.reloptions
            FROM pg_constraint AS c
            JOIN pg_class AS cl ON c.conrelid = cl.oid
            WHERE cl.relname = %s AND pg_catalog.pg_table_is_visible(cl.oid)
        """,
            [table_name],
        )
        for constraint, columns, kind, used_cols, options in cursor.fetchall():
            constraints[constraint] = {
                "columns": columns,
                "primary_key": kind == "p",
                "unique": kind in ["p", "u"],
                "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
                "check": kind == "c",
                "index": False,
                "definition": None,
                "options": options,
            }
        # Now get indexes
        cursor.execute(
            """
            SELECT
                indexname,
                array_agg(attname ORDER BY arridx),
                indisunique,
                indisprimary,
                array_agg(ordering ORDER BY arridx),
                amname,
                exprdef,
                s2.attoptions
            FROM (
                SELECT
                    c2.relname as indexname, idx.*, attr.attname, am.amname,
                    CASE
                        WHEN idx.indexprs IS NOT NULL THEN
                            pg_get_indexdef(idx.indexrelid)
                    END AS exprdef,
                    CASE am.amname
                        WHEN %s THEN
                            CASE (option & 1)
                                WHEN 1 THEN 'DESC' ELSE 'ASC'
                            END
                    END as ordering,
                    c2.reloptions as attoptions
                FROM (
                    SELECT *
                    FROM
                        pg_index i,
                        unnest(i.indkey, i.indoption)
                            WITH ORDINALITY koi(key, option, arridx)
                ) idx
                LEFT JOIN pg_class c ON idx.indrelid = c.oid
                LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
                LEFT JOIN pg_am am ON c2.relam = am.oid
                LEFT JOIN
                    pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
                WHERE c.relname = %s AND pg_catalog.pg_table_is_visible(c.oid)
            ) s2
            GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
        """,
            [self.index_default_access_method, table_name],
        )
        for (
            index,
            columns,
            unique,
            primary,
            orders,
            type_,
            definition,
            options,
        ) in cursor.fetchall():
            if index not in constraints:
                basic_index = (
                    type_ == self.index_default_access_method
                    and
                    # '_btree' references
                    # django.contrib.postgres.indexes.BTreeIndex.suffix.
                    not index.endswith("_btree")
                    and options is None
                )
                constraints[index] = {
                    "columns": columns if columns != [None] else [],
                    "orders": orders if orders != [None] else [],
                    "primary_key": primary,
                    "unique": unique,
                    "foreign_key": None,
                    "check": False,
                    "index": True,
                    "type": Index.suffix if basic_index else type_,
                    "definition": definition,
                    "options": options,
                }
        return constraints
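Note: these methods back inspectdb and migration autodetection. A sketch of calling them directly; the table name "app_book" is hypothetical, and a configured PostgreSQL connection is assumed.

from django.db import connection

with connection.cursor() as cursor:
    introspection = connection.introspection
    tables = introspection.get_table_list(cursor)  # [TableInfo(name, type), ...]
    columns = introspection.get_table_description(cursor, "app_book")
    constraints = introspection.get_constraints(cursor, "app_book")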
355  env/lib/python3.8/site-packages/django/db/backends/postgresql/operations.py  vendored  Normal file
@@ -0,0 +1,355 @@
from psycopg2.extras import Inet

from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import split_tzname_delta
from django.db.models.constants import OnConflict


class DatabaseOperations(BaseDatabaseOperations):
    cast_char_field_without_max_length = "varchar"
    explain_prefix = "EXPLAIN"
    explain_options = frozenset(
        [
            "ANALYZE",
            "BUFFERS",
            "COSTS",
            "SETTINGS",
            "SUMMARY",
            "TIMING",
            "VERBOSE",
            "WAL",
        ]
    )
    cast_data_types = {
        "AutoField": "integer",
        "BigAutoField": "bigint",
        "SmallAutoField": "smallint",
    }

    def unification_cast_sql(self, output_field):
        internal_type = output_field.get_internal_type()
        if internal_type in (
            "GenericIPAddressField",
            "IPAddressField",
            "TimeField",
            "UUIDField",
        ):
            # PostgreSQL will resolve a union as type 'text' if input types are
            # 'unknown'.
            # https://www.postgresql.org/docs/current/typeconv-union-case.html
            # These fields cannot be implicitly cast back in the default
            # PostgreSQL configuration so we need to explicitly cast them.
            # We must also remove components of the type within brackets:
            # varchar(255) -> varchar.
            return (
                "CAST(%%s AS %s)" % output_field.db_type(self.connection).split("(")[0]
            )
        return "%s"

    def date_extract_sql(self, lookup_type, sql, params):
        # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        extract_sql = f"EXTRACT(%s FROM {sql})"
        extract_param = lookup_type
        if lookup_type == "week_day":
            # For consistency across backends, we return Sunday=1, Saturday=7.
            extract_sql = f"EXTRACT(%s FROM {sql}) + 1"
            extract_param = "dow"
        elif lookup_type == "iso_week_day":
            extract_param = "isodow"
        elif lookup_type == "iso_year":
            extract_param = "isoyear"
        return extract_sql, (extract_param, *params)

    def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
        sql, params = self._convert_sql_to_tz(sql, params, tzname)
        # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params)

    def _prepare_tzname_delta(self, tzname):
        tzname, sign, offset = split_tzname_delta(tzname)
        if offset:
            sign = "-" if sign == "+" else "+"
            return f"{tzname}{sign}{offset}"
        return tzname

    def _convert_sql_to_tz(self, sql, params, tzname):
        if tzname and settings.USE_TZ:
            tzname_param = self._prepare_tzname_delta(tzname)
            return f"{sql} AT TIME ZONE %s", (*params, tzname_param)
        return sql, params

    def datetime_cast_date_sql(self, sql, params, tzname):
        sql, params = self._convert_sql_to_tz(sql, params, tzname)
        return f"({sql})::date", params

    def datetime_cast_time_sql(self, sql, params, tzname):
        sql, params = self._convert_sql_to_tz(sql, params, tzname)
        return f"({sql})::time", params

    def datetime_extract_sql(self, lookup_type, sql, params, tzname):
        sql, params = self._convert_sql_to_tz(sql, params, tzname)
        if lookup_type == "second":
            # Truncate fractional seconds.
            return (
                f"EXTRACT(%s FROM DATE_TRUNC(%s, {sql}))",
                ("second", "second", *params),
            )
        return self.date_extract_sql(lookup_type, sql, params)

    def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
        sql, params = self._convert_sql_to_tz(sql, params, tzname)
        # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params)

    def time_extract_sql(self, lookup_type, sql, params):
        if lookup_type == "second":
            # Truncate fractional seconds.
            return (
                f"EXTRACT(%s FROM DATE_TRUNC(%s, {sql}))",
                ("second", "second", *params),
            )
        return self.date_extract_sql(lookup_type, sql, params)

    def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
        sql, params = self._convert_sql_to_tz(sql, params, tzname)
        return f"DATE_TRUNC(%s, {sql})::time", (lookup_type, *params)

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def fetch_returned_insert_rows(self, cursor):
        """
        Given a cursor object that has just performed an INSERT...RETURNING
        statement into a table, return the tuple of returned data.
        """
        return cursor.fetchall()

    def lookup_cast(self, lookup_type, internal_type=None):
        lookup = "%s"

        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in (
            "iexact",
            "contains",
            "icontains",
            "startswith",
            "istartswith",
            "endswith",
            "iendswith",
            "regex",
            "iregex",
        ):
            if internal_type in ("IPAddressField", "GenericIPAddressField"):
                lookup = "HOST(%s)"
            elif internal_type in ("CICharField", "CIEmailField", "CITextField"):
                lookup = "%s::citext"
            else:
                lookup = "%s::text"

        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ("iexact", "icontains", "istartswith", "iendswith"):
            lookup = "UPPER(%s)" % lookup

        return lookup

    def no_limit_value(self):
        return None

    def prepare_sql_script(self, sql):
        return [sql]

    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def set_time_zone_sql(self):
        return "SET TIME ZONE %s"

    def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
        if not tables:
            return []

        # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows us
        # to truncate tables referenced by a foreign key in any other table.
        sql_parts = [
            style.SQL_KEYWORD("TRUNCATE"),
            ", ".join(style.SQL_FIELD(self.quote_name(table)) for table in tables),
        ]
        if reset_sequences:
            sql_parts.append(style.SQL_KEYWORD("RESTART IDENTITY"))
        if allow_cascade:
            sql_parts.append(style.SQL_KEYWORD("CASCADE"))
        return ["%s;" % " ".join(sql_parts)]

    def sequence_reset_by_name_sql(self, style, sequences):
        # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
        # to reset sequence indices
        sql = []
        for sequence_info in sequences:
            table_name = sequence_info["table"]
            # 'id' will be the case if it's an m2m using an autogenerated
            # intermediate table (see BaseDatabaseIntrospection.sequence_list).
            column_name = sequence_info["column"] or "id"
            sql.append(
                "%s setval(pg_get_serial_sequence('%s','%s'), 1, false);"
                % (
                    style.SQL_KEYWORD("SELECT"),
                    style.SQL_TABLE(self.quote_name(table_name)),
                    style.SQL_FIELD(column_name),
                )
            )
        return sql

    def tablespace_sql(self, tablespace, inline=False):
        if inline:
            return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
        else:
            return "TABLESPACE %s" % self.quote_name(tablespace)

    def sequence_reset_sql(self, style, model_list):
        from django.db import models

        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk
            # value if there are records, or 1 if there are none. Set the
            # `is_called` property (the third argument to `setval`) to true if
            # there are records (as the max pk value is already in use),
            # otherwise set it to false. Use pg_get_serial_sequence to get the
            # underlying sequence name from the table name and column name.

            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append(
                        "%s setval(pg_get_serial_sequence('%s','%s'), "
                        "coalesce(max(%s), 1), max(%s) %s null) %s %s;"
                        % (
                            style.SQL_KEYWORD("SELECT"),
                            style.SQL_TABLE(qn(model._meta.db_table)),
                            style.SQL_FIELD(f.column),
                            style.SQL_FIELD(qn(f.column)),
                            style.SQL_FIELD(qn(f.column)),
                            style.SQL_KEYWORD("IS NOT"),
                            style.SQL_KEYWORD("FROM"),
                            style.SQL_TABLE(qn(model._meta.db_table)),
                        )
                    )
                    # Only one AutoField is allowed per model, so don't bother
                    # continuing.
                    break
        return output

    def prep_for_iexact_query(self, x):
        return x

    def max_name_length(self):
        """
        Return the maximum length of an identifier.

        The maximum length of an identifier is 63 by default, but can be
        changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h.

        This implementation returns 63, but can be overridden by a custom
        database backend that inherits most of its behavior from this one.
        """
        return 63

    def distinct_sql(self, fields, params):
        if fields:
            params = [param for param_list in params for param in param_list]
            return (["DISTINCT ON (%s)" % ", ".join(fields)], params)
        else:
            return ["DISTINCT"], []

    def last_executed_query(self, cursor, sql, params):
        # https://www.psycopg.org/docs/cursor.html#cursor.query
        # The query attribute is a Psycopg extension to the DB API 2.0.
        if cursor.query is not None:
            return cursor.query.decode()
        return None

    def return_insert_columns(self, fields):
        if not fields:
            return "", ()
        columns = [
            "%s.%s"
            % (
                self.quote_name(field.model._meta.db_table),
                self.quote_name(field.column),
            )
            for field in fields
        ]
        return "RETURNING %s" % ", ".join(columns), ()

    def bulk_insert_sql(self, fields, placeholder_rows):
        placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
        values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
        return "VALUES " + values_sql

    def adapt_datefield_value(self, value):
        return value

    def adapt_datetimefield_value(self, value):
        return value

    def adapt_timefield_value(self, value):
        return value

    def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
        return value

    def adapt_ipaddressfield_value(self, value):
        if value:
            return Inet(value)
        return None

    def subtract_temporals(self, internal_type, lhs, rhs):
        if internal_type == "DateField":
            lhs_sql, lhs_params = lhs
            rhs_sql, rhs_params = rhs
            params = (*lhs_params, *rhs_params)
            return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params
        return super().subtract_temporals(internal_type, lhs, rhs)

    def explain_query_prefix(self, format=None, **options):
        extra = {}
        # Normalize options.
        if options:
            options = {
                name.upper(): "true" if value else "false"
                for name, value in options.items()
            }
            for valid_option in self.explain_options:
                value = options.pop(valid_option, None)
                if value is not None:
                    extra[valid_option] = value
        prefix = super().explain_query_prefix(format, **options)
        if format:
            extra["FORMAT"] = format
        if extra:
            prefix += " (%s)" % ", ".join("%s %s" % i for i in extra.items())
        return prefix

    def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields):
        if on_conflict == OnConflict.IGNORE:
            return "ON CONFLICT DO NOTHING"
        if on_conflict == OnConflict.UPDATE:
            return "ON CONFLICT(%s) DO UPDATE SET %s" % (
                ", ".join(map(self.quote_name, unique_fields)),
                ", ".join(
                    [
                        f"{field} = EXCLUDED.{field}"
                        for field in map(self.quote_name, update_fields)
                    ]
                ),
            )
        return super().on_conflict_suffix_sql(
            fields,
            on_conflict,
            update_fields,
            unique_fields,
        )
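Note: sql_flush() above is what `manage.py flush` ultimately calls; a sketch with hypothetical table names, assuming a configured PostgreSQL "default" connection:

from django.core.management.color import no_style
from django.db import connection

statements = connection.ops.sql_flush(
    no_style(),
    ["app_book", "app_author"],
    reset_sequences=True,
    allow_cascade=True,
)
# statements == ['TRUNCATE "app_book", "app_author" RESTART IDENTITY CASCADE;']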
299  env/lib/python3.8/site-packages/django/db/backends/postgresql/schema.py  vendored  Normal file
@@ -0,0 +1,299 @@
import psycopg2

from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import IndexColumns
from django.db.backends.utils import strip_quotes


class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):

    # Setting all constraints to IMMEDIATE to allow changing data in the same
    # transaction.
    sql_update_with_default = (
        "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
        "; SET CONSTRAINTS ALL IMMEDIATE"
    )

    sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"

    sql_create_index = (
        "CREATE INDEX %(name)s ON %(table)s%(using)s "
        "(%(columns)s)%(include)s%(extra)s%(condition)s"
    )
    sql_create_index_concurrently = (
        "CREATE INDEX CONCURRENTLY %(name)s ON %(table)s%(using)s "
        "(%(columns)s)%(include)s%(extra)s%(condition)s"
    )
    sql_delete_index = "DROP INDEX IF EXISTS %(name)s"
    sql_delete_index_concurrently = "DROP INDEX CONCURRENTLY IF EXISTS %(name)s"

    # Setting the constraint to IMMEDIATE to allow changing data in the same
    # transaction.
    sql_create_column_inline_fk = (
        "CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s"
        "; SET CONSTRAINTS %(namespace)s%(name)s IMMEDIATE"
    )
    # Setting the constraint to IMMEDIATE runs any deferred checks to allow
    # dropping it in the same transaction.
    sql_delete_fk = (
        "SET CONSTRAINTS %(name)s IMMEDIATE; "
        "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
    )
    sql_delete_procedure = "DROP FUNCTION %(procedure)s(%(param_types)s)"

    sql_add_identity = (
        "ALTER TABLE %(table)s ALTER COLUMN %(column)s ADD "
        "GENERATED BY DEFAULT AS IDENTITY"
    )
    sql_drop_indentity = (
        "ALTER TABLE %(table)s ALTER COLUMN %(column)s DROP IDENTITY IF EXISTS"
    )

    def quote_value(self, value):
        if isinstance(value, str):
            value = value.replace("%", "%%")
        adapted = psycopg2.extensions.adapt(value)
        if hasattr(adapted, "encoding"):
            adapted.encoding = "utf8"
        # getquoted() returns a quoted bytestring of the adapted value.
        return adapted.getquoted().decode()

    def _field_indexes_sql(self, model, field):
        output = super()._field_indexes_sql(model, field)
        like_index_statement = self._create_like_index_sql(model, field)
        if like_index_statement is not None:
            output.append(like_index_statement)
        return output

    def _field_data_type(self, field):
        if field.is_relation:
            return field.rel_db_type(self.connection)
        return self.connection.data_types.get(
            field.get_internal_type(),
            field.db_type(self.connection),
        )

    def _field_base_data_types(self, field):
        # Yield base data types for array fields.
        if field.base_field.get_internal_type() == "ArrayField":
            yield from self._field_base_data_types(field.base_field)
        else:
            yield self._field_data_type(field.base_field)

    def _create_like_index_sql(self, model, field):
        """
        Return the statement to create an index with varchar operator pattern
        when the column type is 'varchar' or 'text', otherwise return None.
        """
        db_type = field.db_type(connection=self.connection)
        if db_type is not None and (field.db_index or field.unique):
            # Fields with database column types of `varchar` and `text` need
            # a second index that specifies their operator class, which is
            # needed when performing correct LIKE queries outside the
            # C locale. See #12234.
            #
            # The same doesn't apply to array fields such as varchar[size]
            # and text[size], so skip them.
            if "[" in db_type:
                return None
            if db_type.startswith("varchar"):
                return self._create_index_sql(
                    model,
                    fields=[field],
                    suffix="_like",
                    opclasses=["varchar_pattern_ops"],
                )
            elif db_type.startswith("text"):
                return self._create_index_sql(
                    model,
                    fields=[field],
                    suffix="_like",
                    opclasses=["text_pattern_ops"],
                )
        return None

    def _alter_column_type_sql(self, model, old_field, new_field, new_type):
        self.sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
        # Cast when data type changed.
        using_sql = " USING %(column)s::%(type)s"
        new_internal_type = new_field.get_internal_type()
        old_internal_type = old_field.get_internal_type()
        if new_internal_type == "ArrayField" and new_internal_type == old_internal_type:
            # Compare base data types for array fields.
            if list(self._field_base_data_types(old_field)) != list(
                self._field_base_data_types(new_field)
            ):
                self.sql_alter_column_type += using_sql
        elif self._field_data_type(old_field) != self._field_data_type(new_field):
            self.sql_alter_column_type += using_sql
        # Make ALTER TYPE with IDENTITY make sense.
        table = strip_quotes(model._meta.db_table)
        auto_field_types = {
            "AutoField",
            "BigAutoField",
            "SmallAutoField",
        }
        old_is_auto = old_internal_type in auto_field_types
        new_is_auto = new_internal_type in auto_field_types
        if new_is_auto and not old_is_auto:
            column = strip_quotes(new_field.column)
            return (
                (
                    self.sql_alter_column_type
                    % {
                        "column": self.quote_name(column),
                        "type": new_type,
                    },
                    [],
                ),
                [
                    (
                        self.sql_add_identity
                        % {
                            "table": self.quote_name(table),
                            "column": self.quote_name(column),
                        },
                        [],
                    ),
                ],
            )
        elif old_is_auto and not new_is_auto:
            # Drop IDENTITY if exists (pre-Django 4.1 serial columns don't have
            # it).
            self.execute(
                self.sql_drop_indentity
                % {
                    "table": self.quote_name(table),
                    "column": self.quote_name(strip_quotes(old_field.column)),
                }
            )
            column = strip_quotes(new_field.column)
            sequence_name = "%s_%s_seq" % (table, column)
            fragment, _ = super()._alter_column_type_sql(
                model, old_field, new_field, new_type
            )
            return fragment, [
                (
                    # Drop the sequence if exists (Django 4.1+ identity columns
                    # don't have it).
                    self.sql_delete_sequence
                    % {
                        "sequence": self.quote_name(sequence_name),
                    },
                    [],
                ),
            ]
        else:
            return super()._alter_column_type_sql(model, old_field, new_field, new_type)

    def _alter_field(
        self,
        model,
        old_field,
        new_field,
        old_type,
        new_type,
        old_db_params,
        new_db_params,
        strict=False,
    ):
        # Drop indexes on varchar/text/citext columns that are changing to a
        # different type.
        if (old_field.db_index or old_field.unique) and (
            (old_type.startswith("varchar") and not new_type.startswith("varchar"))
            or (old_type.startswith("text") and not new_type.startswith("text"))
            or (old_type.startswith("citext") and not new_type.startswith("citext"))
        ):
            index_name = self._create_index_name(
                model._meta.db_table, [old_field.column], suffix="_like"
            )
            self.execute(self._delete_index_sql(model, index_name))

        super()._alter_field(
            model,
            old_field,
            new_field,
            old_type,
            new_type,
            old_db_params,
            new_db_params,
            strict,
        )
        # Added an index? Create any PostgreSQL-specific indexes.
        if (not (old_field.db_index or old_field.unique) and new_field.db_index) or (
            not old_field.unique and new_field.unique
        ):
            like_index_statement = self._create_like_index_sql(model, new_field)
            if like_index_statement is not None:
                self.execute(like_index_statement)

        # Removed an index? Drop any PostgreSQL-specific indexes.
        if old_field.unique and not (new_field.db_index or new_field.unique):
            index_to_remove = self._create_index_name(
                model._meta.db_table, [old_field.column], suffix="_like"
            )
            self.execute(self._delete_index_sql(model, index_to_remove))

    def _index_columns(self, table, columns, col_suffixes, opclasses):
        if opclasses:
            return IndexColumns(
                table,
                columns,
                self.quote_name,
                col_suffixes=col_suffixes,
                opclasses=opclasses,
            )
        return super()._index_columns(table, columns, col_suffixes, opclasses)

    def add_index(self, model, index, concurrently=False):
        self.execute(
            index.create_sql(model, self, concurrently=concurrently), params=None
        )

    def remove_index(self, model, index, concurrently=False):
        self.execute(index.remove_sql(model, self, concurrently=concurrently))

    def _delete_index_sql(self, model, name, sql=None, concurrently=False):
        sql = (
            self.sql_delete_index_concurrently
            if concurrently
            else self.sql_delete_index
        )
        return super()._delete_index_sql(model, name, sql)

    def _create_index_sql(
        self,
        model,
        *,
        fields=None,
        name=None,
        suffix="",
        using="",
        db_tablespace=None,
        col_suffixes=(),
        sql=None,
        opclasses=(),
        condition=None,
        concurrently=False,
        include=None,
        expressions=None,
    ):
        sql = (
            self.sql_create_index
            if not concurrently
            else self.sql_create_index_concurrently
        )
        return super()._create_index_sql(
            model,
            fields=fields,
            name=name,
            suffix=suffix,
            using=using,
            db_tablespace=db_tablespace,
            col_suffixes=col_suffixes,
            sql=sql,
            opclasses=opclasses,
            condition=condition,
            concurrently=concurrently,
            include=include,
            expressions=expressions,
        )
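Note: add_index() and remove_index() above accept concurrently=True to use the CONCURRENTLY statement variants; since CREATE INDEX CONCURRENTLY cannot run inside a transaction, the schema editor has to be opened with atomic=False. A sketch; "Book" and "myapp" are hypothetical:

from django.db import connection
from django.db.models import Index

from myapp.models import Book  # hypothetical model

index = Index(fields=["title"], name="book_title_idx")
with connection.schema_editor(atomic=False) as editor:
    editor.add_index(Book, index, concurrently=True)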