- """
- The main QuerySet implementation. This provides the public API for the ORM.
- """
-
- import copy
- import operator
- import warnings
- from itertools import chain, islice
-
- from asgiref.sync import sync_to_async
-
- import django
- from django.conf import settings
- from django.core import exceptions
- from django.db import (
- DJANGO_VERSION_PICKLE_KEY,
- IntegrityError,
- NotSupportedError,
- connections,
- router,
- transaction,
- )
- from django.db.models import AutoField, DateField, DateTimeField, sql
- from django.db.models.constants import LOOKUP_SEP, OnConflict
- from django.db.models.deletion import Collector
- from django.db.models.expressions import Case, F, Ref, Value, When
- from django.db.models.functions import Cast, Trunc
- from django.db.models.query_utils import FilteredRelation, Q
- from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
- from django.db.models.utils import create_namedtuple_class, resolve_callables
- from django.utils import timezone
- from django.utils.deprecation import RemovedInDjango50Warning
- from django.utils.functional import cached_property, partition
-
- # The maximum number of results to fetch in a get() query.
- MAX_GET_RESULTS = 21
-
- # The maximum number of items to display in a QuerySet.__repr__
- REPR_OUTPUT_SIZE = 20
-
-
- class BaseIterable:
- def __init__(
- self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
- ):
- self.queryset = queryset
- self.chunked_fetch = chunked_fetch
- self.chunk_size = chunk_size
-
- async def _async_generator(self):
- # Generators don't actually start running until the first time you call
- # next() on them, so make the generator object in the async thread and
- # then repeatedly dispatch to it in a sync thread.
- sync_generator = self.__iter__()
-
- def next_slice(gen):
- return list(islice(gen, self.chunk_size))
-
- while True:
- chunk = await sync_to_async(next_slice)(sync_generator)
- for item in chunk:
- yield item
- if len(chunk) < self.chunk_size:
- break
-
- # __aiter__() is a *synchronous* method that has to then return an
- # *asynchronous* iterator/generator. Thus, nest an async generator inside
- # it.
- # This is a generic iterable converter for now, and is going to suffer a
- # performance penalty on large sets of items due to the cost of crossing
- # over the sync barrier for each chunk. Custom __aiter__() methods should
- # be added to each Iterable subclass, but that needs some work in the
- # Compiler first.
- def __aiter__(self):
- return self._async_generator()
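-
- # Illustrative sketch (not part of Django): this async generator is what
- # QuerySet.aiterator() consumes chunk by chunk. Assuming a hypothetical
- # ``Entry`` model:
- #
- #     async def stream_headlines():
- #         async for entry in Entry.objects.aiterator(chunk_size=500):
- #             print(entry.headline)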
-
-
- class ModelIterable(BaseIterable):
- """Iterable that yields a model instance for each row."""
-
- def __iter__(self):
- queryset = self.queryset
- db = queryset.db
- compiler = queryset.query.get_compiler(using=db)
- # Execute the query. This will also fill compiler.select, klass_info,
- # and annotations.
- results = compiler.execute_sql(
- chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
- )
- select, klass_info, annotation_col_map = (
- compiler.select,
- compiler.klass_info,
- compiler.annotation_col_map,
- )
- model_cls = klass_info["model"]
- select_fields = klass_info["select_fields"]
- model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
- init_list = [
- f[0].target.attname for f in select[model_fields_start:model_fields_end]
- ]
- related_populators = get_related_populators(klass_info, select, db)
- known_related_objects = [
- (
- field,
- related_objs,
- operator.attrgetter(
- *[
- field.attname
- if from_field == "self"
- else queryset.model._meta.get_field(from_field).attname
- for from_field in field.from_fields
- ]
- ),
- )
- for field, related_objs in queryset._known_related_objects.items()
- ]
- for row in compiler.results_iter(results):
- obj = model_cls.from_db(
- db, init_list, row[model_fields_start:model_fields_end]
- )
- for rel_populator in related_populators:
- rel_populator.populate(row, obj)
- if annotation_col_map:
- for attr_name, col_pos in annotation_col_map.items():
- setattr(obj, attr_name, row[col_pos])
-
- # Add the known related objects to the model.
- for field, rel_objs, rel_getter in known_related_objects:
- # Avoid overwriting objects loaded by, e.g., select_related().
- if field.is_cached(obj):
- continue
- rel_obj_id = rel_getter(obj)
- try:
- rel_obj = rel_objs[rel_obj_id]
- except KeyError:
- pass # May happen in qs1 | qs2 scenarios.
- else:
- setattr(obj, field.name, rel_obj)
-
- yield obj
-
-
- class RawModelIterable(BaseIterable):
- """
- Iterable that yields a model instance for each row from a raw queryset.
- """
-
- def __iter__(self):
- # Cache some things for performance reasons outside the loop.
- db = self.queryset.db
- query = self.queryset.query
- connection = connections[db]
- compiler = connection.ops.compiler("SQLCompiler")(query, connection, db)
- query_iterator = iter(query)
-
- try:
- (
- model_init_names,
- model_init_pos,
- annotation_fields,
- ) = self.queryset.resolve_model_init_order()
- model_cls = self.queryset.model
- if model_cls._meta.pk.attname not in model_init_names:
- raise exceptions.FieldDoesNotExist(
- "Raw query must include the primary key"
- )
- fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns]
- converters = compiler.get_converters(
- [f.get_col(f.model._meta.db_table) if f else None for f in fields]
- )
- if converters:
- query_iterator = compiler.apply_converters(query_iterator, converters)
- for values in query_iterator:
- # Associate fields to values
- model_init_values = [values[pos] for pos in model_init_pos]
- instance = model_cls.from_db(db, model_init_names, model_init_values)
- if annotation_fields:
- for column, pos in annotation_fields:
- setattr(instance, column, values[pos])
- yield instance
- finally:
- # Done iterating the Query. If it has its own cursor, close it.
- if hasattr(query, "cursor") and query.cursor:
- query.cursor.close()
-
-
- class ValuesIterable(BaseIterable):
- """
- Iterable returned by QuerySet.values() that yields a dict for each row.
- """
-
- def __iter__(self):
- queryset = self.queryset
- query = queryset.query
- compiler = query.get_compiler(queryset.db)
-
- # extra(select=...) cols are always at the start of the row.
- names = [
- *query.extra_select,
- *query.values_select,
- *query.annotation_select,
- ]
- indexes = range(len(names))
- for row in compiler.results_iter(
- chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
- ):
- yield {names[i]: row[i] for i in indexes}
-
-
- class ValuesListIterable(BaseIterable):
- """
- Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
- for each row.
- """
-
- def __iter__(self):
- queryset = self.queryset
- query = queryset.query
- compiler = query.get_compiler(queryset.db)
-
- if queryset._fields:
- # extra(select=...) cols are always at the start of the row.
- names = [
- *query.extra_select,
- *query.values_select,
- *query.annotation_select,
- ]
- fields = [
- *queryset._fields,
- *(f for f in query.annotation_select if f not in queryset._fields),
- ]
- if fields != names:
- # Reorder according to fields.
- index_map = {name: idx for idx, name in enumerate(names)}
- rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
- return map(
- rowfactory,
- compiler.results_iter(
- chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
- ),
- )
- return compiler.results_iter(
- tuple_expected=True,
- chunked_fetch=self.chunked_fetch,
- chunk_size=self.chunk_size,
- )
-
-
- class NamedValuesListIterable(ValuesListIterable):
- """
- Iterable returned by QuerySet.values_list(named=True) that yields a
- namedtuple for each row.
- """
-
- def __iter__(self):
- queryset = self.queryset
- if queryset._fields:
- names = queryset._fields
- else:
- query = queryset.query
- names = [
- *query.extra_select,
- *query.values_select,
- *query.annotation_select,
- ]
- tuple_class = create_namedtuple_class(*names)
- new = tuple.__new__
- for row in super().__iter__():
- yield new(tuple_class, row)
-
-
- class FlatValuesListIterable(BaseIterable):
- """
- Iterable returned by QuerySet.values_list(flat=True) that yields single
- values.
- """
-
- def __iter__(self):
- queryset = self.queryset
- compiler = queryset.query.get_compiler(queryset.db)
- for row in compiler.results_iter(
- chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
- ):
- yield row[0]
-
-
- class QuerySet:
- """Represent a lazy database lookup for a set of objects."""
-
- def __init__(self, model=None, query=None, using=None, hints=None):
- self.model = model
- self._db = using
- self._hints = hints or {}
- self._query = query or sql.Query(self.model)
- self._result_cache = None
- self._sticky_filter = False
- self._for_write = False
- self._prefetch_related_lookups = ()
- self._prefetch_done = False
- self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
- self._iterable_class = ModelIterable
- self._fields = None
- self._defer_next_filter = False
- self._deferred_filter = None
-
- @property
- def query(self):
- if self._deferred_filter:
- negate, args, kwargs = self._deferred_filter
- self._filter_or_exclude_inplace(negate, args, kwargs)
- self._deferred_filter = None
- return self._query
-
- @query.setter
- def query(self, value):
- if value.values_select:
- self._iterable_class = ValuesIterable
- self._query = value
-
- def as_manager(cls):
- # Address the circular dependency between `QuerySet` and `Manager`.
- from django.db.models.manager import Manager
-
- manager = Manager.from_queryset(cls)()
- manager._built_with_as_manager = True
- return manager
-
- as_manager.queryset_only = True
- as_manager = classmethod(as_manager)
-
- ########################
- # PYTHON MAGIC METHODS #
- ########################
-
- def __deepcopy__(self, memo):
- """Don't populate the QuerySet's cache."""
- obj = self.__class__()
- for k, v in self.__dict__.items():
- if k == "_result_cache":
- obj.__dict__[k] = None
- else:
- obj.__dict__[k] = copy.deepcopy(v, memo)
- return obj
-
- def __getstate__(self):
- # Force the cache to be fully populated.
- self._fetch_all()
- return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}
-
- def __setstate__(self, state):
- pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
- if pickled_version:
- if pickled_version != django.__version__:
- warnings.warn(
- "Pickled queryset instance's Django version %s does not "
- "match the current version %s."
- % (pickled_version, django.__version__),
- RuntimeWarning,
- stacklevel=2,
- )
- else:
- warnings.warn(
- "Pickled queryset instance's Django version is not specified.",
- RuntimeWarning,
- stacklevel=2,
- )
- self.__dict__.update(state)
-
- def __repr__(self):
- data = list(self[: REPR_OUTPUT_SIZE + 1])
- if len(data) > REPR_OUTPUT_SIZE:
- data[-1] = "...(remaining elements truncated)..."
- return "<%s %r>" % (self.__class__.__name__, data)
-
- def __len__(self):
- self._fetch_all()
- return len(self._result_cache)
-
- def __iter__(self):
- """
- The queryset iterator protocol uses three nested iterators in the
- default case:
- 1. sql.compiler.execute_sql()
- - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
- using cursor.fetchmany(). This part is responsible for
- doing some column masking, and returning the rows in chunks.
- 2. sql.compiler.results_iter()
- - Returns one row at a time. At this point the rows are still just
- tuples. In some cases the return values are converted to
- Python values at this location.
- 3. self.iterator()
- - Responsible for turning the rows into model objects.
- """
- self._fetch_all()
- return iter(self._result_cache)
-
- def __aiter__(self):
- # Remember, __aiter__ itself is synchronous, it's the thing it returns
- # that is async!
- async def generator():
- await sync_to_async(self._fetch_all)()
- for item in self._result_cache:
- yield item
-
- return generator()
-
- def __bool__(self):
- self._fetch_all()
- return bool(self._result_cache)
-
- def __getitem__(self, k):
- """Retrieve an item or slice from the set of results."""
- if not isinstance(k, (int, slice)):
- raise TypeError(
- "QuerySet indices must be integers or slices, not %s."
- % type(k).__name__
- )
- if (isinstance(k, int) and k < 0) or (
- isinstance(k, slice)
- and (
- (k.start is not None and k.start < 0)
- or (k.stop is not None and k.stop < 0)
- )
- ):
- raise ValueError("Negative indexing is not supported.")
-
- if self._result_cache is not None:
- return self._result_cache[k]
-
- if isinstance(k, slice):
- qs = self._chain()
- if k.start is not None:
- start = int(k.start)
- else:
- start = None
- if k.stop is not None:
- stop = int(k.stop)
- else:
- stop = None
- qs.query.set_limits(start, stop)
- return list(qs)[:: k.step] if k.step else qs
-
- qs = self._chain()
- qs.query.set_limits(k, k + 1)
- qs._fetch_all()
- return qs._result_cache[0]
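-
- # Illustrative sketch (not part of Django): slicing returns a new, still
- # lazy QuerySet with LIMIT/OFFSET applied, while integer indexing fetches a
- # single row immediately. Assuming a hypothetical ``Entry`` model:
- #
- #     page = Entry.objects.order_by("pk")[10:20]     # lazy, LIMIT 10 OFFSET 10
- #     first = Entry.objects.order_by("pk")[0]        # executes the query now
- #     sample = Entry.objects.order_by("pk")[0:50:2]  # a step forces evaluation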
-
- def __class_getitem__(cls, *args, **kwargs):
- return cls
-
- def __and__(self, other):
- self._check_operator_queryset(other, "&")
- self._merge_sanity_check(other)
- if isinstance(other, EmptyQuerySet):
- return other
- if isinstance(self, EmptyQuerySet):
- return self
- combined = self._chain()
- combined._merge_known_related_objects(other)
- combined.query.combine(other.query, sql.AND)
- return combined
-
- def __or__(self, other):
- self._check_operator_queryset(other, "|")
- self._merge_sanity_check(other)
- if isinstance(self, EmptyQuerySet):
- return other
- if isinstance(other, EmptyQuerySet):
- return self
- query = (
- self
- if self.query.can_filter()
- else self.model._base_manager.filter(pk__in=self.values("pk"))
- )
- combined = query._chain()
- combined._merge_known_related_objects(other)
- if not other.query.can_filter():
- other = other.model._base_manager.filter(pk__in=other.values("pk"))
- combined.query.combine(other.query, sql.OR)
- return combined
-
- def __xor__(self, other):
- self._check_operator_queryset(other, "^")
- self._merge_sanity_check(other)
- if isinstance(self, EmptyQuerySet):
- return other
- if isinstance(other, EmptyQuerySet):
- return self
- query = (
- self
- if self.query.can_filter()
- else self.model._base_manager.filter(pk__in=self.values("pk"))
- )
- combined = query._chain()
- combined._merge_known_related_objects(other)
- if not other.query.can_filter():
- other = other.model._base_manager.filter(pk__in=other.values("pk"))
- combined.query.combine(other.query, sql.XOR)
- return combined
-
- ####################################
- # METHODS THAT DO DATABASE QUERIES #
- ####################################
-
- def _iterator(self, use_chunked_fetch, chunk_size):
- iterable = self._iterable_class(
- self,
- chunked_fetch=use_chunked_fetch,
- chunk_size=chunk_size or 2000,
- )
- if not self._prefetch_related_lookups or chunk_size is None:
- yield from iterable
- return
-
- iterator = iter(iterable)
- while results := list(islice(iterator, chunk_size)):
- prefetch_related_objects(results, *self._prefetch_related_lookups)
- yield from results
-
- def iterator(self, chunk_size=None):
- """
- An iterator over the results from applying this QuerySet to the
- database. chunk_size must be provided for QuerySets that prefetch
- related objects. Otherwise, a default chunk_size of 2000 is supplied.
- """
- if chunk_size is None:
- if self._prefetch_related_lookups:
- # When the deprecation ends, replace with:
- # raise ValueError(
- # 'chunk_size must be provided when using '
- # 'QuerySet.iterator() after prefetch_related().'
- # )
- warnings.warn(
- "Using QuerySet.iterator() after prefetch_related() "
- "without specifying chunk_size is deprecated.",
- category=RemovedInDjango50Warning,
- stacklevel=2,
- )
- elif chunk_size <= 0:
- raise ValueError("Chunk size must be strictly positive.")
- use_chunked_fetch = not connections[self.db].settings_dict.get(
- "DISABLE_SERVER_SIDE_CURSORS"
- )
- return self._iterator(use_chunked_fetch, chunk_size)
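-
- # Illustrative sketch (not part of Django): iterator() streams rows without
- # populating the result cache; chunk_size is required when combined with
- # prefetch_related(). Assuming a hypothetical ``Entry`` model:
- #
- #     for entry in Entry.objects.iterator(chunk_size=500):
- #         process(entry)  # ``process`` is a placeholder
- #
- #     for entry in Entry.objects.prefetch_related("tags").iterator(chunk_size=100):
- #         ...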
-
- async def aiterator(self, chunk_size=2000):
- """
- An asynchronous iterator over the results from applying this QuerySet
- to the database.
- """
- if self._prefetch_related_lookups:
- raise NotSupportedError(
- "Using QuerySet.aiterator() after prefetch_related() is not supported."
- )
- if chunk_size <= 0:
- raise ValueError("Chunk size must be strictly positive.")
- use_chunked_fetch = not connections[self.db].settings_dict.get(
- "DISABLE_SERVER_SIDE_CURSORS"
- )
- async for item in self._iterable_class(
- self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size
- ):
- yield item
-
- def aggregate(self, *args, **kwargs):
- """
- Return a dictionary containing the calculations (aggregation)
- over the current queryset.
-
- If args is present, the expression is passed as a kwarg using
- the Aggregate object's default alias.
- """
- if self.query.distinct_fields:
- raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
- self._validate_values_are_expressions(
- (*args, *kwargs.values()), method_name="aggregate"
- )
- for arg in args:
- # The default_alias property raises TypeError if default_alias
- # can't be set automatically or AttributeError if it isn't an
- # attribute.
- try:
- arg.default_alias
- except (AttributeError, TypeError):
- raise TypeError("Complex aggregates require an alias")
- kwargs[arg.default_alias] = arg
-
- query = self.query.chain()
- for alias, aggregate_expr in kwargs.items():
- query.add_annotation(aggregate_expr, alias, is_summary=True)
- annotation = query.annotations[alias]
- if not annotation.contains_aggregate:
- raise TypeError("%s is not an aggregate expression" % alias)
- for expr in annotation.get_source_expressions():
- if (
- expr.contains_aggregate
- and isinstance(expr, Ref)
- and expr.refs in kwargs
- ):
- name = expr.refs
- raise exceptions.FieldError(
- "Cannot compute %s('%s'): '%s' is an aggregate"
- % (annotation.name, name, name)
- )
- return query.get_aggregation(self.db, kwargs)
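-
- # Illustrative sketch (not part of Django): positional aggregates are keyed
- # by their default alias, keyword aggregates by the given name. Assuming a
- # hypothetical ``Book`` model with a ``price`` field:
- #
- #     from django.db.models import Avg, Max
- #     Book.objects.aggregate(Avg("price"), highest=Max("price"))
- #     # -> {"price__avg": ..., "highest": ...}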
-
- async def aaggregate(self, *args, **kwargs):
- return await sync_to_async(self.aggregate)(*args, **kwargs)
-
- def count(self):
- """
- Perform a SELECT COUNT() and return the number of records as an
- integer.
-
- If the QuerySet is already fully cached, return the length of the
- cached result set to avoid multiple SELECT COUNT(*) calls.
- """
- if self._result_cache is not None:
- return len(self._result_cache)
-
- return self.query.get_count(using=self.db)
-
- async def acount(self):
- return await sync_to_async(self.count)()
-
- def get(self, *args, **kwargs):
- """
- Perform the query and return a single object matching the given
- keyword arguments.
- """
- if self.query.combinator and (args or kwargs):
- raise NotSupportedError(
- "Calling QuerySet.get(...) with filters after %s() is not "
- "supported." % self.query.combinator
- )
- clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
- if self.query.can_filter() and not self.query.distinct_fields:
- clone = clone.order_by()
- limit = None
- if (
- not clone.query.select_for_update
- or connections[clone.db].features.supports_select_for_update_with_limit
- ):
- limit = MAX_GET_RESULTS
- clone.query.set_limits(high=limit)
- num = len(clone)
- if num == 1:
- return clone._result_cache[0]
- if not num:
- raise self.model.DoesNotExist(
- "%s matching query does not exist." % self.model._meta.object_name
- )
- raise self.model.MultipleObjectsReturned(
- "get() returned more than one %s -- it returned %s!"
- % (
- self.model._meta.object_name,
- num if not limit or num < limit else "more than %s" % (limit - 1),
- )
- )
-
- async def aget(self, *args, **kwargs):
- return await sync_to_async(self.get)(*args, **kwargs)
-
- def create(self, **kwargs):
- """
- Create a new object with the given kwargs, saving it to the database
- and returning the created object.
- """
- obj = self.model(**kwargs)
- self._for_write = True
- obj.save(force_insert=True, using=self.db)
- return obj
-
- async def acreate(self, **kwargs):
- return await sync_to_async(self.create)(**kwargs)
-
- def _prepare_for_bulk_create(self, objs):
- for obj in objs:
- if obj.pk is None:
- # Populate new PK values.
- obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
- obj._prepare_related_fields_for_save(operation_name="bulk_create")
-
- def _check_bulk_create_options(
- self, ignore_conflicts, update_conflicts, update_fields, unique_fields
- ):
- if ignore_conflicts and update_conflicts:
- raise ValueError(
- "ignore_conflicts and update_conflicts are mutually exclusive."
- )
- db_features = connections[self.db].features
- if ignore_conflicts:
- if not db_features.supports_ignore_conflicts:
- raise NotSupportedError(
- "This database backend does not support ignoring conflicts."
- )
- return OnConflict.IGNORE
- elif update_conflicts:
- if not db_features.supports_update_conflicts:
- raise NotSupportedError(
- "This database backend does not support updating conflicts."
- )
- if not update_fields:
- raise ValueError(
- "Fields that will be updated when a row insertion fails "
- "on conflicts must be provided."
- )
- if unique_fields and not db_features.supports_update_conflicts_with_target:
- raise NotSupportedError(
- "This database backend does not support updating "
- "conflicts with specifying unique fields that can trigger "
- "the upsert."
- )
- if not unique_fields and db_features.supports_update_conflicts_with_target:
- raise ValueError(
- "Unique fields that can trigger the upsert must be provided."
- )
- # Updating primary keys and non-concrete fields is forbidden.
- if any(not f.concrete or f.many_to_many for f in update_fields):
- raise ValueError(
- "bulk_create() can only be used with concrete fields in "
- "update_fields."
- )
- if any(f.primary_key for f in update_fields):
- raise ValueError(
- "bulk_create() cannot be used with primary keys in "
- "update_fields."
- )
- if unique_fields:
- if any(not f.concrete or f.many_to_many for f in unique_fields):
- raise ValueError(
- "bulk_create() can only be used with concrete fields "
- "in unique_fields."
- )
- return OnConflict.UPDATE
- return None
-
- def bulk_create(
- self,
- objs,
- batch_size=None,
- ignore_conflicts=False,
- update_conflicts=False,
- update_fields=None,
- unique_fields=None,
- ):
- """
- Insert each of the instances into the database. Do *not* call
- save() on each of the instances, do not send any pre/post_save
- signals, and do not set the primary key attribute if it is an
- autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
- Multi-table models are not supported.
- """
- # When you bulk insert you don't get the primary keys back (if it's an
- # autoincrement, except if can_return_rows_from_bulk_insert=True), so
- # you can't insert into the child tables which reference this. There
- # are two workarounds:
- # 1) This could be implemented if you didn't have an autoincrement pk
- # 2) You could do it by doing O(n) normal inserts into the parent
- # tables to get the primary keys back and then doing a single bulk
- # insert into the childmost table.
- # We currently set the primary keys on the objects when using
- # PostgreSQL via the RETURNING ID clause. It should be possible for
- # Oracle as well, but the semantics for extracting the primary keys is
- # trickier so it's not done yet.
- if batch_size is not None and batch_size <= 0:
- raise ValueError("Batch size must be a positive integer.")
- # Check that the parents share the same concrete model with our
- # model to detect the inheritance pattern ConcreteGrandParent ->
- # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
- # would not identify that case as involving multiple tables.
- for parent in self.model._meta.get_parent_list():
- if parent._meta.concrete_model is not self.model._meta.concrete_model:
- raise ValueError("Can't bulk create a multi-table inherited model")
- if not objs:
- return objs
- opts = self.model._meta
- if unique_fields:
- # Primary key is allowed in unique_fields.
- unique_fields = [
- self.model._meta.get_field(opts.pk.name if name == "pk" else name)
- for name in unique_fields
- ]
- if update_fields:
- update_fields = [self.model._meta.get_field(name) for name in update_fields]
- on_conflict = self._check_bulk_create_options(
- ignore_conflicts,
- update_conflicts,
- update_fields,
- unique_fields,
- )
- self._for_write = True
- fields = opts.concrete_fields
- objs = list(objs)
- self._prepare_for_bulk_create(objs)
- with transaction.atomic(using=self.db, savepoint=False):
- objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
- if objs_with_pk:
- returned_columns = self._batched_insert(
- objs_with_pk,
- fields,
- batch_size,
- on_conflict=on_conflict,
- update_fields=update_fields,
- unique_fields=unique_fields,
- )
- for obj_with_pk, results in zip(objs_with_pk, returned_columns):
- for result, field in zip(results, opts.db_returning_fields):
- if field != opts.pk:
- setattr(obj_with_pk, field.attname, result)
- for obj_with_pk in objs_with_pk:
- obj_with_pk._state.adding = False
- obj_with_pk._state.db = self.db
- if objs_without_pk:
- fields = [f for f in fields if not isinstance(f, AutoField)]
- returned_columns = self._batched_insert(
- objs_without_pk,
- fields,
- batch_size,
- on_conflict=on_conflict,
- update_fields=update_fields,
- unique_fields=unique_fields,
- )
- connection = connections[self.db]
- if (
- connection.features.can_return_rows_from_bulk_insert
- and on_conflict is None
- ):
- assert len(returned_columns) == len(objs_without_pk)
- for obj_without_pk, results in zip(objs_without_pk, returned_columns):
- for result, field in zip(results, opts.db_returning_fields):
- setattr(obj_without_pk, field.attname, result)
- obj_without_pk._state.adding = False
- obj_without_pk._state.db = self.db
-
- return objs
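-
- # Illustrative sketch (not part of Django): a minimal upsert-style call,
- # assuming a hypothetical ``Entry`` model with a unique ``slug`` field and a
- # backend that supports updating conflicts:
- #
- #     Entry.objects.bulk_create(
- #         [Entry(slug="a", headline="A"), Entry(slug="b", headline="B")],
- #         update_conflicts=True,
- #         unique_fields=["slug"],
- #         update_fields=["headline"],
- #     )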
-
- async def abulk_create(
- self,
- objs,
- batch_size=None,
- ignore_conflicts=False,
- update_conflicts=False,
- update_fields=None,
- unique_fields=None,
- ):
- return await sync_to_async(self.bulk_create)(
- objs=objs,
- batch_size=batch_size,
- ignore_conflicts=ignore_conflicts,
- update_conflicts=update_conflicts,
- update_fields=update_fields,
- unique_fields=unique_fields,
- )
-
- def bulk_update(self, objs, fields, batch_size=None):
- """
- Update the given fields in each of the given objects in the database.
- """
- if batch_size is not None and batch_size <= 0:
- raise ValueError("Batch size must be a positive integer.")
- if not fields:
- raise ValueError("Field names must be given to bulk_update().")
- objs = tuple(objs)
- if any(obj.pk is None for obj in objs):
- raise ValueError("All bulk_update() objects must have a primary key set.")
- fields = [self.model._meta.get_field(name) for name in fields]
- if any(not f.concrete or f.many_to_many for f in fields):
- raise ValueError("bulk_update() can only be used with concrete fields.")
- if any(f.primary_key for f in fields):
- raise ValueError("bulk_update() cannot be used with primary key fields.")
- if not objs:
- return 0
- for obj in objs:
- obj._prepare_related_fields_for_save(
- operation_name="bulk_update", fields=fields
- )
- # PK is used twice in the resulting update query, once in the filter
- # and once in the WHEN. Each field will also have one CAST.
- self._for_write = True
- connection = connections[self.db]
- max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs)
- batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
- requires_casting = connection.features.requires_casted_case_in_updates
- batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
- updates = []
- for batch_objs in batches:
- update_kwargs = {}
- for field in fields:
- when_statements = []
- for obj in batch_objs:
- attr = getattr(obj, field.attname)
- if not hasattr(attr, "resolve_expression"):
- attr = Value(attr, output_field=field)
- when_statements.append(When(pk=obj.pk, then=attr))
- case_statement = Case(*when_statements, output_field=field)
- if requires_casting:
- case_statement = Cast(case_statement, output_field=field)
- update_kwargs[field.attname] = case_statement
- updates.append(([obj.pk for obj in batch_objs], update_kwargs))
- rows_updated = 0
- queryset = self.using(self.db)
- with transaction.atomic(using=self.db, savepoint=False):
- for pks, update_kwargs in updates:
- rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)
- return rows_updated
-
- bulk_update.alters_data = True
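-
- # Illustrative sketch (not part of Django): bulk_update() issues CASE/WHEN
- # updates keyed by primary key. Assuming hypothetical ``Entry`` instances
- # already loaded from the database:
- #
- #     entries = list(Entry.objects.filter(needs_review=True))
- #     for entry in entries:
- #         entry.headline = entry.headline.title()
- #     Entry.objects.bulk_update(entries, ["headline"], batch_size=100)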
-
- async def abulk_update(self, objs, fields, batch_size=None):
- return await sync_to_async(self.bulk_update)(
- objs=objs,
- fields=fields,
- batch_size=batch_size,
- )
-
- abulk_update.alters_data = True
-
- def get_or_create(self, defaults=None, **kwargs):
- """
- Look up an object with the given kwargs, creating one if necessary.
- Return a tuple of (object, created), where created is a boolean
- specifying whether an object was created.
- """
- # The get() needs to be targeted at the write database in order
- # to avoid potential transaction consistency problems.
- self._for_write = True
- try:
- return self.get(**kwargs), False
- except self.model.DoesNotExist:
- params = self._extract_model_params(defaults, **kwargs)
- # Try to create an object using passed params.
- try:
- with transaction.atomic(using=self.db):
- params = dict(resolve_callables(params))
- return self.create(**params), True
- except IntegrityError:
- try:
- return self.get(**kwargs), False
- except self.model.DoesNotExist:
- pass
- raise
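-
- # Illustrative sketch (not part of Django): the lookup kwargs identify the
- # row, while ``defaults`` only apply when a new row is created. Assuming a
- # hypothetical ``Author`` model:
- #
- #     author, created = Author.objects.get_or_create(
- #         name="Ada", defaults={"active": True}
- #     )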
-
- async def aget_or_create(self, defaults=None, **kwargs):
- return await sync_to_async(self.get_or_create)(
- defaults=defaults,
- **kwargs,
- )
-
- def update_or_create(self, defaults=None, **kwargs):
- """
- Look up an object with the given kwargs, updating one with defaults
- if it exists, otherwise create a new one.
- Return a tuple (object, created), where created is a boolean
- specifying whether an object was created.
- """
- defaults = defaults or {}
- self._for_write = True
- with transaction.atomic(using=self.db):
- # Lock the row so that a concurrent update is blocked until
- # update_or_create() has performed its save.
- obj, created = self.select_for_update().get_or_create(defaults, **kwargs)
- if created:
- return obj, created
- for k, v in resolve_callables(defaults):
- setattr(obj, k, v)
- obj.save(using=self.db)
- return obj, False
-
- async def aupdate_or_create(self, defaults=None, **kwargs):
- return await sync_to_async(self.update_or_create)(
- defaults=defaults,
- **kwargs,
- )
-
- def _extract_model_params(self, defaults, **kwargs):
- """
- Prepare `params` for creating a model instance based on the given
- kwargs; for use by get_or_create().
- """
- defaults = defaults or {}
- params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
- params.update(defaults)
- property_names = self.model._meta._property_names
- invalid_params = []
- for param in params:
- try:
- self.model._meta.get_field(param)
- except exceptions.FieldDoesNotExist:
- # It's okay to use a model's property if it has a setter.
- if not (param in property_names and getattr(self.model, param).fset):
- invalid_params.append(param)
- if invalid_params:
- raise exceptions.FieldError(
- "Invalid field name(s) for model %s: '%s'."
- % (
- self.model._meta.object_name,
- "', '".join(sorted(invalid_params)),
- )
- )
- return params
-
- def _earliest(self, *fields):
- """
- Return the earliest object according to fields (if given) or by the
- model's Meta.get_latest_by.
- """
- if fields:
- order_by = fields
- else:
- order_by = getattr(self.model._meta, "get_latest_by")
- if order_by and not isinstance(order_by, (tuple, list)):
- order_by = (order_by,)
- if order_by is None:
- raise ValueError(
- "earliest() and latest() require either fields as positional "
- "arguments or 'get_latest_by' in the model's Meta."
- )
- obj = self._chain()
- obj.query.set_limits(high=1)
- obj.query.clear_ordering(force=True)
- obj.query.add_ordering(*order_by)
- return obj.get()
-
- def earliest(self, *fields):
- if self.query.is_sliced:
- raise TypeError("Cannot change a query once a slice has been taken.")
- return self._earliest(*fields)
-
- async def aearliest(self, *fields):
- return await sync_to_async(self.earliest)(*fields)
-
- def latest(self, *fields):
- """
- Return the latest object according to fields (if given) or by the
- model's Meta.get_latest_by.
- """
- if self.query.is_sliced:
- raise TypeError("Cannot change a query once a slice has been taken.")
- return self.reverse()._earliest(*fields)
-
- async def alatest(self, *fields):
- return await sync_to_async(self.latest)(*fields)
-
- def first(self):
- """Return the first object of a query or None if no match is found."""
- for obj in (self if self.ordered else self.order_by("pk"))[:1]:
- return obj
-
- async def afirst(self):
- return await sync_to_async(self.first)()
-
- def last(self):
- """Return the last object of a query or None if no match is found."""
- for obj in (self.reverse() if self.ordered else self.order_by("-pk"))[:1]:
- return obj
-
- async def alast(self):
- return await sync_to_async(self.last)()
-
- def in_bulk(self, id_list=None, *, field_name="pk"):
- """
- Return a dictionary mapping each of the given IDs to the object with
- that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
- """
- if self.query.is_sliced:
- raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
- opts = self.model._meta
- unique_fields = [
- constraint.fields[0]
- for constraint in opts.total_unique_constraints
- if len(constraint.fields) == 1
- ]
- if (
- field_name != "pk"
- and not opts.get_field(field_name).unique
- and field_name not in unique_fields
- and self.query.distinct_fields != (field_name,)
- ):
- raise ValueError(
- "in_bulk()'s field_name must be a unique field but %r isn't."
- % field_name
- )
- if id_list is not None:
- if not id_list:
- return {}
- filter_key = "{}__in".format(field_name)
- batch_size = connections[self.db].features.max_query_params
- id_list = tuple(id_list)
- # If the database has a limit on the number of query parameters
- # (e.g. SQLite), retrieve objects in batches if necessary.
- if batch_size and batch_size < len(id_list):
- qs = ()
- for offset in range(0, len(id_list), batch_size):
- batch = id_list[offset : offset + batch_size]
- qs += tuple(self.filter(**{filter_key: batch}).order_by())
- else:
- qs = self.filter(**{filter_key: id_list}).order_by()
- else:
- qs = self._chain()
- return {getattr(obj, field_name): obj for obj in qs}
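-
- # Illustrative sketch (not part of Django): in_bulk() maps the requested
- # field to instances. Assuming a hypothetical ``Entry`` model with a unique
- # ``slug`` field:
- #
- #     Entry.objects.in_bulk([1, 2, 3])                 # {1: <Entry>, 2: <Entry>, ...}
- #     Entry.objects.in_bulk(["a"], field_name="slug")  # {"a": <Entry>}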
-
- async def ain_bulk(self, id_list=None, *, field_name="pk"):
- return await sync_to_async(self.in_bulk)(
- id_list=id_list,
- field_name=field_name,
- )
-
- def delete(self):
- """Delete the records in the current QuerySet."""
- self._not_support_combined_queries("delete")
- if self.query.is_sliced:
- raise TypeError("Cannot use 'limit' or 'offset' with delete().")
- if self.query.distinct or self.query.distinct_fields:
- raise TypeError("Cannot call delete() after .distinct().")
- if self._fields is not None:
- raise TypeError("Cannot call delete() after .values() or .values_list()")
-
- del_query = self._chain()
-
- # The delete is actually 2 queries - one to find related objects,
- # and one to delete. Make sure that the discovery of related
- # objects is performed on the same database as the deletion.
- del_query._for_write = True
-
- # Disable non-supported fields.
- del_query.query.select_for_update = False
- del_query.query.select_related = False
- del_query.query.clear_ordering(force=True)
-
- collector = Collector(using=del_query.db, origin=self)
- collector.collect(del_query)
- deleted, _rows_count = collector.delete()
-
- # Clear the result cache, in case this QuerySet gets reused.
- self._result_cache = None
- return deleted, _rows_count
-
- delete.alters_data = True
- delete.queryset_only = True
-
- async def adelete(self):
- return await sync_to_async(self.delete)()
-
- adelete.alters_data = True
- adelete.queryset_only = True
-
- def _raw_delete(self, using):
- """
- Delete objects found from the given queryset in a single direct SQL
- query. No signals are sent and there is no protection for cascades.
- """
- query = self.query.clone()
- query.__class__ = sql.DeleteQuery
- cursor = query.get_compiler(using).execute_sql(CURSOR)
- if cursor:
- with cursor:
- return cursor.rowcount
- return 0
-
- _raw_delete.alters_data = True
-
- def update(self, **kwargs):
- """
- Update all elements in the current QuerySet, setting all the given
- fields to the appropriate values.
- """
- self._not_support_combined_queries("update")
- if self.query.is_sliced:
- raise TypeError("Cannot update a query once a slice has been taken.")
- self._for_write = True
- query = self.query.chain(sql.UpdateQuery)
- query.add_update_values(kwargs)
-
- # Inline annotations in order_by(), if possible.
- new_order_by = []
- for col in query.order_by:
- if annotation := query.annotations.get(col):
- if getattr(annotation, "contains_aggregate", False):
- raise exceptions.FieldError(
- f"Cannot update when ordering by an aggregate: {annotation}"
- )
- new_order_by.append(annotation)
- else:
- new_order_by.append(col)
- query.order_by = tuple(new_order_by)
-
- # Clear any annotations so that they won't be present in subqueries.
- query.annotations = {}
- with transaction.mark_for_rollback_on_error(using=self.db):
- rows = query.get_compiler(self.db).execute_sql(CURSOR)
- self._result_cache = None
- return rows
-
- update.alters_data = True
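-
- # Illustrative sketch (not part of Django): update() runs a single UPDATE
- # statement and returns the number of rows matched; it does not call save()
- # or send signals. Assuming a hypothetical ``Entry`` model:
- #
- #     from django.db.models import F
- #     Entry.objects.filter(published=False).update(views=F("views") + 1)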
-
- async def aupdate(self, **kwargs):
- return await sync_to_async(self.update)(**kwargs)
-
- aupdate.alters_data = True
-
- def _update(self, values):
- """
- A version of update() that accepts field objects instead of field names.
- Used primarily for model saving and not intended for use by general
- code (it requires too much poking around at model internals to be
- useful at that level).
- """
- if self.query.is_sliced:
- raise TypeError("Cannot update a query once a slice has been taken.")
- query = self.query.chain(sql.UpdateQuery)
- query.add_update_fields(values)
- # Clear any annotations so that they won't be present in subqueries.
- query.annotations = {}
- self._result_cache = None
- return query.get_compiler(self.db).execute_sql(CURSOR)
-
- _update.alters_data = True
- _update.queryset_only = False
-
- def exists(self):
- """
- Return True if the QuerySet would have any results, False otherwise.
- """
- if self._result_cache is None:
- return self.query.has_results(using=self.db)
- return bool(self._result_cache)
-
- async def aexists(self):
- return await sync_to_async(self.exists)()
-
- def contains(self, obj):
- """
- Return True if the QuerySet contains the provided obj,
- False otherwise.
- """
- self._not_support_combined_queries("contains")
- if self._fields is not None:
- raise TypeError(
- "Cannot call QuerySet.contains() after .values() or .values_list()."
- )
- try:
- if obj._meta.concrete_model != self.model._meta.concrete_model:
- return False
- except AttributeError:
- raise TypeError("'obj' must be a model instance.")
- if obj.pk is None:
- raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
- if self._result_cache is not None:
- return obj in self._result_cache
- return self.filter(pk=obj.pk).exists()
-
- async def acontains(self, obj):
- return await sync_to_async(self.contains)(obj=obj)
-
- def _prefetch_related_objects(self):
- # This method can only be called once the result cache has been filled.
- prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
- self._prefetch_done = True
-
- def explain(self, *, format=None, **options):
- """
- Run an EXPLAIN on the SQL query this QuerySet would perform, and
- return the results.
- """
- return self.query.explain(using=self.db, format=format, **options)
-
- async def aexplain(self, *, format=None, **options):
- return await sync_to_async(self.explain)(format=format, **options)
-
- ##################################################
- # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
- ##################################################
-
- def raw(self, raw_query, params=(), translations=None, using=None):
- if using is None:
- using = self.db
- qs = RawQuerySet(
- raw_query,
- model=self.model,
- params=params,
- translations=translations,
- using=using,
- )
- qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
- return qs
-
- def _values(self, *fields, **expressions):
- clone = self._chain()
- if expressions:
- clone = clone.annotate(**expressions)
- clone._fields = fields
- clone.query.set_values(fields)
- return clone
-
- def values(self, *fields, **expressions):
- fields += tuple(expressions)
- clone = self._values(*fields, **expressions)
- clone._iterable_class = ValuesIterable
- return clone
-
- def values_list(self, *fields, flat=False, named=False):
- if flat and named:
- raise TypeError("'flat' and 'named' can't be used together.")
- if flat and len(fields) > 1:
- raise TypeError(
- "'flat' is not valid when values_list is called with more than one "
- "field."
- )
-
- field_names = {f for f in fields if not hasattr(f, "resolve_expression")}
- _fields = []
- expressions = {}
- counter = 1
- for field in fields:
- if hasattr(field, "resolve_expression"):
- field_id_prefix = getattr(
- field, "default_alias", field.__class__.__name__.lower()
- )
- while True:
- field_id = field_id_prefix + str(counter)
- counter += 1
- if field_id not in field_names:
- break
- expressions[field_id] = field
- _fields.append(field_id)
- else:
- _fields.append(field)
-
- clone = self._values(*_fields, **expressions)
- clone._iterable_class = (
- NamedValuesListIterable
- if named
- else FlatValuesListIterable
- if flat
- else ValuesListIterable
- )
- return clone
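-
- # Illustrative sketch (not part of Django): values_list() shapes each row as
- # a tuple, a single value (flat=True), or a namedtuple (named=True).
- # Assuming a hypothetical ``Entry`` model:
- #
- #     Entry.objects.values_list("id", "headline")       # <QuerySet [(1, "A"), ...]>
- #     Entry.objects.values_list("headline", flat=True)  # <QuerySet ["A", ...]>
- #     Entry.objects.values_list("id", "headline", named=True)[0].headline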
-
- def dates(self, field_name, kind, order="ASC"):
- """
- Return a list of date objects representing all available dates for
- the given field_name, scoped to 'kind'.
- """
- if kind not in ("year", "month", "week", "day"):
- raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
- if order not in ("ASC", "DESC"):
- raise ValueError("'order' must be either 'ASC' or 'DESC'.")
- return (
- self.annotate(
- datefield=Trunc(field_name, kind, output_field=DateField()),
- plain_field=F(field_name),
- )
- .values_list("datefield", flat=True)
- .distinct()
- .filter(plain_field__isnull=False)
- .order_by(("-" if order == "DESC" else "") + "datefield")
- )
-
- # RemovedInDjango50Warning: when the deprecation ends, remove is_dst
- # argument.
- def datetimes(
- self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED
- ):
- """
- Return a list of datetime objects representing all available
- datetimes for the given field_name, scoped to 'kind'.
- """
- if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
- raise ValueError(
- "'kind' must be one of 'year', 'month', 'week', 'day', "
- "'hour', 'minute', or 'second'."
- )
- if order not in ("ASC", "DESC"):
- raise ValueError("'order' must be either 'ASC' or 'DESC'.")
- if settings.USE_TZ:
- if tzinfo is None:
- tzinfo = timezone.get_current_timezone()
- else:
- tzinfo = None
- return (
- self.annotate(
- datetimefield=Trunc(
- field_name,
- kind,
- output_field=DateTimeField(),
- tzinfo=tzinfo,
- is_dst=is_dst,
- ),
- plain_field=F(field_name),
- )
- .values_list("datetimefield", flat=True)
- .distinct()
- .filter(plain_field__isnull=False)
- .order_by(("-" if order == "DESC" else "") + "datetimefield")
- )
-
- def none(self):
- """Return an empty QuerySet."""
- clone = self._chain()
- clone.query.set_empty()
- return clone
-
- ##################################################################
- # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
- ##################################################################
-
- def all(self):
- """
- Return a new QuerySet that is a copy of the current one. This allows a
- QuerySet to proxy for a model manager in some cases.
- """
- return self._chain()
-
- def filter(self, *args, **kwargs):
- """
- Return a new QuerySet instance with the args ANDed to the existing
- set.
- """
- self._not_support_combined_queries("filter")
- return self._filter_or_exclude(False, args, kwargs)
-
- def exclude(self, *args, **kwargs):
- """
- Return a new QuerySet instance with NOT (args) ANDed to the existing
- set.
- """
- self._not_support_combined_queries("exclude")
- return self._filter_or_exclude(True, args, kwargs)
-
- def _filter_or_exclude(self, negate, args, kwargs):
- if (args or kwargs) and self.query.is_sliced:
- raise TypeError("Cannot filter a query once a slice has been taken.")
- clone = self._chain()
- if self._defer_next_filter:
- self._defer_next_filter = False
- clone._deferred_filter = negate, args, kwargs
- else:
- clone._filter_or_exclude_inplace(negate, args, kwargs)
- return clone
-
- def _filter_or_exclude_inplace(self, negate, args, kwargs):
- if negate:
- self._query.add_q(~Q(*args, **kwargs))
- else:
- self._query.add_q(Q(*args, **kwargs))
-
- def complex_filter(self, filter_obj):
- """
- Return a new QuerySet instance with filter_obj added to the filters.
-
- filter_obj can be a Q object or a dictionary of keyword lookup
- arguments.
-
- This exists to support framework features such as 'limit_choices_to',
- and usually it will be more natural to use other methods.
- """
- if isinstance(filter_obj, Q):
- clone = self._chain()
- clone.query.add_q(filter_obj)
- return clone
- else:
- return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
-
- def _combinator_query(self, combinator, *other_qs, all=False):
- # Clone the query to inherit the select list and everything
- clone = self._chain()
- # Clear limits and ordering so they can be reapplied
- clone.query.clear_ordering(force=True)
- clone.query.clear_limits()
- clone.query.combined_queries = (self.query,) + tuple(
- qs.query for qs in other_qs
- )
- clone.query.combinator = combinator
- clone.query.combinator_all = all
- return clone
-
- def union(self, *other_qs, all=False):
- # If the query is an EmptyQuerySet, combine all nonempty querysets.
- if isinstance(self, EmptyQuerySet):
- qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
- if not qs:
- return self
- if len(qs) == 1:
- return qs[0]
- return qs[0]._combinator_query("union", *qs[1:], all=all)
- return self._combinator_query("union", *other_qs, all=all)
-
- def intersection(self, *other_qs):
- # If any query is an EmptyQuerySet, return it.
- if isinstance(self, EmptyQuerySet):
- return self
- for other in other_qs:
- if isinstance(other, EmptyQuerySet):
- return other
- return self._combinator_query("intersection", *other_qs)
-
- def difference(self, *other_qs):
- # If the query is an EmptyQuerySet, return it.
- if isinstance(self, EmptyQuerySet):
- return self
- return self._combinator_query("difference", *other_qs)
-
- def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
- """
- Return a new QuerySet instance that will select objects with a
- FOR UPDATE lock.
- """
- if nowait and skip_locked:
- raise ValueError("The nowait option cannot be used with skip_locked.")
- obj = self._chain()
- obj._for_write = True
- obj.query.select_for_update = True
- obj.query.select_for_update_nowait = nowait
- obj.query.select_for_update_skip_locked = skip_locked
- obj.query.select_for_update_of = of
- obj.query.select_for_no_key_update = no_key
- return obj
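-
- # Illustrative sketch (not part of Django): select_for_update() only locks
- # rows while the surrounding transaction holds them. Assuming a hypothetical
- # ``Entry`` model:
- #
- #     from django.db import transaction
- #     with transaction.atomic():
- #         entry = Entry.objects.select_for_update(skip_locked=True).get(pk=1)
- #         ...  # work with the locked row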
-
- def select_related(self, *fields):
- """
- Return a new QuerySet instance that will select related objects.
-
- If fields are specified, they must be ForeignKey fields and only those
- related objects are included in the selection.
-
- If select_related(None) is called, clear the list.
- """
- self._not_support_combined_queries("select_related")
- if self._fields is not None:
- raise TypeError(
- "Cannot call select_related() after .values() or .values_list()"
- )
-
- obj = self._chain()
- if fields == (None,):
- obj.query.select_related = False
- elif fields:
- obj.query.add_select_related(fields)
- else:
- obj.query.select_related = True
- return obj
-
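- # Illustrative usage sketch (hypothetical Book model with ForeignKeys to
- # Author and Publisher):
- #
- #     Book.objects.select_related("author")              # JOIN author into the query
- #     Book.objects.select_related("author", "publisher") # add to the related list
- #     Book.objects.select_related(None)                  # clear the related list
-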
- def prefetch_related(self, *lookups):
- """
- Return a new QuerySet instance that will prefetch the specified
- Many-To-One and Many-To-Many related objects when the QuerySet is
- evaluated.
-
- When prefetch_related() is called more than once, append to the list of
- prefetch lookups. If prefetch_related(None) is called, clear the list.
- """
- self._not_support_combined_queries("prefetch_related")
- clone = self._chain()
- if lookups == (None,):
- clone._prefetch_related_lookups = ()
- else:
- for lookup in lookups:
- if isinstance(lookup, Prefetch):
- lookup = lookup.prefetch_to
- lookup = lookup.split(LOOKUP_SEP, 1)[0]
- if lookup in self.query._filtered_relations:
- raise ValueError(
- "prefetch_related() is not supported with FilteredRelation."
- )
- clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
- return clone
-
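- # Illustrative usage sketch (hypothetical Author model with a many-to-many
- # "books" relation):
- #
- #     Author.objects.prefetch_related("books")
- #     Author.objects.prefetch_related(
- #         Prefetch("books", queryset=Book.objects.filter(published=True))
- #     )
- #     Author.objects.prefetch_related(None)   # clear the prefetch lookups
-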
- def annotate(self, *args, **kwargs):
- """
- Return a query set in which the returned objects have been annotated
- with extra data or aggregations.
- """
- self._not_support_combined_queries("annotate")
- return self._annotate(args, kwargs, select=True)
-
- def alias(self, *args, **kwargs):
- """
- Return a query set with added aliases for extra data or aggregations.
- """
- self._not_support_combined_queries("alias")
- return self._annotate(args, kwargs, select=False)
-
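- # Illustrative usage sketch: annotate() attaches the computed value to each
- # returned object, while alias() only makes it available to later filtering
- # and ordering (hypothetical Author model with a "books" reverse relation).
- #
- #     from django.db.models import Count
- #     Author.objects.annotate(num_books=Count("books")).filter(num_books__gt=2)
- #     Author.objects.alias(num_books=Count("books")).filter(num_books__gt=2)
-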
- def _annotate(self, args, kwargs, select=True):
- self._validate_values_are_expressions(
- args + tuple(kwargs.values()), method_name="annotate"
- )
- annotations = {}
- for arg in args:
- # The default_alias property may raise a TypeError.
- try:
- if arg.default_alias in kwargs:
- raise ValueError(
- "The named annotation '%s' conflicts with the "
- "default name for another annotation." % arg.default_alias
- )
- except TypeError:
- raise TypeError("Complex annotations require an alias")
- annotations[arg.default_alias] = arg
- annotations.update(kwargs)
-
- clone = self._chain()
- names = self._fields
- if names is None:
- names = set(
- chain.from_iterable(
- (field.name, field.attname)
- if hasattr(field, "attname")
- else (field.name,)
- for field in self.model._meta.get_fields()
- )
- )
-
- for alias, annotation in annotations.items():
- if alias in names:
- raise ValueError(
- "The annotation '%s' conflicts with a field on "
- "the model." % alias
- )
- if isinstance(annotation, FilteredRelation):
- clone.query.add_filtered_relation(annotation, alias)
- else:
- clone.query.add_annotation(
- annotation,
- alias,
- is_summary=False,
- select=select,
- )
- for alias, annotation in clone.query.annotations.items():
- if alias in annotations and annotation.contains_aggregate:
- if clone._fields is None:
- clone.query.group_by = True
- else:
- clone.query.set_group_by()
- break
-
- return clone
-
- def order_by(self, *field_names):
- """Return a new QuerySet instance with the ordering changed."""
- if self.query.is_sliced:
- raise TypeError("Cannot reorder a query once a slice has been taken.")
- obj = self._chain()
- obj.query.clear_ordering(force=True, clear_default=False)
- obj.query.add_ordering(*field_names)
- return obj
-
- def distinct(self, *field_names):
- """
- Return a new QuerySet instance that will select only distinct results.
- """
- self._not_support_combined_queries("distinct")
- if self.query.is_sliced:
- raise TypeError(
- "Cannot create distinct fields once a slice has been taken."
- )
- obj = self._chain()
- obj.query.add_distinct_fields(*field_names)
- return obj
-
- def extra(
- self,
- select=None,
- where=None,
- params=None,
- tables=None,
- order_by=None,
- select_params=None,
- ):
- """Add extra SQL fragments to the query."""
- self._not_support_combined_queries("extra")
- if self.query.is_sliced:
- raise TypeError("Cannot change a query once a slice has been taken.")
- clone = self._chain()
- clone.query.add_extra(select, select_params, where, params, tables, order_by)
- return clone
-
- def reverse(self):
- """Reverse the ordering of the QuerySet."""
- if self.query.is_sliced:
- raise TypeError("Cannot reverse a query once a slice has been taken.")
- clone = self._chain()
- clone.query.standard_ordering = not clone.query.standard_ordering
- return clone
-
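- # Illustrative usage sketch (hypothetical Book model):
- #
- #     Book.objects.order_by("-published", "title")   # replaces any prior ordering
- #     Book.objects.order_by("title").reverse()       # flips to descending title
-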
- def defer(self, *fields):
- """
- Defer the loading of data for certain fields until they are accessed.
- Add the set of deferred fields to any existing set of deferred fields.
- The only exception to this is if None is passed in as the only
- parameter, in which case all deferrals are removed.
- """
- self._not_support_combined_queries("defer")
- if self._fields is not None:
- raise TypeError("Cannot call defer() after .values() or .values_list()")
- clone = self._chain()
- if fields == (None,):
- clone.query.clear_deferred_loading()
- else:
- clone.query.add_deferred_loading(fields)
- return clone
-
- def only(self, *fields):
- """
- Essentially, the opposite of defer(). Only the fields passed into this
- method and that are not already specified as deferred are loaded
- immediately when the queryset is evaluated.
- """
- self._not_support_combined_queries("only")
- if self._fields is not None:
- raise TypeError("Cannot call only() after .values() or .values_list()")
- if fields == (None,):
- # None can only be passed to defer() (to reset deferrals), not to only().
- # That won't stop people trying to do this, so let's be explicit.
- raise TypeError("Cannot pass None as an argument to only().")
- for field in fields:
- field = field.split(LOOKUP_SEP, 1)[0]
- if field in self.query._filtered_relations:
- raise ValueError("only() is not supported with FilteredRelation.")
- clone = self._chain()
- clone.query.add_immediate_loading(fields)
- return clone
-
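- # Illustrative usage sketch (hypothetical Book model with a large "body" field):
- #
- #     Book.objects.defer("body")            # load "body" lazily on first access
- #     Book.objects.only("title", "author")  # load only these fields (plus the pk)
- #     Book.objects.defer(None)              # remove all deferrals
-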
- def using(self, alias):
- """Select which database this QuerySet should execute against."""
- clone = self._chain()
- clone._db = alias
- return clone
-
- ###################################
- # PUBLIC INTROSPECTION ATTRIBUTES #
- ###################################
-
- @property
- def ordered(self):
- """
- Return True if the QuerySet is ordered -- i.e. has an order_by()
- clause or a default ordering on the model (or is empty).
- """
- if isinstance(self, EmptyQuerySet):
- return True
- if self.query.extra_order_by or self.query.order_by:
- return True
- elif (
- self.query.default_ordering
- and self.query.get_meta().ordering
- and
- # A default ordering doesn't affect GROUP BY queries.
- not self.query.group_by
- ):
- return True
- else:
- return False
-
- @property
- def db(self):
- """Return the database used if this query is executed now."""
- if self._for_write:
- return self._db or router.db_for_write(self.model, **self._hints)
- return self._db or router.db_for_read(self.model, **self._hints)
-
- ###################
- # PRIVATE METHODS #
- ###################
-
- def _insert(
- self,
- objs,
- fields,
- returning_fields=None,
- raw=False,
- using=None,
- on_conflict=None,
- update_fields=None,
- unique_fields=None,
- ):
- """
- Insert a new record for the given model. This provides an interface to
- the InsertQuery class and is how Model.save() is implemented.
- """
- self._for_write = True
- if using is None:
- using = self.db
- query = sql.InsertQuery(
- self.model,
- on_conflict=on_conflict,
- update_fields=update_fields,
- unique_fields=unique_fields,
- )
- query.insert_values(fields, objs, raw=raw)
- return query.get_compiler(using=using).execute_sql(returning_fields)
-
- _insert.alters_data = True
- _insert.queryset_only = False
-
- def _batched_insert(
- self,
- objs,
- fields,
- batch_size,
- on_conflict=None,
- update_fields=None,
- unique_fields=None,
- ):
- """
- Helper method for bulk_create() to insert objs one batch at a time.
- """
- connection = connections[self.db]
- ops = connection.ops
- max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
- batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
- inserted_rows = []
- bulk_return = connection.features.can_return_rows_from_bulk_insert
- for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]:
- if bulk_return and on_conflict is None:
- inserted_rows.extend(
- self._insert(
- item,
- fields=fields,
- using=self.db,
- returning_fields=self.model._meta.db_returning_fields,
- )
- )
- else:
- self._insert(
- item,
- fields=fields,
- using=self.db,
- on_conflict=on_conflict,
- update_fields=update_fields,
- unique_fields=unique_fields,
- )
- return inserted_rows
-
- def _chain(self):
- """
- Return a copy of the current QuerySet that's ready for another
- operation.
- """
- obj = self._clone()
- if obj._sticky_filter:
- obj.query.filter_is_sticky = True
- obj._sticky_filter = False
- return obj
-
- def _clone(self):
- """
- Return a copy of the current QuerySet. A lightweight alternative
- to deepcopy().
- """
- c = self.__class__(
- model=self.model,
- query=self.query.chain(),
- using=self._db,
- hints=self._hints,
- )
- c._sticky_filter = self._sticky_filter
- c._for_write = self._for_write
- c._prefetch_related_lookups = self._prefetch_related_lookups[:]
- c._known_related_objects = self._known_related_objects
- c._iterable_class = self._iterable_class
- c._fields = self._fields
- return c
-
- def _fetch_all(self):
- if self._result_cache is None:
- self._result_cache = list(self._iterable_class(self))
- if self._prefetch_related_lookups and not self._prefetch_done:
- self._prefetch_related_objects()
-
- def _next_is_sticky(self):
- """
- Indicate that the next filter call and the one following that should
- be treated as a single filter. This is only important when it comes to
- determining when to reuse tables for many-to-many filters. Required so
- that we can filter naturally on the results of related managers.
-
- This doesn't return a clone of the current QuerySet (it returns
- "self"). The method is only used internally and should be immediately
- followed by a filter() that does create a clone.
- """
- self._sticky_filter = True
- return self
-
- def _merge_sanity_check(self, other):
- """Check that two QuerySet classes may be merged."""
- if self._fields is not None and (
- set(self.query.values_select) != set(other.query.values_select)
- or set(self.query.extra_select) != set(other.query.extra_select)
- or set(self.query.annotation_select) != set(other.query.annotation_select)
- ):
- raise TypeError(
- "Merging '%s' classes must involve the same values in each case."
- % self.__class__.__name__
- )
-
- def _merge_known_related_objects(self, other):
- """
- Keep track of all known related objects from either QuerySet instance.
- """
- for field, objects in other._known_related_objects.items():
- self._known_related_objects.setdefault(field, {}).update(objects)
-
- def resolve_expression(self, *args, **kwargs):
- if self._fields and len(self._fields) > 1:
- # values() querysets can only be used as nested queries
- # if they are set up to select only a single field.
- raise TypeError("Cannot use multi-field values as a filter value.")
- query = self.query.resolve_expression(*args, **kwargs)
- query._db = self._db
- return query
-
- resolve_expression.queryset_only = True
-
- def _add_hints(self, **hints):
- """
- Update hinting information for use by routers. Add new key/values or
- overwrite existing key/values.
- """
- self._hints.update(hints)
-
- def _has_filters(self):
- """
- Check if this QuerySet has any filtering going on. This isn't
- equivalent to checking if all objects are present in the results; for
- example, qs[1:]._has_filters() -> False.
- """
- return self.query.has_filters()
-
- @staticmethod
- def _validate_values_are_expressions(values, method_name):
- invalid_args = sorted(
- str(arg) for arg in values if not hasattr(arg, "resolve_expression")
- )
- if invalid_args:
- raise TypeError(
- "QuerySet.%s() received non-expression(s): %s."
- % (
- method_name,
- ", ".join(invalid_args),
- )
- )
-
- def _not_support_combined_queries(self, operation_name):
- if self.query.combinator:
- raise NotSupportedError(
- "Calling QuerySet.%s() after %s() is not supported."
- % (operation_name, self.query.combinator)
- )
-
- def _check_operator_queryset(self, other, operator_):
- if self.query.combinator or other.query.combinator:
- raise TypeError(f"Cannot use {operator_} operator with combined queryset.")
-
-
- class InstanceCheckMeta(type):
- def __instancecheck__(self, instance):
- return isinstance(instance, QuerySet) and instance.query.is_empty()
-
-
- class EmptyQuerySet(metaclass=InstanceCheckMeta):
- """
- Marker class for checking if a queryset is empty via .none():
- isinstance(qs.none(), EmptyQuerySet) -> True
- """
-
- def __init__(self, *args, **kwargs):
- raise TypeError("EmptyQuerySet can't be instantiated")
-
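- # Illustrative usage sketch: the metaclass above makes isinstance() usable as a
- # "did this come from .none()?" check even though EmptyQuerySet is never
- # instantiated (hypothetical Book model assumed).
- #
- #     isinstance(Book.objects.none(), EmptyQuerySet)   # True
- #     isinstance(Book.objects.all(), EmptyQuerySet)    # False
-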
-
- class RawQuerySet:
- """
- Provide an iterator which converts the results of raw SQL queries into
- annotated model instances.
- """
-
- def __init__(
- self,
- raw_query,
- model=None,
- query=None,
- params=(),
- translations=None,
- using=None,
- hints=None,
- ):
- self.raw_query = raw_query
- self.model = model
- self._db = using
- self._hints = hints or {}
- self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
- self.params = params
- self.translations = translations or {}
- self._result_cache = None
- self._prefetch_related_lookups = ()
- self._prefetch_done = False
-
- def resolve_model_init_order(self):
- """Resolve the init field names and value positions."""
- converter = connections[self.db].introspection.identifier_converter
- model_init_fields = [
- f for f in self.model._meta.fields if converter(f.column) in self.columns
- ]
- annotation_fields = [
- (column, pos)
- for pos, column in enumerate(self.columns)
- if column not in self.model_fields
- ]
- model_init_order = [
- self.columns.index(converter(f.column)) for f in model_init_fields
- ]
- model_init_names = [f.attname for f in model_init_fields]
- return model_init_names, model_init_order, annotation_fields
-
- def prefetch_related(self, *lookups):
- """Same as QuerySet.prefetch_related()"""
- clone = self._clone()
- if lookups == (None,):
- clone._prefetch_related_lookups = ()
- else:
- clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
- return clone
-
- def _prefetch_related_objects(self):
- prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
- self._prefetch_done = True
-
- def _clone(self):
- """Same as QuerySet._clone()"""
- c = self.__class__(
- self.raw_query,
- model=self.model,
- query=self.query,
- params=self.params,
- translations=self.translations,
- using=self._db,
- hints=self._hints,
- )
- c._prefetch_related_lookups = self._prefetch_related_lookups[:]
- return c
-
- def _fetch_all(self):
- if self._result_cache is None:
- self._result_cache = list(self.iterator())
- if self._prefetch_related_lookups and not self._prefetch_done:
- self._prefetch_related_objects()
-
- def __len__(self):
- self._fetch_all()
- return len(self._result_cache)
-
- def __bool__(self):
- self._fetch_all()
- return bool(self._result_cache)
-
- def __iter__(self):
- self._fetch_all()
- return iter(self._result_cache)
-
- def __aiter__(self):
- # Remember, __aiter__ itself is synchronous; it's the thing it returns
- # that is async!
- async def generator():
- await sync_to_async(self._fetch_all)()
- for item in self._result_cache:
- yield item
-
- return generator()
-
- def iterator(self):
- yield from RawModelIterable(self)
-
- def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.query)
-
- def __getitem__(self, k):
- return list(self)[k]
-
- @property
- def db(self):
- """Return the database used if this query is executed now."""
- return self._db or router.db_for_read(self.model, **self._hints)
-
- def using(self, alias):
- """Select the database this RawQuerySet should execute against."""
- return RawQuerySet(
- self.raw_query,
- model=self.model,
- query=self.query.chain(using=alias),
- params=self.params,
- translations=self.translations,
- using=alias,
- )
-
- @cached_property
- def columns(self):
- """
- A list of model field names in the order they'll appear in the
- query results.
- """
- columns = self.query.get_columns()
- # Adjust any column names which don't match field names
- for query_name, model_name in self.translations.items():
- # Ignore translations for nonexistent column names
- try:
- index = columns.index(query_name)
- except ValueError:
- pass
- else:
- columns[index] = model_name
- return columns
-
- @cached_property
- def model_fields(self):
- """A dict mapping column names to model field names."""
- converter = connections[self.db].introspection.identifier_converter
- model_fields = {}
- for field in self.model._meta.fields:
- name, column = field.get_attname_column()
- model_fields[converter(column)] = field
- return model_fields
-
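- # Illustrative usage sketch: a RawQuerySet is normally obtained via
- # Manager.raw(); "translations" maps result column names onto model field
- # names (hypothetical Book model and table name assumed).
- #
- #     Book.objects.raw(
- #         "SELECT id, book_title FROM myapp_book",
- #         translations={"book_title": "title"},
- #     )
-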
-
- class Prefetch:
- def __init__(self, lookup, queryset=None, to_attr=None):
- # `prefetch_through` is the path we traverse to perform the prefetch.
- self.prefetch_through = lookup
- # `prefetch_to` is the path to the attribute that stores the result.
- self.prefetch_to = lookup
- if queryset is not None and (
- isinstance(queryset, RawQuerySet)
- or (
- hasattr(queryset, "_iterable_class")
- and not issubclass(queryset._iterable_class, ModelIterable)
- )
- ):
- raise ValueError(
- "Prefetch querysets cannot use raw(), values(), and values_list()."
- )
- if to_attr:
- self.prefetch_to = LOOKUP_SEP.join(
- lookup.split(LOOKUP_SEP)[:-1] + [to_attr]
- )
-
- self.queryset = queryset
- self.to_attr = to_attr
-
- def __getstate__(self):
- obj_dict = self.__dict__.copy()
- if self.queryset is not None:
- queryset = self.queryset._chain()
- # Prevent the QuerySet from being evaluated
- queryset._result_cache = []
- queryset._prefetch_done = True
- obj_dict["queryset"] = queryset
- return obj_dict
-
- def add_prefix(self, prefix):
- self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
- self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
-
- def get_current_prefetch_to(self, level):
- return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1])
-
- def get_current_to_attr(self, level):
- parts = self.prefetch_to.split(LOOKUP_SEP)
- to_attr = parts[level]
- as_attr = self.to_attr and level == len(parts) - 1
- return to_attr, as_attr
-
- def get_current_queryset(self, level):
- if self.get_current_prefetch_to(level) == self.prefetch_to:
- return self.queryset
- return None
-
- def __eq__(self, other):
- if not isinstance(other, Prefetch):
- return NotImplemented
- return self.prefetch_to == other.prefetch_to
-
- def __hash__(self):
- return hash((self.__class__, self.prefetch_to))
-
-
- def normalize_prefetch_lookups(lookups, prefix=None):
- """Normalize lookups into Prefetch objects."""
- ret = []
- for lookup in lookups:
- if not isinstance(lookup, Prefetch):
- lookup = Prefetch(lookup)
- if prefix:
- lookup.add_prefix(prefix)
- ret.append(lookup)
- return ret
-
-
- def prefetch_related_objects(model_instances, *related_lookups):
- """
- Populate prefetched object caches for a list of model instances based on
- the lookups/Prefetch instances given.
- """
- if not model_instances:
- return # nothing to do
-
- # We need to be able to dynamically add to the list of prefetch_related
- # lookups that we look up (see below). So we need some bookkeeping to
- # ensure we don't do duplicate work.
- done_queries = {} # dictionary of things like 'foo__bar': [results]
-
- auto_lookups = set() # we add to this as we go through.
- followed_descriptors = set() # recursion protection
-
- all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
- while all_lookups:
- lookup = all_lookups.pop()
- if lookup.prefetch_to in done_queries:
- if lookup.queryset is not None:
- raise ValueError(
- "'%s' lookup was already seen with a different queryset. "
- "You may need to adjust the ordering of your lookups."
- % lookup.prefetch_to
- )
-
- continue
-
- # At the top level, the list of objects to decorate is the result cache
- # from the primary QuerySet. It won't be for deeper levels.
- obj_list = model_instances
-
- through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
- for level, through_attr in enumerate(through_attrs):
- # Prepare main instances
- if not obj_list:
- break
-
- prefetch_to = lookup.get_current_prefetch_to(level)
- if prefetch_to in done_queries:
- # Skip any prefetching, and any object preparation
- obj_list = done_queries[prefetch_to]
- continue
-
- # Prepare objects:
- good_objects = True
- for obj in obj_list:
- # Since prefetching can re-use instances, it is possible to have
- # the same instance multiple times in obj_list, so obj might
- # already be prepared.
- if not hasattr(obj, "_prefetched_objects_cache"):
- try:
- obj._prefetched_objects_cache = {}
- except (AttributeError, TypeError):
- # Must be an immutable object from
- # values_list(flat=True), for example (TypeError) or
- # a QuerySet subclass that isn't returning Model
- # instances (AttributeError), either in Django or a 3rd
- # party. prefetch_related() doesn't make sense, so quit.
- good_objects = False
- break
- if not good_objects:
- break
-
- # Descend down tree
-
- # We assume that objects retrieved are homogeneous (which is the premise
- # of prefetch_related), so what applies to the first object applies to all.
- first_obj = obj_list[0]
- to_attr = lookup.get_current_to_attr(level)[0]
- prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
- first_obj, through_attr, to_attr
- )
-
- if not attr_found:
- raise AttributeError(
- "Cannot find '%s' on %s object, '%s' is an invalid "
- "parameter to prefetch_related()"
- % (
- through_attr,
- first_obj.__class__.__name__,
- lookup.prefetch_through,
- )
- )
-
- if level == len(through_attrs) - 1 and prefetcher is None:
- # Last one, this *must* resolve to something that supports
- # prefetching, otherwise there is no point adding it and the
- # developer asking for it has made a mistake.
- raise ValueError(
- "'%s' does not resolve to an item that supports "
- "prefetching - this is an invalid parameter to "
- "prefetch_related()." % lookup.prefetch_through
- )
-
- obj_to_fetch = None
- if prefetcher is not None:
- obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]
-
- if obj_to_fetch:
- obj_list, additional_lookups = prefetch_one_level(
- obj_to_fetch,
- prefetcher,
- lookup,
- level,
- )
- # We need to ensure we don't keep adding lookups from the
- # same relationships to stop infinite recursion. So, if we
- # are already on an automatically added lookup, don't add
- # the new lookups from relationships we've seen already.
- if not (
- prefetch_to in done_queries
- and lookup in auto_lookups
- and descriptor in followed_descriptors
- ):
- done_queries[prefetch_to] = obj_list
- new_lookups = normalize_prefetch_lookups(
- reversed(additional_lookups), prefetch_to
- )
- auto_lookups.update(new_lookups)
- all_lookups.extend(new_lookups)
- followed_descriptors.add(descriptor)
- else:
- # Either a singly related object that has already been fetched
- # (e.g. via select_related), or hopefully some other property
- # that doesn't support prefetching but needs to be traversed.
-
- # We replace the current list of parent objects with the list
- # of related objects, filtering out empty or missing values so
- # that we can continue with nullable or reverse relations.
- new_obj_list = []
- for obj in obj_list:
- if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
- # If related objects have been prefetched, use the
- # cache rather than the object's through_attr.
- new_obj = list(obj._prefetched_objects_cache.get(through_attr))
- else:
- try:
- new_obj = getattr(obj, through_attr)
- except exceptions.ObjectDoesNotExist:
- continue
- if new_obj is None:
- continue
- # We special-case `list` rather than something more generic
- # like `Iterable` because we don't want to accidentally match
- # user models that define __iter__.
- if isinstance(new_obj, list):
- new_obj_list.extend(new_obj)
- else:
- new_obj_list.append(new_obj)
- obj_list = new_obj_list
-
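- # Illustrative usage sketch: prefetch_related_objects() operates on an already
- # materialized list of instances (hypothetical Book model with "author" and
- # "reviews" relations assumed).
- #
- #     books = list(Book.objects.filter(published=True))
- #     prefetch_related_objects(books, "author", Prefetch("reviews"))
-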
-
- def get_prefetcher(instance, through_attr, to_attr):
- """
- For the attribute 'through_attr' on the given instance, find
- an object that has a get_prefetch_queryset() method.
- Return a 4-tuple containing:
- (the object with get_prefetch_queryset (or None),
- the descriptor object representing this relationship (or None),
- a boolean that is False if the attribute was not found at all,
- a function that takes an instance and returns a boolean that is True if
- the attribute has already been fetched for that instance)
- """
-
- def has_to_attr_attribute(instance):
- return hasattr(instance, to_attr)
-
- prefetcher = None
- is_fetched = has_to_attr_attribute
-
- # For singly related objects, we have to avoid getting the attribute
- # from the object, as this will trigger the query. So we first try
- # on the class, in order to get the descriptor object.
- rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
- if rel_obj_descriptor is None:
- attr_found = hasattr(instance, through_attr)
- else:
- attr_found = True
- if rel_obj_descriptor:
- # singly related object, descriptor object has the
- # get_prefetch_queryset() method.
- if hasattr(rel_obj_descriptor, "get_prefetch_queryset"):
- prefetcher = rel_obj_descriptor
- is_fetched = rel_obj_descriptor.is_cached
- else:
- # descriptor doesn't support prefetching, so we go ahead and get
- # the attribute on the instance rather than the class to
- # support many related managers
- rel_obj = getattr(instance, through_attr)
- if hasattr(rel_obj, "get_prefetch_queryset"):
- prefetcher = rel_obj
- if through_attr != to_attr:
- # Special case cached_property instances because hasattr
- # triggers attribute computation and assignment.
- if isinstance(
- getattr(instance.__class__, to_attr, None), cached_property
- ):
-
- def has_cached_property(instance):
- return to_attr in instance.__dict__
-
- is_fetched = has_cached_property
- else:
-
- def in_prefetched_cache(instance):
- return through_attr in instance._prefetched_objects_cache
-
- is_fetched = in_prefetched_cache
- return prefetcher, rel_obj_descriptor, attr_found, is_fetched
-
-
- def prefetch_one_level(instances, prefetcher, lookup, level):
- """
- Helper function for prefetch_related_objects().
-
- Run prefetches on all instances using the prefetcher object,
- assigning results to relevant caches in instance.
-
- Return the prefetched objects along with any additional prefetches that
- must be done due to prefetch_related lookups found from default managers.
- """
- # prefetcher must have a method get_prefetch_queryset() which takes a list
- # of instances, and returns a tuple:
-
- # (queryset of instances of self.model that are related to passed in instances,
- # callable that gets value to be matched for returned instances,
- # callable that gets value to be matched for passed in instances,
- # boolean that is True for singly related objects,
- # cache or field name to assign to,
- # boolean that is True when the previous argument is a cache name vs a field name).
-
- # The 'values to be matched' must be hashable as they will be used
- # in a dictionary.
-
- (
- rel_qs,
- rel_obj_attr,
- instance_attr,
- single,
- cache_name,
- is_descriptor,
- ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))
- # We have to handle the possibility that the QuerySet we just got back
- # contains some prefetch_related lookups. We don't want to trigger the
- # prefetch_related functionality by evaluating the query. Rather, we need
- # to merge in the prefetch_related lookups.
- # Copy the lookups in case it is a Prefetch object which could be reused
- # later (happens in nested prefetch_related).
- additional_lookups = [
- copy.copy(additional_lookup)
- for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
- ]
- if additional_lookups:
- # Don't need to clone because the manager should have given us a fresh
- # instance, so we access an internal attribute instead of using the public
- # interface, for performance reasons.
- rel_qs._prefetch_related_lookups = ()
-
- all_related_objects = list(rel_qs)
-
- rel_obj_cache = {}
- for rel_obj in all_related_objects:
- rel_attr_val = rel_obj_attr(rel_obj)
- rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
-
- to_attr, as_attr = lookup.get_current_to_attr(level)
- # Make sure `to_attr` does not conflict with a field.
- if as_attr and instances:
- # We assume that objects retrieved are homogeneous (which is the premise
- # of prefetch_related), so what applies to the first object applies to all.
- model = instances[0].__class__
- try:
- model._meta.get_field(to_attr)
- except exceptions.FieldDoesNotExist:
- pass
- else:
- msg = "to_attr={} conflicts with a field on the {} model."
- raise ValueError(msg.format(to_attr, model.__name__))
-
- # Whether or not we're prefetching the last part of the lookup.
- leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
-
- for obj in instances:
- instance_attr_val = instance_attr(obj)
- vals = rel_obj_cache.get(instance_attr_val, [])
-
- if single:
- val = vals[0] if vals else None
- if as_attr:
- # A to_attr has been given for the prefetch.
- setattr(obj, to_attr, val)
- elif is_descriptor:
- # cache_name points to a field name in obj.
- # This field is a descriptor for a related object.
- setattr(obj, cache_name, val)
- else:
- # No to_attr has been given for this prefetch operation and the
- # cache_name does not point to a descriptor. Store the value of
- # the field in the object's field cache.
- obj._state.fields_cache[cache_name] = val
- else:
- if as_attr:
- setattr(obj, to_attr, vals)
- else:
- manager = getattr(obj, to_attr)
- if leaf and lookup.queryset is not None:
- qs = manager._apply_rel_filters(lookup.queryset)
- else:
- qs = manager.get_queryset()
- qs._result_cache = vals
- # We don't want the individual qs doing prefetch_related now,
- # since we have merged this into the current work.
- qs._prefetch_done = True
- obj._prefetched_objects_cache[cache_name] = qs
- return all_related_objects, additional_lookups
-
-
- class RelatedPopulator:
- """
- RelatedPopulator is used for select_related() object instantiation.
-
- The idea is that each select_related() model will be populated by a
- different RelatedPopulator instance. The RelatedPopulator instances get
- klass_info and select (computed in SQLCompiler) plus the used db as
- input for initialization. That data is used to compute which columns
- to use, how to instantiate the model, and how to populate the links
- between the objects.
-
- The actual creation of the objects is done in the populate() method. This
- method gets row and from_obj as input and populates the select_related()
- model instance.
- """
-
- def __init__(self, klass_info, select, db):
- self.db = db
- # Pre-compute needed attributes. The attributes are:
- # - model_cls: the possibly deferred model class to instantiate
- # - either:
- # - cols_start, cols_end: usually the columns in the row are
- # in the same order model_cls.__init__ expects them, so we
- # can instantiate by model_cls(*row[cols_start:cols_end])
- # - reorder_for_init: When select_related descends to a child
- # class, then we want to reuse the already selected parent
- # data. However, in this case the parent data isn't necessarily
- # in the same order that Model.__init__ expects it to be, so
- # we have to reorder the parent data. The reorder_for_init
- # attribute contains a function used to reorder the field data
- # in the order __init__ expects it.
- # - pk_idx: the index of the primary key field in the reordered
- # model data. Used to check if a related object exists at all.
- # - init_list: the field attnames fetched from the database. For
- # deferred models this isn't the same as all attnames of the
- # model's fields.
- # - related_populators: a list of RelatedPopulator instances if
- # select_related() descends to related models from this model.
- # - local_setter, remote_setter: Methods to set cached values on
- # the object being populated and on the remote object. Usually
- # these are Field.set_cached_value() methods.
- select_fields = klass_info["select_fields"]
- from_parent = klass_info["from_parent"]
- if not from_parent:
- self.cols_start = select_fields[0]
- self.cols_end = select_fields[-1] + 1
- self.init_list = [
- f[0].target.attname for f in select[self.cols_start : self.cols_end]
- ]
- self.reorder_for_init = None
- else:
- attname_indexes = {
- select[idx][0].target.attname: idx for idx in select_fields
- }
- model_init_attnames = (
- f.attname for f in klass_info["model"]._meta.concrete_fields
- )
- self.init_list = [
- attname for attname in model_init_attnames if attname in attname_indexes
- ]
- self.reorder_for_init = operator.itemgetter(
- *[attname_indexes[attname] for attname in self.init_list]
- )
-
- self.model_cls = klass_info["model"]
- self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
- self.related_populators = get_related_populators(klass_info, select, self.db)
- self.local_setter = klass_info["local_setter"]
- self.remote_setter = klass_info["remote_setter"]
-
- def populate(self, row, from_obj):
- if self.reorder_for_init:
- obj_data = self.reorder_for_init(row)
- else:
- obj_data = row[self.cols_start : self.cols_end]
- if obj_data[self.pk_idx] is None:
- obj = None
- else:
- obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
- for rel_iter in self.related_populators:
- rel_iter.populate(row, obj)
- self.local_setter(from_obj, obj)
- if obj is not None:
- self.remote_setter(obj, from_obj)
-
-
- def get_related_populators(klass_info, select, db):
- iterators = []
- related_klass_infos = klass_info.get("related_klass_infos", [])
- for rel_klass_info in related_klass_infos:
- rel_cls = RelatedPopulator(rel_klass_info, select, db)
- iterators.append(rel_cls)
- return iterators
|