/*
 * PostgreSQL Source Code (git master) — catcache.c
 * Extracted from the doxygen "documentation of this file" page.
 */
1/*-------------------------------------------------------------------------
2 *
3 * catcache.c
4 * System catalog cache for tuples matching a key.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/utils/cache/catcache.c
12 *
13 *-------------------------------------------------------------------------
14 */
15#include "postgres.h"
16
17#include "access/genam.h"
18#include "access/heaptoast.h"
19#include "access/relscan.h"
20#include "access/table.h"
21#include "access/xact.h"
22#include "catalog/catalog.h"
24#include "catalog/pg_type.h"
25#include "common/hashfn.h"
26#include "common/pg_prng.h"
27#include "miscadmin.h"
28#include "port/pg_bitutils.h"
29#ifdef CATCACHE_STATS
30#include "storage/ipc.h" /* for on_proc_exit */
31#endif
32#include "storage/lmgr.h"
33#include "utils/builtins.h"
34#include "utils/catcache.h"
35#include "utils/datum.h"
36#include "utils/fmgroids.h"
38#include "utils/inval.h"
39#include "utils/memutils.h"
40#include "utils/rel.h"
41#include "utils/resowner.h"
42#include "utils/syscache.h"
43
44/*
45 * If a catcache invalidation is processed while we are in the middle of
46 * creating a catcache entry (or list), it might apply to the entry we're
47 * creating, making it invalid before it's been inserted to the catcache. To
48 * catch such cases, we have a stack of "create-in-progress" entries. Cache
49 * invalidation marks any matching entries in the stack as dead, in addition
50 * to the actual CatCTup and CatCList entries.
51 */
52typedef struct CatCInProgress
53{
54 CatCache *cache; /* cache that the entry belongs to */
55 uint32 hash_value; /* hash of the entry; ignored for lists */
56 bool list; /* is it a list entry? */
57 bool dead; /* set when the entry is invalidated */
60
62
63 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
64
65/*
66 * Given a hash value and the size of the hash table, find the bucket
67 * in which the hash value belongs. Since the hash table must contain
68 * a power-of-2 number of elements, this is a simple bitmask.
69 */
70#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
71
72
73/*
74 * variables, macros and other stuff
75 */
76
77#ifdef CACHEDEBUG
78#define CACHE_elog(...) elog(__VA_ARGS__)
79#else
80#define CACHE_elog(...)
81#endif
82
83/* Cache management header --- pointer is NULL until created */
84static CatCacheHeader *CacheHdr = NULL;
85
87 int nkeys,
88 Datum v1, Datum v2,
89 Datum v3, Datum v4);
90
92 int nkeys,
93 uint32 hashValue,
94 Index hashIndex,
95 Datum v1, Datum v2,
96 Datum v3, Datum v4);
97
99 Datum v1, Datum v2, Datum v3, Datum v4);
101 HeapTuple tuple);
102static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
103 const Datum *cachekeys,
104 const Datum *searchkeys);
105
106#ifdef CATCACHE_STATS
107static void CatCachePrintStats(int code, Datum arg);
108#endif
109static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
110static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
111static void RehashCatCache(CatCache *cp);
112static void RehashCatCacheLists(CatCache *cp);
116 uint32 hashValue, Index hashIndex);
117
118static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
120static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
121 Datum *keys);
122static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
123 Datum *srckeys, Datum *dstkeys);
124
125
126/*
127 * internal support functions
128 */
129
130/* ResourceOwner callbacks to hold catcache references */
131
132static void ResOwnerReleaseCatCache(Datum res);
133static char *ResOwnerPrintCatCache(Datum res);
134static void ResOwnerReleaseCatCacheList(Datum res);
135static char *ResOwnerPrintCatCacheList(Datum res);
136
138{
139 /* catcache references */
140 .name = "catcache reference",
141 .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
142 .release_priority = RELEASE_PRIO_CATCACHE_REFS,
143 .ReleaseResource = ResOwnerReleaseCatCache,
144 .DebugPrint = ResOwnerPrintCatCache
145};
146
148{
149 /* catcache-list pins */
150 .name = "catcache list reference",
151 .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
152 .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
153 .ReleaseResource = ResOwnerReleaseCatCacheList,
154 .DebugPrint = ResOwnerPrintCatCacheList
155};
156
157/* Convenience wrappers over ResourceOwnerRemember/Forget */
158static inline void
160{
162}
163static inline void
165{
167}
168static inline void
170{
172}
173static inline void
175{
177}
178
179
180/*
181 * Hash and equality functions for system types that are used as cache key
182 * fields. In some cases, we just call the regular SQL-callable functions for
183 * the appropriate data type, but that tends to be a little slow, and the
184 * speed of these functions is performance-critical. Therefore, for data
185 * types that frequently occur as catcache keys, we hard-code the logic here.
186 * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
187 * in certain cases (like int4) we can adopt a faster hash algorithm as well.
188 */
189
190static bool
192{
193 return DatumGetChar(a) == DatumGetChar(b);
194}
195
196static uint32
198{
199 return murmurhash32((int32) DatumGetChar(datum));
200}
201
202static bool
204{
205 char *ca = NameStr(*DatumGetName(a));
206 char *cb = NameStr(*DatumGetName(b));
207
208 return strncmp(ca, cb, NAMEDATALEN) == 0;
209}
210
211static uint32
213{
214 char *key = NameStr(*DatumGetName(datum));
215
216 return hash_any((unsigned char *) key, strlen(key));
217}
218
219static bool
221{
222 return DatumGetInt16(a) == DatumGetInt16(b);
223}
224
225static uint32
227{
228 return murmurhash32((int32) DatumGetInt16(datum));
229}
230
231static bool
233{
234 return DatumGetInt32(a) == DatumGetInt32(b);
235}
236
237static uint32
239{
240 return murmurhash32((int32) DatumGetInt32(datum));
241}
242
243static bool
245{
246 /*
247 * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
248 * want to take the fast "deterministic" path in texteq().
249 */
250 return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
251}
252
253static uint32
255{
256 /* analogously here as in texteqfast() */
257 return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
258}
259
260static bool
262{
264}
265
266static uint32
268{
270}
271
272/* Lookup support functions for a type. */
273static void
274GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
275{
276 switch (keytype)
277 {
278 case BOOLOID:
279 *hashfunc = charhashfast;
280 *fasteqfunc = chareqfast;
281 *eqfunc = F_BOOLEQ;
282 break;
283 case CHAROID:
284 *hashfunc = charhashfast;
285 *fasteqfunc = chareqfast;
286 *eqfunc = F_CHAREQ;
287 break;
288 case NAMEOID:
289 *hashfunc = namehashfast;
290 *fasteqfunc = nameeqfast;
291 *eqfunc = F_NAMEEQ;
292 break;
293 case INT2OID:
294 *hashfunc = int2hashfast;
295 *fasteqfunc = int2eqfast;
296 *eqfunc = F_INT2EQ;
297 break;
298 case INT4OID:
299 *hashfunc = int4hashfast;
300 *fasteqfunc = int4eqfast;
301 *eqfunc = F_INT4EQ;
302 break;
303 case TEXTOID:
304 *hashfunc = texthashfast;
305 *fasteqfunc = texteqfast;
306 *eqfunc = F_TEXTEQ;
307 break;
308 case OIDOID:
309 case REGPROCOID:
310 case REGPROCEDUREOID:
311 case REGOPEROID:
312 case REGOPERATOROID:
313 case REGCLASSOID:
314 case REGTYPEOID:
315 case REGCOLLATIONOID:
316 case REGCONFIGOID:
317 case REGDICTIONARYOID:
318 case REGROLEOID:
319 case REGNAMESPACEOID:
320 *hashfunc = int4hashfast;
321 *fasteqfunc = int4eqfast;
322 *eqfunc = F_OIDEQ;
323 break;
324 case OIDVECTOROID:
325 *hashfunc = oidvectorhashfast;
326 *fasteqfunc = oidvectoreqfast;
327 *eqfunc = F_OIDVECTOREQ;
328 break;
329 default:
330 elog(FATAL, "type %u not supported as catcache key", keytype);
331 *hashfunc = NULL; /* keep compiler quiet */
332
333 *eqfunc = InvalidOid;
334 break;
335 }
336}
337
338/*
339 * CatalogCacheComputeHashValue
340 *
341 * Compute the hash value associated with a given set of lookup keys
342 */
343static uint32
345 Datum v1, Datum v2, Datum v3, Datum v4)
346{
347 uint32 hashValue = 0;
348 uint32 oneHash;
349 CCHashFN *cc_hashfunc = cache->cc_hashfunc;
350
351 CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
352 cache->cc_relname, nkeys, cache);
353
354 switch (nkeys)
355 {
356 case 4:
357 oneHash = (cc_hashfunc[3]) (v4);
358 hashValue ^= pg_rotate_left32(oneHash, 24);
359 /* FALLTHROUGH */
360 case 3:
361 oneHash = (cc_hashfunc[2]) (v3);
362 hashValue ^= pg_rotate_left32(oneHash, 16);
363 /* FALLTHROUGH */
364 case 2:
365 oneHash = (cc_hashfunc[1]) (v2);
366 hashValue ^= pg_rotate_left32(oneHash, 8);
367 /* FALLTHROUGH */
368 case 1:
369 oneHash = (cc_hashfunc[0]) (v1);
370 hashValue ^= oneHash;
371 break;
372 default:
373 elog(FATAL, "wrong number of hash keys: %d", nkeys);
374 break;
375 }
376
377 return hashValue;
378}
379
380/*
381 * CatalogCacheComputeTupleHashValue
382 *
383 * Compute the hash value associated with a given tuple to be cached
384 */
385static uint32
387{
388 Datum v1 = 0,
389 v2 = 0,
390 v3 = 0,
391 v4 = 0;
392 bool isNull = false;
393 int *cc_keyno = cache->cc_keyno;
394 TupleDesc cc_tupdesc = cache->cc_tupdesc;
395
396 /* Now extract key fields from tuple, insert into scankey */
397 switch (nkeys)
398 {
399 case 4:
400 v4 = fastgetattr(tuple,
401 cc_keyno[3],
402 cc_tupdesc,
403 &isNull);
404 Assert(!isNull);
405 /* FALLTHROUGH */
406 case 3:
407 v3 = fastgetattr(tuple,
408 cc_keyno[2],
409 cc_tupdesc,
410 &isNull);
411 Assert(!isNull);
412 /* FALLTHROUGH */
413 case 2:
414 v2 = fastgetattr(tuple,
415 cc_keyno[1],
416 cc_tupdesc,
417 &isNull);
418 Assert(!isNull);
419 /* FALLTHROUGH */
420 case 1:
421 v1 = fastgetattr(tuple,
422 cc_keyno[0],
423 cc_tupdesc,
424 &isNull);
425 Assert(!isNull);
426 break;
427 default:
428 elog(FATAL, "wrong number of hash keys: %d", nkeys);
429 break;
430 }
431
432 return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
433}
434
435/*
436 * CatalogCacheCompareTuple
437 *
438 * Compare a tuple to the passed arguments.
439 */
440static inline bool
442 const Datum *cachekeys,
443 const Datum *searchkeys)
444{
445 const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
446 int i;
447
448 for (i = 0; i < nkeys; i++)
449 {
450 if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
451 return false;
452 }
453 return true;
454}
455
456
457#ifdef CATCACHE_STATS
458
459static void
460CatCachePrintStats(int code, Datum arg)
461{
462 slist_iter iter;
463 long cc_searches = 0;
464 long cc_hits = 0;
465 long cc_neg_hits = 0;
466 long cc_newloads = 0;
467 long cc_invals = 0;
468 long cc_nlists = 0;
469 long cc_lsearches = 0;
470 long cc_lhits = 0;
471
473 {
474 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
475
476 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
477 continue; /* don't print unused caches */
478 elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits",
481 cache->cc_ntup,
482 cache->cc_searches,
483 cache->cc_hits,
484 cache->cc_neg_hits,
485 cache->cc_hits + cache->cc_neg_hits,
486 cache->cc_newloads,
487 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
488 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
489 cache->cc_invals,
491 cache->cc_lsearches,
492 cache->cc_lhits);
493 cc_searches += cache->cc_searches;
494 cc_hits += cache->cc_hits;
495 cc_neg_hits += cache->cc_neg_hits;
496 cc_newloads += cache->cc_newloads;
497 cc_invals += cache->cc_invals;
498 cc_nlists += cache->cc_nlist;
499 cc_lsearches += cache->cc_lsearches;
500 cc_lhits += cache->cc_lhits;
501 }
502 elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits",
504 cc_searches,
505 cc_hits,
506 cc_neg_hits,
507 cc_hits + cc_neg_hits,
508 cc_newloads,
509 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
510 cc_searches - cc_hits - cc_neg_hits,
511 cc_invals,
512 cc_nlists,
513 cc_lsearches,
514 cc_lhits);
515}
516#endif /* CATCACHE_STATS */
517
518
519/*
520 * CatCacheRemoveCTup
521 *
522 * Unlink and delete the given cache entry
523 *
524 * NB: if it is a member of a CatCList, the CatCList is deleted too.
525 * Both the cache entry and the list had better have zero refcount.
526 */
527static void
529{
530 Assert(ct->refcount == 0);
531 Assert(ct->my_cache == cache);
532
533 if (ct->c_list)
534 {
535 /*
536 * The cleanest way to handle this is to call CatCacheRemoveCList,
537 * which will recurse back to me, and the recursive call will do the
538 * work. Set the "dead" flag to make sure it does recurse.
539 */
540 ct->dead = true;
542 return; /* nothing left to do */
543 }
544
545 /* delink from linked list */
547
548 /*
549 * Free keys when we're dealing with a negative entry, normal entries just
550 * point into tuple, allocated together with the CatCTup.
551 */
552 if (ct->negative)
554 cache->cc_keyno, ct->keys);
555
556 pfree(ct);
557
558 --cache->cc_ntup;
559 --CacheHdr->ch_ntup;
560}
561
562/*
563 * CatCacheRemoveCList
564 *
565 * Unlink and delete the given cache list entry
566 *
567 * NB: any dead member entries that become unreferenced are deleted too.
568 */
569static void
571{
572 int i;
573
574 Assert(cl->refcount == 0);
575 Assert(cl->my_cache == cache);
576
577 /* delink from member tuples */
578 for (i = cl->n_members; --i >= 0;)
579 {
580 CatCTup *ct = cl->members[i];
581
582 Assert(ct->c_list == cl);
583 ct->c_list = NULL;
584 /* if the member is dead and now has no references, remove it */
585 if (
586#ifndef CATCACHE_FORCE_RELEASE
587 ct->dead &&
588#endif
589 ct->refcount == 0)
591 }
592
593 /* delink from linked list */
595
596 /* free associated column data */
598 cache->cc_keyno, cl->keys);
599
600 pfree(cl);
601
602 --cache->cc_nlist;
603}
604
605
606/*
607 * CatCacheInvalidate
608 *
609 * Invalidate entries in the specified cache, given a hash value.
610 *
611 * We delete cache entries that match the hash value, whether positive
612 * or negative. We don't care whether the invalidation is the result
613 * of a tuple insertion or a deletion.
614 *
615 * We used to try to match positive cache entries by TID, but that is
616 * unsafe after a VACUUM FULL on a system catalog: an inval event could
617 * be queued before VACUUM FULL, and then processed afterwards, when the
618 * target tuple that has to be invalidated has a different TID than it
619 * did when the event was created. So now we just compare hash values and
620 * accept the small risk of unnecessary invalidations due to false matches.
621 *
622 * This routine is only quasi-public: it should only be used by inval.c.
623 */
624void
626{
627 Index hashIndex;
629
630 CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
631
632 /*
633 * We don't bother to check whether the cache has finished initialization
634 * yet; if not, there will be no entries in it so no problem.
635 */
636
637 /*
638 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
639 * searches might still be correct, so just zap 'em all.
640 */
641 for (int i = 0; i < cache->cc_nlbuckets; i++)
642 {
643 dlist_head *bucket = &cache->cc_lbucket[i];
644
645 dlist_foreach_modify(iter, bucket)
646 {
647 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
648
649 if (cl->refcount > 0)
650 cl->dead = true;
651 else
653 }
654 }
655
656 /*
657 * inspect the proper hash bucket for tuple matches
658 */
659 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
660 dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
661 {
662 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
663
664 if (hashValue == ct->hash_value)
665 {
666 if (ct->refcount > 0 ||
667 (ct->c_list && ct->c_list->refcount > 0))
668 {
669 ct->dead = true;
670 /* list, if any, was marked dead above */
671 Assert(ct->c_list == NULL || ct->c_list->dead);
672 }
673 else
675 CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
676#ifdef CATCACHE_STATS
677 cache->cc_invals++;
678#endif
679 /* could be multiple matches, so keep looking! */
680 }
681 }
682
683 /* Also invalidate any entries that are being built */
684 for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
685 {
686 if (e->cache == cache)
687 {
688 if (e->list || e->hash_value == hashValue)
689 e->dead = true;
690 }
691 }
692}
693
694/* ----------------------------------------------------------------
695 * public functions
696 * ----------------------------------------------------------------
697 */
698
699
700/*
701 * Standard routine for creating cache context if it doesn't exist yet
702 *
703 * There are a lot of places (probably far more than necessary) that check
704 * whether CacheMemoryContext exists yet and want to create it if not.
705 * We centralize knowledge of exactly how to create it here.
706 */
707void
709{
710 /*
711 * Purely for paranoia, check that context doesn't exist; caller probably
712 * did so already.
713 */
716 "CacheMemoryContext",
718}
719
720
721/*
722 * ResetCatalogCache
723 *
724 * Reset one catalog cache to empty.
725 *
726 * This is not very efficient if the target cache is nearly empty.
727 * However, it shouldn't need to be efficient; we don't invoke it often.
728 *
729 * If 'debug_discard' is true, we are being called as part of
730 * debug_discard_caches. In that case, the cache is not reset for
731 * correctness, but just to get more testing of cache invalidation. We skip
732 * resetting in-progress build entries in that case, or we'd never make any
733 * progress.
734 */
735static void
736ResetCatalogCache(CatCache *cache, bool debug_discard)
737{
739 int i;
740
741 /* Remove each list in this cache, or at least mark it dead */
742 for (i = 0; i < cache->cc_nlbuckets; i++)
743 {
744 dlist_head *bucket = &cache->cc_lbucket[i];
745
746 dlist_foreach_modify(iter, bucket)
747 {
748 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
749
750 if (cl->refcount > 0)
751 cl->dead = true;
752 else
754 }
755 }
756
757 /* Remove each tuple in this cache, or at least mark it dead */
758 for (i = 0; i < cache->cc_nbuckets; i++)
759 {
760 dlist_head *bucket = &cache->cc_bucket[i];
761
762 dlist_foreach_modify(iter, bucket)
763 {
764 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
765
766 if (ct->refcount > 0 ||
767 (ct->c_list && ct->c_list->refcount > 0))
768 {
769 ct->dead = true;
770 /* list, if any, was marked dead above */
771 Assert(ct->c_list == NULL || ct->c_list->dead);
772 }
773 else
775#ifdef CATCACHE_STATS
776 cache->cc_invals++;
777#endif
778 }
779 }
780
781 /* Also invalidate any entries that are being built */
782 if (!debug_discard)
783 {
784 for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
785 {
786 if (e->cache == cache)
787 e->dead = true;
788 }
789 }
790}
791
792/*
793 * ResetCatalogCaches
794 *
795 * Reset all caches when a shared cache inval event forces it
796 */
797void
799{
801}
802
803void
804ResetCatalogCachesExt(bool debug_discard)
805{
806 slist_iter iter;
807
808 CACHE_elog(DEBUG2, "ResetCatalogCaches called");
809
811 {
812 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
813
814 ResetCatalogCache(cache, debug_discard);
815 }
816
817 CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
818}
819
820/*
821 * CatalogCacheFlushCatalog
822 *
823 * Flush all catcache entries that came from the specified system catalog.
824 * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
825 * tuples very likely now have different TIDs than before. (At one point
826 * we also tried to force re-execution of CatalogCacheInitializeCache for
827 * the cache(s) on that catalog. This is a bad idea since it leads to all
828 * kinds of trouble if a cache flush occurs while loading cache entries.
829 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
830 * rather than relying on the relcache to keep a tupdesc for us. Of course
831 * this assumes the tupdesc of a cachable system table will not change...)
832 */
833void
835{
836 slist_iter iter;
837
838 CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
839
841 {
842 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
843
844 /* Does this cache store tuples of the target catalog? */
845 if (cache->cc_reloid == catId)
846 {
847 /* Yes, so flush all its contents */
848 ResetCatalogCache(cache, false);
849
850 /* Tell inval.c to call syscache callbacks for this cache */
852 }
853 }
854
855 CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
856}
857
858/*
859 * InitCatCache
860 *
861 * This allocates and initializes a cache for a system catalog relation.
862 * Actually, the cache is only partially initialized to avoid opening the
863 * relation. The relation will be opened and the rest of the cache
864 * structure initialized on the first access.
865 */
866#ifdef CACHEDEBUG
867#define InitCatCache_DEBUG2 \
868do { \
869 elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
870 cp->cc_reloid, cp->cc_indexoid, cp->id, \
871 cp->cc_nkeys, cp->cc_nbuckets); \
872} while(0)
873#else
874#define InitCatCache_DEBUG2
875#endif
876
877CatCache *
879 Oid reloid,
880 Oid indexoid,
881 int nkeys,
882 const int *key,
883 int nbuckets)
884{
885 CatCache *cp;
886 MemoryContext oldcxt;
887 int i;
888
889 /*
890 * nbuckets is the initial number of hash buckets to use in this catcache.
891 * It will be enlarged later if it becomes too full.
892 *
893 * nbuckets must be a power of two. We check this via Assert rather than
894 * a full runtime check because the values will be coming from constant
895 * tables.
896 *
897 * If you're confused by the power-of-two check, see comments in
898 * bitmapset.c for an explanation.
899 */
900 Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
901
902 /*
903 * first switch to the cache context so our allocations do not vanish at
904 * the end of a transaction
905 */
908
910
911 /*
912 * if first time through, initialize the cache group header
913 */
914 if (CacheHdr == NULL)
915 {
918 CacheHdr->ch_ntup = 0;
919#ifdef CATCACHE_STATS
920 /* set up to dump stats at backend exit */
921 on_proc_exit(CatCachePrintStats, 0);
922#endif
923 }
924
925 /*
926 * Allocate a new cache structure, aligning to a cacheline boundary
927 *
928 * Note: we rely on zeroing to initialize all the dlist headers correctly
929 */
932 cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
933
934 /*
935 * Many catcaches never receive any list searches. Therefore, we don't
936 * allocate the cc_lbuckets till we get a list search.
937 */
938 cp->cc_lbucket = NULL;
939
940 /*
941 * initialize the cache's relation information for the relation
942 * corresponding to this cache, and initialize some of the new cache's
943 * other internal fields. But don't open the relation yet.
944 */
945 cp->id = id;
946 cp->cc_relname = "(not known yet)";
947 cp->cc_reloid = reloid;
948 cp->cc_indexoid = indexoid;
949 cp->cc_relisshared = false; /* temporary */
950 cp->cc_tupdesc = (TupleDesc) NULL;
951 cp->cc_ntup = 0;
952 cp->cc_nlist = 0;
953 cp->cc_nbuckets = nbuckets;
954 cp->cc_nlbuckets = 0;
955 cp->cc_nkeys = nkeys;
956 for (i = 0; i < nkeys; ++i)
957 {
959 cp->cc_keyno[i] = key[i];
960 }
961
962 /*
963 * new cache is initialized as far as we can go for now. print some
964 * debugging information, if appropriate.
965 */
967
968 /*
969 * add completed cache to top of group header's list
970 */
972
973 /*
974 * back to the old context before we return...
975 */
976 MemoryContextSwitchTo(oldcxt);
977
978 return cp;
979}
980
981/*
982 * Enlarge a catcache, doubling the number of buckets.
983 */
984static void
986{
987 dlist_head *newbucket;
988 int newnbuckets;
989 int i;
990
991 elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
992 cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
993
994 /* Allocate a new, larger, hash table. */
995 newnbuckets = cp->cc_nbuckets * 2;
996 newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
997
998 /* Move all entries from old hash table to new. */
999 for (i = 0; i < cp->cc_nbuckets; i++)
1000 {
1001 dlist_mutable_iter iter;
1002
1003 dlist_foreach_modify(iter, &cp->cc_bucket[i])
1004 {
1005 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
1006 int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
1007
1008 dlist_delete(iter.cur);
1009 dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
1010 }
1011 }
1012
1013 /* Switch to the new array. */
1014 pfree(cp->cc_bucket);
1015 cp->cc_nbuckets = newnbuckets;
1016 cp->cc_bucket = newbucket;
1017}
1018
1019/*
1020 * Enlarge a catcache's list storage, doubling the number of buckets.
1021 */
1022static void
1024{
1025 dlist_head *newbucket;
1026 int newnbuckets;
1027 int i;
1028
1029 elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
1030 cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);
1031
1032 /* Allocate a new, larger, hash table. */
1033 newnbuckets = cp->cc_nlbuckets * 2;
1034 newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
1035
1036 /* Move all entries from old hash table to new. */
1037 for (i = 0; i < cp->cc_nlbuckets; i++)
1038 {
1039 dlist_mutable_iter iter;
1040
1041 dlist_foreach_modify(iter, &cp->cc_lbucket[i])
1042 {
1043 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
1044 int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);
1045
1046 dlist_delete(iter.cur);
1047 dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
1048 }
1049 }
1050
1051 /* Switch to the new array. */
1052 pfree(cp->cc_lbucket);
1053 cp->cc_nlbuckets = newnbuckets;
1054 cp->cc_lbucket = newbucket;
1055}
1056
1057/*
1058 * ConditionalCatalogCacheInitializeCache
1059 *
1060 * Call CatalogCacheInitializeCache() if not yet done.
1061 */
1063static void
1065{
1066#ifdef USE_ASSERT_CHECKING
1067 /*
1068 * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
1069 * for hashing. This isn't ideal. Since lookup_type_cache() both
1070 * registers the callback and searches TYPEOID, reaching trouble likely
1071 * requires OOM at an unlucky moment.
1072 *
1073 * InvalidateAttoptCacheCallback() runs outside transactions and likewise
1074 * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
1075 */
1076 if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
1079 else
1080 Assert(cache->cc_tupdesc != NULL);
1081#endif
1082
1083 if (unlikely(cache->cc_tupdesc == NULL))
1085}
1086
1087/*
1088 * CatalogCacheInitializeCache
1089 *
1090 * This function does final initialization of a catcache: obtain the tuple
1091 * descriptor and set up the hash and equality function links.
1092 */
1093#ifdef CACHEDEBUG
1094#define CatalogCacheInitializeCache_DEBUG1 \
1095 elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
1096 cache->cc_reloid)
1097
1098#define CatalogCacheInitializeCache_DEBUG2 \
1099do { \
1100 if (cache->cc_keyno[i] > 0) { \
1101 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
1102 i+1, cache->cc_nkeys, cache->cc_keyno[i], \
1103 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
1104 } else { \
1105 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
1106 i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
1107 } \
1108} while(0)
1109#else
1110#define CatalogCacheInitializeCache_DEBUG1
1111#define CatalogCacheInitializeCache_DEBUG2
1112#endif
1113
1114static void
1116{
1117 Relation relation;
1118 MemoryContext oldcxt;
1119 TupleDesc tupdesc;
1120 int i;
1121
1123
1125
1126 /*
1127 * switch to the cache context so our allocations do not vanish at the end
1128 * of a transaction
1129 */
1130 Assert(CacheMemoryContext != NULL);
1131
1133
1134 /*
1135 * copy the relcache's tuple descriptor to permanent cache storage
1136 */
1137 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
1138
1139 /*
1140 * save the relation's name and relisshared flag, too (cc_relname is used
1141 * only for debugging purposes)
1142 */
1144 cache->cc_relisshared = RelationGetForm(relation)->relisshared;
1145
1146 /*
1147 * return to the caller's memory context and close the rel
1148 */
1149 MemoryContextSwitchTo(oldcxt);
1150
1151 table_close(relation, AccessShareLock);
1152
1153 CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
1155
1156 /*
1157 * initialize cache's key information
1158 */
1159 for (i = 0; i < cache->cc_nkeys; ++i)
1160 {
1161 Oid keytype;
1162 RegProcedure eqfunc;
1163
1165
1166 if (cache->cc_keyno[i] > 0)
1167 {
1168 Form_pg_attribute attr = TupleDescAttr(tupdesc,
1169 cache->cc_keyno[i] - 1);
1170
1171 keytype = attr->atttypid;
1172 /* cache key columns should always be NOT NULL */
1173 Assert(attr->attnotnull);
1174 }
1175 else
1176 {
1177 if (cache->cc_keyno[i] < 0)
1178 elog(FATAL, "sys attributes are not supported in caches");
1179 keytype = OIDOID;
1180 }
1181
1182 GetCCHashEqFuncs(keytype,
1183 &cache->cc_hashfunc[i],
1184 &eqfunc,
1185 &cache->cc_fastequal[i]);
1186
1187 /*
1188 * Do equality-function lookup (we assume this won't need a catalog
1189 * lookup for any supported type)
1190 */
1191 fmgr_info_cxt(eqfunc,
1194
1195 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
1197
1198 /* Fill in sk_strategy as well --- always standard equality */
1201 /* If a catcache key requires a collation, it must be C collation */
1202 cache->cc_skey[i].sk_collation = C_COLLATION_OID;
1203
1204 CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1205 cache->cc_relname, i, cache);
1206 }
1207
1208 /*
1209 * mark this cache fully initialized
1210 */
1211 cache->cc_tupdesc = tupdesc;
1212}
1213
1214/*
1215 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1216 *
1217 * One reason to call this routine is to ensure that the relcache has
1218 * created entries for all the catalogs and indexes referenced by catcaches.
1219 * Therefore, provide an option to open the index as well as fixing the
1220 * cache itself. An exception is the indexes on pg_am, which we don't use
1221 * (cf. IndexScanOK).
1222 */
1223void
1225{
1227
1228 if (touch_index &&
1229 cache->id != AMOID &&
1230 cache->id != AMNAME)
1231 {
1232 Relation idesc;
1233
1234 /*
1235 * We must lock the underlying catalog before opening the index to
1236 * avoid deadlock, since index_open could possibly result in reading
1237 * this same catalog, and if anyone else is exclusive-locking this
1238 * catalog and index they'll be doing it in that order.
1239 */
1242
1243 /*
1244 * While we've got the index open, let's check that it's unique (and
1245 * not just deferrable-unique, thank you very much). This is just to
1246 * catch thinkos in definitions of new catcaches, so we don't worry
1247 * about the pg_am indexes not getting tested.
1248 */
1249 Assert(idesc->rd_index->indisunique &&
1250 idesc->rd_index->indimmediate);
1251
1254 }
1255}
1256
1257
1258/*
1259 * IndexScanOK
1260 *
1261 * This function checks for tuples that will be fetched by
1262 * IndexSupportInitialize() during relcache initialization for
1263 * certain system indexes that support critical syscaches.
1264 * We can't use an indexscan to fetch these, else we'll get into
1265 * infinite recursion. A plain heap scan will work, however.
1266 * Once we have completed relcache initialization (signaled by
1267 * criticalRelcachesBuilt), we don't have to worry anymore.
1268 *
1269 * Similarly, during backend startup we have to be able to use the
1270 * pg_authid, pg_auth_members and pg_database syscaches for
1271 * authentication even if we don't yet have relcache entries for those
1272 * catalogs' indexes.
1273 */
1274static bool
1276{
1277 switch (cache->id)
1278 {
1279 case INDEXRELID:
1280
1281 /*
1282 * Rather than tracking exactly which indexes have to be loaded
1283 * before we can use indexscans (which changes from time to time),
1284 * just force all pg_index searches to be heap scans until we've
1285 * built the critical relcaches.
1286 */
1288 return false;
1289 break;
1290
1291 case AMOID:
1292 case AMNAME:
1293
1294 /*
1295 * Always do heap scans in pg_am, because it's so small there's
1296 * not much point in an indexscan anyway. We *must* do this when
1297 * initially building critical relcache entries, but we might as
1298 * well just always do it.
1299 */
1300 return false;
1301
1302 case AUTHNAME:
1303 case AUTHOID:
1304 case AUTHMEMMEMROLE:
1305 case DATABASEOID:
1306
1307 /*
1308 * Protect authentication lookups occurring before relcache has
1309 * collected entries for shared indexes.
1310 */
1312 return false;
1313 break;
1314
1315 default:
1316 break;
1317 }
1318
1319 /* Normal case, allow index scan */
1320 return true;
1321}
1322
1323/*
1324 * SearchCatCache
1325 *
1326 * This call searches a system cache for a tuple, opening the relation
1327 * if necessary (on the first access to a particular cache).
1328 *
1329 * The result is NULL if not found, or a pointer to a HeapTuple in
1330 * the cache. The caller must not modify the tuple, and must call
1331 * ReleaseCatCache() when done with it.
1332 *
1333 * The search key values should be expressed as Datums of the key columns'
1334 * datatype(s). (Pass zeroes for any unused parameters.) As a special
1335 * exception, the passed-in key for a NAME column can be just a C string;
1336 * the caller need not go to the trouble of converting it to a fully
1337 * null-padded NAME.
1338 */
1341 Datum v1,
1342 Datum v2,
1343 Datum v3,
1344 Datum v4)
1345{
1346 return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1347}
1348
1349
1350/*
1351 * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1352 * arguments. The compiler can inline the body and unroll loops, making them a
1353 * bit faster than SearchCatCache().
1354 */
1355
1358 Datum v1)
1359{
1360 return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1361}
1362
1363
1366 Datum v1, Datum v2)
1367{
1368 return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1369}
1370
1371
1374 Datum v1, Datum v2, Datum v3)
1375{
1376 return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1377}
1378
1379
1382 Datum v1, Datum v2, Datum v3, Datum v4)
1383{
1384 return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1385}
1386
1387/*
1388 * Work-horse for SearchCatCache/SearchCatCacheN.
1389 */
1390static inline HeapTuple
1392 int nkeys,
1393 Datum v1,
1394 Datum v2,
1395 Datum v3,
1396 Datum v4)
1397{
1399 uint32 hashValue;
1400 Index hashIndex;
1401 dlist_iter iter;
1402 dlist_head *bucket;
1403 CatCTup *ct;
1404
1405 Assert(cache->cc_nkeys == nkeys);
1406
1407 /*
1408 * one-time startup overhead for each cache
1409 */
1411
1412#ifdef CATCACHE_STATS
1413 cache->cc_searches++;
1414#endif
1415
1416 /* Initialize local parameter array */
1417 arguments[0] = v1;
1418 arguments[1] = v2;
1419 arguments[2] = v3;
1420 arguments[3] = v4;
1421
1422 /*
1423 * find the hash bucket in which to look for the tuple
1424 */
1425 hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1426 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1427
1428 /*
1429 * scan the hash bucket until we find a match or exhaust our tuples
1430 *
1431 * Note: it's okay to use dlist_foreach here, even though we modify the
1432 * dlist within the loop, because we don't continue the loop afterwards.
1433 */
1434 bucket = &cache->cc_bucket[hashIndex];
1435 dlist_foreach(iter, bucket)
1436 {
1437 ct = dlist_container(CatCTup, cache_elem, iter.cur);
1438
1439 if (ct->dead)
1440 continue; /* ignore dead entries */
1441
1442 if (ct->hash_value != hashValue)
1443 continue; /* quickly skip entry if wrong hash val */
1444
1445 if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
1446 continue;
1447
1448 /*
1449 * We found a match in the cache. Move it to the front of the list
1450 * for its hashbucket, in order to speed subsequent searches. (The
1451 * most frequently accessed elements in any hashbucket will tend to be
1452 * near the front of the hashbucket's list.)
1453 */
1454 dlist_move_head(bucket, &ct->cache_elem);
1455
1456 /*
1457 * If it's a positive entry, bump its refcount and return it. If it's
1458 * negative, we can report failure to the caller.
1459 */
1460 if (!ct->negative)
1461 {
1463 ct->refcount++;
1465
1466 CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1467 cache->cc_relname, hashIndex);
1468
1469#ifdef CATCACHE_STATS
1470 cache->cc_hits++;
1471#endif
1472
1473 return &ct->tuple;
1474 }
1475 else
1476 {
1477 CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1478 cache->cc_relname, hashIndex);
1479
1480#ifdef CATCACHE_STATS
1481 cache->cc_neg_hits++;
1482#endif
1483
1484 return NULL;
1485 }
1486 }
1487
1488 return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1489}
1490
1491/*
1492 * Search the actual catalogs, rather than the cache.
1493 *
1494 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1495 * as small as possible. To avoid that effort being undone by a helpful
1496 * compiler, try to explicitly forbid inlining.
1497 */
1500 int nkeys,
1501 uint32 hashValue,
1502 Index hashIndex,
1503 Datum v1,
1504 Datum v2,
1505 Datum v3,
1506 Datum v4)
1507{
1508 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1509 Relation relation;
1510 SysScanDesc scandesc;
1511 HeapTuple ntp;
1512 CatCTup *ct;
1513 bool stale;
1515
1516 /* Initialize local parameter array */
1517 arguments[0] = v1;
1518 arguments[1] = v2;
1519 arguments[2] = v3;
1520 arguments[3] = v4;
1521
1522 /*
1523 * Tuple was not found in cache, so we have to try to retrieve it directly
1524 * from the relation. If found, we will add it to the cache; if not
1525 * found, we will add a negative cache entry instead.
1526 *
1527 * NOTE: it is possible for recursive cache lookups to occur while reading
1528 * the relation --- for example, due to shared-cache-inval messages being
1529 * processed during table_open(). This is OK. It's even possible for one
1530 * of those lookups to find and enter the very same tuple we are trying to
1531 * fetch here. If that happens, we will enter a second copy of the tuple
1532 * into the cache. The first copy will never be referenced again, and
1533 * will eventually age out of the cache, so there's no functional problem.
1534 * This case is rare enough that it's not worth expending extra cycles to
1535 * detect.
1536 *
1537 * Another case, which we *must* handle, is that the tuple could become
1538 * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
1539 * AcceptInvalidationMessages can run during TOAST table access). We do
1540 * not want to return already-stale catcache entries, so we loop around
1541 * and do the table scan again if that happens.
1542 */
1544
1545 /*
1546 * Ok, need to make a lookup in the relation, copy the scankey and fill
1547 * out any per-call fields.
1548 */
1549 memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1550 cur_skey[0].sk_argument = v1;
1551 cur_skey[1].sk_argument = v2;
1552 cur_skey[2].sk_argument = v3;
1553 cur_skey[3].sk_argument = v4;
1554
1555 do
1556 {
1557 scandesc = systable_beginscan(relation,
1560 NULL,
1561 nkeys,
1562 cur_skey);
1563
1564 ct = NULL;
1565 stale = false;
1566
1567 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1568 {
1569 ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1570 hashValue, hashIndex);
1571 /* upon failure, we must start the scan over */
1572 if (ct == NULL)
1573 {
1574 stale = true;
1575 break;
1576 }
1577 /* immediately set the refcount to 1 */
1579 ct->refcount++;
1581 break; /* assume only one match */
1582 }
1583
1584 systable_endscan(scandesc);
1585 } while (stale);
1586
1587 table_close(relation, AccessShareLock);
1588
1589 /*
1590 * If tuple was not found, we need to build a negative cache entry
1591 * containing a fake tuple. The fake tuple has the correct key columns,
1592 * but nulls everywhere else.
1593 *
1594 * In bootstrap mode, we don't build negative entries, because the cache
1595 * invalidation mechanism isn't alive and can't clear them if the tuple
1596 * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1597 * cache inval for that.)
1598 */
1599 if (ct == NULL)
1600 {
1602 return NULL;
1603
1605 hashValue, hashIndex);
1606
1607 /* Creating a negative cache entry shouldn't fail */
1608 Assert(ct != NULL);
1609
1610 CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1612 CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1613 cache->cc_relname, hashIndex);
1614
1615 /*
1616 * We are not returning the negative entry to the caller, so leave its
1617 * refcount zero.
1618 */
1619
1620 return NULL;
1621 }
1622
1623 CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1625 CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1626 cache->cc_relname, hashIndex);
1627
1628#ifdef CATCACHE_STATS
1629 cache->cc_newloads++;
1630#endif
1631
1632 return &ct->tuple;
1633}
1634
1635/*
1636 * ReleaseCatCache
1637 *
1638 * Decrement the reference count of a catcache entry (releasing the
1639 * hold grabbed by a successful SearchCatCache).
1640 *
1641 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1642 * will be freed as soon as their refcount goes to zero. In combination
1643 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1644 * to catch references to already-released catcache entries.
1645 */
1646void
1648{
1650}
1651
1652static void
1654{
1655 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1656 offsetof(CatCTup, tuple));
1657
1658 /* Safety checks to ensure we were handed a cache entry */
1659 Assert(ct->ct_magic == CT_MAGIC);
1660 Assert(ct->refcount > 0);
1661
1662 ct->refcount--;
1663 if (resowner)
1665
1666 if (
1667#ifndef CATCACHE_FORCE_RELEASE
1668 ct->dead &&
1669#endif
1670 ct->refcount == 0 &&
1671 (ct->c_list == NULL || ct->c_list->refcount == 0))
1673}
1674
1675
1676/*
1677 * GetCatCacheHashValue
1678 *
1679 * Compute the hash value for a given set of search keys.
1680 *
1681 * The reason for exposing this as part of the API is that the hash value is
1682 * exposed in cache invalidation operations, so there are places outside the
1683 * catcache code that need to be able to compute the hash values.
1684 */
1685uint32
1687 Datum v1,
1688 Datum v2,
1689 Datum v3,
1690 Datum v4)
1691{
1692 /*
1693 * one-time startup overhead for each cache
1694 */
1696
1697 /*
1698 * calculate the hash value
1699 */
1700 return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1701}
1702
1703
1704/*
1705 * SearchCatCacheList
1706 *
1707 * Generate a list of all tuples matching a partial key (that is,
1708 * a key specifying just the first K of the cache's N key columns).
1709 *
1710 * It doesn't make any sense to specify all of the cache's key columns
1711 * here: since the key is unique, there could be at most one match, so
1712 * you ought to use SearchCatCache() instead. Hence this function takes
1713 * one fewer Datum argument than SearchCatCache() does.
1714 *
1715 * The caller must not modify the list object or the pointed-to tuples,
1716 * and must call ReleaseCatCacheList() when done with the list.
1717 */
1718CatCList *
1720 int nkeys,
1721 Datum v1,
1722 Datum v2,
1723 Datum v3)
1724{
1725 Datum v4 = 0; /* dummy last-column value */
1727 uint32 lHashValue;
1728 Index lHashIndex;
1729 dlist_iter iter;
1730 dlist_head *lbucket;
1731 CatCList *cl;
1732 CatCTup *ct;
1733 List *volatile ctlist;
1734 ListCell *ctlist_item;
1735 int nmembers;
1736 bool ordered;
1737 HeapTuple ntp;
1738 MemoryContext oldcxt;
1739 int i;
1740 CatCInProgress *save_in_progress;
1741 CatCInProgress in_progress_ent;
1742
1743 /*
1744 * one-time startup overhead for each cache
1745 */
1747
1748 Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1749
1750#ifdef CATCACHE_STATS
1751 cache->cc_lsearches++;
1752#endif
1753
1754 /* Initialize local parameter array */
1755 arguments[0] = v1;
1756 arguments[1] = v2;
1757 arguments[2] = v3;
1758 arguments[3] = v4;
1759
1760 /*
1761 * If we haven't previously done a list search in this cache, create the
1762 * bucket header array; otherwise, consider whether it's time to enlarge
1763 * it.
1764 */
1765 if (cache->cc_lbucket == NULL)
1766 {
1767 /* Arbitrary initial size --- must be a power of 2 */
1768 int nbuckets = 16;
1769
1772 nbuckets * sizeof(dlist_head));
1773 /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
1774 cache->cc_nlbuckets = nbuckets;
1775 }
1776 else
1777 {
1778 /*
1779 * If the hash table has become too full, enlarge the buckets array.
1780 * Quite arbitrarily, we enlarge when fill factor > 2.
1781 */
1782 if (cache->cc_nlist > cache->cc_nlbuckets * 2)
1784 }
1785
1786 /*
1787 * Find the hash bucket in which to look for the CatCList.
1788 */
1789 lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1790 lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);
1791
1792 /*
1793 * scan the items until we find a match or exhaust our list
1794 *
1795 * Note: it's okay to use dlist_foreach here, even though we modify the
1796 * dlist within the loop, because we don't continue the loop afterwards.
1797 */
1798 lbucket = &cache->cc_lbucket[lHashIndex];
1799 dlist_foreach(iter, lbucket)
1800 {
1801 cl = dlist_container(CatCList, cache_elem, iter.cur);
1802
1803 if (cl->dead)
1804 continue; /* ignore dead entries */
1805
1806 if (cl->hash_value != lHashValue)
1807 continue; /* quickly skip entry if wrong hash val */
1808
1809 /*
1810 * see if the cached list matches our key.
1811 */
1812 if (cl->nkeys != nkeys)
1813 continue;
1814
1815 if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1816 continue;
1817
1818 /*
1819 * We found a matching list. Move the list to the front of the list
1820 * for its hashbucket, so as to speed subsequent searches. (We do not
1821 * move the members to the fronts of their hashbucket lists, however,
1822 * since there's no point in that unless they are searched for
1823 * individually.)
1824 */
1825 dlist_move_head(lbucket, &cl->cache_elem);
1826
1827 /* Bump the list's refcount and return it */
1829 cl->refcount++;
1831
1832 CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1833 cache->cc_relname);
1834
1835#ifdef CATCACHE_STATS
1836 cache->cc_lhits++;
1837#endif
1838
1839 return cl;
1840 }
1841
1842 /*
1843 * List was not found in cache, so we have to build it by reading the
1844 * relation. For each matching tuple found in the relation, use an
1845 * existing cache entry if possible, else build a new one.
1846 *
1847 * We have to bump the member refcounts temporarily to ensure they won't
1848 * get dropped from the cache while loading other members. We use a PG_TRY
1849 * block to ensure we can undo those refcounts if we get an error before
1850 * we finish constructing the CatCList. ctlist must be valid throughout
1851 * the PG_TRY block.
1852 */
1853 ctlist = NIL;
1854
1855 /*
1856 * Cache invalidation can happen while we're building the list.
1857 * CatalogCacheCreateEntry() handles concurrent invalidation of individual
1858 * tuples, but it's also possible that a new entry is concurrently added
1859 * that should be part of the list we're building. Register an
1860 * "in-progress" entry that will receive the invalidation, until we have
1861 * built the final list entry.
1862 */
1863 save_in_progress = catcache_in_progress_stack;
1864 in_progress_ent.next = catcache_in_progress_stack;
1865 in_progress_ent.cache = cache;
1866 in_progress_ent.hash_value = lHashValue;
1867 in_progress_ent.list = true;
1868 in_progress_ent.dead = false;
1869 catcache_in_progress_stack = &in_progress_ent;
1870
1871 PG_TRY();
1872 {
1873 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1874 Relation relation;
1875 SysScanDesc scandesc;
1876 bool first_iter = true;
1877
1879
1880 /*
1881 * Ok, need to make a lookup in the relation, copy the scankey and
1882 * fill out any per-call fields.
1883 */
1884 memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1885 cur_skey[0].sk_argument = v1;
1886 cur_skey[1].sk_argument = v2;
1887 cur_skey[2].sk_argument = v3;
1888 cur_skey[3].sk_argument = v4;
1889
1890 /*
1891 * Scan the table for matching entries. If an invalidation arrives
1892 * mid-build, we will loop back here to retry.
1893 */
1894 do
1895 {
1896 /*
1897 * If we are retrying, release refcounts on any items created on
1898 * the previous iteration. We dare not try to free them if
1899 * they're now unreferenced, since an error while doing that would
1900 * result in the PG_CATCH below doing extra refcount decrements.
1901 * Besides, we'll likely re-adopt those items in the next
1902 * iteration, so it's not worth complicating matters to try to get
1903 * rid of them.
1904 */
1905 foreach(ctlist_item, ctlist)
1906 {
1907 ct = (CatCTup *) lfirst(ctlist_item);
1908 Assert(ct->c_list == NULL);
1909 Assert(ct->refcount > 0);
1910 ct->refcount--;
1911 }
1912 /* Reset ctlist in preparation for new try */
1913 ctlist = NIL;
1914 in_progress_ent.dead = false;
1915
1916 scandesc = systable_beginscan(relation,
1919 NULL,
1920 nkeys,
1921 cur_skey);
1922
1923 /* The list will be ordered iff we are doing an index scan */
1924 ordered = (scandesc->irel != NULL);
1925
1926 /* Injection point to help testing the recursive invalidation case */
1927 if (first_iter)
1928 {
1929 INJECTION_POINT("catcache-list-miss-systable-scan-started");
1930 first_iter = false;
1931 }
1932
1933 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
1934 !in_progress_ent.dead)
1935 {
1936 uint32 hashValue;
1937 Index hashIndex;
1938 bool found = false;
1939 dlist_head *bucket;
1940
1941 /*
1942 * See if there's an entry for this tuple already.
1943 */
1944 ct = NULL;
1946 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1947
1948 bucket = &cache->cc_bucket[hashIndex];
1949 dlist_foreach(iter, bucket)
1950 {
1951 ct = dlist_container(CatCTup, cache_elem, iter.cur);
1952
1953 if (ct->dead || ct->negative)
1954 continue; /* ignore dead and negative entries */
1955
1956 if (ct->hash_value != hashValue)
1957 continue; /* quickly skip entry if wrong hash val */
1958
1959 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1960 continue; /* not same tuple */
1961
1962 /*
1963 * Found a match, but can't use it if it belongs to
1964 * another list already
1965 */
1966 if (ct->c_list)
1967 continue;
1968
1969 found = true;
1970 break; /* A-OK */
1971 }
1972
1973 if (!found)
1974 {
1975 /* We didn't find a usable entry, so make a new one */
1976 ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1977 hashValue, hashIndex);
1978
1979 /* upon failure, we must start the scan over */
1980 if (ct == NULL)
1981 {
1982 in_progress_ent.dead = true;
1983 break;
1984 }
1985 }
1986
1987 /* Careful here: add entry to ctlist, then bump its refcount */
1988 /* This way leaves state correct if lappend runs out of memory */
1989 ctlist = lappend(ctlist, ct);
1990 ct->refcount++;
1991 }
1992
1993 systable_endscan(scandesc);
1994 } while (in_progress_ent.dead);
1995
1996 table_close(relation, AccessShareLock);
1997
1998 /* Make sure the resource owner has room to remember this entry. */
2000
2001 /* Now we can build the CatCList entry. */
2003 nmembers = list_length(ctlist);
2004 cl = (CatCList *)
2005 palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
2006
2007 /* Extract key values */
2009 arguments, cl->keys);
2010 MemoryContextSwitchTo(oldcxt);
2011
2012 /*
2013 * We are now past the last thing that could trigger an elog before we
2014 * have finished building the CatCList and remembering it in the
2015 * resource owner. So it's OK to fall out of the PG_TRY, and indeed
2016 * we'd better do so before we start marking the members as belonging
2017 * to the list.
2018 */
2019 }
2020 PG_CATCH();
2021 {
2022 Assert(catcache_in_progress_stack == &in_progress_ent);
2023 catcache_in_progress_stack = save_in_progress;
2024
2025 foreach(ctlist_item, ctlist)
2026 {
2027 ct = (CatCTup *) lfirst(ctlist_item);
2028 Assert(ct->c_list == NULL);
2029 Assert(ct->refcount > 0);
2030 ct->refcount--;
2031 if (
2032#ifndef CATCACHE_FORCE_RELEASE
2033 ct->dead &&
2034#endif
2035 ct->refcount == 0 &&
2036 (ct->c_list == NULL || ct->c_list->refcount == 0))
2038 }
2039
2040 PG_RE_THROW();
2041 }
2042 PG_END_TRY();
2043 Assert(catcache_in_progress_stack == &in_progress_ent);
2044 catcache_in_progress_stack = save_in_progress;
2045
2046 cl->cl_magic = CL_MAGIC;
2047 cl->my_cache = cache;
2048 cl->refcount = 0; /* for the moment */
2049 cl->dead = false;
2050 cl->ordered = ordered;
2051 cl->nkeys = nkeys;
2052 cl->hash_value = lHashValue;
2053 cl->n_members = nmembers;
2054
2055 i = 0;
2056 foreach(ctlist_item, ctlist)
2057 {
2058 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
2059 Assert(ct->c_list == NULL);
2060 ct->c_list = cl;
2061 /* release the temporary refcount on the member */
2062 Assert(ct->refcount > 0);
2063 ct->refcount--;
2064 /* mark list dead if any members already dead */
2065 if (ct->dead)
2066 cl->dead = true;
2067 }
2068 Assert(i == nmembers);
2069
2070 /*
2071 * Add the CatCList to the appropriate bucket, and count it.
2072 */
2073 dlist_push_head(lbucket, &cl->cache_elem);
2074
2075 cache->cc_nlist++;
2076
2077 /* Finally, bump the list's refcount and return it */
2078 cl->refcount++;
2080
2081 CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
2082 cache->cc_relname, nmembers);
2083
2084 return cl;
2085}
2086
2087/*
2088 * ReleaseCatCacheList
2089 *
2090 * Decrement the reference count of a catcache list.
2091 */
2092void
2094{
2096}
2097
2098static void
2100{
2101 /* Safety checks to ensure we were handed a cache entry */
2102 Assert(list->cl_magic == CL_MAGIC);
2103 Assert(list->refcount > 0);
2104 list->refcount--;
2105 if (resowner)
2107
2108 if (
2109#ifndef CATCACHE_FORCE_RELEASE
2110 list->dead &&
2111#endif
2112 list->refcount == 0)
2113 CatCacheRemoveCList(list->my_cache, list);
2114}
2115
2116
2117/*
2118 * CatalogCacheCreateEntry
2119 * Create a new CatCTup entry, copying the given HeapTuple and other
2120 * supplied data into it. The new entry initially has refcount 0.
2121 *
2122 * To create a normal cache entry, ntp must be the HeapTuple just fetched
2123 * from scandesc, and "arguments" is not used. To create a negative cache
2124 * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
2125 * In either case, hashValue/hashIndex are the hash values computed from
2126 * the cache keys.
2127 *
2128 * Returns NULL if we attempt to detoast the tuple and observe that it
2129 * became stale. (This cannot happen for a negative entry.) Caller must
2130 * retry the tuple lookup in that case.
2131 */
2132static CatCTup *
2134 uint32 hashValue, Index hashIndex)
2135{
2136 CatCTup *ct;
2137 MemoryContext oldcxt;
2138
2139 if (ntp)
2140 {
2141 int i;
2142 HeapTuple dtp = NULL;
2143
2144 /*
2145 * The invalidation of the in-progress entry essentially never happens
2146 * during our regression tests, and there's no easy way to force it to
2147 * fail for testing purposes. To ensure we have test coverage for the
2148 * retry paths in our callers, make debug builds randomly fail about
2149 * 0.1% of the times through this code path, even when there's no
2150 * toasted fields.
2151 */
2152#ifdef USE_ASSERT_CHECKING
2154 return NULL;
2155#endif
2156
2157 /*
2158 * If there are any out-of-line toasted fields in the tuple, expand
2159 * them in-line. This saves cycles during later use of the catcache
2160 * entry, and also protects us against the possibility of the toast
2161 * tuples being freed before we attempt to fetch them, in case of
2162 * something using a slightly stale catcache entry.
2163 */
2164 if (HeapTupleHasExternal(ntp))
2165 {
2166 CatCInProgress *save_in_progress;
2167 CatCInProgress in_progress_ent;
2168
2169 /*
2170 * The tuple could become stale while we are doing toast table
2171 * access (since AcceptInvalidationMessages can run then). The
2172 * invalidation will mark our in-progress entry as dead.
2173 */
2174 save_in_progress = catcache_in_progress_stack;
2175 in_progress_ent.next = catcache_in_progress_stack;
2176 in_progress_ent.cache = cache;
2177 in_progress_ent.hash_value = hashValue;
2178 in_progress_ent.list = false;
2179 in_progress_ent.dead = false;
2180 catcache_in_progress_stack = &in_progress_ent;
2181
2182 PG_TRY();
2183 {
2185 }
2186 PG_FINALLY();
2187 {
2188 Assert(catcache_in_progress_stack == &in_progress_ent);
2189 catcache_in_progress_stack = save_in_progress;
2190 }
2191 PG_END_TRY();
2192
2193 if (in_progress_ent.dead)
2194 {
2195 heap_freetuple(dtp);
2196 return NULL;
2197 }
2198 }
2199 else
2200 dtp = ntp;
2201
2202 /* Allocate memory for CatCTup and the cached tuple in one go */
2204
2205 ct = (CatCTup *) palloc(sizeof(CatCTup) +
2206 MAXIMUM_ALIGNOF + dtp->t_len);
2207 ct->tuple.t_len = dtp->t_len;
2208 ct->tuple.t_self = dtp->t_self;
2209 ct->tuple.t_tableOid = dtp->t_tableOid;
2211 MAXALIGN(((char *) ct) + sizeof(CatCTup));
2212 /* copy tuple contents */
2213 memcpy((char *) ct->tuple.t_data,
2214 (const char *) dtp->t_data,
2215 dtp->t_len);
2216 MemoryContextSwitchTo(oldcxt);
2217
2218 if (dtp != ntp)
2219 heap_freetuple(dtp);
2220
2221 /* extract keys - they'll point into the tuple if not by-value */
2222 for (i = 0; i < cache->cc_nkeys; i++)
2223 {
2224 Datum atp;
2225 bool isnull;
2226
2227 atp = heap_getattr(&ct->tuple,
2228 cache->cc_keyno[i],
2230 &isnull);
2231 Assert(!isnull);
2232 ct->keys[i] = atp;
2233 }
2234 }
2235 else
2236 {
2237 /* Set up keys for a negative cache entry */
2239 ct = (CatCTup *) palloc(sizeof(CatCTup));
2240
2241 /*
2242 * Store keys - they'll point into separately allocated memory if not
2243 * by-value.
2244 */
2246 arguments, ct->keys);
2247 MemoryContextSwitchTo(oldcxt);
2248 }
2249
2250 /*
2251 * Finish initializing the CatCTup header, and add it to the cache's
2252 * linked list and counts.
2253 */
2254 ct->ct_magic = CT_MAGIC;
2255 ct->my_cache = cache;
2256 ct->c_list = NULL;
2257 ct->refcount = 0; /* for the moment */
2258 ct->dead = false;
2259 ct->negative = (ntp == NULL);
2260 ct->hash_value = hashValue;
2261
2262 dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
2263
2264 cache->cc_ntup++;
2265 CacheHdr->ch_ntup++;
2266
2267 /*
2268 * If the hash table has become too full, enlarge the buckets array. Quite
2269 * arbitrarily, we enlarge when fill factor > 2.
2270 */
2271 if (cache->cc_ntup > cache->cc_nbuckets * 2)
2273
2274 return ct;
2275}
2276
2277/*
2278 * Helper routine that frees keys stored in the keys array.
2279 */
2280static void
2281CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
2282{
2283 int i;
2284
2285 for (i = 0; i < nkeys; i++)
2286 {
2287 int attnum = attnos[i];
2289
2290 /* system attribute are not supported in caches */
2291 Assert(attnum > 0);
2292
2293 att = TupleDescAttr(tupdesc, attnum - 1);
2294
2295 if (!att->attbyval)
2296 pfree(DatumGetPointer(keys[i]));
2297 }
2298}
2299
2300/*
2301 * Helper routine that copies the keys in the srckeys array into the dstkeys
2302 * one, guaranteeing that the datums are fully allocated in the current memory
2303 * context.
2304 */
2305static void
2306CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
2307 Datum *srckeys, Datum *dstkeys)
2308{
2309 int i;
2310
2311 /*
2312 * XXX: memory and lookup performance could possibly be improved by
2313 * storing all keys in one allocation.
2314 */
2315
2316 for (i = 0; i < nkeys; i++)
2317 {
2318 int attnum = attnos[i];
2319 Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2320 Datum src = srckeys[i];
2321 NameData srcname;
2322
2323 /*
2324 * Must be careful in case the caller passed a C string where a NAME
2325 * is wanted: convert the given argument to a correctly padded NAME.
2326 * Otherwise the memcpy() done by datumCopy() could fall off the end
2327 * of memory.
2328 */
2329 if (att->atttypid == NAMEOID)
2330 {
2331 namestrcpy(&srcname, DatumGetCString(src));
2332 src = NameGetDatum(&srcname);
2333 }
2334
2335 dstkeys[i] = datumCopy(src,
2336 att->attbyval,
2337 att->attlen);
2338 }
2339}
2340
2341/*
2342 * PrepareToInvalidateCacheTuple()
2343 *
2344 * This is part of a rather subtle chain of events, so pay attention:
2345 *
2346 * When a tuple is inserted or deleted, it cannot be flushed from the
2347 * catcaches immediately, for reasons explained at the top of cache/inval.c.
2348 * Instead we have to add entry(s) for the tuple to a list of pending tuple
2349 * invalidations that will be done at the end of the command or transaction.
2350 *
2351 * The lists of tuples that need to be flushed are kept by inval.c. This
2352 * routine is a helper routine for inval.c. Given a tuple belonging to
2353 * the specified relation, find all catcaches it could be in, compute the
2354 * correct hash value for each such catcache, and call the specified
2355 * function to record the cache id and hash value in inval.c's lists.
2356 * SysCacheInvalidate will be called later, if appropriate,
2357 * using the recorded information.
2358 *
2359 * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2360 * For an update, we are called just once, with tuple being the old tuple
2361 * version and newtuple the new version. We should make two list entries
2362 * if the tuple's hash value changed, but only one if it didn't.
2363 *
2364 * Note that it is irrelevant whether the given tuple is actually loaded
2365 * into the catcache at the moment. Even if it's not there now, it might
2366 * be by the end of the command, or there might be a matching negative entry
2367 * to flush --- or other backends' caches might have such entries --- so
2368 * we have to make list entries to flush it later.
2369 *
2370 * Also note that it's not an error if there are no catcaches for the
2371 * specified relation. inval.c doesn't know exactly which rels have
2372 * catcaches --- it will call this routine for any tuple that's in a
2373 * system relation.
2374 */
2375void
2377 HeapTuple tuple,
2378 HeapTuple newtuple,
2379 void (*function) (int, uint32, Oid, void *),
2380 void *context)
2381{
2382 slist_iter iter;
2383 Oid reloid;
2384
2385 CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2386
2387 /*
2388 * sanity checks
2389 */
2390 Assert(RelationIsValid(relation));
2391 Assert(HeapTupleIsValid(tuple));
2393 Assert(CacheHdr != NULL);
2394
2395 reloid = RelationGetRelid(relation);
2396
2397 /* ----------------
2398 * for each cache
2399 * if the cache contains tuples from the specified relation
2400 * compute the tuple's hash value(s) in this cache,
2401 * and call the passed function to register the information.
2402 * ----------------
2403 */
2404
2406 {
2407 CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2408 uint32 hashvalue;
2409 Oid dbid;
2410
2411 if (ccp->cc_reloid != reloid)
2412 continue;
2413
2414 /* Just in case cache hasn't finished initialization yet... */
2416
2417 hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2418 dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2419
2420 (*function) (ccp->id, hashvalue, dbid, context);
2421
2422 if (newtuple)
2423 {
2424 uint32 newhashvalue;
2425
2426 newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2427
2428 if (newhashvalue != hashvalue)
2429 (*function) (ccp->id, newhashvalue, dbid, context);
2430 }
2431 }
2432}
2433
2434/* ResourceOwner callbacks */
2435
2436static void
2438{
2440}
2441
2442static char *
2444{
2445 HeapTuple tuple = (HeapTuple) DatumGetPointer(res);
2446 CatCTup *ct = (CatCTup *) (((char *) tuple) -
2447 offsetof(CatCTup, tuple));
2448
2449 /* Safety check to ensure we were handed a cache entry */
2450 Assert(ct->ct_magic == CT_MAGIC);
2451
2452 return psprintf("cache %s (%d), tuple %u/%u has count %d",
2453 ct->my_cache->cc_relname, ct->my_cache->id,
2456 ct->refcount);
2457}
2458
2459static void
2461{
2463}
2464
2465static char *
2467{
2469
2470 return psprintf("cache %s (%d), list %p has count %d",
2471 list->my_cache->cc_relname, list->my_cache->id,
2472 list, list->refcount);
2473}
#define AttributeNumberIsValid(attributeNumber)
Definition: attnum.h:34
#define NameStr(name)
Definition: c.h:717
#define pg_noinline
Definition: c.h:286
#define MAXALIGN(LEN)
Definition: c.h:782
#define PG_UINT32_MAX
Definition: c.h:561
#define PointerIsValid(pointer)
Definition: c.h:734
#define pg_attribute_always_inline
Definition: c.h:270
regproc RegProcedure
Definition: c.h:621
int32_t int32
Definition: c.h:498
#define unlikely(x)
Definition: c.h:347
uint32_t uint32
Definition: c.h:502
unsigned int Index
Definition: c.h:585
struct CatCInProgress CatCInProgress
static bool chareqfast(Datum a, Datum b)
Definition: catcache.c:191
CatCache * InitCatCache(int id, Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets)
Definition: catcache.c:878
HeapTuple SearchCatCache2(CatCache *cache, Datum v1, Datum v2)
Definition: catcache.c:1365
static bool int4eqfast(Datum a, Datum b)
Definition: catcache.c:232
HeapTuple SearchCatCache3(CatCache *cache, Datum v1, Datum v2, Datum v3)
Definition: catcache.c:1373
void ReleaseCatCacheList(CatCList *list)
Definition: catcache.c:2093
static void CatalogCacheInitializeCache(CatCache *cache)
Definition: catcache.c:1115
static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache, int nkeys, uint32 hashValue, Index hashIndex, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1499
static bool int2eqfast(Datum a, Datum b)
Definition: catcache.c:220
static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
Definition: catcache.c:1653
static uint32 int4hashfast(Datum datum)
Definition: catcache.c:238
void InitCatCachePhase2(CatCache *cache, bool touch_index)
Definition: catcache.c:1224
void ResetCatalogCaches(void)
Definition: catcache.c:798
CatCList * SearchCatCacheList(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3)
Definition: catcache.c:1719
static void ResOwnerReleaseCatCache(Datum res)
Definition: catcache.c:2437
uint32 GetCatCacheHashValue(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1686
static CatCInProgress * catcache_in_progress_stack
Definition: catcache.c:61
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
Definition: catcache.c:528
static char * ResOwnerPrintCatCache(Datum res)
Definition: catcache.c:2443
static void RehashCatCache(CatCache *cp)
Definition: catcache.c:985
static void ResetCatalogCache(CatCache *cache, bool debug_discard)
Definition: catcache.c:736
static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments, uint32 hashValue, Index hashIndex)
Definition: catcache.c:2133
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
Definition: catcache.c:386
HeapTuple SearchCatCache4(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1381
static void ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: catcache.c:174
static const ResourceOwnerDesc catcache_resowner_desc
Definition: catcache.c:137
static void ResOwnerReleaseCatCacheList(Datum res)
Definition: catcache.c:2460
static void ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: catcache.c:159
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid, void *), void *context)
Definition: catcache.c:2376
#define CatalogCacheInitializeCache_DEBUG1
Definition: catcache.c:1110
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *srckeys, Datum *dstkeys)
Definition: catcache.c:2306
static HeapTuple SearchCatCacheInternal(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1391
static pg_attribute_always_inline void ConditionalCatalogCacheInitializeCache(CatCache *cache)
Definition: catcache.c:1064
static char * ResOwnerPrintCatCacheList(Datum res)
Definition: catcache.c:2466
static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
Definition: catcache.c:2099
static CatCacheHeader * CacheHdr
Definition: catcache.c:84
static uint32 namehashfast(Datum datum)
Definition: catcache.c:212
void CreateCacheMemoryContext(void)
Definition: catcache.c:708
static const ResourceOwnerDesc catlistref_resowner_desc
Definition: catcache.c:147
static bool IndexScanOK(CatCache *cache)
Definition: catcache.c:1275
static void GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
Definition: catcache.c:274
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:344
static bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys, const Datum *cachekeys, const Datum *searchkeys)
Definition: catcache.c:441
void CatCacheInvalidate(CatCache *cache, uint32 hashValue)
Definition: catcache.c:625
static void ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: catcache.c:164
static bool nameeqfast(Datum a, Datum b)
Definition: catcache.c:203
static uint32 charhashfast(Datum datum)
Definition: catcache.c:197
static void RehashCatCacheLists(CatCache *cp)
Definition: catcache.c:1023
HeapTuple SearchCatCache1(CatCache *cache, Datum v1)
Definition: catcache.c:1357
#define InitCatCache_DEBUG2
Definition: catcache.c:874
static uint32 oidvectorhashfast(Datum datum)
Definition: catcache.c:267
static void ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: catcache.c:169
static bool texteqfast(Datum a, Datum b)
Definition: catcache.c:244
#define CACHE_elog(...)
Definition: catcache.c:80
static bool oidvectoreqfast(Datum a, Datum b)
Definition: catcache.c:261
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:834
static uint32 int2hashfast(Datum datum)
Definition: catcache.c:226
#define CatalogCacheInitializeCache_DEBUG2
Definition: catcache.c:1111
static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
Definition: catcache.c:2281
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl)
Definition: catcache.c:570
#define HASH_INDEX(h, sz)
Definition: catcache.c:70
static uint32 texthashfast(Datum datum)
Definition: catcache.c:254
void ReleaseCatCache(HeapTuple tuple)
Definition: catcache.c:1647
HeapTuple SearchCatCache(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1340
void ResetCatalogCachesExt(bool debug_discard)
Definition: catcache.c:804
#define CT_MAGIC
Definition: catcache.h:91
uint32(* CCHashFN)(Datum datum)
Definition: catcache.h:39
#define CATCACHE_MAXKEYS
Definition: catcache.h:35
bool(* CCFastEqualFN)(Datum a, Datum b)
Definition: catcache.h:42
#define CL_MAGIC
Definition: catcache.h:162
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
#define PG_RE_THROW()
Definition: elog.h:404
#define FATAL
Definition: elog.h:41
#define PG_TRY(...)
Definition: elog.h:371
#define DEBUG2
Definition: elog.h:29
#define PG_END_TRY(...)
Definition: elog.h:396
#define DEBUG1
Definition: elog.h:30
#define PG_CATCH(...)
Definition: elog.h:381
#define elog(elevel,...)
Definition: elog.h:225
#define PG_FINALLY(...)
Definition: elog.h:388
#define MCXT_ALLOC_ZERO
Definition: fe_memutils.h:30
Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:812
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
Datum DirectFunctionCall1Coll(PGFunction func, Oid collation, Datum arg1)
Definition: fmgr.c:792
#define DirectFunctionCall2(func, arg1, arg2)
Definition: fmgr.h:684
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:682
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:603
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:514
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:388
Oid MyDatabaseId
Definition: globals.c:95
static uint32 murmurhash32(uint32 data)
Definition: hashfn.h:92
static Datum hash_any(const unsigned char *k, int keylen)
Definition: hashfn.h:31
Assert(PointerIsAligned(start, uint64))
Datum hashoidvector(PG_FUNCTION_ARGS)
Definition: hashfunc.c:232
Datum hashtext(PG_FUNCTION_ARGS)
Definition: hashfunc.c:267
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:350
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1435
HeapTupleData * HeapTuple
Definition: htup.h:71
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:904
static bool HeapTupleHasExternal(const HeapTupleData *tuple)
Definition: htup_details.h:762
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:861
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static void slist_init(slist_head *head)
Definition: ilist.h:986
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:1006
#define slist_container(type, membername, ptr)
Definition: ilist.h:1106
static void dlist_move_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:467
#define slist_foreach(iter, lhead)
Definition: ilist.h:1132
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
#define INJECTION_POINT(name)
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1894
void on_proc_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:309
int b
Definition: isn.c:74
int a
Definition: isn.c:73
int i
Definition: isn.c:77
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:35
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
List * lappend(List *list, void *datum)
Definition: list.c:339
void UnlockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:229
void LockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:107
#define AccessShareLock
Definition: lockdefs.h:36
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1294
char * pstrdup(const char *in)
Definition: mcxt.c:2327
void pfree(void *pointer)
Definition: mcxt.c:2152
void * palloc0(Size size)
Definition: mcxt.c:1975
MemoryContext TopMemoryContext
Definition: mcxt.c:165
void * palloc(Size size)
Definition: mcxt.c:1945
MemoryContext CacheMemoryContext
Definition: mcxt.c:168
void * palloc_aligned(Size size, Size alignto, int flags)
Definition: mcxt.c:2142
#define AllocSetContextCreate
Definition: memutils.h:149
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:180
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:477
void namestrcpy(Name name, const char *str)
Definition: name.c:233
Datum oidvectoreq(PG_FUNCTION_ARGS)
Definition: oid.c:344
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
int16 attnum
Definition: pg_attribute.h:74
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:202
on_exit_nicely_callback function
void * arg
static uint32 pg_rotate_left32(uint32 word, int n)
Definition: pg_bitutils.h:428
#define NAMEDATALEN
#define PG_CACHE_LINE_SIZE
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:227
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:34
static bool DatumGetBool(Datum X)
Definition: postgres.h:95
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:327
static Name DatumGetName(Datum X)
Definition: postgres.h:365
uintptr_t Datum
Definition: postgres.h:69
static char * DatumGetCString(Datum X)
Definition: postgres.h:340
static Datum NameGetDatum(const NameData *X)
Definition: postgres.h:378
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:317
static char DatumGetChar(Datum X)
Definition: postgres.h:117
static int16 DatumGetInt16(Datum X)
Definition: postgres.h:167
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:207
#define InvalidOid
Definition: postgres_ext.h:35
unsigned int Oid
Definition: postgres_ext.h:30
e
Definition: preproc-init.c:82
char * psprintf(const char *fmt,...)
Definition: psprintf.c:43
#define RelationGetForm(relation)
Definition: rel.h:510
#define RelationGetRelid(relation)
Definition: rel.h:516
#define RelationGetDescr(relation)
Definition: rel.h:542
#define RelationGetRelationName(relation)
Definition: rel.h:550
#define RelationIsValid(relation)
Definition: rel.h:489
bool criticalRelcachesBuilt
Definition: relcache.c:140
bool criticalSharedRelcachesBuilt
Definition: relcache.c:146
static void AssertCouldGetRelation(void)
Definition: relcache.h:44
ResourceOwner CurrentResourceOwner
Definition: resowner.c:173
void ResourceOwnerForget(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
Definition: resowner.c:564
void ResourceOwnerRemember(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
Definition: resowner.c:524
void ResourceOwnerEnlarge(ResourceOwner owner)
Definition: resowner.c:452
#define RELEASE_PRIO_CATCACHE_LIST_REFS
Definition: resowner.h:72
@ RESOURCE_RELEASE_AFTER_LOCKS
Definition: resowner.h:56
#define RELEASE_PRIO_CATCACHE_REFS
Definition: resowner.h:71
#define BTEqualStrategyNumber
Definition: stratnum.h:31
uint32 hash_value
Definition: catcache.c:55
struct CatCInProgress * next
Definition: catcache.c:58
CatCache * cache
Definition: catcache.c:54
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
Definition: pg_list.h:54
Form_pg_index rd_index
Definition: rel.h:192
const char * name
Definition: resowner.h:93
Datum sk_argument
Definition: skey.h:72
FmgrInfo sk_func
Definition: skey.h:71
Oid sk_subtype
Definition: skey.h:69
Oid sk_collation
Definition: skey.h:70
StrategyNumber sk_strategy
Definition: skey.h:68
AttrNumber sk_attno
Definition: skey.h:67
Relation irel
Definition: relscan.h:210
const char * cc_relname
Definition: catcache.h:59
CCHashFN cc_hashfunc[CATCACHE_MAXKEYS]
Definition: catcache.h:50
dlist_head * cc_bucket
Definition: catcache.h:49
slist_node cc_next
Definition: catcache.h:63
Oid cc_reloid
Definition: catcache.h:60
int cc_nkeys
Definition: catcache.h:54
int cc_keyno[CATCACHE_MAXKEYS]
Definition: catcache.h:53
CCFastEqualFN cc_fastequal[CATCACHE_MAXKEYS]
Definition: catcache.h:51
Oid cc_indexoid
Definition: catcache.h:61
int cc_nbuckets
Definition: catcache.h:47
bool cc_relisshared
Definition: catcache.h:62
int cc_ntup
Definition: catcache.h:55
ScanKeyData cc_skey[CATCACHE_MAXKEYS]
Definition: catcache.h:64
int cc_nlist
Definition: catcache.h:56
int id
Definition: catcache.h:46
TupleDesc cc_tupdesc
Definition: catcache.h:48
int cc_nlbuckets
Definition: catcache.h:57
dlist_head * cc_lbucket
Definition: catcache.h:58
slist_head ch_caches
Definition: catcache.h:186
dlist_node cache_elem
Definition: catcache.h:166
int refcount
Definition: catcache.h:174
CatCache * my_cache
Definition: catcache.h:179
int cl_magic
Definition: catcache.h:161
bool dead
Definition: catcache.h:175
short nkeys
Definition: catcache.h:177
Datum keys[CATCACHE_MAXKEYS]
Definition: catcache.h:172
bool ordered
Definition: catcache.h:176
CatCTup * members[FLEXIBLE_ARRAY_MEMBER]
Definition: catcache.h:180
uint32 hash_value
Definition: catcache.h:164
int n_members
Definition: catcache.h:178
int ct_magic
Definition: catcache.h:90
int refcount
Definition: catcache.h:120
bool negative
Definition: catcache.h:122
dlist_node cache_elem
Definition: catcache.h:106
HeapTupleData tuple
Definition: catcache.h:123
CatCache * my_cache
Definition: catcache.h:134
struct catclist * c_list
Definition: catcache.h:132
Datum keys[CATCACHE_MAXKEYS]
Definition: catcache.h:99
bool dead
Definition: catcache.h:121
uint32 hash_value
Definition: catcache.h:93
dlist_node * cur
Definition: ilist.h:179
dlist_node * cur
Definition: ilist.h:200
Definition: c.h:712
slist_node * cur
Definition: ilist.h:259
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:333
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition: tupdesc.h:160
struct TupleDescData * TupleDesc
Definition: tupdesc.h:145
Datum texteq(PG_FUNCTION_ARGS)
Definition: varlena.c:1683
bool IsTransactionState(void)
Definition: xact.c:387