Diffstat (limited to 'src'): 652 files changed, 28740 insertions, 28135 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 8b9714184c2..2ba59ab5e9e 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -6,7 +6,7 @@ * * NOTE: there is massive duplication of code in this module to * support both the convention that a null is marked by a bool TRUE, - * and the convention that a null is marked by a char 'n'. The latter + * and the convention that a null is marked by a char 'n'. The latter * convention is deprecated but it'll probably be a long time before * we can get rid of it entirely. * @@ -16,7 +16,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.99 2005/03/21 01:23:55 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.100 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -452,8 +452,8 @@ nocachegetattr(HeapTuple tuple, int j; /* - * In for(), we test <= and not < because we want to see if we - * can go past it in initializing offsets. + * In for(), we test <= and not < because we want to see if we can + * go past it in initializing offsets. */ for (j = 0; j <= attnum; j++) { @@ -467,10 +467,9 @@ nocachegetattr(HeapTuple tuple, } /* - * If slow is false, and we got here, we know that we have a tuple - * with no nulls or var-widths before the target attribute. If - * possible, we also want to initialize the remainder of the attribute - * cached offset values. + * If slow is false, and we got here, we know that we have a tuple with no + * nulls or var-widths before the target attribute. If possible, we also + * want to initialize the remainder of the attribute cached offset values. */ if (!slow) { @@ -513,11 +512,11 @@ nocachegetattr(HeapTuple tuple, /* * Now we know that we have to walk the tuple CAREFULLY. * - * Note - This loop is a little tricky. For each non-null attribute, - * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no - * storage and no alignment padding either. We can use/set attcacheoff - * until we pass either a null or a var-width attribute. + * Note - This loop is a little tricky. For each non-null attribute, we + * have to first account for alignment padding before the attr, then + * advance over the attr based on its length. Nulls have no storage + * and no alignment padding either. We can use/set attcacheoff until + * we pass either a null or a var-width attribute. */ for (i = 0; i < attnum; i++) @@ -597,15 +596,13 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) break; /* - * If the attribute number is 0, then we are supposed to - * return the entire tuple as a row-type Datum. (Using zero - * for this purpose is unclean since it risks confusion with - * "invalid attr" result codes, but it's not worth changing - * now.) + * If the attribute number is 0, then we are supposed to return + * the entire tuple as a row-type Datum. (Using zero for this + * purpose is unclean since it risks confusion with "invalid attr" + * result codes, but it's not worth changing now.) * - * We have to make a copy of the tuple so we can safely insert - * the Datum overhead fields, which are not set in on-disk - * tuples. + * We have to make a copy of the tuple so we can safely insert the + * Datum overhead fields, which are not set in on-disk tuples. 
*/ case InvalidAttrNumber: { @@ -708,15 +705,15 @@ heap_form_tuple(TupleDesc tupleDescriptor, numberOfAttributes, MaxTupleAttributeNumber))); /* - * Check for nulls and embedded tuples; expand any toasted attributes - * in embedded tuples. This preserves the invariant that toasting can - * only go one level deep. + * Check for nulls and embedded tuples; expand any toasted attributes in + * embedded tuples. This preserves the invariant that toasting can only + * go one level deep. * * We can skip calling toast_flatten_tuple_attribute() if the attribute * couldn't possibly be of composite type. All composite datums are - * varlena and have alignment 'd'; furthermore they aren't arrays. - * Also, if an attribute is already toasted, it must have been sent to - * disk already and so cannot contain toasted attributes. + * varlena and have alignment 'd'; furthermore they aren't arrays. Also, + * if an attribute is already toasted, it must have been sent to disk + * already and so cannot contain toasted attributes. */ for (i = 0; i < numberOfAttributes; i++) { @@ -757,8 +754,8 @@ heap_form_tuple(TupleDesc tupleDescriptor, tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE); /* - * And fill in the information. Note we fill the Datum fields even - * though this tuple may never become a Datum. + * And fill in the information. Note we fill the Datum fields even though + * this tuple may never become a Datum. */ tuple->t_len = len; ItemPointerSetInvalid(&(tuple->t_self)); @@ -816,15 +813,15 @@ heap_formtuple(TupleDesc tupleDescriptor, numberOfAttributes, MaxTupleAttributeNumber))); /* - * Check for nulls and embedded tuples; expand any toasted attributes - * in embedded tuples. This preserves the invariant that toasting can - * only go one level deep. + * Check for nulls and embedded tuples; expand any toasted attributes in + * embedded tuples. This preserves the invariant that toasting can only + * go one level deep. * * We can skip calling toast_flatten_tuple_attribute() if the attribute * couldn't possibly be of composite type. All composite datums are - * varlena and have alignment 'd'; furthermore they aren't arrays. - * Also, if an attribute is already toasted, it must have been sent to - * disk already and so cannot contain toasted attributes. + * varlena and have alignment 'd'; furthermore they aren't arrays. Also, + * if an attribute is already toasted, it must have been sent to disk + * already and so cannot contain toasted attributes. */ for (i = 0; i < numberOfAttributes; i++) { @@ -865,8 +862,8 @@ heap_formtuple(TupleDesc tupleDescriptor, tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE); /* - * And fill in the information. Note we fill the Datum fields even - * though this tuple may never become a Datum. + * And fill in the information. Note we fill the Datum fields even though + * this tuple may never become a Datum. */ tuple->t_len = len; ItemPointerSetInvalid(&(tuple->t_self)); @@ -917,15 +914,15 @@ heap_modify_tuple(HeapTuple tuple, HeapTuple newTuple; /* - * allocate and fill values and isnull arrays from either the tuple or - * the repl information, as appropriate. + * allocate and fill values and isnull arrays from either the tuple or the + * repl information, as appropriate. * * NOTE: it's debatable whether to use heap_deform_tuple() here or just - * heap_getattr() only the non-replaced colums. The latter could win - * if there are many replaced columns and few non-replaced ones. 
- * However, heap_deform_tuple costs only O(N) while the heap_getattr - * way would cost O(N^2) if there are many non-replaced columns, so it - * seems better to err on the side of linear cost. + * heap_getattr() only the non-replaced colums. The latter could win if + * there are many replaced columns and few non-replaced ones. However, + * heap_deform_tuple costs only O(N) while the heap_getattr way would cost + * O(N^2) if there are many non-replaced columns, so it seems better to + * err on the side of linear cost. */ values = (Datum *) palloc(numberOfAttributes * sizeof(Datum)); isnull = (bool *) palloc(numberOfAttributes * sizeof(bool)); @@ -950,8 +947,8 @@ heap_modify_tuple(HeapTuple tuple, pfree(isnull); /* - * copy the identification info of the old tuple: t_ctid, t_self, and - * OID (if any) + * copy the identification info of the old tuple: t_ctid, t_self, and OID + * (if any) */ newTuple->t_data->t_ctid = tuple->t_data->t_ctid; newTuple->t_self = tuple->t_self; @@ -986,15 +983,15 @@ heap_modifytuple(HeapTuple tuple, HeapTuple newTuple; /* - * allocate and fill values and nulls arrays from either the tuple or - * the repl information, as appropriate. + * allocate and fill values and nulls arrays from either the tuple or the + * repl information, as appropriate. * * NOTE: it's debatable whether to use heap_deformtuple() here or just - * heap_getattr() only the non-replaced colums. The latter could win - * if there are many replaced columns and few non-replaced ones. - * However, heap_deformtuple costs only O(N) while the heap_getattr - * way would cost O(N^2) if there are many non-replaced columns, so it - * seems better to err on the side of linear cost. + * heap_getattr() only the non-replaced colums. The latter could win if + * there are many replaced columns and few non-replaced ones. However, + * heap_deformtuple costs only O(N) while the heap_getattr way would cost + * O(N^2) if there are many non-replaced columns, so it seems better to + * err on the side of linear cost. */ values = (Datum *) palloc(numberOfAttributes * sizeof(Datum)); nulls = (char *) palloc(numberOfAttributes * sizeof(char)); @@ -1022,8 +1019,8 @@ heap_modifytuple(HeapTuple tuple, pfree(nulls); /* - * copy the identification info of the old tuple: t_ctid, t_self, and - * OID (if any) + * copy the identification info of the old tuple: t_ctid, t_self, and OID + * (if any) */ newTuple->t_data->t_ctid = tuple->t_data->t_ctid; newTuple->t_self = tuple->t_self; @@ -1068,9 +1065,9 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, natts = tup->t_natts; /* - * In inheritance situations, it is possible that the given tuple - * actually has more fields than the caller is expecting. Don't run - * off the end of the caller's arrays. + * In inheritance situations, it is possible that the given tuple actually + * has more fields than the caller is expecting. Don't run off the end of + * the caller's arrays. */ natts = Min(natts, tdesc_natts); @@ -1161,9 +1158,9 @@ heap_deformtuple(HeapTuple tuple, natts = tup->t_natts; /* - * In inheritance situations, it is possible that the given tuple - * actually has more fields than the caller is expecting. Don't run - * off the end of the caller's arrays. + * In inheritance situations, it is possible that the given tuple actually + * has more fields than the caller is expecting. Don't run off the end of + * the caller's arrays. 
*/ natts = Min(natts, tdesc_natts); @@ -1228,22 +1225,22 @@ heap_deformtuple(HeapTuple tuple, static void slot_deform_tuple(TupleTableSlot *slot, int natts) { - HeapTuple tuple = slot->tts_tuple; - TupleDesc tupleDesc = slot->tts_tupleDescriptor; + HeapTuple tuple = slot->tts_tuple; + TupleDesc tupleDesc = slot->tts_tupleDescriptor; Datum *values = slot->tts_values; bool *isnull = slot->tts_isnull; - HeapTupleHeader tup = tuple->t_data; + HeapTupleHeader tup = tuple->t_data; bool hasnulls = HeapTupleHasNulls(tuple); Form_pg_attribute *att = tupleDesc->attrs; int attnum; - char *tp; /* ptr to tuple data */ - long off; /* offset in tuple data */ - bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ - bool slow; /* can we use/set attcacheoff? */ + char *tp; /* ptr to tuple data */ + long off; /* offset in tuple data */ + bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ + bool slow; /* can we use/set attcacheoff? */ /* - * Check whether the first call for this tuple, and initialize or - * restore loop state. + * Check whether the first call for this tuple, and initialize or restore + * loop state. */ attnum = slot->tts_nvalid; if (attnum == 0) @@ -1269,7 +1266,7 @@ slot_deform_tuple(TupleTableSlot *slot, int natts) { values[attnum] = (Datum) 0; isnull[attnum] = true; - slow = true; /* can't use attcacheoff anymore */ + slow = true; /* can't use attcacheoff anymore */ continue; } @@ -1290,7 +1287,7 @@ slot_deform_tuple(TupleTableSlot *slot, int natts) off = att_addlength(off, thisatt->attlen, tp + off); if (thisatt->attlen <= 0) - slow = true; /* can't use attcacheoff anymore */ + slow = true; /* can't use attcacheoff anymore */ } /* @@ -1316,9 +1313,9 @@ slot_deform_tuple(TupleTableSlot *slot, int natts) Datum slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) { - HeapTuple tuple = slot->tts_tuple; - TupleDesc tupleDesc = slot->tts_tupleDescriptor; - HeapTupleHeader tup; + HeapTuple tuple = slot->tts_tuple; + TupleDesc tupleDesc = slot->tts_tupleDescriptor; + HeapTupleHeader tup; /* * system attributes are handled by heap_getsysattr @@ -1349,18 +1346,18 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) } /* - * otherwise we had better have a physical tuple (tts_nvalid should - * equal natts in all virtual-tuple cases) + * otherwise we had better have a physical tuple (tts_nvalid should equal + * natts in all virtual-tuple cases) */ - if (tuple == NULL) /* internal error */ + if (tuple == NULL) /* internal error */ elog(ERROR, "cannot extract attribute from empty tuple slot"); /* * return NULL if attnum is out of range according to the tuple * - * (We have to check this separately because of various inheritance - * and table-alteration scenarios: the tuple could be either longer - * or shorter than the tupdesc.) + * (We have to check this separately because of various inheritance and + * table-alteration scenarios: the tuple could be either longer or shorter + * than the tupdesc.) */ tup = tuple->t_data; if (attnum > tup->t_natts) @@ -1379,10 +1376,9 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) } /* - * If the attribute's column has been dropped, we force a NULL - * result. This case should not happen in normal use, but it could - * happen if we are executing a plan cached before the column was - * dropped. + * If the attribute's column has been dropped, we force a NULL result. + * This case should not happen in normal use, but it could happen if we + * are executing a plan cached before the column was dropped. 
*/ if (tupleDesc->attrs[attnum - 1]->attisdropped) { @@ -1420,11 +1416,11 @@ slot_getallattrs(TupleTableSlot *slot) return; /* - * otherwise we had better have a physical tuple (tts_nvalid should - * equal natts in all virtual-tuple cases) + * otherwise we had better have a physical tuple (tts_nvalid should equal + * natts in all virtual-tuple cases) */ tuple = slot->tts_tuple; - if (tuple == NULL) /* internal error */ + if (tuple == NULL) /* internal error */ elog(ERROR, "cannot extract attribute from empty tuple slot"); /* @@ -1467,11 +1463,11 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum) elog(ERROR, "invalid attribute number %d", attnum); /* - * otherwise we had better have a physical tuple (tts_nvalid should - * equal natts in all virtual-tuple cases) + * otherwise we had better have a physical tuple (tts_nvalid should equal + * natts in all virtual-tuple cases) */ tuple = slot->tts_tuple; - if (tuple == NULL) /* internal error */ + if (tuple == NULL) /* internal error */ elog(ERROR, "cannot extract attribute from empty tuple slot"); /* @@ -1502,8 +1498,8 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum) bool slot_attisnull(TupleTableSlot *slot, int attnum) { - HeapTuple tuple = slot->tts_tuple; - TupleDesc tupleDesc = slot->tts_tupleDescriptor; + HeapTuple tuple = slot->tts_tuple; + TupleDesc tupleDesc = slot->tts_tupleDescriptor; /* * system attributes are handled by heap_attisnull @@ -1528,10 +1524,10 @@ slot_attisnull(TupleTableSlot *slot, int attnum) return true; /* - * otherwise we had better have a physical tuple (tts_nvalid should - * equal natts in all virtual-tuple cases) + * otherwise we had better have a physical tuple (tts_nvalid should equal + * natts in all virtual-tuple cases) */ - if (tuple == NULL) /* internal error */ + if (tuple == NULL) /* internal error */ elog(ERROR, "cannot extract attribute from empty tuple slot"); /* and let the tuple tell it */ diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index e5d19765e79..b3520baa2bc 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.74 2005/03/27 18:38:26 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.75 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -70,20 +70,20 @@ index_form_tuple(TupleDesc tupleDescriptor, continue; /* - * If value is stored EXTERNAL, must fetch it so we are not - * depending on outside storage. This should be improved someday. + * If value is stored EXTERNAL, must fetch it so we are not depending + * on outside storage. This should be improved someday. */ if (VARATT_IS_EXTERNAL(values[i])) { untoasted_values[i] = PointerGetDatum( - heap_tuple_fetch_attr( - (varattrib *) DatumGetPointer(values[i]))); + heap_tuple_fetch_attr( + (varattrib *) DatumGetPointer(values[i]))); untoasted_free[i] = true; } /* - * If value is above size target, and is of a compressible - * datatype, try to compress it in-line. + * If value is above size target, and is of a compressible datatype, + * try to compress it in-line. 
*/ if (VARATT_SIZE(untoasted_values[i]) > TOAST_INDEX_TARGET && !VARATT_IS_EXTENDED(untoasted_values[i]) && @@ -149,23 +149,23 @@ index_form_tuple(TupleDesc tupleDescriptor, /* * We do this because heap_fill_tuple wants to initialize a "tupmask" - * which is used for HeapTuples, but we want an indextuple infomask. - * The only relevant info is the "has variable attributes" field. - * We have already set the hasnull bit above. + * which is used for HeapTuples, but we want an indextuple infomask. The + * only relevant info is the "has variable attributes" field. We have + * already set the hasnull bit above. */ if (tupmask & HEAP_HASVARWIDTH) infomask |= INDEX_VAR_MASK; /* - * Here we make sure that the size will fit in the field reserved for - * it in t_info. + * Here we make sure that the size will fit in the field reserved for it + * in t_info. */ if ((size & INDEX_SIZE_MASK) != size) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("index row requires %lu bytes, maximum size is %lu", - (unsigned long) size, - (unsigned long) INDEX_SIZE_MASK))); + errmsg("index row requires %lu bytes, maximum size is %lu", + (unsigned long) size, + (unsigned long) INDEX_SIZE_MASK))); infomask |= size; @@ -322,10 +322,9 @@ nocache_index_getattr(IndexTuple tup, } /* - * If slow is false, and we got here, we know that we have a tuple - * with no nulls or var-widths before the target attribute. If - * possible, we also want to initialize the remainder of the attribute - * cached offset values. + * If slow is false, and we got here, we know that we have a tuple with no + * nulls or var-widths before the target attribute. If possible, we also + * want to initialize the remainder of the attribute cached offset values. */ if (!slow) { diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index 9080d047fc2..96dfafb7cbf 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -9,7 +9,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.91 2005/06/22 17:45:45 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.92 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -78,9 +78,9 @@ printtup_create_DR(CommandDest dest, Portal portal) else { /* - * In protocol 2.0 the Bind message does not exist, so there is no - * way for the columns to have different print formats; it's - * sufficient to look at the first one. + * In protocol 2.0 the Bind message does not exist, so there is no way + * for the columns to have different print formats; it's sufficient to + * look at the first one. */ if (portal->formats && portal->formats[0] != 0) self->pub.receiveSlot = printtup_internal_20; @@ -113,8 +113,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) { /* - * Send portal name to frontend (obsolete cruft, gone in proto - * 3.0) + * Send portal name to frontend (obsolete cruft, gone in proto 3.0) * * If portal name not specified, use "blank" portal. */ @@ -127,8 +126,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) } /* - * If this is a retrieve, and we are supposed to emit row - * descriptions, then we send back the tuple descriptor of the tuples. 
+ * If this is a retrieve, and we are supposed to emit row descriptions, + * then we send back the tuple descriptor of the tuples. */ if (operation == CMD_SELECT && myState->sendDescrip) SendRowDescriptionMessage(typeinfo, @@ -280,7 +279,7 @@ printtup_prepare_info(DR_printtup *myState, TupleDesc typeinfo, int numAttrs) static void printtup(TupleTableSlot *slot, DestReceiver *self) { - TupleDesc typeinfo = slot->tts_tupleDescriptor; + TupleDesc typeinfo = slot->tts_tupleDescriptor; DR_printtup *myState = (DR_printtup *) self; StringInfoData buf; int natts = typeinfo->natts; @@ -363,7 +362,7 @@ printtup(TupleTableSlot *slot, DestReceiver *self) static void printtup_20(TupleTableSlot *slot, DestReceiver *self) { - TupleDesc typeinfo = slot->tts_tupleDescriptor; + TupleDesc typeinfo = slot->tts_tupleDescriptor; DR_printtup *myState = (DR_printtup *) self; StringInfoData buf; int natts = typeinfo->natts; @@ -566,7 +565,7 @@ debugtup(TupleTableSlot *slot, DestReceiver *self) static void printtup_internal_20(TupleTableSlot *slot, DestReceiver *self) { - TupleDesc typeinfo = slot->tts_tupleDescriptor; + TupleDesc typeinfo = slot->tts_tupleDescriptor; DR_printtup *myState = (DR_printtup *) self; StringInfoData buf; int natts = typeinfo->natts; diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index fedc7ec4894..cfa455beec9 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.111 2005/04/14 22:34:48 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112 2005/10/15 02:49:08 momjian Exp $ * * NOTES * some of the executor utility code such as "ExecTypeFromTL" should be @@ -49,10 +49,10 @@ CreateTemplateTupleDesc(int natts, bool hasoid) * Allocate enough memory for the tuple descriptor, including the * attribute rows, and set up the attribute row pointers. * - * Note: we assume that sizeof(struct tupleDesc) is a multiple of - * the struct pointer alignment requirement, and hence we don't need - * to insert alignment padding between the struct and the array of - * attribute row pointers. + * Note: we assume that sizeof(struct tupleDesc) is a multiple of the struct + * pointer alignment requirement, and hence we don't need to insert + * alignment padding between the struct and the array of attribute row + * pointers. */ attroffset = sizeof(struct tupleDesc) + natts * sizeof(Form_pg_attribute); attroffset = MAXALIGN(attroffset); @@ -273,16 +273,16 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2) Form_pg_attribute attr2 = tupdesc2->attrs[i]; /* - * We do not need to check every single field here: we can - * disregard attrelid and attnum (which were used to place the row - * in the attrs array in the first place). It might look like we - * could dispense with checking attlen/attbyval/attalign, since these - * are derived from atttypid; but in the case of dropped columns - * we must check them (since atttypid will be zero for all dropped - * columns) and in general it seems safer to check them always. + * We do not need to check every single field here: we can disregard + * attrelid and attnum (which were used to place the row in the attrs + * array in the first place). 
It might look like we could dispense + * with checking attlen/attbyval/attalign, since these are derived + * from atttypid; but in the case of dropped columns we must check + * them (since atttypid will be zero for all dropped columns) and in + * general it seems safer to check them always. * - * attcacheoff must NOT be checked since it's possibly not set - * in both copies. + * attcacheoff must NOT be checked since it's possibly not set in both + * copies. */ if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0) return false; @@ -332,9 +332,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2) AttrDefault *defval2 = constr2->defval; /* - * We can't assume that the items are always read from the - * system catalogs in the same order; so use the adnum field - * to identify the matching item to compare. + * We can't assume that the items are always read from the system + * catalogs in the same order; so use the adnum field to identify + * the matching item to compare. */ for (j = 0; j < n; defval2++, j++) { @@ -355,9 +355,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2) ConstrCheck *check2 = constr2->check; /* - * Similarly, don't assume that the checks are always read in - * the same order; match them up by name and contents. (The - * name *should* be unique, but...) + * Similarly, don't assume that the checks are always read in the + * same order; match them up by name and contents. (The name + * *should* be unique, but...) */ for (j = 0; j < n; check2++, j++) { @@ -407,8 +407,8 @@ TupleDescInitEntry(TupleDesc desc, /* * Note: attributeName can be NULL, because the planner doesn't always - * fill in valid resname values in targetlists, particularly for - * resjunk attributes. + * fill in valid resname values in targetlists, particularly for resjunk + * attributes. */ if (attributeName != NULL) namestrcpy(&(att->attname), attributeName); @@ -482,8 +482,8 @@ BuildDescForRelation(List *schema) ColumnDef *entry = lfirst(l); /* - * for each entry in the list, get the name and type information - * from the list and have TupleDescInitEntry fill in the attribute + * for each entry in the list, get the name and type information from + * the list and have TupleDescInitEntry fill in the attribute * information we need. */ attnum++; @@ -508,8 +508,8 @@ BuildDescForRelation(List *schema) desc->attrs[attnum - 1]->attnotnull = entry->is_not_null; /* - * Note we copy only pre-cooked default expressions. Digestion of - * raw ones is someone else's problem. + * Note we copy only pre-cooked default expressions. Digestion of raw + * ones is someone else's problem. 
*/ if (entry->cooked_default != NULL) { diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c index 5978c8af4cc..b9e0469b05b 100644 --- a/src/backend/access/gist/gistproc.c +++ b/src/backend/access/gist/gistproc.c @@ -10,7 +10,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.2 2005/09/22 20:44:36 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.3 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -26,7 +26,7 @@ typedef struct { BOX *key; int pos; -} KBsort; +} KBsort; static int compare_KB(const void *a, const void *b); static bool gist_box_leaf_consistent(BOX *key, BOX *query, diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index f8611ce46a0..2cff9509b6a 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.80 2005/06/06 17:01:21 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.81 2005/10/15 02:49:08 momjian Exp $ * * NOTES * This file contains only the public interface routines. @@ -55,8 +55,8 @@ hashbuild(PG_FUNCTION_ARGS) HashBuildState buildstate; /* - * We expect to be called exactly once for any index relation. If - * that's not the case, big trouble's what we have. + * We expect to be called exactly once for any index relation. If that's + * not the case, big trouble's what we have. */ if (RelationGetNumberOfBlocks(index) != 0) elog(ERROR, "index \"%s\" already contains data", @@ -70,7 +70,7 @@ hashbuild(PG_FUNCTION_ARGS) /* do the heap scan */ reltuples = IndexBuildHeapScan(heap, index, indexInfo, - hashbuildCallback, (void *) &buildstate); + hashbuildCallback, (void *) &buildstate); /* since we just counted the # of tuples, may as well update stats */ IndexCloseAndUpdateStats(heap, reltuples, index, buildstate.indtuples); @@ -141,12 +141,12 @@ hashinsert(PG_FUNCTION_ARGS) /* * If the single index key is null, we don't insert it into the index. - * Hash tables support scans on '='. Relational algebra says that A = - * B returns null if either A or B is null. This means that no - * qualification used in an index scan could ever return true on a - * null attribute. It also means that indices can't be used by ISNULL - * or NOTNULL scans, but that's an artifact of the strategy map - * architecture chosen in 1986, not of the way nulls are handled here. + * Hash tables support scans on '='. Relational algebra says that A = B + * returns null if either A or B is null. This means that no + * qualification used in an index scan could ever return true on a null + * attribute. It also means that indices can't be used by ISNULL or + * NOTNULL scans, but that's an artifact of the strategy map architecture + * chosen in 1986, not of the way nulls are handled here. */ if (IndexTupleHasNulls(itup)) { @@ -180,16 +180,16 @@ hashgettuple(PG_FUNCTION_ARGS) bool res; /* - * We hold pin but not lock on current buffer while outside the hash - * AM. Reacquire the read lock here. + * We hold pin but not lock on current buffer while outside the hash AM. + * Reacquire the read lock here. */ if (BufferIsValid(so->hashso_curbuf)) _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ); /* - * If we've already initialized this scan, we can just advance it in - * the appropriate direction. 
If we haven't done so yet, we call a - * routine to get the first item in the scan. + * If we've already initialized this scan, we can just advance it in the + * appropriate direction. If we haven't done so yet, we call a routine to + * get the first item in the scan. */ if (ItemPointerIsValid(&(scan->currentItemData))) { @@ -199,17 +199,16 @@ hashgettuple(PG_FUNCTION_ARGS) if (scan->kill_prior_tuple) { /* - * Yes, so mark it by setting the LP_DELETE bit in the item - * flags. + * Yes, so mark it by setting the LP_DELETE bit in the item flags. */ offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData)); page = BufferGetPage(so->hashso_curbuf); PageGetItemId(page, offnum)->lp_flags |= LP_DELETE; /* - * Since this can be redone later if needed, it's treated the - * same as a commit-hint-bit status update for heap tuples: we - * mark the buffer dirty but don't make a WAL log entry. + * Since this can be redone later if needed, it's treated the same + * as a commit-hint-bit status update for heap tuples: we mark the + * buffer dirty but don't make a WAL log entry. */ SetBufferCommitInfoNeedsSave(so->hashso_curbuf); } @@ -256,7 +255,7 @@ Datum hashgetmulti(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); - ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1); + ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1); int32 max_tids = PG_GETARG_INT32(2); int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3); HashScanOpaque so = (HashScanOpaque) scan->opaque; @@ -265,8 +264,8 @@ hashgetmulti(PG_FUNCTION_ARGS) int32 ntids = 0; /* - * We hold pin but not lock on current buffer while outside the hash - * AM. Reacquire the read lock here. + * We hold pin but not lock on current buffer while outside the hash AM. + * Reacquire the read lock here. */ if (BufferIsValid(so->hashso_curbuf)) _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ); @@ -280,6 +279,7 @@ hashgetmulti(PG_FUNCTION_ARGS) res = _hash_next(scan, ForwardScanDirection); else res = _hash_first(scan, ForwardScanDirection); + /* * Skip killed tuples if asked to. */ @@ -505,12 +505,12 @@ hashbulkdelete(PG_FUNCTION_ARGS) num_index_tuples = 0; /* - * Read the metapage to fetch original bucket and tuple counts. Also, - * we keep a copy of the last-seen metapage so that we can use its - * hashm_spares[] values to compute bucket page addresses. This is a - * bit hokey but perfectly safe, since the interesting entries in the - * spares array cannot change under us; and it beats rereading the - * metapage for each bucket. + * Read the metapage to fetch original bucket and tuple counts. Also, we + * keep a copy of the last-seen metapage so that we can use its + * hashm_spares[] values to compute bucket page addresses. This is a bit + * hokey but perfectly safe, since the interesting entries in the spares + * array cannot change under us; and it beats rereading the metapage for + * each bucket. */ metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ); metap = (HashMetaPage) BufferGetPage(metabuf); @@ -569,7 +569,7 @@ loop_top: ItemPointer htup; hitem = (HashItem) PageGetItem(page, - PageGetItemId(page, offno)); + PageGetItemId(page, offno)); htup = &(hitem->hash_itup.t_tid); if (callback(htup, callback_state)) { @@ -641,8 +641,7 @@ loop_top: { /* * Otherwise, our count is untrustworthy since we may have - * double-scanned tuples in split buckets. Proceed by - * dead-reckoning. + * double-scanned tuples in split buckets. Proceed by dead-reckoning. 
*/ if (metap->hashm_ntuples > tuples_removed) metap->hashm_ntuples -= tuples_removed; diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index 05ca3bcdb12..2ffca5efe6a 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.44 2005/05/25 21:40:40 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.45 2005/10/15 02:49:08 momjian Exp $ * * NOTES * These functions are stored in pg_amproc. For each operator class @@ -46,11 +46,11 @@ hashint8(PG_FUNCTION_ARGS) { /* * The idea here is to produce a hash value compatible with the values - * produced by hashint4 and hashint2 for logically equivalent inputs; - * this is necessary if we ever hope to support cross-type hash joins - * across these input types. Since all three types are signed, we can - * xor the high half of the int8 value if the sign is positive, or the - * complement of the high half when the sign is negative. + * produced by hashint4 and hashint2 for logically equivalent inputs; this + * is necessary if we ever hope to support cross-type hash joins across + * these input types. Since all three types are signed, we can xor the + * high half of the int8 value if the sign is positive, or the complement + * of the high half when the sign is negative. */ #ifndef INT64_IS_BUSTED int64 val = PG_GETARG_INT64(0); @@ -78,9 +78,9 @@ hashfloat4(PG_FUNCTION_ARGS) float4 key = PG_GETARG_FLOAT4(0); /* - * On IEEE-float machines, minus zero and zero have different bit - * patterns but should compare as equal. We must ensure that they - * have the same hash value, which is most easily done this way: + * On IEEE-float machines, minus zero and zero have different bit patterns + * but should compare as equal. We must ensure that they have the same + * hash value, which is most easily done this way: */ if (key == (float4) 0) PG_RETURN_UINT32(0); @@ -94,9 +94,9 @@ hashfloat8(PG_FUNCTION_ARGS) float8 key = PG_GETARG_FLOAT8(0); /* - * On IEEE-float machines, minus zero and zero have different bit - * patterns but should compare as equal. We must ensure that they - * have the same hash value, which is most easily done this way: + * On IEEE-float machines, minus zero and zero have different bit patterns + * but should compare as equal. We must ensure that they have the same + * hash value, which is most easily done this way: */ if (key == (float8) 0) PG_RETURN_UINT32(0); @@ -126,8 +126,7 @@ hashname(PG_FUNCTION_ARGS) char *key = NameStr(*PG_GETARG_NAME(0)); int keylen = strlen(key); - Assert(keylen < NAMEDATALEN); /* else it's not truncated - * correctly */ + Assert(keylen < NAMEDATALEN); /* else it's not truncated correctly */ return hash_any((unsigned char *) key, keylen); } @@ -139,8 +138,8 @@ hashtext(PG_FUNCTION_ARGS) Datum result; /* - * Note: this is currently identical in behavior to hashvarlena, but - * it seems likely that we may need to do something different in non-C + * Note: this is currently identical in behavior to hashvarlena, but it + * seems likely that we may need to do something different in non-C * locales. (See also hashbpchar, if so.) 
*/ result = hash_any((unsigned char *) VARDATA(key), diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 860376cd481..7637c3566cb 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.37 2005/08/10 21:36:45 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.38 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -50,8 +50,8 @@ _hash_doinsert(Relation rel, HashItem hitem) bool isnull; /* - * Compute the hash key for the item. We do this first so as not to - * need to hold any locks while running the hash function. + * Compute the hash key for the item. We do this first so as not to need + * to hold any locks while running the hash function. */ itup = &(hitem->hash_itup); if (rel->rd_rel->relnatts != 1) @@ -64,12 +64,12 @@ _hash_doinsert(Relation rel, HashItem hitem) itemsz = IndexTupleDSize(hitem->hash_itup) + (sizeof(HashItemData) - sizeof(IndexTupleData)); - itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but - * we need to be consistent */ + itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we + * need to be consistent */ /* - * Acquire shared split lock so we can compute the target bucket - * safely (see README). + * Acquire shared split lock so we can compute the target bucket safely + * (see README). */ _hash_getlock(rel, 0, HASH_SHARE); @@ -79,9 +79,9 @@ _hash_doinsert(Relation rel, HashItem hitem) _hash_checkpage(rel, (Page) metap, LH_META_PAGE); /* - * Check whether the item can fit on a hash page at all. (Eventually, - * we ought to try to apply TOAST methods if not.) Note that at this - * point, itemsz doesn't include the ItemId. + * Check whether the item can fit on a hash page at all. (Eventually, we + * ought to try to apply TOAST methods if not.) Note that at this point, + * itemsz doesn't include the ItemId. */ if (itemsz > HashMaxItemSize((Page) metap)) ereport(ERROR, @@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, HashItem hitem) errmsg("index row size %lu exceeds hash maximum %lu", (unsigned long) itemsz, (unsigned long) HashMaxItemSize((Page) metap)), - errhint("Values larger than a buffer page cannot be indexed."))); + errhint("Values larger than a buffer page cannot be indexed."))); /* * Compute the target bucket number, and convert to block number. @@ -105,8 +105,7 @@ _hash_doinsert(Relation rel, HashItem hitem) _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK); /* - * Acquire share lock on target bucket; then we can release split - * lock. + * Acquire share lock on target bucket; then we can release split lock. */ _hash_getlock(rel, blkno, HASH_SHARE); @@ -130,8 +129,8 @@ _hash_doinsert(Relation rel, HashItem hitem) if (BlockNumberIsValid(nextblkno)) { /* - * ovfl page exists; go get it. if it doesn't have room, - * we'll find out next pass through the loop test above. + * ovfl page exists; go get it. if it doesn't have room, we'll + * find out next pass through the loop test above. 
*/ _hash_relbuf(rel, buf); buf = _hash_getbuf(rel, nextblkno, HASH_WRITE); diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 1b8b798b45d..7289d9a0b35 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.46 2005/05/11 01:26:01 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.47 2005/10/15 02:49:08 momjian Exp $ * * NOTES * Overflow pages look like ordinary relation pages. @@ -44,8 +44,8 @@ bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum) /* loop */ ; /* - * Convert to absolute page number by adding the number of bucket - * pages that exist before this split point. + * Convert to absolute page number by adding the number of bucket pages + * that exist before this split point. */ return (BlockNumber) ((1 << i) + ovflbitnum); } @@ -252,10 +252,10 @@ _hash_getovflpage(Relation rel, Buffer metabuf) /* * We create the new bitmap page with all pages marked "in use". * Actually two pages in the new bitmap's range will exist - * immediately: the bitmap page itself, and the following page - * which is the one we return to the caller. Both of these are - * correctly marked "in use". Subsequent pages do not exist yet, - * but it is convenient to pre-mark them as "in use" too. + * immediately: the bitmap page itself, and the following page which + * is the one we return to the caller. Both of these are correctly + * marked "in use". Subsequent pages do not exist yet, but it is + * convenient to pre-mark them as "in use" too. */ _hash_initbitmap(rel, metap, bitno_to_blkno(metap, bit)); @@ -265,8 +265,8 @@ _hash_getovflpage(Relation rel, Buffer metabuf) else { /* - * Nothing to do here; since the page was past the last used page, - * we know its bitmap bit was preinitialized to "in use". + * Nothing to do here; since the page was past the last used page, we + * know its bitmap bit was preinitialized to "in use". */ } @@ -275,8 +275,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) /* * Adjust hashm_firstfree to avoid redundant searches. But don't risk - * changing it if someone moved it while we were searching bitmap - * pages. + * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) metap->hashm_firstfree = bit + 1; @@ -305,8 +304,7 @@ found: /* * Adjust hashm_firstfree to avoid redundant searches. But don't risk - * changing it if someone moved it while we were searching bitmap - * pages. + * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) { @@ -394,10 +392,10 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf) _hash_wrtbuf(rel, ovflbuf); /* - * Fix up the bucket chain. this is a doubly-linked list, so we must - * fix up the bucket chain members behind and ahead of the overflow - * page being deleted. No concurrency issues since we hold exclusive - * lock on the entire bucket. + * Fix up the bucket chain. this is a doubly-linked list, so we must fix + * up the bucket chain members behind and ahead of the overflow page being + * deleted. No concurrency issues since we hold exclusive lock on the + * entire bucket. 
*/ if (BlockNumberIsValid(prevblkno)) { @@ -488,12 +486,11 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno) /* * It is okay to write-lock the new bitmap page while holding metapage - * write lock, because no one else could be contending for the new - * page. + * write lock, because no one else could be contending for the new page. * - * There is some loss of concurrency in possibly doing I/O for the new - * page while holding the metapage lock, but this path is taken so - * seldom that it's not worth worrying about. + * There is some loss of concurrency in possibly doing I/O for the new page + * while holding the metapage lock, but this path is taken so seldom that + * it's not worth worrying about. */ buf = _hash_getbuf(rel, blkno, HASH_WRITE); pg = BufferGetPage(buf); @@ -586,8 +583,8 @@ _hash_squeezebucket(Relation rel, } /* - * find the last page in the bucket chain by starting at the base - * bucket page and working forward. + * find the last page in the bucket chain by starting at the base bucket + * page and working forward. */ ropaque = wopaque; do @@ -655,22 +652,21 @@ _hash_squeezebucket(Relation rel, /* * delete the tuple from the "read" page. PageIndexTupleDelete - * repacks the ItemId array, so 'roffnum' will be "advanced" - * to the "next" ItemId. + * repacks the ItemId array, so 'roffnum' will be "advanced" to + * the "next" ItemId. */ PageIndexTupleDelete(rpage, roffnum); } /* - * if the "read" page is now empty because of the deletion (or - * because it was empty when we got to it), free it. + * if the "read" page is now empty because of the deletion (or because + * it was empty when we got to it), free it. * * Tricky point here: if our read and write pages are adjacent in the * bucket chain, our write lock on wbuf will conflict with * _hash_freeovflpage's attempt to update the sibling links of the - * removed page. However, in that case we are done anyway, so we - * can simply drop the write lock before calling - * _hash_freeovflpage. + * removed page. However, in that case we are done anyway, so we can + * simply drop the write lock before calling _hash_freeovflpage. */ if (PageIsEmpty(rpage)) { diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 883f2a73121..b40c20b480b 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.51 2005/06/09 21:01:25 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.52 2005/10/15 02:49:08 momjian Exp $ * * NOTES * Postgres hash pages look like ordinary relation pages. The opaque @@ -240,13 +240,13 @@ _hash_metapinit(Relation rel) RelationGetRelationName(rel)); /* - * Determine the target fill factor (tuples per bucket) for this - * index. The idea is to make the fill factor correspond to pages - * about 3/4ths full. We can compute it exactly if the index datatype - * is fixed-width, but for var-width there's some guessing involved. + * Determine the target fill factor (tuples per bucket) for this index. + * The idea is to make the fill factor correspond to pages about 3/4ths + * full. We can compute it exactly if the index datatype is fixed-width, + * but for var-width there's some guessing involved. 
*/ data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid, - RelationGetDescr(rel)->attrs[0]->atttypmod); + RelationGetDescr(rel)->attrs[0]->atttypmod); item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) + sizeof(ItemIdData); /* include the line pointer */ ffactor = (BLCKSZ * 3 / 4) / item_width; @@ -289,9 +289,8 @@ _hash_metapinit(Relation rel) metap->hashm_procid = index_getprocid(rel, 1, HASHPROC); /* - * We initialize the index with two buckets, 0 and 1, occupying - * physical blocks 1 and 2. The first freespace bitmap page is in - * block 3. + * We initialize the index with two buckets, 0 and 1, occupying physical + * blocks 1 and 2. The first freespace bitmap page is in block 3. */ metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */ metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */ @@ -321,8 +320,8 @@ _hash_metapinit(Relation rel) } /* - * Initialize first bitmap page. Can't do this until we create the - * first two buckets, else smgr will complain. + * Initialize first bitmap page. Can't do this until we create the first + * two buckets, else smgr will complain. */ _hash_initbitmap(rel, metap, 3); @@ -367,15 +366,14 @@ _hash_expandtable(Relation rel, Buffer metabuf) * Obtain the page-zero lock to assert the right to begin a split (see * README). * - * Note: deadlock should be impossible here. Our own backend could only - * be holding bucket sharelocks due to stopped indexscans; those will - * not block other holders of the page-zero lock, who are only - * interested in acquiring bucket sharelocks themselves. Exclusive - * bucket locks are only taken here and in hashbulkdelete, and neither - * of these operations needs any additional locks to complete. (If, - * due to some flaw in this reasoning, we manage to deadlock anyway, - * it's okay to error out; the index will be left in a consistent - * state.) + * Note: deadlock should be impossible here. Our own backend could only be + * holding bucket sharelocks due to stopped indexscans; those will not + * block other holders of the page-zero lock, who are only interested in + * acquiring bucket sharelocks themselves. Exclusive bucket locks are + * only taken here and in hashbulkdelete, and neither of these operations + * needs any additional locks to complete. (If, due to some flaw in this + * reasoning, we manage to deadlock anyway, it's okay to error out; the + * index will be left in a consistent state.) */ _hash_getlock(rel, 0, HASH_EXCLUSIVE); @@ -386,8 +384,8 @@ _hash_expandtable(Relation rel, Buffer metabuf) _hash_checkpage(rel, (Page) metap, LH_META_PAGE); /* - * Check to see if split is still needed; someone else might have - * already done one while we waited for the lock. + * Check to see if split is still needed; someone else might have already + * done one while we waited for the lock. * * Make sure this stays in sync with _hash_doinsert() */ @@ -402,11 +400,11 @@ _hash_expandtable(Relation rel, Buffer metabuf) * The lock protects us against other backends, but not against our own * backend. Must check for active scans separately. * - * Ideally we would lock the new bucket too before proceeding, but if we - * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping - * isn't correct yet. For simplicity we update the metapage first and - * then lock. This should be okay because no one else should be - * trying to lock the new bucket yet... 
+ * Ideally we would lock the new bucket too before proceeding, but if we are + * about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't + * correct yet. For simplicity we update the metapage first and then + * lock. This should be okay because no one else should be trying to lock + * the new bucket yet... */ new_bucket = metap->hashm_maxbucket + 1; old_bucket = (new_bucket & metap->hashm_lowmask); @@ -420,14 +418,13 @@ _hash_expandtable(Relation rel, Buffer metabuf) goto fail; /* - * Okay to proceed with split. Update the metapage bucket mapping - * info. + * Okay to proceed with split. Update the metapage bucket mapping info. * - * Since we are scribbling on the metapage data right in the shared - * buffer, any failure in this next little bit leaves us with a big - * problem: the metapage is effectively corrupt but could get written - * back to disk. We don't really expect any failure, but just to be - * sure, establish a critical section. + * Since we are scribbling on the metapage data right in the shared buffer, + * any failure in this next little bit leaves us with a big problem: the + * metapage is effectively corrupt but could get written back to disk. We + * don't really expect any failure, but just to be sure, establish a + * critical section. */ START_CRIT_SECTION(); @@ -443,8 +440,8 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * If the split point is increasing (hashm_maxbucket's log base 2 * increases), we need to adjust the hashm_spares[] array and - * hashm_ovflpoint so that future overflow pages will be created - * beyond this new batch of bucket pages. + * hashm_ovflpoint so that future overflow pages will be created beyond + * this new batch of bucket pages. * * XXX should initialize new bucket pages to prevent out-of-order page * creation? Don't wanna do it right here though. @@ -471,10 +468,9 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * Copy bucket mapping info now; this saves re-accessing the meta page * inside _hash_splitbucket's inner loop. Note that once we drop the - * split lock, other splits could begin, so these values might be out - * of date before _hash_splitbucket finishes. That's okay, since all - * it needs is to tell which of these two buckets to map hashkeys - * into. + * split lock, other splits could begin, so these values might be out of + * date before _hash_splitbucket finishes. That's okay, since all it + * needs is to tell which of these two buckets to map hashkeys into. */ maxbucket = metap->hashm_maxbucket; highmask = metap->hashm_highmask; @@ -554,9 +550,9 @@ _hash_splitbucket(Relation rel, TupleDesc itupdesc = RelationGetDescr(rel); /* - * It should be okay to simultaneously write-lock pages from each - * bucket, since no one else can be trying to acquire buffer lock on - * pages of either bucket. + * It should be okay to simultaneously write-lock pages from each bucket, + * since no one else can be trying to acquire buffer lock on pages of + * either bucket. */ oblkno = start_oblkno; nblkno = start_nblkno; @@ -578,17 +574,17 @@ _hash_splitbucket(Relation rel, nopaque->hasho_filler = HASHO_FILL; /* - * Partition the tuples in the old bucket between the old bucket and - * the new bucket, advancing along the old bucket's overflow bucket - * chain and adding overflow pages to the new bucket as needed. + * Partition the tuples in the old bucket between the old bucket and the + * new bucket, advancing along the old bucket's overflow bucket chain and + * adding overflow pages to the new bucket as needed. 
*/ ooffnum = FirstOffsetNumber; omaxoffnum = PageGetMaxOffsetNumber(opage); for (;;) { /* - * at each iteration through this loop, each of these variables - * should be up-to-date: obuf opage oopaque ooffnum omaxoffnum + * at each iteration through this loop, each of these variables should + * be up-to-date: obuf opage oopaque ooffnum omaxoffnum */ /* check if we're at the end of the page */ @@ -600,8 +596,8 @@ _hash_splitbucket(Relation rel, break; /* - * we ran out of tuples on this particular page, but we have - * more overflow pages; advance to next page. + * we ran out of tuples on this particular page, but we have more + * overflow pages; advance to next page. */ _hash_wrtbuf(rel, obuf); @@ -618,8 +614,7 @@ _hash_splitbucket(Relation rel, * Re-hash the tuple to determine which bucket it now belongs in. * * It is annoying to call the hash function while holding locks, but - * releasing and relocking the page for each tuple is unappealing - * too. + * releasing and relocking the page for each tuple is unappealing too. */ hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum)); itup = &(hitem->hash_itup); @@ -632,9 +627,9 @@ _hash_splitbucket(Relation rel, if (bucket == nbucket) { /* - * insert the tuple into the new bucket. if it doesn't fit on - * the current page in the new bucket, we must allocate a new - * overflow page and place the tuple on that page instead. + * insert the tuple into the new bucket. if it doesn't fit on the + * current page in the new bucket, we must allocate a new overflow + * page and place the tuple on that page instead. */ itemsz = IndexTupleDSize(hitem->hash_itup) + (sizeof(HashItemData) - sizeof(IndexTupleData)); @@ -659,13 +654,13 @@ _hash_splitbucket(Relation rel, RelationGetRelationName(rel)); /* - * now delete the tuple from the old bucket. after this - * section of code, 'ooffnum' will actually point to the - * ItemId to which we would point if we had advanced it before - * the deletion (PageIndexTupleDelete repacks the ItemId - * array). this also means that 'omaxoffnum' is exactly one - * less than it used to be, so we really can just decrement it - * instead of calling PageGetMaxOffsetNumber. + * now delete the tuple from the old bucket. after this section + * of code, 'ooffnum' will actually point to the ItemId to which + * we would point if we had advanced it before the deletion + * (PageIndexTupleDelete repacks the ItemId array). this also + * means that 'omaxoffnum' is exactly one less than it used to be, + * so we really can just decrement it instead of calling + * PageGetMaxOffsetNumber. */ PageIndexTupleDelete(opage, ooffnum); omaxoffnum = OffsetNumberPrev(omaxoffnum); @@ -673,9 +668,9 @@ _hash_splitbucket(Relation rel, else { /* - * the tuple stays on this page. we didn't move anything, so - * we didn't delete anything and therefore we don't have to - * change 'omaxoffnum'. + * the tuple stays on this page. we didn't move anything, so we + * didn't delete anything and therefore we don't have to change + * 'omaxoffnum'. */ Assert(bucket == obucket); ooffnum = OffsetNumberNext(ooffnum); @@ -683,11 +678,10 @@ _hash_splitbucket(Relation rel, } /* - * We're at the end of the old bucket chain, so we're done - * partitioning the tuples. Before quitting, call _hash_squeezebucket - * to ensure the tuples remaining in the old bucket (including the - * overflow pages) are packed as tightly as possible. The new bucket - * is already tight. + * We're at the end of the old bucket chain, so we're done partitioning + * the tuples. 
Before quitting, call _hash_squeezebucket to ensure the + * tuples remaining in the old bucket (including the overflow pages) are + * packed as tightly as possible. The new bucket is already tight. */ _hash_wrtbuf(rel, obuf); _hash_wrtbuf(rel, nbuf); diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c index 782c087e3bc..213eaf89fcd 100644 --- a/src/backend/access/hash/hashscan.c +++ b/src/backend/access/hash/hashscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.38 2004/12/31 21:59:13 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.39 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -44,9 +44,9 @@ ReleaseResources_hash(void) HashScanList next; /* - * Note: this should be a no-op during normal query shutdown. However, - * in an abort situation ExecutorEnd is not called and so there may be - * open index scans to clean up. + * Note: this should be a no-op during normal query shutdown. However, in + * an abort situation ExecutorEnd is not called and so there may be open + * index scans to clean up. */ prev = NULL; diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index 9aaf70b0a9e..fac46d79022 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.39 2005/10/06 02:29:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.40 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -137,33 +137,32 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) ItemPointerSetInvalid(current); /* - * We do not support hash scans with no index qualification, because - * we would have to read the whole index rather than just one bucket. - * That creates a whole raft of problems, since we haven't got a - * practical way to lock all the buckets against splits or - * compactions. + * We do not support hash scans with no index qualification, because we + * would have to read the whole index rather than just one bucket. That + * creates a whole raft of problems, since we haven't got a practical way + * to lock all the buckets against splits or compactions. */ if (scan->numberOfKeys < 1) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("hash indexes do not support whole-index scans"))); + errmsg("hash indexes do not support whole-index scans"))); /* - * If the constant in the index qual is NULL, assume it cannot match - * any items in the index. + * If the constant in the index qual is NULL, assume it cannot match any + * items in the index. */ if (scan->keyData[0].sk_flags & SK_ISNULL) return false; /* - * Okay to compute the hash key. We want to do this before acquiring - * any locks, in case a user-defined hash function happens to be slow. + * Okay to compute the hash key. We want to do this before acquiring any + * locks, in case a user-defined hash function happens to be slow. */ hashkey = _hash_datum2hashkey(rel, scan->keyData[0].sk_argument); /* - * Acquire shared split lock so we can compute the target bucket - * safely (see README). + * Acquire shared split lock so we can compute the target bucket safely + * (see README). 
*/ _hash_getlock(rel, 0, HASH_SHARE); @@ -186,8 +185,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) _hash_relbuf(rel, metabuf); /* - * Acquire share lock on target bucket; then we can release split - * lock. + * Acquire share lock on target bucket; then we can release split lock. */ _hash_getlock(rel, blkno, HASH_SHARE); @@ -263,9 +261,9 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) bucket = opaque->hasho_bucket; /* - * If _hash_step is called from _hash_first, current will not be - * valid, so we can't dereference it. However, in that case, we - * presumably want to start at the beginning/end of the page... + * If _hash_step is called from _hash_first, current will not be valid, so + * we can't dereference it. However, in that case, we presumably want to + * start at the beginning/end of the page... */ maxoff = PageGetMaxOffsetNumber(page); if (ItemPointerIsValid(current)) @@ -276,8 +274,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) /* * 'offnum' now points to the last tuple we have seen (if any). * - * continue to step through tuples until: 1) we get to the end of the - * bucket chain or 2) we find a valid tuple. + * continue to step through tuples until: 1) we get to the end of the bucket + * chain or 2) we find a valid tuple. */ do { diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 185918d03aa..6c669ed62b4 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.199 2005/10/06 02:29:10 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.200 2005/10/15 02:49:08 momjian Exp $ * * * INTERFACE ROUTINES @@ -54,7 +54,7 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, - ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move); + ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move); /* ---------------------------------------------------------------- @@ -272,8 +272,8 @@ heapgettup(Relation relation, /* 'dir' is now non-zero */ /* - * calculate line pointer and number of remaining items to check on - * this page. + * calculate line pointer and number of remaining items to check on this + * page. */ lpp = PageGetItemId(dp, lineoff); if (dir < 0) @@ -282,8 +282,8 @@ heapgettup(Relation relation, linesleft = lines - lineoff; /* - * advance the scan until we find a qualifying tuple or run out of - * stuff to scan + * advance the scan until we find a qualifying tuple or run out of stuff + * to scan */ for (;;) { @@ -321,15 +321,14 @@ heapgettup(Relation relation, } else { - ++lpp; /* move forward in this page's ItemId - * array */ + ++lpp; /* move forward in this page's ItemId array */ ++lineoff; } } /* - * if we get here, it means we've exhausted the items on this page - * and it's time to move to the next. + * if we get here, it means we've exhausted the items on this page and + * it's time to move to the next. */ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK); @@ -506,15 +505,15 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode) /* * Check for shared-cache-inval messages before trying to open the - * relation. This is needed to cover the case where the name - * identifies a rel that has been dropped and recreated since the - * start of our transaction: if we don't flush the old syscache entry - * then we'll latch onto that entry and suffer an error when we do - * LockRelation. 
Note that relation_open does not need to do this, - * since a relation's OID never changes. + * relation. This is needed to cover the case where the name identifies a + * rel that has been dropped and recreated since the start of our + * transaction: if we don't flush the old syscache entry then we'll latch + * onto that entry and suffer an error when we do LockRelation. Note that + * relation_open does not need to do this, since a relation's OID never + * changes. * - * We skip this if asked for NoLock, on the assumption that the caller - * has already ensured some appropriate lock is held. + * We skip this if asked for NoLock, on the assumption that the caller has + * already ensured some appropriate lock is held. */ if (lockmode != NoLock) AcceptInvalidationMessages(); @@ -633,9 +632,9 @@ heap_beginscan(Relation relation, Snapshot snapshot, /* * increment relation ref count while scanning relation * - * This is just to make really sure the relcache entry won't go away - * while the scan has a pointer to it. Caller should be holding the - * rel open anyway, so this is redundant in all normal scenarios... + * This is just to make really sure the relcache entry won't go away while + * the scan has a pointer to it. Caller should be holding the rel open + * anyway, so this is redundant in all normal scenarios... */ RelationIncrementReferenceCount(relation); @@ -649,8 +648,8 @@ heap_beginscan(Relation relation, Snapshot snapshot, scan->rs_nkeys = nkeys; /* - * we do this here instead of in initscan() because heap_rescan also - * calls initscan() and we don't want to allocate memory again + * we do this here instead of in initscan() because heap_rescan also calls + * initscan() and we don't want to allocate memory again */ if (nkeys > 0) scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys); @@ -763,8 +762,8 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction) } /* - * if we get here it means we have a new current scan tuple, so point - * to the proper return buffer and return the tuple. + * if we get here it means we have a new current scan tuple, so point to + * the proper return buffer and return the tuple. */ HEAPDEBUG_3; /* heap_getnext returning tuple */ @@ -859,8 +858,8 @@ heap_release_fetch(Relation relation, dp = (PageHeader) BufferGetPage(buffer); /* - * We'd better check for out-of-range offnum in case of VACUUM since - * the TID was obtained. + * We'd better check for out-of-range offnum in case of VACUUM since the + * TID was obtained. */ offnum = ItemPointerGetOffsetNumber(tid); if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp)) @@ -952,7 +951,7 @@ heap_release_fetch(Relation relation, * possibly uncommitted version. * * *tid is both an input and an output parameter: it is updated to - * show the latest version of the row. Note that it will not be changed + * show the latest version of the row. Note that it will not be changed * if no version of the row passes the snapshot test. */ void @@ -960,7 +959,7 @@ heap_get_latest_tid(Relation relation, Snapshot snapshot, ItemPointer tid) { - BlockNumber blk; + BlockNumber blk; ItemPointerData ctid; TransactionId priorXmax; @@ -969,10 +968,10 @@ heap_get_latest_tid(Relation relation, return; /* - * Since this can be called with user-supplied TID, don't trust the - * input too much. (RelationGetNumberOfBlocks is an expensive check, - * so we don't check t_ctid links again this way. Note that it would - * not do to call it just once and save the result, either.) 
+ * Since this can be called with user-supplied TID, don't trust the input + * too much. (RelationGetNumberOfBlocks is an expensive check, so we + * don't check t_ctid links again this way. Note that it would not do to + * call it just once and save the result, either.) */ blk = ItemPointerGetBlockNumber(tid); if (blk >= RelationGetNumberOfBlocks(relation)) @@ -980,9 +979,9 @@ heap_get_latest_tid(Relation relation, blk, RelationGetRelationName(relation)); /* - * Loop to chase down t_ctid links. At top of loop, ctid is the - * tuple we need to examine, and *tid is the TID we will return if - * ctid turns out to be bogus. + * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we + * need to examine, and *tid is the TID we will return if ctid turns out + * to be bogus. * * Note that we will loop until we reach the end of the t_ctid chain. * Depending on the snapshot passed, there might be at most one visible @@ -1008,8 +1007,8 @@ heap_get_latest_tid(Relation relation, /* * Check for bogus item number. This is not treated as an error - * condition because it can happen while following a t_ctid link. - * We just assume that the prior tid is OK and return it unchanged. + * condition because it can happen while following a t_ctid link. We + * just assume that the prior tid is OK and return it unchanged. */ offnum = ItemPointerGetOffsetNumber(&ctid); if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp)) @@ -1037,7 +1036,7 @@ heap_get_latest_tid(Relation relation, * tuple. Check for XMIN match. */ if (TransactionIdIsValid(priorXmax) && - !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data))) + !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data))) { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buffer); @@ -1068,7 +1067,7 @@ heap_get_latest_tid(Relation relation, priorXmax = HeapTupleHeaderGetXmax(tp.t_data); LockBuffer(buffer, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buffer); - } /* end of loop */ + } /* end of loop */ } /* @@ -1102,13 +1101,12 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, #endif /* - * If the object id of this tuple has already been assigned, trust - * the caller. There are a couple of ways this can happen. At - * initial db creation, the backend program sets oids for tuples. - * When we define an index, we set the oid. Finally, in the - * future, we may allow users to set their own object ids in order - * to support a persistent object store (objects need to contain - * pointers to one another). + * If the object id of this tuple has already been assigned, trust the + * caller. There are a couple of ways this can happen. At initial db + * creation, the backend program sets oids for tuples. When we define + * an index, we set the oid. Finally, in the future, we may allow + * users to set their own object ids in order to support a persistent + * object store (objects need to contain pointers to one another). */ if (!OidIsValid(HeapTupleGetOid(tup))) HeapTupleSetOid(tup, GetNewOid(relation)); @@ -1129,8 +1127,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, /* * If the new tuple is too big for storage or contains already toasted - * out-of-line attributes from some other relation, invoke the - * toaster. + * out-of-line attributes from some other relation, invoke the toaster. 
*/ if (HeapTupleHasExternal(tup) || (MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD)) @@ -1172,9 +1169,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, xlhdr.t_hoff = tup->t_data->t_hoff; /* - * note we mark rdata[1] as belonging to buffer; if XLogInsert - * decides to write the whole page to the xlog, we don't need to - * store xl_heap_header in the xlog. + * note we mark rdata[1] as belonging to buffer; if XLogInsert decides + * to write the whole page to the xlog, we don't need to store + * xl_heap_header in the xlog. */ rdata[1].data = (char *) &xlhdr; rdata[1].len = SizeOfHeapHeader; @@ -1190,9 +1187,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, rdata[2].next = NULL; /* - * If this is the single and first tuple on page, we can reinit - * the page instead of restoring the whole thing. Set flag, and - * hide buffer references from XLogInsert. + * If this is the single and first tuple on page, we can reinit the + * page instead of restoring the whole thing. Set flag, and hide + * buffer references from XLogInsert. */ if (ItemPointerGetOffsetNumber(&(tup->t_self)) == FirstOffsetNumber && PageGetMaxOffsetNumber(page) == FirstOffsetNumber) @@ -1213,10 +1210,10 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, WriteBuffer(buffer); /* - * If tuple is cachable, mark it for invalidation from the caches in - * case we abort. Note it is OK to do this after WriteBuffer releases - * the buffer, because the "tup" data structure is all in local - * memory, not in the shared buffer. + * If tuple is cachable, mark it for invalidation from the caches in case + * we abort. Note it is OK to do this after WriteBuffer releases the + * buffer, because the "tup" data structure is all in local memory, not in + * the shared buffer. */ CacheInvalidateHeapTuple(relation, tup); @@ -1268,7 +1265,7 @@ heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid, TransactionId *update_xmax, CommandId cid, Snapshot crosscheck, bool wait) { - HTSU_Result result; + HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); ItemId lp; HeapTupleData tp; @@ -1301,7 +1298,7 @@ l1: else if (result == HeapTupleBeingUpdated && wait) { TransactionId xwait; - uint16 infomask; + uint16 infomask; /* must copy state data before unlocking buffer */ xwait = HeapTupleHeaderGetXmax(tp.t_data); @@ -1310,13 +1307,13 @@ l1: LockBuffer(buffer, BUFFER_LOCK_UNLOCK); /* - * Acquire tuple lock to establish our priority for the tuple - * (see heap_lock_tuple). LockTuple will release us when we are + * Acquire tuple lock to establish our priority for the tuple (see + * heap_lock_tuple). LockTuple will release us when we are * next-in-line for the tuple. * - * If we are forced to "start over" below, we keep the tuple lock; - * this arranges that we stay at the head of the line while - * rechecking tuple state. + * If we are forced to "start over" below, we keep the tuple lock; this + * arranges that we stay at the head of the line while rechecking + * tuple state. */ if (!have_tuple_lock) { @@ -1347,12 +1344,12 @@ l1: goto l1; /* - * You might think the multixact is necessarily done here, but - * not so: it could have surviving members, namely our own xact - * or other subxacts of this backend. It is legal for us to - * delete the tuple in either case, however (the latter case is - * essentially a situation of upgrading our former shared lock - * to exclusive). 
We don't bother changing the on-disk hint bits + * You might think the multixact is necessarily done here, but not + * so: it could have surviving members, namely our own xact or + * other subxacts of this backend. It is legal for us to delete + * the tuple in either case, however (the latter case is + * essentially a situation of upgrading our former shared lock to + * exclusive). We don't bother changing the on-disk hint bits * since we are about to overwrite the xmax altogether. */ } @@ -1385,8 +1382,8 @@ l1: } /* - * We may overwrite if previous xmax aborted, or if it committed - * but only locked the tuple without updating it. + * We may overwrite if previous xmax aborted, or if it committed but + * only locked the tuple without updating it. */ if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) @@ -1467,18 +1464,18 @@ l1: /* * If the tuple has toasted out-of-line attributes, we need to delete - * those items too. We have to do this before WriteBuffer because we - * need to look at the contents of the tuple, but it's OK to release - * the context lock on the buffer first. + * those items too. We have to do this before WriteBuffer because we need + * to look at the contents of the tuple, but it's OK to release the + * context lock on the buffer first. */ if (HeapTupleHasExternal(&tp)) heap_tuple_toast_attrs(relation, NULL, &tp); /* * Mark tuple for invalidation from system caches at next command - * boundary. We have to do this before WriteBuffer because we need to - * look at the contents of the tuple, so we need to hold our refcount - * on the buffer. + * boundary. We have to do this before WriteBuffer because we need to look + * at the contents of the tuple, so we need to hold our refcount on the + * buffer. */ CacheInvalidateHeapTuple(relation, &tp); @@ -1506,7 +1503,7 @@ l1: void simple_heap_delete(Relation relation, ItemPointer tid) { - HTSU_Result result; + HTSU_Result result; ItemPointerData update_ctid; TransactionId update_xmax; @@ -1569,7 +1566,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, ItemPointer ctid, TransactionId *update_xmax, CommandId cid, Snapshot crosscheck, bool wait) { - HTSU_Result result; + HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); ItemId lp; HeapTupleData oldtup; @@ -1598,8 +1595,8 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, /* * Note: beyond this point, use oldtup not otid to refer to old tuple. * otid may very well point at newtup->t_self, which we will overwrite - * with the new tuple's location, so there's great risk of confusion - * if we use otid anymore. + * with the new tuple's location, so there's great risk of confusion if we + * use otid anymore. */ l2: @@ -1614,7 +1611,7 @@ l2: else if (result == HeapTupleBeingUpdated && wait) { TransactionId xwait; - uint16 infomask; + uint16 infomask; /* must copy state data before unlocking buffer */ xwait = HeapTupleHeaderGetXmax(oldtup.t_data); @@ -1623,13 +1620,13 @@ l2: LockBuffer(buffer, BUFFER_LOCK_UNLOCK); /* - * Acquire tuple lock to establish our priority for the tuple - * (see heap_lock_tuple). LockTuple will release us when we are + * Acquire tuple lock to establish our priority for the tuple (see + * heap_lock_tuple). LockTuple will release us when we are * next-in-line for the tuple. * - * If we are forced to "start over" below, we keep the tuple lock; - * this arranges that we stay at the head of the line while - * rechecking tuple state. 
+ * If we are forced to "start over" below, we keep the tuple lock; this + * arranges that we stay at the head of the line while rechecking + * tuple state. */ if (!have_tuple_lock) { @@ -1660,12 +1657,12 @@ l2: goto l2; /* - * You might think the multixact is necessarily done here, but - * not so: it could have surviving members, namely our own xact - * or other subxacts of this backend. It is legal for us to - * update the tuple in either case, however (the latter case is - * essentially a situation of upgrading our former shared lock - * to exclusive). We don't bother changing the on-disk hint bits + * You might think the multixact is necessarily done here, but not + * so: it could have surviving members, namely our own xact or + * other subxacts of this backend. It is legal for us to update + * the tuple in either case, however (the latter case is + * essentially a situation of upgrading our former shared lock to + * exclusive). We don't bother changing the on-disk hint bits * since we are about to overwrite the xmax altogether. */ } @@ -1698,8 +1695,8 @@ l2: } /* - * We may overwrite if previous xmax aborted, or if it committed - * but only locked the tuple without updating it. + * We may overwrite if previous xmax aborted, or if it committed but + * only locked the tuple without updating it. */ if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) @@ -1753,15 +1750,15 @@ l2: HeapTupleHeaderSetCmax(newtup->t_data, 0); /* for cleanliness */ /* - * If the toaster needs to be activated, OR if the new tuple will not - * fit on the same page as the old, then we need to release the - * context lock (but not the pin!) on the old tuple's buffer while we - * are off doing TOAST and/or table-file-extension work. We must mark - * the old tuple to show that it's already being updated, else other - * processes may try to update it themselves. + * If the toaster needs to be activated, OR if the new tuple will not fit + * on the same page as the old, then we need to release the context lock + * (but not the pin!) on the old tuple's buffer while we are off doing + * TOAST and/or table-file-extension work. We must mark the old tuple to + * show that it's already being updated, else other processes may try to + * update it themselves. * - * We need to invoke the toaster if there are already any out-of-line - * toasted values present, or if the new tuple is over-threshold. + * We need to invoke the toaster if there are already any out-of-line toasted + * values present, or if the new tuple is over-threshold. */ need_toast = (HeapTupleHasExternal(&oldtup) || HeapTupleHasExternal(newtup) || @@ -1790,22 +1787,21 @@ l2: } /* - * Now, do we need a new page for the tuple, or not? This is a - * bit tricky since someone else could have added tuples to the - * page while we weren't looking. We have to recheck the - * available space after reacquiring the buffer lock. But don't - * bother to do that if the former amount of free space is still - * not enough; it's unlikely there's more free now than before. + * Now, do we need a new page for the tuple, or not? This is a bit + * tricky since someone else could have added tuples to the page while + * we weren't looking. We have to recheck the available space after + * reacquiring the buffer lock. But don't bother to do that if the + * former amount of free space is still not enough; it's unlikely + * there's more free now than before. * * What's more, if we need to get a new page, we will need to acquire - * buffer locks on both old and new pages. 
To avoid deadlock - * against some other backend trying to get the same two locks in - * the other order, we must be consistent about the order we get - * the locks in. We use the rule "lock the lower-numbered page of - * the relation first". To implement this, we must do - * RelationGetBufferForTuple while not holding the lock on the old - * page, and we must rely on it to get the locks on both pages in - * the correct order. + * buffer locks on both old and new pages. To avoid deadlock against + * some other backend trying to get the same two locks in the other + * order, we must be consistent about the order we get the locks in. + * We use the rule "lock the lower-numbered page of the relation + * first". To implement this, we must do RelationGetBufferForTuple + * while not holding the lock on the old page, and we must rely on it + * to get the locks on both pages in the correct order. */ if (newtupsize > pagefree) { @@ -1823,8 +1819,8 @@ l2: { /* * Rats, it doesn't fit anymore. We must now unlock and - * relock to avoid deadlock. Fortunately, this path - * should seldom be taken. + * relock to avoid deadlock. Fortunately, this path should + * seldom be taken. */ LockBuffer(buffer, BUFFER_LOCK_UNLOCK); newbuf = RelationGetBufferForTuple(relation, newtup->t_len, @@ -1845,9 +1841,9 @@ l2: } /* - * At this point newbuf and buffer are both pinned and locked, and - * newbuf has enough space for the new tuple. If they are the same - * buffer, only one pin is held. + * At this point newbuf and buffer are both pinned and locked, and newbuf + * has enough space for the new tuple. If they are the same buffer, only + * one pin is held. */ /* NO EREPORT(ERROR) from here till changes are logged */ @@ -1897,8 +1893,8 @@ l2: /* * Mark old tuple for invalidation from system caches at next command - * boundary. We have to do this before WriteBuffer because we need to - * look at the contents of the tuple, so we need to hold our refcount. + * boundary. We have to do this before WriteBuffer because we need to look + * at the contents of the tuple, so we need to hold our refcount. */ CacheInvalidateHeapTuple(relation, &oldtup); @@ -1907,10 +1903,10 @@ l2: WriteBuffer(buffer); /* - * If new tuple is cachable, mark it for invalidation from the caches - * in case we abort. Note it is OK to do this after WriteBuffer - * releases the buffer, because the "newtup" data structure is all in - * local memory, not in the shared buffer. + * If new tuple is cachable, mark it for invalidation from the caches in + * case we abort. Note it is OK to do this after WriteBuffer releases the + * buffer, because the "newtup" data structure is all in local memory, not + * in the shared buffer. */ CacheInvalidateHeapTuple(relation, newtup); @@ -1936,7 +1932,7 @@ l2: void simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) { - HTSU_Result result; + HTSU_Result result; ItemPointerData update_ctid; TransactionId update_xmax; @@ -2012,7 +2008,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) * waiter gets the tuple, potentially leading to indefinite starvation of * some waiters. The possibility of share-locking makes the problem much * worse --- a steady stream of share-lockers can easily block an exclusive - * locker forever. To provide more reliable semantics about who gets a + * locker forever. To provide more reliable semantics about who gets a * tuple-level lock first, we use the standard lock manager. 
The protocol * for waiting for a tuple-level lock is really * LockTuple() @@ -2020,7 +2016,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) * mark tuple as locked by me * UnlockTuple() * When there are multiple waiters, arbitration of who is to get the lock next - * is provided by LockTuple(). However, at most one tuple-level lock will + * is provided by LockTuple(). However, at most one tuple-level lock will * be held or awaited per backend at any time, so we don't risk overflow * of the lock table. Note that incoming share-lockers are required to * do LockTuple as well, if there is any conflict, to ensure that they don't @@ -2032,11 +2028,11 @@ heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer, ItemPointer ctid, TransactionId *update_xmax, CommandId cid, LockTupleMode mode, bool nowait) { - HTSU_Result result; + HTSU_Result result; ItemPointer tid = &(tuple->t_self); ItemId lp; PageHeader dp; - TransactionId xid; + TransactionId xid; uint16 new_infomask; LOCKMODE tuple_lock_type; bool have_tuple_lock = false; @@ -2067,7 +2063,7 @@ l3: else if (result == HeapTupleBeingUpdated) { TransactionId xwait; - uint16 infomask; + uint16 infomask; /* must copy state data before unlocking buffer */ xwait = HeapTupleHeaderGetXmax(tuple->t_data); @@ -2077,12 +2073,12 @@ l3: /* * Acquire tuple lock to establish our priority for the tuple. - * LockTuple will release us when we are next-in-line for the - * tuple. We must do this even if we are share-locking. + * LockTuple will release us when we are next-in-line for the tuple. + * We must do this even if we are share-locking. * - * If we are forced to "start over" below, we keep the tuple lock; - * this arranges that we stay at the head of the line while - * rechecking tuple state. + * If we are forced to "start over" below, we keep the tuple lock; this + * arranges that we stay at the head of the line while rechecking + * tuple state. */ if (!have_tuple_lock) { @@ -2091,8 +2087,8 @@ l3: if (!ConditionalLockTuple(relation, tid, tuple_lock_type)) ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), - errmsg("could not obtain lock on row in relation \"%s\"", - RelationGetRelationName(relation)))); + errmsg("could not obtain lock on row in relation \"%s\"", + RelationGetRelationName(relation)))); } else LockTuple(relation, tid, tuple_lock_type); @@ -2108,8 +2104,8 @@ l3: LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); /* - * Make sure it's still a shared lock, else start over. (It's - * OK if the ownership of the shared lock has changed, though.) + * Make sure it's still a shared lock, else start over. (It's OK + * if the ownership of the shared lock has changed, though.) */ if (!(tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK)) goto l3; @@ -2122,8 +2118,8 @@ l3: if (!ConditionalMultiXactIdWait((MultiXactId) xwait)) ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), - errmsg("could not obtain lock on row in relation \"%s\"", - RelationGetRelationName(relation)))); + errmsg("could not obtain lock on row in relation \"%s\"", + RelationGetRelationName(relation)))); } else MultiXactIdWait((MultiXactId) xwait); @@ -2131,9 +2127,9 @@ l3: LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); /* - * If xwait had just locked the tuple then some other xact - * could update this tuple before we get to this point. - * Check for xmax change, and start over if so. + * If xwait had just locked the tuple then some other xact could + * update this tuple before we get to this point. Check for xmax + * change, and start over if so. 
*/ if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) || !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data), @@ -2141,12 +2137,12 @@ l3: goto l3; /* - * You might think the multixact is necessarily done here, but - * not so: it could have surviving members, namely our own xact - * or other subxacts of this backend. It is legal for us to - * lock the tuple in either case, however. We don't bother - * changing the on-disk hint bits since we are about to - * overwrite the xmax altogether. + * You might think the multixact is necessarily done here, but not + * so: it could have surviving members, namely our own xact or + * other subxacts of this backend. It is legal for us to lock the + * tuple in either case, however. We don't bother changing the + * on-disk hint bits since we are about to overwrite the xmax + * altogether. */ } else @@ -2157,8 +2153,8 @@ l3: if (!ConditionalXactLockTableWait(xwait)) ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), - errmsg("could not obtain lock on row in relation \"%s\"", - RelationGetRelationName(relation)))); + errmsg("could not obtain lock on row in relation \"%s\"", + RelationGetRelationName(relation)))); } else XactLockTableWait(xwait); @@ -2166,9 +2162,9 @@ l3: LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); /* - * xwait is done, but if xwait had just locked the tuple then - * some other xact could update this tuple before we get to - * this point. Check for xmax change, and start over if so. + * xwait is done, but if xwait had just locked the tuple then some + * other xact could update this tuple before we get to this point. + * Check for xmax change, and start over if so. */ if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) || !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data), @@ -2188,10 +2184,10 @@ l3: } /* - * We may lock if previous xmax aborted, or if it committed - * but only locked the tuple without updating it. The case where - * we didn't wait because we are joining an existing shared lock - * is correctly handled, too. + * We may lock if previous xmax aborted, or if it committed but only + * locked the tuple without updating it. The case where we didn't + * wait because we are joining an existing shared lock is correctly + * handled, too. */ if (tuple->t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) @@ -2213,9 +2209,9 @@ l3: } /* - * Compute the new xmax and infomask to store into the tuple. Note we - * do not modify the tuple just yet, because that would leave it in the - * wrong state if multixact.c elogs. + * Compute the new xmax and infomask to store into the tuple. Note we do + * not modify the tuple just yet, because that would leave it in the wrong + * state if multixact.c elogs. */ xid = GetCurrentTransactionId(); @@ -2229,17 +2225,16 @@ l3: if (mode == LockTupleShared) { - TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data); + TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data); uint16 old_infomask = tuple->t_data->t_infomask; /* * If this is the first acquisition of a shared lock in the current - * transaction, set my per-backend OldestMemberMXactId setting. - * We can be certain that the transaction will never become a - * member of any older MultiXactIds than that. (We have to do this - * even if we end up just using our own TransactionId below, since - * some other backend could incorporate our XID into a MultiXact - * immediately afterwards.) + * transaction, set my per-backend OldestMemberMXactId setting. 
We can + * be certain that the transaction will never become a member of any + * older MultiXactIds than that. (We have to do this even if we end + * up just using our own TransactionId below, since some other backend + * could incorporate our XID into a MultiXact immediately afterwards.) */ MultiXactIdSetOldestMember(); @@ -2249,14 +2244,14 @@ l3: * Check to see if we need a MultiXactId because there are multiple * lockers. * - * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID - * bit if the xmax was a MultiXactId but it was not running anymore. - * There is a race condition, which is that the MultiXactId may have - * finished since then, but that uncommon case is handled within + * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if + * the xmax was a MultiXactId but it was not running anymore. There is + * a race condition, which is that the MultiXactId may have finished + * since then, but that uncommon case is handled within * MultiXactIdExpand. * - * There is a similar race condition possible when the old xmax was - * a regular TransactionId. We test TransactionIdIsInProgress again + * There is a similar race condition possible when the old xmax was a + * regular TransactionId. We test TransactionIdIsInProgress again * just to narrow the window, but it's still possible to end up * creating an unnecessary MultiXactId. Fortunately this is harmless. */ @@ -2277,10 +2272,10 @@ l3: { /* * If the old locker is ourselves, we'll just mark the - * tuple again with our own TransactionId. However we - * have to consider the possibility that we had - * exclusive rather than shared lock before --- if so, - * be careful to preserve the exclusivity of the lock. + * tuple again with our own TransactionId. However we + * have to consider the possibility that we had exclusive + * rather than shared lock before --- if so, be careful to + * preserve the exclusivity of the lock. */ if (!(old_infomask & HEAP_XMAX_SHARED_LOCK)) { @@ -2303,9 +2298,9 @@ l3: else { /* - * Can get here iff HeapTupleSatisfiesUpdate saw the old - * xmax as running, but it finished before - * TransactionIdIsInProgress() got to run. Treat it like + * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax + * as running, but it finished before + * TransactionIdIsInProgress() got to run. Treat it like * there's no locker in the tuple. */ } @@ -2329,8 +2324,8 @@ l3: /* * Store transaction information of xact locking the tuple. * - * Note: our CID is meaningless if storing a MultiXactId, but no harm - * in storing it anyway. + * Note: our CID is meaningless if storing a MultiXactId, but no harm in + * storing it anyway. */ tuple->t_data->t_infomask = new_infomask; HeapTupleHeaderSetXmax(tuple->t_data, xid); @@ -2339,8 +2334,8 @@ l3: tuple->t_data->t_ctid = *tid; /* - * XLOG stuff. You might think that we don't need an XLOG record because - * there is no state change worth restoring after a crash. You would be + * XLOG stuff. You might think that we don't need an XLOG record because + * there is no state change worth restoring after a crash. You would be * wrong however: we have just written either a TransactionId or a * MultiXactId that may never have been seen on disk before, and we need * to make sure that there are XLOG entries covering those ID numbers. @@ -2473,8 +2468,8 @@ log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *unused, int uncnt) /* * The unused-offsets array is not actually in the buffer, but pretend - * that it is. 
When XLogInsert stores the whole buffer, the offsets - * array need not be stored too. + * that it is. When XLogInsert stores the whole buffer, the offsets array + * need not be stored too. */ if (uncnt > 0) { @@ -2500,11 +2495,10 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move) { /* - * Note: xlhdr is declared to have adequate size and correct alignment - * for an xl_heap_header. However the two tids, if present at all, - * will be packed in with no wasted space after the xl_heap_header; - * they aren't necessarily aligned as implied by this struct - * declaration. + * Note: xlhdr is declared to have adequate size and correct alignment for + * an xl_heap_header. However the two tids, if present at all, will be + * packed in with no wasted space after the xl_heap_header; they aren't + * necessarily aligned as implied by this struct declaration. */ struct { @@ -2555,8 +2549,8 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from, } /* - * As with insert records, we need not store the rdata[2] segment if - * we decide to store the whole buffer instead. + * As with insert records, we need not store the rdata[2] segment if we + * decide to store the whole buffer instead. */ rdata[2].data = (char *) &xlhdr; rdata[2].len = hsize; @@ -2655,8 +2649,8 @@ heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record) Page page; /* - * Note: the NEWPAGE log record is used for both heaps and indexes, so - * do not do anything that assumes we are touching a heap. + * Note: the NEWPAGE log record is used for both heaps and indexes, so do + * not do anything that assumes we are touching a heap. */ if (record->xl_info & XLR_BKP_BLOCK_1) @@ -2699,7 +2693,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record) return; buffer = XLogReadBuffer(false, reln, - ItemPointerGetBlockNumber(&(xlrec->target.tid))); + ItemPointerGetBlockNumber(&(xlrec->target.tid))); if (!BufferIsValid(buffer)) elog(PANIC, "heap_delete_redo: no block"); @@ -2707,7 +2701,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record) if (PageIsNew((PageHeader) page)) elog(PANIC, "heap_delete_redo: uninitialized page"); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buffer); @@ -2749,7 +2743,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record) struct { HeapTupleHeaderData hdr; - char data[MaxTupleSize]; + char data[MaxTupleSize]; } tbuf; HeapTupleHeader htup; xl_heap_header xlhdr; @@ -2764,7 +2758,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record) return; buffer = XLogReadBuffer(true, reln, - ItemPointerGetBlockNumber(&(xlrec->target.tid))); + ItemPointerGetBlockNumber(&(xlrec->target.tid))); if (!BufferIsValid(buffer)) return; @@ -2776,7 +2770,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record) if (record->xl_info & XLOG_HEAP_INIT_PAGE) PageInit(page, BufferGetPageSize(buffer), 0); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buffer); @@ -2835,7 +2829,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move) struct { HeapTupleHeaderData hdr; - char data[MaxTupleSize]; + char data[MaxTupleSize]; } tbuf; xl_heap_header xlhdr; int hsize; @@ -2850,7 +2844,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move) /* Deal with old tuple version */ buffer = 
XLogReadBuffer(false, reln, - ItemPointerGetBlockNumber(&(xlrec->target.tid))); + ItemPointerGetBlockNumber(&(xlrec->target.tid))); if (!BufferIsValid(buffer)) elog(PANIC, "heap_update_redo: no block"); @@ -2858,7 +2852,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move) if (PageIsNew((PageHeader) page)) elog(PANIC, "heap_update_redo: uninitialized old page"); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buffer); @@ -2928,7 +2922,7 @@ newsame:; if (record->xl_info & XLOG_HEAP_INIT_PAGE) PageInit(page, BufferGetPageSize(buffer), 0); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buffer); @@ -2961,7 +2955,7 @@ newsame:; if (move) { - TransactionId xid[2]; /* xmax, xmin */ + TransactionId xid[2]; /* xmax, xmin */ memcpy((char *) xid, (char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader, @@ -3008,7 +3002,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record) return; buffer = XLogReadBuffer(false, reln, - ItemPointerGetBlockNumber(&(xlrec->target.tid))); + ItemPointerGetBlockNumber(&(xlrec->target.tid))); if (!BufferIsValid(buffer)) elog(PANIC, "heap_lock_redo: no block"); @@ -3016,7 +3010,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record) if (PageIsNew((PageHeader) page)) elog(PANIC, "heap_lock_redo: uninitialized page"); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buffer); @@ -3081,7 +3075,7 @@ static void out_target(char *buf, xl_heaptid *target) { sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u", - target->node.spcNode, target->node.dbNode, target->node.relNode, + target->node.spcNode, target->node.dbNode, target->node.relNode, ItemPointerGetBlockNumber(&(target->tid)), ItemPointerGetOffsetNumber(&(target->tid))); } diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index fc1b0afd21e..800ee4a805b 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.57 2005/06/20 18:37:01 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58 2005/10/15 02:49:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -80,7 +80,7 @@ RelationPutHeapTuple(Relation relation, * enough there). In that case, the page will be pinned and locked only once. * * If use_fsm is true (the normal case), we use FSM to help us find free - * space. If use_fsm is false, we always append a new empty page to the + * space. If use_fsm is false, we always append a new empty page to the * end of the relation if the tuple won't fit on the current target page. * This can save some cycles when we know the relation is new and doesn't * contain useful amounts of free space. @@ -122,22 +122,20 @@ RelationGetBufferForTuple(Relation relation, Size len, if (otherBuffer != InvalidBuffer) otherBlock = BufferGetBlockNumber(otherBuffer); else - otherBlock = InvalidBlockNumber; /* just to keep compiler - * quiet */ + otherBlock = InvalidBlockNumber; /* just to keep compiler quiet */ /* - * We first try to put the tuple on the same page we last inserted a - * tuple on, as cached in the relcache entry. 
If that doesn't work, - * we ask the shared Free Space Map to locate a suitable page. Since - * the FSM's info might be out of date, we have to be prepared to loop - * around and retry multiple times. (To insure this isn't an infinite - * loop, we must update the FSM with the correct amount of free space - * on each page that proves not to be suitable.) If the FSM has no - * record of a page with enough free space, we give up and extend the - * relation. + * We first try to put the tuple on the same page we last inserted a tuple + * on, as cached in the relcache entry. If that doesn't work, we ask the + * shared Free Space Map to locate a suitable page. Since the FSM's info + * might be out of date, we have to be prepared to loop around and retry + * multiple times. (To insure this isn't an infinite loop, we must update + * the FSM with the correct amount of free space on each page that proves + * not to be suitable.) If the FSM has no record of a page with enough + * free space, we give up and extend the relation. * - * When use_fsm is false, we either put the tuple onto the existing - * target page or extend the relation. + * When use_fsm is false, we either put the tuple onto the existing target + * page or extend the relation. */ targetBlock = relation->rd_targblock; @@ -151,9 +149,9 @@ RelationGetBufferForTuple(Relation relation, Size len, targetBlock = GetPageWithFreeSpace(&relation->rd_node, len); /* - * If the FSM knows nothing of the rel, try the last page before - * we give up and extend. This avoids one-tuple-per-page syndrome - * during bootstrapping or in a recently-started system. + * If the FSM knows nothing of the rel, try the last page before we + * give up and extend. This avoids one-tuple-per-page syndrome during + * bootstrapping or in a recently-started system. */ if (targetBlock == InvalidBlockNumber) { @@ -168,8 +166,8 @@ RelationGetBufferForTuple(Relation relation, Size len, { /* * Read and exclusive-lock the target block, as well as the other - * block if one was given, taking suitable care with lock ordering - * and the possibility they are the same block. + * block if one was given, taking suitable care with lock ordering and + * the possibility they are the same block. */ if (otherBuffer == InvalidBuffer) { @@ -199,8 +197,8 @@ RelationGetBufferForTuple(Relation relation, Size len, } /* - * Now we can check to see if there's enough free space here. If - * so, we're done. + * Now we can check to see if there's enough free space here. If so, + * we're done. */ pageHeader = (Page) BufferGetPage(buffer); pageFreeSpace = PageGetFreeSpace(pageHeader); @@ -213,9 +211,9 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Not enough space, so we must give up our page locks and pin (if - * any) and prepare to look elsewhere. We don't care which order - * we unlock the two buffers in, so this can be slightly simpler - * than the code above. + * any) and prepare to look elsewhere. We don't care which order we + * unlock the two buffers in, so this can be slightly simpler than the + * code above. */ LockBuffer(buffer, BUFFER_LOCK_UNLOCK); if (otherBuffer == InvalidBuffer) @@ -231,8 +229,8 @@ RelationGetBufferForTuple(Relation relation, Size len, break; /* - * Update FSM as to condition of this page, and ask for another - * page to try. + * Update FSM as to condition of this page, and ask for another page + * to try. 
*/ targetBlock = RecordAndGetPageWithFreeSpace(&relation->rd_node, targetBlock, @@ -243,10 +241,10 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Have to extend the relation. * - * We have to use a lock to ensure no one else is extending the rel at - * the same time, else we will both try to initialize the same new - * page. We can skip locking for new or temp relations, however, - * since no one else could be accessing them. + * We have to use a lock to ensure no one else is extending the rel at the + * same time, else we will both try to initialize the same new page. We + * can skip locking for new or temp relations, however, since no one else + * could be accessing them. */ needLock = !RELATION_IS_LOCAL(relation); @@ -254,17 +252,16 @@ RelationGetBufferForTuple(Relation relation, Size len, LockRelationForExtension(relation, ExclusiveLock); /* - * XXX This does an lseek - rather expensive - but at the moment it is - * the only way to accurately determine how many blocks are in a - * relation. Is it worth keeping an accurate file length in shared - * memory someplace, rather than relying on the kernel to do it for - * us? + * XXX This does an lseek - rather expensive - but at the moment it is the + * only way to accurately determine how many blocks are in a relation. Is + * it worth keeping an accurate file length in shared memory someplace, + * rather than relying on the kernel to do it for us? */ buffer = ReadBuffer(relation, P_NEW); /* - * We can be certain that locking the otherBuffer first is OK, since - * it must have a lower page number. + * We can be certain that locking the otherBuffer first is OK, since it + * must have a lower page number. */ if (otherBuffer != InvalidBuffer) LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE); @@ -275,10 +272,10 @@ RelationGetBufferForTuple(Relation relation, Size len, LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); /* - * Release the file-extension lock; it's now OK for someone else to - * extend the relation some more. Note that we cannot release this - * lock before we have buffer lock on the new page, or we risk a - * race condition against vacuumlazy.c --- see comments therein. + * Release the file-extension lock; it's now OK for someone else to extend + * the relation some more. Note that we cannot release this lock before + * we have buffer lock on the new page, or we risk a race condition + * against vacuumlazy.c --- see comments therein. */ if (needLock) UnlockRelationForExtension(relation, ExclusiveLock); @@ -299,11 +296,11 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Remember the new page as our target for future insertions. * - * XXX should we enter the new page into the free space map immediately, - * or just keep it for this backend's exclusive use in the short run - * (until VACUUM sees it)? Seems to depend on whether you expect the - * current backend to make more insertions or not, which is probably a - * good bet most of the time. So for now, don't add it to FSM yet. + * XXX should we enter the new page into the free space map immediately, or + * just keep it for this backend's exclusive use in the short run (until + * VACUUM sees it)? Seems to depend on whether you expect the current + * backend to make more insertions or not, which is probably a good bet + * most of the time. So for now, don't add it to FSM yet. 
*/ relation->rd_targblock = BufferGetBlockNumber(buffer); diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index 02da8446cd0..fd20f111b80 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.52 2005/08/12 01:35:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.53 2005/10/15 02:49:09 momjian Exp $ * * * INTERFACE ROUTINES @@ -90,8 +90,7 @@ heap_tuple_fetch_attr(varattrib *attr) else { /* - * This is a plain value inside of the main tuple - why am I - * called? + * This is a plain value inside of the main tuple - why am I called? */ result = attr; } @@ -154,8 +153,7 @@ heap_tuple_untoast_attr(varattrib *attr) else /* - * This is a plain value inside of the main tuple - why am I - * called? + * This is a plain value inside of the main tuple - why am I called? */ return attr; @@ -255,8 +253,8 @@ toast_raw_datum_size(Datum value) else if (VARATT_IS_EXTERNAL(attr)) { /* - * an uncompressed external attribute has rawsize including the - * header (not too consistent!) + * an uncompressed external attribute has rawsize including the header + * (not too consistent!) */ result = attr->va_content.va_external.va_rawsize; } @@ -274,26 +272,26 @@ toast_raw_datum_size(Datum value) * Return the physical storage size (possibly compressed) of a varlena datum * ---------- */ -Size +Size toast_datum_size(Datum value) { - varattrib *attr = (varattrib *) DatumGetPointer(value); + varattrib *attr = (varattrib *) DatumGetPointer(value); Size result; if (VARATT_IS_EXTERNAL(attr)) { /* * Attribute is stored externally - return the extsize whether - * compressed or not. We do not count the size of the toast - * pointer ... should we? + * compressed or not. We do not count the size of the toast pointer + * ... should we? */ result = attr->va_content.va_external.va_extsize; } else { /* - * Attribute is stored inline either compressed or not, just - * calculate the size of the datum in either case. + * Attribute is stored inline either compressed or not, just calculate + * the size of the datum in either case. */ result = VARSIZE(attr); } @@ -321,12 +319,12 @@ toast_delete(Relation rel, HeapTuple oldtup) * Get the tuple descriptor and break down the tuple into fields. * * NOTE: it's debatable whether to use heap_deformtuple() here or just - * heap_getattr() only the varlena columns. The latter could win if - * there are few varlena columns and many non-varlena ones. However, - * heap_deformtuple costs only O(N) while the heap_getattr way would - * cost O(N^2) if there are many varlena columns, so it seems better - * to err on the side of linear cost. (We won't even be here unless - * there's at least one varlena column, by the way.) + * heap_getattr() only the varlena columns. The latter could win if there + * are few varlena columns and many non-varlena ones. However, + * heap_deformtuple costs only O(N) while the heap_getattr way would cost + * O(N^2) if there are many varlena columns, so it seems better to err on + * the side of linear cost. (We won't even be here unless there's at + * least one varlena column, by the way.) */ tupleDesc = rel->rd_att; att = tupleDesc->attrs; @@ -336,8 +334,8 @@ toast_delete(Relation rel, HeapTuple oldtup) heap_deform_tuple(oldtup, tupleDesc, toast_values, toast_isnull); /* - * Check for external stored attributes and delete them from the - * secondary relation. 
+ * Check for external stored attributes and delete them from the secondary + * relation. */ for (i = 0; i < numAttrs; i++) { @@ -447,9 +445,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup) else { /* - * This attribute isn't changed by this update so we - * reuse the original reference to the old value in - * the new tuple. + * This attribute isn't changed by this update so we reuse + * the original reference to the old value in the new + * tuple. */ toast_action[i] = 'p'; toast_sizes[i] = VARATT_SIZE(toast_values[i]); @@ -582,16 +580,15 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup) else { /* - * incompressible data, ignore on subsequent compression - * passes + * incompressible data, ignore on subsequent compression passes */ toast_action[i] = 'x'; } } /* - * Second we look for attributes of attstorage 'x' or 'e' that are - * still inline. + * Second we look for attributes of attstorage 'x' or 'e' that are still + * inline. */ while (MAXALIGN(heap_compute_data_size(tupleDesc, toast_values, toast_isnull)) > @@ -696,8 +693,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup) else { /* - * incompressible data, ignore on subsequent compression - * passes + * incompressible data, ignore on subsequent compression passes */ toast_action[i] = 'x'; } @@ -755,8 +751,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup) } /* - * In the case we toasted any values, we need to build a new heap - * tuple with the changed values. + * In the case we toasted any values, we need to build a new heap tuple + * with the changed values. */ if (need_change) { @@ -798,8 +794,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup) has_nulls ? newtup->t_data->t_bits : NULL); /* - * In the case we modified a previously modified tuple again, free - * the memory from the previous run + * In the case we modified a previously modified tuple again, free the + * memory from the previous run */ if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE)) pfree(olddata); @@ -906,8 +902,8 @@ toast_flatten_tuple_attribute(Datum value, return value; /* - * Calculate the new size of the tuple. Header size should not - * change, but data size might. + * Calculate the new size of the tuple. Header size should not change, + * but data size might. */ new_len = offsetof(HeapTupleHeaderData, t_bits); if (has_nulls) @@ -1007,9 +1003,9 @@ toast_save_datum(Relation rel, Datum value) int32 data_todo; /* - * Open the toast relation and its index. We can use the index to - * check uniqueness of the OID we assign to the toasted item, even - * though it has additional columns besides OID. + * Open the toast relation and its index. We can use the index to check + * uniqueness of the OID we assign to the toasted item, even though it has + * additional columns besides OID. */ toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock); toasttupDesc = toastrel->rd_att; @@ -1082,11 +1078,11 @@ toast_save_datum(Relation rel, Datum value) /* * Create the index entry. We cheat a little here by not using - * FormIndexDatum: this relies on the knowledge that the index - * columns are the same as the initial columns of the table. + * FormIndexDatum: this relies on the knowledge that the index columns + * are the same as the initial columns of the table. * - * Note also that there had better not be any user-created index on - * the TOAST table, since we don't bother to update anything else. 
+ * Note also that there had better not be any user-created index on the + * TOAST table, since we don't bother to update anything else. */ index_insert(toastidx, t_values, t_isnull, &(toasttup->t_self), @@ -1148,7 +1144,7 @@ toast_delete_datum(Relation rel, Datum value) ScanKeyInit(&toastkey, (AttrNumber) 1, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); + ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); /* * Find the chunks by index @@ -1219,14 +1215,14 @@ toast_fetch_datum(varattrib *attr) ScanKeyInit(&toastkey, (AttrNumber) 1, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); + ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); /* * Read the chunks by index * - * Note that because the index is actually on (valueid, chunkidx) we will - * see the chunks in chunkidx order, even though we didn't explicitly - * ask for it. + * Note that because the index is actually on (valueid, chunkidx) we will see + * the chunks in chunkidx order, even though we didn't explicitly ask for + * it. */ nextidx = 0; @@ -1367,13 +1363,13 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length) toastidx = index_open(toastrel->rd_rel->reltoastidxid); /* - * Setup a scan key to fetch from the index. This is either two keys - * or three depending on the number of chunks. + * Setup a scan key to fetch from the index. This is either two keys or + * three depending on the number of chunks. */ ScanKeyInit(&toastkey[0], (AttrNumber) 1, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); + ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); /* * Use equality condition for one chunk, a range condition otherwise: diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 90e910f343f..ed604f9c5dc 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.48 2005/05/27 23:31:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.49 2005/10/15 02:49:09 momjian Exp $ * * NOTES * many of the old access method routines have been turned into @@ -78,15 +78,15 @@ RelationGetIndexScan(Relation indexRelation, scan->numberOfKeys = nkeys; /* - * We allocate the key space here, but the AM is responsible for - * actually filling it from the passed key array. + * We allocate the key space here, but the AM is responsible for actually + * filling it from the passed key array. */ if (nkeys > 0) scan->keyData = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys); else scan->keyData = NULL; - scan->is_multiscan = false; /* caller may change this */ + scan->is_multiscan = false; /* caller may change this */ scan->kill_prior_tuple = false; scan->ignore_killed_tuples = true; /* default setting */ scan->keys_are_unique = false; /* may be set by index AM */ @@ -203,8 +203,8 @@ systable_beginscan(Relation heapRelation, /* * Change attribute numbers to be index column numbers. * - * This code could be generalized to search for the index key numbers - * to substitute, but for now there's no need. + * This code could be generalized to search for the index key numbers to + * substitute, but for now there's no need. 
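toast_fetch_datum, as the comments above note, relies on the toast index being on (valueid, chunkidx), so chunks arrive already ordered by chunk number. A standalone sketch of the reassembly loop follows; the chunk size, structures and checks are illustrative only, though the real code performs the same kind of sequence and length validation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE 4            /* invented stand-in for the real chunk size */

/* One stored chunk: its sequence number and its payload. */
struct chunk { int chunkidx; int len; const char *data; };

int main(void)
{
    const char *value = "the quick brown fox";
    int total = (int) strlen(value);
    int nchunks = (total + CHUNK_SIZE - 1) / CHUNK_SIZE;

    /* Pretend these came back from an index scan ordered by chunkidx. */
    struct chunk chunks[16];
    for (int i = 0; i < nchunks; i++) {
        chunks[i].chunkidx = i;
        chunks[i].len = (i == nchunks - 1) ? total - i * CHUNK_SIZE : CHUNK_SIZE;
        chunks[i].data = value + i * CHUNK_SIZE;
    }

    /* Reassembly: verify the sequence number and the expected length of
     * every chunk, then copy it into place at chunkidx * CHUNK_SIZE. */
    char *result = malloc(total + 1);
    int nextidx = 0;
    for (int i = 0; i < nchunks; i++) {
        if (chunks[i].chunkidx != nextidx) {
            fprintf(stderr, "unexpected chunk number %d (expected %d)\n",
                    chunks[i].chunkidx, nextidx);
            return 1;
        }
        int expected = (nextidx == nchunks - 1)
            ? total - nextidx * CHUNK_SIZE : CHUNK_SIZE;
        if (chunks[i].len != expected) {
            fprintf(stderr, "unexpected chunk size %d\n", chunks[i].len);
            return 1;
        }
        memcpy(result + nextidx * CHUNK_SIZE, chunks[i].data, chunks[i].len);
        nextidx++;
    }
    result[total] = '\0';

    printf("reassembled: \"%s\"\n", result);
    free(result);
    return 0;
}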
*/ for (i = 0; i < nkeys; i++) { diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 7bf7fcd22f0..bd2e3bdd06e 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.85 2005/10/06 02:29:11 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.86 2005/10/15 02:49:09 momjian Exp $ * * INTERFACE ROUTINES * index_open - open an index relation by relation OID @@ -111,7 +111,7 @@ do { \ } while(0) static IndexScanDesc index_beginscan_internal(Relation indexRelation, - int nkeys, ScanKey key); + int nkeys, ScanKey key); /* ---------------------------------------------------------------- @@ -122,14 +122,14 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation, /* ---------------- * index_open - open an index relation by relation OID * - * Note: we acquire no lock on the index. A lock is not needed when + * Note: we acquire no lock on the index. A lock is not needed when * simply examining the index reldesc; the index's schema information * is considered to be protected by the lock that the caller had better - * be holding on the parent relation. Some type of lock should be + * be holding on the parent relation. Some type of lock should be * obtained on the index before physically accessing it, however. * This is handled automatically for most uses by index_beginscan * and index_endscan for scan cases, or by ExecOpenIndices and - * ExecCloseIndices for update cases. Other callers will need to + * ExecCloseIndices for update cases. Other callers will need to * obtain their own locks. * * This is a convenience routine adapted for indexscan use. @@ -241,8 +241,8 @@ index_beginscan(Relation heapRelation, scan = index_beginscan_internal(indexRelation, nkeys, key); /* - * Save additional parameters into the scandesc. Everything else was - * set up by RelationGetIndexScan. + * Save additional parameters into the scandesc. Everything else was set + * up by RelationGetIndexScan. */ scan->is_multiscan = false; scan->heapRelation = heapRelation; @@ -267,8 +267,8 @@ index_beginscan_multi(Relation indexRelation, scan = index_beginscan_internal(indexRelation, nkeys, key); /* - * Save additional parameters into the scandesc. Everything else was - * set up by RelationGetIndexScan. + * Save additional parameters into the scandesc. Everything else was set + * up by RelationGetIndexScan. */ scan->is_multiscan = true; scan->xs_snapshot = snapshot; @@ -294,14 +294,14 @@ index_beginscan_internal(Relation indexRelation, * Acquire AccessShareLock for the duration of the scan * * Note: we could get an SI inval message here and consequently have to - * rebuild the relcache entry. The refcount increment above ensures - * that we will rebuild it and not just flush it... + * rebuild the relcache entry. The refcount increment above ensures that + * we will rebuild it and not just flush it... */ LockRelation(indexRelation, AccessShareLock); /* - * LockRelation can clean rd_aminfo structure, so fill procedure - * after LockRelation + * LockRelation can clean rd_aminfo structure, so fill procedure after + * LockRelation */ GET_REL_PROCEDURE(ambeginscan); @@ -425,8 +425,8 @@ index_restrpos(IndexScanDesc scan) /* * We do not reset got_tuple; so if the scan is actually being - * short-circuited by index_getnext, the effective position - * restoration is done by restoring unique_tuple_pos. 
+ * short-circuited by index_getnext, the effective position restoration is + * done by restoring unique_tuple_pos. */ scan->unique_tuple_pos = scan->unique_tuple_mark; @@ -454,19 +454,19 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) /* * If we already got a tuple and it must be unique, there's no need to - * make the index AM look through any additional tuples. (This can - * save a useful amount of work in scenarios where there are many dead - * tuples due to heavy update activity.) + * make the index AM look through any additional tuples. (This can save a + * useful amount of work in scenarios where there are many dead tuples due + * to heavy update activity.) * * To do this we must keep track of the logical scan position * (before/on/after tuple). Also, we have to be sure to release scan - * resources before returning NULL; if we fail to do so then a - * multi-index scan can easily run the system out of free buffers. We - * can release index-level resources fairly cheaply by calling - * index_rescan. This means there are two persistent states as far as - * the index AM is concerned: on-tuple and rescanned. If we are - * actually asked to re-fetch the single tuple, we have to go through - * a fresh indexscan startup, which penalizes that (infrequent) case. + * resources before returning NULL; if we fail to do so then a multi-index + * scan can easily run the system out of free buffers. We can release + * index-level resources fairly cheaply by calling index_rescan. This + * means there are two persistent states as far as the index AM is + * concerned: on-tuple and rescanned. If we are actually asked to + * re-fetch the single tuple, we have to go through a fresh indexscan + * startup, which penalizes that (infrequent) case. */ if (scan->keys_are_unique && scan->got_tuple) { @@ -485,19 +485,18 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) if (new_tuple_pos == 0) { /* - * We are moving onto the unique tuple from having been off - * it. We just fall through and let the index AM do the work. - * Note we should get the right answer regardless of scan - * direction. + * We are moving onto the unique tuple from having been off it. We + * just fall through and let the index AM do the work. Note we + * should get the right answer regardless of scan direction. */ scan->unique_tuple_pos = 0; /* need to update position */ } else { /* - * Moving off the tuple; must do amrescan to release - * index-level pins before we return NULL. Since index_rescan - * will reset my state, must save and restore... + * Moving off the tuple; must do amrescan to release index-level + * pins before we return NULL. Since index_rescan will reset my + * state, must save and restore... */ int unique_tuple_mark = scan->unique_tuple_mark; @@ -520,8 +519,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) bool found; /* - * The AM's gettuple proc finds the next tuple matching the scan - * keys. + * The AM's gettuple proc finds the next tuple matching the scan keys. */ found = DatumGetBool(FunctionCall2(procedure, PointerGetDatum(scan), @@ -556,9 +554,9 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) continue; /* - * If we can't see it, maybe no one else can either. Check to see - * if the tuple is dead to all transactions. If so, signal the - * index AM to not return it on future indexscans. + * If we can't see it, maybe no one else can either. Check to see if + * the tuple is dead to all transactions. 
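The long index_getnext comment above is easier to follow with the scan position made explicit. The toy model below keeps only the piece being described: once the single matching row of a unique scan has been returned, later calls just move a logical position (before, on, or after the row) instead of touching the index. It is a simplification; in particular, the real code restarts the index scan when the caller steps back onto the row, while this sketch only reports the position, and every name here is invented.

#include <stdio.h>

/* Logical scan position relative to the single matching row. */
enum pos { BEFORE = -1, ON = 0, AFTER = +1 };

struct unique_scan {
    int got_tuple;              /* already returned the row once? */
    enum pos position;
};

/* dir: +1 = forward step, -1 = backward step.  Returns 1 if this call
 * yields a row. */
static int unique_getnext(struct unique_scan *scan, int dir)
{
    if (!scan->got_tuple) {
        /* First call: this is where the index AM would actually be asked. */
        scan->got_tuple = 1;
        scan->position = ON;
        return 1;
    }

    /* Short circuit: adjust the logical position, clamped at both ends. */
    int p = (int) scan->position + dir;
    if (p < BEFORE) p = BEFORE;
    if (p > AFTER)  p = AFTER;
    scan->position = (enum pos) p;

    /* A real implementation would re-run the index scan when moving back
     * onto the row; here we just report whether we are on it. */
    return scan->position == ON;
}

int main(void)
{
    struct unique_scan scan = {0, BEFORE};

    printf("forward:  %d\n", unique_getnext(&scan, +1)); /* 1: found the row */
    printf("forward:  %d\n", unique_getnext(&scan, +1)); /* 0: now after it  */
    printf("backward: %d\n", unique_getnext(&scan, -1)); /* 1: back on it    */
    printf("backward: %d\n", unique_getnext(&scan, -1)); /* 0: now before it */
    return 0;
}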
If so, signal the index AM + * to not return it on future indexscans. * * We told heap_release_fetch to keep a pin on the buffer, so we can * re-access the tuple here. But we must re-lock the buffer first. @@ -576,8 +574,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) scan->got_tuple = true; /* - * If we just fetched a known-unique tuple, then subsequent calls will - * go through the short-circuit code above. unique_tuple_pos has been + * If we just fetched a known-unique tuple, then subsequent calls will go + * through the short-circuit code above. unique_tuple_pos has been * initialized to 0, which is the correct state ("on row"). */ @@ -805,11 +803,10 @@ index_getprocinfo(Relation irel, procId = loc[procindex]; /* - * Complain if function was not found during - * IndexSupportInitialize. This should not happen unless the - * system tables contain bogus entries for the index opclass. (If - * an AM wants to allow a support function to be optional, it can - * use index_getprocid.) + * Complain if function was not found during IndexSupportInitialize. + * This should not happen unless the system tables contain bogus + * entries for the index opclass. (If an AM wants to allow a support + * function to be optional, it can use index_getprocid.) */ if (!RegProcedureIsValid(procId)) elog(ERROR, "missing support function %d for attribute %d of index \"%s\"", diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index c73ba358ec1..33c7612aac5 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.126 2005/10/12 17:18:03 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.127 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -93,30 +93,29 @@ top: /* * If the page was split between the time that we surrendered our read - * lock and acquired our write lock, then this page may no longer be - * the right place for the key we want to insert. In this case, we - * need to move right in the tree. See Lehman and Yao for an - * excruciatingly precise description. + * lock and acquired our write lock, then this page may no longer be the + * right place for the key we want to insert. In this case, we need to + * move right in the tree. See Lehman and Yao for an excruciatingly + * precise description. */ buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE); /* - * If we're not allowing duplicates, make sure the key isn't already - * in the index. + * If we're not allowing duplicates, make sure the key isn't already in + * the index. * - * NOTE: obviously, _bt_check_unique can only detect keys that are - * already in the index; so it cannot defend against concurrent - * insertions of the same key. We protect against that by means of - * holding a write lock on the target page. Any other would-be - * inserter of the same key must acquire a write lock on the same - * target page, so only one would-be inserter can be making the check - * at one time. Furthermore, once we are past the check we hold write - * locks continuously until we have performed our insertion, so no - * later inserter can fail to see our insertion. (This requires some - * care in _bt_insertonpg.) 
+ * NOTE: obviously, _bt_check_unique can only detect keys that are already in + * the index; so it cannot defend against concurrent insertions of the + * same key. We protect against that by means of holding a write lock on + * the target page. Any other would-be inserter of the same key must + * acquire a write lock on the same target page, so only one would-be + * inserter can be making the check at one time. Furthermore, once we are + * past the check we hold write locks continuously until we have performed + * our insertion, so no later inserter can fail to see our insertion. + * (This requires some care in _bt_insertonpg.) * - * If we must wait for another xact, we release the lock while waiting, - * and then must start over completely. + * If we must wait for another xact, we release the lock while waiting, and + * then must start over completely. */ if (index_is_unique) { @@ -167,8 +166,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel, maxoff = PageGetMaxOffsetNumber(page); /* - * Find first item >= proposed new item. Note we could also get a - * pointer to end-of-page here. + * Find first item >= proposed new item. Note we could also get a pointer + * to end-of-page here. */ offset = _bt_binsrch(rel, buf, natts, itup_scankey, false); @@ -194,24 +193,24 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel, /* * We can skip items that are marked killed. * - * Formerly, we applied _bt_isequal() before checking the kill - * flag, so as to fall out of the item loop as soon as - * possible. However, in the presence of heavy update activity - * an index may contain many killed items with the same key; - * running _bt_isequal() on each killed item gets expensive. - * Furthermore it is likely that the non-killed version of - * each key appears first, so that we didn't actually get to - * exit any sooner anyway. So now we just advance over killed - * items as quickly as we can. We only apply _bt_isequal() - * when we get to a non-killed item or the end of the page. + * Formerly, we applied _bt_isequal() before checking the kill flag, + * so as to fall out of the item loop as soon as possible. + * However, in the presence of heavy update activity an index may + * contain many killed items with the same key; running + * _bt_isequal() on each killed item gets expensive. Furthermore + * it is likely that the non-killed version of each key appears + * first, so that we didn't actually get to exit any sooner + * anyway. So now we just advance over killed items as quickly as + * we can. We only apply _bt_isequal() when we get to a non-killed + * item or the end of the page. */ if (!ItemIdDeleted(curitemid)) { /* - * _bt_compare returns 0 for (1,NULL) and (1,NULL) - - * this's how we handling NULLs - and so we must not use - * _bt_compare in real comparison, but only for - * ordering/finding items on pages. - vadim 03/24/97 + * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's + * how we handling NULLs - and so we must not use _bt_compare + * in real comparison, but only for ordering/finding items on + * pages. 
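The reasoning in the _bt_check_unique hunk above, namely that the cheap "killed" flag should be tested before the expensive key comparison because heavy update activity can leave long runs of dead entries for the same key, fits in a few lines. This is a standalone illustration with toy structures, not the btree code itself.

#include <stdio.h>

struct item { int key; int killed; };

static long ncompares;

/* Stand-in for the expensive equality test we want to avoid. */
static int keys_equal(int a, int b)
{
    ncompares++;
    return a == b;
}

/* Scan a run of items for a live duplicate of 'key'.  The killed flag is
 * checked first, so dead versions of the key never reach keys_equal(). */
static int find_live_duplicate(const struct item *items, int n, int key)
{
    for (int i = 0; i < n; i++) {
        if (items[i].killed)
            continue;                   /* cheap test, no comparison */
        if (!keys_equal(items[i].key, key))
            break;                      /* past all the equal items */
        return i;                       /* live duplicate found */
    }
    return -1;
}

int main(void)
{
    /* Heavy update activity: many killed versions of key 42, one live one. */
    struct item items[] = {
        {42, 1}, {42, 1}, {42, 1}, {42, 1}, {42, 1}, {42, 0}, {57, 0}
    };

    int where = find_live_duplicate(items, 7, 42);
    printf("live duplicate at %d after %ld comparison(s)\n", where, ncompares);
    return 0;
}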
- vadim 03/24/97 */ if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey)) break; /* we're past all the equal tuples */ @@ -246,15 +245,15 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel, */ ereport(ERROR, (errcode(ERRCODE_UNIQUE_VIOLATION), - errmsg("duplicate key violates unique constraint \"%s\"", - RelationGetRelationName(rel)))); + errmsg("duplicate key violates unique constraint \"%s\"", + RelationGetRelationName(rel)))); } else if (htup.t_data != NULL) { /* - * Hmm, if we can't see the tuple, maybe it can be - * marked killed. This logic should match - * index_getnext and btgettuple. + * Hmm, if we can't see the tuple, maybe it can be marked + * killed. This logic should match index_getnext and + * btgettuple. */ LockBuffer(hbuffer, BUFFER_LOCK_SHARE); if (HeapTupleSatisfiesVacuum(htup.t_data, RecentGlobalXmin, @@ -377,15 +376,15 @@ _bt_insertonpg(Relation rel, itemsz = IndexTupleDSize(btitem->bti_itup) + (sizeof(BTItemData) - sizeof(IndexTupleData)); - itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but - * we need to be consistent */ + itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we + * need to be consistent */ /* - * Check whether the item can fit on a btree page at all. (Eventually, - * we ought to try to apply TOAST methods if not.) We actually need to - * be able to fit three items on every page, so restrict any one item - * to 1/3 the per-page available space. Note that at this point, - * itemsz doesn't include the ItemId. + * Check whether the item can fit on a btree page at all. (Eventually, we + * ought to try to apply TOAST methods if not.) We actually need to be + * able to fit three items on every page, so restrict any one item to 1/3 + * the per-page available space. Note that at this point, itemsz doesn't + * include the ItemId. */ if (itemsz > BTMaxItemSize(page)) ereport(ERROR, @@ -393,9 +392,9 @@ _bt_insertonpg(Relation rel, errmsg("index row size %lu exceeds btree maximum, %lu", (unsigned long) itemsz, (unsigned long) BTMaxItemSize(page)), - errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n" - "Consider a function index of an MD5 hash of the value, " - "or use full text indexing."))); + errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n" + "Consider a function index of an MD5 hash of the value, " + "or use full text indexing."))); /* * Determine exactly where new item will go. @@ -432,11 +431,11 @@ _bt_insertonpg(Relation rel, /* * step right to next non-dead page * - * must write-lock that page before releasing write lock on - * current page; else someone else's _bt_check_unique scan - * could fail to see our insertion. write locks on - * intermediate dead pages won't do because we don't know when - * they will get de-linked from the tree. + * must write-lock that page before releasing write lock on current + * page; else someone else's _bt_check_unique scan could fail to + * see our insertion. write locks on intermediate dead pages + * won't do because we don't know when they will get de-linked + * from the tree. */ Buffer rbuf = InvalidBuffer; @@ -459,9 +458,9 @@ _bt_insertonpg(Relation rel, } /* - * Now we are on the right page, so find the insert position. If - * we moved right at all, we know we should insert at the start of - * the page, else must find the position by searching. + * Now we are on the right page, so find the insert position. 
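Two size rules appear in the _bt_insertonpg hunk above: the item size is rounded up with MAXALIGN for consistency with what PageAddItem will do, and any single item larger than roughly a third of the usable page space is rejected so that three items always fit on a page. A self-contained sketch of both checks follows; the alignment, page size and overhead constants are illustrative stand-ins rather than the real MAXIMUM_ALIGNOF, BLCKSZ and BTMaxItemSize values.

#include <stdio.h>

#define ALIGNOF 8               /* stand-in for MAXIMUM_ALIGNOF */
#define PAGE_SIZE 8192          /* stand-in for BLCKSZ */
#define PAGE_OVERHEAD 64        /* pretend page header + special space */

/* Round up to the next multiple of ALIGNOF, like MAXALIGN(). */
static size_t max_align(size_t len)
{
    return (len + ALIGNOF - 1) & ~((size_t) (ALIGNOF - 1));
}

/* Largest single item we accept: a third of the usable space, so that any
 * page can always hold at least three items. */
static size_t max_item_size(void)
{
    return (PAGE_SIZE - PAGE_OVERHEAD) / 3;
}

int main(void)
{
    size_t sizes[] = {13, 300, 2700, 3000};

    for (int i = 0; i < 4; i++) {
        size_t itemsz = max_align(sizes[i]);
        printf("raw %4zu -> aligned %4zu: %s\n",
               sizes[i], itemsz,
               itemsz > max_item_size() ? "too large to index" : "fits");
    }
    return 0;
}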
If we + * moved right at all, we know we should insert at the start of the + * page, else must find the position by searching. */ if (movedright) newitemoff = P_FIRSTDATAKEY(lpageop); @@ -472,9 +471,9 @@ _bt_insertonpg(Relation rel, /* * Do we need to split the page to fit the item on it? * - * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, - * so this comparison is correct even though we appear to be - * accounting only for the item and not for its line pointer. + * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, so + * this comparison is correct even though we appear to be accounting only + * for the item and not for its line pointer. */ if (PageGetFreeSpace(page) < itemsz) { @@ -522,12 +521,11 @@ _bt_insertonpg(Relation rel, itup_blkno = BufferGetBlockNumber(buf); /* - * If we are doing this insert because we split a page that was - * the only one on its tree level, but was not the root, it may - * have been the "fast root". We need to ensure that the fast - * root link points at or above the current page. We can safely - * acquire a lock on the metapage here --- see comments for - * _bt_newroot(). + * If we are doing this insert because we split a page that was the + * only one on its tree level, but was not the root, it may have been + * the "fast root". We need to ensure that the fast root link points + * at or above the current page. We can safely acquire a lock on the + * metapage here --- see comments for _bt_newroot(). */ if (split_only_page) { @@ -692,11 +690,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, lopaque->btpo.level = ropaque->btpo.level = oopaque->btpo.level; /* - * If the page we're splitting is not the rightmost page at its level - * in the tree, then the first entry on the page is the high key for - * the page. We need to copy that to the right half. Otherwise - * (meaning the rightmost page case), all the items on the right half - * will be user data. + * If the page we're splitting is not the rightmost page at its level in + * the tree, then the first entry on the page is the high key for the + * page. We need to copy that to the right half. Otherwise (meaning the + * rightmost page case), all the items on the right half will be user + * data. */ rightoff = P_HIKEY; @@ -712,9 +710,9 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, } /* - * The "high key" for the new left page will be the first key that's - * going to go into the new right page. This might be either the - * existing data item at position firstright, or the incoming tuple. + * The "high key" for the new left page will be the first key that's going + * to go into the new right page. This might be either the existing data + * item at position firstright, or the incoming tuple. */ leftoff = P_HIKEY; if (!newitemonleft && newitemoff == firstright) @@ -806,8 +804,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, /* * We have to grab the right sibling (if any) and fix the prev pointer * there. We are guaranteed that this is deadlock-free since no other - * writer will be holding a lock on that page and trying to move left, - * and all readers release locks on a page before trying to fetch its + * writer will be holding a lock on that page and trying to move left, and + * all readers release locks on a page before trying to fetch its * neighbors. 
*/ @@ -821,8 +819,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, } /* - * Right sibling is locked, new siblings are prepared, but original - * page is not updated yet. Log changes before continuing. + * Right sibling is locked, new siblings are prepared, but original page + * is not updated yet. Log changes before continuing. * * NO EREPORT(ERROR) till right sibling is updated. */ @@ -850,10 +848,10 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, xlrec.level = lopaque->btpo.level; /* - * Direct access to page is not good but faster - we should - * implement some new func in page API. Note we only store the - * tuples themselves, knowing that the item pointers are in the - * same order and can be reconstructed by scanning the tuples. + * Direct access to page is not good but faster - we should implement + * some new func in page API. Note we only store the tuples + * themselves, knowing that the item pointers are in the same order + * and can be reconstructed by scanning the tuples. */ xlrec.leftlen = ((PageHeader) leftpage)->pd_special - ((PageHeader) leftpage)->pd_upper; @@ -903,13 +901,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, } /* - * By here, the original data page has been split into two new halves, - * and these are correct. The algorithm requires that the left page - * never move during a split, so we copy the new left page back on top - * of the original. Note that this is not a waste of time, since we - * also require (in the page management code) that the center of a - * page always be clean, and the most efficient way to guarantee this - * is just to compact the data by reinserting it into a new left page. + * By here, the original data page has been split into two new halves, and + * these are correct. The algorithm requires that the left page never + * move during a split, so we copy the new left page back on top of the + * original. Note that this is not a waste of time, since we also require + * (in the page management code) that the center of a page always be + * clean, and the most efficient way to guarantee this is just to compact + * the data by reinserting it into a new left page. */ PageRestoreTempPage(leftpage, origpage); @@ -984,13 +982,13 @@ _bt_findsplitloc(Relation rel, MAXALIGN(sizeof(BTPageOpaqueData)); /* - * Finding the best possible split would require checking all the - * possible split points, because of the high-key and left-key special - * cases. That's probably more work than it's worth; instead, stop as - * soon as we find a "good-enough" split, where good-enough is defined - * as an imbalance in free space of no more than pagesize/16 - * (arbitrary...) This should let us stop near the middle on most - * pages, instead of plowing to the end. + * Finding the best possible split would require checking all the possible + * split points, because of the high-key and left-key special cases. + * That's probably more work than it's worth; instead, stop as soon as we + * find a "good-enough" split, where good-enough is defined as an + * imbalance in free space of no more than pagesize/16 (arbitrary...) This + * should let us stop near the middle on most pages, instead of plowing to + * the end. */ goodenough = leftspace / 16; @@ -1006,8 +1004,8 @@ _bt_findsplitloc(Relation rel, dataitemtotal = rightspace - (int) PageGetFreeSpace(page); /* - * Scan through the data items and calculate space usage for a split - * at each possible position. 
+ * Scan through the data items and calculate space usage for a split at + * each possible position. */ dataitemstoleft = 0; maxoff = PageGetMaxOffsetNumber(page); @@ -1024,9 +1022,9 @@ _bt_findsplitloc(Relation rel, itemsz = MAXALIGN(ItemIdGetLength(itemid)) + sizeof(ItemIdData); /* - * We have to allow for the current item becoming the high key of - * the left page; therefore it counts against left space as well - * as right space. + * We have to allow for the current item becoming the high key of the + * left page; therefore it counts against left space as well as right + * space. */ leftfree = leftspace - dataitemstoleft - (int) itemsz; rightfree = rightspace - (dataitemtotal - dataitemstoleft); @@ -1058,8 +1056,8 @@ _bt_findsplitloc(Relation rel, } /* - * I believe it is not possible to fail to find a feasible split, but - * just in case ... + * I believe it is not possible to fail to find a feasible split, but just + * in case ... */ if (!state.have_split) elog(ERROR, "could not find a feasible split point for \"%s\"", @@ -1105,8 +1103,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright, { /* * On a rightmost page, try to equalize right free space with - * twice the left free space. See comments for - * _bt_findsplitloc. + * twice the left free space. See comments for _bt_findsplitloc. */ delta = (2 * leftfree) - rightfree; } @@ -1153,19 +1150,18 @@ _bt_insert_parent(Relation rel, bool is_only) { /* - * Here we have to do something Lehman and Yao don't talk about: deal - * with a root split and construction of a new root. If our stack is - * empty then we have just split a node on what had been the root - * level when we descended the tree. If it was still the root then we - * perform a new-root construction. If it *wasn't* the root anymore, - * search to find the next higher level that someone constructed - * meanwhile, and find the right place to insert as for the normal - * case. + * Here we have to do something Lehman and Yao don't talk about: deal with + * a root split and construction of a new root. If our stack is empty + * then we have just split a node on what had been the root level when we + * descended the tree. If it was still the root then we perform a + * new-root construction. If it *wasn't* the root anymore, search to find + * the next higher level that someone constructed meanwhile, and find the + * right place to insert as for the normal case. * - * If we have to search for the parent level, we do so by re-descending - * from the root. This is not super-efficient, but it's rare enough - * not to matter. (This path is also taken when called from WAL - * recovery --- we have no stack in that case.) + * If we have to search for the parent level, we do so by re-descending from + * the root. This is not super-efficient, but it's rare enough not to + * matter. (This path is also taken when called from WAL recovery --- we + * have no stack in that case.) */ if (is_root) { @@ -1219,9 +1215,9 @@ _bt_insert_parent(Relation rel, /* * Find the parent buffer and get the parent page. * - * Oops - if we were moved right then we need to change stack item! - * We want to find parent pointing to where we are, right ? - - * vadim 05/27/97 + * Oops - if we were moved right then we need to change stack item! We + * want to find parent pointing to where we are, right ? 
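The _bt_findsplitloc/_bt_checksplitloc comments above describe the split heuristic: compute the would-be free space on each side for successive split positions, accept the first one whose imbalance is within about a sixteenth of the page, and on a rightmost page prefer to leave roughly twice as much free room on the right, where future insertions will land. The sketch below reproduces that shape over a toy array of item sizes; it ignores line-pointer overhead and the high-key accounting, and all numbers are invented.

#include <stdio.h>

#define PAGE_FREE 8000          /* usable space on either half, toy number */

/* Pick a split position: items [0, split) go left, [split, n) go right.
 * Stop at the first "good enough" split instead of scanning everything. */
static int find_split(const int *itemsz, int n, int rightmost)
{
    int total = 0;
    for (int i = 0; i < n; i++)
        total += itemsz[i];

    int goodenough = PAGE_FREE / 16;
    int best = -1, bestdelta = 0, left = 0;

    for (int split = 1; split < n; split++) {
        left += itemsz[split - 1];
        int leftfree = PAGE_FREE - left;
        int rightfree = PAGE_FREE - (total - left);

        /* On a rightmost page, aim for rightfree ~= 2 * leftfree;
         * otherwise aim for an even split. */
        int delta = rightmost ? (2 * leftfree) - rightfree
                              : leftfree - rightfree;
        if (delta < 0)
            delta = -delta;

        if (best < 0 || delta < bestdelta) {
            best = split;
            bestdelta = delta;
        }
        if (delta <= goodenough)
            break;              /* close enough to balanced, stop looking */
    }
    return best;
}

int main(void)
{
    int itemsz[] = {900, 1200, 400, 800, 1500, 700, 600, 1100};
    int n = 8;

    printf("split before item %d (non-rightmost)\n", find_split(itemsz, n, 0));
    printf("split before item %d (rightmost)\n", find_split(itemsz, n, 1));
    return 0;
}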
- vadim + * 05/27/97 */ ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid), bknum, P_HIKEY); @@ -1291,9 +1287,9 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) maxoff = PageGetMaxOffsetNumber(page); /* - * start = InvalidOffsetNumber means "search the whole page". - * We need this test anyway due to possibility that page has a - * high key now when it didn't before. + * start = InvalidOffsetNumber means "search the whole page". We + * need this test anyway due to possibility that page has a high + * key now when it didn't before. */ if (start < minoff) start = minoff; @@ -1307,8 +1303,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) /* * These loops will check every item on the page --- but in an - * order that's attuned to the probability of where it - * actually is. Scan to the right first, then to the left. + * order that's attuned to the probability of where it actually + * is. Scan to the right first, then to the left. */ for (offnum = start; offnum <= maxoff; @@ -1424,9 +1420,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) metad->btm_fastlevel = rootopaque->btpo.level; /* - * Create downlink item for left page (old root). Since this will be - * the first item in a non-leaf page, it implicitly has minus-infinity - * key value, so we need not store any actual key in it. + * Create downlink item for left page (old root). Since this will be the + * first item in a non-leaf page, it implicitly has minus-infinity key + * value, so we need not store any actual key in it. */ itemsz = sizeof(BTItemData); new_item = (BTItem) palloc(itemsz); @@ -1434,17 +1430,17 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) ItemPointerSet(&(new_item->bti_itup.t_tid), lbkno, P_HIKEY); /* - * Insert the left page pointer into the new root page. The root page - * is the rightmost page on its level so there is no "high key" in it; - * the two items will go into positions P_HIKEY and P_FIRSTKEY. + * Insert the left page pointer into the new root page. The root page is + * the rightmost page on its level so there is no "high key" in it; the + * two items will go into positions P_HIKEY and P_FIRSTKEY. */ if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber) elog(PANIC, "failed to add leftkey to new root page"); pfree(new_item); /* - * Create downlink item for right page. The key for it is obtained - * from the "high key" position in the left page. + * Create downlink item for right page. The key for it is obtained from + * the "high key" position in the left page. */ itemid = PageGetItemId(lpage, P_HIKEY); itemsz = ItemIdGetLength(itemid); @@ -1476,8 +1472,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) rdata[0].next = &(rdata[1]); /* - * Direct access to page is not good but faster - we should - * implement some new func in page API. + * Direct access to page is not good but faster - we should implement + * some new func in page API. 
*/ rdata[1].data = (char *) rootpage + ((PageHeader) rootpage)->pd_upper; rdata[1].len = ((PageHeader) rootpage)->pd_special - diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 52d60abaec0..927860030c8 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.87 2005/08/12 14:34:14 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.88 2005/10/15 02:49:09 momjian Exp $ * * NOTES * Postgres btree pages look like ordinary relation pages. The opaque @@ -115,8 +115,8 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) metaopaque->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not - * essential but it makes the page look compressible to xlog.c. + * Set pd_lower just past the end of the metadata. This is not essential + * but it makes the page look compressible to xlog.c. */ ((PageHeader) page)->pd_lower = ((char *) metad + sizeof(BTMetaPageData)) - (char *) page; @@ -198,26 +198,26 @@ _bt_getroot(Relation rel, int access) LockBuffer(metabuf, BT_WRITE); /* - * Race condition: if someone else initialized the metadata - * between the time we released the read lock and acquired the - * write lock, we must avoid doing it again. + * Race condition: if someone else initialized the metadata between + * the time we released the read lock and acquired the write lock, we + * must avoid doing it again. */ if (metad->btm_root != P_NONE) { /* - * Metadata initialized by someone else. In order to - * guarantee no deadlocks, we have to release the metadata - * page and start all over again. (Is that really true? But - * it's hardly worth trying to optimize this case.) + * Metadata initialized by someone else. In order to guarantee no + * deadlocks, we have to release the metadata page and start all + * over again. (Is that really true? But it's hardly worth trying + * to optimize this case.) */ _bt_relbuf(rel, metabuf); return _bt_getroot(rel, access); } /* - * Get, initialize, write, and leave a lock of the appropriate - * type on the new root page. Since this is the first page in the - * tree, it's a leaf as well as the root. + * Get, initialize, write, and leave a lock of the appropriate type on + * the new root page. Since this is the first page in the tree, it's + * a leaf as well as the root. */ rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE); rootblkno = BufferGetBlockNumber(rootbuf); @@ -266,9 +266,9 @@ _bt_getroot(Relation rel, int access) _bt_wrtnorelbuf(rel, rootbuf); /* - * swap root write lock for read lock. There is no danger of - * anyone else accessing the new root page while it's unlocked, - * since no one else knows where it is yet. + * swap root write lock for read lock. There is no danger of anyone + * else accessing the new root page while it's unlocked, since no one + * else knows where it is yet. */ LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK); LockBuffer(rootbuf, BT_READ); @@ -312,8 +312,8 @@ _bt_getroot(Relation rel, int access) } /* - * By here, we have a pin and read lock on the root page, and no lock - * set on the metadata page. Return the root page's buffer. + * By here, we have a pin and read lock on the root page, and no lock set + * on the metadata page. Return the root page's buffer. */ return rootbuf; } @@ -435,27 +435,26 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) /* * First see if the FSM knows of any free pages. 
* - * We can't trust the FSM's report unreservedly; we have to check - * that the page is still free. (For example, an already-free - * page could have been re-used between the time the last VACUUM - * scanned it and the time the VACUUM made its FSM updates.) + * We can't trust the FSM's report unreservedly; we have to check that + * the page is still free. (For example, an already-free page could + * have been re-used between the time the last VACUUM scanned it and + * the time the VACUUM made its FSM updates.) * - * In fact, it's worse than that: we can't even assume that it's safe - * to take a lock on the reported page. If somebody else has a - * lock on it, or even worse our own caller does, we could - * deadlock. (The own-caller scenario is actually not improbable. - * Consider an index on a serial or timestamp column. Nearly all - * splits will be at the rightmost page, so it's entirely likely - * that _bt_split will call us while holding a lock on the page - * most recently acquired from FSM. A VACUUM running concurrently - * with the previous split could well have placed that page back - * in FSM.) + * In fact, it's worse than that: we can't even assume that it's safe to + * take a lock on the reported page. If somebody else has a lock on + * it, or even worse our own caller does, we could deadlock. (The + * own-caller scenario is actually not improbable. Consider an index + * on a serial or timestamp column. Nearly all splits will be at the + * rightmost page, so it's entirely likely that _bt_split will call us + * while holding a lock on the page most recently acquired from FSM. + * A VACUUM running concurrently with the previous split could well + * have placed that page back in FSM.) * - * To get around that, we ask for only a conditional lock on the - * reported page. If we fail, then someone else is using the - * page, and we may reasonably assume it's not free. (If we - * happen to be wrong, the worst consequence is the page will be - * lost to use till the next VACUUM, which is no big problem.) + * To get around that, we ask for only a conditional lock on the reported + * page. If we fail, then someone else is using the page, and we may + * reasonably assume it's not free. (If we happen to be wrong, the + * worst consequence is the page will be lost to use till the next + * VACUUM, which is no big problem.) */ for (;;) { @@ -486,10 +485,10 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) /* * Extend the relation by one page. * - * We have to use a lock to ensure no one else is extending the rel - * at the same time, else we will both try to initialize the same - * new page. We can skip locking for new or temp relations, - * however, since no one else could be accessing them. + * We have to use a lock to ensure no one else is extending the rel at + * the same time, else we will both try to initialize the same new + * page. We can skip locking for new or temp relations, however, + * since no one else could be accessing them. */ needLock = !RELATION_IS_LOCAL(rel); @@ -504,8 +503,8 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) /* * Release the file-extension lock; it's now OK for someone else to * extend the relation some more. Note that we cannot release this - * lock before we have buffer lock on the new page, or we risk a - * race condition against btvacuumcleanup --- see comments therein. + * lock before we have buffer lock on the new page, or we risk a race + * condition against btvacuumcleanup --- see comments therein. 
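The deadlock discussion in the _bt_getbuf hunk above boils down to: never wait for a lock on a page the FSM merely claims is free. A standalone pthread-based analogy (buffer locks are not pthread mutexes, and every name below is invented) shows the try-don't-wait pattern, where a busy or stale page is simply treated as not free.

#include <stdio.h>
#include <pthread.h>

#define NPAGES 4

static pthread_mutex_t page_lock[NPAGES] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static int page_is_free[NPAGES] = {0, 1, 0, 1};   /* what the "FSM" claims */

/* Try to claim a page the free-space map reported.  Never block: if the
 * conditional lock fails, someone (possibly our own caller) holds the page
 * and waiting could deadlock, so just report failure. */
static int try_claim(int blkno)
{
    if (pthread_mutex_trylock(&page_lock[blkno]) != 0)
        return 0;                       /* in use, pretend it isn't free */
    if (!page_is_free[blkno]) {
        pthread_mutex_unlock(&page_lock[blkno]);
        return 0;                       /* the free-space info was stale */
    }
    page_is_free[blkno] = 0;            /* it's ours now */
    return 1;
}

int main(void)
{
    /* Simulate our own caller already holding a lock on page 1. */
    pthread_mutex_lock(&page_lock[1]);

    for (int blkno = 0; blkno < NPAGES; blkno++)
        printf("page %d: %s\n", blkno,
               try_claim(blkno) ? "claimed" : "skipped (extend the file instead)");

    pthread_mutex_unlock(&page_lock[1]);
    return 0;
}

If no reported page can be claimed this way, the caller falls back to extending the relation by one page, which is the same fallback the routine above takes.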
*/ if (needLock) UnlockRelationForExtension(rel, ExclusiveLock); @@ -614,10 +613,10 @@ _bt_page_recyclable(Page page) BTPageOpaque opaque; /* - * It's possible to find an all-zeroes page in an index --- for - * example, a backend might successfully extend the relation one page - * and then crash before it is able to make a WAL entry for adding the - * page. If we find a zeroed page then reclaim it. + * It's possible to find an all-zeroes page in an index --- for example, a + * backend might successfully extend the relation one page and then crash + * before it is able to make a WAL entry for adding the page. If we find a + * zeroed page then reclaim it. */ if (PageIsNew(page)) return true; @@ -672,9 +671,9 @@ _bt_delitems(Relation rel, Buffer buf, rdata[0].next = &(rdata[1]); /* - * The target-offsets array is not in the buffer, but pretend that - * it is. When XLogInsert stores the whole buffer, the offsets - * array need not be stored too. + * The target-offsets array is not in the buffer, but pretend that it + * is. When XLogInsert stores the whole buffer, the offsets array + * need not be stored too. */ if (nitems > 0) { @@ -747,8 +746,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) BTPageOpaque opaque; /* - * We can never delete rightmost pages nor root pages. While at it, - * check that page is not already deleted and is empty. + * We can never delete rightmost pages nor root pages. While at it, check + * that page is not already deleted and is empty. */ page = BufferGetPage(buf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -760,8 +759,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) } /* - * Save info about page, including a copy of its high key (it must - * have one, being non-rightmost). + * Save info about page, including a copy of its high key (it must have + * one, being non-rightmost). */ target = BufferGetBlockNumber(buf); targetlevel = opaque->btpo.level; @@ -770,11 +769,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid)); /* - * We need to get an approximate pointer to the page's parent page. - * Use the standard search mechanism to search for the page's high - * key; this will give us a link to either the current parent or - * someplace to its left (if there are multiple equal high keys). To - * avoid deadlocks, we'd better drop the target page lock first. + * We need to get an approximate pointer to the page's parent page. Use + * the standard search mechanism to search for the page's high key; this + * will give us a link to either the current parent or someplace to its + * left (if there are multiple equal high keys). To avoid deadlocks, we'd + * better drop the target page lock first. */ _bt_relbuf(rel, buf); /* we need a scan key to do our search, so build one */ @@ -786,9 +785,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) _bt_relbuf(rel, lbuf); /* - * If we are trying to delete an interior page, _bt_search did more - * than we needed. Locate the stack item pointing to our parent - * level. + * If we are trying to delete an interior page, _bt_search did more than + * we needed. Locate the stack item pointing to our parent level. */ ilevel = 0; for (;;) @@ -803,16 +801,15 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) /* * We have to lock the pages we need to modify in the standard order: - * moving right, then up. Else we will deadlock against other - * writers. + * moving right, then up. Else we will deadlock against other writers. 
* - * So, we need to find and write-lock the current left sibling of the - * target page. The sibling that was current a moment ago could have - * split, so we may have to move right. This search could fail if - * either the sibling or the target page was deleted by someone else - * meanwhile; if so, give up. (Right now, that should never happen, - * since page deletion is only done in VACUUM and there shouldn't be - * multiple VACUUMs concurrently on the same table.) + * So, we need to find and write-lock the current left sibling of the target + * page. The sibling that was current a moment ago could have split, so + * we may have to move right. This search could fail if either the + * sibling or the target page was deleted by someone else meanwhile; if + * so, give up. (Right now, that should never happen, since page deletion + * is only done in VACUUM and there shouldn't be multiple VACUUMs + * concurrently on the same table.) */ if (leftsib != P_NONE) { @@ -839,19 +836,18 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) lbuf = InvalidBuffer; /* - * Next write-lock the target page itself. It should be okay to take - * just a write lock not a superexclusive lock, since no scans would - * stop on an empty page. + * Next write-lock the target page itself. It should be okay to take just + * a write lock not a superexclusive lock, since no scans would stop on an + * empty page. */ buf = _bt_getbuf(rel, target, BT_WRITE); page = BufferGetPage(buf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* - * Check page is still empty etc, else abandon deletion. The empty - * check is necessary since someone else might have inserted into it - * while we didn't have it locked; the others are just for paranoia's - * sake. + * Check page is still empty etc, else abandon deletion. The empty check + * is necessary since someone else might have inserted into it while we + * didn't have it locked; the others are just for paranoia's sake. */ if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) || P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page)) @@ -872,9 +868,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) rbuf = _bt_getbuf(rel, rightsib, BT_WRITE); /* - * Next find and write-lock the current parent of the target page. - * This is essentially the same as the corresponding step of - * splitting. + * Next find and write-lock the current parent of the target page. This is + * essentially the same as the corresponding step of splitting. */ ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid), target, P_HIKEY); @@ -887,8 +882,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) /* * If the target is the rightmost child of its parent, then we can't - * delete, unless it's also the only child --- in which case the - * parent changes to half-dead status. + * delete, unless it's also the only child --- in which case the parent + * changes to half-dead status. */ page = BufferGetPage(pbuf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -917,11 +912,10 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) } /* - * If we are deleting the next-to-last page on the target's level, - * then the rightsib is a candidate to become the new fast root. (In - * theory, it might be possible to push the fast root even further - * down, but the odds of doing so are slim, and the locking - * considerations daunting.) + * If we are deleting the next-to-last page on the target's level, then + * the rightsib is a candidate to become the new fast root. 
(In theory, it + * might be possible to push the fast root even further down, but the odds + * of doing so are slim, and the locking considerations daunting.) * * We can safely acquire a lock on the metapage here --- see comments for * _bt_newroot(). @@ -939,9 +933,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) metad = BTPageGetMeta(metapg); /* - * The expected case here is btm_fastlevel == targetlevel+1; - * if the fastlevel is <= targetlevel, something is wrong, and - * we choose to overwrite it to fix it. + * The expected case here is btm_fastlevel == targetlevel+1; if + * the fastlevel is <= targetlevel, something is wrong, and we + * choose to overwrite it to fix it. */ if (metad->btm_fastlevel > targetlevel + 1) { @@ -961,9 +955,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) /* * Update parent. The normal case is a tad tricky because we want to - * delete the target's downlink and the *following* key. Easiest way - * is to copy the right sibling's downlink over the target downlink, - * and then delete the following item. + * delete the target's downlink and the *following* key. Easiest way is + * to copy the right sibling's downlink over the target downlink, and then + * delete the following item. */ page = BufferGetPage(pbuf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -992,8 +986,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) } /* - * Update siblings' side-links. Note the target page's side-links - * will continue to point to the siblings. + * Update siblings' side-links. Note the target page's side-links will + * continue to point to the siblings. */ if (BufferIsValid(lbuf)) { @@ -1123,10 +1117,10 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full) _bt_wrtbuf(rel, lbuf); /* - * If parent became half dead, recurse to try to delete it. Otherwise, - * if right sibling is empty and is now the last child of the parent, - * recurse to try to delete it. (These cases cannot apply at the same - * time, though the second case might itself recurse to the first.) + * If parent became half dead, recurse to try to delete it. Otherwise, if + * right sibling is empty and is now the last child of the parent, recurse + * to try to delete it. (These cases cannot apply at the same time, + * though the second case might itself recurse to the first.) */ if (parent_half_dead) { diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index d4232c847f8..10e2fe6190d 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -12,7 +12,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.131 2005/09/02 19:02:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.132 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -39,9 +39,9 @@ typedef struct BTSpool *spool; /* - * spool2 is needed only when the index is an unique index. Dead - * tuples are put into spool2 instead of spool in order to avoid - * uniqueness check. + * spool2 is needed only when the index is an unique index. Dead tuples + * are put into spool2 instead of spool in order to avoid uniqueness + * check. */ BTSpool *spool2; double indtuples; @@ -72,10 +72,10 @@ btbuild(PG_FUNCTION_ARGS) BTBuildState buildstate; /* - * bootstrap processing does something strange, so don't use - * sort/build for initial catalog indices. 
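The parent-update trick in the _bt_pagedel hunk above, copying the right sibling's downlink over the target's downlink and then deleting the following entry, is easy to see on a toy array of (separator key, child) pairs. This is only a sketch of the pointer shuffling; it leaves out locking, WAL and half-dead pages, and the structures are invented.

#include <stdio.h>

struct downlink { int sepkey; int child; };

/* Delete child page 'target' from a parent: the entry currently pointing at
 * target is redirected to the right sibling, and the right sibling's own
 * (now redundant) entry is removed.  Returns the new entry count. */
static int delete_downlink(struct downlink *dl, int n, int target)
{
    for (int i = 0; i < n - 1; i++) {
        if (dl[i].child != target)
            continue;
        dl[i].child = dl[i + 1].child;          /* adopt the right sibling */
        for (int j = i + 1; j < n - 1; j++)     /* delete the following entry */
            dl[j] = dl[j + 1];
        return n - 1;
    }
    return n;                                   /* not found or rightmost */
}

int main(void)
{
    struct downlink parent[] = {
        {10, 100}, {20, 200}, {30, 300}, {40, 400}
    };
    int n = 4;

    n = delete_downlink(parent, n, 200);        /* drop child page 200 */

    for (int i = 0; i < n; i++)
        printf("sepkey %d -> child %d\n", parent[i].sepkey, parent[i].child);
    return 0;
}

After the call, the entry that used to lead to the deleted page keeps its separator key but points at the right sibling, so the sibling quietly inherits the deleted page's key space.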
at some point i need to - * look harder at this. (there is some kind of incremental processing - * going on there.) -- pma 08/29/95 + * bootstrap processing does something strange, so don't use sort/build + * for initial catalog indices. at some point i need to look harder at + * this. (there is some kind of incremental processing going on there.) + * -- pma 08/29/95 */ buildstate.usefast = (FastBuild && IsNormalProcessingMode()); buildstate.isUnique = indexInfo->ii_Unique; @@ -91,8 +91,8 @@ btbuild(PG_FUNCTION_ARGS) #endif /* BTREE_BUILD_STATS */ /* - * We expect to be called exactly once for any index relation. If - * that's not the case, big trouble's what we have. + * We expect to be called exactly once for any index relation. If that's + * not the case, big trouble's what we have. */ if (RelationGetNumberOfBlocks(index) != 0) elog(ERROR, "index \"%s\" already contains data", @@ -103,8 +103,8 @@ btbuild(PG_FUNCTION_ARGS) buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique, false); /* - * If building a unique index, put dead tuples in a second spool - * to keep them out of the uniqueness check. + * If building a unique index, put dead tuples in a second spool to + * keep them out of the uniqueness check. */ if (indexInfo->ii_Unique) buildstate.spool2 = _bt_spoolinit(index, false, true); @@ -129,8 +129,8 @@ btbuild(PG_FUNCTION_ARGS) /* * if we are doing bottom-up btree build, finish the build by (1) - * completing the sort of the spool file, (2) inserting the sorted - * tuples into btree pages and (3) building the upper levels. + * completing the sort of the spool file, (2) inserting the sorted tuples + * into btree pages and (3) building the upper levels. */ if (buildstate.usefast) { @@ -176,9 +176,8 @@ btbuildCallback(Relation index, btitem = _bt_formitem(itup); /* - * if we are doing bottom-up btree build, we insert the index into a - * spool file for subsequent processing. otherwise, we insert into - * the btree. + * if we are doing bottom-up btree build, we insert the index into a spool + * file for subsequent processing. otherwise, we insert into the btree. */ if (buildstate->usefast) { @@ -248,16 +247,16 @@ btgettuple(PG_FUNCTION_ARGS) bool res; /* - * If we've already initialized this scan, we can just advance it in - * the appropriate direction. If we haven't done so yet, we call a - * routine to get the first item in the scan. + * If we've already initialized this scan, we can just advance it in the + * appropriate direction. If we haven't done so yet, we call a routine to + * get the first item in the scan. */ if (ItemPointerIsValid(&(scan->currentItemData))) { /* - * Restore scan position using heap TID returned by previous call - * to btgettuple(). _bt_restscan() re-grabs the read lock on the - * buffer, too. + * Restore scan position using heap TID returned by previous call to + * btgettuple(). _bt_restscan() re-grabs the read lock on the buffer, + * too. */ _bt_restscan(scan); @@ -267,17 +266,16 @@ btgettuple(PG_FUNCTION_ARGS) if (scan->kill_prior_tuple) { /* - * Yes, so mark it by setting the LP_DELETE bit in the item - * flags. + * Yes, so mark it by setting the LP_DELETE bit in the item flags. */ offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData)); page = BufferGetPage(so->btso_curbuf); PageGetItemId(page, offnum)->lp_flags |= LP_DELETE; /* - * Since this can be redone later if needed, it's treated the - * same as a commit-hint-bit status update for heap tuples: we - * mark the buffer dirty but don't make a WAL log entry. 
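The kill_prior_tuple handling above relies on the fact that losing an LP_DELETE hint is harmless: the bit only tells future scans they may skip the entry, and it can always be set again. Hence the buffer is dirtied but no WAL record is written, just like a commit hint bit on a heap tuple. A minimal sketch of that idea, with invented names:

#include <stdio.h>

#define LP_DELETE 0x01          /* illustrative flag bit */

struct item   { int key; unsigned flags; };
struct buffer { struct item items[4]; int is_dirty; int wal_records; };

/* Mark an index entry as known-dead.  This is only a hint: if the change is
 * lost in a crash, a later scan simply rediscovers that the entry is dead.
 * So we dirty the buffer (it will be written out eventually) but
 * deliberately emit no WAL record. */
static void mark_killed(struct buffer *buf, int off)
{
    buf->items[off].flags |= LP_DELETE;
    buf->is_dirty = 1;
    /* no buf->wal_records++ here, on purpose */
}

int main(void)
{
    struct buffer buf = {{{1, 0}, {2, 0}, {3, 0}, {4, 0}}, 0, 0};

    mark_killed(&buf, 2);

    printf("item 2 flags = 0x%x, dirty = %d, WAL records = %d\n",
           buf.items[2].flags, buf.is_dirty, buf.wal_records);
    return 0;
}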
+ * Since this can be redone later if needed, it's treated the same + * as a commit-hint-bit status update for heap tuples: we mark the + * buffer dirty but don't make a WAL log entry. */ SetBufferCommitInfoNeedsSave(so->btso_curbuf); } @@ -306,11 +304,11 @@ btgettuple(PG_FUNCTION_ARGS) } /* - * Save heap TID to use it in _bt_restscan. Then release the read - * lock on the buffer so that we aren't blocking other backends. + * Save heap TID to use it in _bt_restscan. Then release the read lock on + * the buffer so that we aren't blocking other backends. * - * NOTE: we do keep the pin on the buffer! This is essential to ensure - * that someone else doesn't delete the index entry we are stopped on. + * NOTE: we do keep the pin on the buffer! This is essential to ensure that + * someone else doesn't delete the index entry we are stopped on. */ if (res) { @@ -333,7 +331,7 @@ Datum btgetmulti(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); - ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1); + ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1); int32 max_tids = PG_GETARG_INT32(2); int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3); BTScanOpaque so = (BTScanOpaque) scan->opaque; @@ -355,6 +353,7 @@ btgetmulti(PG_FUNCTION_ARGS) res = _bt_next(scan, ForwardScanDirection); else res = _bt_first(scan, ForwardScanDirection); + /* * Skip killed tuples if asked to. */ @@ -381,8 +380,8 @@ btgetmulti(PG_FUNCTION_ARGS) } /* - * Save heap TID to use it in _bt_restscan. Then release the read - * lock on the buffer so that we aren't blocking other backends. + * Save heap TID to use it in _bt_restscan. Then release the read lock on + * the buffer so that we aren't blocking other backends. */ if (res) { @@ -456,8 +455,8 @@ btrescan(PG_FUNCTION_ARGS) } /* - * Reset the scan keys. Note that keys ordering stuff moved to - * _bt_first. - vadim 05/05/97 + * Reset the scan keys. Note that keys ordering stuff moved to _bt_first. + * - vadim 05/05/97 */ if (scankey && scan->numberOfKeys > 0) memmove(scan->keyData, @@ -593,21 +592,20 @@ btbulkdelete(PG_FUNCTION_ARGS) num_index_tuples = 0; /* - * The outer loop iterates over index leaf pages, the inner over items - * on a leaf page. We issue just one _bt_delitems() call per page, so - * as to minimize WAL traffic. + * The outer loop iterates over index leaf pages, the inner over items on + * a leaf page. We issue just one _bt_delitems() call per page, so as to + * minimize WAL traffic. * * Note that we exclusive-lock every leaf page containing data items, in - * sequence left to right. It sounds attractive to only - * exclusive-lock those containing items we need to delete, but - * unfortunately that is not safe: we could then pass a stopped - * indexscan, which could in rare cases lead to deleting the item it - * needs to find when it resumes. (See _bt_restscan --- this could - * only happen if an indexscan stops on a deletable item and then a - * page split moves that item into a page further to its right, which - * the indexscan will have no pin on.) We can skip obtaining - * exclusive lock on empty pages though, since no indexscan could be - * stopped on those. + * sequence left to right. It sounds attractive to only exclusive-lock + * those containing items we need to delete, but unfortunately that is not + * safe: we could then pass a stopped indexscan, which could in rare cases + * lead to deleting the item it needs to find when it resumes. 
(See + * _bt_restscan --- this could only happen if an indexscan stops on a + * deletable item and then a page split moves that item into a page + * further to its right, which the indexscan will have no pin on.) We can + * skip obtaining exclusive lock on empty pages though, since no indexscan + * could be stopped on those. */ buf = _bt_get_endpoint(rel, 0, false); if (BufferIsValid(buf)) /* check for empty index */ @@ -632,15 +630,15 @@ btbulkdelete(PG_FUNCTION_ARGS) if (minoff <= maxoff && !P_ISDELETED(opaque)) { /* - * Trade in the initial read lock for a super-exclusive - * write lock on this page. + * Trade in the initial read lock for a super-exclusive write + * lock on this page. */ LockBuffer(buf, BUFFER_LOCK_UNLOCK); LockBufferForCleanup(buf); /* - * Recompute minoff/maxoff, both of which could have - * changed while we weren't holding the lock. + * Recompute minoff/maxoff, both of which could have changed + * while we weren't holding the lock. */ minoff = P_FIRSTDATAKEY(opaque); maxoff = PageGetMaxOffsetNumber(page); @@ -657,7 +655,7 @@ btbulkdelete(PG_FUNCTION_ARGS) ItemPointer htup; btitem = (BTItem) PageGetItem(page, - PageGetItemId(page, offnum)); + PageGetItemId(page, offnum)); htup = &(btitem->bti_itup.t_tid); if (callback(htup, callback_state)) { @@ -670,8 +668,8 @@ btbulkdelete(PG_FUNCTION_ARGS) } /* - * If we need to delete anything, do it and write the buffer; - * else just release the buffer. + * If we need to delete anything, do it and write the buffer; else + * just release the buffer. */ nextpage = opaque->btpo_next; if (ndeletable > 0) @@ -725,19 +723,19 @@ btvacuumcleanup(PG_FUNCTION_ARGS) Assert(stats != NULL); /* - * First find out the number of pages in the index. We must acquire - * the relation-extension lock while doing this to avoid a race - * condition: if someone else is extending the relation, there is - * a window where bufmgr/smgr have created a new all-zero page but - * it hasn't yet been write-locked by _bt_getbuf(). If we manage to - * scan such a page here, we'll improperly assume it can be recycled. - * Taking the lock synchronizes things enough to prevent a problem: - * either num_pages won't include the new page, or _bt_getbuf already - * has write lock on the buffer and it will be fully initialized before - * we can examine it. (See also vacuumlazy.c, which has the same issue.) + * First find out the number of pages in the index. We must acquire the + * relation-extension lock while doing this to avoid a race condition: if + * someone else is extending the relation, there is a window where + * bufmgr/smgr have created a new all-zero page but it hasn't yet been + * write-locked by _bt_getbuf(). If we manage to scan such a page here, + * we'll improperly assume it can be recycled. Taking the lock + * synchronizes things enough to prevent a problem: either num_pages won't + * include the new page, or _bt_getbuf already has write lock on the + * buffer and it will be fully initialized before we can examine it. (See + * also vacuumlazy.c, which has the same issue.) * - * We can skip locking for new or temp relations, - * however, since no one else could be accessing them. + * We can skip locking for new or temp relations, however, since no one else + * could be accessing them. */ needLock = !RELATION_IS_LOCAL(rel); @@ -807,12 +805,12 @@ btvacuumcleanup(PG_FUNCTION_ARGS) /* * During VACUUM FULL it's okay to recycle deleted pages - * immediately, since there can be no other transactions - * scanning the index. 
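Earlier in this hunk, btbulkdelete explains that it collects every deletable item on a leaf page and issues a single _bt_delitems() call per page to keep WAL traffic down. A standalone sketch of that collect-then-batch pattern, with a fixed-size in-memory page and hypothetical helpers standing in for the real nbtree routines:

#include <stdbool.h>

#define MAX_ITEMS_PER_PAGE 256

typedef struct
{
	int		items[MAX_ITEMS_PER_PAGE];
	int		nitems;
} FakePage;

/* remove the listed offsets; walk backwards so earlier offsets stay valid */
static void
delete_batch(FakePage *page, const int *offsets, int noffsets)
{
	for (int i = noffsets - 1; i >= 0; i--)
	{
		for (int j = offsets[i]; j < page->nitems - 1; j++)
			page->items[j] = page->items[j + 1];
		page->nitems--;
	}
}

/* one pass over the page: remember what dies, then delete it all at once */
static void
bulk_delete_page(FakePage *page, bool (*deletable) (int item))
{
	int		dead[MAX_ITEMS_PER_PAGE];
	int		ndead = 0;

	for (int off = 0; off < page->nitems; off++)
		if (deletable(page->items[off]))
			dead[ndead++] = off;

	if (ndead > 0)
		delete_batch(page, dead, ndead);
}

Batching the deletions mirrors the "one _bt_delitems() call per page" point: the expensive action happens once per page, no matter how many items are removed.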
Note that we will only recycle the - * current page and not any parent pages that _bt_pagedel - * might have recursed to; this seems reasonable in the name - * of simplicity. (Trying to do otherwise would mean we'd - * have to sort the list of recyclable pages we're building.) + * immediately, since there can be no other transactions scanning + * the index. Note that we will only recycle the current page and + * not any parent pages that _bt_pagedel might have recursed to; + * this seems reasonable in the name of simplicity. (Trying to do + * otherwise would mean we'd have to sort the list of recyclable + * pages we're building.) */ if (ndel && info->vacuum_full) { @@ -827,10 +825,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS) } /* - * During VACUUM FULL, we truncate off any recyclable pages at the end - * of the index. In a normal vacuum it'd be unsafe to do this except - * by acquiring exclusive lock on the index and then rechecking all - * the pages; doesn't seem worth it. + * During VACUUM FULL, we truncate off any recyclable pages at the end of + * the index. In a normal vacuum it'd be unsafe to do this except by + * acquiring exclusive lock on the index and then rechecking all the + * pages; doesn't seem worth it. */ if (info->vacuum_full && nFreePages > 0) { @@ -857,9 +855,9 @@ btvacuumcleanup(PG_FUNCTION_ARGS) } /* - * Update the shared Free Space Map with the info we now have about - * free pages in the index, discarding any old info the map may have. - * We do not need to sort the page numbers; they're in order already. + * Update the shared Free Space Map with the info we now have about free + * pages in the index, discarding any old info the map may have. We do not + * need to sort the page numbers; they're in order already. */ RecordIndexFreeSpace(&rel->rd_node, nFreePages, freePages); @@ -915,15 +913,15 @@ _bt_restscan(IndexScanDesc scan) opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* - * We use this as flag when first index tuple on page is deleted but - * we do not move left (this would slowdown vacuum) - so we set + * We use this as flag when first index tuple on page is deleted but we do + * not move left (this would slowdown vacuum) - so we set * current->ip_posid before first index tuple on the current page * (_bt_step will move it right)... XXX still needed? */ if (!ItemPointerIsValid(target)) { ItemPointerSetOffsetNumber(current, - OffsetNumberPrev(P_FIRSTDATAKEY(opaque))); + OffsetNumberPrev(P_FIRSTDATAKEY(opaque))); return; } @@ -948,12 +946,12 @@ _bt_restscan(IndexScanDesc scan) } /* - * The item we're looking for moved right at least one page, so - * move right. We are careful here to pin and read-lock the next - * non-dead page before releasing the current one. This ensures - * that a concurrent btbulkdelete scan cannot pass our position - * --- if it did, it might be able to reach and delete our target - * item before we can find it again. + * The item we're looking for moved right at least one page, so move + * right. We are careful here to pin and read-lock the next non-dead + * page before releasing the current one. This ensures that a + * concurrent btbulkdelete scan cannot pass our position --- if it + * did, it might be able to reach and delete our target item before we + * can find it again. 
*/ if (P_RIGHTMOST(opaque)) elog(ERROR, "failed to re-find previous key in \"%s\"", diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index c029824fa6f..06075dd3dda 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.94 2005/10/06 02:29:12 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.95 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -69,9 +69,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, BTStack new_stack; /* - * Race -- the page we just grabbed may have split since we read - * its pointer in the parent (or metapage). If it has, we may - * need to move right to its new sibling. Do that. + * Race -- the page we just grabbed may have split since we read its + * pointer in the parent (or metapage). If it has, we may need to + * move right to its new sibling. Do that. */ *bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, BT_READ); @@ -82,8 +82,8 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, break; /* - * Find the appropriate item on the internal page, and get the - * child page that it points to. + * Find the appropriate item on the internal page, and get the child + * page that it points to. */ offnum = _bt_binsrch(rel, *bufP, keysz, scankey, nextkey); itemid = PageGetItemId(page, offnum); @@ -94,13 +94,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, /* * We need to save the location of the index entry we chose in the - * parent page on a stack. In case we split the tree, we'll use - * the stack to work back up to the parent page. We also save the - * actual downlink (TID) to uniquely identify the index entry, in - * case it moves right while we're working lower in the tree. See - * the paper by Lehman and Yao for how this is detected and - * handled. (We use the child link to disambiguate duplicate keys - * in the index -- Lehman and Yao disallow duplicate keys.) + * parent page on a stack. In case we split the tree, we'll use the + * stack to work back up to the parent page. We also save the actual + * downlink (TID) to uniquely identify the index entry, in case it + * moves right while we're working lower in the tree. See the paper + * by Lehman and Yao for how this is detected and handled. (We use the + * child link to disambiguate duplicate keys in the index -- Lehman + * and Yao disallow duplicate keys.) */ new_stack = (BTStack) palloc(sizeof(BTStackData)); new_stack->bts_blkno = par_blkno; @@ -156,19 +156,18 @@ _bt_moveright(Relation rel, opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* - * When nextkey = false (normal case): if the scan key that brought us - * to this page is > the high key stored on the page, then the page - * has split and we need to move right. (If the scan key is equal to - * the high key, we might or might not need to move right; have to - * scan the page first anyway.) + * When nextkey = false (normal case): if the scan key that brought us to + * this page is > the high key stored on the page, then the page has split + * and we need to move right. (If the scan key is equal to the high key, + * we might or might not need to move right; have to scan the page first + * anyway.) 
* * When nextkey = true: move right if the scan key is >= page's high key. * - * The page could even have split more than once, so scan as far as - * needed. + * The page could even have split more than once, so scan as far as needed. * - * We also have to move right if we followed a link that brought us to a - * dead page. + * We also have to move right if we followed a link that brought us to a dead + * page. */ cmpval = nextkey ? 0 : 1; @@ -242,24 +241,24 @@ _bt_binsrch(Relation rel, high = PageGetMaxOffsetNumber(page); /* - * If there are no keys on the page, return the first available slot. - * Note this covers two cases: the page is really empty (no keys), or - * it contains only a high key. The latter case is possible after - * vacuuming. This can never happen on an internal page, however, - * since they are never empty (an internal page must have children). + * If there are no keys on the page, return the first available slot. Note + * this covers two cases: the page is really empty (no keys), or it + * contains only a high key. The latter case is possible after vacuuming. + * This can never happen on an internal page, however, since they are + * never empty (an internal page must have children). */ if (high < low) return low; /* - * Binary search to find the first key on the page >= scan key, or - * first key > scankey when nextkey is true. + * Binary search to find the first key on the page >= scan key, or first + * key > scankey when nextkey is true. * * For nextkey=false (cmpval=1), the loop invariant is: all slots before * 'low' are < scan key, all slots at or after 'high' are >= scan key. * - * For nextkey=true (cmpval=0), the loop invariant is: all slots before - * 'low' are <= scan key, all slots at or after 'high' are > scan key. + * For nextkey=true (cmpval=0), the loop invariant is: all slots before 'low' + * are <= scan key, all slots at or after 'high' are > scan key. * * We can fall out when high == low. */ @@ -285,15 +284,15 @@ _bt_binsrch(Relation rel, * At this point we have high == low, but be careful: they could point * past the last slot on the page. * - * On a leaf page, we always return the first key >= scan key (resp. > - * scan key), which could be the last slot + 1. + * On a leaf page, we always return the first key >= scan key (resp. > scan + * key), which could be the last slot + 1. */ if (P_ISLEAF(opaque)) return low; /* - * On a non-leaf page, return the last key < scan key (resp. <= scan - * key). There must be one if _bt_compare() is playing by the rules. + * On a non-leaf page, return the last key < scan key (resp. <= scan key). + * There must be one if _bt_compare() is playing by the rules. */ Assert(low > P_FIRSTDATAKEY(opaque)); @@ -337,8 +336,8 @@ _bt_compare(Relation rel, int i; /* - * Force result ">" if target item is first data item on an internal - * page --- see NOTE above. + * Force result ">" if target item is first data item on an internal page + * --- see NOTE above. */ if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque)) return 1; @@ -347,15 +346,15 @@ _bt_compare(Relation rel, itup = &(btitem->bti_itup); /* - * The scan key is set up with the attribute number associated with - * each term in the key. It is important that, if the index is - * multi-key, the scan contain the first k key attributes, and that - * they be in order. If you think about how multi-key ordering works, - * you'll understand why this is. + * The scan key is set up with the attribute number associated with each + * term in the key. 
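The _bt_binsrch hunk above states the loop invariant exactly ("all slots before 'low' are < scan key, all slots at or after 'high' are >= scan key", with the <= / > variant when nextkey is true). A minimal sketch of that search over a plain sorted int array, reusing the same cmpval trick but none of the real Page/ScanKey machinery:

#include <stdbool.h>

/*
 * Return the first slot whose key is >= target (nextkey = false), or the
 * first slot whose key is > target (nextkey = true); may be nkeys if no
 * slot qualifies.
 */
static int
binsrch_sketch(const int *keys, int nkeys, int target, bool nextkey)
{
	int		low = 0;
	int		high = nkeys;
	int		cmpval = nextkey ? 0 : 1;

	while (high > low)
	{
		int		mid = low + (high - low) / 2;
		/* three-way compare of target against keys[mid]: -1, 0, or +1 */
		int		result = (target > keys[mid]) - (target < keys[mid]);

		if (result >= cmpval)
			low = mid + 1;		/* keys[mid] still fails the condition */
		else
			high = mid;			/* keys[mid] already satisfies it */
	}
	return low;					/* low == high here */
}

With cmpval = 1 the loop discards slots strictly below the target; with cmpval = 0 it also discards equal slots, which is exactly the distinction the invariant draws between the nextkey = false and nextkey = true cases.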
It is important that, if the index is multi-key, the + * scan contain the first k key attributes, and that they be in order. If + * you think about how multi-key ordering works, you'll understand why + * this is. * - * We don't test for violation of this condition here, however. The - * initial setup for the index scan had better have gotten it right - * (see _bt_first). + * We don't test for violation of this condition here, however. The initial + * setup for the index scan had better have gotten it right (see + * _bt_first). */ for (i = 1; i <= keysz; i++) @@ -381,15 +380,15 @@ _bt_compare(Relation rel, else { /* - * The sk_func needs to be passed the index value as left arg - * and the sk_argument as right arg (they might be of - * different types). Since it is convenient for callers to - * think of _bt_compare as comparing the scankey to the index - * item, we have to flip the sign of the comparison result. + * The sk_func needs to be passed the index value as left arg and + * the sk_argument as right arg (they might be of different + * types). Since it is convenient for callers to think of + * _bt_compare as comparing the scankey to the index item, we have + * to flip the sign of the comparison result. * - * Note: curious-looking coding is to avoid overflow if - * comparison function returns INT_MIN. There is no risk of - * overflow for positive results. + * Note: curious-looking coding is to avoid overflow if comparison + * function returns INT_MIN. There is no risk of overflow for + * positive results. */ result = DatumGetInt32(FunctionCall2(&scankey->sk_func, datum, @@ -497,7 +496,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) bool goback; bool continuescan; ScanKey startKeys[INDEX_MAX_KEYS]; - ScanKeyData scankeys[INDEX_MAX_KEYS]; + ScanKeyData scankeys[INDEX_MAX_KEYS]; int keysCount = 0; int i; StrategyNumber strat_total; @@ -505,8 +504,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) pgstat_count_index_scan(&scan->xs_pgstat_info); /* - * Examine the scan keys and eliminate any redundant keys; also - * discover how many keys must be matched to continue the scan. + * Examine the scan keys and eliminate any redundant keys; also discover + * how many keys must be matched to continue the scan. */ _bt_preprocess_keys(scan); @@ -556,9 +555,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ScanKey cur; /* - * chosen is the so-far-chosen key for the current attribute, if - * any. We don't cast the decision in stone until we reach keys - * for the next attribute. + * chosen is the so-far-chosen key for the current attribute, if any. + * We don't cast the decision in stone until we reach keys for the + * next attribute. */ curattr = 1; chosen = NULL; @@ -595,9 +594,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) } /* - * Done if that was the last attribute, or if next key - * is not in sequence (implying no boundary key is available - * for the next attribute). + * Done if that was the last attribute, or if next key is not + * in sequence (implying no boundary key is available for the + * next attribute). */ if (i >= so->numberOfKeys || cur->sk_attno != curattr + 1) @@ -632,17 +631,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) } /* - * If we found no usable boundary keys, we have to start from one end - * of the tree. Walk down that edge to the first or last key, and - * scan from there. + * If we found no usable boundary keys, we have to start from one end of + * the tree. Walk down that edge to the first or last key, and scan from + * there. 
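The _bt_compare hunk above notes that the comparison result's sign must be flipped and that the "curious-looking coding" exists to avoid overflow when the support function returns INT_MIN. The hunk only shows the comment, so the following is an illustrative sketch of the idea rather than the patch's own code:

/*
 * Flip the sign of a three-way comparison result without ever computing
 * -result, which is undefined behaviour when result == INT_MIN.  Callers
 * only test the sign, so collapsing to -1/0/+1 is enough.
 */
static int
flip_comparison_sign(int result)
{
	if (result < 0)
		return 1;
	if (result > 0)
		return -1;
	return 0;
}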
*/ if (keysCount == 0) return _bt_endpoint(scan, dir); /* * We want to start the scan somewhere within the index. Set up a - * 3-way-comparison scankey we can use to search for the boundary - * point we identified above. + * 3-way-comparison scankey we can use to search for the boundary point we + * identified above. */ Assert(keysCount <= INDEX_MAX_KEYS); for (i = 0; i < keysCount; i++) @@ -650,16 +649,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ScanKey cur = startKeys[i]; /* - * _bt_preprocess_keys disallows it, but it's place to add some - * code later + * _bt_preprocess_keys disallows it, but it's place to add some code + * later */ if (cur->sk_flags & SK_ISNULL) elog(ERROR, "btree doesn't support is(not)null, yet"); /* - * If scankey operator is of default subtype, we can use the - * cached comparison procedure; otherwise gotta look it up in the - * catalogs. + * If scankey operator is of default subtype, we can use the cached + * comparison procedure; otherwise gotta look it up in the catalogs. */ if (cur->sk_subtype == InvalidOid) { @@ -692,13 +690,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) } /* - * Examine the selected initial-positioning strategy to determine - * exactly where we need to start the scan, and set flag variables to - * control the code below. + * Examine the selected initial-positioning strategy to determine exactly + * where we need to start the scan, and set flag variables to control the + * code below. * - * If nextkey = false, _bt_search and _bt_binsrch will locate the first - * item >= scan key. If nextkey = true, they will locate the first - * item > scan key. + * If nextkey = false, _bt_search and _bt_binsrch will locate the first item + * >= scan key. If nextkey = true, they will locate the first item > scan + * key. * * If goback = true, we will then step back one item, while if goback = * false, we will start the scan on the located item. @@ -710,10 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) case BTLessStrategyNumber: /* - * Find first item >= scankey, then back up one to arrive at - * last item < scankey. (Note: this positioning strategy is - * only used for a backward scan, so that is always the - * correct starting position.) + * Find first item >= scankey, then back up one to arrive at last + * item < scankey. (Note: this positioning strategy is only used + * for a backward scan, so that is always the correct starting + * position.) */ nextkey = false; goback = true; @@ -722,10 +720,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) case BTLessEqualStrategyNumber: /* - * Find first item > scankey, then back up one to arrive at - * last item <= scankey. (Note: this positioning strategy is - * only used for a backward scan, so that is always the - * correct starting position.) + * Find first item > scankey, then back up one to arrive at last + * item <= scankey. (Note: this positioning strategy is only used + * for a backward scan, so that is always the correct starting + * position.) */ nextkey = true; goback = true; @@ -734,14 +732,14 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) case BTEqualStrategyNumber: /* - * If a backward scan was specified, need to start with last - * equal item not first one. + * If a backward scan was specified, need to start with last equal + * item not first one. */ if (ScanDirectionIsBackward(dir)) { /* - * This is the same as the <= strategy. We will check at - * the end whether the found item is actually =. + * This is the same as the <= strategy. 
We will check at the + * end whether the found item is actually =. */ nextkey = true; goback = true; @@ -749,8 +747,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) else { /* - * This is the same as the >= strategy. We will check at - * the end whether the found item is actually =. + * This is the same as the >= strategy. We will check at the + * end whether the found item is actually =. */ nextkey = false; goback = false; @@ -813,24 +811,24 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ItemPointerSet(current, blkno, offnum); /* - * If nextkey = false, we are positioned at the first item >= scan - * key, or possibly at the end of a page on which all the existing - * items are less than the scan key and we know that everything on - * later pages is greater than or equal to scan key. + * If nextkey = false, we are positioned at the first item >= scan key, or + * possibly at the end of a page on which all the existing items are less + * than the scan key and we know that everything on later pages is greater + * than or equal to scan key. * * If nextkey = true, we are positioned at the first item > scan key, or - * possibly at the end of a page on which all the existing items are - * less than or equal to the scan key and we know that everything on - * later pages is greater than scan key. + * possibly at the end of a page on which all the existing items are less + * than or equal to the scan key and we know that everything on later + * pages is greater than scan key. * - * The actually desired starting point is either this item or the prior - * one, or in the end-of-page case it's the first item on the next - * page or the last item on this page. We apply _bt_step if needed to - * get to the right place. + * The actually desired starting point is either this item or the prior one, + * or in the end-of-page case it's the first item on the next page or the + * last item on this page. We apply _bt_step if needed to get to the + * right place. * * If _bt_step fails (meaning we fell off the end of the index in one - * direction or the other), then there are no matches so we just - * return false. + * direction or the other), then there are no matches so we just return + * false. */ if (goback) { @@ -902,8 +900,8 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) BlockNumber blkno; /* - * Don't use ItemPointerGetOffsetNumber or you risk to get assertion - * due to ability of ip_posid to be equal 0. + * Don't use ItemPointerGetOffsetNumber or you risk to get assertion due + * to ability of ip_posid to be equal 0. */ offnum = current->ip_posid; @@ -954,9 +952,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) /* * Walk left to the next page with data. This is much more * complex than the walk-right case because of the possibility - * that the page to our left splits while we are in flight to - * it, plus the possibility that the page we were on gets - * deleted after we leave it. See nbtree/README for details. + * that the page to our left splits while we are in flight to it, + * plus the possibility that the page we were on gets deleted + * after we leave it. See nbtree/README for details. */ for (;;) { @@ -973,9 +971,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* - * Okay, we managed to move left to a non-deleted page. - * Done if it's not half-dead and not empty. Else loop - * back and do it all again. + * Okay, we managed to move left to a non-deleted page. 
Done + * if it's not half-dead and not empty. Else loop back and do + * it all again. */ if (!P_IGNORE(opaque)) { @@ -1043,15 +1041,14 @@ _bt_walk_left(Relation rel, Buffer buf) /* * If this isn't the page we want, walk right till we find what we - * want --- but go no more than four hops (an arbitrary limit). If - * we don't find the correct page by then, the most likely bet is - * that the original page got deleted and isn't in the sibling - * chain at all anymore, not that its left sibling got split more - * than four times. + * want --- but go no more than four hops (an arbitrary limit). If we + * don't find the correct page by then, the most likely bet is that + * the original page got deleted and isn't in the sibling chain at all + * anymore, not that its left sibling got split more than four times. * - * Note that it is correct to test P_ISDELETED not P_IGNORE here, - * because half-dead pages are still in the sibling chain. Caller - * must reject half-dead pages if wanted. + * Note that it is correct to test P_ISDELETED not P_IGNORE here, because + * half-dead pages are still in the sibling chain. Caller must reject + * half-dead pages if wanted. */ tries = 0; for (;;) @@ -1077,9 +1074,9 @@ _bt_walk_left(Relation rel, Buffer buf) { /* * It was deleted. Move right to first nondeleted page (there - * must be one); that is the page that has acquired the - * deleted one's keyspace, so stepping left from it will take - * us where we want to be. + * must be one); that is the page that has acquired the deleted + * one's keyspace, so stepping left from it will take us where we + * want to be. */ for (;;) { @@ -1095,16 +1092,16 @@ _bt_walk_left(Relation rel, Buffer buf) } /* - * Now return to top of loop, resetting obknum to point to - * this nondeleted page, and try again. + * Now return to top of loop, resetting obknum to point to this + * nondeleted page, and try again. */ } else { /* - * It wasn't deleted; the explanation had better be that the - * page to the left got split or deleted. Without this check, - * we'd go into an infinite loop if there's anything wrong. + * It wasn't deleted; the explanation had better be that the page + * to the left got split or deleted. Without this check, we'd go + * into an infinite loop if there's anything wrong. */ if (opaque->btpo_prev == lblkno) elog(ERROR, "could not find left sibling in \"%s\"", @@ -1137,8 +1134,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost) /* * If we are looking for a leaf page, okay to descend from fast root; - * otherwise better descend from true root. (There is no point in - * being smarter about intermediate levels.) + * otherwise better descend from true root. (There is no point in being + * smarter about intermediate levels.) */ if (level == 0) buf = _bt_getroot(rel, BT_READ); @@ -1159,8 +1156,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost) /* * If we landed on a deleted page, step right to find a live page * (there must be one). Also, if we want the rightmost page, step - * right if needed to get to it (this could happen if the page - * split since we obtained a pointer to it). + * right if needed to get to it (this could happen if the page split + * since we obtained a pointer to it). */ while (P_IGNORE(opaque) || (rightmost && !P_RIGHTMOST(opaque))) @@ -1228,9 +1225,9 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) so = (BTScanOpaque) scan->opaque; /* - * Scan down to the leftmost or rightmost leaf page. This is a - * simplified version of _bt_search(). 
We don't maintain a stack - * since we know we won't need it. + * Scan down to the leftmost or rightmost leaf page. This is a simplified + * version of _bt_search(). We don't maintain a stack since we know we + * won't need it. */ buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir)); @@ -1261,8 +1258,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) Assert(P_RIGHTMOST(opaque)); start = PageGetMaxOffsetNumber(page); - if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty - * page */ + if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */ start = P_FIRSTDATAKEY(opaque); } else @@ -1276,8 +1272,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) so->btso_curbuf = buf; /* - * Left/rightmost page could be empty due to deletions, if so step - * till we find a nonempty page. + * Left/rightmost page could be empty due to deletions, if so step till we + * find a nonempty page. */ if (start > maxoff) { @@ -1291,8 +1287,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) itup = &(btitem->bti_itup); /* - * Okay, we are on the first or last tuple. Does it pass all the - * quals? + * Okay, we are on the first or last tuple. Does it pass all the quals? */ if (_bt_checkkeys(scan, itup, dir, &continuescan)) { diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index ee5acee5c3e..6ee5d42b63a 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -56,7 +56,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.94 2005/08/11 13:22:33 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -99,12 +99,10 @@ typedef struct BTPageState { Page btps_page; /* workspace for page building */ BlockNumber btps_blkno; /* block # to write this page at */ - BTItem btps_minkey; /* copy of minimum key (first item) on - * page */ + BTItem btps_minkey; /* copy of minimum key (first item) on page */ OffsetNumber btps_lastoff; /* last item offset loaded */ uint32 btps_level; /* tree level (0 = leaf) */ - Size btps_full; /* "full" if less than this much free - * space */ + Size btps_full; /* "full" if less than this much free space */ struct BTPageState *btps_next; /* link to parent level, if any */ } BTPageState; @@ -157,21 +155,21 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead) btspool->isunique = isunique; /* - * We size the sort area as maintenance_work_mem rather than work_mem - * to speed index creation. This should be OK since a single backend - * can't run multiple index creations in parallel. Note that creation - * of a unique index actually requires two BTSpool objects. We expect - * that the second one (for dead tuples) won't get very full, so we - * give it only work_mem. + * We size the sort area as maintenance_work_mem rather than work_mem to + * speed index creation. This should be OK since a single backend can't + * run multiple index creations in parallel. Note that creation of a + * unique index actually requires two BTSpool objects. We expect that the + * second one (for dead tuples) won't get very full, so we give it only + * work_mem. */ btKbytes = isdead ? work_mem : maintenance_work_mem; btspool->sortstate = tuplesort_begin_index(index, isunique, btKbytes, false); /* - * Currently, tuplesort provides sort functions on IndexTuples. 
If we - * kept anything in a BTItem other than a regular IndexTuple, we'd - * need to modify tuplesort to understand BTItems as such. + * Currently, tuplesort provides sort functions on IndexTuples. If we kept + * anything in a BTItem other than a regular IndexTuple, we'd need to + * modify tuplesort to understand BTItems as such. */ Assert(sizeof(BTItemData) == sizeof(IndexTupleData)); @@ -222,8 +220,8 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) wstate.index = btspool->index; /* - * We need to log index creation in WAL iff WAL archiving is enabled - * AND it's not a temp index. + * We need to log index creation in WAL iff WAL archiving is enabled AND + * it's not a temp index. */ wstate.btws_use_wal = XLogArchivingActive() && !wstate.index->rd_istemp; @@ -313,9 +311,9 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) /* * If we have to write pages nonsequentially, fill in the space with * zeroes until we come back and overwrite. This is not logically - * necessary on standard Unix filesystems (unwritten space will read - * as zeroes anyway), but it should help to avoid fragmentation. The - * dummy pages aren't WAL-logged though. + * necessary on standard Unix filesystems (unwritten space will read as + * zeroes anyway), but it should help to avoid fragmentation. The dummy + * pages aren't WAL-logged though. */ while (blkno > wstate->btws_pages_written) { @@ -328,8 +326,8 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) /* * Now write the page. We say isTemp = true even if it's not a temp - * index, because there's no need for smgr to schedule an fsync for - * this write; we'll do it ourselves before ending the build. + * index, because there's no need for smgr to schedule an fsync for this + * write; we'll do it ourselves before ending the build. */ smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true); @@ -483,15 +481,15 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) btisz = MAXALIGN(btisz); /* - * Check whether the item can fit on a btree page at all. (Eventually, - * we ought to try to apply TOAST methods if not.) We actually need to - * be able to fit three items on every page, so restrict any one item - * to 1/3 the per-page available space. Note that at this point, btisz - * doesn't include the ItemId. + * Check whether the item can fit on a btree page at all. (Eventually, we + * ought to try to apply TOAST methods if not.) We actually need to be + * able to fit three items on every page, so restrict any one item to 1/3 + * the per-page available space. Note that at this point, btisz doesn't + * include the ItemId. * - * NOTE: similar code appears in _bt_insertonpg() to defend against - * oversize items being inserted into an already-existing index. But - * during creation of an index, we don't go through there. + * NOTE: similar code appears in _bt_insertonpg() to defend against oversize + * items being inserted into an already-existing index. But during + * creation of an index, we don't go through there. 
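_bt_blwritepage above pads the file with zero pages whenever it must write a block beyond what has been written sequentially so far ("fill in the space with zeroes until we come back and overwrite"). A sketch of that pattern over plain stdio, with an 8 kB page and a FILE handle standing in for the smgr layer:

#include <stdio.h>

#define PAGE_SIZE 8192

static void
write_block(FILE *fp, long blkno, const char *page, long *pages_written)
{
	static const char zeropage[PAGE_SIZE];	/* all zeroes */

	/* pad any gap with dummy pages; they'll be overwritten later */
	while (blkno > *pages_written)
	{
		fseek(fp, *pages_written * (long) PAGE_SIZE, SEEK_SET);
		fwrite(zeropage, PAGE_SIZE, 1, fp);
		(*pages_written)++;
	}

	fseek(fp, blkno * (long) PAGE_SIZE, SEEK_SET);
	fwrite(page, PAGE_SIZE, 1, fp);
	if (blkno == *pages_written)
		(*pages_written)++;
}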
*/ if (btisz > BTMaxItemSize(npage)) ereport(ERROR, @@ -499,9 +497,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) errmsg("index row size %lu exceeds btree maximum, %lu", (unsigned long) btisz, (unsigned long) BTMaxItemSize(npage)), - errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n" - "Consider a function index of an MD5 hash of the value, " - "or use full text indexing."))); + errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n" + "Consider a function index of an MD5 hash of the value, " + "or use full text indexing."))); if (pgspc < btisz || pgspc < state->btps_full) { @@ -523,11 +521,11 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) /* * We copy the last item on the page into the new page, and then - * rearrange the old page so that the 'last item' becomes its high - * key rather than a true data item. There had better be at least - * two items on the page already, else the page would be empty of - * useful data. (Hence, we must allow pages to be packed at least - * 2/3rds full; the 70% figure used above is close to minimum.) + * rearrange the old page so that the 'last item' becomes its high key + * rather than a true data item. There had better be at least two + * items on the page already, else the page would be empty of useful + * data. (Hence, we must allow pages to be packed at least 2/3rds + * full; the 70% figure used above is close to minimum.) */ Assert(last_off > P_FIRSTKEY); ii = PageGetItemId(opage, last_off); @@ -544,8 +542,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) /* * Link the old page into its parent, using its minimum key. If we - * don't have a parent, we have to create one; this adds a new - * btree level. + * don't have a parent, we have to create one; this adds a new btree + * level. */ if (state->btps_next == NULL) state->btps_next = _bt_pagestate(wstate, state->btps_level + 1); @@ -557,9 +555,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) pfree(state->btps_minkey); /* - * Save a copy of the minimum key for the new page. We have to - * copy it off the old page, not the new one, in case we are not - * at leaf level. + * Save a copy of the minimum key for the new page. We have to copy + * it off the old page, not the new one, in case we are not at leaf + * level. */ state->btps_minkey = _bt_formitem(&(obti->bti_itup)); @@ -576,8 +574,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) } /* - * Write out the old page. We never need to touch it again, so we - * can free the opage workspace too. + * Write out the old page. We never need to touch it again, so we can + * free the opage workspace too. */ _bt_blwritepage(wstate, opage, oblkno); @@ -588,10 +586,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) } /* - * If the new item is the first for its page, stash a copy for later. - * Note this will only happen for the first item on a level; on later - * pages, the first item for a page is copied from the prior page in - * the code above. + * If the new item is the first for its page, stash a copy for later. Note + * this will only happen for the first item on a level; on later pages, + * the first item for a page is copied from the prior page in the code + * above. */ if (last_off == P_HIKEY) { @@ -636,9 +634,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) * We have to link the last page on this level to somewhere. 
* * If we're at the top, it's the root, so attach it to the metapage. - * Otherwise, add an entry for it to its parent using its minimum - * key. This may cause the last page of the parent level to - * split, but that's not a problem -- we haven't gotten to it yet. + * Otherwise, add an entry for it to its parent using its minimum key. + * This may cause the last page of the parent level to split, but + * that's not a problem -- we haven't gotten to it yet. */ if (s->btps_next == NULL) { @@ -657,8 +655,8 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) } /* - * This is the rightmost page, so the ItemId array needs to be - * slid back one slot. Then we can dump out the page. + * This is the rightmost page, so the ItemId array needs to be slid + * back one slot. Then we can dump out the page. */ _bt_slideleft(s->btps_page); _bt_blwritepage(wstate, s->btps_page, s->btps_blkno); @@ -667,9 +665,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) /* * As the last step in the process, construct the metapage and make it - * point to the new root (unless we had no data at all, in which case - * it's set to point to "P_NONE"). This changes the index to the - * "valid" state by filling in a valid magic number in the metapage. + * point to the new root (unless we had no data at all, in which case it's + * set to point to "P_NONE"). This changes the index to the "valid" state + * by filling in a valid magic number in the metapage. */ metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, rootblkno, rootlevel); @@ -748,7 +746,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) compare = DatumGetInt32(FunctionCall2(&entry->sk_func, attrDatum1, - attrDatum2)); + attrDatum2)); if (compare > 0) { load1 = false; @@ -772,7 +770,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) if (should_free) pfree(bti); bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, - true, &should_free); + true, &should_free); } else { @@ -780,7 +778,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) if (should_free2) pfree(bti2); bti2 = (BTItem) tuplesort_getindextuple(btspool2->sortstate, - true, &should_free2); + true, &should_free2); } } _bt_freeskey(indexScanKey); @@ -789,7 +787,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) { /* merge is unnecessary */ while ((bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, - true, &should_free)) != NULL) + true, &should_free)) != NULL) { /* When we see first tuple, create first index page */ if (state == NULL) @@ -805,19 +803,19 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) _bt_uppershutdown(wstate, state); /* - * If the index isn't temp, we must fsync it down to disk before it's - * safe to commit the transaction. (For a temp index we don't care - * since the index will be uninteresting after a crash anyway.) + * If the index isn't temp, we must fsync it down to disk before it's safe + * to commit the transaction. (For a temp index we don't care since the + * index will be uninteresting after a crash anyway.) * * It's obvious that we must do this when not WAL-logging the build. It's * less obvious that we have to do it even if we did WAL-log the index - * pages. The reason is that since we're building outside shared - * buffers, a CHECKPOINT occurring during the build has no way to - * flush the previously written data to disk (indeed it won't know the - * index even exists). 
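The _bt_load hunk above interleaves two sorted spools (the main spool and, for unique-index builds, the dead-tuples spool) by repeatedly emitting whichever stream has the smaller head. The same merge in miniature, with int arrays and a caller-supplied emit callback standing in for the tuplesort streams:

static void
merge_spools(const int *a, int na, const int *b, int nb,
			 void (*emit) (int val))
{
	int		ia = 0,
			ib = 0;

	/* take the smaller head first; ties go to the first spool */
	while (ia < na && ib < nb)
	{
		if (a[ia] <= b[ib])
			emit(a[ia++]);
		else
			emit(b[ib++]);
	}

	/* one stream is exhausted; drain the other */
	while (ia < na)
		emit(a[ia++]);
	while (ib < nb)
		emit(b[ib++]);
}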
A crash later on would replay WAL from the - * checkpoint, therefore it wouldn't replay our earlier WAL entries. - * If we do not fsync those pages here, they might still not be on - * disk when the crash occurs. + * pages. The reason is that since we're building outside shared buffers, + * a CHECKPOINT occurring during the build has no way to flush the + * previously written data to disk (indeed it won't know the index even + * exists). A crash later on would replay WAL from the checkpoint, + * therefore it wouldn't replay our earlier WAL entries. If we do not + * fsync those pages here, they might still not be on disk when the crash + * occurs. */ if (!wstate->index->rd_istemp) smgrimmedsync(wstate->index->rd_smgr); diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 9a5f8d7ac90..269213d21f7 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.63 2005/06/13 23:14:48 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.64 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -48,8 +48,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup) bool null; /* - * We can use the cached (default) support procs since no - * cross-type comparison can be needed. + * We can use the cached (default) support procs since no cross-type + * comparison can be needed. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); arg = index_getattr(itup, i + 1, itupdesc, &null); @@ -93,8 +93,8 @@ _bt_mkscankey_nodata(Relation rel) FmgrInfo *procinfo; /* - * We can use the cached (default) support procs since no - * cross-type comparison can be needed. + * We can use the cached (default) support procs since no cross-type + * comparison can be needed. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); ScanKeyEntryInitializeWithInfo(&skey[i], @@ -257,9 +257,9 @@ _bt_preprocess_keys(IndexScanDesc scan) if (numberOfKeys == 1) { /* - * We don't use indices for 'A is null' and 'A is not null' - * currently and 'A < = > <> NULL' will always fail - so qual is - * not OK if comparison value is NULL. - vadim 03/21/97 + * We don't use indices for 'A is null' and 'A is not null' currently + * and 'A < = > <> NULL' will always fail - so qual is not OK if + * comparison value is NULL. - vadim 03/21/97 */ if (cur->sk_flags & SK_ISNULL) so->qual_ok = false; @@ -286,20 +286,20 @@ _bt_preprocess_keys(IndexScanDesc scan) /* * Initialize for processing of keys for attr 1. * - * xform[i] points to the currently best scan key of strategy type i+1, - * if any is found with a default operator subtype; it is NULL if we - * haven't yet found such a key for this attr. Scan keys of - * nondefault subtypes are transferred to the output with no - * processing except for noting if they are of "=" type. + * xform[i] points to the currently best scan key of strategy type i+1, if + * any is found with a default operator subtype; it is NULL if we haven't + * yet found such a key for this attr. Scan keys of nondefault subtypes + * are transferred to the output with no processing except for noting if + * they are of "=" type. */ attno = 1; memset(xform, 0, sizeof(xform)); hasOtherTypeEqual = false; /* - * Loop iterates from 0 to numberOfKeys inclusive; we use the last - * pass to handle after-last-key processing. Actual exit from the - * loop is at the "break" statement below. 
+ * Loop iterates from 0 to numberOfKeys inclusive; we use the last pass to + * handle after-last-key processing. Actual exit from the loop is at the + * "break" statement below. */ for (i = 0;; cur++, i++) { @@ -319,8 +319,8 @@ _bt_preprocess_keys(IndexScanDesc scan) } /* - * If we are at the end of the keys for a particular attr, finish - * up processing and emit the cleaned-up keys. + * If we are at the end of the keys for a particular attr, finish up + * processing and emit the cleaned-up keys. */ if (i == numberOfKeys || cur->sk_attno != attno) { @@ -331,9 +331,9 @@ _bt_preprocess_keys(IndexScanDesc scan) elog(ERROR, "btree index keys must be ordered by attribute"); /* - * If = has been specified, no other key will be used. In case - * of key > 2 && key == 1 and so on we have to set qual_ok to - * false before discarding the other keys. + * If = has been specified, no other key will be used. In case of + * key > 2 && key == 1 and so on we have to set qual_ok to false + * before discarding the other keys. */ if (xform[BTEqualStrategyNumber - 1]) { @@ -411,8 +411,8 @@ _bt_preprocess_keys(IndexScanDesc scan) } /* - * If all attrs before this one had "=", include these keys - * into the required-keys count. + * If all attrs before this one had "=", include these keys into + * the required-keys count. */ if (priorNumberOfEqualCols == attno - 1) so->numberOfRequiredKeys = new_numberOfKeys; @@ -526,11 +526,11 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, if (isNull) { /* - * Since NULLs are sorted after non-NULLs, we know we have - * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. On a backward scan, - * however, we should keep going. + * Since NULLs are sorted after non-NULLs, we know we have reached + * the upper limit of the range of values for this index attr. On + * a forward scan, we can stop if this qual is one of the "must + * match" subset. On a backward scan, however, we should keep + * going. */ if (ikey < so->numberOfRequiredKeys && ScanDirectionIsForward(dir)) @@ -547,24 +547,22 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, if (!DatumGetBool(test)) { /* - * Tuple fails this qual. If it's a required qual, then we - * may be able to conclude no further tuples will pass, - * either. We have to look at the scan direction and the qual - * type. + * Tuple fails this qual. If it's a required qual, then we may be + * able to conclude no further tuples will pass, either. We have + * to look at the scan direction and the qual type. * - * Note: the only case in which we would keep going after failing - * a required qual is if there are partially-redundant quals - * that _bt_preprocess_keys() was unable to eliminate. For - * example, given "x > 4 AND x > 10" where both are cross-type - * comparisons and so not removable, we might start the scan - * at the x = 4 boundary point. The "x > 10" condition will - * fail until we pass x = 10, but we must not stop the scan on - * its account. + * Note: the only case in which we would keep going after failing a + * required qual is if there are partially-redundant quals that + * _bt_preprocess_keys() was unable to eliminate. For example, + * given "x > 4 AND x > 10" where both are cross-type comparisons + * and so not removable, we might start the scan at the x = 4 + * boundary point. The "x > 10" condition will fail until we pass + * x = 10, but we must not stop the scan on its account. 
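The _bt_preprocess_keys and _bt_checkkeys hunks above describe collapsing several quals on one attribute into the tightest usable bounds, with "=" superseding everything, while quals that cannot be folded (the cross-type "x > 4 AND x > 10" example) are simply rechecked during the scan. A reduced sketch of that bound-tightening over plain integers, with hypothetical names throughout:

#include <stdbool.h>
#include <limits.h>

typedef struct
{
	bool	has_eq;		/* an "=" qual was seen; it supersedes the rest */
	int		eq_val;
	int		lower;		/* tightest "x > lower" seen so far */
	int		upper;		/* tightest "x < upper" seen so far */
} BoundSet;

static void
bound_init(BoundSet *b)
{
	b->has_eq = false;
	b->eq_val = 0;
	b->lower = INT_MIN;
	b->upper = INT_MAX;
}

static void
bound_add_gt(BoundSet *b, int val)
{
	if (val > b->lower)
		b->lower = val;		/* "x > 4 AND x > 10" keeps only "x > 10" */
}

static void
bound_add_lt(BoundSet *b, int val)
{
	if (val < b->upper)
		b->upper = val;
}

static void
bound_add_eq(BoundSet *b, int val)
{
	b->has_eq = true;
	b->eq_val = val;
}

Quals the real code cannot fold this way are emitted unchanged, which is why a required qual such as "x > 10" may keep failing for a stretch of the scan without being grounds to stop it.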
* - * Note: because we stop the scan as soon as any required - * equality qual fails, it is critical that equality quals be - * used for the initial positioning in _bt_first() when they - * are available. See comments in _bt_first(). + * Note: because we stop the scan as soon as any required equality + * qual fails, it is critical that equality quals be used for the + * initial positioning in _bt_first() when they are available. See + * comments in _bt_first(). */ if (ikey < so->numberOfRequiredKeys) { diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index 078d8529241..61bf93a904b 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.22 2005/06/06 17:01:22 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.23 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -101,7 +101,7 @@ _bt_restore_page(Page page, char *from, int len) (sizeof(BTItemData) - sizeof(IndexTupleData)); itemsz = MAXALIGN(itemsz); if (PageAddItem(page, (Item) from, itemsz, - FirstOffsetNumber, LP_USED) == InvalidOffsetNumber) + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber) elog(PANIC, "_bt_restore_page: can't add item to page"); from += itemsz; } @@ -136,8 +136,8 @@ _bt_restore_meta(Relation reln, XLogRecPtr lsn, pageop->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not - * essential but it makes the page look compressible to xlog.c. + * Set pd_lower just past the end of the metadata. This is not essential + * but it makes the page look compressible to xlog.c. 
*/ ((PageHeader) metapg)->pd_lower = ((char *) md + sizeof(BTMetaPageData)) - (char *) metapg; @@ -181,7 +181,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, if (!(record->xl_info & XLR_BKP_BLOCK_1)) { buffer = XLogReadBuffer(false, reln, - ItemPointerGetBlockNumber(&(xlrec->target.tid))); + ItemPointerGetBlockNumber(&(xlrec->target.tid))); if (!BufferIsValid(buffer)) elog(PANIC, "btree_insert_redo: block unfound"); page = (Page) BufferGetPage(buffer); @@ -217,8 +217,8 @@ btree_xlog_insert(bool isleaf, bool ismeta, if (!isleaf && incomplete_splits != NIL) { forget_matching_split(reln, xlrec->target.node, - ItemPointerGetBlockNumber(&(xlrec->target.tid)), - ItemPointerGetOffsetNumber(&(xlrec->target.tid)), + ItemPointerGetBlockNumber(&(xlrec->target.tid)), + ItemPointerGetOffsetNumber(&(xlrec->target.tid)), false); } } @@ -325,8 +325,8 @@ btree_xlog_split(bool onleft, bool isroot, if (xlrec->level > 0 && incomplete_splits != NIL) { forget_matching_split(reln, xlrec->target.node, - ItemPointerGetBlockNumber(&(xlrec->target.tid)), - ItemPointerGetOffsetNumber(&(xlrec->target.tid)), + ItemPointerGetBlockNumber(&(xlrec->target.tid)), + ItemPointerGetOffsetNumber(&(xlrec->target.tid)), false); } @@ -655,7 +655,7 @@ static void out_target(char *buf, xl_btreetid *target) { sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u", - target->node.spcNode, target->node.dbNode, target->node.relNode, + target->node.spcNode, target->node.dbNode, target->node.relNode, ItemPointerGetBlockNumber(&(target->tid)), ItemPointerGetOffsetNumber(&(target->tid))); } diff --git a/src/backend/access/rtree/rtget.c b/src/backend/access/rtree/rtget.c index 199a178c4fd..010a493d20e 100644 --- a/src/backend/access/rtree/rtget.c +++ b/src/backend/access/rtree/rtget.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/rtree/rtget.c,v 1.36 2005/10/06 02:29:14 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/rtree/rtget.c,v 1.37 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -32,12 +32,12 @@ rtgettuple(PG_FUNCTION_ARGS) IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0); ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1); RTreeScanOpaque so = (RTreeScanOpaque) s->opaque; - Page page; + Page page; OffsetNumber offnum; /* - * If we've already produced a tuple and the executor has informed - * us that it should be marked "killed", do so now. + * If we've already produced a tuple and the executor has informed us that + * it should be marked "killed", do so now. */ if (s->kill_prior_tuple && ItemPointerIsValid(&(s->currentItemData))) { @@ -48,14 +48,13 @@ rtgettuple(PG_FUNCTION_ARGS) } /* - * Get the next tuple that matches the search key; if asked to - * skip killed tuples, find the first non-killed tuple that - * matches. Return as soon as we've run out of matches or we've - * found an acceptable match. + * Get the next tuple that matches the search key; if asked to skip killed + * tuples, find the first non-killed tuple that matches. Return as soon as + * we've run out of matches or we've found an acceptable match. 
*/ for (;;) { - bool res = rtnext(s, dir); + bool res = rtnext(s, dir); if (res && s->ignore_killed_tuples) { @@ -73,7 +72,7 @@ Datum rtgetmulti(PG_FUNCTION_ARGS) { IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0); - ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1); + ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1); int32 max_tids = PG_GETARG_INT32(2); int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3); RTreeScanOpaque so = (RTreeScanOpaque) s->opaque; @@ -86,7 +85,7 @@ rtgetmulti(PG_FUNCTION_ARGS) res = rtnext(s, ForwardScanDirection); if (res && s->ignore_killed_tuples) { - Page page; + Page page; OffsetNumber offnum; offnum = ItemPointerGetOffsetNumber(&(s->currentItemData)); @@ -201,12 +200,11 @@ rtnext(IndexScanDesc s, ScanDirection dir) blk = ItemPointerGetBlockNumber(&(it->t_tid)); /* - * Note that we release the pin on the page as we descend - * down the tree, even though there's a good chance we'll - * eventually need to re-read the buffer later in this - * scan. This may or may not be optimal, but it doesn't - * seem likely to make a huge performance difference - * either way. + * Note that we release the pin on the page as we descend down the + * tree, even though there's a good chance we'll eventually need + * to re-read the buffer later in this scan. This may or may not + * be optimal, but it doesn't seem likely to make a huge + * performance difference either way. */ so->curbuf = ReleaseAndReadBuffer(so->curbuf, s->indexRelation, blk); p = BufferGetPage(so->curbuf); @@ -233,7 +231,7 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir) IndexTuple it; RTreePageOpaque po; RTreeScanOpaque so; - Page p; + Page p; so = (RTreeScanOpaque) s->opaque; p = BufferGetPage(so->curbuf); @@ -242,8 +240,8 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir) po = (RTreePageOpaque) PageGetSpecialPointer(p); /* - * If we modified the index during the scan, we may have a pointer to - * a ghost tuple, before the scan. If this is the case, back up one. + * If we modified the index during the scan, we may have a pointer to a + * ghost tuple, before the scan. If this is the case, back up one. 
*/ if (so->s_flags & RTS_CURBEFORE) @@ -277,7 +275,7 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir) } if (n >= FirstOffsetNumber && n <= maxoff) - return n; /* found a match on this page */ + return n; /* found a match on this page */ else return InvalidOffsetNumber; /* no match, go to next page */ } diff --git a/src/backend/access/rtree/rtproc.c b/src/backend/access/rtree/rtproc.c index d8d766f47d4..292dac6a130 100644 --- a/src/backend/access/rtree/rtproc.c +++ b/src/backend/access/rtree/rtproc.c @@ -15,7 +15,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/rtree/rtproc.c,v 1.42 2004/12/31 21:59:26 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/access/rtree/rtproc.c,v 1.43 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -146,8 +146,8 @@ rt_poly_size(PG_FUNCTION_ARGS) ydim; /* - * Can't just use GETARG because of possibility that input is NULL; - * since POLYGON is toastable, GETARG will try to inspect its value + * Can't just use GETARG because of possibility that input is NULL; since + * POLYGON is toastable, GETARG will try to inspect its value */ if (aptr == NULL) { diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c index 3b96b9ebe2d..d684101d261 100644 --- a/src/backend/access/rtree/rtree.c +++ b/src/backend/access/rtree/rtree.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.91 2005/08/10 21:36:46 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.92 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -121,8 +121,8 @@ rtbuild(PG_FUNCTION_ARGS) initRtstate(&buildstate.rtState, index); /* - * We expect to be called exactly once for any index relation. If - * that's not the case, big trouble's what we have. + * We expect to be called exactly once for any index relation. If that's + * not the case, big trouble's what we have. */ if (RelationGetNumberOfBlocks(index) != 0) elog(ERROR, "index \"%s\" already contains data", @@ -175,10 +175,10 @@ rtbuildCallback(Relation index, /* * Since we already have the index relation locked, we call rtdoinsert - * directly. Normal access method calls dispatch through rtinsert, - * which locks the relation for write. This is the right thing to do - * if you're inserting single tups, but not when you're initializing - * the whole index at once. + * directly. Normal access method calls dispatch through rtinsert, which + * locks the relation for write. This is the right thing to do if you're + * inserting single tups, but not when you're initializing the whole index + * at once. */ rtdoinsert(index, itup, &buildstate->rtState); @@ -226,9 +226,8 @@ rtinsert(PG_FUNCTION_ARGS) initRtstate(&rtState, r); /* - * Since rtree is not marked "amconcurrent" in pg_am, caller should - * have acquired exclusive lock on index relation. We need no locking - * here. + * Since rtree is not marked "amconcurrent" in pg_am, caller should have + * acquired exclusive lock on index relation. We need no locking here. 
*/ rtdoinsert(r, itup, &rtState); @@ -331,7 +330,7 @@ rttighten(Relation r, p = BufferGetPage(b); oldud = IndexTupleGetDatum(PageGetItem(p, - PageGetItemId(p, stk->rts_child))); + PageGetItemId(p, stk->rts_child))); FunctionCall2(&rtstate->sizeFn, oldud, PointerGetDatum(&old_size)); @@ -342,8 +341,8 @@ rttighten(Relation r, PointerGetDatum(&newd_size)); /* - * If newd_size == 0 we have degenerate rectangles, so we don't know - * if there was any change, so we have to assume there was. + * If newd_size == 0 we have degenerate rectangles, so we don't know if + * there was any change, so we have to assume there was. */ if ((newd_size == 0) || (newd_size != old_size)) { @@ -370,8 +369,8 @@ rttighten(Relation r, /* * The user may be defining an index on variable-sized data (like * polygons). If so, we need to get a constant-sized datum for - * insertion on the internal page. We do this by calling the - * union proc, which is required to return a rectangle. + * insertion on the internal page. We do this by calling the union + * proc, which is required to return a rectangle. */ tdatum = FunctionCall2(&rtstate->unionFn, datum, datum); @@ -428,8 +427,8 @@ rtdosplit(Relation r, /* * The root of the tree is the first block in the relation. If we're - * about to split the root, we need to do some hocus-pocus to enforce - * this guarantee. + * about to split the root, we need to do some hocus-pocus to enforce this + * guarantee. */ if (BufferGetBlockNumber(buffer) == P_ROOT) @@ -459,10 +458,9 @@ rtdosplit(Relation r, newitemoff = OffsetNumberNext(maxoff); /* - * spl_left contains a list of the offset numbers of the tuples that - * will go to the left page. For each offset number, get the tuple - * item, then add the item to the left page. Similarly for the right - * side. + * spl_left contains a list of the offset numbers of the tuples that will + * go to the left page. For each offset number, get the tuple item, then + * add the item to the left page. Similarly for the right side. */ /* fill left node */ @@ -525,13 +523,13 @@ rtdosplit(Relation r, * introduced in its structure by splitting this page. * * 2) "Tighten" the bounding box of the pointer to the left page in the - * parent node in the tree, if any. Since we moved a bunch of stuff - * off the left page, we expect it to get smaller. This happens in - * the internal insertion routine. + * parent node in the tree, if any. Since we moved a bunch of stuff off + * the left page, we expect it to get smaller. This happens in the + * internal insertion routine. * - * 3) Insert a pointer to the right page in the parent. This may cause - * the parent to split. If it does, we need to repeat steps one and - * two for each split node in the tree. + * 3) Insert a pointer to the right page in the parent. This may cause the + * parent to split. If it does, we need to repeat steps one and two for + * each split node in the tree. */ /* adjust active scans */ @@ -583,10 +581,10 @@ rtintinsert(Relation r, old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child)); /* - * This is a hack. Right now, we force rtree internal keys to be - * constant size. To fix this, need delete the old key and add both - * left and right for the two new pages. The insertion of left may - * force a split if the new left key is bigger than the old key. + * This is a hack. Right now, we force rtree internal keys to be constant + * size. To fix this, need delete the old key and add both left and right + * for the two new pages. 
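The rttighten path reflowed above compares the size of the parent key before and after unioning in the child's key, and only rewrites the parent entry when the size changed (or when degenerate zero-size keys make the comparison meaningless). A minimal standalone sketch of that check, using a plain Rect type and a simple area function instead of the opclass union/size support functions (all names here are illustrative, not the actual rtree datatypes):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { double x1, y1, x2, y2; } Rect;   /* illustrative stand-in for an rtree key */

    static Rect rect_union(Rect a, Rect b)
    {
        Rect u;
        u.x1 = a.x1 < b.x1 ? a.x1 : b.x1;
        u.y1 = a.y1 < b.y1 ? a.y1 : b.y1;
        u.x2 = a.x2 > b.x2 ? a.x2 : b.x2;
        u.y2 = a.y2 > b.y2 ? a.y2 : b.y2;
        return u;
    }

    static double rect_size(Rect r)
    {
        return (r.x2 - r.x1) * (r.y2 - r.y1);
    }

    /* Decide whether the parent key must be rewritten after inserting 'child'. */
    static bool needs_tighten(Rect parent, Rect child, Rect *new_parent)
    {
        double old_size = rect_size(parent);
        Rect   u        = rect_union(parent, child);
        double new_size = rect_size(u);

        *new_parent = u;
        /* Degenerate (zero-area) keys give no information, so assume a change. */
        return (new_size == 0) || (new_size != old_size);
    }

    int main(void)
    {
        Rect parent = {0, 0, 10, 10};
        Rect child  = {8, 8, 12, 12};
        Rect np;

        printf("tighten needed: %d\n", needs_tighten(parent, child, &np));
        return 0;
    }
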
The insertion of left may force a split if the + * new left key is bigger than the old key. */ if (IndexTupleSize(old) != IndexTupleSize(ltup)) @@ -603,8 +601,7 @@ rtintinsert(Relation r, rttighten(r, stk->rts_parent, newdatum, IndexTupleAttSize(ltup), rtstate); rtdosplit(r, b, stk->rts_parent, rtup, rtstate); - WriteBuffer(b); /* don't forget to release buffer! - - * 01/31/94 */ + WriteBuffer(b); /* don't forget to release buffer! - 01/31/94 */ } else { @@ -716,16 +713,15 @@ rtpicksplit(Relation r, int total_num_tuples, num_tuples_without_seeds, max_after_split; /* in Guttman's lingo, (M - m) */ - float diff; /* diff between cost of putting tuple left - * or right */ + float diff; /* diff between cost of putting tuple left or + * right */ SPLITCOST *cost_vector; int n; /* - * First, make sure the new item is not so large that we can't - * possibly fit it on a page, even by itself. (It's sufficient to - * make this test here, since any oversize tuple must lead to a page - * split attempt.) + * First, make sure the new item is not so large that we can't possibly + * fit it on a page, even by itself. (It's sufficient to make this test + * here, since any oversize tuple must lead to a page split attempt.) */ newitemsz = IndexTupleTotalSize(itup); if (newitemsz > RTPageAvailSpace) @@ -734,11 +730,10 @@ rtpicksplit(Relation r, errmsg("index row size %lu exceeds rtree maximum, %lu", (unsigned long) newitemsz, (unsigned long) RTPageAvailSpace), - errhint("Values larger than a buffer page cannot be indexed."))); + errhint("Values larger than a buffer page cannot be indexed."))); maxoff = PageGetMaxOffsetNumber(page); - newitemoff = OffsetNumberNext(maxoff); /* phony index for new - * item */ + newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */ total_num_tuples = newitemoff; num_tuples_without_seeds = total_num_tuples - 2; max_after_split = total_num_tuples / 2; /* works for m = M/2 */ @@ -793,8 +788,7 @@ rtpicksplit(Relation r, pfree(DatumGetPointer(inter_d)); /* - * are these a more promising split that what we've already - * seen? + * are these a more promising split that what we've already seen? */ if (size_waste > waste || firsttime) { @@ -809,10 +803,10 @@ rtpicksplit(Relation r, if (firsttime) { /* - * There is no possible split except to put the new item on its - * own page. Since we still have to compute the union rectangles, - * we play dumb and run through the split algorithm anyway, - * setting seed_1 = first item on page and seed_2 = new item. + * There is no possible split except to put the new item on its own + * page. Since we still have to compute the union rectangles, we play + * dumb and run through the split algorithm anyway, setting seed_1 = + * first item on page and seed_2 = new item. */ seed_1 = FirstOffsetNumber; seed_2 = newitemoff; @@ -840,25 +834,23 @@ rtpicksplit(Relation r, /* * Now split up the regions between the two seeds. * - * The cost_vector array will contain hints for determining where each - * tuple should go. Each record in the array will contain a boolean, - * choose_left, that indicates which node the tuple prefers to be on, - * and the absolute difference in cost between putting the tuple in - * its favored node and in the other node. + * The cost_vector array will contain hints for determining where each tuple + * should go. 
Each record in the array will contain a boolean, + * choose_left, that indicates which node the tuple prefers to be on, and + * the absolute difference in cost between putting the tuple in its + * favored node and in the other node. * * Later, we will sort the cost_vector in descending order by cost - * difference, and consider the tuples in that order for placement. - * That way, the tuples that *really* want to be in one node or the - * other get to choose first, and the tuples that don't really care - * choose last. + * difference, and consider the tuples in that order for placement. That + * way, the tuples that *really* want to be in one node or the other get + * to choose first, and the tuples that don't really care choose last. * * First, build the cost_vector array. The new index tuple will also be - * handled in this loop, and represented in the array, with - * i==newitemoff. + * handled in this loop, and represented in the array, with i==newitemoff. * - * In the case of variable size tuples it is possible that we only have - * the two seeds and no other tuples, in which case we don't do any of - * this cost_vector stuff. + * In the case of variable size tuples it is possible that we only have the + * two seeds and no other tuples, in which case we don't do any of this + * cost_vector stuff. */ /* to keep compiler quiet */ @@ -908,13 +900,13 @@ rtpicksplit(Relation r, } /* - * Now make the final decisions about where each tuple will go, and - * build the vectors to return in the SPLITVEC record. + * Now make the final decisions about where each tuple will go, and build + * the vectors to return in the SPLITVEC record. * - * The cost_vector array contains (descriptions of) all the tuples, in - * the order that we want to consider them, so we we just iterate - * through it and place each tuple in left or right nodes, according - * to the criteria described below. + * The cost_vector array contains (descriptions of) all the tuples, in the + * order that we want to consider them, so we we just iterate through it + * and place each tuple in left or right nodes, according to the criteria + * described below. */ left = v->spl_left; @@ -923,8 +915,8 @@ rtpicksplit(Relation r, v->spl_nright = 0; /* - * Place the seeds first. left avail space, left union, right avail - * space, and right union have already been adjusted for the seeds. + * Place the seeds first. left avail space, left union, right avail space, + * and right union have already been adjusted for the seeds. */ *left++ = seed_1; @@ -966,32 +958,30 @@ rtpicksplit(Relation r, PointerGetDatum(&size_beta)); /* - * We prefer the page that shows smaller enlargement of its union - * area (Guttman's algorithm), but we must take care that at least - * one page will still have room for the new item after this one - * is added. + * We prefer the page that shows smaller enlargement of its union area + * (Guttman's algorithm), but we must take care that at least one page + * will still have room for the new item after this one is added. * - * (We know that all the old items together can fit on one page, so - * we need not worry about any other problem than failing to fit - * the new item.) + * (We know that all the old items together can fit on one page, so we + * need not worry about any other problem than failing to fit the new + * item.) * - * Guttman's algorithm actually has two factors to consider (in - * order): 1. 
if one node has so many tuples already assigned to - * it that the other needs all the rest in order to satisfy the - * condition that neither node has fewer than m tuples, then that - * is decisive; 2. otherwise, choose the page that shows the - * smaller enlargement of its union area. + * Guttman's algorithm actually has two factors to consider (in order): + * 1. if one node has so many tuples already assigned to it that the + * other needs all the rest in order to satisfy the condition that + * neither node has fewer than m tuples, then that is decisive; 2. + * otherwise, choose the page that shows the smaller enlargement of + * its union area. * - * I have chosen m = M/2, where M is the maximum number of tuples on - * a page. (Actually, this is only strictly true for fixed size - * tuples. For variable size tuples, there still might have to be - * only one tuple on a page, if it is really big. But even with - * variable size tuples we still try to get m as close as possible - * to M/2.) + * I have chosen m = M/2, where M is the maximum number of tuples on a + * page. (Actually, this is only strictly true for fixed size tuples. + * For variable size tuples, there still might have to be only one + * tuple on a page, if it is really big. But even with variable size + * tuples we still try to get m as close as possible to M/2.) * - * The question of which page shows the smaller enlargement of its - * union area has already been answered, and the answer stored in - * the choose_left field of the SPLITCOST record. + * The question of which page shows the smaller enlargement of its union + * area has already been answered, and the answer stored in the + * choose_left field of the SPLITCOST record. */ left_feasible = (left_avail_space >= item_1_sz && ((left_avail_space - item_1_sz) >= newitemsz || @@ -1003,9 +993,8 @@ rtpicksplit(Relation r, { /* * Both feasible, use Guttman's algorithm. First check the m - * condition described above, and if that doesn't apply, - * choose the page with the smaller enlargement of its union - * area. + * condition described above, and if that doesn't apply, choose + * the page with the smaller enlargement of its union area. */ if (v->spl_nleft > max_after_split) choose_left = false; @@ -1153,9 +1142,8 @@ rtbulkdelete(PG_FUNCTION_ARGS) num_index_tuples = 0; /* - * Since rtree is not marked "amconcurrent" in pg_am, caller should - * have acquired exclusive lock on index relation. We need no locking - * here. + * Since rtree is not marked "amconcurrent" in pg_am, caller should have + * acquired exclusive lock on index relation. We need no locking here. */ /* diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c index 3f9f81befb0..577c6a64369 100644 --- a/src/backend/access/rtree/rtscan.c +++ b/src/backend/access/rtree/rtscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.59 2005/06/24 00:18:52 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.60 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -123,11 +123,11 @@ rtrescan(PG_FUNCTION_ARGS) /* * Scans on internal pages use different operators than they do on - * leaf pages. For example, if the user wants all boxes that - * exactly match (x1,y1,x2,y2), then on internal pages we need to - * find all boxes that contain (x1,y1,x2,y2). rtstrat.c knows - * how to pick the opclass member to use for internal pages. 
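The placement rule described in these comments has two steps: first the "m condition" (if one side already holds so many tuples that the other side needs all the rest to reach the minimum fill, that decides it), and otherwise the side whose bounding union would grow less. A small self-contained sketch of just that decision; the enlargement values and the max_after_split threshold are assumed inputs, and the real code additionally checks that the chosen page still has room:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Decide which side of a page split a tuple should go to.
     *
     * nleft/nright         - tuples already assigned to each side
     * max_after_split      - Guttman's (M - m); once a side exceeds this,
     *                        the rest must go to the other side
     * grow_left/grow_right - enlargement of each side's union if the tuple
     *                        were added there (computed elsewhere)
     */
    static bool
    choose_left_side(int nleft, int nright, int max_after_split,
                     double grow_left, double grow_right)
    {
        /* 1. the "m condition": one side is full enough that the remaining
         *    tuples are needed on the other side to satisfy minimum fill */
        if (nleft > max_after_split)
            return false;
        if (nright > max_after_split)
            return true;

        /* 2. otherwise prefer the side whose union area grows less */
        return grow_left < grow_right;
    }

    int main(void)
    {
        /* 10 tuples, m = M/2 = 5, so max_after_split = 5 */
        printf("%d\n", choose_left_side(3, 4, 5, 1.5, 0.5));  /* 0: right grows less */
        printf("%d\n", choose_left_side(6, 1, 5, 0.1, 9.0));  /* 0: left already over the limit */
        return 0;
    }
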
- * In some cases we need to negate the result of the opclass member. + * leaf pages. For example, if the user wants all boxes that exactly + * match (x1,y1,x2,y2), then on internal pages we need to find all + * boxes that contain (x1,y1,x2,y2). rtstrat.c knows how to pick the + * opclass member to use for internal pages. In some cases we need to + * negate the result of the opclass member. */ for (i = 0; i < s->numberOfKeys; i++) { @@ -333,9 +333,9 @@ ReleaseResources_rtree(void) RTScanList next; /* - * Note: this should be a no-op during normal query shutdown. However, - * in an abort situation ExecutorEnd is not called and so there may be - * open index scans to clean up. + * Note: this should be a no-op during normal query shutdown. However, in + * an abort situation ExecutorEnd is not called and so there may be open + * index scans to clean up. */ prev = NULL; @@ -440,8 +440,7 @@ adjustiptr(IndexScanDesc s, else { /* - * remember that we're before the current - * tuple + * remember that we're before the current tuple */ ItemPointerSet(iptr, blkno, FirstOffsetNumber); if (iptr == &(s->currentItemData)) diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 4a2e1f55927..f29f460ade5 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -24,7 +24,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.32 2005/08/20 23:26:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.33 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -222,14 +222,14 @@ StartupCLOG(void) /* * Zero out the remainder of the current clog page. Under normal * circumstances it should be zeroes already, but it seems at least - * theoretically possible that XLOG replay will have settled on a - * nextXID value that is less than the last XID actually used and - * marked by the previous database lifecycle (since subtransaction - * commit writes clog but makes no WAL entry). Let's just be safe. - * (We need not worry about pages beyond the current one, since those - * will be zeroed when first used. For the same reason, there is no - * need to do anything when nextXid is exactly at a page boundary; and - * it's likely that the "current" page doesn't exist yet in that case.) + * theoretically possible that XLOG replay will have settled on a nextXID + * value that is less than the last XID actually used and marked by the + * previous database lifecycle (since subtransaction commit writes clog + * but makes no WAL entry). Let's just be safe. (We need not worry about + * pages beyond the current one, since those will be zeroed when first + * used. For the same reason, there is no need to do anything when + * nextXid is exactly at a page boundary; and it's likely that the + * "current" page doesn't exist yet in that case.) */ if (TransactionIdToPgIndex(xid) != 0) { @@ -325,8 +325,8 @@ TruncateCLOG(TransactionId oldestXact) int cutoffPage; /* - * The cutoff point is the start of the segment containing oldestXact. - * We pass the *page* containing oldestXact to SimpleLruTruncate. + * The cutoff point is the start of the segment containing oldestXact. We + * pass the *page* containing oldestXact to SimpleLruTruncate. 
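TruncateCLOG works in units of SLRU pages and segments: the oldest XID of interest is mapped to a page number, and SimpleLruTruncate then drops only whole segments strictly older than the segment containing that page. A rough sketch of the arithmetic, using assumed per-page and per-segment constants rather than the real clog macros (the actual values derive from BLCKSZ, the per-transaction status bits, and SLRU_PAGES_PER_SEGMENT):

    #include <stdint.h>
    #include <stdio.h>

    #define XACTS_PER_PAGE      32768   /* illustrative, not the real macro */
    #define PAGES_PER_SEGMENT   32

    static int xid_to_page(uint32_t xid)
    {
        return (int) (xid / XACTS_PER_PAGE);
    }

    int main(void)
    {
        uint32_t oldestXact = 1234567;
        int cutoffPage = xid_to_page(oldestXact);

        /* Truncation rounds the cutoff down to a segment boundary, so only
         * whole segments before the one holding oldestXact are removed. */
        int cutoffSegmentStart = cutoffPage - cutoffPage % PAGES_PER_SEGMENT;

        printf("oldestXact %u -> page %d, segment starts at page %d\n",
               oldestXact, cutoffPage, cutoffSegmentStart);
        return 0;
    }
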
*/ cutoffPage = TransactionIdToPage(oldestXact); diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 1adaebb6d80..ffe14ed6bf1 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -4,15 +4,15 @@ * PostgreSQL multi-transaction-log manager * * The pg_multixact manager is a pg_clog-like manager that stores an array - * of TransactionIds for each MultiXactId. It is a fundamental part of the - * shared-row-lock implementation. A share-locked tuple stores a + * of TransactionIds for each MultiXactId. It is a fundamental part of the + * shared-row-lock implementation. A share-locked tuple stores a * MultiXactId in its Xmax, and a transaction that needs to wait for the * tuple to be unlocked can sleep on the potentially-several TransactionIds * that compose the MultiXactId. * * We use two SLRU areas, one for storing the offsets at which the data * starts for each MultiXactId in the other one. This trick allows us to - * store variable length arrays of TransactionIds. (We could alternatively + * store variable length arrays of TransactionIds. (We could alternatively * use one area containing counts and TransactionIds, with valid MultiXactId * values pointing at slots containing counts; but that way seems less robust * since it would get completely confused if someone inquired about a bogus @@ -32,7 +32,7 @@ * * Like clog.c, and unlike subtrans.c, we have to preserve state across * crashes and ensure that MXID and offset numbering increases monotonically - * across a crash. We do this in the same way as it's done for transaction + * across a crash. We do this in the same way as it's done for transaction * IDs: the WAL record is guaranteed to contain evidence of every MXID we * could need to worry about, and we just make sure that at the end of * replay, the next-MXID and next-offset counters are at least as large as @@ -42,7 +42,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.8 2005/08/20 23:26:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.9 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -59,13 +59,13 @@ /* - * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is + * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is * used everywhere else in Postgres. * * Note: because both MultiXactOffsets and TransactionIds are 32 bits and * wrap around at 0xFFFFFFFF, MultiXact page numbering also wraps around at * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE, and segment numbering at - * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no + * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no * explicit notice of that fact in this module, except when comparing segment * and page numbers in TruncateMultiXact * (see MultiXact{Offset,Member}PagePrecedes). @@ -92,11 +92,11 @@ static SlruCtlData MultiXactOffsetCtlData; static SlruCtlData MultiXactMemberCtlData; -#define MultiXactOffsetCtl (&MultiXactOffsetCtlData) -#define MultiXactMemberCtl (&MultiXactMemberCtlData) +#define MultiXactOffsetCtl (&MultiXactOffsetCtlData) +#define MultiXactMemberCtl (&MultiXactMemberCtlData) /* - * MultiXact state shared across all backends. 
All this state is protected + * MultiXact state shared across all backends. All this state is protected * by MultiXactGenLock. (We also use MultiXactOffsetControlLock and * MultiXactMemberControlLock to guard accesses to the two sets of SLRU * buffers. For concurrency's sake, we avoid holding more than one of these @@ -105,50 +105,48 @@ static SlruCtlData MultiXactMemberCtlData; typedef struct MultiXactStateData { /* next-to-be-assigned MultiXactId */ - MultiXactId nextMXact; + MultiXactId nextMXact; /* next-to-be-assigned offset */ - MultiXactOffset nextOffset; + MultiXactOffset nextOffset; /* the Offset SLRU area was last truncated at this MultiXactId */ - MultiXactId lastTruncationPoint; + MultiXactId lastTruncationPoint; /* - * Per-backend data starts here. We have two arrays stored in - * the area immediately following the MultiXactStateData struct. - * Each is indexed by BackendId. (Note: valid BackendIds run from 1 to - * MaxBackends; element zero of each array is never used.) + * Per-backend data starts here. We have two arrays stored in the area + * immediately following the MultiXactStateData struct. Each is indexed by + * BackendId. (Note: valid BackendIds run from 1 to MaxBackends; element + * zero of each array is never used.) * - * OldestMemberMXactId[k] is the oldest MultiXactId each backend's - * current transaction(s) could possibly be a member of, or - * InvalidMultiXactId when the backend has no live transaction that - * could possibly be a member of a MultiXact. Each backend sets its - * entry to the current nextMXact counter just before first acquiring a - * shared lock in a given transaction, and clears it at transaction end. - * (This works because only during or after acquiring a shared lock - * could an XID possibly become a member of a MultiXact, and that - * MultiXact would have to be created during or after the lock - * acquisition.) + * OldestMemberMXactId[k] is the oldest MultiXactId each backend's current + * transaction(s) could possibly be a member of, or InvalidMultiXactId + * when the backend has no live transaction that could possibly be a + * member of a MultiXact. Each backend sets its entry to the current + * nextMXact counter just before first acquiring a shared lock in a given + * transaction, and clears it at transaction end. (This works because only + * during or after acquiring a shared lock could an XID possibly become a + * member of a MultiXact, and that MultiXact would have to be created + * during or after the lock acquisition.) * - * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's - * current transaction(s) think is potentially live, or InvalidMultiXactId - * when not in a transaction or not in a transaction that's paid any - * attention to MultiXacts yet. This is computed when first needed in - * a given transaction, and cleared at transaction end. We can compute - * it as the minimum of the valid OldestMemberMXactId[] entries at the - * time we compute it (using nextMXact if none are valid). Each backend - * is required not to attempt to access any SLRU data for MultiXactIds - * older than its own OldestVisibleMXactId[] setting; this is necessary - * because the checkpointer could truncate away such data at any instant. + * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's current + * transaction(s) think is potentially live, or InvalidMultiXactId when + * not in a transaction or not in a transaction that's paid any attention + * to MultiXacts yet. 
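As the rest of this comment goes on to explain, these per-backend arrays let each backend compute its OldestVisibleMXactId, and let the checkpointer compute a safe truncation point, as the minimum of the valid entries with nextMXact as the fallback. A simplified standalone sketch of that scan, using an ordinary array and a plain < comparison instead of the shared-memory arrays and the wraparound-aware MultiXactIdPrecedes test:

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_MXID   0          /* stand-in for InvalidMultiXactId */
    #define MAX_BACKENDS   8

    typedef uint32_t MXid;

    /*
     * Oldest valid entry among the per-backend values, or next_mxid if none
     * are valid.  The real code uses MultiXactIdPrecedes() so the comparison
     * stays correct across counter wraparound.
     */
    static MXid oldest_valid(const MXid *entries, int n, MXid next_mxid)
    {
        MXid oldest = next_mxid;

        for (int i = 1; i <= n; i++)    /* element 0 is unused, like the real arrays */
        {
            if (entries[i] != INVALID_MXID && entries[i] < oldest)
                oldest = entries[i];
        }
        return oldest;
    }

    int main(void)
    {
        MXid oldestMember[MAX_BACKENDS + 1] = {0, 0, 105, 0, 98, 0, 0, 0, 0};

        printf("safe truncation point: %u\n",
               oldest_valid(oldestMember, MAX_BACKENDS, 120));   /* 98 */
        return 0;
    }
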
This is computed when first needed in a given + * transaction, and cleared at transaction end. We can compute it as the + * minimum of the valid OldestMemberMXactId[] entries at the time we + * compute it (using nextMXact if none are valid). Each backend is + * required not to attempt to access any SLRU data for MultiXactIds older + * than its own OldestVisibleMXactId[] setting; this is necessary because + * the checkpointer could truncate away such data at any instant. * - * The checkpointer can compute the safe truncation point as the oldest - * valid value among all the OldestMemberMXactId[] and - * OldestVisibleMXactId[] entries, or nextMXact if none are valid. - * Clearly, it is not possible for any later-computed OldestVisibleMXactId - * value to be older than this, and so there is no risk of truncating - * data that is still needed. + * The checkpointer can compute the safe truncation point as the oldest valid + * value among all the OldestMemberMXactId[] and OldestVisibleMXactId[] + * entries, or nextMXact if none are valid. Clearly, it is not possible + * for any later-computed OldestVisibleMXactId value to be older than + * this, and so there is no risk of truncating data that is still needed. */ - MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */ + MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */ } MultiXactStateData; /* Pointers to the state data in shared memory */ @@ -176,13 +174,13 @@ static MultiXactId *OldestVisibleMXactId; typedef struct mXactCacheEnt { struct mXactCacheEnt *next; - MultiXactId multi; - int nxids; - TransactionId xids[1]; /* VARIABLE LENGTH ARRAY */ + MultiXactId multi; + int nxids; + TransactionId xids[1]; /* VARIABLE LENGTH ARRAY */ } mXactCacheEnt; -static mXactCacheEnt *MXactCache = NULL; -static MemoryContext MXactContext = NULL; +static mXactCacheEnt *MXactCache = NULL; +static MemoryContext MXactContext = NULL; #ifdef MULTIXACT_DEBUG @@ -201,14 +199,15 @@ static MemoryContext MXactContext = NULL; static void MultiXactIdSetOldestVisible(void); static MultiXactId CreateMultiXactId(int nxids, TransactionId *xids); static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset, - int nxids, TransactionId *xids); + int nxids, TransactionId *xids); static MultiXactId GetNewMultiXactId(int nxids, MultiXactOffset *offset); /* MultiXact cache management */ static MultiXactId mXactCacheGetBySet(int nxids, TransactionId *xids); -static int mXactCacheGetById(MultiXactId multi, TransactionId **xids); +static int mXactCacheGetById(MultiXactId multi, TransactionId **xids); static void mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids); -static int xidComparator(const void *arg1, const void *arg2); +static int xidComparator(const void *arg1, const void *arg2); + #ifdef MULTIXACT_DEBUG static char *mxid_to_string(MultiXactId multi, int nxids, TransactionId *xids); #endif @@ -220,7 +219,7 @@ static bool MultiXactOffsetPagePrecedes(int page1, int page2); static bool MultiXactMemberPagePrecedes(int page1, int page2); static bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2); static bool MultiXactOffsetPrecedes(MultiXactOffset offset1, - MultiXactOffset offset2); + MultiXactOffset offset2); static void ExtendMultiXactOffset(MultiXactId multi); static void ExtendMultiXactMember(MultiXactOffset offset, int nmembers); static void TruncateMultiXact(void); @@ -239,8 +238,8 @@ static void WriteMZeroPageXlogRec(int pageno, uint8 info); MultiXactId MultiXactIdCreate(TransactionId xid1, TransactionId xid2) { - 
MultiXactId newMulti; - TransactionId xids[2]; + MultiXactId newMulti; + TransactionId xids[2]; AssertArg(TransactionIdIsValid(xid1)); AssertArg(TransactionIdIsValid(xid2)); @@ -248,9 +247,9 @@ MultiXactIdCreate(TransactionId xid1, TransactionId xid2) Assert(!TransactionIdEquals(xid1, xid2)); /* - * Note: unlike MultiXactIdExpand, we don't bother to check that both - * XIDs are still running. In typical usage, xid2 will be our own XID - * and the caller just did a check on xid1, so it'd be wasted effort. + * Note: unlike MultiXactIdExpand, we don't bother to check that both XIDs + * are still running. In typical usage, xid2 will be our own XID and the + * caller just did a check on xid1, so it'd be wasted effort. */ xids[0] = xid1; @@ -281,12 +280,12 @@ MultiXactIdCreate(TransactionId xid1, TransactionId xid2) MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid) { - MultiXactId newMulti; - TransactionId *members; - TransactionId *newMembers; - int nmembers; - int i; - int j; + MultiXactId newMulti; + TransactionId *members; + TransactionId *newMembers; + int nmembers; + int i; + int j; AssertArg(MultiXactIdIsValid(multi)); AssertArg(TransactionIdIsValid(xid)); @@ -313,8 +312,8 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid) } /* - * If the TransactionId is already a member of the MultiXactId, - * just return the existing MultiXactId. + * If the TransactionId is already a member of the MultiXactId, just + * return the existing MultiXactId. */ for (i = 0; i < nmembers; i++) { @@ -329,9 +328,9 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid) /* * Determine which of the members of the MultiXactId are still running, - * and use them to create a new one. (Removing dead members is just - * an optimization, but a useful one. Note we have the same race - * condition here as above: j could be 0 at the end of the loop.) + * and use them to create a new one. (Removing dead members is just an + * optimization, but a useful one. Note we have the same race condition + * here as above: j could be 0 at the end of the loop.) */ newMembers = (TransactionId *) palloc(sizeof(TransactionId) * (nmembers + 1)); @@ -355,7 +354,7 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid) /* * MultiXactIdIsRunning - * Returns whether a MultiXactId is "running". + * Returns whether a MultiXactId is "running". * * We return true if at least one member of the given MultiXactId is still * running. Note that a "false" result is certain not to change, @@ -365,9 +364,9 @@ bool MultiXactIdIsRunning(MultiXactId multi) { TransactionId *members; - TransactionId myXid; - int nmembers; - int i; + TransactionId myXid; + int nmembers; + int i; debug_elog3(DEBUG2, "IsRunning %u?", multi); @@ -394,7 +393,7 @@ MultiXactIdIsRunning(MultiXactId multi) /* * This could be made faster by having another entry point in procarray.c, - * walking the PGPROC array only once for all the members. But in most + * walking the PGPROC array only once for all the members. But in most * cases nmembers should be small enough that it doesn't much matter. */ for (i = 0; i < nmembers; i++) @@ -436,19 +435,19 @@ MultiXactIdSetOldestMember(void) /* * You might think we don't need to acquire a lock here, since - * fetching and storing of TransactionIds is probably atomic, - * but in fact we do: suppose we pick up nextMXact and then - * lose the CPU for a long time. 
Someone else could advance - * nextMXact, and then another someone else could compute an - * OldestVisibleMXactId that would be after the value we are - * going to store when we get control back. Which would be wrong. + * fetching and storing of TransactionIds is probably atomic, but in + * fact we do: suppose we pick up nextMXact and then lose the CPU for + * a long time. Someone else could advance nextMXact, and then + * another someone else could compute an OldestVisibleMXactId that + * would be after the value we are going to store when we get control + * back. Which would be wrong. */ LWLockAcquire(MultiXactGenLock, LW_EXCLUSIVE); /* * We have to beware of the possibility that nextMXact is in the - * wrapped-around state. We don't fix the counter itself here, - * but we must be sure to store a valid value in our array entry. + * wrapped-around state. We don't fix the counter itself here, but we + * must be sure to store a valid value in our array entry. */ nextMXact = MultiXactState->nextMXact; if (nextMXact < FirstMultiXactId) @@ -475,7 +474,7 @@ MultiXactIdSetOldestMember(void) * The value to set is the oldest of nextMXact and all the valid per-backend * OldestMemberMXactId[] entries. Because of the locking we do, we can be * certain that no subsequent call to MultiXactIdSetOldestMember can set - * an OldestMemberMXactId[] entry older than what we compute here. Therefore + * an OldestMemberMXactId[] entry older than what we compute here. Therefore * there is no live transaction, now or later, that can be a member of any * MultiXactId older than the OldestVisibleMXactId we compute here. */ @@ -485,14 +484,14 @@ MultiXactIdSetOldestVisible(void) if (!MultiXactIdIsValid(OldestVisibleMXactId[MyBackendId])) { MultiXactId oldestMXact; - int i; + int i; LWLockAcquire(MultiXactGenLock, LW_EXCLUSIVE); /* * We have to beware of the possibility that nextMXact is in the - * wrapped-around state. We don't fix the counter itself here, - * but we must be sure to store a valid value in our array entry. + * wrapped-around state. We don't fix the counter itself here, but we + * must be sure to store a valid value in our array entry. */ oldestMXact = MultiXactState->nextMXact; if (oldestMXact < FirstMultiXactId) @@ -535,17 +534,17 @@ void MultiXactIdWait(MultiXactId multi) { TransactionId *members; - int nmembers; + int nmembers; nmembers = GetMultiXactIdMembers(multi, &members); if (nmembers >= 0) { - int i; + int i; for (i = 0; i < nmembers; i++) { - TransactionId member = members[i]; + TransactionId member = members[i]; debug_elog4(DEBUG2, "MultiXactIdWait: waiting for %d (%u)", i, member); @@ -564,19 +563,19 @@ MultiXactIdWait(MultiXactId multi) bool ConditionalMultiXactIdWait(MultiXactId multi) { - bool result = true; + bool result = true; TransactionId *members; - int nmembers; + int nmembers; nmembers = GetMultiXactIdMembers(multi, &members); if (nmembers >= 0) { - int i; + int i; for (i = 0; i < nmembers; i++) { - TransactionId member = members[i]; + TransactionId member = members[i]; debug_elog4(DEBUG2, "ConditionalMultiXactIdWait: trying %d (%u)", i, member); @@ -596,7 +595,7 @@ ConditionalMultiXactIdWait(MultiXactId multi) /* * CreateMultiXactId - * Make a new MultiXactId + * Make a new MultiXactId * * Make XLOG, SLRU and cache entries for a new MultiXactId, recording the * given TransactionIds as members. Returns the newly created MultiXactId. 
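MultiXactIdWait and ConditionalMultiXactIdWait, reflowed above, both reduce to iterating over the member XIDs: the unconditional form sleeps on each member in turn, while the conditional form gives up as soon as one member cannot be acquired. A schematic sketch of the conditional variant; try_wait_for_xact is a hypothetical placeholder for the real per-transaction lock attempt, and details such as skipping our own XID are omitted:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t Xid;

    /* Hypothetical stand-in: true if the transaction is already gone (or its
     * lock could be taken immediately), false if we would have to block. */
    static bool try_wait_for_xact(Xid xid)
    {
        return (xid % 2) == 0;      /* fake result, just for the demo */
    }

    /* Conditional wait: succeed only if no member would make us block. */
    static bool conditional_multixact_wait(const Xid *members, int nmembers)
    {
        for (int i = 0; i < nmembers; i++)
        {
            if (!try_wait_for_xact(members[i]))
                return false;       /* a member is still running; do not block */
        }
        return true;
    }

    int main(void)
    {
        Xid members[] = {100, 102, 104};
        printf("%d\n", conditional_multixact_wait(members, 3));   /* 1 */

        Xid members2[] = {100, 103};
        printf("%d\n", conditional_multixact_wait(members2, 2));  /* 0 */
        return 0;
    }
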
@@ -606,7 +605,7 @@ ConditionalMultiXactIdWait(MultiXactId multi) static MultiXactId CreateMultiXactId(int nxids, TransactionId *xids) { - MultiXactId multi; + MultiXactId multi; MultiXactOffset offset; XLogRecData rdata[2]; xl_multixact_create xlrec; @@ -641,15 +640,15 @@ CreateMultiXactId(int nxids, TransactionId *xids) /* * Make an XLOG entry describing the new MXID. * - * Note: we need not flush this XLOG entry to disk before proceeding. - * The only way for the MXID to be referenced from any data page is - * for heap_lock_tuple() to have put it there, and heap_lock_tuple() - * generates an XLOG record that must follow ours. The normal LSN - * interlock between the data page and that XLOG record will ensure - * that our XLOG record reaches disk first. If the SLRU members/offsets - * data reaches disk sooner than the XLOG record, we do not care because - * we'll overwrite it with zeroes unless the XLOG record is there too; - * see notes at top of this file. + * Note: we need not flush this XLOG entry to disk before proceeding. The + * only way for the MXID to be referenced from any data page is for + * heap_lock_tuple() to have put it there, and heap_lock_tuple() generates + * an XLOG record that must follow ours. The normal LSN interlock between + * the data page and that XLOG record will ensure that our XLOG record + * reaches disk first. If the SLRU members/offsets data reaches disk + * sooner than the XLOG record, we do not care because we'll overwrite it + * with zeroes unless the XLOG record is there too; see notes at top of + * this file. */ xlrec.mid = multi; xlrec.moff = offset; @@ -702,9 +701,9 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset, /* * Note: we pass the MultiXactId to SimpleLruReadPage as the "transaction" * to complain about if there's any I/O error. This is kinda bogus, but - * since the errors will always give the full pathname, it should be - * clear enough that a MultiXactId is really involved. Perhaps someday - * we'll take the trouble to generalize the slru.c error reporting code. + * since the errors will always give the full pathname, it should be clear + * enough that a MultiXactId is really involved. Perhaps someday we'll + * take the trouble to generalize the slru.c error reporting code. */ slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, multi); offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno]; @@ -750,7 +749,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset, * GetNewMultiXactId * Get the next MultiXactId. * - * Also, reserve the needed amount of space in the "members" area. The + * Also, reserve the needed amount of space in the "members" area. The * starting offset of the reserved space is returned in *offset. * * This may generate XLOG records for expansion of the offsets and/or members @@ -761,7 +760,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset, static MultiXactId GetNewMultiXactId(int nxids, MultiXactOffset *offset) { - MultiXactId result; + MultiXactId result; debug_elog3(DEBUG2, "GetNew: for %d xids", nxids); @@ -785,8 +784,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset) * Advance counter. As in GetNewTransactionId(), this must not happen * until after ExtendMultiXactOffset has succeeded! * - * We don't care about MultiXactId wraparound here; it will be handled by - * the next iteration. But note that nextMXact may be InvalidMultiXactId + * We don't care about MultiXactId wraparound here; it will be handled by the + * next iteration. 
But note that nextMXact may be InvalidMultiXactId * after this routine exits, so anyone else looking at the variable must * be prepared to deal with that. */ @@ -809,7 +808,7 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset) /* * GetMultiXactIdMembers - * Returns the set of TransactionIds that make up a MultiXactId + * Returns the set of TransactionIds that make up a MultiXactId * * We return -1 if the MultiXactId is too old to possibly have any members * still running; in that case we have not actually looked them up, and @@ -822,13 +821,13 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids) int prev_pageno; int entryno; int slotno; - MultiXactOffset *offptr; - MultiXactOffset offset; + MultiXactOffset *offptr; + MultiXactOffset offset; int length; int i; - MultiXactId nextMXact; - MultiXactId tmpMXact; - MultiXactOffset nextOffset; + MultiXactId nextMXact; + MultiXactId tmpMXact; + MultiXactOffset nextOffset; TransactionId *ptr; debug_elog3(DEBUG2, "GetMembers: asked for %u", multi); @@ -850,13 +849,13 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids) /* * We check known limits on MultiXact before resorting to the SLRU area. * - * An ID older than our OldestVisibleMXactId[] entry can't possibly still - * be running, and we'd run the risk of trying to read already-truncated - * SLRU data if we did try to examine it. + * An ID older than our OldestVisibleMXactId[] entry can't possibly still be + * running, and we'd run the risk of trying to read already-truncated SLRU + * data if we did try to examine it. * - * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is - * seen, it implies undetected ID wraparound has occurred. We just - * silently assume that such an ID is no longer running. + * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is seen, + * it implies undetected ID wraparound has occurred. We just silently + * assume that such an ID is no longer running. * * Shared lock is enough here since we aren't modifying any global state. * Also, we can examine our own OldestVisibleMXactId without the lock, @@ -880,9 +879,9 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids) } /* - * Before releasing the lock, save the current counter values, because - * the target MultiXactId may be just one less than nextMXact. We will - * need to use nextOffset as the endpoint if so. + * Before releasing the lock, save the current counter values, because the + * target MultiXactId may be just one less than nextMXact. We will need + * to use nextOffset as the endpoint if so. */ nextMXact = MultiXactState->nextMXact; nextOffset = MultiXactState->nextOffset; @@ -902,11 +901,11 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids) /* * How many members do we need to read? If we are at the end of the - * assigned MultiXactIds, use the offset just saved above. Else we - * need to check the MultiXactId following ours. + * assigned MultiXactIds, use the offset just saved above. Else we need + * to check the MultiXactId following ours. * - * Use the same increment rule as GetNewMultiXactId(), that is, don't - * handle wraparound explicitly until needed. + * Use the same increment rule as GetNewMultiXactId(), that is, don't handle + * wraparound explicitly until needed. 
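The length computation described here is: the number of members of MultiXactId m is the starting offset recorded for m+1 minus the starting offset recorded for m, except when m is the latest assigned MultiXactId, in which case the shared nextOffset counter supplies the endpoint. A standalone sketch with an ordinary in-memory offsets array standing in for the SLRU area:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t MXid;
    typedef uint32_t MXOffset;

    /*
     * offsets[m] = starting offset of MultiXactId m's member array.
     * next_mxid / next_offset play the role of the shared counters.
     */
    static int members_length(const MXOffset *offsets, MXid multi,
                              MXid next_mxid, MXOffset next_offset)
    {
        MXOffset start = offsets[multi];
        MXOffset end;

        if (multi + 1 == next_mxid)
            end = next_offset;          /* we are the newest MultiXactId */
        else
            end = offsets[multi + 1];   /* otherwise use the next one's start */

        return (int) (end - start);
    }

    int main(void)
    {
        /* MultiXactIds 1..3 with member arrays of length 2, 3 and 2 */
        MXOffset offsets[] = {0, 0, 2, 5};

        printf("len(2) = %d\n", members_length(offsets, 2, 4, 7));  /* 3 */
        printf("len(3) = %d\n", members_length(offsets, 3, 4, 7));  /* 2: uses next_offset */
        return 0;
    }
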
*/ tmpMXact = multi + 1; @@ -974,9 +973,9 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids) /* * mXactCacheGetBySet - * returns a MultiXactId from the cache based on the set of - * TransactionIds that compose it, or InvalidMultiXactId if - * none matches. + * returns a MultiXactId from the cache based on the set of + * TransactionIds that compose it, or InvalidMultiXactId if + * none matches. * * This is helpful, for example, if two transactions want to lock a huge * table. By using the cache, the second will use the same MultiXactId @@ -988,7 +987,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids) static MultiXactId mXactCacheGetBySet(int nxids, TransactionId *xids) { - mXactCacheEnt *entry; + mXactCacheEnt *entry; debug_elog3(DEBUG2, "CacheGet: looking for %s", mxid_to_string(InvalidMultiXactId, nxids, xids)); @@ -1015,8 +1014,8 @@ mXactCacheGetBySet(int nxids, TransactionId *xids) /* * mXactCacheGetById - * returns the composing TransactionId set from the cache for a - * given MultiXactId, if present. + * returns the composing TransactionId set from the cache for a + * given MultiXactId, if present. * * If successful, *xids is set to the address of a palloc'd copy of the * TransactionId set. Return value is number of members, or -1 on failure. @@ -1024,7 +1023,7 @@ mXactCacheGetBySet(int nxids, TransactionId *xids) static int mXactCacheGetById(MultiXactId multi, TransactionId **xids) { - mXactCacheEnt *entry; + mXactCacheEnt *entry; debug_elog3(DEBUG2, "CacheGet: looking for %u", multi); @@ -1032,7 +1031,7 @@ mXactCacheGetById(MultiXactId multi, TransactionId **xids) { if (entry->multi == multi) { - TransactionId *ptr; + TransactionId *ptr; Size size; size = sizeof(TransactionId) * entry->nxids; @@ -1042,7 +1041,7 @@ mXactCacheGetById(MultiXactId multi, TransactionId **xids) memcpy(ptr, entry->xids, size); debug_elog3(DEBUG2, "CacheGet: found %s", - mxid_to_string(multi, entry->nxids, entry->xids)); + mxid_to_string(multi, entry->nxids, entry->xids)); return entry->nxids; } } @@ -1053,12 +1052,12 @@ mXactCacheGetById(MultiXactId multi, TransactionId **xids) /* * mXactCachePut - * Add a new MultiXactId and its composing set into the local cache. + * Add a new MultiXactId and its composing set into the local cache. */ static void mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids) { - mXactCacheEnt *entry; + mXactCacheEnt *entry; debug_elog3(DEBUG2, "CachePut: storing %s", mxid_to_string(multi, nxids, xids)); @@ -1092,7 +1091,7 @@ mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids) /* * xidComparator - * qsort comparison function for XIDs + * qsort comparison function for XIDs * * We don't need to use wraparound comparison for XIDs, and indeed must * not do so since that does not respect the triangle inequality! 
Any @@ -1101,8 +1100,8 @@ mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids) static int xidComparator(const void *arg1, const void *arg2) { - TransactionId xid1 = * (const TransactionId *) arg1; - TransactionId xid2 = * (const TransactionId *) arg2; + TransactionId xid1 = *(const TransactionId *) arg1; + TransactionId xid2 = *(const TransactionId *) arg2; if (xid1 > xid2) return 1; @@ -1115,8 +1114,9 @@ xidComparator(const void *arg1, const void *arg2) static char * mxid_to_string(MultiXactId multi, int nxids, TransactionId *xids) { - char *str = palloc(15 * (nxids + 1) + 4); - int i; + char *str = palloc(15 * (nxids + 1) + 4); + int i; + snprintf(str, 47, "%u %d[%u", multi, nxids, xids[0]); for (i = 1; i < nxids; i++) @@ -1137,18 +1137,18 @@ void AtEOXact_MultiXact(void) { /* - * Reset our OldestMemberMXactId and OldestVisibleMXactId values, - * both of which should only be valid while within a transaction. + * Reset our OldestMemberMXactId and OldestVisibleMXactId values, both of + * which should only be valid while within a transaction. * - * We assume that storing a MultiXactId is atomic and so we need - * not take MultiXactGenLock to do this. + * We assume that storing a MultiXactId is atomic and so we need not take + * MultiXactGenLock to do this. */ OldestMemberMXactId[MyBackendId] = InvalidMultiXactId; OldestVisibleMXactId[MyBackendId] = InvalidMultiXactId; /* - * Discard the local MultiXactId cache. Since MXactContext was created - * as a child of TopTransactionContext, we needn't delete it explicitly. + * Discard the local MultiXactId cache. Since MXactContext was created as + * a child of TopTransactionContext, we needn't delete it explicitly. */ MXactContext = NULL; MXactCache = NULL; @@ -1156,7 +1156,7 @@ AtEOXact_MultiXact(void) /* * Initialization of shared memory for MultiXact. We use two SLRU areas, - * thus double memory. Also, reserve space for the shared MultiXactState + * thus double memory. Also, reserve space for the shared MultiXactState * struct and the per-backend MultiXactId arrays (two of those, too). */ Size @@ -1178,7 +1178,7 @@ MultiXactShmemSize(void) void MultiXactShmemInit(void) { - bool found; + bool found; debug_elog2(DEBUG2, "Shared Memory Init for MultiXact"); @@ -1205,8 +1205,8 @@ MultiXactShmemInit(void) Assert(found); /* - * Set up array pointers. Note that perBackendXactIds[0] is wasted - * space since we only use indexes 1..MaxBackends in each array. + * Set up array pointers. Note that perBackendXactIds[0] is wasted space + * since we only use indexes 1..MaxBackends in each array. */ OldestMemberMXactId = MultiXactState->perBackendXactIds; OldestVisibleMXactId = OldestMemberMXactId + MaxBackends; @@ -1214,7 +1214,7 @@ MultiXactShmemInit(void) /* * This func must be called ONCE on system install. It creates the initial - * MultiXact segments. (The MultiXacts directories are assumed to have been + * MultiXact segments. (The MultiXacts directories are assumed to have been * created by initdb, and MultiXactShmemInit must have been called already.) */ void @@ -1287,7 +1287,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog) * This must be called ONCE during postmaster or standalone-backend startup. * * StartupXLOG has already established nextMXact/nextOffset by calling - * MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we + * MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we * may already have replayed WAL data into the SLRU files. 
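The xidComparator reindented earlier in this hunk is an ordinary qsort callback: it compares the 32-bit XIDs as plain unsigned integers, because the wraparound-aware "precedes" test used elsewhere is not transitive and so cannot serve as a sort order. A minimal standalone version of the same idea:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint32_t Xid;

    /* qsort comparator: plain unsigned comparison, deliberately NOT the
     * modular "precedes" comparison, which does not respect the triangle
     * inequality and is therefore unusable for sorting. */
    static int xid_cmp(const void *a, const void *b)
    {
        Xid x = *(const Xid *) a;
        Xid y = *(const Xid *) b;

        if (x > y)
            return 1;
        if (x < y)
            return -1;
        return 0;
    }

    int main(void)
    {
        Xid xids[] = {3000000000u, 5, 42};

        qsort(xids, 3, sizeof(Xid), xid_cmp);
        for (int i = 0; i < 3; i++)
            printf("%u ", xids[i]);
        printf("\n");               /* 5 42 3000000000 */
        return 0;
    }
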
* * We don't need any locks here, really; the SLRU locks are taken @@ -1311,14 +1311,14 @@ StartupMultiXact(void) MultiXactOffsetCtl->shared->latest_page_number = pageno; /* - * Zero out the remainder of the current offsets page. See notes - * in StartupCLOG() for motivation. + * Zero out the remainder of the current offsets page. See notes in + * StartupCLOG() for motivation. */ entryno = MultiXactIdToOffsetEntry(multi); if (entryno != 0) { int slotno; - MultiXactOffset *offptr; + MultiXactOffset *offptr; slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, multi); offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno]; @@ -1341,14 +1341,14 @@ StartupMultiXact(void) MultiXactMemberCtl->shared->latest_page_number = pageno; /* - * Zero out the remainder of the current members page. See notes - * in StartupCLOG() for motivation. + * Zero out the remainder of the current members page. See notes in + * StartupCLOG() for motivation. */ entryno = MXOffsetToMemberEntry(offset); if (entryno != 0) { int slotno; - TransactionId *xidptr; + TransactionId *xidptr; slotno = SimpleLruReadPage(MultiXactMemberCtl, pageno, offset); xidptr = (TransactionId *) MultiXactMemberCtl->shared->page_buffer[slotno]; @@ -1499,14 +1499,14 @@ static void ExtendMultiXactMember(MultiXactOffset offset, int nmembers) { /* - * It's possible that the members span more than one page of the - * members file, so we loop to ensure we consider each page. The - * coding is not optimal if the members span several pages, but - * that seems unusual enough to not worry much about. + * It's possible that the members span more than one page of the members + * file, so we loop to ensure we consider each page. The coding is not + * optimal if the members span several pages, but that seems unusual + * enough to not worry much about. */ while (nmembers > 0) { - int entryno; + int entryno; /* * Only zero when at first entry of a page. @@ -1514,7 +1514,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers) entryno = MXOffsetToMemberEntry(offset); if (entryno == 0) { - int pageno; + int pageno; pageno = MXOffsetToMemberPage(offset); @@ -1536,7 +1536,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers) * Remove all MultiXactOffset and MultiXactMember segments before the oldest * ones still of interest. * - * This is called only during checkpoints. We assume no more than one + * This is called only during checkpoints. We assume no more than one * backend does this at a time. * * XXX do we have any issues with needing to checkpoint here? @@ -1545,23 +1545,23 @@ static void TruncateMultiXact(void) { MultiXactId nextMXact; - MultiXactOffset nextOffset; + MultiXactOffset nextOffset; MultiXactId oldestMXact; - MultiXactOffset oldestOffset; + MultiXactOffset oldestOffset; int cutoffPage; int i; /* - * First, compute where we can safely truncate. Per notes above, - * this is the oldest valid value among all the OldestMemberMXactId[] and + * First, compute where we can safely truncate. Per notes above, this is + * the oldest valid value among all the OldestMemberMXactId[] and * OldestVisibleMXactId[] entries, or nextMXact if none are valid. */ LWLockAcquire(MultiXactGenLock, LW_SHARED); /* * We have to beware of the possibility that nextMXact is in the - * wrapped-around state. We don't fix the counter itself here, - * but we must be sure to use a valid value in our calculation. + * wrapped-around state. We don't fix the counter itself here, but we + * must be sure to use a valid value in our calculation. 
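ExtendMultiXactMember, quoted just above, has to cope with a member array that may straddle an SLRU page boundary: it walks forward through the reserved range, and only when the running offset lands exactly on the first entry of a page does it zero (create) that page. A simplified sketch of that loop, with an assumed MEMBERS_PER_PAGE constant and a stub in place of the real page-zeroing call:

    #include <stdint.h>
    #include <stdio.h>

    #define MEMBERS_PER_PAGE 2048   /* illustrative; the real value derives from BLCKSZ */

    typedef uint32_t MXOffset;

    static void zero_members_page(int pageno)
    {
        printf("zeroing members page %d\n", pageno);   /* stand-in for the SLRU call */
    }

    static void extend_members(MXOffset offset, int nmembers)
    {
        while (nmembers > 0)
        {
            int entryno = (int) (offset % MEMBERS_PER_PAGE);

            /* only the first entry of a page requires creating/zeroing it */
            if (entryno == 0)
                zero_members_page((int) (offset / MEMBERS_PER_PAGE));

            /* advance to the start of the next page */
            int this_page = MEMBERS_PER_PAGE - entryno;
            if (this_page > nmembers)
                this_page = nmembers;
            offset += this_page;
            nmembers -= this_page;
        }
    }

    int main(void)
    {
        /* 5 members starting 3 entries before a page boundary:
         * the array spans two pages, but only the second needs zeroing */
        extend_members(2 * MEMBERS_PER_PAGE - 3, 5);
        return 0;
    }
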
*/ nextMXact = MultiXactState->nextMXact; if (nextMXact < FirstMultiXactId) @@ -1597,9 +1597,9 @@ TruncateMultiXact(void) return; /* - * We need to determine where to truncate MultiXactMember. If we - * found a valid oldest MultiXactId, read its starting offset; - * otherwise we use the nextOffset value we saved above. + * We need to determine where to truncate MultiXactMember. If we found a + * valid oldest MultiXactId, read its starting offset; otherwise we use + * the nextOffset value we saved above. */ if (oldestMXact == nextMXact) oldestOffset = nextOffset; @@ -1608,7 +1608,7 @@ TruncateMultiXact(void) int pageno; int slotno; int entryno; - MultiXactOffset *offptr; + MultiXactOffset *offptr; LWLockAcquire(MultiXactOffsetControlLock, LW_EXCLUSIVE); @@ -1624,8 +1624,8 @@ TruncateMultiXact(void) } /* - * The cutoff point is the start of the segment containing oldestMXact. - * We pass the *page* containing oldestMXact to SimpleLruTruncate. + * The cutoff point is the start of the segment containing oldestMXact. We + * pass the *page* containing oldestMXact to SimpleLruTruncate. */ cutoffPage = MultiXactIdToOffsetPage(oldestMXact); @@ -1677,8 +1677,8 @@ MultiXactOffsetPagePrecedes(int page1, int page2) static bool MultiXactMemberPagePrecedes(int page1, int page2) { - MultiXactOffset offset1; - MultiXactOffset offset2; + MultiXactOffset offset1; + MultiXactOffset offset2; offset1 = ((MultiXactOffset) page1) * MULTIXACT_MEMBERS_PER_PAGE; offset2 = ((MultiXactOffset) page2) * MULTIXACT_MEMBERS_PER_PAGE; @@ -1695,7 +1695,7 @@ MultiXactMemberPagePrecedes(int page1, int page2) static bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2) { - int32 diff = (int32) (multi1 - multi2); + int32 diff = (int32) (multi1 - multi2); return (diff < 0); } @@ -1706,7 +1706,7 @@ MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2) static bool MultiXactOffsetPrecedes(MultiXactOffset offset1, MultiXactOffset offset2) { - int32 diff = (int32) (offset1 - offset2); + int32 diff = (int32) (offset1 - offset2); return (diff < 0); } @@ -1783,9 +1783,9 @@ multixact_redo(XLogRecPtr lsn, XLogRecord *record) MultiXactAdvanceNextMXact(xlrec->mid + 1, xlrec->moff + xlrec->nxids); /* - * Make sure nextXid is beyond any XID mentioned in the record. - * This should be unnecessary, since any XID found here ought to - * have other evidence in the XLOG, but let's be safe. + * Make sure nextXid is beyond any XID mentioned in the record. This + * should be unnecessary, since any XID found here ought to have other + * evidence in the XLOG, but let's be safe. */ max_xid = record->xl_xid; for (i = 0; i < xlrec->nxids; i++) diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index 67d9d3f54f3..5891890b764 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -48,7 +48,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.27 2005/08/20 23:26:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.28 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -186,8 +186,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, Assert(found); /* - * Initialize the unshared control struct, including directory path. - * We assume caller set PagePrecedes. + * Initialize the unshared control struct, including directory path. 
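MultiXactIdPrecedes and MultiXactOffsetPrecedes, reindented in the hunk above, both use the standard wraparound-aware comparison: subtract the two 32-bit values, reinterpret the difference as signed, and test the sign. A tiny standalone demonstration of why that works for values that are within 2^31 of each other:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* "a precedes b" in modulo-2^32 arithmetic.  Valid as long as the two
     * values are less than 2^31 apart, which anti-wraparound maintenance is
     * expected to guarantee for live counters. */
    static bool precedes(uint32_t a, uint32_t b)
    {
        int32_t diff = (int32_t) (a - b);

        return diff < 0;
    }

    int main(void)
    {
        printf("%d\n", precedes(100, 200));            /* 1 */
        printf("%d\n", precedes(4294967000u, 50));     /* 1: 50 is "after" a value near wraparound */
        printf("%d\n", precedes(50, 4294967000u));     /* 0 */
        return 0;
    }
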
We + * assume caller set PagePrecedes. */ ctl->shared = shared; ctl->do_fsync = true; /* default behavior */ @@ -351,11 +351,11 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata) LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE); /* - * Check to see if someone else already did the write, or took the - * buffer away from us. If so, do nothing. NOTE: we really should - * never see WRITE_IN_PROGRESS here, since that state should only - * occur while the writer is holding the buffer lock. But accept it - * so that we have a recovery path if a writer aborts. + * Check to see if someone else already did the write, or took the buffer + * away from us. If so, do nothing. NOTE: we really should never see + * WRITE_IN_PROGRESS here, since that state should only occur while the + * writer is holding the buffer lock. But accept it so that we have a + * recovery path if a writer aborts. */ if (shared->page_number[slotno] != pageno || (shared->page_status[slotno] != SLRU_PAGE_DIRTY && @@ -368,15 +368,14 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata) /* * Mark the slot write-busy. After this point, a transaction status - * update on this page will mark it dirty again. NB: we are assuming - * that read/write of the page status field is atomic, since we change - * the state while not holding control lock. However, we cannot set - * this state any sooner, or we'd possibly fool a previous writer into - * thinking he's successfully dumped the page when he hasn't. - * (Scenario: other writer starts, page is redirtied, we come along - * and set WRITE_IN_PROGRESS again, other writer completes and sets - * CLEAN because redirty info has been lost, then we think it's clean - * too.) + * update on this page will mark it dirty again. NB: we are assuming that + * read/write of the page status field is atomic, since we change the + * state while not holding control lock. However, we cannot set this + * state any sooner, or we'd possibly fool a previous writer into thinking + * he's successfully dumped the page when he hasn't. (Scenario: other + * writer starts, page is redirtied, we come along and set + * WRITE_IN_PROGRESS again, other writer completes and sets CLEAN because + * redirty info has been lost, then we think it's clean too.) */ shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS; @@ -436,8 +435,8 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) * In a crash-and-restart situation, it's possible for us to receive * commands to set the commit status of transactions whose bits are in * already-truncated segments of the commit log (see notes in - * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the - * case where the file doesn't exist, and return zeroes instead. + * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case + * where the file doesn't exist, and return zeroes instead. */ fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); if (fd < 0) @@ -528,17 +527,16 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) { /* * If the file doesn't already exist, we should create it. It is - * possible for this to need to happen when writing a page that's - * not first in its segment; we assume the OS can cope with that. - * (Note: it might seem that it'd be okay to create files only - * when SimpleLruZeroPage is called for the first page of a - * segment. 
However, if after a crash and restart the REDO logic - * elects to replay the log from a checkpoint before the latest - * one, then it's possible that we will get commands to set - * transaction status of transactions that have already been - * truncated from the commit log. Easiest way to deal with that is - * to accept references to nonexistent files here and in - * SlruPhysicalReadPage.) + * possible for this to need to happen when writing a page that's not + * first in its segment; we assume the OS can cope with that. (Note: + * it might seem that it'd be okay to create files only when + * SimpleLruZeroPage is called for the first page of a segment. + * However, if after a crash and restart the REDO logic elects to + * replay the log from a checkpoint before the latest one, then it's + * possible that we will get commands to set transaction status of + * transactions that have already been truncated from the commit log. + * Easiest way to deal with that is to accept references to + * nonexistent files here and in SlruPhysicalReadPage.) */ SlruFileName(ctl, path, segno); fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); @@ -635,49 +633,49 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid) case SLRU_OPEN_FAILED: ereport(ERROR, (errcode_for_file_access(), - errmsg("could not access status of transaction %u", xid), + errmsg("could not access status of transaction %u", xid), errdetail("could not open file \"%s\": %m", path))); break; case SLRU_CREATE_FAILED: ereport(ERROR, (errcode_for_file_access(), - errmsg("could not access status of transaction %u", xid), + errmsg("could not access status of transaction %u", xid), errdetail("could not create file \"%s\": %m", path))); break; case SLRU_SEEK_FAILED: ereport(ERROR, (errcode_for_file_access(), - errmsg("could not access status of transaction %u", xid), - errdetail("could not seek in file \"%s\" to offset %u: %m", - path, offset))); + errmsg("could not access status of transaction %u", xid), + errdetail("could not seek in file \"%s\" to offset %u: %m", + path, offset))); break; case SLRU_READ_FAILED: ereport(ERROR, (errcode_for_file_access(), - errmsg("could not access status of transaction %u", xid), - errdetail("could not read from file \"%s\" at offset %u: %m", - path, offset))); + errmsg("could not access status of transaction %u", xid), + errdetail("could not read from file \"%s\" at offset %u: %m", + path, offset))); break; case SLRU_WRITE_FAILED: ereport(ERROR, (errcode_for_file_access(), - errmsg("could not access status of transaction %u", xid), - errdetail("could not write to file \"%s\" at offset %u: %m", - path, offset))); + errmsg("could not access status of transaction %u", xid), + errdetail("could not write to file \"%s\" at offset %u: %m", + path, offset))); break; case SLRU_FSYNC_FAILED: ereport(ERROR, (errcode_for_file_access(), - errmsg("could not access status of transaction %u", xid), + errmsg("could not access status of transaction %u", xid), errdetail("could not fsync file \"%s\": %m", path))); break; case SLRU_CLOSE_FAILED: ereport(ERROR, (errcode_for_file_access(), - errmsg("could not access status of transaction %u", xid), + errmsg("could not access status of transaction %u", xid), errdetail("could not close file \"%s\": %m", path))); break; @@ -723,8 +721,8 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) } /* - * If we find any EMPTY slot, just select that one. Else locate - * the least-recently-used slot that isn't the latest page. + * If we find any EMPTY slot, just select that one. 
Else locate the + * least-recently-used slot that isn't the latest page. */ for (slotno = 0; slotno < NUM_SLRU_BUFFERS; slotno++) { @@ -745,10 +743,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) return bestslot; /* - * We need to do I/O. Normal case is that we have to write it - * out, but it's possible in the worst case to have selected a - * read-busy page. In that case we use SimpleLruReadPage to wait - * for the read to complete. + * We need to do I/O. Normal case is that we have to write it out, + * but it's possible in the worst case to have selected a read-busy + * page. In that case we use SimpleLruReadPage to wait for the read + * to complete. */ if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS) (void) SimpleLruReadPage(ctl, shared->page_number[bestslot], @@ -757,9 +755,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) SimpleLruWritePage(ctl, bestslot, NULL); /* - * Now loop back and try again. This is the easiest way of - * dealing with corner cases such as the victim page being - * re-dirtied while we wrote it. + * Now loop back and try again. This is the easiest way of dealing + * with corner cases such as the victim page being re-dirtied while we + * wrote it. */ } } @@ -789,9 +787,9 @@ SimpleLruFlush(SlruCtl ctl, bool checkpoint) SimpleLruWritePage(ctl, slotno, &fdata); /* - * When called during a checkpoint, we cannot assert that the slot - * is clean now, since another process might have re-dirtied it - * already. That's okay. + * When called during a checkpoint, we cannot assert that the slot is + * clean now, since another process might have re-dirtied it already. + * That's okay. */ Assert(checkpoint || shared->page_status[slotno] == SLRU_PAGE_EMPTY || @@ -841,10 +839,10 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage) cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT; /* - * Scan shared memory and remove any pages preceding the cutoff page, - * to ensure we won't rewrite them later. (Since this is normally - * called in or just after a checkpoint, any dirty pages should have - * been flushed already ... we're just being extra careful here.) + * Scan shared memory and remove any pages preceding the cutoff page, to + * ensure we won't rewrite them later. (Since this is normally called in + * or just after a checkpoint, any dirty pages should have been flushed + * already ... we're just being extra careful here.) */ LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE); @@ -852,16 +850,16 @@ restart:; /* * While we are holding the lock, make an important safety check: the - * planned cutoff point must be <= the current endpoint page. - * Otherwise we have already wrapped around, and proceeding with the - * truncation would risk removing the current segment. + * planned cutoff point must be <= the current endpoint page. Otherwise we + * have already wrapped around, and proceeding with the truncation would + * risk removing the current segment. */ if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage)) { LWLockRelease(shared->ControlLock); ereport(LOG, - (errmsg("could not truncate directory \"%s\": apparent wraparound", - ctl->Dir))); + (errmsg("could not truncate directory \"%s\": apparent wraparound", + ctl->Dir))); return; } @@ -882,9 +880,9 @@ restart:; } /* - * Hmm, we have (or may have) I/O operations acting on the page, - * so we've got to wait for them to finish and then start again. - * This is the same logic as in SlruSelectLRUPage. 
+ * Hmm, we have (or may have) I/O operations acting on the page, so + * we've got to wait for them to finish and then start again. This is + * the same logic as in SlruSelectLRUPage. */ if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS) (void) SimpleLruReadPage(ctl, shared->page_number[slotno], diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index 9b450350360..7671eb6a45e 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -22,7 +22,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.10 2005/08/20 23:26:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.11 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -234,9 +234,8 @@ StartupSUBTRANS(TransactionId oldestActiveXID) /* * Since we don't expect pg_subtrans to be valid across crashes, we * initialize the currently-active page(s) to zeroes during startup. - * Whenever we advance into a new page, ExtendSUBTRANS will likewise - * zero the new page without regard to whatever was previously on - * disk. + * Whenever we advance into a new page, ExtendSUBTRANS will likewise zero + * the new page without regard to whatever was previously on disk. */ LWLockAcquire(SubtransControlLock, LW_EXCLUSIVE); @@ -262,8 +261,8 @@ ShutdownSUBTRANS(void) /* * Flush dirty SUBTRANS pages to disk * - * This is not actually necessary from a correctness point of view. We do - * it merely as a debugging aid. + * This is not actually necessary from a correctness point of view. We do it + * merely as a debugging aid. */ SimpleLruFlush(SubTransCtl, false); } @@ -277,9 +276,9 @@ CheckPointSUBTRANS(void) /* * Flush dirty SUBTRANS pages to disk * - * This is not actually necessary from a correctness point of view. We do - * it merely to improve the odds that writing of dirty pages is done - * by the checkpoint process and not by backends. + * This is not actually necessary from a correctness point of view. We do it + * merely to improve the odds that writing of dirty pages is done by the + * checkpoint process and not by backends. */ SimpleLruFlush(SubTransCtl, true); } @@ -329,8 +328,8 @@ TruncateSUBTRANS(TransactionId oldestXact) int cutoffPage; /* - * The cutoff point is the start of the segment containing oldestXact. - * We pass the *page* containing oldestXact to SimpleLruTruncate. + * The cutoff point is the start of the segment containing oldestXact. We + * pass the *page* containing oldestXact to SimpleLruTruncate. */ cutoffPage = TransactionIdToPage(oldestXact); diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c index 5fa6f82daf4..59852520521 100644 --- a/src/backend/access/transam/transam.c +++ b/src/backend/access/transam/transam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.65 2005/06/17 22:32:42 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66 2005/10/15 02:49:09 momjian Exp $ * * NOTES * This file contains the high level access-method interface to the @@ -54,8 +54,8 @@ TransactionLogFetch(TransactionId transactionId) XidStatus xidstatus; /* - * Before going to the commit log manager, check our single item cache - * to see if we didn't just check the transaction status a moment ago. 
+ * Before going to the commit log manager, check our single item cache to + * see if we didn't just check the transaction status a moment ago. */ if (TransactionIdEquals(transactionId, cachedFetchXid)) return cachedFetchXidStatus; @@ -78,8 +78,8 @@ TransactionLogFetch(TransactionId transactionId) xidstatus = TransactionIdGetStatus(transactionId); /* - * DO NOT cache status for unfinished or sub-committed transactions! - * We only cache status that is guaranteed not to change. + * DO NOT cache status for unfinished or sub-committed transactions! We + * only cache status that is guaranteed not to change. */ if (xidstatus != TRANSACTION_STATUS_IN_PROGRESS && xidstatus != TRANSACTION_STATUS_SUB_COMMITTED) @@ -169,18 +169,18 @@ TransactionIdDidCommit(TransactionId transactionId) return true; /* - * If it's marked subcommitted, we have to check the parent - * recursively. However, if it's older than TransactionXmin, we can't - * look at pg_subtrans; instead assume that the parent crashed without - * cleaning up its children. + * If it's marked subcommitted, we have to check the parent recursively. + * However, if it's older than TransactionXmin, we can't look at + * pg_subtrans; instead assume that the parent crashed without cleaning up + * its children. * - * Originally we Assert'ed that the result of SubTransGetParent was - * not zero. However with the introduction of prepared transactions, - * there can be a window just after database startup where we do not - * have complete knowledge in pg_subtrans of the transactions after - * TransactionXmin. StartupSUBTRANS() has ensured that any missing - * information will be zeroed. Since this case should not happen under - * normal conditions, it seems reasonable to emit a WARNING for it. + * Originally we Assert'ed that the result of SubTransGetParent was not zero. + * However with the introduction of prepared transactions, there can be a + * window just after database startup where we do not have complete + * knowledge in pg_subtrans of the transactions after TransactionXmin. + * StartupSUBTRANS() has ensured that any missing information will be + * zeroed. Since this case should not happen under normal conditions, it + * seems reasonable to emit a WARNING for it. */ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) { @@ -225,10 +225,10 @@ TransactionIdDidAbort(TransactionId transactionId) return true; /* - * If it's marked subcommitted, we have to check the parent - * recursively. However, if it's older than TransactionXmin, we can't - * look at pg_subtrans; instead assume that the parent crashed without - * cleaning up its children. + * If it's marked subcommitted, we have to check the parent recursively. + * However, if it's older than TransactionXmin, we can't look at + * pg_subtrans; instead assume that the parent crashed without cleaning up + * its children. 
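/*
 * A minimal sketch (hypothetical names) of the single-item cache pattern in
 * TransactionLogFetch above: remember the last xid looked up, but only when
 * the answer can never change, so a stale entry can never be wrong.
 */
#include <stdint.h>

typedef uint32_t Xid;
typedef int XidStatus;

#define ST_IN_PROGRESS		0
#define ST_SUB_COMMITTED	1
#define ST_COMMITTED		2
#define ST_ABORTED			3

static Xid			cached_xid = 0;		/* 0: no valid cache entry */
static XidStatus	cached_status;

extern XidStatus slow_lookup(Xid xid);	/* stand-in for the real clog lookup */

static XidStatus
fetch_status(Xid xid)
{
	XidStatus	st;

	if (xid == cached_xid)
		return cached_status;			/* hit: skip the expensive lookup */

	st = slow_lookup(xid);

	/* Only cache final states; in-progress answers may change later. */
	if (st != ST_IN_PROGRESS && st != ST_SUB_COMMITTED)
	{
		cached_xid = xid;
		cached_status = st;
	}
	return st;
}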
*/ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) { diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 05590da14ed..0ece348e184 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.14 2005/10/13 22:55:55 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.15 2005/10/15 02:49:09 momjian Exp $ * * NOTES * Each global transaction is associated with a global transaction @@ -64,7 +64,7 @@ #define TWOPHASE_DIR "pg_twophase" /* GUC variable, can't be changed after startup */ -int max_prepared_xacts = 5; +int max_prepared_xacts = 5; /* * This struct describes one global transaction that is in prepared state @@ -97,7 +97,7 @@ int max_prepared_xacts = 5; * entry will remain in prepXacts until recycled. We can detect recyclable * entries by checking for valid = false and locking_xid no longer active. * - * typedef struct GlobalTransactionData *GlobalTransaction appears in + * typedef struct GlobalTransactionData *GlobalTransaction appears in * twophase.h */ #define GIDSIZE 200 @@ -105,12 +105,12 @@ int max_prepared_xacts = 5; typedef struct GlobalTransactionData { PGPROC proc; /* dummy proc */ - TimestampTz prepared_at; /* time of preparation */ + TimestampTz prepared_at; /* time of preparation */ XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */ Oid owner; /* ID of user that executed the xact */ TransactionId locking_xid; /* top-level XID of backend working on xact */ bool valid; /* TRUE if fully prepared */ - char gid[GIDSIZE]; /* The GID assigned to the prepared xact */ + char gid[GIDSIZE]; /* The GID assigned to the prepared xact */ } GlobalTransactionData; /* @@ -123,30 +123,30 @@ typedef struct TwoPhaseStateData SHMEM_OFFSET freeGXacts; /* Number of valid prepXacts entries. */ - int numPrepXacts; + int numPrepXacts; /* * There are max_prepared_xacts items in this array, but C wants a * fixed-size array. */ - GlobalTransaction prepXacts[1]; /* VARIABLE LENGTH ARRAY */ + GlobalTransaction prepXacts[1]; /* VARIABLE LENGTH ARRAY */ } TwoPhaseStateData; /* VARIABLE LENGTH STRUCT */ static TwoPhaseStateData *TwoPhaseState; static void RecordTransactionCommitPrepared(TransactionId xid, - int nchildren, - TransactionId *children, - int nrels, - RelFileNode *rels); + int nchildren, + TransactionId *children, + int nrels, + RelFileNode *rels); static void RecordTransactionAbortPrepared(TransactionId xid, - int nchildren, - TransactionId *children, - int nrels, - RelFileNode *rels); + int nchildren, + TransactionId *children, + int nrels, + RelFileNode *rels); static void ProcessRecords(char *bufptr, TransactionId xid, - const TwoPhaseCallback callbacks[]); + const TwoPhaseCallback callbacks[]); /* @@ -171,7 +171,7 @@ TwoPhaseShmemSize(void) void TwoPhaseShmemInit(void) { - bool found; + bool found; TwoPhaseState = ShmemInitStruct("Prepared Transaction Table", TwoPhaseShmemSize(), @@ -190,7 +190,7 @@ TwoPhaseShmemInit(void) */ gxacts = (GlobalTransaction) ((char *) TwoPhaseState + - MAXALIGN(offsetof(TwoPhaseStateData, prepXacts) + + MAXALIGN(offsetof(TwoPhaseStateData, prepXacts) + sizeof(GlobalTransaction) * max_prepared_xacts)); for (i = 0; i < max_prepared_xacts; i++) { @@ -205,7 +205,7 @@ TwoPhaseShmemInit(void) /* * MarkAsPreparing - * Reserve the GID for the given transaction. 
+ * Reserve the GID for the given transaction. * * Internally, this creates a gxact struct and puts it into the active array. * NOTE: this is also used when reloading a gxact after a crash; so avoid @@ -215,8 +215,8 @@ GlobalTransaction MarkAsPreparing(TransactionId xid, const char *gid, TimestampTz prepared_at, Oid owner, Oid databaseid) { - GlobalTransaction gxact; - int i; + GlobalTransaction gxact; + int i; if (strlen(gid) >= GIDSIZE) ereport(ERROR, @@ -227,10 +227,9 @@ MarkAsPreparing(TransactionId xid, const char *gid, LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE); /* - * First, find and recycle any gxacts that failed during prepare. - * We do this partly to ensure we don't mistakenly say their GIDs - * are still reserved, and partly so we don't fail on out-of-slots - * unnecessarily. + * First, find and recycle any gxacts that failed during prepare. We do + * this partly to ensure we don't mistakenly say their GIDs are still + * reserved, and partly so we don't fail on out-of-slots unnecessarily. */ for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { @@ -360,13 +359,13 @@ MarkAsPrepared(GlobalTransaction gxact) static GlobalTransaction LockGXact(const char *gid, Oid user) { - int i; + int i; LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE); for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { - GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; + GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; /* Ignore not-yet-valid GIDs */ if (!gxact->valid) @@ -380,15 +379,15 @@ LockGXact(const char *gid, Oid user) if (TransactionIdIsActive(gxact->locking_xid)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("prepared transaction with identifier \"%s\" is busy", - gid))); + errmsg("prepared transaction with identifier \"%s\" is busy", + gid))); gxact->locking_xid = InvalidTransactionId; } if (user != gxact->owner && !superuser_arg(user)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to finish prepared transaction"), + errmsg("permission denied to finish prepared transaction"), errhint("Must be superuser or the user that prepared the transaction."))); /* OK for me to lock it */ @@ -403,8 +402,8 @@ LockGXact(const char *gid, Oid user) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("prepared transaction with identifier \"%s\" does not exist", - gid))); + errmsg("prepared transaction with identifier \"%s\" does not exist", + gid))); /* NOTREACHED */ return NULL; @@ -419,7 +418,7 @@ LockGXact(const char *gid, Oid user) static void RemoveGXact(GlobalTransaction gxact) { - int i; + int i; LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE); @@ -449,7 +448,7 @@ RemoveGXact(GlobalTransaction gxact) /* * TransactionIdIsPrepared * True iff transaction associated with the identifier is prepared - * for two-phase commit + * for two-phase commit * * Note: only gxacts marked "valid" are considered; but notice we do not * check the locking status. 
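/*
 * A minimal sketch (hypothetical names) of the variable-length-array layout
 * used by TwoPhaseStateData above: the struct declares a one-element array
 * as its tail because C wants a fixed-size member, and the real allocation
 * is sized with offsetof() plus n trailing elements, mirroring the
 * offsetof-based sizing visible in TwoPhaseShmemInit.
 */
#include <stddef.h>
#include <stdlib.h>

typedef struct Table
{
	int			nitems;			/* number of valid entries */
	int			items[1];		/* VARIABLE LENGTH ARRAY */
} Table;						/* VARIABLE LENGTH STRUCT */

static Table *
table_create(int n)
{
	size_t		sz = offsetof(Table, items) + sizeof(int) * (size_t) n;
	Table	   *t = calloc(1, sz);	/* zero-initialized allocation */

	if (t != NULL)
		t->nitems = n;
	return t;
}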
@@ -459,14 +458,14 @@ RemoveGXact(GlobalTransaction gxact) static bool TransactionIdIsPrepared(TransactionId xid) { - bool result = false; - int i; + bool result = false; + int i; LWLockAcquire(TwoPhaseStateLock, LW_SHARED); for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { - GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; + GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; if (gxact->valid && gxact->proc.xid == xid) { @@ -496,8 +495,8 @@ static int GetPreparedTransactionList(GlobalTransaction *gxacts) { GlobalTransaction array; - int num; - int i; + int num; + int i; LWLockAcquire(TwoPhaseStateLock, LW_SHARED); @@ -526,13 +525,13 @@ GetPreparedTransactionList(GlobalTransaction *gxacts) typedef struct { GlobalTransaction array; - int ngxacts; - int currIdx; + int ngxacts; + int currIdx; } Working_State; /* * pg_prepared_xact - * Produce a view with one row per prepared transaction. + * Produce a view with one row per prepared transaction. * * This function is here so we don't have to export the * GlobalTransactionData struct definition. @@ -552,8 +551,7 @@ pg_prepared_xact(PG_FUNCTION_ARGS) funcctx = SRF_FIRSTCALL_INIT(); /* - * Switch to memory context appropriate for multiple function - * calls + * Switch to memory context appropriate for multiple function calls */ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); @@ -574,8 +572,8 @@ pg_prepared_xact(PG_FUNCTION_ARGS) funcctx->tuple_desc = BlessTupleDesc(tupdesc); /* - * Collect all the 2PC status information that we will format and - * send out as a result set. + * Collect all the 2PC status information that we will format and send + * out as a result set. */ status = (Working_State *) palloc(sizeof(Working_State)); funcctx->user_fctx = (void *) status; @@ -644,7 +642,7 @@ TwoPhaseGetDummyProc(TransactionId xid) for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { - GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; + GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; if (gxact->proc.xid == xid) { @@ -665,7 +663,7 @@ TwoPhaseGetDummyProc(TransactionId xid) } /************************************************************************/ -/* State file support */ +/* State file support */ /************************************************************************/ #define TwoPhaseFilePath(path, xid) \ @@ -674,14 +672,14 @@ TwoPhaseGetDummyProc(TransactionId xid) /* * 2PC state file format: * - * 1. TwoPhaseFileHeader - * 2. TransactionId[] (subtransactions) + * 1. TwoPhaseFileHeader + * 2. TransactionId[] (subtransactions) * 3. RelFileNode[] (files to be deleted at commit) * 4. RelFileNode[] (files to be deleted at abort) - * 5. TwoPhaseRecordOnDisk - * 6. ... - * 7. TwoPhaseRecordOnDisk (end sentinel, rmid == TWOPHASE_RM_END_ID) - * 8. CRC32 + * 5. TwoPhaseRecordOnDisk + * 6. ... + * 7. TwoPhaseRecordOnDisk (end sentinel, rmid == TWOPHASE_RM_END_ID) + * 8. CRC32 * * Each segment except the final CRC32 is MAXALIGN'd. 
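/*
 * A minimal sketch of walking the MAXALIGN'd record section described in
 * the state-file layout above: each record is a fixed header followed by a
 * padded payload, and the walk stops at the end sentinel.  ALIGN8,
 * RecHeader and REC_END_ID are illustrative stand-ins, not the real
 * MAXALIGN/TwoPhaseRecordOnDisk/TWOPHASE_RM_END_ID.
 */
#include <stdint.h>
#include <stddef.h>

#define ALIGN8(len)		(((len) + 7) & ~(uintptr_t) 7)
#define REC_END_ID		0

typedef struct RecHeader
{
	uint32_t	len;			/* length of rmgr payload */
	uint16_t	rmid;			/* resource manager id */
	uint16_t	info;			/* rmgr flag bits */
} RecHeader;

static const char *
walk_records(const char *p)
{
	for (;;)
	{
		const RecHeader *hdr = (const RecHeader *) p;

		p += ALIGN8(sizeof(RecHeader));	/* skip padded header */
		if (hdr->rmid == REC_END_ID)
			break;						/* end sentinel: no payload follows */
		p += ALIGN8(hdr->len);			/* skip padded payload */
	}
	return p;							/* just past the sentinel */
}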
*/ @@ -693,16 +691,16 @@ TwoPhaseGetDummyProc(TransactionId xid) typedef struct TwoPhaseFileHeader { - uint32 magic; /* format identifier */ - uint32 total_len; /* actual file length */ - TransactionId xid; /* original transaction XID */ - Oid database; /* OID of database it was in */ - TimestampTz prepared_at; /* time of preparation */ - Oid owner; /* user running the transaction */ - int32 nsubxacts; /* number of following subxact XIDs */ - int32 ncommitrels; /* number of delete-on-commit rels */ - int32 nabortrels; /* number of delete-on-abort rels */ - char gid[GIDSIZE]; /* GID for transaction */ + uint32 magic; /* format identifier */ + uint32 total_len; /* actual file length */ + TransactionId xid; /* original transaction XID */ + Oid database; /* OID of database it was in */ + TimestampTz prepared_at; /* time of preparation */ + Oid owner; /* user running the transaction */ + int32 nsubxacts; /* number of following subxact XIDs */ + int32 ncommitrels; /* number of delete-on-commit rels */ + int32 nabortrels; /* number of delete-on-abort rels */ + char gid[GIDSIZE]; /* GID for transaction */ } TwoPhaseFileHeader; /* @@ -713,9 +711,9 @@ typedef struct TwoPhaseFileHeader */ typedef struct TwoPhaseRecordOnDisk { - uint32 len; /* length of rmgr data */ - TwoPhaseRmgrId rmid; /* resource manager for this record */ - uint16 info; /* flag bits for use by rmgr */ + uint32 len; /* length of rmgr data */ + TwoPhaseRmgrId rmid; /* resource manager for this record */ + uint16 info; /* flag bits for use by rmgr */ } TwoPhaseRecordOnDisk; /* @@ -728,9 +726,9 @@ static struct xllist { XLogRecData *head; /* first data block in the chain */ XLogRecData *tail; /* last block in chain */ - uint32 bytes_free; /* free bytes left in tail block */ - uint32 total_len; /* total data bytes in chain */ -} records; + uint32 bytes_free; /* free bytes left in tail block */ + uint32 total_len; /* total data bytes in chain */ +} records; /* @@ -744,7 +742,7 @@ static struct xllist static void save_state_data(const void *data, uint32 len) { - uint32 padlen = MAXALIGN(len); + uint32 padlen = MAXALIGN(len); if (padlen > records.bytes_free) { @@ -772,7 +770,7 @@ save_state_data(const void *data, uint32 len) void StartPrepare(GlobalTransaction gxact) { - TransactionId xid = gxact->proc.xid; + TransactionId xid = gxact->proc.xid; TwoPhaseFileHeader hdr; TransactionId *children; RelFileNode *commitrels; @@ -833,13 +831,13 @@ StartPrepare(GlobalTransaction gxact) void EndPrepare(GlobalTransaction gxact) { - TransactionId xid = gxact->proc.xid; + TransactionId xid = gxact->proc.xid; TwoPhaseFileHeader *hdr; - char path[MAXPGPATH]; - XLogRecData *record; - pg_crc32 statefile_crc; - pg_crc32 bogus_crc; - int fd; + char path[MAXPGPATH]; + XLogRecData *record; + pg_crc32 statefile_crc; + pg_crc32 bogus_crc; + int fd; /* Add the end sentinel to the list of 2PC records */ RegisterTwoPhaseRecord(TWOPHASE_RM_END_ID, 0, @@ -853,10 +851,10 @@ EndPrepare(GlobalTransaction gxact) /* * Create the 2PC state file. * - * Note: because we use BasicOpenFile(), we are responsible for ensuring - * the FD gets closed in any error exit path. Once we get into the - * critical section, though, it doesn't matter since any failure causes - * PANIC anyway. + * Note: because we use BasicOpenFile(), we are responsible for ensuring the + * FD gets closed in any error exit path. Once we get into the critical + * section, though, it doesn't matter since any failure causes PANIC + * anyway. 
*/ TwoPhaseFilePath(path, xid); @@ -887,11 +885,10 @@ EndPrepare(GlobalTransaction gxact) FIN_CRC32(statefile_crc); /* - * Write a deliberately bogus CRC to the state file; this is just - * paranoia to catch the case where four more bytes will run us out of - * disk space. + * Write a deliberately bogus CRC to the state file; this is just paranoia + * to catch the case where four more bytes will run us out of disk space. */ - bogus_crc = ~ statefile_crc; + bogus_crc = ~statefile_crc; if ((write(fd, &bogus_crc, sizeof(pg_crc32))) != sizeof(pg_crc32)) { @@ -914,11 +911,11 @@ EndPrepare(GlobalTransaction gxact) * The state file isn't valid yet, because we haven't written the correct * CRC yet. Before we do that, insert entry in WAL and flush it to disk. * - * Between the time we have written the WAL entry and the time we write - * out the correct state file CRC, we have an inconsistency: the xact is - * prepared according to WAL but not according to our on-disk state. - * We use a critical section to force a PANIC if we are unable to complete - * the write --- then, WAL replay should repair the inconsistency. The + * Between the time we have written the WAL entry and the time we write out + * the correct state file CRC, we have an inconsistency: the xact is + * prepared according to WAL but not according to our on-disk state. We + * use a critical section to force a PANIC if we are unable to complete + * the write --- then, WAL replay should repair the inconsistency. The * odds of a PANIC actually occurring should be very tiny given that we * were able to write the bogus CRC above. * @@ -956,16 +953,16 @@ EndPrepare(GlobalTransaction gxact) errmsg("could not close twophase state file: %m"))); /* - * Mark the prepared transaction as valid. As soon as xact.c marks - * MyProc as not running our XID (which it will do immediately after - * this function returns), others can commit/rollback the xact. + * Mark the prepared transaction as valid. As soon as xact.c marks MyProc + * as not running our XID (which it will do immediately after this + * function returns), others can commit/rollback the xact. * * NB: a side effect of this is to make a dummy ProcArray entry for the * prepared XID. This must happen before we clear the XID from MyProc, * else there is a window where the XID is not running according to - * TransactionIdInProgress, and onlookers would be entitled to assume - * the xact crashed. Instead we have a window where the same XID - * appears twice in ProcArray, which is OK. + * TransactionIdInProgress, and onlookers would be entitled to assume the + * xact crashed. Instead we have a window where the same XID appears + * twice in ProcArray, which is OK. */ MarkAsPrepared(gxact); @@ -1011,9 +1008,10 @@ ReadTwoPhaseFile(TransactionId xid) char *buf; TwoPhaseFileHeader *hdr; int fd; - struct stat stat; + struct stat stat; uint32 crc_offset; - pg_crc32 calc_crc, file_crc; + pg_crc32 calc_crc, + file_crc; TwoPhaseFilePath(path, xid); @@ -1028,9 +1026,8 @@ ReadTwoPhaseFile(TransactionId xid) } /* - * Check file length. We can determine a lower bound pretty easily. - * We set an upper bound mainly to avoid palloc() failure on a corrupt - * file. + * Check file length. We can determine a lower bound pretty easily. We + * set an upper bound mainly to avoid palloc() failure on a corrupt file. 
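/*
 * A minimal sketch of the placeholder-CRC idea described in EndPrepare
 * above (a hypothetical helper, not the real code): write the bitwise
 * complement of the CRC first to reserve the final four bytes on disk, and
 * overwrite it with the real value only after the rest of the sequence has
 * succeeded, so running out of space at the last moment cannot leave behind
 * a file that looks valid.
 */
#include <stdint.h>
#include <unistd.h>

static int
write_state_file_crc(int fd, uint32_t crc, off_t crc_offset)
{
	uint32_t	bogus = ~crc;	/* guaranteed not to match the real CRC */

	/* Reserve the last four bytes now; fail early if the disk is full. */
	if (write(fd, &bogus, sizeof(bogus)) != sizeof(bogus))
		return -1;

	/* ... the WAL record is written and flushed here in the real sequence ... */

	/* Now overwrite the placeholder with the real CRC. */
	if (lseek(fd, crc_offset, SEEK_SET) != crc_offset)
		return -1;
	if (write(fd, &crc, sizeof(crc)) != sizeof(crc))
		return -1;
	return 0;
}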
*/ if (fstat(fd, &stat)) { @@ -1107,17 +1104,17 @@ FinishPreparedTransaction(const char *gid, bool isCommit) { GlobalTransaction gxact; TransactionId xid; - char *buf; - char *bufptr; + char *buf; + char *bufptr; TwoPhaseFileHeader *hdr; TransactionId *children; RelFileNode *commitrels; RelFileNode *abortrels; - int i; + int i; /* - * Validate the GID, and lock the GXACT to ensure that two backends - * do not try to commit the same GID at once. + * Validate the GID, and lock the GXACT to ensure that two backends do not + * try to commit the same GID at once. */ gxact = LockGXact(gid, GetUserId()); xid = gxact->proc.xid; @@ -1148,10 +1145,10 @@ FinishPreparedTransaction(const char *gid, bool isCommit) /* * The order of operations here is critical: make the XLOG entry for * commit or abort, then mark the transaction committed or aborted in - * pg_clog, then remove its PGPROC from the global ProcArray (which - * means TransactionIdIsInProgress will stop saying the prepared xact - * is in progress), then run the post-commit or post-abort callbacks. - * The callbacks will release the locks the transaction held. + * pg_clog, then remove its PGPROC from the global ProcArray (which means + * TransactionIdIsInProgress will stop saying the prepared xact is in + * progress), then run the post-commit or post-abort callbacks. The + * callbacks will release the locks the transaction held. */ if (isCommit) RecordTransactionCommitPrepared(xid, @@ -1165,18 +1162,18 @@ FinishPreparedTransaction(const char *gid, bool isCommit) ProcArrayRemove(&gxact->proc); /* - * In case we fail while running the callbacks, mark the gxact invalid - * so no one else will try to commit/rollback, and so it can be recycled - * properly later. It is still locked by our XID so it won't go away yet. + * In case we fail while running the callbacks, mark the gxact invalid so + * no one else will try to commit/rollback, and so it can be recycled + * properly later. It is still locked by our XID so it won't go away yet. * * (We assume it's safe to do this without taking TwoPhaseStateLock.) */ gxact->valid = false; /* - * We have to remove any files that were supposed to be dropped. - * For consistency with the regular xact.c code paths, must do this - * before releasing locks, so do it before running the callbacks. + * We have to remove any files that were supposed to be dropped. For + * consistency with the regular xact.c code paths, must do this before + * releasing locks, so do it before running the callbacks. * * NB: this code knows that we couldn't be dropping any temp rels ... 
*/ @@ -1228,8 +1225,8 @@ ProcessRecords(char *bufptr, TransactionId xid, bufptr += MAXALIGN(sizeof(TwoPhaseRecordOnDisk)); if (callbacks[record->rmid] != NULL) - callbacks[record->rmid](xid, record->info, - (void *) bufptr, record->len); + callbacks[record->rmid] (xid, record->info, + (void *) bufptr, record->len); bufptr += MAXALIGN(record->len); } @@ -1244,15 +1241,15 @@ ProcessRecords(char *bufptr, TransactionId xid, void RemoveTwoPhaseFile(TransactionId xid, bool giveWarning) { - char path[MAXPGPATH]; + char path[MAXPGPATH]; TwoPhaseFilePath(path, xid); if (unlink(path)) if (errno != ENOENT || giveWarning) ereport(WARNING, (errcode_for_file_access(), - errmsg("could not remove two-phase state file \"%s\": %m", - path))); + errmsg("could not remove two-phase state file \"%s\": %m", + path))); } /* @@ -1300,8 +1297,8 @@ RecreateTwoPhaseFile(TransactionId xid, void *content, int len) } /* - * We must fsync the file because the end-of-replay checkpoint will - * not do so, there being no GXACT in shared memory yet to tell it to. + * We must fsync the file because the end-of-replay checkpoint will not do + * so, there being no GXACT in shared memory yet to tell it to. */ if (pg_fsync(fd) != 0) { @@ -1343,15 +1340,15 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) int i; /* - * We don't want to hold the TwoPhaseStateLock while doing I/O, - * so we grab it just long enough to make a list of the XIDs that - * require fsyncing, and then do the I/O afterwards. + * We don't want to hold the TwoPhaseStateLock while doing I/O, so we grab + * it just long enough to make a list of the XIDs that require fsyncing, + * and then do the I/O afterwards. * - * This approach creates a race condition: someone else could delete - * a GXACT between the time we release TwoPhaseStateLock and the time - * we try to open its state file. We handle this by special-casing - * ENOENT failures: if we see that, we verify that the GXACT is no - * longer valid, and if so ignore the failure. + * This approach creates a race condition: someone else could delete a GXACT + * between the time we release TwoPhaseStateLock and the time we try to + * open its state file. We handle this by special-casing ENOENT failures: + * if we see that, we verify that the GXACT is no longer valid, and if so + * ignore the failure. */ if (max_prepared_xacts <= 0) return; /* nothing to do */ @@ -1362,9 +1359,9 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { - GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; + GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; - if (gxact->valid && + if (gxact->valid && XLByteLE(gxact->prepare_lsn, redo_horizon)) xids[nxids++] = gxact->proc.xid; } @@ -1374,7 +1371,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) for (i = 0; i < nxids; i++) { TransactionId xid = xids[i]; - int fd; + int fd; TwoPhaseFilePath(path, xid); @@ -1424,7 +1421,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) * * We throw away any prepared xacts with main XID beyond nextXid --- if any * are present, it suggests that the DBA has done a PITR recovery to an - * earlier point in time without cleaning out pg_twophase. We dare not + * earlier point in time without cleaning out pg_twophase. We dare not * try to recover such prepared xacts since they likely depend on database * state that doesn't exist now. 
* @@ -1442,7 +1439,7 @@ PrescanPreparedTransactions(void) { TransactionId origNextXid = ShmemVariableCache->nextXid; TransactionId result = origNextXid; - DIR *cldir; + DIR *cldir; struct dirent *clde; cldir = AllocateDir(TWOPHASE_DIR); @@ -1452,10 +1449,10 @@ PrescanPreparedTransactions(void) strspn(clde->d_name, "0123456789ABCDEF") == 8) { TransactionId xid; - char *buf; - TwoPhaseFileHeader *hdr; + char *buf; + TwoPhaseFileHeader *hdr; TransactionId *subxids; - int i; + int i; xid = (TransactionId) strtoul(clde->d_name, NULL, 16); @@ -1541,8 +1538,8 @@ PrescanPreparedTransactions(void) void RecoverPreparedTransactions(void) { - char dir[MAXPGPATH]; - DIR *cldir; + char dir[MAXPGPATH]; + DIR *cldir; struct dirent *clde; snprintf(dir, MAXPGPATH, "%s", TWOPHASE_DIR); @@ -1554,12 +1551,12 @@ RecoverPreparedTransactions(void) strspn(clde->d_name, "0123456789ABCDEF") == 8) { TransactionId xid; - char *buf; - char *bufptr; - TwoPhaseFileHeader *hdr; + char *buf; + char *bufptr; + TwoPhaseFileHeader *hdr; TransactionId *subxids; - GlobalTransaction gxact; - int i; + GlobalTransaction gxact; + int i; xid = (TransactionId) strtoul(clde->d_name, NULL, 16); @@ -1598,8 +1595,8 @@ RecoverPreparedTransactions(void) /* * Reconstruct subtrans state for the transaction --- needed - * because pg_subtrans is not preserved over a restart. Note - * that we are linking all the subtransactions directly to the + * because pg_subtrans is not preserved over a restart. Note that + * we are linking all the subtransactions directly to the * top-level XID; there may originally have been a more complex * hierarchy, but there's no need to restore that exactly. */ @@ -1609,12 +1606,12 @@ RecoverPreparedTransactions(void) /* * Recreate its GXACT and dummy PGPROC * - * Note: since we don't have the PREPARE record's WAL location - * at hand, we leave prepare_lsn zeroes. This means the GXACT - * will be fsync'd on every future checkpoint. We assume this + * Note: since we don't have the PREPARE record's WAL location at + * hand, we leave prepare_lsn zeroes. This means the GXACT will + * be fsync'd on every future checkpoint. We assume this * situation is infrequent enough that the performance cost is - * negligible (especially since we know the state file has - * already been fsynced). + * negligible (especially since we know the state file has already + * been fsynced). */ gxact = MarkAsPreparing(xid, hdr->gid, hdr->prepared_at, @@ -1773,12 +1770,11 @@ RecordTransactionAbortPrepared(TransactionId xid, XLogFlush(recptr); /* - * Mark the transaction aborted in clog. This is not absolutely - * necessary but we may as well do it while we are here. + * Mark the transaction aborted in clog. This is not absolutely necessary + * but we may as well do it while we are here. 
*/ TransactionIdAbort(xid); TransactionIdAbortTree(nchildren, children); END_CRIT_SECTION(); } - diff --git a/src/backend/access/transam/twophase_rmgr.c b/src/backend/access/transam/twophase_rmgr.c index e78f8b2fbb3..eab442404f9 100644 --- a/src/backend/access/transam/twophase_rmgr.c +++ b/src/backend/access/transam/twophase_rmgr.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.1 2005/06/17 22:32:42 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.2 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -21,29 +21,29 @@ #include "utils/inval.h" -const TwoPhaseCallback twophase_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] = +const TwoPhaseCallback twophase_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] = { - NULL, /* END ID */ - lock_twophase_recover, /* Lock */ - NULL, /* Inval */ - NULL, /* flat file update */ - NULL /* notify/listen */ + NULL, /* END ID */ + lock_twophase_recover, /* Lock */ + NULL, /* Inval */ + NULL, /* flat file update */ + NULL /* notify/listen */ }; -const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] = +const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] = { - NULL, /* END ID */ - lock_twophase_postcommit, /* Lock */ - inval_twophase_postcommit, /* Inval */ - flatfile_twophase_postcommit, /* flat file update */ - notify_twophase_postcommit /* notify/listen */ + NULL, /* END ID */ + lock_twophase_postcommit, /* Lock */ + inval_twophase_postcommit, /* Inval */ + flatfile_twophase_postcommit, /* flat file update */ + notify_twophase_postcommit /* notify/listen */ }; -const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] = +const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] = { - NULL, /* END ID */ - lock_twophase_postabort, /* Lock */ - NULL, /* Inval */ - NULL, /* flat file update */ - NULL /* notify/listen */ + NULL, /* END ID */ + lock_twophase_postabort, /* Lock */ + NULL, /* Inval */ + NULL, /* flat file update */ + NULL /* notify/listen */ }; diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index 99d9213af0c..bff646afb61 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -6,7 +6,7 @@ * Copyright (c) 2000-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.66 2005/08/22 16:59:47 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.67 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -49,21 +49,21 @@ GetNewTransactionId(bool isSubXact) xid = ShmemVariableCache->nextXid; /* - * Check to see if it's safe to assign another XID. This protects - * against catastrophic data loss due to XID wraparound. The basic - * rules are: warn if we're past xidWarnLimit, and refuse to execute - * transactions if we're past xidStopLimit, unless we are running in - * a standalone backend (which gives an escape hatch to the DBA who - * ignored all those warnings). + * Check to see if it's safe to assign another XID. This protects against + * catastrophic data loss due to XID wraparound. 
The basic rules are: + * warn if we're past xidWarnLimit, and refuse to execute transactions if + * we're past xidStopLimit, unless we are running in a standalone backend + * (which gives an escape hatch to the DBA who ignored all those + * warnings). * - * Test is coded to fall out as fast as possible during normal operation, - * ie, when the warn limit is set and we haven't violated it. + * Test is coded to fall out as fast as possible during normal operation, ie, + * when the warn limit is set and we haven't violated it. */ if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit) && TransactionIdIsValid(ShmemVariableCache->xidWarnLimit)) { if (IsUnderPostmaster && - TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidStopLimit)) + TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidStopLimit)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("database is not accepting queries to avoid wraparound data loss in database \"%s\"", @@ -72,20 +72,19 @@ GetNewTransactionId(bool isSubXact) NameStr(ShmemVariableCache->limit_datname)))); else ereport(WARNING, - (errmsg("database \"%s\" must be vacuumed within %u transactions", - NameStr(ShmemVariableCache->limit_datname), - ShmemVariableCache->xidWrapLimit - xid), - errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".", - NameStr(ShmemVariableCache->limit_datname)))); + (errmsg("database \"%s\" must be vacuumed within %u transactions", + NameStr(ShmemVariableCache->limit_datname), + ShmemVariableCache->xidWrapLimit - xid), + errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".", + NameStr(ShmemVariableCache->limit_datname)))); } /* * If we are allocating the first XID of a new page of the commit log, - * zero out that commit-log page before returning. We must do this - * while holding XidGenLock, else another xact could acquire and - * commit a later XID before we zero the page. Fortunately, a page of - * the commit log holds 32K or more transactions, so we don't have to - * do this very often. + * zero out that commit-log page before returning. We must do this while + * holding XidGenLock, else another xact could acquire and commit a later + * XID before we zero the page. Fortunately, a page of the commit log + * holds 32K or more transactions, so we don't have to do this very often. * * Extend pg_subtrans too. */ @@ -93,45 +92,43 @@ GetNewTransactionId(bool isSubXact) ExtendSUBTRANS(xid); /* - * Now advance the nextXid counter. This must not happen until after - * we have successfully completed ExtendCLOG() --- if that routine - * fails, we want the next incoming transaction to try it again. We - * cannot assign more XIDs until there is CLOG space for them. + * Now advance the nextXid counter. This must not happen until after we + * have successfully completed ExtendCLOG() --- if that routine fails, we + * want the next incoming transaction to try it again. We cannot assign + * more XIDs until there is CLOG space for them. */ TransactionIdAdvance(ShmemVariableCache->nextXid); /* - * We must store the new XID into the shared PGPROC array before - * releasing XidGenLock. This ensures that when GetSnapshotData calls + * We must store the new XID into the shared PGPROC array before releasing + * XidGenLock. This ensures that when GetSnapshotData calls * ReadNewTransactionId, all active XIDs before the returned value of - * nextXid are already present in PGPROC. Else we have a race - * condition. + * nextXid are already present in PGPROC. 
Else we have a race condition. * * XXX by storing xid into MyProc without acquiring ProcArrayLock, we are * relying on fetch/store of an xid to be atomic, else other backends * might see a partially-set xid here. But holding both locks at once - * would be a nasty concurrency hit (and in fact could cause a - * deadlock against GetSnapshotData). So for now, assume atomicity. - * Note that readers of PGPROC xid field should be careful to fetch - * the value only once, rather than assume they can read it multiple - * times and get the same answer each time. + * would be a nasty concurrency hit (and in fact could cause a deadlock + * against GetSnapshotData). So for now, assume atomicity. Note that + * readers of PGPROC xid field should be careful to fetch the value only + * once, rather than assume they can read it multiple times and get the + * same answer each time. * * The same comments apply to the subxact xid count and overflow fields. * - * A solution to the atomic-store problem would be to give each PGPROC - * its own spinlock used only for fetching/storing that PGPROC's xid - * and related fields. + * A solution to the atomic-store problem would be to give each PGPROC its + * own spinlock used only for fetching/storing that PGPROC's xid and + * related fields. * * If there's no room to fit a subtransaction XID into PGPROC, set the * cache-overflowed flag instead. This forces readers to look in - * pg_subtrans to map subtransaction XIDs up to top-level XIDs. There - * is a race-condition window, in that the new XID will not appear as - * running until its parent link has been placed into pg_subtrans. - * However, that will happen before anyone could possibly have a - * reason to inquire about the status of the XID, so it seems OK. - * (Snapshots taken during this window *will* include the parent XID, - * so they will deliver the correct answer later on when someone does - * have a reason to inquire.) + * pg_subtrans to map subtransaction XIDs up to top-level XIDs. There is a + * race-condition window, in that the new XID will not appear as running + * until its parent link has been placed into pg_subtrans. However, that + * will happen before anyone could possibly have a reason to inquire about + * the status of the XID, so it seems OK. (Snapshots taken during this + * window *will* include the parent XID, so they will deliver the correct + * answer later on when someone does have a reason to inquire.) */ if (MyProc != NULL) { @@ -197,27 +194,26 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, xidWrapLimit += FirstNormalTransactionId; /* - * We'll refuse to continue assigning XIDs in interactive mode once - * we get within 1M transactions of data loss. This leaves lots - * of room for the DBA to fool around fixing things in a standalone - * backend, while not being significant compared to total XID space. - * (Note that since vacuuming requires one transaction per table - * cleaned, we had better be sure there's lots of XIDs left...) + * We'll refuse to continue assigning XIDs in interactive mode once we get + * within 1M transactions of data loss. This leaves lots of room for the + * DBA to fool around fixing things in a standalone backend, while not + * being significant compared to total XID space. (Note that since + * vacuuming requires one transaction per table cleaned, we had better be + * sure there's lots of XIDs left...) 
*/ xidStopLimit = xidWrapLimit - 1000000; if (xidStopLimit < FirstNormalTransactionId) xidStopLimit -= FirstNormalTransactionId; /* - * We'll start complaining loudly when we get within 10M transactions - * of the stop point. This is kind of arbitrary, but if you let your - * gas gauge get down to 1% of full, would you be looking for the - * next gas station? We need to be fairly liberal about this number - * because there are lots of scenarios where most transactions are - * done by automatic clients that won't pay attention to warnings. - * (No, we're not gonna make this configurable. If you know enough to - * configure it, you know enough to not get in this kind of trouble in - * the first place.) + * We'll start complaining loudly when we get within 10M transactions of + * the stop point. This is kind of arbitrary, but if you let your gas + * gauge get down to 1% of full, would you be looking for the next gas + * station? We need to be fairly liberal about this number because there + * are lots of scenarios where most transactions are done by automatic + * clients that won't pay attention to warnings. (No, we're not gonna make + * this configurable. If you know enough to configure it, you know enough + * to not get in this kind of trouble in the first place.) */ xidWarnLimit = xidStopLimit - 10000000; if (xidWarnLimit < FirstNormalTransactionId) @@ -234,16 +230,16 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, /* Log the info */ ereport(LOG, - (errmsg("transaction ID wrap limit is %u, limited by database \"%s\"", - xidWrapLimit, NameStr(*oldest_datname)))); + (errmsg("transaction ID wrap limit is %u, limited by database \"%s\"", + xidWrapLimit, NameStr(*oldest_datname)))); /* Give an immediate warning if past the wrap warn point */ if (TransactionIdFollowsOrEquals(curXid, xidWarnLimit)) ereport(WARNING, - (errmsg("database \"%s\" must be vacuumed within %u transactions", - NameStr(*oldest_datname), - xidWrapLimit - curXid), - errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".", - NameStr(*oldest_datname)))); + (errmsg("database \"%s\" must be vacuumed within %u transactions", + NameStr(*oldest_datname), + xidWrapLimit - curXid), + errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".", + NameStr(*oldest_datname)))); } @@ -272,11 +268,11 @@ GetNewObjectId(void) * right after a wrap occurs, so as to avoid a possibly large number of * iterations in GetNewOid.) Note we are relying on unsigned comparison. * - * During initdb, we start the OID generator at FirstBootstrapObjectId, - * so we only enforce wrapping to that point when in bootstrap or - * standalone mode. The first time through this routine after normal - * postmaster start, the counter will be forced up to FirstNormalObjectId. - * This mechanism leaves the OIDs between FirstBootstrapObjectId and + * During initdb, we start the OID generator at FirstBootstrapObjectId, so we + * only enforce wrapping to that point when in bootstrap or standalone + * mode. The first time through this routine after normal postmaster + * start, the counter will be forced up to FirstNormalObjectId. This + * mechanism leaves the OIDs between FirstBootstrapObjectId and * FirstNormalObjectId available for automatic assignment during initdb, * while ensuring they will never conflict with user-assigned OIDs. 
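/*
 * A minimal standalone sketch of the limit arithmetic in
 * SetTransactionIdLimit above, with an illustrative constant: the stop and
 * warn limits sit 1M and 10M XIDs before the wrap limit, and whenever a
 * subtraction lands in the reserved range below FirstNormalTransactionId
 * the value is pulled back past it, since those special XIDs are skipped
 * when the counter wraps.
 */
#include <stdint.h>

#define FIRST_NORMAL_XID	3u	/* stand-in for FirstNormalTransactionId */

static uint32_t
back_off(uint32_t limit, uint32_t amount)
{
	limit -= amount;			/* unsigned arithmetic wraps, as intended */
	if (limit < FIRST_NORMAL_XID)
		limit -= FIRST_NORMAL_XID;	/* jump over the reserved special XIDs */
	return limit;
}

/* e.g.: stop = back_off(wrap, 1000000); warn = back_off(stop, 10000000); */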
*/ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index eabcb117cc5..ea19e075640 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.214 2005/08/20 23:45:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215 2005/10/15 02:49:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -110,15 +110,14 @@ typedef enum TBlockState */ typedef struct TransactionStateData { - TransactionId transactionId; /* my XID, or Invalid if none */ + TransactionId transactionId; /* my XID, or Invalid if none */ SubTransactionId subTransactionId; /* my subxact ID */ char *name; /* savepoint name, if any */ int savepointLevel; /* savepoint level */ TransState state; /* low-level state */ TBlockState blockState; /* high-level state */ int nestingLevel; /* nest depth */ - MemoryContext curTransactionContext; /* my xact-lifetime - * context */ + MemoryContext curTransactionContext; /* my xact-lifetime context */ ResourceOwner curTransactionOwner; /* my query resources */ List *childXids; /* subcommitted child XIDs */ Oid currentUser; /* subxact start current_user */ @@ -219,8 +218,8 @@ static void AtStart_Memory(void); static void AtStart_ResourceOwner(void); static void CallXactCallbacks(XactEvent event); static void CallSubXactCallbacks(SubXactEvent event, - SubTransactionId mySubid, - SubTransactionId parentSubid); + SubTransactionId mySubid, + SubTransactionId parentSubid); static void CleanupTransaction(void); static void CommitTransaction(void); static void RecordTransactionAbort(void); @@ -349,18 +348,18 @@ AssignSubTransactionId(TransactionState s) /* * Generate a new Xid and record it in PG_PROC and pg_subtrans. * - * NB: we must make the subtrans entry BEFORE the Xid appears anywhere - * in shared storage other than PG_PROC; because if there's no room for - * it in PG_PROC, the subtrans entry is needed to ensure that other - * backends see the Xid as "running". See GetNewTransactionId. + * NB: we must make the subtrans entry BEFORE the Xid appears anywhere in + * shared storage other than PG_PROC; because if there's no room for it in + * PG_PROC, the subtrans entry is needed to ensure that other backends see + * the Xid as "running". See GetNewTransactionId. */ s->transactionId = GetNewTransactionId(true); SubTransSetParent(s->transactionId, s->parent->transactionId); /* - * Acquire lock on the transaction XID. (We assume this cannot block.) - * We have to be sure that the lock is assigned to the transaction's + * Acquire lock on the transaction XID. (We assume this cannot block.) We + * have to be sure that the lock is assigned to the transaction's * ResourceOwner. */ currentOwner = CurrentResourceOwner; @@ -453,22 +452,22 @@ TransactionIdIsCurrentTransactionId(TransactionId xid) /* * We always say that BootstrapTransactionId is "not my transaction ID" - * even when it is (ie, during bootstrap). Along with the fact that + * even when it is (ie, during bootstrap). Along with the fact that * transam.c always treats BootstrapTransactionId as already committed, - * this causes the tqual.c routines to see all tuples as committed, - * which is what we need during bootstrap. (Bootstrap mode only inserts - * tuples, it never updates or deletes them, so all tuples can be presumed - * good immediately.) 
+ * this causes the tqual.c routines to see all tuples as committed, which + * is what we need during bootstrap. (Bootstrap mode only inserts tuples, + * it never updates or deletes them, so all tuples can be presumed good + * immediately.) */ if (xid == BootstrapTransactionId) return false; /* - * We will return true for the Xid of the current subtransaction, any - * of its subcommitted children, any of its parents, or any of their - * previously subcommitted children. However, a transaction being - * aborted is no longer "current", even though it may still have an - * entry on the state stack. + * We will return true for the Xid of the current subtransaction, any of + * its subcommitted children, any of its parents, or any of their + * previously subcommitted children. However, a transaction being aborted + * is no longer "current", even though it may still have an entry on the + * state stack. */ for (s = CurrentTransactionState; s != NULL; s = s->parent) { @@ -498,12 +497,12 @@ void CommandCounterIncrement(void) { currentCommandId += 1; - if (currentCommandId == FirstCommandId) /* check for overflow */ + if (currentCommandId == FirstCommandId) /* check for overflow */ { currentCommandId -= 1; ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("cannot have more than 2^32-1 commands in a transaction"))); + errmsg("cannot have more than 2^32-1 commands in a transaction"))); } /* Propagate new command ID into static snapshots, if set */ @@ -607,16 +606,15 @@ AtSubStart_Memory(void) Assert(CurTransactionContext != NULL); /* - * Create a CurTransactionContext, which will be used to hold data - * that survives subtransaction commit but disappears on - * subtransaction abort. We make it a child of the immediate parent's - * CurTransactionContext. + * Create a CurTransactionContext, which will be used to hold data that + * survives subtransaction commit but disappears on subtransaction abort. + * We make it a child of the immediate parent's CurTransactionContext. */ CurTransactionContext = AllocSetContextCreate(CurTransactionContext, "CurTransactionContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); s->curTransactionContext = CurTransactionContext; /* Make the CurTransactionContext active. */ @@ -634,8 +632,8 @@ AtSubStart_ResourceOwner(void) Assert(s->parent != NULL); /* - * Create a resource owner for the subtransaction. We make it a child - * of the immediate parent's resource owner. + * Create a resource owner for the subtransaction. We make it a child of + * the immediate parent's resource owner. */ s->curTransactionOwner = ResourceOwnerCreate(s->parent->curTransactionOwner, @@ -666,11 +664,10 @@ RecordTransactionCommit(void) nchildren = xactGetCommittedChildren(&children); /* - * If we made neither any XLOG entries nor any temp-rel updates, and - * have no files to be deleted, we can omit recording the transaction - * commit at all. (This test includes the effects of subtransactions, - * so the presence of committed subxacts need not alone force a - * write.) + * If we made neither any XLOG entries nor any temp-rel updates, and have + * no files to be deleted, we can omit recording the transaction commit at + * all. (This test includes the effects of subtransactions, so the + * presence of committed subxacts need not alone force a write.) 
*/ if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate || nrels > 0) { @@ -684,18 +681,17 @@ RecordTransactionCommit(void) START_CRIT_SECTION(); /* - * If our transaction made any transaction-controlled XLOG - * entries, we need to lock out checkpoint start between writing - * our XLOG record and updating pg_clog. Otherwise it is possible - * for the checkpoint to set REDO after the XLOG record but fail - * to flush the pg_clog update to disk, leading to loss of the - * transaction commit if we crash a little later. Slightly klugy - * fix for problem discovered 2004-08-10. + * If our transaction made any transaction-controlled XLOG entries, we + * need to lock out checkpoint start between writing our XLOG record + * and updating pg_clog. Otherwise it is possible for the checkpoint + * to set REDO after the XLOG record but fail to flush the pg_clog + * update to disk, leading to loss of the transaction commit if we + * crash a little later. Slightly klugy fix for problem discovered + * 2004-08-10. * - * (If it made no transaction-controlled XLOG entries, its XID - * appears nowhere in permanent storage, so no one else will ever - * care if it committed; so it doesn't matter if we lose the - * commit flag.) + * (If it made no transaction-controlled XLOG entries, its XID appears + * nowhere in permanent storage, so no one else will ever care if it + * committed; so it doesn't matter if we lose the commit flag.) * * Note we only need a shared lock. */ @@ -704,8 +700,8 @@ RecordTransactionCommit(void) LWLockAcquire(CheckpointStartLock, LW_SHARED); /* - * We only need to log the commit in XLOG if the transaction made - * any transaction-controlled XLOG entries or will delete files. + * We only need to log the commit in XLOG if the transaction made any + * transaction-controlled XLOG entries or will delete files. */ if (madeTCentries || nrels > 0) { @@ -748,26 +744,26 @@ RecordTransactionCommit(void) } /* - * We must flush our XLOG entries to disk if we made any XLOG - * entries, whether in or out of transaction control. For - * example, if we reported a nextval() result to the client, this - * ensures that any XLOG record generated by nextval will hit the - * disk before we report the transaction committed. + * We must flush our XLOG entries to disk if we made any XLOG entries, + * whether in or out of transaction control. For example, if we + * reported a nextval() result to the client, this ensures that any + * XLOG record generated by nextval will hit the disk before we report + * the transaction committed. * - * Note: if we generated a commit record above, MyXactMadeXLogEntry - * will certainly be set now. + * Note: if we generated a commit record above, MyXactMadeXLogEntry will + * certainly be set now. */ if (MyXactMadeXLogEntry) { /* * Sleep before flush! So we can flush more than one commit - * records per single fsync. (The idea is some other backend - * may do the XLogFlush while we're sleeping. This needs work - * still, because on most Unixen, the minimum select() delay - * is 10msec or more, which is way too long.) + * records per single fsync. (The idea is some other backend may + * do the XLogFlush while we're sleeping. This needs work still, + * because on most Unixen, the minimum select() delay is 10msec or + * more, which is way too long.) 
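/*
 * Editorial sketch (not part of the commit): the commit_delay heuristic the
 * comment above describes -- sleep briefly before flushing so other backends'
 * commit records can share one fsync, but only when fsync is enabled and
 * enough other backends are active for piggybacking to be plausible.  The
 * helper names are stand-ins for the backend's own routines (the real code
 * uses CountActiveBackends() and pg_usleep(), if memory serves).
 */
extern int  active_backend_count(void);    /* stand-in for CountActiveBackends() */
extern void sleep_microseconds(long usec); /* stand-in for pg_usleep() */

static void
maybe_delay_before_flush(int commit_delay_us, int commit_siblings,
                         int fsync_enabled)
{
    if (commit_delay_us > 0 && fsync_enabled &&
        active_backend_count() >= commit_siblings)
        sleep_microseconds(commit_delay_us);   /* then issue the XLOG flush */
}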
* - * We do not sleep if enableFsync is not turned on, nor if there - * are fewer than CommitSiblings other backends with active + * We do not sleep if enableFsync is not turned on, nor if there are + * fewer than CommitSiblings other backends with active * transactions. */ if (CommitDelay > 0 && enableFsync && @@ -778,14 +774,13 @@ RecordTransactionCommit(void) } /* - * We must mark the transaction committed in clog if its XID - * appears either in permanent rels or in local temporary rels. We - * test this by seeing if we made transaction-controlled entries - * *OR* local-rel tuple updates. Note that if we made only the - * latter, we have not emitted an XLOG record for our commit, and - * so in the event of a crash the clog update might be lost. This - * is okay because no one else will ever care whether we - * committed. + * We must mark the transaction committed in clog if its XID appears + * either in permanent rels or in local temporary rels. We test this + * by seeing if we made transaction-controlled entries *OR* local-rel + * tuple updates. Note that if we made only the latter, we have not + * emitted an XLOG record for our commit, and so in the event of a + * crash the clog update might be lost. This is okay because no one + * else will ever care whether we committed. */ if (madeTCentries || MyXactMadeTempRelUpdate) { @@ -833,9 +828,8 @@ static void AtCommit_Memory(void) { /* - * Now that we're "out" of a transaction, have the system allocate - * things in the top memory context instead of per-transaction - * contexts. + * Now that we're "out" of a transaction, have the system allocate things + * in the top memory context instead of per-transaction contexts. */ MemoryContextSwitchTo(TopMemoryContext); @@ -870,9 +864,9 @@ AtSubCommit_Memory(void) /* * Ordinarily we cannot throw away the child's CurTransactionContext, - * since the data it contains will be needed at upper commit. However, - * if there isn't actually anything in it, we can throw it away. This - * avoids a small memory leak in the common case of "trivial" subxacts. + * since the data it contains will be needed at upper commit. However, if + * there isn't actually anything in it, we can throw it away. This avoids + * a small memory leak in the common case of "trivial" subxacts. */ if (MemoryContextIsEmpty(s->curTransactionContext)) { @@ -908,9 +902,10 @@ AtSubCommit_childXids(void) { s->parent->childXids = list_concat(s->parent->childXids, s->childXids); + /* - * list_concat doesn't free the list header for the second list; - * do so here to avoid memory leakage (kluge) + * list_concat doesn't free the list header for the second list; do so + * here to avoid memory leakage (kluge) */ pfree(s->childXids); s->childXids = NIL; @@ -929,14 +924,14 @@ RecordSubTransactionCommit(void) * We do not log the subcommit in XLOG; it doesn't matter until the * top-level transaction commits. * - * We must mark the subtransaction subcommitted in clog if its XID - * appears either in permanent rels or in local temporary rels. We - * test this by seeing if we made transaction-controlled entries *OR* - * local-rel tuple updates. (The test here actually covers the entire - * transaction tree so far, so it may mark subtransactions that don't - * really need it, but it's probably not worth being tenser. Note that - * if a prior subtransaction dirtied these variables, then - * RecordTransactionCommit will have to do the full pushup anyway...) 
+ * We must mark the subtransaction subcommitted in clog if its XID appears + * either in permanent rels or in local temporary rels. We test this by + * seeing if we made transaction-controlled entries *OR* local-rel tuple + * updates. (The test here actually covers the entire transaction tree so + * far, so it may mark subtransactions that don't really need it, but it's + * probably not worth being tenser. Note that if a prior subtransaction + * dirtied these variables, then RecordTransactionCommit will have to do + * the full pushup anyway...) */ if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate) { @@ -974,9 +969,9 @@ RecordTransactionAbort(void) /* * If we made neither any transaction-controlled XLOG entries nor any - * temp-rel updates, and are not going to delete any files, we can - * omit recording the transaction abort at all. No one will ever care - * that it aborted. (These tests cover our whole transaction tree.) + * temp-rel updates, and are not going to delete any files, we can omit + * recording the transaction abort at all. No one will ever care that it + * aborted. (These tests cover our whole transaction tree.) */ if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate || nrels > 0) { @@ -992,16 +987,16 @@ RecordTransactionAbort(void) START_CRIT_SECTION(); /* - * We only need to log the abort in XLOG if the transaction made - * any transaction-controlled XLOG entries or will delete files. - * (If it made no transaction-controlled XLOG entries, its XID - * appears nowhere in permanent storage, so no one else will ever - * care if it committed.) + * We only need to log the abort in XLOG if the transaction made any + * transaction-controlled XLOG entries or will delete files. (If it + * made no transaction-controlled XLOG entries, its XID appears + * nowhere in permanent storage, so no one else will ever care if it + * committed.) * - * We do not flush XLOG to disk unless deleting files, since the - * default assumption after a crash would be that we aborted, - * anyway. For the same reason, we don't need to worry about - * interlocking against checkpoint start. + * We do not flush XLOG to disk unless deleting files, since the default + * assumption after a crash would be that we aborted, anyway. For the + * same reason, we don't need to worry about interlocking against + * checkpoint start. */ if (MyLastRecPtr.xrecoff != 0 || nrels > 0) { @@ -1047,8 +1042,8 @@ RecordTransactionAbort(void) * Mark the transaction aborted in clog. This is not absolutely * necessary but we may as well do it while we are here. * - * The ordering here isn't critical but it seems best to mark the - * parent first. This assures an atomic transition of all the + * The ordering here isn't critical but it seems best to mark the parent + * first. This assures an atomic transition of all the * subtransactions to aborted state from the point of view of * concurrent TransactionIdDidAbort calls. */ @@ -1078,8 +1073,8 @@ AtAbort_Memory(void) { /* * Make sure we are in a valid context (not a child of - * TopTransactionContext...). Note that it is possible for this code - * to be called when we aren't in a transaction at all; go directly to + * TopTransactionContext...). Note that it is possible for this code to + * be called when we aren't in a transaction at all; go directly to * TopMemoryContext in that case. 
*/ if (TopTransactionContext != NULL) @@ -1087,8 +1082,8 @@ AtAbort_Memory(void) MemoryContextSwitchTo(TopTransactionContext); /* - * We do not want to destroy the transaction's global state yet, - * so we can't free any memory here. + * We do not want to destroy the transaction's global state yet, so we + * can't free any memory here. */ } else @@ -1114,8 +1109,8 @@ static void AtAbort_ResourceOwner(void) { /* - * Make sure we have a valid ResourceOwner, if possible (else it - * will be NULL, which is OK) + * Make sure we have a valid ResourceOwner, if possible (else it will be + * NULL, which is OK) */ CurrentResourceOwner = TopTransactionResourceOwner; } @@ -1143,7 +1138,7 @@ AtSubAbort_childXids(void) /* * We keep the child-XID lists in TopTransactionContext (see - * AtSubCommit_childXids). This means we'd better free the list + * AtSubCommit_childXids). This means we'd better free the list * explicitly at abort to avoid leakage. */ list_free(s->childXids); @@ -1168,11 +1163,11 @@ RecordSubTransactionAbort(void) /* * If we made neither any transaction-controlled XLOG entries nor any - * temp-rel updates, and are not going to delete any files, we can - * omit recording the transaction abort at all. No one will ever care - * that it aborted. (These tests cover our whole transaction tree, - * and therefore may mark subxacts that don't really need it, but it's - * probably not worth being tenser.) + * temp-rel updates, and are not going to delete any files, we can omit + * recording the transaction abort at all. No one will ever care that it + * aborted. (These tests cover our whole transaction tree, and therefore + * may mark subxacts that don't really need it, but it's probably not + * worth being tenser.) * * In this case we needn't worry about marking subcommitted children as * aborted, because they didn't mark themselves as subcommitted in the @@ -1183,8 +1178,8 @@ RecordSubTransactionAbort(void) START_CRIT_SECTION(); /* - * We only need to log the abort in XLOG if the transaction made - * any transaction-controlled XLOG entries or will delete files. + * We only need to log the abort in XLOG if the transaction made any + * transaction-controlled XLOG entries or will delete files. */ if (MyLastRecPtr.xrecoff != 0 || nrels > 0) { @@ -1238,11 +1233,10 @@ RecordSubTransactionAbort(void) } /* - * We can immediately remove failed XIDs from PGPROC's cache of - * running child XIDs. It's easiest to do it here while we have the - * child XID array at hand, even though in the main-transaction case - * the equivalent work happens just after return from - * RecordTransactionAbort. + * We can immediately remove failed XIDs from PGPROC's cache of running + * child XIDs. It's easiest to do it here while we have the child XID + * array at hand, even though in the main-transaction case the equivalent + * work happens just after return from RecordTransactionAbort. */ XidCacheRemoveRunningXids(xid, nchildren, children); @@ -1265,9 +1259,8 @@ static void AtCleanup_Memory(void) { /* - * Now that we're "out" of a transaction, have the system allocate - * things in the top memory context instead of per-transaction - * contexts. + * Now that we're "out" of a transaction, have the system allocate things + * in the top memory context instead of per-transaction contexts. */ MemoryContextSwitchTo(TopMemoryContext); @@ -1304,9 +1297,9 @@ AtSubCleanup_Memory(void) CurTransactionContext = s->parent->curTransactionContext; /* - * Delete the subxact local memory contexts. 
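/*
 * Editorial sketch (not part of the commit): the per-subtransaction memory
 * context lifecycle the AtSubStart_Memory / AtSubCleanup_Memory hunks above
 * describe, using the context calls that appear in this file.  The wrapper
 * function names are hypothetical; the sketch assumes the normal backend
 * environment (postgres.h, utils/memutils.h) and omits error handling.
 */
static MemoryContext
subxact_context_start(MemoryContext parent_ctx)
{
    /* child of the parent's CurTransactionContext; dies with the subxact */
    MemoryContext subctx = AllocSetContextCreate(parent_ctx,
                                                 "CurTransactionContext",
                                                 ALLOCSET_DEFAULT_MINSIZE,
                                                 ALLOCSET_DEFAULT_INITSIZE,
                                                 ALLOCSET_DEFAULT_MAXSIZE);

    MemoryContextSwitchTo(subctx);
    return subctx;
}

static void
subxact_context_abort(MemoryContext subctx, MemoryContext parent_ctx)
{
    /* on abort/cleanup: drop the whole context (and any child contexts) */
    MemoryContextSwitchTo(parent_ctx);
    MemoryContextDelete(subctx);
}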
Its CurTransactionContext - * can go too (note this also kills CurTransactionContexts from any - * children of the subxact). + * Delete the subxact local memory contexts. Its CurTransactionContext can + * go too (note this also kills CurTransactionContexts from any children + * of the subxact). */ if (s->curTransactionContext) MemoryContextDelete(s->curTransactionContext); @@ -1344,11 +1337,10 @@ StartTransaction(void) * start processing */ s->state = TRANS_START; - s->transactionId = InvalidTransactionId; /* until assigned */ + s->transactionId = InvalidTransactionId; /* until assigned */ /* - * Make sure we've freed any old snapshot, and reset xact state - * variables + * Make sure we've freed any old snapshot, and reset xact state variables */ FreeXactSnapshot(); XactIsoLevel = DefaultXactIsoLevel; @@ -1386,10 +1378,10 @@ StartTransaction(void) s->childXids = NIL; /* - * You might expect to see "s->currentUser = GetUserId();" here, but - * you won't because it doesn't work during startup; the userid isn't - * set yet during a backend's first transaction start. We only use - * the currentUser field in sub-transaction state structs. + * You might expect to see "s->currentUser = GetUserId();" here, but you + * won't because it doesn't work during startup; the userid isn't set yet + * during a backend's first transaction start. We only use the + * currentUser field in sub-transaction state structs. * * prevXactReadOnly is also valid only in sub-transactions. */ @@ -1432,13 +1424,12 @@ CommitTransaction(void) Assert(s->parent == NULL); /* - * Do pre-commit processing (most of this stuff requires database - * access, and in fact could still cause an error...) + * Do pre-commit processing (most of this stuff requires database access, + * and in fact could still cause an error...) * - * It is possible for CommitHoldablePortals to invoke functions that - * queue deferred triggers, and it's also possible that triggers create - * holdable cursors. So we have to loop until there's nothing left to - * do. + * It is possible for CommitHoldablePortals to invoke functions that queue + * deferred triggers, and it's also possible that triggers create holdable + * cursors. So we have to loop until there's nothing left to do. */ for (;;) { @@ -1525,19 +1516,19 @@ CommitTransaction(void) } /* - * This is all post-commit cleanup. Note that if an error is raised - * here, it's too late to abort the transaction. This should be just + * This is all post-commit cleanup. Note that if an error is raised here, + * it's too late to abort the transaction. This should be just * noncritical resource releasing. * - * The ordering of operations is not entirely random. The idea is: - * release resources visible to other backends (eg, files, buffer - * pins); then release locks; then release backend-local resources. We - * want to release locks at the point where any backend waiting for us - * will see our transaction as being fully cleaned up. + * The ordering of operations is not entirely random. The idea is: release + * resources visible to other backends (eg, files, buffer pins); then + * release locks; then release backend-local resources. We want to release + * locks at the point where any backend waiting for us will see our + * transaction as being fully cleaned up. * - * Resources that can be associated with individual queries are handled - * by the ResourceOwner mechanism. The other calls here are for - * backend-wide state. 
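/*
 * Editorial sketch (not part of the commit): the "loop until there's nothing
 * left to do" pre-commit pattern the CommitTransaction comment above
 * describes.  Firing deferred triggers can create holdable cursors, and
 * persisting holdable cursors can queue more deferred triggers, so the two
 * steps repeat until neither produces new work.  The helper names are
 * illustrative stand-ins for the AfterTrigger / CommitHoldablePortals calls
 * named in the comment.
 */
extern void fire_deferred_triggers(void);
extern int  persist_holdable_cursors(void);  /* nonzero if it did any work */

static void
precommit_loop(void)
{
    for (;;)
    {
        fire_deferred_triggers();
        if (!persist_holdable_cursors())
            break;              /* nothing new was queued; safe to proceed */
    }
}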
+ * Resources that can be associated with individual queries are handled by + * the ResourceOwner mechanism. The other calls here are for backend-wide + * state. */ CallXactCallbacks(XACT_EVENT_COMMIT); @@ -1553,12 +1544,11 @@ CommitTransaction(void) AtEOXact_RelationCache(true); /* - * Make catalog changes visible to all backends. This has to happen - * after relcache references are dropped (see comments for - * AtEOXact_RelationCache), but before locks are released (if anyone - * is waiting for lock on a relation we've modified, we want them to - * know about the catalog change before they start using the - * relation). + * Make catalog changes visible to all backends. This has to happen after + * relcache references are dropped (see comments for + * AtEOXact_RelationCache), but before locks are released (if anyone is + * waiting for lock on a relation we've modified, we want them to know + * about the catalog change before they start using the relation). */ AtEOXact_Inval(true); @@ -1621,10 +1611,10 @@ CommitTransaction(void) static void PrepareTransaction(void) { - TransactionState s = CurrentTransactionState; - TransactionId xid = GetCurrentTransactionId(); - GlobalTransaction gxact; - TimestampTz prepared_at; + TransactionState s = CurrentTransactionState; + TransactionId xid = GetCurrentTransactionId(); + GlobalTransaction gxact; + TimestampTz prepared_at; ShowTransactionState("PrepareTransaction"); @@ -1637,13 +1627,12 @@ PrepareTransaction(void) Assert(s->parent == NULL); /* - * Do pre-commit processing (most of this stuff requires database - * access, and in fact could still cause an error...) + * Do pre-commit processing (most of this stuff requires database access, + * and in fact could still cause an error...) * - * It is possible for PrepareHoldablePortals to invoke functions that - * queue deferred triggers, and it's also possible that triggers create - * holdable cursors. So we have to loop until there's nothing left to - * do. + * It is possible for PrepareHoldablePortals to invoke functions that queue + * deferred triggers, and it's also possible that triggers create holdable + * cursors. So we have to loop until there's nothing left to do. */ for (;;) { @@ -1693,8 +1682,8 @@ PrepareTransaction(void) BufmgrCommit(); /* - * Reserve the GID for this transaction. This could fail if the - * requested GID is invalid or already in use. + * Reserve the GID for this transaction. This could fail if the requested + * GID is invalid or already in use. */ gxact = MarkAsPreparing(xid, prepareGID, prepared_at, GetUserId(), MyDatabaseId); @@ -1707,14 +1696,14 @@ PrepareTransaction(void) * want transaction abort to be able to clean up. (In particular, the * AtPrepare routines may error out if they find cases they cannot * handle.) State cleanup should happen in the PostPrepare routines - * below. However, some modules can go ahead and clear state here - * because they wouldn't do anything with it during abort anyway. + * below. However, some modules can go ahead and clear state here because + * they wouldn't do anything with it during abort anyway. * * Note: because the 2PC state file records will be replayed in the same - * order they are made, the order of these calls has to match the order - * in which we want things to happen during COMMIT PREPARED or - * ROLLBACK PREPARED; in particular, pay attention to whether things - * should happen before or after releasing the transaction's locks. 
+ * order they are made, the order of these calls has to match the order in + * which we want things to happen during COMMIT PREPARED or ROLLBACK + * PREPARED; in particular, pay attention to whether things should happen + * before or after releasing the transaction's locks. */ StartPrepare(gxact); @@ -1726,15 +1715,14 @@ PrepareTransaction(void) /* * Here is where we really truly prepare. * - * We have to record transaction prepares even if we didn't - * make any updates, because the transaction manager might - * get confused if we lose a global transaction. + * We have to record transaction prepares even if we didn't make any updates, + * because the transaction manager might get confused if we lose a global + * transaction. */ EndPrepare(gxact); /* - * Now we clean up backend-internal state and release internal - * resources. + * Now we clean up backend-internal state and release internal resources. */ /* Break the chain of back-links in the XLOG records I output */ @@ -1743,9 +1731,9 @@ PrepareTransaction(void) MyXactMadeTempRelUpdate = false; /* - * Let others know about no transaction in progress by me. This has - * to be done *after* the prepared transaction has been marked valid, - * else someone may think it is unlocked and recyclable. + * Let others know about no transaction in progress by me. This has to be + * done *after* the prepared transaction has been marked valid, else + * someone may think it is unlocked and recyclable. */ /* Lock ProcArrayLock because that's what GetSnapshotData uses. */ @@ -1762,7 +1750,7 @@ PrepareTransaction(void) /* * This is all post-transaction cleanup. Note that if an error is raised * here, it's too late to abort the transaction. This should be just - * noncritical resource releasing. See notes in CommitTransaction. + * noncritical resource releasing. See notes in CommitTransaction. */ CallXactCallbacks(XACT_EVENT_PREPARE); @@ -1819,8 +1807,8 @@ PrepareTransaction(void) s->childXids = NIL; /* - * done with 1st phase commit processing, set current transaction - * state back to default + * done with 1st phase commit processing, set current transaction state + * back to default */ s->state = TRANS_DEFAULT; @@ -1842,8 +1830,8 @@ AbortTransaction(void) /* * Release any LW locks we might be holding as quickly as possible. * (Regular locks, however, must be held till we finish aborting.) - * Releasing LW locks is critical since we might try to grab them - * again while cleaning up! + * Releasing LW locks is critical since we might try to grab them again + * while cleaning up! */ LWLockReleaseAll(); @@ -1852,8 +1840,8 @@ AbortTransaction(void) UnlockBuffers(); /* - * Also clean up any open wait for lock, since the lock manager will - * choke if we try to wait for another lock before doing this. + * Also clean up any open wait for lock, since the lock manager will choke + * if we try to wait for another lock before doing this. */ LockWaitCancel(); @@ -1866,8 +1854,8 @@ AbortTransaction(void) Assert(s->parent == NULL); /* - * set the current transaction state information appropriately during - * the abort processing + * set the current transaction state information appropriately during the + * abort processing */ s->state = TRANS_ABORT; @@ -1876,15 +1864,14 @@ AbortTransaction(void) AtAbort_ResourceOwner(); /* - * Reset user id which might have been changed transiently. We cannot - * use s->currentUser, since it may not be set yet; instead rely on - * internal state of miscinit.c. + * Reset user id which might have been changed transiently. 
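/*
 * Editorial sketch (not part of the commit): a condensed view of the entry
 * sequence of AbortTransaction() as the hunks above describe it -- lightweight
 * locks go first (cleanup may need to re-acquire them), then buffer pins and
 * any pending lock wait are cleared, and only then does the ordinary abort
 * processing begin.  All calls shown appear in this file; regular heavyweight
 * locks are kept until the abort is finished.
 */
LWLockReleaseAll();       /* LW locks first -- cleanup may grab them again   */
UnlockBuffers();          /* drop buffer pins/locks                          */
LockWaitCancel();         /* lock manager would choke on a new wait otherwise */
s->state = TRANS_ABORT;   /* now do the real abort work...                    */
AtAbort_Memory();         /* ...in a safe memory context                      */
AtAbort_ResourceOwner();  /* ...with a valid resource owner                   */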
We cannot use + * s->currentUser, since it may not be set yet; instead rely on internal + * state of miscinit.c. * - * (Note: it is not necessary to restore session authorization here - * because that can only be changed via GUC, and GUC will take care of - * rolling it back if need be. However, an error within a SECURITY - * DEFINER function could send control here with the wrong current - * userid.) + * (Note: it is not necessary to restore session authorization here because + * that can only be changed via GUC, and GUC will take care of rolling it + * back if need be. However, an error within a SECURITY DEFINER function + * could send control here with the wrong current userid.) */ AtAbort_UserId(); @@ -1898,15 +1885,15 @@ AbortTransaction(void) AtEOXact_UpdateFlatFiles(false); /* - * Advertise the fact that we aborted in pg_clog (assuming that we - * got as far as assigning an XID to advertise). + * Advertise the fact that we aborted in pg_clog (assuming that we got as + * far as assigning an XID to advertise). */ if (TransactionIdIsValid(s->transactionId)) RecordTransactionAbort(); /* - * Let others know about no transaction in progress by me. Note that - * this must be done _before_ releasing locks we hold and _after_ + * Let others know about no transaction in progress by me. Note that this + * must be done _before_ releasing locks we hold and _after_ * RecordTransactionAbort. */ if (MyProc != NULL) @@ -2012,8 +1999,8 @@ StartTransactionCommand(void) switch (s->blockState) { /* - * if we aren't in a transaction block, we just do our usual - * start transaction. + * if we aren't in a transaction block, we just do our usual start + * transaction. */ case TBLOCK_DEFAULT: StartTransaction(); @@ -2021,23 +2008,23 @@ StartTransactionCommand(void) break; /* - * We are somewhere in a transaction block or subtransaction - * and about to start a new command. For now we do nothing, - * but someday we may do command-local resource initialization. - * (Note that any needed CommandCounterIncrement was done by - * the previous CommitTransactionCommand.) + * We are somewhere in a transaction block or subtransaction and + * about to start a new command. For now we do nothing, but + * someday we may do command-local resource initialization. (Note + * that any needed CommandCounterIncrement was done by the + * previous CommitTransactionCommand.) */ case TBLOCK_INPROGRESS: case TBLOCK_SUBINPROGRESS: break; /* - * Here we are in a failed transaction block (one of - * the commands caused an abort) so we do nothing but remain in - * the abort state. Eventually we will get a ROLLBACK command - * which will get us out of this state. (It is up to other - * code to ensure that no commands other than ROLLBACK will be - * processed in these states.) + * Here we are in a failed transaction block (one of the commands + * caused an abort) so we do nothing but remain in the abort + * state. Eventually we will get a ROLLBACK command which will + * get us out of this state. (It is up to other code to ensure + * that no commands other than ROLLBACK will be processed in these + * states.) */ case TBLOCK_ABORT: case TBLOCK_SUBABORT: @@ -2099,10 +2086,10 @@ CommitTransactionCommand(void) break; /* - * We are completing a "BEGIN TRANSACTION" command, so we - * change to the "transaction block in progress" state and - * return. (We assume the BEGIN did nothing to the database, - * so we need no CommandCounterIncrement.) 
+ * We are completing a "BEGIN TRANSACTION" command, so we change + * to the "transaction block in progress" state and return. (We + * assume the BEGIN did nothing to the database, so we need no + * CommandCounterIncrement.) */ case TBLOCK_BEGIN: s->blockState = TBLOCK_INPROGRESS; @@ -2110,8 +2097,8 @@ CommitTransactionCommand(void) /* * This is the case when we have finished executing a command - * someplace within a transaction block. We increment the - * command counter and return. + * someplace within a transaction block. We increment the command + * counter and return. */ case TBLOCK_INPROGRESS: case TBLOCK_SUBINPROGRESS: @@ -2119,8 +2106,8 @@ CommitTransactionCommand(void) break; /* - * We are completing a "COMMIT" command. Do it and return to - * the idle state. + * We are completing a "COMMIT" command. Do it and return to the + * idle state. */ case TBLOCK_END: CommitTransaction(); @@ -2128,17 +2115,17 @@ CommitTransactionCommand(void) break; /* - * Here we are in the middle of a transaction block but one of - * the commands caused an abort so we do nothing but remain in - * the abort state. Eventually we will get a ROLLBACK comand. + * Here we are in the middle of a transaction block but one of the + * commands caused an abort so we do nothing but remain in the + * abort state. Eventually we will get a ROLLBACK comand. */ case TBLOCK_ABORT: case TBLOCK_SUBABORT: break; /* - * Here we were in an aborted transaction block and we just - * got the ROLLBACK command from the user, so clean up the + * Here we were in an aborted transaction block and we just got + * the ROLLBACK command from the user, so clean up the * already-aborted transaction and return to the idle state. */ case TBLOCK_ABORT_END: @@ -2147,9 +2134,9 @@ CommitTransactionCommand(void) break; /* - * Here we were in a perfectly good transaction block but the - * user told us to ROLLBACK anyway. We have to abort the - * transaction and then clean up. + * Here we were in a perfectly good transaction block but the user + * told us to ROLLBACK anyway. We have to abort the transaction + * and then clean up. */ case TBLOCK_ABORT_PENDING: AbortTransaction(); @@ -2169,8 +2156,8 @@ CommitTransactionCommand(void) /* * We were just issued a SAVEPOINT inside a transaction block. * Start a subtransaction. (DefineSavepoint already did - * PushTransaction, so as to have someplace to put the - * SUBBEGIN state.) + * PushTransaction, so as to have someplace to put the SUBBEGIN + * state.) */ case TBLOCK_SUBBEGIN: StartSubTransaction(); @@ -2259,8 +2246,8 @@ CommitTransactionCommand(void) break; /* - * Same as above, but the subtransaction had already failed, - * so we don't need AbortSubTransaction. + * Same as above, but the subtransaction had already failed, so we + * don't need AbortSubTransaction. */ case TBLOCK_SUBABORT_RESTART: { @@ -2320,8 +2307,8 @@ AbortCurrentTransaction(void) break; /* - * if we aren't in a transaction block, we just do the basic - * abort & cleanup transaction. + * if we aren't in a transaction block, we just do the basic abort + * & cleanup transaction. */ case TBLOCK_STARTED: AbortTransaction(); @@ -2330,11 +2317,11 @@ AbortCurrentTransaction(void) break; /* - * If we are in TBLOCK_BEGIN it means something screwed up - * right after reading "BEGIN TRANSACTION". We assume that - * the user will interpret the error as meaning the BEGIN - * failed to get him into a transaction block, so we should - * abort and return to idle state. 
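/*
 * Editorial sketch (not part of the commit): a self-contained reduction of the
 * blockState dispatch the CommitTransactionCommand cases above walk through --
 * a just-executed BEGIN moves to "in progress", an ordinary command inside a
 * block only bumps the command counter, COMMIT really commits, and a failed
 * block sits in the abort state until ROLLBACK arrives.  The enum and helpers
 * are simplified stand-ins for the real TBlockState machinery.
 */
typedef enum
{
    BLOCK_DEFAULT,      /* not in a transaction block          */
    BLOCK_BEGIN,        /* BEGIN just executed                 */
    BLOCK_INPROGRESS,   /* live transaction block              */
    BLOCK_END,          /* COMMIT received                     */
    BLOCK_ABORT         /* block failed, awaiting ROLLBACK     */
} BlockState;

extern void commit_transaction(void);
extern void increment_command_counter(void);

static BlockState
finish_command(BlockState state)
{
    switch (state)
    {
        case BLOCK_BEGIN:                   /* BEGIN touched nothing, no CCI */
            return BLOCK_INPROGRESS;
        case BLOCK_INPROGRESS:
            increment_command_counter();    /* make prior work visible */
            return BLOCK_INPROGRESS;
        case BLOCK_END:
            commit_transaction();
            return BLOCK_DEFAULT;
        case BLOCK_ABORT:
            return BLOCK_ABORT;             /* stay put until ROLLBACK */
        default:
            return state;
    }
}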
+ * If we are in TBLOCK_BEGIN it means something screwed up right + * after reading "BEGIN TRANSACTION". We assume that the user + * will interpret the error as meaning the BEGIN failed to get him + * into a transaction block, so we should abort and return to idle + * state. */ case TBLOCK_BEGIN: AbortTransaction(); @@ -2354,9 +2341,9 @@ AbortCurrentTransaction(void) break; /* - * Here, we failed while trying to COMMIT. Clean up the - * transaction and return to idle state (we do not want to - * stay in the transaction). + * Here, we failed while trying to COMMIT. Clean up the + * transaction and return to idle state (we do not want to stay in + * the transaction). */ case TBLOCK_END: AbortTransaction(); @@ -2365,9 +2352,9 @@ AbortCurrentTransaction(void) break; /* - * Here, we are already in an aborted transaction state and - * are waiting for a ROLLBACK, but for some reason we failed - * again! So we just remain in the abort state. + * Here, we are already in an aborted transaction state and are + * waiting for a ROLLBACK, but for some reason we failed again! + * So we just remain in the abort state. */ case TBLOCK_ABORT: case TBLOCK_SUBABORT: @@ -2375,8 +2362,8 @@ AbortCurrentTransaction(void) /* * We are in a failed transaction and we got the ROLLBACK command. - * We have already aborted, we just need to cleanup and go to - * idle state. + * We have already aborted, we just need to cleanup and go to idle + * state. */ case TBLOCK_ABORT_END: CleanupTransaction(); @@ -2395,8 +2382,8 @@ AbortCurrentTransaction(void) /* * Here, we failed while trying to PREPARE. Clean up the - * transaction and return to idle state (we do not want to - * stay in the transaction). + * transaction and return to idle state (we do not want to stay in + * the transaction). */ case TBLOCK_PREPARE: AbortTransaction(); @@ -2406,8 +2393,8 @@ AbortCurrentTransaction(void) /* * We got an error inside a subtransaction. Abort just the - * subtransaction, and go to the persistent SUBABORT state - * until we get ROLLBACK. + * subtransaction, and go to the persistent SUBABORT state until + * we get ROLLBACK. */ case TBLOCK_SUBINPROGRESS: AbortSubTransaction(); @@ -2416,7 +2403,7 @@ AbortCurrentTransaction(void) /* * If we failed while trying to create a subtransaction, clean up - * the broken subtransaction and abort the parent. The same + * the broken subtransaction and abort the parent. The same * applies if we get a failure while ending a subtransaction. */ case TBLOCK_SUBBEGIN: @@ -2479,15 +2466,15 @@ PreventTransactionChain(void *stmtNode, const char *stmtType) stmtType))); /* - * Are we inside a function call? If the statement's parameter block - * was allocated in QueryContext, assume it is an interactive command. + * Are we inside a function call? If the statement's parameter block was + * allocated in QueryContext, assume it is an interactive command. * Otherwise assume it is coming from a function. */ if (!MemoryContextContains(QueryContext, stmtNode)) ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), /* translator: %s represents an SQL statement name */ - errmsg("%s cannot be executed from a function", stmtType))); + errmsg("%s cannot be executed from a function", stmtType))); /* If we got past IsTransactionBlock test, should be in default state */ if (CurrentTransactionState->blockState != TBLOCK_DEFAULT && @@ -2529,8 +2516,8 @@ RequireTransactionChain(void *stmtNode, const char *stmtType) return; /* - * Are we inside a function call? 
If the statement's parameter block - * was allocated in QueryContext, assume it is an interactive command. + * Are we inside a function call? If the statement's parameter block was + * allocated in QueryContext, assume it is an interactive command. * Otherwise assume it is coming from a function. */ if (!MemoryContextContains(QueryContext, stmtNode)) @@ -2556,8 +2543,8 @@ bool IsInTransactionChain(void *stmtNode) { /* - * Return true on same conditions that would make - * PreventTransactionChain error out + * Return true on same conditions that would make PreventTransactionChain + * error out */ if (IsTransactionBlock()) return true; @@ -2705,8 +2692,7 @@ BeginTransactionBlock(void) switch (s->blockState) { /* - * We are not inside a transaction block, so allow one to - * begin. + * We are not inside a transaction block, so allow one to begin. */ case TBLOCK_STARTED: s->blockState = TBLOCK_BEGIN; @@ -2721,7 +2707,7 @@ BeginTransactionBlock(void) case TBLOCK_SUBABORT: ereport(WARNING, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("there is already a transaction in progress"))); + errmsg("there is already a transaction in progress"))); break; /* These cases are invalid. */ @@ -2759,7 +2745,7 @@ bool PrepareTransactionBlock(char *gid) { TransactionState s; - bool result; + bool result; /* Set up to commit the current transaction */ result = EndTransactionBlock(); @@ -2832,8 +2818,8 @@ EndTransactionBlock(void) break; /* - * We are in a live subtransaction block. Set up to subcommit - * all open subtransactions and then commit the main transaction. + * We are in a live subtransaction block. Set up to subcommit all + * open subtransactions and then commit the main transaction. */ case TBLOCK_SUBINPROGRESS: while (s->parent != NULL) @@ -2854,9 +2840,9 @@ EndTransactionBlock(void) break; /* - * Here we are inside an aborted subtransaction. Treat the - * COMMIT as ROLLBACK: set up to abort everything and exit - * the main transaction. + * Here we are inside an aborted subtransaction. Treat the COMMIT + * as ROLLBACK: set up to abort everything and exit the main + * transaction. */ case TBLOCK_SUBABORT: while (s->parent != NULL) @@ -2927,9 +2913,9 @@ UserAbortTransactionBlock(void) switch (s->blockState) { /* - * We are inside a transaction block and we got a ROLLBACK - * command from the user, so tell CommitTransactionCommand - * to abort and exit the transaction block. + * We are inside a transaction block and we got a ROLLBACK command + * from the user, so tell CommitTransactionCommand to abort and + * exit the transaction block. */ case TBLOCK_INPROGRESS: s->blockState = TBLOCK_ABORT_PENDING; @@ -2937,17 +2923,17 @@ UserAbortTransactionBlock(void) /* * We are inside a failed transaction block and we got a ROLLBACK - * command from the user. Abort processing is already done, - * so CommitTransactionCommand just has to cleanup and go back - * to idle state. + * command from the user. Abort processing is already done, so + * CommitTransactionCommand just has to cleanup and go back to + * idle state. */ case TBLOCK_ABORT: s->blockState = TBLOCK_ABORT_END; break; /* - * We are inside a subtransaction. Mark everything - * up to top level as exitable. + * We are inside a subtransaction. Mark everything up to top + * level as exitable. */ case TBLOCK_SUBINPROGRESS: case TBLOCK_SUBABORT: @@ -2972,8 +2958,8 @@ UserAbortTransactionBlock(void) break; /* - * The user issued ABORT when not inside a transaction. Issue - * a WARNING and go to abort state. 
The upcoming call to + * The user issued ABORT when not inside a transaction. Issue a + * WARNING and go to abort state. The upcoming call to * CommitTransactionCommand() will then put us back into the * default state. */ @@ -3021,8 +3007,8 @@ DefineSavepoint(char *name) s = CurrentTransactionState; /* changed by push */ /* - * Savepoint names, like the TransactionState block itself, - * live in TopTransactionContext. + * Savepoint names, like the TransactionState block itself, live + * in TopTransactionContext. */ if (name) s->name = MemoryContextStrdup(TopTransactionContext, name); @@ -3078,8 +3064,8 @@ ReleaseSavepoint(List *options) break; /* - * We are in a non-aborted subtransaction. This is the only - * valid case. + * We are in a non-aborted subtransaction. This is the only valid + * case. */ case TBLOCK_SUBINPROGRESS: break; @@ -3134,8 +3120,8 @@ ReleaseSavepoint(List *options) /* * Mark "commit pending" all subtransactions up to the target - * subtransaction. The actual commits will happen when control gets - * to CommitTransactionCommand. + * subtransaction. The actual commits will happen when control gets to + * CommitTransactionCommand. */ xact = CurrentTransactionState; for (;;) @@ -3232,8 +3218,8 @@ RollbackToSavepoint(List *options) /* * Mark "abort pending" all subtransactions up to the target - * subtransaction. The actual aborts will happen when control gets - * to CommitTransactionCommand. + * subtransaction. The actual aborts will happen when control gets to + * CommitTransactionCommand. */ xact = CurrentTransactionState; for (;;) @@ -3284,8 +3270,8 @@ BeginInternalSubTransaction(char *name) s = CurrentTransactionState; /* changed by push */ /* - * Savepoint names, like the TransactionState block itself, - * live in TopTransactionContext. + * Savepoint names, like the TransactionState block itself, live + * in TopTransactionContext. */ if (name) s->name = MemoryContextStrdup(TopTransactionContext, name); @@ -3333,7 +3319,7 @@ ReleaseCurrentSubTransaction(void) Assert(s->state == TRANS_INPROGRESS); MemoryContextSwitchTo(CurTransactionContext); CommitSubTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ Assert(s->state == TRANS_INPROGRESS); } @@ -3433,8 +3419,7 @@ AbortOutOfAnyTransaction(void) break; /* - * In a subtransaction, so clean it up and abort parent - * too + * In a subtransaction, so clean it up and abort parent too */ case TBLOCK_SUBBEGIN: case TBLOCK_SUBINPROGRESS: @@ -3667,9 +3652,9 @@ CommitSubTransaction(void) s->parent->subTransactionId); /* - * We need to restore the upper transaction's read-only state, in case - * the upper is read-write while the child is read-only; GUC will - * incorrectly think it should leave the child state in place. + * We need to restore the upper transaction's read-only state, in case the + * upper is read-write while the child is read-only; GUC will incorrectly + * think it should leave the child state in place. */ XactReadOnly = s->prevXactReadOnly; @@ -3706,8 +3691,8 @@ AbortSubTransaction(void) /* * Release any LW locks we might be holding as quickly as possible. * (Regular locks, however, must be held till we finish aborting.) - * Releasing LW locks is critical since we might try to grab them - * again while cleaning up! + * Releasing LW locks is critical since we might try to grab them again + * while cleaning up! * * FIXME This may be incorrect --- Are there some locks we should keep? * Buffer locks, for example? I don't think so but I'm not sure. 
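/*
 * Editorial sketch (not part of the commit): how the BeginInternalSubTransaction /
 * ReleaseCurrentSubTransaction entry points shown above are typically used by
 * callers such as PL handlers -- open an internal subtransaction, run code that
 * may ereport(ERROR), and either release or roll it back.  Assumes the backend
 * environment; real callers must also save and restore their memory context and
 * resource owner around this, which is elided here, and
 * RollbackAndReleaseCurrentSubTransaction is the companion routine from this
 * file (not shown in these hunks).
 */
BeginInternalSubTransaction(NULL);
PG_TRY();
{
    /* ... run work that may ereport(ERROR) ... */
    ReleaseCurrentSubTransaction();             /* commit just the subxact */
}
PG_CATCH();
{
    FlushErrorState();                          /* done with the error data */
    RollbackAndReleaseCurrentSubTransaction();  /* undo just the subxact */
}
PG_END_TRY();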
@@ -3726,8 +3711,8 @@ AbortSubTransaction(void) AtSubAbort_ResourceOwner(); /* - * We can skip all this stuff if the subxact failed before creating - * a ResourceOwner... + * We can skip all this stuff if the subxact failed before creating a + * ResourceOwner... */ if (s->curTransactionOwner) { @@ -3777,25 +3762,23 @@ AbortSubTransaction(void) } /* - * Reset user id which might have been changed transiently. Here we - * want to restore to the userid that was current at subxact entry. - * (As in AbortTransaction, we need not worry about the session - * userid.) + * Reset user id which might have been changed transiently. Here we want + * to restore to the userid that was current at subxact entry. (As in + * AbortTransaction, we need not worry about the session userid.) * - * Must do this after AtEOXact_GUC to handle the case where we entered - * the subxact inside a SECURITY DEFINER function (hence current and - * session userids were different) and then session auth was changed - * inside the subxact. GUC will reset both current and session - * userids to the entry-time session userid. This is right in every - * other scenario so it seems simplest to let GUC do that and fix it - * here. + * Must do this after AtEOXact_GUC to handle the case where we entered the + * subxact inside a SECURITY DEFINER function (hence current and session + * userids were different) and then session auth was changed inside the + * subxact. GUC will reset both current and session userids to the + * entry-time session userid. This is right in every other scenario so it + * seems simplest to let GUC do that and fix it here. */ SetUserId(s->currentUser); /* - * Restore the upper transaction's read-only state, too. This should - * be redundant with GUC's cleanup but we may as well do it for - * consistency with the commit case. + * Restore the upper transaction's read-only state, too. This should be + * redundant with GUC's cleanup but we may as well do it for consistency + * with the commit case. */ XactReadOnly = s->prevXactReadOnly; @@ -3846,11 +3829,11 @@ PushTransaction(void) { TransactionState p = CurrentTransactionState; TransactionState s; - Oid currentUser; + Oid currentUser; /* - * At present, GetUserId cannot fail, but let's not assume that. Get - * the ID before entering the critical code sequence. + * At present, GetUserId cannot fail, but let's not assume that. Get the + * ID before entering the critical code sequence. */ currentUser = GetUserId(); @@ -3860,6 +3843,7 @@ PushTransaction(void) s = (TransactionState) MemoryContextAllocZero(TopTransactionContext, sizeof(TransactionStateData)); + /* * Assign a subtransaction ID, watching out for counter wraparound. */ @@ -3872,11 +3856,12 @@ PushTransaction(void) (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("cannot have more than 2^32-1 subtransactions in a transaction"))); } + /* * We can now stack a minimally valid subtransaction without fear of * failure. */ - s->transactionId = InvalidTransactionId; /* until assigned */ + s->transactionId = InvalidTransactionId; /* until assigned */ s->subTransactionId = currentSubTransactionId; s->parent = p; s->nestingLevel = p->nestingLevel + 1; @@ -3889,10 +3874,10 @@ PushTransaction(void) CurrentTransactionState = s; /* - * AbortSubTransaction and CleanupSubTransaction have to be able to - * cope with the subtransaction from here on out; in particular they - * should not assume that it necessarily has a transaction context, - * resource owner, or XID. 
+ * AbortSubTransaction and CleanupSubTransaction have to be able to cope + * with the subtransaction from here on out; in particular they should not + * assume that it necessarily has a transaction context, resource owner, + * or XID. */ } @@ -3959,7 +3944,7 @@ ShowTransactionStateRec(TransactionState s) /* use ereport to suppress computation if msg will not be printed */ ereport(DEBUG3, (errmsg_internal("name: %s; blockState: %13s; state: %7s, xid/subid/cid: %u/%u/%u, nestlvl: %d, children: %s", - PointerIsValid(s->name) ? s->name : "unnamed", + PointerIsValid(s->name) ? s->name : "unnamed", BlockStateAsString(s->blockState), TransStateAsString(s->state), (unsigned int) s->transactionId, @@ -4215,7 +4200,7 @@ xact_desc_commit(char *buf, xl_xact_commit *xlrec) if (xlrec->nsubxacts > 0) { TransactionId *xacts = (TransactionId *) - &xlrec->xnodes[xlrec->nrels]; + &xlrec->xnodes[xlrec->nrels]; sprintf(buf + strlen(buf), "; subxacts:"); for (i = 0; i < xlrec->nsubxacts; i++) @@ -4246,7 +4231,7 @@ xact_desc_abort(char *buf, xl_xact_abort *xlrec) if (xlrec->nsubxacts > 0) { TransactionId *xacts = (TransactionId *) - &xlrec->xnodes[xlrec->nrels]; + &xlrec->xnodes[xlrec->nrels]; sprintf(buf + strlen(buf), "; subxacts:"); for (i = 0; i < xlrec->nsubxacts; i++) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 878d7e21efc..7a37c656dc1 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.219 2005/10/03 00:28:41 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.220 2005/10/15 02:49:10 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -51,7 +51,7 @@ /* * Because O_DIRECT bypasses the kernel buffers, and because we never * read those buffers except during crash recovery, it is a win to use - * it in all cases where we sync on each write(). We could allow O_DIRECT + * it in all cases where we sync on each write(). We could allow O_DIRECT * with fsync(), but because skipping the kernel buffer forces writes out * quickly, it seems best just to use it for O_SYNC. It is hard to imagine * how fsync() could be a win for O_DIRECT compared to O_SYNC and O_DIRECT. @@ -85,14 +85,14 @@ #if O_DSYNC != BARE_OPEN_SYNC_FLAG #define OPEN_DATASYNC_FLAG (O_DSYNC | PG_O_DIRECT) #endif -#else /* !defined(OPEN_SYNC_FLAG) */ +#else /* !defined(OPEN_SYNC_FLAG) */ /* Win32 only has O_DSYNC */ #define OPEN_DATASYNC_FLAG (O_DSYNC | PG_O_DIRECT) #endif #endif #if defined(OPEN_DATASYNC_FLAG) -#define DEFAULT_SYNC_METHOD_STR "open_datasync" +#define DEFAULT_SYNC_METHOD_STR "open_datasync" #define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN #define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG #elif defined(HAVE_FDATASYNC) @@ -154,7 +154,7 @@ bool XLOG_DEBUG = false; /* these are derived from XLOG_sync_method by assign_xlog_sync_method */ -int sync_method = DEFAULT_SYNC_METHOD; +int sync_method = DEFAULT_SYNC_METHOD; static int open_sync_bit = DEFAULT_SYNC_FLAGBIT; #define XLOG_SYNC_BIT (enableFsync ? open_sync_bit : 0) @@ -368,10 +368,9 @@ typedef struct XLogCtlData XLogCtlWrite Write; /* - * These values do not change after startup, although the pointed-to - * pages and xlblocks values certainly do. 
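/*
 * Editorial sketch (not part of the commit): how the sync-method machinery
 * above turns into file flags.  When wal_sync_method is one of the
 * open_sync/open_datasync variants, the matching O_SYNC/O_DSYNC (plus optional
 * O_DIRECT) bit lands in open_sync_bit and is OR'ed into the flags used to
 * open WAL segments; with fsync disabled the bit is suppressed, which is what
 * the XLOG_SYNC_BIT macro above expresses.  Illustrative only -- the function
 * name is a stand-in for the backend's own segment-open code.
 */
#include <fcntl.h>
#include <sys/stat.h>

static int
open_wal_segment(const char *path, int fsync_enabled, int open_sync_bit)
{
    int flags = O_RDWR;

    if (fsync_enabled)
        flags |= open_sync_bit;     /* 0, O_SYNC, or O_DSYNC | O_DIRECT */

    return open(path, flags, S_IRUSR | S_IWUSR);
}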
Permission to read/write - * the pages and xlblocks values depends on WALInsertLock and - * WALWriteLock. + * These values do not change after startup, although the pointed-to pages + * and xlblocks values certainly do. Permission to read/write the pages + * and xlblocks values depends on WALInsertLock and WALWriteLock. */ char *pages; /* buffers for unwritten XLOG pages */ XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */ @@ -449,8 +448,8 @@ static char *readRecordBuf = NULL; static uint32 readRecordBufSize = 0; /* State information for XLOG reading */ -static XLogRecPtr ReadRecPtr; /* start of last record read */ -static XLogRecPtr EndRecPtr; /* end+1 of last record read */ +static XLogRecPtr ReadRecPtr; /* start of last record read */ +static XLogRecPtr EndRecPtr; /* end+1 of last record read */ static XLogRecord *nextRecord = NULL; static TimeLineID lastPageTLI = 0; @@ -467,7 +466,7 @@ static void exitArchiveRecovery(TimeLineID endTLI, static bool recoveryStopsHere(XLogRecord *record, bool *includeThis); static bool XLogCheckBuffer(XLogRecData *rdata, - XLogRecPtr *lsn, BkpBlock *bkpb); + XLogRecPtr *lsn, BkpBlock *bkpb); static bool AdvanceXLInsertBuffer(void); static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible); static int XLogFileInit(uint32 log, uint32 seg, @@ -481,7 +480,7 @@ static bool RestoreArchivedFile(char *path, const char *xlogfname, const char *recovername, off_t expectedSize); static int PreallocXlogFiles(XLogRecPtr endptr); static void MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr, - int *nsegsremoved, int *nsegsrecycled); + int *nsegsremoved, int *nsegsrecycled); static void RemoveOldBackupHistory(void); static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode); static bool ValidXLOGHeader(XLogPageHeader hdr, int emode); @@ -554,36 +553,34 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata) } /* - * In bootstrap mode, we don't actually log anything but XLOG - * resources; return a phony record pointer. + * In bootstrap mode, we don't actually log anything but XLOG resources; + * return a phony record pointer. */ if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID) { RecPtr.xlogid = 0; - RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt - * record */ + RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt record */ return (RecPtr); } /* * Here we scan the rdata chain, determine which buffers must be backed * up, and compute the CRC values for the data. Note that the record - * header isn't added into the CRC initially since we don't know the - * final length or info bits quite yet. Thus, the CRC will represent - * the CRC of the whole record in the order "rdata, then backup blocks, - * then record header". + * header isn't added into the CRC initially since we don't know the final + * length or info bits quite yet. Thus, the CRC will represent the CRC of + * the whole record in the order "rdata, then backup blocks, then record + * header". * - * We may have to loop back to here if a race condition is detected - * below. We could prevent the race by doing all this work while - * holding the insert lock, but it seems better to avoid doing CRC - * calculations while holding the lock. This means we have to be - * careful about modifying the rdata chain until we know we aren't - * going to loop back again. The only change we allow ourselves to - * make earlier is to set rdt->data = NULL in chain items we have - * decided we will have to back up the whole buffer for. 
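/*
 * Editorial sketch (not part of the commit): roughly what a caller of
 * XLogInsert() hands in, per the rdata-chain description above -- a linked
 * chain of XLogRecData entries, each pointing at a piece of record data and
 * optionally at a buffer whose page may need to be backed up.  Fragment from
 * inside a backend function; "my_record_body" is a hypothetical struct, and
 * exact XLogRecData fields vary by version (any extra fields are left zeroed),
 * so treat this as illustrative rather than version-accurate.
 */
XLogRecData rdata = {0};
XLogRecPtr  recptr;

rdata.data = (char *) &my_record_body;   /* not-in-a-buffer data (len > 0) */
rdata.len = sizeof(my_record_body);
rdata.buffer = InvalidBuffer;            /* no page to consider backing up */
rdata.next = NULL;

recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata);
/* per the comment below, recptr is the start of the *next* record and is
 * what callers store as the LSN of any data pages they changed */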
This is OK - * because we will certainly decide the same thing again for those - * items if we do it over; doing it here saves an extra pass over the - * chain later. + * We may have to loop back to here if a race condition is detected below. We + * could prevent the race by doing all this work while holding the insert + * lock, but it seems better to avoid doing CRC calculations while holding + * the lock. This means we have to be careful about modifying the rdata + * chain until we know we aren't going to loop back again. The only + * change we allow ourselves to make earlier is to set rdt->data = NULL in + * chain items we have decided we will have to back up the whole buffer + * for. This is OK because we will certainly decide the same thing again + * for those items if we do it over; doing it here saves an extra pass + * over the chain later. */ begin:; for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) @@ -680,12 +677,12 @@ begin:; } /* - * NOTE: the test for len == 0 here is somewhat fishy, since in theory - * all of the rmgr data might have been suppressed in favor of backup - * blocks. Currently, all callers of XLogInsert provide at least some - * not-in-a-buffer data and so len == 0 should never happen, but that - * may not be true forever. If you need to remove the len == 0 check, - * also remove the check for xl_len == 0 in ReadRecord, below. + * NOTE: the test for len == 0 here is somewhat fishy, since in theory all + * of the rmgr data might have been suppressed in favor of backup blocks. + * Currently, all callers of XLogInsert provide at least some + * not-in-a-buffer data and so len == 0 should never happen, but that may + * not be true forever. If you need to remove the len == 0 check, also + * remove the check for xl_len == 0 in ReadRecord, below. */ if (len == 0) elog(PANIC, "invalid xlog record length %u", len); @@ -718,9 +715,9 @@ begin:; * Since the amount of data we write here is completely optional * anyway, tell XLogWrite it can be "flexible" and stop at a * convenient boundary. This allows writes triggered by this - * mechanism to synchronize with the cache boundaries, so that - * in a long transaction we'll basically dump alternating halves - * of the buffer array. + * mechanism to synchronize with the cache boundaries, so that in + * a long transaction we'll basically dump alternating halves of + * the buffer array. */ LogwrtResult = XLogCtl->Write.LogwrtResult; if (XLByteLT(LogwrtResult.Write, LogwrtRqst.Write)) @@ -733,10 +730,9 @@ begin:; LWLockAcquire(WALInsertLock, LW_EXCLUSIVE); /* - * Check to see if my RedoRecPtr is out of date. If so, may have to - * go back and recompute everything. This can only happen just after - * a checkpoint, so it's better to be slow in this case and fast - * otherwise. + * Check to see if my RedoRecPtr is out of date. If so, may have to go + * back and recompute everything. This can only happen just after a + * checkpoint, so it's better to be slow in this case and fast otherwise. */ if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr)) { @@ -751,8 +747,8 @@ begin:; XLByteLE(dtbuf_lsn[i], RedoRecPtr)) { /* - * Oops, this buffer now needs to be backed up, but we - * didn't think so above. Start over. + * Oops, this buffer now needs to be backed up, but we didn't + * think so above. Start over. */ LWLockRelease(WALInsertLock); END_CRIT_SECTION(); @@ -762,15 +758,14 @@ begin:; } /* - * Make additional rdata chain entries for the backup blocks, so that - * we don't need to special-case them in the write loop. 
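/*
 * Editorial sketch (not part of the commit): the CRC accumulation order the
 * comment above describes -- the record CRC covers the rdata chain (and the
 * backup-block entries appended to it) first, and the record header is folded
 * in last, since its final length and info bits are not known until the end.
 * crc32_update() and the chain struct are simplified stand-ins for the
 * backend's CRC macros and XLogRecData.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct RecData
{
    const void     *data;
    size_t          len;
    struct RecData *next;
} RecData;

extern uint32_t crc32_update(uint32_t crc, const void *data, size_t len);

static uint32_t
record_crc(const RecData *chain, const void *header, size_t header_len)
{
    const RecData *rdt;
    uint32_t       crc = 0xFFFFFFFF;                 /* init value */

    for (rdt = chain; rdt != NULL; rdt = rdt->next)
        crc = crc32_update(crc, rdt->data, rdt->len); /* rdata + backup blocks */

    crc = crc32_update(crc, header, header_len);      /* header goes in last */
    return ~crc;                                      /* finalize */
}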
Note that we - * have now irrevocably changed the input rdata chain. At the exit of - * this loop, write_len includes the backup block data. + * Make additional rdata chain entries for the backup blocks, so that we + * don't need to special-case them in the write loop. Note that we have + * now irrevocably changed the input rdata chain. At the exit of this + * loop, write_len includes the backup block data. * - * Also set the appropriate info bits to show which buffers were backed - * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th - * distinct buffer value (ignoring InvalidBuffer) appearing in the - * rdata chain. + * Also set the appropriate info bits to show which buffers were backed up. + * The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct buffer + * value (ignoring InvalidBuffer) appearing in the rdata chain. */ write_len = len; for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) @@ -822,8 +817,7 @@ begin:; /* * If there isn't enough space on the current XLOG page for a record - * header, advance to the next page (leaving the unused space as - * zeroes). + * header, advance to the next page (leaving the unused space as zeroes). */ updrqst = false; freespace = INSERT_FREESPACE(Insert); @@ -925,15 +919,15 @@ begin:; freespace = INSERT_FREESPACE(Insert); /* - * The recptr I return is the beginning of the *next* record. This - * will be stored as LSN for changed data pages... + * The recptr I return is the beginning of the *next* record. This will be + * stored as LSN for changed data pages... */ INSERT_RECPTR(RecPtr, Insert, curridx); /* Need to update shared LogwrtRqst if some block was filled up */ if (freespace < SizeOfXLogRecord) - updrqst = true; /* curridx is filled and available for - * writing out */ + updrqst = true; /* curridx is filled and available for writing + * out */ else curridx = PrevBufIdx(curridx); WriteRqst = XLogCtl->xlblocks[curridx]; @@ -975,9 +969,9 @@ XLogCheckBuffer(XLogRecData *rdata, page = (PageHeader) BufferGetBlock(rdata->buffer); /* - * XXX We assume page LSN is first data on *every* page that can be - * passed to XLogInsert, whether it otherwise has the standard page - * layout or not. + * XXX We assume page LSN is first data on *every* page that can be passed + * to XLogInsert, whether it otherwise has the standard page layout or + * not. */ *lsn = page->pd_lsn; @@ -1163,9 +1157,9 @@ AdvanceXLInsertBuffer(void) LogwrtResult = Insert->LogwrtResult; /* - * Get ending-offset of the buffer page we need to replace (this may - * be zero if the buffer hasn't been used yet). Fall through if it's - * already written out. + * Get ending-offset of the buffer page we need to replace (this may be + * zero if the buffer hasn't been used yet). Fall through if it's already + * written out. */ OldPageRqstPtr = XLogCtl->xlblocks[nextidx]; if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write)) @@ -1208,9 +1202,8 @@ AdvanceXLInsertBuffer(void) else { /* - * Have to write buffers while holding insert lock. This - * is not good, so only write as much as we absolutely - * must. + * Have to write buffers while holding insert lock. This is + * not good, so only write as much as we absolutely must. */ WriteRqst.Write = OldPageRqstPtr; WriteRqst.Flush.xlogid = 0; @@ -1223,8 +1216,8 @@ AdvanceXLInsertBuffer(void) } /* - * Now the next buffer slot is free and we can set it up to be the - * next output page. + * Now the next buffer slot is free and we can set it up to be the next + * output page. 
*/ NewPageEndPtr = XLogCtl->xlblocks[Insert->curridx]; if (NewPageEndPtr.xrecoff >= XLogFileSize) @@ -1237,24 +1230,27 @@ AdvanceXLInsertBuffer(void) NewPageEndPtr.xrecoff += BLCKSZ; XLogCtl->xlblocks[nextidx] = NewPageEndPtr; NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * (Size) BLCKSZ); + Insert->curridx = nextidx; Insert->currpage = NewPage; - Insert->currpos = ((char *) NewPage) + SizeOfXLogShortPHD; + + Insert->currpos = ((char *) NewPage) +SizeOfXLogShortPHD; /* - * Be sure to re-zero the buffer so that bytes beyond what we've - * written will look like zeroes and not valid XLOG records... + * Be sure to re-zero the buffer so that bytes beyond what we've written + * will look like zeroes and not valid XLOG records... */ MemSet((char *) NewPage, 0, BLCKSZ); /* * Fill the new page's header */ - NewPage->xlp_magic = XLOG_PAGE_MAGIC; + NewPage ->xlp_magic = XLOG_PAGE_MAGIC; + /* NewPage->xlp_info = 0; */ /* done by memset */ - NewPage->xlp_tli = ThisTimeLineID; - NewPage->xlp_pageaddr.xlogid = NewPageEndPtr.xlogid; - NewPage->xlp_pageaddr.xrecoff = NewPageEndPtr.xrecoff - BLCKSZ; + NewPage ->xlp_tli = ThisTimeLineID; + NewPage ->xlp_pageaddr.xlogid = NewPageEndPtr.xlogid; + NewPage ->xlp_pageaddr.xrecoff = NewPageEndPtr.xrecoff - BLCKSZ; /* * If first page of an XLOG segment file, make it a long header. @@ -1265,8 +1261,9 @@ AdvanceXLInsertBuffer(void) NewLongPage->xlp_sysid = ControlFile->system_identifier; NewLongPage->xlp_seg_size = XLogSegSize; - NewPage->xlp_info |= XLP_LONG_HEADER; - Insert->currpos = ((char *) NewPage) + SizeOfXLogLongPHD; + NewPage ->xlp_info |= XLP_LONG_HEADER; + + Insert->currpos = ((char *) NewPage) +SizeOfXLogLongPHD; } return update_needed; @@ -1298,19 +1295,18 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) Assert(CritSectionCount > 0); /* - * Update local LogwrtResult (caller probably did this already, - * but...) + * Update local LogwrtResult (caller probably did this already, but...) */ LogwrtResult = Write->LogwrtResult; /* * Since successive pages in the xlog cache are consecutively allocated, * we can usually gather multiple pages together and issue just one - * write() call. npages is the number of pages we have determined can - * be written together; startidx is the cache block index of the first - * one, and startoffset is the file offset at which it should go. - * The latter two variables are only valid when npages > 0, but we must - * initialize all of them to keep the compiler quiet. + * write() call. npages is the number of pages we have determined can be + * written together; startidx is the cache block index of the first one, + * and startoffset is the file offset at which it should go. The latter + * two variables are only valid when npages > 0, but we must initialize + * all of them to keep the compiler quiet. */ npages = 0; startidx = 0; @@ -1320,18 +1316,17 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) * Within the loop, curridx is the cache block index of the page to * consider writing. We advance Write->curridx only after successfully * writing pages. (Right now, this refinement is useless since we are - * going to PANIC if any error occurs anyway; but someday it may come - * in useful.) + * going to PANIC if any error occurs anyway; but someday it may come in + * useful.) */ curridx = Write->curridx; while (XLByteLT(LogwrtResult.Write, WriteRqst.Write)) { /* - * Make sure we're not ahead of the insert process. 
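The AdvanceXLInsertBuffer hunk above zeroes the freshly recycled buffer page and then stamps its header, giving the first page of each segment file the long-header flag. A standalone sketch of that initialization; the struct layout and constants are invented for illustration and do not match the real XLogPageHeader:

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE   8192
#define SEG_SIZE    (16 * 1024 * 1024)
#define PAGE_MAGIC  0xD05E               /* illustrative, not XLOG_PAGE_MAGIC */
#define LONG_HEADER 0x0002

typedef struct
{
    uint16_t magic;
    uint16_t info;
    uint32_t tli;
    uint64_t pageaddr;                   /* starting byte address of this page */
} PageHeader;

static void
init_wal_page(char *page, uint64_t pageaddr, uint32_t tli)
{
    PageHeader *hdr = (PageHeader *) page;

    /* Re-zero first, so bytes beyond what we write can't look like records. */
    memset(page, 0, PAGE_SIZE);

    hdr->magic = PAGE_MAGIC;
    hdr->tli = tli;
    hdr->pageaddr = pageaddr;
    if (pageaddr % SEG_SIZE == 0)
        hdr->info |= LONG_HEADER;        /* first page of a segment file */
}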
This could - * happen if we're passed a bogus WriteRqst.Write that is past the - * end of the last page that's been initialized by - * AdvanceXLInsertBuffer. + * Make sure we're not ahead of the insert process. This could happen + * if we're passed a bogus WriteRqst.Write that is past the end of the + * last page that's been initialized by AdvanceXLInsertBuffer. */ if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[curridx])) elog(PANIC, "xlog write request %X/%X is past end of log %X/%X", @@ -1355,8 +1350,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) if (close(openLogFile)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close log file %u, segment %u: %m", - openLogId, openLogSeg))); + errmsg("could not close log file %u, segment %u: %m", + openLogId, openLogSeg))); openLogFile = -1; } XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg); @@ -1379,13 +1374,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) UpdateControlFile(); /* - * Signal bgwriter to start a checkpoint if it's been - * too long since the last one. (We look at local copy of - * RedoRecPtr which might be a little out of date, but - * should be close enough for this purpose.) + * Signal bgwriter to start a checkpoint if it's been too long + * since the last one. (We look at local copy of RedoRecPtr + * which might be a little out of date, but should be close + * enough for this purpose.) * - * A straight computation of segment number could overflow - * 32 bits. Rather than assuming we have working 64-bit + * A straight computation of segment number could overflow 32 + * bits. Rather than assuming we have working 64-bit * arithmetic, we compare the highest-order bits separately, * and force a checkpoint immediately when they change. */ @@ -1434,10 +1429,10 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) npages++; /* - * Dump the set if this will be the last loop iteration, or if - * we are at the last page of the cache area (since the next page - * won't be contiguous in memory), or if we are at the end of the - * logfile segment. + * Dump the set if this will be the last loop iteration, or if we are + * at the last page of the cache area (since the next page won't be + * contiguous in memory), or if we are at the end of the logfile + * segment. */ finishing_seg = !ispartialpage && (startoffset + npages * BLCKSZ) >= XLogSegSize; @@ -1496,7 +1491,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) if (finishing_seg) { issue_xlog_fsync(); - LogwrtResult.Flush = LogwrtResult.Write; /* end of page */ + LogwrtResult.Flush = LogwrtResult.Write; /* end of page */ if (XLogArchivingActive()) XLogArchiveNotifySeg(openLogId, openLogSeg); @@ -1526,20 +1521,20 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) XLByteLT(LogwrtResult.Flush, LogwrtResult.Write)) { /* - * Could get here without iterating above loop, in which case we - * might have no open file or the wrong one. However, we do not - * need to fsync more than one file. + * Could get here without iterating above loop, in which case we might + * have no open file or the wrong one. However, we do not need to + * fsync more than one file. 
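The note about a "straight computation of segment number" overflowing 32 bits can be made concrete: rather than forming one wide segment number, keep the high-order word separate, compare it first, and force a checkpoint as soon as it changes. A hedged sketch with illustrative field names, not the real XLogWrite logic:

#include <stdbool.h>
#include <stdint.h>

#define SEG_SIZE (16u * 1024u * 1024u)

typedef struct
{
    uint32_t xlogid;                     /* high-order part of the WAL position */
    uint32_t xrecoff;                    /* byte offset within that unit */
} WalPtr;

/*
 * Has 'now' advanced at least 'every_segs' segments past 'last_ckpt'?
 * Avoid 64-bit arithmetic: compare the high-order parts separately and
 * trigger immediately whenever they differ.
 */
static bool
checkpoint_due(WalPtr last_ckpt, WalPtr now, uint32_t every_segs)
{
    if (now.xlogid != last_ckpt.xlogid)
        return true;                     /* high-order bits changed: force it */
    return (now.xrecoff / SEG_SIZE) - (last_ckpt.xrecoff / SEG_SIZE) >= every_segs;
}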
*/ if (sync_method != SYNC_METHOD_OPEN) { if (openLogFile >= 0 && - !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg)) + !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg)) { if (close(openLogFile)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close log file %u, segment %u: %m", - openLogId, openLogSeg))); + errmsg("could not close log file %u, segment %u: %m", + openLogId, openLogSeg))); openLogFile = -1; } if (openLogFile < 0) @@ -1557,8 +1552,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) * Update shared-memory status * * We make sure that the shared 'request' values do not fall behind the - * 'result' values. This is not absolutely essential, but it saves - * some code in a couple of places. + * 'result' values. This is not absolutely essential, but it saves some + * code in a couple of places. */ { /* use volatile pointer to prevent code rearrangement */ @@ -1608,11 +1603,10 @@ XLogFlush(XLogRecPtr record) /* * Since fsync is usually a horribly expensive operation, we try to - * piggyback as much data as we can on each fsync: if we see any more - * data entered into the xlog buffer, we'll write and fsync that too, - * so that the final value of LogwrtResult.Flush is as large as - * possible. This gives us some chance of avoiding another fsync - * immediately after. + * piggyback as much data as we can on each fsync: if we see any more data + * entered into the xlog buffer, we'll write and fsync that too, so that + * the final value of LogwrtResult.Flush is as large as possible. This + * gives us some chance of avoiding another fsync immediately after. */ /* initialize to given target; may increase below */ @@ -1669,31 +1663,29 @@ XLogFlush(XLogRecPtr record) /* * If we still haven't flushed to the request point then we have a - * problem; most likely, the requested flush point is past end of - * XLOG. This has been seen to occur when a disk page has a corrupted - * LSN. + * problem; most likely, the requested flush point is past end of XLOG. + * This has been seen to occur when a disk page has a corrupted LSN. * - * Formerly we treated this as a PANIC condition, but that hurts the - * system's robustness rather than helping it: we do not want to take - * down the whole system due to corruption on one data page. In - * particular, if the bad page is encountered again during recovery - * then we would be unable to restart the database at all! (This - * scenario has actually happened in the field several times with 7.1 - * releases. Note that we cannot get here while InRedo is true, but if - * the bad page is brought in and marked dirty during recovery then - * CreateCheckPoint will try to flush it at the end of recovery.) + * Formerly we treated this as a PANIC condition, but that hurts the system's + * robustness rather than helping it: we do not want to take down the + * whole system due to corruption on one data page. In particular, if the + * bad page is encountered again during recovery then we would be unable + * to restart the database at all! (This scenario has actually happened + * in the field several times with 7.1 releases. Note that we cannot get + * here while InRedo is true, but if the bad page is brought in and marked + * dirty during recovery then CreateCheckPoint will try to flush it at the + * end of recovery.) * - * The current approach is to ERROR under normal conditions, but only - * WARNING during recovery, so that the system can be brought up even - * if there's a corrupt LSN. 
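The shared-memory status comment ("the shared 'request' values do not fall behind the 'result' values") boils down to a small invariant maintained when publishing write/flush progress. A simplified sketch; the struct and names are hypothetical and the spinlock is omitted:

#include <stdint.h>

typedef struct
{
    uint64_t write;                      /* how far WAL has been written */
    uint64_t flush;                      /* how far WAL has been fsync'd */
} WalProgress;

typedef struct
{
    WalProgress request;                 /* what backends have asked for */
    WalProgress result;                  /* what has actually been done */
} SharedLogStatus;

/* After finishing a write/flush cycle, publish the result and bump the
 * request values up to it so they can never lag behind. */
static void
publish_result(SharedLogStatus *s, WalProgress done)
{
    s->result = done;
    if (s->request.write < done.write)
        s->request.write = done.write;
    if (s->request.flush < done.flush)
        s->request.flush = done.flush;
}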
Note that for calls from xact.c, the - * ERROR will be promoted to PANIC since xact.c calls this routine - * inside a critical section. However, calls from bufmgr.c are not - * within critical sections and so we will not force a restart for a - * bad LSN on a data page. + * The current approach is to ERROR under normal conditions, but only WARNING + * during recovery, so that the system can be brought up even if there's a + * corrupt LSN. Note that for calls from xact.c, the ERROR will be + * promoted to PANIC since xact.c calls this routine inside a critical + * section. However, calls from bufmgr.c are not within critical sections + * and so we will not force a restart for a bad LSN on a data page. */ if (XLByteLT(LogwrtResult.Flush, record)) elog(InRecovery ? WARNING : ERROR, - "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X", + "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X", record.xlogid, record.xrecoff, LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff); } @@ -1734,8 +1726,7 @@ XLogFileInit(uint32 log, uint32 seg, XLogFilePath(path, ThisTimeLineID, log, seg); /* - * Try to use existent file (checkpoint maker may have created it - * already) + * Try to use existent file (checkpoint maker may have created it already) */ if (*use_existent) { @@ -1754,10 +1745,10 @@ XLogFileInit(uint32 log, uint32 seg, } /* - * Initialize an empty (all zeroes) segment. NOTE: it is possible - * that another process is doing the same thing. If so, we will end - * up pre-creating an extra log segment. That seems OK, and better - * than holding the lock throughout this lengthy process. + * Initialize an empty (all zeroes) segment. NOTE: it is possible that + * another process is doing the same thing. If so, we will end up + * pre-creating an extra log segment. That seems OK, and better than + * holding the lock throughout this lengthy process. */ snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid()); @@ -1772,13 +1763,13 @@ XLogFileInit(uint32 log, uint32 seg, errmsg("could not create file \"%s\": %m", tmppath))); /* - * Zero-fill the file. We have to do this the hard way to ensure that - * all the file space has really been allocated --- on platforms that - * allow "holes" in files, just seeking to the end doesn't allocate - * intermediate space. This way, we know that we have all the space - * and (after the fsync below) that all the indirect blocks are down - * on disk. Therefore, fdatasync(2) or O_DSYNC will be sufficient to - * sync future writes to the log file. + * Zero-fill the file. We have to do this the hard way to ensure that all + * the file space has really been allocated --- on platforms that allow + * "holes" in files, just seeking to the end doesn't allocate intermediate + * space. This way, we know that we have all the space and (after the + * fsync below) that all the indirect blocks are down on disk. Therefore, + * fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the + * log file. 
*/ MemSet(zbuffer, 0, sizeof(zbuffer)); for (nbytes = 0; nbytes < XLogSegSize; nbytes += sizeof(zbuffer)) @@ -1789,8 +1780,7 @@ XLogFileInit(uint32 log, uint32 seg, int save_errno = errno; /* - * If we fail to make the file, delete it to release disk - * space + * If we fail to make the file, delete it to release disk space */ unlink(tmppath); /* if write didn't set errno, assume problem is no disk space */ @@ -1798,7 +1788,7 @@ XLogFileInit(uint32 log, uint32 seg, ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } @@ -1816,9 +1806,9 @@ XLogFileInit(uint32 log, uint32 seg, * Now move the segment into place with its final name. * * If caller didn't want to use a pre-existing file, get rid of any - * pre-existing file. Otherwise, cope with possibility that someone - * else has created the file while we were filling ours: if so, use - * ours to pre-create a future log segment. + * pre-existing file. Otherwise, cope with possibility that someone else + * has created the file while we were filling ours: if so, use ours to + * pre-create a future log segment. */ installed_log = log; installed_seg = seg; @@ -1840,8 +1830,8 @@ XLogFileInit(uint32 log, uint32 seg, if (fd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return (fd); } @@ -1908,7 +1898,7 @@ XLogFileCopy(uint32 log, uint32 seg, errmsg("could not read file \"%s\": %m", path))); else ereport(ERROR, - (errmsg("not enough data in file \"%s\"", path))); + (errmsg("not enough data in file \"%s\"", path))); } errno = 0; if ((int) write(fd, buffer, sizeof(buffer)) != (int) sizeof(buffer)) @@ -1916,8 +1906,7 @@ XLogFileCopy(uint32 log, uint32 seg, int save_errno = errno; /* - * If we fail to make the file, delete it to release disk - * space + * If we fail to make the file, delete it to release disk space */ unlink(tmppath); /* if write didn't set errno, assume problem is no disk space */ @@ -1925,7 +1914,7 @@ XLogFileCopy(uint32 log, uint32 seg, ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } @@ -2057,8 +2046,8 @@ XLogFileOpen(uint32 log, uint32 seg) if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return fd; } @@ -2075,14 +2064,14 @@ XLogFileRead(uint32 log, uint32 seg, int emode) int fd; /* - * Loop looking for a suitable timeline ID: we might need to read any - * of the timelines listed in expectedTLIs. + * Loop looking for a suitable timeline ID: we might need to read any of + * the timelines listed in expectedTLIs. * * We expect curFileTLI on entry to be the TLI of the preceding file in - * sequence, or 0 if there was no predecessor. We do not allow - * curFileTLI to go backwards; this prevents us from picking up the - * wrong file when a parent timeline extends to higher segment numbers - * than the child we want to read. + * sequence, or 0 if there was no predecessor. 
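The zero-fill-then-install sequence above follows the usual durable-file-creation pattern: allocate all the space through a temp file, fsync it, then move it into place, deleting the temp file on any failure so the disk space is released. A self-contained sketch with generic names rather than the xlog.c helpers:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SEG_SIZE (16 * 1024 * 1024)

static int
create_zeroed_segment(const char *tmppath, const char *finalpath)
{
    char    zbuf[8192];
    long    nbytes;
    int     fd;

    memset(zbuf, 0, sizeof(zbuf));
    fd = open(tmppath, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd < 0)
        return -1;

    /* Write real zeroes so every block is allocated (no holes). */
    for (nbytes = 0; nbytes < SEG_SIZE; nbytes += (long) sizeof(zbuf))
    {
        if (write(fd, zbuf, sizeof(zbuf)) != (long) sizeof(zbuf))
        {
            close(fd);
            unlink(tmppath);             /* release whatever space we got */
            return -1;
        }
    }

    /* Make sure the data is down on disk before exposing the file. */
    if (fsync(fd) != 0)
    {
        close(fd);
        unlink(tmppath);
        return -1;
    }
    if (close(fd) != 0 || rename(tmppath, finalpath) != 0)
    {
        unlink(tmppath);
        return -1;
    }
    return 0;
}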
We do not allow curFileTLI + * to go backwards; this prevents us from picking up the wrong file when a + * parent timeline extends to higher segment numbers than the child we + * want to read. */ foreach(cell, expectedTLIs) { @@ -2111,8 +2100,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode) if (errno != ENOENT) /* unexpected failure? */ ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); } /* Couldn't find it. For simplicity, complain about front timeline */ @@ -2120,8 +2109,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode) errno = ENOENT; ereport(emode, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return -1; } @@ -2152,29 +2141,27 @@ RestoreArchivedFile(char *path, const char *xlogfname, struct stat stat_buf; /* - * When doing archive recovery, we always prefer an archived log file - * even if a file of the same name exists in XLOGDIR. The reason is - * that the file in XLOGDIR could be an old, un-filled or - * partly-filled version that was copied and restored as part of - * backing up $PGDATA. + * When doing archive recovery, we always prefer an archived log file even + * if a file of the same name exists in XLOGDIR. The reason is that the + * file in XLOGDIR could be an old, un-filled or partly-filled version + * that was copied and restored as part of backing up $PGDATA. * * We could try to optimize this slightly by checking the local copy - * lastchange timestamp against the archived copy, but we have no API - * to do this, nor can we guarantee that the lastchange timestamp was - * preserved correctly when we copied to archive. Our aim is - * robustness, so we elect not to do this. + * lastchange timestamp against the archived copy, but we have no API to + * do this, nor can we guarantee that the lastchange timestamp was + * preserved correctly when we copied to archive. Our aim is robustness, + * so we elect not to do this. * - * If we cannot obtain the log file from the archive, however, we will - * try to use the XLOGDIR file if it exists. This is so that we can - * make use of log segments that weren't yet transferred to the - * archive. + * If we cannot obtain the log file from the archive, however, we will try to + * use the XLOGDIR file if it exists. This is so that we can make use of + * log segments that weren't yet transferred to the archive. * - * Notice that we don't actually overwrite any files when we copy back - * from archive because the recoveryRestoreCommand may inadvertently - * restore inappropriate xlogs, or they may be corrupt, so we may wish - * to fallback to the segments remaining in current XLOGDIR later. The - * copy-from-archive filename is always the same, ensuring that we - * don't run out of disk space on long recoveries. + * Notice that we don't actually overwrite any files when we copy back from + * archive because the recoveryRestoreCommand may inadvertently restore + * inappropriate xlogs, or they may be corrupt, so we may wish to fallback + * to the segments remaining in current XLOGDIR later. The + * copy-from-archive filename is always the same, ensuring that we don't + * run out of disk space on long recoveries. 
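The timeline-search loop described above (try each expected timeline, newest first, but never let the chosen TLI fall below the previous file's) can be sketched like this; the path layout and names are simplified stand-ins for XLogFilePath and friends:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int
open_segment_on_some_timeline(const uint32_t *tlis, int ntlis,
                              uint32_t min_tli, uint32_t log, uint32_t seg,
                              uint32_t *found_tli)
{
    int i;

    for (i = 0; i < ntlis; i++)          /* list is ordered newest first */
    {
        char path[64];
        int  fd;

        if (tlis[i] < min_tli)
            break;                       /* don't let the timeline go backwards */

        snprintf(path, sizeof(path), "pg_xlog/%08X%08X%08X",
                 tlis[i], log, seg);
        fd = open(path, O_RDONLY, 0);
        if (fd >= 0)
        {
            *found_tli = tlis[i];
            return fd;
        }
    }
    return -1;                           /* no acceptable timeline has this segment */
}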
*/ snprintf(xlogpath, MAXPGPATH, XLOGDIR "/%s", recovername); @@ -2259,11 +2246,11 @@ RestoreArchivedFile(char *path, const char *xlogfname, * command apparently succeeded, but let's make sure the file is * really there now and has the correct size. * - * XXX I made wrong-size a fatal error to ensure the DBA would notice - * it, but is that too strong? We could try to plow ahead with a - * local copy of the file ... but the problem is that there - * probably isn't one, and we'd incorrectly conclude we've reached - * the end of WAL and we're done recovering ... + * XXX I made wrong-size a fatal error to ensure the DBA would notice it, + * but is that too strong? We could try to plow ahead with a local + * copy of the file ... but the problem is that there probably isn't + * one, and we'd incorrectly conclude we've reached the end of WAL and + * we're done recovering ... */ if (stat(xlogpath, &stat_buf) == 0) { @@ -2296,18 +2283,17 @@ RestoreArchivedFile(char *path, const char *xlogfname, /* * remember, we rollforward UNTIL the restore fails so failure here is * just part of the process... that makes it difficult to determine - * whether the restore failed because there isn't an archive to - * restore, or because the administrator has specified the restore - * program incorrectly. We have to assume the former. + * whether the restore failed because there isn't an archive to restore, + * or because the administrator has specified the restore program + * incorrectly. We have to assume the former. */ ereport(DEBUG2, - (errmsg("could not restore file \"%s\" from archive: return code %d", - xlogfname, rc))); + (errmsg("could not restore file \"%s\" from archive: return code %d", + xlogfname, rc))); /* - * if an archived file is not available, there might still be a - * version of this file in XLOGDIR, so return that as the filename to - * open. + * if an archived file is not available, there might still be a version of + * this file in XLOGDIR, so return that as the filename to open. * * In many recovery scenarios we expect this to fail also, but if so that * just means we've reached the end of WAL. @@ -2375,8 +2361,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr, if (xldir == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open transaction log directory \"%s\": %m", - XLOGDIR))); + errmsg("could not open transaction log directory \"%s\": %m", + XLOGDIR))); XLogFileName(lastoff, ThisTimeLineID, log, seg); @@ -2384,14 +2370,14 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr, { /* * We ignore the timeline part of the XLOG segment identifiers in - * deciding whether a segment is still needed. This ensures that - * we won't prematurely remove a segment from a parent timeline. - * We could probably be a little more proactive about removing - * segments of non-parent timelines, but that would be a whole lot - * more complicated. + * deciding whether a segment is still needed. This ensures that we + * won't prematurely remove a segment from a parent timeline. We could + * probably be a little more proactive about removing segments of + * non-parent timelines, but that would be a whole lot more + * complicated. * - * We use the alphanumeric sorting property of the filenames to - * decide which ones are earlier than the lastoff segment. + * We use the alphanumeric sorting property of the filenames to decide + * which ones are earlier than the lastoff segment. 
*/ if (strlen(xlde->d_name) == 24 && strspn(xlde->d_name, "0123456789ABCDEF") == 24 && @@ -2409,16 +2395,16 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr, snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlde->d_name); /* - * Before deleting the file, see if it can be recycled as - * a future log segment. + * Before deleting the file, see if it can be recycled as a + * future log segment. */ if (InstallXLogFileSegment(&endlogId, &endlogSeg, path, true, &max_advance, true)) { ereport(DEBUG2, - (errmsg("recycled transaction log file \"%s\"", - xlde->d_name))); + (errmsg("recycled transaction log file \"%s\"", + xlde->d_name))); (*nsegsrecycled)++; /* Needn't recheck that slot on future iterations */ if (max_advance > 0) @@ -2431,8 +2417,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr, { /* No need for any more future segments... */ ereport(DEBUG2, - (errmsg("removing transaction log file \"%s\"", - xlde->d_name))); + (errmsg("removing transaction log file \"%s\"", + xlde->d_name))); unlink(path); (*nsegsremoved)++; } @@ -2459,8 +2445,8 @@ RemoveOldBackupHistory(void) if (xldir == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open transaction log directory \"%s\": %m", - XLOGDIR))); + errmsg("could not open transaction log directory \"%s\": %m", + XLOGDIR))); while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) { @@ -2473,8 +2459,8 @@ RemoveOldBackupHistory(void) if (!XLogArchivingActive() || XLogArchiveIsDone(xlde->d_name)) { ereport(DEBUG2, - (errmsg("removing transaction log backup history file \"%s\"", - xlde->d_name))); + (errmsg("removing transaction log backup history file \"%s\"", + xlde->d_name))); snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlde->d_name); unlink(path); XLogArchiveCleanup(xlde->d_name); @@ -2576,7 +2562,7 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode) blk = (char *) XLogRecGetData(record) + len; for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) { - uint32 blen; + uint32 blen; if (!(record->xl_info & XLR_SET_BKP_BLOCK(i))) continue; @@ -2611,8 +2597,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode) if (!EQ_CRC32(record->xl_crc, crc)) { ereport(emode, - (errmsg("incorrect resource manager data checksum in record at %X/%X", - recptr.xlogid, recptr.xrecoff))); + (errmsg("incorrect resource manager data checksum in record at %X/%X", + recptr.xlogid, recptr.xrecoff))); return false; } @@ -2647,12 +2633,11 @@ ReadRecord(XLogRecPtr *RecPtr, int emode) if (readBuf == NULL) { /* - * First time through, permanently allocate readBuf. We do it - * this way, rather than just making a static array, for two - * reasons: (1) no need to waste the storage in most - * instantiations of the backend; (2) a static char array isn't - * guaranteed to have any particular alignment, whereas malloc() - * will provide MAXALIGN'd storage. + * First time through, permanently allocate readBuf. We do it this + * way, rather than just making a static array, for two reasons: (1) + * no need to waste the storage in most instantiations of the backend; + * (2) a static char array isn't guaranteed to have any particular + * alignment, whereas malloc() will provide MAXALIGN'd storage. */ readBuf = (char *) malloc(BLCKSZ); Assert(readBuf != NULL); @@ -2685,11 +2670,11 @@ ReadRecord(XLogRecPtr *RecPtr, int emode) RecPtr->xlogid, RecPtr->xrecoff))); /* - * Since we are going to a random position in WAL, forget any - * prior state about what timeline we were in, and allow it to be - * any timeline in expectedTLIs. 
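The "alphanumeric sorting property of the filenames" the comment relies on works because segment names are fixed-width, zero-padded upper-case hex: plain strcmp() orders them numerically, and skipping the first 8 digits ignores the timeline part. The cutoff comparison itself is not shown in this hunk; this is a hedged sketch of the same idea:

#include <stdbool.h>
#include <string.h>

/*
 * A WAL segment file name here is 24 hex digits: 8 for the timeline
 * followed by 16 identifying the segment.  Return true if 'name' looks
 * like a segment file and sorts at or before 'cutoff' when the timeline
 * digits are ignored.
 */
static bool
segment_is_no_longer_needed(const char *name, const char *cutoff)
{
    if (strlen(name) != 24 || strspn(name, "0123456789ABCDEF") != 24)
        return false;                    /* not a segment file at all */

    /* Fixed-width zero-padded hex means strcmp() gives numeric order. */
    return strcmp(name + 8, cutoff + 8) <= 0;
}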
We also set a flag to allow - * curFileTLI to go backwards (but we can't reset that variable - * right here, since we might not change files at all). + * Since we are going to a random position in WAL, forget any prior + * state about what timeline we were in, and allow it to be any + * timeline in expectedTLIs. We also set a flag to allow curFileTLI + * to go backwards (but we can't reset that variable right here, since + * we might not change files at all). */ lastPageTLI = 0; /* see comment in ValidXLOGHeader */ randAccess = true; /* allow curFileTLI to go backwards too */ @@ -2741,9 +2726,9 @@ ReadRecord(XLogRecPtr *RecPtr, int emode) if (targetRecOff == 0) { /* - * Can only get here in the continuing-from-prev-page case, - * because XRecOffIsValid eliminated the zero-page-offset case - * otherwise. Need to skip over the new page's header. + * Can only get here in the continuing-from-prev-page case, because + * XRecOffIsValid eliminated the zero-page-offset case otherwise. Need + * to skip over the new page's header. */ tmpRecPtr.xrecoff += pageHeaderSize; targetRecOff = pageHeaderSize; @@ -2791,14 +2776,14 @@ got_record:; { ereport(emode, (errmsg("invalid resource manager ID %u at %X/%X", - record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff))); + record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff))); goto next_record_is_invalid; } if (randAccess) { /* - * We can't exactly verify the prev-link, but surely it should be - * less than the record's own address. + * We can't exactly verify the prev-link, but surely it should be less + * than the record's own address. */ if (!XLByteLT(record->xl_prev, *RecPtr)) { @@ -2812,9 +2797,9 @@ got_record:; else { /* - * Record's prev-link should exactly match our previous location. - * This check guards against torn WAL pages where a stale but - * valid-looking WAL record starts on a sector boundary. + * Record's prev-link should exactly match our previous location. This + * check guards against torn WAL pages where a stale but valid-looking + * WAL record starts on a sector boundary. */ if (!XLByteEQ(record->xl_prev, ReadRecPtr)) { @@ -2827,11 +2812,10 @@ got_record:; } /* - * Allocate or enlarge readRecordBuf as needed. To avoid useless - * small increases, round its size to a multiple of BLCKSZ, and make - * sure it's at least 4*BLCKSZ to start with. (That is enough for all - * "normal" records, but very large commit or abort records might need - * more space.) + * Allocate or enlarge readRecordBuf as needed. To avoid useless small + * increases, round its size to a multiple of BLCKSZ, and make sure it's + * at least 4*BLCKSZ to start with. (That is enough for all "normal" + * records, but very large commit or abort records might need more space.) */ total_len = record->xl_tot_len; if (total_len > readRecordBufSize) @@ -2927,7 +2911,7 @@ got_record:; MAXALIGN(SizeOfXLogContRecord + contrecord->xl_rem_len)) { nextRecord = (XLogRecord *) ((char *) contrecord + - MAXALIGN(SizeOfXLogContRecord + contrecord->xl_rem_len)); + MAXALIGN(SizeOfXLogContRecord + contrecord->xl_rem_len)); } EndRecPtr.xlogid = readId; EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff + @@ -2991,8 +2975,8 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) char sysident_str[32]; /* - * Format sysids separately to keep platform-dependent format - * code out of the translatable message string. + * Format sysids separately to keep platform-dependent format code + * out of the translatable message string. 
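The readRecordBuf sizing rule in the comment (round up to a multiple of BLCKSZ, with a 4*BLCKSZ floor) is simple enough to show in isolation; the helper below is a hypothetical rendering of that rule, not the actual ReadRecord code:

#include <stdint.h>
#include <stdlib.h>

#define BLCKSZ 8192

/* Grow *buf to hold total_len bytes, rounding the allocation up to a
 * BLCKSZ multiple and never below 4*BLCKSZ, so slightly larger records
 * don't cause repeated tiny reallocations.  Returns 0 on success. */
static int
enlarge_record_buf(char **buf, uint32_t *bufsize, uint32_t total_len)
{
    uint32_t newsize;

    if (total_len <= *bufsize)
        return 0;                        /* already big enough */

    newsize = (total_len / BLCKSZ + 1) * BLCKSZ;
    if (newsize < 4 * BLCKSZ)
        newsize = 4 * BLCKSZ;

    free(*buf);
    *buf = malloc(newsize);
    if (*buf == NULL)
    {
        *bufsize = 0;
        return -1;
    }
    *bufsize = newsize;
    return 0;
}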
*/ snprintf(fhdrident_str, sizeof(fhdrident_str), UINT64_FORMAT, longhdr->xlp_sysid); @@ -3000,15 +2984,15 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) ControlFile->system_identifier); ereport(emode, (errmsg("WAL file is from different system"), - errdetail("WAL file SYSID is %s, pg_control SYSID is %s", - fhdrident_str, sysident_str))); + errdetail("WAL file SYSID is %s, pg_control SYSID is %s", + fhdrident_str, sysident_str))); return false; } if (longhdr->xlp_seg_size != XLogSegSize) { ereport(emode, (errmsg("WAL file is from different system"), - errdetail("Incorrect XLOG_SEG_SIZE in page header."))); + errdetail("Incorrect XLOG_SEG_SIZE in page header."))); return false; } } @@ -3018,7 +3002,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) { ereport(emode, (errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u", - hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff, + hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff, readId, readSeg, readOff))); return false; } @@ -3040,9 +3024,9 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) * immediate parent's TLI, we should never see TLI go backwards across * successive pages of a consistent WAL sequence. * - * Of course this check should only be applied when advancing - * sequentially across pages; therefore ReadRecord resets lastPageTLI - * to zero when going to a random page. + * Of course this check should only be applied when advancing sequentially + * across pages; therefore ReadRecord resets lastPageTLI to zero when + * going to a random page. */ if (hdr->xlp_tli < lastPageTLI) { @@ -3123,7 +3107,7 @@ readTimeLineHistory(TimeLineID targetTLI) tli <= (TimeLineID) linitial_int(result)) ereport(FATAL, (errmsg("invalid data in history file: %s", fline), - errhint("Timeline IDs must be in increasing sequence."))); + errhint("Timeline IDs must be in increasing sequence."))); /* Build list with newest item first */ result = lcons_int((int) tli, result); @@ -3137,7 +3121,7 @@ readTimeLineHistory(TimeLineID targetTLI) targetTLI <= (TimeLineID) linitial_int(result)) ereport(FATAL, (errmsg("invalid data in history file \"%s\"", path), - errhint("Timeline IDs must be less than child timeline's ID."))); + errhint("Timeline IDs must be less than child timeline's ID."))); result = lcons_int((int) targetTLI, result); @@ -3196,8 +3180,8 @@ findNewestTimeLine(TimeLineID startTLI) TimeLineID probeTLI; /* - * The algorithm is just to probe for the existence of timeline - * history files. XXX is it useful to allow gaps in the sequence? + * The algorithm is just to probe for the existence of timeline history + * files. XXX is it useful to allow gaps in the sequence? */ newestTLI = startTLI; @@ -3302,14 +3286,13 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, unlink(tmppath); /* - * if write didn't set errno, assume problem is no disk - * space + * if write didn't set errno, assume problem is no disk space */ errno = save_errno ? save_errno : ENOSPC; ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } close(srcfd); @@ -3454,11 +3437,11 @@ WriteControlFile(void) FIN_CRC32(ControlFile->crc); /* - * We write out BLCKSZ bytes into pg_control, zero-padding the excess - * over sizeof(ControlFileData). This reduces the odds of - * premature-EOF errors when reading pg_control. 
We'll still fail - * when we check the contents of the file, but hopefully with a more - * specific error than "couldn't read pg_control". + * We write out BLCKSZ bytes into pg_control, zero-padding the excess over + * sizeof(ControlFileData). This reduces the odds of premature-EOF errors + * when reading pg_control. We'll still fail when we check the contents + * of the file, but hopefully with a more specific error than "couldn't + * read pg_control". */ if (sizeof(ControlFileData) > BLCKSZ) ereport(PANIC, @@ -3524,17 +3507,17 @@ ReadControlFile(void) close(fd); /* - * Check for expected pg_control format version. If this is wrong, - * the CRC check will likely fail because we'll be checking the wrong - * number of bytes. Complaining about wrong version will probably be - * more enlightening than complaining about wrong CRC. + * Check for expected pg_control format version. If this is wrong, the + * CRC check will likely fail because we'll be checking the wrong number + * of bytes. Complaining about wrong version will probably be more + * enlightening than complaining about wrong CRC. */ if (ControlFile->pg_control_version != PG_CONTROL_VERSION) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d," - " but the server was compiled with PG_CONTROL_VERSION %d.", - ControlFile->pg_control_version, PG_CONTROL_VERSION), + " but the server was compiled with PG_CONTROL_VERSION %d.", + ControlFile->pg_control_version, PG_CONTROL_VERSION), errhint("It looks like you need to initdb."))); /* Now check the CRC. */ INIT_CRC32(crc); @@ -3548,31 +3531,30 @@ ReadControlFile(void) (errmsg("incorrect checksum in control file"))); /* - * Do compatibility checking immediately. We do this here for 2 - * reasons: + * Do compatibility checking immediately. We do this here for 2 reasons: * - * (1) if the database isn't compatible with the backend executable, we - * want to abort before we can possibly do any damage; + * (1) if the database isn't compatible with the backend executable, we want + * to abort before we can possibly do any damage; * * (2) this code is executed in the postmaster, so the setlocale() will - * propagate to forked backends, which aren't going to read this file - * for themselves. (These locale settings are considered critical + * propagate to forked backends, which aren't going to read this file for + * themselves. (These locale settings are considered critical * compatibility items because they can affect sort order of indexes.) 
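The WriteControlFile comment explains why the control data is padded out to a full BLCKSZ block: a reader that asks for a whole block then can't hit a premature EOF, so any failure comes from checking the contents rather than from a short read. A generic sketch of the padding trick with an invented struct:

#include <string.h>
#include <unistd.h>

#define BLCKSZ 8192

typedef struct
{
    unsigned version;
    unsigned crc;
    /* ... more fields, well under BLCKSZ in total ... */
} ControlData;

/* Write the struct as one zero-padded block.  (The real code PANICs if
 * the struct ever outgrows a block; here we simply assume it fits.) */
static int
write_control_block(int fd, const ControlData *cd)
{
    char buffer[BLCKSZ];

    memset(buffer, 0, sizeof(buffer));
    memcpy(buffer, cd, sizeof(ControlData));
    return (write(fd, buffer, BLCKSZ) == BLCKSZ) ? 0 : -1;
}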
*/ if (ControlFile->catalog_version_no != CATALOG_VERSION_NO) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d," - " but the server was compiled with CATALOG_VERSION_NO %d.", - ControlFile->catalog_version_no, CATALOG_VERSION_NO), + " but the server was compiled with CATALOG_VERSION_NO %d.", + ControlFile->catalog_version_no, CATALOG_VERSION_NO), errhint("It looks like you need to initdb."))); if (ControlFile->maxAlign != MAXIMUM_ALIGNOF) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with MAXALIGN %d," - " but the server was compiled with MAXALIGN %d.", - ControlFile->maxAlign, MAXIMUM_ALIGNOF), - errhint("It looks like you need to initdb."))); + errdetail("The database cluster was initialized with MAXALIGN %d," + " but the server was compiled with MAXALIGN %d.", + ControlFile->maxAlign, MAXIMUM_ALIGNOF), + errhint("It looks like you need to initdb."))); if (ControlFile->floatFormat != FLOATFORMAT_VALUE) ereport(FATAL, (errmsg("database files are incompatible with server"), @@ -3581,76 +3563,76 @@ ReadControlFile(void) if (ControlFile->blcksz != BLCKSZ) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with BLCKSZ %d," - " but the server was compiled with BLCKSZ %d.", - ControlFile->blcksz, BLCKSZ), - errhint("It looks like you need to recompile or initdb."))); + errdetail("The database cluster was initialized with BLCKSZ %d," + " but the server was compiled with BLCKSZ %d.", + ControlFile->blcksz, BLCKSZ), + errhint("It looks like you need to recompile or initdb."))); if (ControlFile->relseg_size != RELSEG_SIZE) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with RELSEG_SIZE %d," - " but the server was compiled with RELSEG_SIZE %d.", - ControlFile->relseg_size, RELSEG_SIZE), - errhint("It looks like you need to recompile or initdb."))); + errdetail("The database cluster was initialized with RELSEG_SIZE %d," + " but the server was compiled with RELSEG_SIZE %d.", + ControlFile->relseg_size, RELSEG_SIZE), + errhint("It looks like you need to recompile or initdb."))); if (ControlFile->xlog_seg_size != XLOG_SEG_SIZE) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with XLOG_SEG_SIZE %d," - " but the server was compiled with XLOG_SEG_SIZE %d.", + " but the server was compiled with XLOG_SEG_SIZE %d.", ControlFile->xlog_seg_size, XLOG_SEG_SIZE), - errhint("It looks like you need to recompile or initdb."))); + errhint("It looks like you need to recompile or initdb."))); if (ControlFile->nameDataLen != NAMEDATALEN) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with NAMEDATALEN %d," - " but the server was compiled with NAMEDATALEN %d.", - ControlFile->nameDataLen, NAMEDATALEN), - errhint("It looks like you need to recompile or initdb."))); + errdetail("The database cluster was initialized with NAMEDATALEN %d," + " but the server was compiled with NAMEDATALEN %d.", + ControlFile->nameDataLen, NAMEDATALEN), + errhint("It looks like you need to recompile or initdb."))); if (ControlFile->indexMaxKeys != INDEX_MAX_KEYS) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with 
INDEX_MAX_KEYS %d," - " but the server was compiled with INDEX_MAX_KEYS %d.", + " but the server was compiled with INDEX_MAX_KEYS %d.", ControlFile->indexMaxKeys, INDEX_MAX_KEYS), - errhint("It looks like you need to recompile or initdb."))); + errhint("It looks like you need to recompile or initdb."))); #ifdef HAVE_INT64_TIMESTAMP if (ControlFile->enableIntTimes != TRUE) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP" - " but the server was compiled with HAVE_INT64_TIMESTAMP."), - errhint("It looks like you need to recompile or initdb."))); + " but the server was compiled with HAVE_INT64_TIMESTAMP."), + errhint("It looks like you need to recompile or initdb."))); #else if (ControlFile->enableIntTimes != FALSE) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP" - " but the server was compiled without HAVE_INT64_TIMESTAMP."), - errhint("It looks like you need to recompile or initdb."))); + " but the server was compiled without HAVE_INT64_TIMESTAMP."), + errhint("It looks like you need to recompile or initdb."))); #endif if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d," - " but the server was compiled with LOCALE_NAME_BUFLEN %d.", + " but the server was compiled with LOCALE_NAME_BUFLEN %d.", ControlFile->localeBuflen, LOCALE_NAME_BUFLEN), - errhint("It looks like you need to recompile or initdb."))); + errhint("It looks like you need to recompile or initdb."))); if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL) ereport(FATAL, - (errmsg("database files are incompatible with operating system"), - errdetail("The database cluster was initialized with LC_COLLATE \"%s\"," - " which is not recognized by setlocale().", - ControlFile->lc_collate), - errhint("It looks like you need to initdb or install locale support."))); + (errmsg("database files are incompatible with operating system"), + errdetail("The database cluster was initialized with LC_COLLATE \"%s\"," + " which is not recognized by setlocale().", + ControlFile->lc_collate), + errhint("It looks like you need to initdb or install locale support."))); if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL) ereport(FATAL, - (errmsg("database files are incompatible with operating system"), - errdetail("The database cluster was initialized with LC_CTYPE \"%s\"," - " which is not recognized by setlocale().", - ControlFile->lc_ctype), - errhint("It looks like you need to initdb or install locale support."))); + (errmsg("database files are incompatible with operating system"), + errdetail("The database cluster was initialized with LC_CTYPE \"%s\"," + " which is not recognized by setlocale().", + ControlFile->lc_ctype), + errhint("It looks like you need to initdb or install locale support."))); /* Make the fixed locale settings visible as GUC variables, too */ SetConfigOption("lc_collate", ControlFile->lc_collate, @@ -3719,9 +3701,9 @@ XLOGShmemSize(void) size = add_size(size, mul_size(BLCKSZ, XLOGbuffers)); /* - * Note: we don't count ControlFileData, it comes out of the "slop - * factor" added by CreateSharedMemoryAndSemaphores. This lets us - * use this routine again below to compute the actual allocation size. 
+ * Note: we don't count ControlFileData, it comes out of the "slop factor" + * added by CreateSharedMemoryAndSemaphores. This lets us use this + * routine again below to compute the actual allocation size. */ return size; @@ -3749,9 +3731,9 @@ XLOGShmemInit(void) memset(XLogCtl, 0, sizeof(XLogCtlData)); /* - * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be - * a multiple of the alignment for same, so no extra alignment padding - * is needed here. + * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be a + * multiple of the alignment for same, so no extra alignment padding is + * needed here. */ allocptr = ((char *) XLogCtl) + sizeof(XLogCtlData); XLogCtl->xlblocks = (XLogRecPtr *) allocptr; @@ -3766,18 +3748,19 @@ XLOGShmemInit(void) memset(XLogCtl->pages, 0, (Size) BLCKSZ * XLOGbuffers); /* - * Do basic initialization of XLogCtl shared data. (StartupXLOG will - * fill in additional info.) + * Do basic initialization of XLogCtl shared data. (StartupXLOG will fill + * in additional info.) */ - XLogCtl->XLogCacheByte = (Size) BLCKSZ * XLOGbuffers; + XLogCtl->XLogCacheByte = (Size) BLCKSZ *XLOGbuffers; + XLogCtl->XLogCacheBlck = XLOGbuffers - 1; XLogCtl->Insert.currpage = (XLogPageHeader) (XLogCtl->pages); SpinLockInit(&XLogCtl->info_lck); /* - * If we are not in bootstrap mode, pg_control should already exist. - * Read and validate it immediately (see comments in ReadControlFile() - * for the reasons why). + * If we are not in bootstrap mode, pg_control should already exist. Read + * and validate it immediately (see comments in ReadControlFile() for the + * reasons why). */ if (!IsBootstrapProcessingMode()) ReadControlFile(); @@ -3801,17 +3784,16 @@ BootStrapXLOG(void) pg_crc32 crc; /* - * Select a hopefully-unique system identifier code for this - * installation. We use the result of gettimeofday(), including the - * fractional seconds field, as being about as unique as we can easily - * get. (Think not to use random(), since it hasn't been seeded and - * there's no portable way to seed it other than the system clock - * value...) The upper half of the uint64 value is just the tv_sec - * part, while the lower half is the XOR of tv_sec and tv_usec. This - * is to ensure that we don't lose uniqueness unnecessarily if - * "uint64" is really only 32 bits wide. A person knowing this - * encoding can determine the initialization time of the installation, - * which could perhaps be useful sometimes. + * Select a hopefully-unique system identifier code for this installation. + * We use the result of gettimeofday(), including the fractional seconds + * field, as being about as unique as we can easily get. (Think not to + * use random(), since it hasn't been seeded and there's no portable way + * to seed it other than the system clock value...) The upper half of the + * uint64 value is just the tv_sec part, while the lower half is the XOR + * of tv_sec and tv_usec. This is to ensure that we don't lose uniqueness + * unnecessarily if "uint64" is really only 32 bits wide. A person + * knowing this encoding can determine the initialization time of the + * installation, which could perhaps be useful sometimes. 
*/ gettimeofday(&tv, NULL); sysidentifier = ((uint64) tv.tv_sec) << 32; @@ -3821,7 +3803,7 @@ BootStrapXLOG(void) ThisTimeLineID = 1; /* page buffer must be aligned suitably for O_DIRECT */ - buffer = (char *) palloc(BLCKSZ + ALIGNOF_XLOG_BUFFER); + buffer = (char *) palloc(BLCKSZ + ALIGNOF_XLOG_BUFFER); page = (XLogPageHeader) TYPEALIGN(ALIGNOF_XLOG_BUFFER, buffer); memset(page, 0, BLCKSZ); @@ -3882,18 +3864,18 @@ BootStrapXLOG(void) errno = ENOSPC; ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write bootstrap transaction log file: %m"))); + errmsg("could not write bootstrap transaction log file: %m"))); } if (pg_fsync(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync bootstrap transaction log file: %m"))); + errmsg("could not fsync bootstrap transaction log file: %m"))); if (close(openLogFile)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close bootstrap transaction log file: %m"))); + errmsg("could not close bootstrap transaction log file: %m"))); openLogFile = -1; @@ -4036,8 +4018,8 @@ readRecoveryCommandFile(void) recoveryTargetXid = (TransactionId) strtoul(tok2, NULL, 0); if (errno == EINVAL || errno == ERANGE) ereport(FATAL, - (errmsg("recovery_target_xid is not a valid number: \"%s\"", - tok2))); + (errmsg("recovery_target_xid is not a valid number: \"%s\"", + tok2))); ereport(LOG, (errmsg("recovery_target_xid = %u", recoveryTargetXid))); @@ -4056,17 +4038,17 @@ readRecoveryCommandFile(void) recoveryTargetExact = false; /* - * Convert the time string given by the user to the time_t - * format. We use type abstime's input converter because we - * know abstime has the same representation as time_t. + * Convert the time string given by the user to the time_t format. + * We use type abstime's input converter because we know abstime + * has the same representation as time_t. */ recoveryTargetTime = (time_t) DatumGetAbsoluteTime(DirectFunctionCall1(abstimein, - CStringGetDatum(tok2))); + CStringGetDatum(tok2))); ereport(LOG, (errmsg("recovery_target_time = %s", - DatumGetCString(DirectFunctionCall1(abstimeout, - AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime)))))); + DatumGetCString(DirectFunctionCall1(abstimeout, + AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime)))))); } else if (strcmp(tok1, "recovery_target_inclusive") == 0) { @@ -4095,7 +4077,7 @@ readRecoveryCommandFile(void) ereport(FATAL, (errmsg("syntax error in recovery command file: %s", cmdline), - errhint("Lines should have the format parameter = 'value'."))); + errhint("Lines should have the format parameter = 'value'."))); /* Check that required parameters were supplied */ if (recoveryRestoreCommand == NULL) @@ -4107,10 +4089,10 @@ readRecoveryCommandFile(void) InArchiveRecovery = true; /* - * If user specified recovery_target_timeline, validate it or compute - * the "latest" value. We can't do this until after we've gotten the - * restore command and set InArchiveRecovery, because we need to fetch - * timeline history files from the archive. + * If user specified recovery_target_timeline, validate it or compute the + * "latest" value. We can't do this until after we've gotten the restore + * command and set InArchiveRecovery, because we need to fetch timeline + * history files from the archive. 
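The recovery_target_xid parsing above relies on strtoul's errno reporting; a slightly more defensive, self-contained version of the same check (clearing errno first and insisting the whole token is consumed) looks like this:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool
parse_xid(const char *tok, unsigned long *xid)
{
    char *end;

    errno = 0;
    *xid = strtoul(tok, &end, 0);
    if (errno == EINVAL || errno == ERANGE)
        return false;                    /* not a number, or out of range */
    if (end == tok || *end != '\0')
        return false;                    /* empty token or trailing junk */
    return true;
}

int
main(void)
{
    unsigned long xid;

    if (parse_xid("1000", &xid))
        printf("recovery_target_xid = %lu\n", xid);
    else
        printf("recovery_target_xid is not a valid number\n");
    return 0;
}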
*/ if (rtliGiven) { @@ -4119,8 +4101,8 @@ readRecoveryCommandFile(void) /* Timeline 1 does not have a history file, all else should */ if (rtli != 1 && !existsTimeLineHistory(rtli)) ereport(FATAL, - (errmsg("recovery_target_timeline %u does not exist", - rtli))); + (errmsg("recovery_target_timeline %u does not exist", + rtli))); recoveryTargetTLI = rtli; } else @@ -4146,9 +4128,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) InArchiveRecovery = false; /* - * We should have the ending log segment currently open. Verify, and - * then close it (to avoid problems on Windows with trying to rename - * or delete an open file). + * We should have the ending log segment currently open. Verify, and then + * close it (to avoid problems on Windows with trying to rename or delete + * an open file). */ Assert(readFile >= 0); Assert(readId == endLogId); @@ -4158,17 +4140,17 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) readFile = -1; /* - * If the segment was fetched from archival storage, we want to - * replace the existing xlog segment (if any) with the archival - * version. This is because whatever is in XLOGDIR is very possibly - * older than what we have from the archives, since it could have come - * from restoring a PGDATA backup. In any case, the archival version - * certainly is more descriptive of what our current database state - * is, because that is what we replayed from. + * If the segment was fetched from archival storage, we want to replace + * the existing xlog segment (if any) with the archival version. This is + * because whatever is in XLOGDIR is very possibly older than what we have + * from the archives, since it could have come from restoring a PGDATA + * backup. In any case, the archival version certainly is more + * descriptive of what our current database state is, because that is what + * we replayed from. * - * Note that if we are establishing a new timeline, ThisTimeLineID is - * already set to the new value, and so we will create a new file - * instead of overwriting any existing file. + * Note that if we are establishing a new timeline, ThisTimeLineID is already + * set to the new value, and so we will create a new file instead of + * overwriting any existing file. */ snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG"); XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg); @@ -4195,9 +4177,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) unlink(recoveryPath); /* ignore any error */ /* - * If we are establishing a new timeline, we have to copy data - * from the last WAL segment of the old timeline to create a - * starting WAL segment for the new timeline. + * If we are establishing a new timeline, we have to copy data from + * the last WAL segment of the old timeline to create a starting WAL + * segment for the new timeline. */ if (endTLI != ThisTimeLineID) XLogFileCopy(endLogId, endLogSeg, @@ -4205,8 +4187,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) } /* - * Let's just make real sure there are not .ready or .done flags - * posted for the new segment. + * Let's just make real sure there are not .ready or .done flags posted + * for the new segment. 
*/ XLogFileName(xlogpath, ThisTimeLineID, endLogId, endLogSeg); XLogArchiveCleanup(xlogpath); @@ -4216,8 +4198,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) unlink(recoveryPath); /* ignore any error */ /* - * Rename the config file out of the way, so that we don't - * accidentally re-enter archive recovery mode in a subsequent crash. + * Rename the config file out of the way, so that we don't accidentally + * re-enter archive recovery mode in a subsequent crash. */ unlink(RECOVERY_COMMAND_DONE); if (rename(RECOVERY_COMMAND_FILE, RECOVERY_COMMAND_DONE) != 0) @@ -4278,9 +4260,9 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) * transactionid * * when testing for an xid, we MUST test for equality only, since - * transactions are numbered in the order they start, not the - * order they complete. A higher numbered xid will complete before - * you about 50% of the time... + * transactions are numbered in the order they start, not the order + * they complete. A higher numbered xid will complete before you about + * 50% of the time... */ stopsHere = (record->xl_xid == recoveryTargetXid); if (stopsHere) @@ -4289,9 +4271,9 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) else { /* - * there can be many transactions that share the same commit time, - * so we stop after the last one, if we are inclusive, or stop at - * the first one if we are exclusive + * there can be many transactions that share the same commit time, so + * we stop after the last one, if we are inclusive, or stop at the + * first one if we are exclusive */ if (recoveryTargetInclusive) stopsHere = (recordXtime > recoveryTargetTime); @@ -4312,22 +4294,22 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) if (recoveryStopAfter) ereport(LOG, (errmsg("recovery stopping after commit of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); else ereport(LOG, (errmsg("recovery stopping before commit of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); } else { if (recoveryStopAfter) ereport(LOG, (errmsg("recovery stopping after abort of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); else ereport(LOG, (errmsg("recovery stopping before abort of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); } } @@ -4359,8 +4341,8 @@ StartupXLOG(void) /* * Read control file and check XLOG status looks valid. * - * Note: in most control paths, *ControlFile is already valid and we need - * not do ReadControlFile() here, but might as well do it to be sure. + * Note: in most control paths, *ControlFile is already valid and we need not + * do ReadControlFile() here, but might as well do it to be sure. 
*/ ReadControlFile(); @@ -4381,10 +4363,10 @@ StartupXLOG(void) str_time(ControlFile->time)))); else if (ControlFile->state == DB_IN_RECOVERY) ereport(LOG, - (errmsg("database system was interrupted while in recovery at %s", - str_time(ControlFile->time)), - errhint("This probably means that some data is corrupted and" - " you will have to use the last backup for recovery."))); + (errmsg("database system was interrupted while in recovery at %s", + str_time(ControlFile->time)), + errhint("This probably means that some data is corrupted and" + " you will have to use the last backup for recovery."))); else if (ControlFile->state == DB_IN_PRODUCTION) ereport(LOG, (errmsg("database system was interrupted at %s", @@ -4397,8 +4379,8 @@ StartupXLOG(void) #endif /* - * Initialize on the assumption we want to recover to the same - * timeline that's active according to pg_control. + * Initialize on the assumption we want to recover to the same timeline + * that's active according to pg_control. */ recoveryTargetTLI = ControlFile->checkPointCopy.ThisTimeLineID; @@ -4417,7 +4399,7 @@ StartupXLOG(void) * timeline. */ if (!list_member_int(expectedTLIs, - (int) ControlFile->checkPointCopy.ThisTimeLineID)) + (int) ControlFile->checkPointCopy.ThisTimeLineID)) ereport(FATAL, (errmsg("requested timeline %u is not a child of database system timeline %u", recoveryTargetTLI, @@ -4426,30 +4408,29 @@ StartupXLOG(void) if (read_backup_label(&checkPointLoc)) { /* - * When a backup_label file is present, we want to roll forward - * from the checkpoint it identifies, rather than using - * pg_control. + * When a backup_label file is present, we want to roll forward from + * the checkpoint it identifies, rather than using pg_control. */ record = ReadCheckpointRecord(checkPointLoc, 0); if (record != NULL) { ereport(LOG, (errmsg("checkpoint record is at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); + checkPointLoc.xlogid, checkPointLoc.xrecoff))); InRecovery = true; /* force recovery even if SHUTDOWNED */ } else { ereport(PANIC, - (errmsg("could not locate required checkpoint record"), - errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir))); + (errmsg("could not locate required checkpoint record"), + errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir))); } } else { /* - * Get the last valid checkpoint record. If the latest one - * according to pg_control is broken, try the next-to-last one. + * Get the last valid checkpoint record. If the latest one according + * to pg_control is broken, try the next-to-last one. 
*/ checkPointLoc = ControlFile->checkPoint; record = ReadCheckpointRecord(checkPointLoc, 1); @@ -4457,7 +4438,7 @@ StartupXLOG(void) { ereport(LOG, (errmsg("checkpoint record is at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); + checkPointLoc.xlogid, checkPointLoc.xrecoff))); } else { @@ -4466,14 +4447,13 @@ StartupXLOG(void) if (record != NULL) { ereport(LOG, - (errmsg("using previous checkpoint record at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); - InRecovery = true; /* force recovery even if - * SHUTDOWNED */ + (errmsg("using previous checkpoint record at %X/%X", + checkPointLoc.xlogid, checkPointLoc.xrecoff))); + InRecovery = true; /* force recovery even if SHUTDOWNED */ } else ereport(PANIC, - (errmsg("could not locate a valid checkpoint record"))); + (errmsg("could not locate a valid checkpoint record"))); } } @@ -4482,10 +4462,10 @@ StartupXLOG(void) wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN); ereport(LOG, - (errmsg("redo record is at %X/%X; undo record is at %X/%X; shutdown %s", - checkPoint.redo.xlogid, checkPoint.redo.xrecoff, - checkPoint.undo.xlogid, checkPoint.undo.xrecoff, - wasShutdown ? "TRUE" : "FALSE"))); + (errmsg("redo record is at %X/%X; undo record is at %X/%X; shutdown %s", + checkPoint.redo.xlogid, checkPoint.redo.xrecoff, + checkPoint.undo.xlogid, checkPoint.undo.xrecoff, + wasShutdown ? "TRUE" : "FALSE"))); ereport(LOG, (errmsg("next transaction ID: %u; next OID: %u", checkPoint.nextXid, checkPoint.nextOid))); @@ -4502,9 +4482,9 @@ StartupXLOG(void) MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset); /* - * We must replay WAL entries using the same TimeLineID they were - * created under, so temporarily adopt the TLI indicated by the - * checkpoint (see also xlog_redo()). + * We must replay WAL entries using the same TimeLineID they were created + * under, so temporarily adopt the TLI indicated by the checkpoint (see + * also xlog_redo()). */ ThisTimeLineID = checkPoint.ThisTimeLineID; @@ -4518,15 +4498,15 @@ StartupXLOG(void) /* * Check whether we need to force recovery from WAL. If it appears to - * have been a clean shutdown and we did not have a recovery.conf - * file, then assume no recovery needed. + * have been a clean shutdown and we did not have a recovery.conf file, + * then assume no recovery needed. */ if (XLByteLT(checkPoint.undo, RecPtr) || XLByteLT(checkPoint.redo, RecPtr)) { if (wasShutdown) ereport(PANIC, - (errmsg("invalid redo/undo record in shutdown checkpoint"))); + (errmsg("invalid redo/undo record in shutdown checkpoint"))); InRecovery = true; } else if (ControlFile->state != DB_SHUTDOWNED) @@ -4563,8 +4543,8 @@ StartupXLOG(void) } /* - * Find the first record that logically follows the checkpoint --- - * it might physically precede it, though. + * Find the first record that logically follows the checkpoint --- it + * might physically precede it, though. 
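The "do we need to replay WAL?" decision documented here has three triggers: the checkpoint's redo or undo pointer lies before the checkpoint record itself (so WAL contains work past the checkpoint), the control file does not claim a clean shutdown, or a recovery.conf was found. A condensed sketch with simplified types:

#include <stdbool.h>

typedef struct { unsigned xlogid, xrecoff; } XLogRecPtr;   /* simplified */

static bool
xlbyte_lt(XLogRecPtr a, XLogRecPtr b)
{
    return a.xlogid < b.xlogid ||
           (a.xlogid == b.xlogid && a.xrecoff < b.xrecoff);
}

static bool
need_wal_replay(XLogRecPtr redo, XLogRecPtr undo, XLogRecPtr checkpoint_rec,
                bool control_file_says_shutdowned, bool have_recovery_conf)
{
    if (xlbyte_lt(undo, checkpoint_rec) || xlbyte_lt(redo, checkpoint_rec))
        return true;                        /* WAL extends past the checkpoint */
    if (!control_file_says_shutdowned)
        return true;                        /* previous shutdown was not clean */
    return have_recovery_conf;              /* archive recovery explicitly requested */
}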
*/ if (XLByteLT(checkPoint.redo, RecPtr)) { @@ -4603,7 +4583,7 @@ StartupXLOG(void) xlog_outrec(buf, record); strcat(buf, " - "); RmgrTable[record->xl_rmid].rm_desc(buf, - record->xl_info, XLogRecGetData(record)); + record->xl_info, XLogRecGetData(record)); elog(LOG, "%s", buf); } #endif @@ -4621,7 +4601,7 @@ StartupXLOG(void) /* nextXid must be beyond record's xid */ if (TransactionIdFollowsOrEquals(record->xl_xid, - ShmemVariableCache->nextXid)) + ShmemVariableCache->nextXid)) { ShmemVariableCache->nextXid = record->xl_xid; TransactionIdAdvance(ShmemVariableCache->nextXid); @@ -4655,8 +4635,8 @@ StartupXLOG(void) } /* - * Re-fetch the last valid or last applied record, so we can identify - * the exact endpoint of what we consider the valid portion of WAL. + * Re-fetch the last valid or last applied record, so we can identify the + * exact endpoint of what we consider the valid portion of WAL. */ record = ReadRecord(&LastRec, PANIC); EndOfLog = EndRecPtr; @@ -4682,8 +4662,8 @@ StartupXLOG(void) * * If we stopped short of the end of WAL during recovery, then we are * generating a new timeline and must assign it a unique new ID. - * Otherwise, we can just extend the timeline we were in when we ran - * out of WAL. + * Otherwise, we can just extend the timeline we were in when we ran out + * of WAL. */ if (needNewTimeLine) { @@ -4698,10 +4678,10 @@ StartupXLOG(void) XLogCtl->ThisTimeLineID = ThisTimeLineID; /* - * We are now done reading the old WAL. Turn off archive fetching if - * it was active, and make a writable copy of the last WAL segment. - * (Note that we also have a copy of the last block of the old WAL in - * readBuf; we will use that below.) + * We are now done reading the old WAL. Turn off archive fetching if it + * was active, and make a writable copy of the last WAL segment. (Note + * that we also have a copy of the last block of the old WAL in readBuf; + * we will use that below.) */ if (InArchiveRecovery) exitArchiveRecovery(curFileTLI, endLogId, endLogSeg); @@ -4724,9 +4704,9 @@ StartupXLOG(void) ((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ; /* - * Tricky point here: readBuf contains the *last* block that the - * LastRec record spans, not the one it starts in. The last block is - * indeed the one we want to use. + * Tricky point here: readBuf contains the *last* block that the LastRec + * record spans, not the one it starts in. The last block is indeed the + * one we want to use. */ Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize); memcpy((char *) Insert->currpage, readBuf, BLCKSZ); @@ -4752,9 +4732,8 @@ StartupXLOG(void) else { /* - * Whenever Write.LogwrtResult points to exactly the end of a - * page, Write.curridx must point to the *next* page (see - * XLogWrite()). + * Whenever Write.LogwrtResult points to exactly the end of a page, + * Write.curridx must point to the *next* page (see XLogWrite()). * * Note: it might seem we should do AdvanceXLInsertBuffer() here, but * this is sufficient. The first actual attempt to insert a log @@ -4785,17 +4764,16 @@ StartupXLOG(void) pgstat_reset_all(); /* - * Perform a new checkpoint to update our recovery activity to - * disk. + * Perform a new checkpoint to update our recovery activity to disk. * - * Note that we write a shutdown checkpoint rather than an on-line - * one. 
This is not particularly critical, but since we may be - * assigning a new TLI, using a shutdown checkpoint allows us to - * have the rule that TLI only changes in shutdown checkpoints, - * which allows some extra error checking in xlog_redo. + * Note that we write a shutdown checkpoint rather than an on-line one. + * This is not particularly critical, but since we may be assigning a + * new TLI, using a shutdown checkpoint allows us to have the rule + * that TLI only changes in shutdown checkpoints, which allows some + * extra error checking in xlog_redo. * - * In case we had to use the secondary checkpoint, make sure that it - * will still be shown as the secondary checkpoint after this + * In case we had to use the secondary checkpoint, make sure that it will + * still be shown as the secondary checkpoint after this * CreateCheckPoint operation; we don't want the broken primary * checkpoint to become prevCheckPoint... */ @@ -4810,8 +4788,8 @@ StartupXLOG(void) XLogCloseRelationCache(); /* - * Now that we've checkpointed the recovery, it's safe to flush - * old backup_label, if present. + * Now that we've checkpointed the recovery, it's safe to flush old + * backup_label, if present. */ remove_backup_label(); } @@ -4878,7 +4856,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt) { case 1: ereport(LOG, - (errmsg("invalid primary checkpoint link in control file"))); + (errmsg("invalid primary checkpoint link in control file"))); break; case 2: ereport(LOG, @@ -4886,7 +4864,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt) break; default: ereport(LOG, - (errmsg("invalid checkpoint link in backup_label file"))); + (errmsg("invalid checkpoint link in backup_label file"))); break; } return NULL; @@ -4927,7 +4905,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt) break; default: ereport(LOG, - (errmsg("invalid resource manager ID in checkpoint record"))); + (errmsg("invalid resource manager ID in checkpoint record"))); break; } return NULL; @@ -4939,11 +4917,11 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt) { case 1: ereport(LOG, - (errmsg("invalid xl_info in primary checkpoint record"))); + (errmsg("invalid xl_info in primary checkpoint record"))); break; case 2: ereport(LOG, - (errmsg("invalid xl_info in secondary checkpoint record"))); + (errmsg("invalid xl_info in secondary checkpoint record"))); break; default: ereport(LOG, @@ -4959,11 +4937,11 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt) { case 1: ereport(LOG, - (errmsg("invalid length of primary checkpoint record"))); + (errmsg("invalid length of primary checkpoint record"))); break; case 2: ereport(LOG, - (errmsg("invalid length of secondary checkpoint record"))); + (errmsg("invalid length of secondary checkpoint record"))); break; default: ereport(LOG, @@ -5084,10 +5062,10 @@ CreateCheckPoint(bool shutdown, bool force) int nsegsrecycled = 0; /* - * Acquire CheckpointLock to ensure only one checkpoint happens at a - * time. (This is just pro forma, since in the present system - * structure there is only one process that is allowed to issue - * checkpoints at any given time.) + * Acquire CheckpointLock to ensure only one checkpoint happens at a time. + * (This is just pro forma, since in the present system structure there is + * only one process that is allowed to issue checkpoints at any given + * time.) 
*/ LWLockAcquire(CheckpointLock, LW_EXCLUSIVE); @@ -5108,10 +5086,10 @@ CreateCheckPoint(bool shutdown, bool force) checkPoint.time = time(NULL); /* - * We must hold CheckpointStartLock while determining the checkpoint - * REDO pointer. This ensures that any concurrent transaction commits - * will be either not yet logged, or logged and recorded in pg_clog. - * See notes in RecordTransactionCommit(). + * We must hold CheckpointStartLock while determining the checkpoint REDO + * pointer. This ensures that any concurrent transaction commits will be + * either not yet logged, or logged and recorded in pg_clog. See notes in + * RecordTransactionCommit(). */ LWLockAcquire(CheckpointStartLock, LW_EXCLUSIVE); @@ -5119,20 +5097,19 @@ CreateCheckPoint(bool shutdown, bool force) LWLockAcquire(WALInsertLock, LW_EXCLUSIVE); /* - * If this isn't a shutdown or forced checkpoint, and we have not - * inserted any XLOG records since the start of the last checkpoint, - * skip the checkpoint. The idea here is to avoid inserting duplicate - * checkpoints when the system is idle. That wastes log space, and - * more importantly it exposes us to possible loss of both current and - * previous checkpoint records if the machine crashes just as we're - * writing the update. (Perhaps it'd make even more sense to - * checkpoint only when the previous checkpoint record is in a - * different xlog page?) + * If this isn't a shutdown or forced checkpoint, and we have not inserted + * any XLOG records since the start of the last checkpoint, skip the + * checkpoint. The idea here is to avoid inserting duplicate checkpoints + * when the system is idle. That wastes log space, and more importantly it + * exposes us to possible loss of both current and previous checkpoint + * records if the machine crashes just as we're writing the update. + * (Perhaps it'd make even more sense to checkpoint only when the previous + * checkpoint record is in a different xlog page?) * - * We have to make two tests to determine that nothing has happened since - * the start of the last checkpoint: current insertion point must - * match the end of the last checkpoint record, and its redo pointer - * must point to itself. + * We have to make two tests to determine that nothing has happened since the + * start of the last checkpoint: current insertion point must match the + * end of the last checkpoint record, and its redo pointer must point to + * itself. */ if (!shutdown && !force) { @@ -5158,10 +5135,10 @@ CreateCheckPoint(bool shutdown, bool force) /* * Compute new REDO record ptr = location of next XLOG record. * - * NB: this is NOT necessarily where the checkpoint record itself will - * be, since other backends may insert more XLOG records while we're - * off doing the buffer flush work. Those XLOG records are logically - * after the checkpoint, even though physically before it. Got that? + * NB: this is NOT necessarily where the checkpoint record itself will be, + * since other backends may insert more XLOG records while we're off doing + * the buffer flush work. Those XLOG records are logically after the + * checkpoint, even though physically before it. Got that? */ freespace = INSERT_FREESPACE(Insert); if (freespace < SizeOfXLogRecord) @@ -5173,16 +5150,15 @@ CreateCheckPoint(bool shutdown, bool force) INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx); /* - * Here we update the shared RedoRecPtr for future XLogInsert calls; - * this must be done while holding the insert lock AND the info_lck. 
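The two-part idleness test described above can be written as one predicate: a non-forced, non-shutdown checkpoint is skipped only when the current insertion point still equals the end of the previous checkpoint record and that checkpoint's redo pointer points back at the checkpoint itself. A sketch with simplified types:

#include <stdbool.h>

typedef struct { unsigned xlogid, xrecoff; } XLogRecPtr;   /* simplified */

static bool
xlbyte_eq(XLogRecPtr a, XLogRecPtr b)
{
    return a.xlogid == b.xlogid && a.xrecoff == b.xrecoff;
}

static bool
can_skip_checkpoint(bool shutdown, bool force,
                    XLogRecPtr curr_insert,      /* current insertion point */
                    XLogRecPtr last_ckpt_end,    /* end of last checkpoint record */
                    XLogRecPtr last_ckpt_start,  /* start of last checkpoint record */
                    XLogRecPtr last_ckpt_redo)   /* its redo pointer */
{
    if (shutdown || force)
        return false;
    return xlbyte_eq(curr_insert, last_ckpt_end) &&
           xlbyte_eq(last_ckpt_redo, last_ckpt_start);
}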
+ * Here we update the shared RedoRecPtr for future XLogInsert calls; this + * must be done while holding the insert lock AND the info_lck. * * Note: if we fail to complete the checkpoint, RedoRecPtr will be left - * pointing past where it really needs to point. This is okay; the - * only consequence is that XLogInsert might back up whole buffers - * that it didn't really need to. We can't postpone advancing - * RedoRecPtr because XLogInserts that happen while we are dumping - * buffers must assume that their buffer changes are not included in - * the checkpoint. + * pointing past where it really needs to point. This is okay; the only + * consequence is that XLogInsert might back up whole buffers that it + * didn't really need to. We can't postpone advancing RedoRecPtr because + * XLogInserts that happen while we are dumping buffers must assume that + * their buffer changes are not included in the checkpoint. */ { /* use volatile pointer to prevent code rearrangement */ @@ -5219,15 +5195,15 @@ CreateCheckPoint(bool shutdown, bool force) &checkPoint.nextMultiOffset); /* - * Having constructed the checkpoint record, ensure all shmem disk - * buffers and commit-log buffers are flushed to disk. + * Having constructed the checkpoint record, ensure all shmem disk buffers + * and commit-log buffers are flushed to disk. * - * This I/O could fail for various reasons. If so, we will fail to - * complete the checkpoint, but there is no reason to force a system - * panic. Accordingly, exit critical section while doing it. (If - * we are doing a shutdown checkpoint, we probably *should* panic --- - * but that will happen anyway because we'll still be inside the - * critical section established by ShutdownXLOG.) + * This I/O could fail for various reasons. If so, we will fail to complete + * the checkpoint, but there is no reason to force a system panic. + * Accordingly, exit critical section while doing it. (If we are doing a + * shutdown checkpoint, we probably *should* panic --- but that will + * happen anyway because we'll still be inside the critical section + * established by ShutdownXLOG.) */ END_CRIT_SECTION(); @@ -5260,8 +5236,8 @@ CreateCheckPoint(bool shutdown, bool force) XLogFlush(recptr); /* - * We now have ProcLastRecPtr = start of actual checkpoint record, - * recptr = end of actual checkpoint record. + * We now have ProcLastRecPtr = start of actual checkpoint record, recptr + * = end of actual checkpoint record. */ if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr)) ereport(PANIC, @@ -5287,8 +5263,8 @@ CreateCheckPoint(bool shutdown, bool force) LWLockRelease(ControlFileLock); /* - * We are now done with critical updates; no need for system panic if - * we have trouble while fooling with offline log segments. + * We are now done with critical updates; no need for system panic if we + * have trouble while fooling with offline log segments. */ END_CRIT_SECTION(); @@ -5304,19 +5280,18 @@ CreateCheckPoint(bool shutdown, bool force) } /* - * Make more log segments if needed. (Do this after deleting offline - * log segments, to avoid having peak disk space usage higher than - * necessary.) + * Make more log segments if needed. (Do this after deleting offline log + * segments, to avoid having peak disk space usage higher than necessary.) */ if (!shutdown) nsegsadded = PreallocXlogFiles(recptr); /* - * Truncate pg_subtrans if possible. We can throw away all data - * before the oldest XMIN of any running transaction. 
No future - * transaction will attempt to reference any pg_subtrans entry older - * than that (see Asserts in subtrans.c). During recovery, though, we - * mustn't do this because StartupSUBTRANS hasn't been called yet. + * Truncate pg_subtrans if possible. We can throw away all data before + * the oldest XMIN of any running transaction. No future transaction will + * attempt to reference any pg_subtrans entry older than that (see Asserts + * in subtrans.c). During recovery, though, we mustn't do this because + * StartupSUBTRANS hasn't been called yet. */ if (!InRecovery) TruncateSUBTRANS(GetOldestXmin(true)); @@ -5342,13 +5317,14 @@ XLogPutNextOid(Oid nextOid) rdata.buffer = InvalidBuffer; rdata.next = NULL; (void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata); + /* * We need not flush the NEXTOID record immediately, because any of the - * just-allocated OIDs could only reach disk as part of a tuple insert - * or update that would have its own XLOG record that must follow the - * NEXTOID record. Therefore, the standard buffer LSN interlock applied - * to those records will ensure no such OID reaches disk before the - * NEXTOID record does. + * just-allocated OIDs could only reach disk as part of a tuple insert or + * update that would have its own XLOG record that must follow the NEXTOID + * record. Therefore, the standard buffer LSN interlock applied to those + * records will ensure no such OID reaches disk before the NEXTOID record + * does. */ } @@ -5384,8 +5360,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) checkPoint.nextMultiOffset); /* - * TLI may change in a shutdown checkpoint, but it shouldn't - * decrease + * TLI may change in a shutdown checkpoint, but it shouldn't decrease */ if (checkPoint.ThisTimeLineID != ThisTimeLineID) { @@ -5394,7 +5369,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) (int) checkPoint.ThisTimeLineID)) ereport(PANIC, (errmsg("unexpected timeline ID %u (after %u) in checkpoint record", - checkPoint.ThisTimeLineID, ThisTimeLineID))); + checkPoint.ThisTimeLineID, ThisTimeLineID))); /* Following WAL records should be run with new TLI */ ThisTimeLineID = checkPoint.ThisTimeLineID; } @@ -5441,7 +5416,7 @@ xlog_desc(char *buf, uint8 xl_info, char *rec) checkpoint->nextOid, checkpoint->nextMulti, checkpoint->nextMultiOffset, - (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online"); + (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online"); } else if (info == XLOG_NEXTOID) { @@ -5535,23 +5510,23 @@ assign_xlog_sync_method(const char *method, bool doit, GucSource source) /* * To ensure that no blocks escape unsynced, force an fsync on the * currently open log segment (if any). Also, if the open flag is - * changing, close the log file so it will be reopened (with new - * flag bit) at next use. + * changing, close the log file so it will be reopened (with new flag + * bit) at next use. 
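The fsync-then-maybe-close rule in this comment is small enough to show in full: before a new wal_sync_method takes effect, any open segment is synced so no blocks written under the old method are left unflushed, and if the file-open flag bit changes the segment is closed so the next use reopens it with the new flag. A sketch using plain POSIX calls in place of pg_fsync and the PANIC-on-error reports:

#include <unistd.h>

static void
switch_wal_sync_method(int *open_log_fd, int old_sync_bit, int new_sync_bit)
{
    if (*open_log_fd >= 0)
    {
        (void) fsync(*open_log_fd);       /* the real code PANICs if this fails */
        if (old_sync_bit != new_sync_bit)
        {
            (void) close(*open_log_fd);   /* reopened later with the new flag */
            *open_log_fd = -1;
        }
    }
}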
*/ if (openLogFile >= 0) { if (pg_fsync(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync log file %u, segment %u: %m", - openLogId, openLogSeg))); + errmsg("could not fsync log file %u, segment %u: %m", + openLogId, openLogSeg))); if (open_sync_bit != new_sync_bit) { if (close(openLogFile)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close log file %u, segment %u: %m", - openLogId, openLogSeg))); + errmsg("could not close log file %u, segment %u: %m", + openLogId, openLogSeg))); openLogFile = -1; } } @@ -5575,16 +5550,16 @@ issue_xlog_fsync(void) if (pg_fsync_no_writethrough(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync log file %u, segment %u: %m", - openLogId, openLogSeg))); + errmsg("could not fsync log file %u, segment %u: %m", + openLogId, openLogSeg))); break; #ifdef HAVE_FSYNC_WRITETHROUGH case SYNC_METHOD_FSYNC_WRITETHROUGH: if (pg_fsync_writethrough(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync write-through log file %u, segment %u: %m", - openLogId, openLogSeg))); + errmsg("could not fsync write-through log file %u, segment %u: %m", + openLogId, openLogSeg))); break; #endif #ifdef HAVE_FDATASYNC @@ -5592,8 +5567,8 @@ issue_xlog_fsync(void) if (pg_fdatasync(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fdatasync log file %u, segment %u: %m", - openLogId, openLogSeg))); + errmsg("could not fdatasync log file %u, segment %u: %m", + openLogId, openLogSeg))); break; #endif case SYNC_METHOD_OPEN: @@ -5640,25 +5615,25 @@ pg_start_backup(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), (errmsg("WAL archiving is not active"), - (errhint("archive_command must be defined before " - "online backups can be made safely."))))); + (errhint("archive_command must be defined before " + "online backups can be made safely."))))); backupidstr = DatumGetCString(DirectFunctionCall1(textout, - PointerGetDatum(backupid))); + PointerGetDatum(backupid))); /* - * Force a CHECKPOINT. This is not strictly necessary, but it seems - * like a good idea to minimize the amount of past WAL needed to use - * the backup. Also, this guarantees that two successive backup runs - * will have different checkpoint positions and hence different - * history file names, even if nothing happened in between. + * Force a CHECKPOINT. This is not strictly necessary, but it seems like + * a good idea to minimize the amount of past WAL needed to use the + * backup. Also, this guarantees that two successive backup runs will + * have different checkpoint positions and hence different history file + * names, even if nothing happened in between. */ RequestCheckpoint(true, false); /* - * Now we need to fetch the checkpoint record location, and also its - * REDO pointer. The oldest point in WAL that would be needed to - * restore starting from the checkpoint is precisely the REDO pointer. + * Now we need to fetch the checkpoint record location, and also its REDO + * pointer. The oldest point in WAL that would be needed to restore + * starting from the checkpoint is precisely the REDO pointer. 
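The claim that the checkpoint's REDO pointer is precisely the oldest WAL needed translates directly into which segment file the backup must keep: the one containing that pointer. A sketch of the arithmetic that maps a WAL location to a log file and segment number (what XLByteToSeg does in this file), with the 16MB segment size written out as an assumed constant:

typedef struct { unsigned xlogid, xrecoff; } XLogRecPtr;   /* simplified */

#define WAL_SEG_SIZE (16 * 1024 * 1024)   /* stand-in for XLogSegSize */

/* oldest WAL segment an online backup needs: the one holding the REDO pointer */
static void
oldest_segment_needed(XLogRecPtr redo, unsigned *log_id, unsigned *log_seg)
{
    *log_id  = redo.xlogid;
    *log_seg = redo.xrecoff / WAL_SEG_SIZE;
}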
*/ LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); checkpointloc = ControlFile->checkPoint; @@ -5669,10 +5644,10 @@ pg_start_backup(PG_FUNCTION_ARGS) XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg); /* - * We deliberately use strftime/localtime not the src/timezone - * functions, so that backup labels will consistently be recorded in - * the same timezone regardless of TimeZone setting. This matches - * elog.c's practice. + * We deliberately use strftime/localtime not the src/timezone functions, + * so that backup labels will consistently be recorded in the same + * timezone regardless of TimeZone setting. This matches elog.c's + * practice. */ stamp_time = time(NULL); strftime(strfbuf, sizeof(strfbuf), @@ -5680,8 +5655,7 @@ pg_start_backup(PG_FUNCTION_ARGS) localtime(&stamp_time)); /* - * Check for existing backup label --- implies a backup is already - * running + * Check for existing backup label --- implies a backup is already running */ if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0) { @@ -5725,7 +5699,7 @@ pg_start_backup(PG_FUNCTION_ARGS) snprintf(xlogfilename, sizeof(xlogfilename), "%X/%X", startpoint.xlogid, startpoint.xrecoff); result = DatumGetTextP(DirectFunctionCall1(textin, - CStringGetDatum(xlogfilename))); + CStringGetDatum(xlogfilename))); PG_RETURN_TEXT_P(result); } @@ -5762,8 +5736,8 @@ pg_stop_backup(PG_FUNCTION_ARGS) (errmsg("must be superuser to run a backup")))); /* - * Get the current end-of-WAL position; it will be unsafe to use this - * dump to restore to a point in advance of this time. + * Get the current end-of-WAL position; it will be unsafe to use this dump + * to restore to a point in advance of this time. */ LWLockAcquire(WALInsertLock, LW_EXCLUSIVE); INSERT_RECPTR(stoppoint, Insert, Insert->curridx); @@ -5773,10 +5747,10 @@ pg_stop_backup(PG_FUNCTION_ARGS) XLogFileName(stopxlogfilename, ThisTimeLineID, _logId, _logSeg); /* - * We deliberately use strftime/localtime not the src/timezone - * functions, so that backup labels will consistently be recorded in - * the same timezone regardless of TimeZone setting. This matches - * elog.c's practice. + * We deliberately use strftime/localtime not the src/timezone functions, + * so that backup labels will consistently be recorded in the same + * timezone regardless of TimeZone setting. This matches elog.c's + * practice. */ stamp_time = time(NULL); strftime(strfbuf, sizeof(strfbuf), @@ -5800,9 +5774,8 @@ pg_stop_backup(PG_FUNCTION_ARGS) } /* - * Read and parse the START WAL LOCATION line (this code is pretty - * crude, but we are not expecting any variability in the file - * format). + * Read and parse the START WAL LOCATION line (this code is pretty crude, + * but we are not expecting any variability in the file format). */ if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %24s)%c", &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, @@ -5869,7 +5842,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) snprintf(stopxlogfilename, sizeof(stopxlogfilename), "%X/%X", stoppoint.xlogid, stoppoint.xrecoff); result = DatumGetTextP(DirectFunctionCall1(textin, - CStringGetDatum(stopxlogfilename))); + CStringGetDatum(stopxlogfilename))); PG_RETURN_TEXT_P(result); } @@ -5921,9 +5894,9 @@ read_backup_label(XLogRecPtr *checkPointLoc) } /* - * Read and parse the START WAL LOCATION and CHECKPOINT lines (this - * code is pretty crude, but we are not expecting any variability in - * the file format). 
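"Pretty crude" here means a fixed scanf pattern: WAL locations are written into backup_label and history files as two hex halves separated by a slash, so reading one back is a single call. A self-contained sketch of just that piece (the real code also verifies the trailing file name and newline):

#include <stdio.h>
#include <stdbool.h>

typedef struct { unsigned xlogid, xrecoff; } XLogRecPtr;   /* simplified */

static bool
parse_wal_location(const char *s, XLogRecPtr *loc)
{
    return sscanf(s, "%X/%X", &loc->xlogid, &loc->xrecoff) == 2;
}

/* e.g. parse_wal_location("0/A4C1B2D8", &startpoint) fills both halves */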
+ * Read and parse the START WAL LOCATION and CHECKPOINT lines (this code + * is pretty crude, but we are not expecting any variability in the file + * format). */ if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c", &startpoint.xlogid, &startpoint.xrecoff, &tli, @@ -5963,17 +5936,17 @@ read_backup_label(XLogRecPtr *checkPointLoc) * Parse history file to identify stop point. */ if (fscanf(fp, "START WAL LOCATION: %X/%X (file %24s)%c", - &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, + &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, &ch) != 4 || ch != '\n') ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid data in file \"%s\"", histfilename))); + errmsg("invalid data in file \"%s\"", histfilename))); if (fscanf(fp, "STOP WAL LOCATION: %X/%X (file %24s)%c", - &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename, + &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename, &ch) != 4 || ch != '\n') ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid data in file \"%s\"", histfilename))); + errmsg("invalid data in file \"%s\"", histfilename))); recoveryMinXlogOffset = stoppoint; if (ferror(fp) || FreeFile(fp)) ereport(FATAL, diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 55caf84a04d..485aa52474d 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -11,7 +11,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.38 2005/06/06 17:01:23 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.39 2005/10/15 02:49:11 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -121,7 +121,7 @@ _xl_remove_hash_entry(XLogRelDesc *rdesc) rdesc->moreRecently->lessRecently = rdesc->lessRecently; hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache, - (void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL); + (void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL); if (hentry == NULL) elog(PANIC, "_xl_remove_hash_entry: file was not found in cache"); @@ -211,11 +211,11 @@ XLogOpenRelation(RelFileNode rnode) res->reldata.rd_node = rnode; /* - * We set up the lockRelId in case anything tries to lock the - * dummy relation. Note that this is fairly bogus since relNode - * may be different from the relation's OID. It shouldn't really - * matter though, since we are presumably running by ourselves and - * can't have any lock conflicts ... + * We set up the lockRelId in case anything tries to lock the dummy + * relation. Note that this is fairly bogus since relNode may be + * different from the relation's OID. It shouldn't really matter + * though, since we are presumably running by ourselves and can't have + * any lock conflicts ... */ res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode; res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode; @@ -233,13 +233,13 @@ XLogOpenRelation(RelFileNode rnode) RelationOpenSmgr(&(res->reldata)); /* - * Create the target file if it doesn't already exist. This lets - * us cope if the replay sequence contains writes to a relation - * that is later deleted. (The original coding of this routine - * would instead return NULL, causing the writes to be suppressed. 
- * But that seems like it risks losing valuable data if the - * filesystem loses an inode during a crash. Better to write the - * data until we are actually told to delete the file.) + * Create the target file if it doesn't already exist. This lets us + * cope if the replay sequence contains writes to a relation that is + * later deleted. (The original coding of this routine would instead + * return NULL, causing the writes to be suppressed. But that seems + * like it risks losing valuable data if the filesystem loses an inode + * during a crash. Better to write the data until we are actually + * told to delete the file.) */ smgrcreate(res->reldata.rd_smgr, res->reldata.rd_istemp, true); } diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index 6f74ceaed72..9ea3d741112 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.206 2005/08/08 03:11:30 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.207 2005/10/15 02:49:12 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -379,9 +379,8 @@ BootstrapMain(int argc, char *argv[]) BaseInit(); /* - * We aren't going to do the full InitPostgres pushups, but there - * are a couple of things that need to get lit up even in a dummy - * process. + * We aren't going to do the full InitPostgres pushups, but there are a + * couple of things that need to get lit up even in a dummy process. */ if (IsUnderPostmaster) { @@ -445,8 +444,8 @@ BootstrapMain(int argc, char *argv[]) /* * In NOP mode, all we really want to do is create shared memory and - * semaphores (just to prove we can do it with the current GUC - * settings). So, quit now. + * semaphores (just to prove we can do it with the current GUC settings). + * So, quit now. */ if (xlogop == BS_XLOG_NOP) proc_exit(0); @@ -465,8 +464,8 @@ BootstrapMain(int argc, char *argv[]) /* * Process bootstrap input. * - * the sed script boot.sed renamed yyparse to Int_yyparse for the - * bootstrap parser to avoid conflicts with the normal SQL parser + * the sed script boot.sed renamed yyparse to Int_yyparse for the bootstrap + * parser to avoid conflicts with the normal SQL parser */ Int_yyparse(); @@ -537,8 +536,7 @@ bootstrap_signals(void) pqsignal(SIGWINCH, SIG_DFL); /* - * Unblock signals (they were blocked when the postmaster forked - * us) + * Unblock signals (they were blocked when the postmaster forked us) */ PG_SETMASK(&UnBlockSig); } @@ -733,12 +731,12 @@ DefineAttr(char *name, char *type, int attnum) attrtypes[attnum]->attislocal = true; /* - * Mark as "not null" if type is fixed-width and prior columns are - * too. This corresponds to case where column can be accessed directly - * via C struct declaration. + * Mark as "not null" if type is fixed-width and prior columns are too. + * This corresponds to case where column can be accessed directly via C + * struct declaration. * - * oidvector and int2vector are also treated as not-nullable, even - * though they are no longer fixed-width. + * oidvector and int2vector are also treated as not-nullable, even though + * they are no longer fixed-width. 
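The rule stated above is cumulative: a bootstrap column is forced NOT NULL only while every earlier column has also qualified, because that is exactly the prefix of the row a C struct overlay can address at fixed offsets. A one-function sketch of the per-column test:

#include <stdbool.h>

/* attlen > 0 means fixed-width; the two vector types are special-cased */
static bool
bootstrap_col_forced_not_null(int attlen, bool is_oidvector_or_int2vector,
                              bool all_prior_cols_not_null)
{
    return all_prior_cols_not_null &&
           (attlen > 0 || is_oidvector_or_int2vector);
}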
*/ #define MARKNOTNULL(att) \ ((att)->attlen > 0 || \ @@ -1005,8 +1003,7 @@ MapArrayTypeName(char *s) { int i, j; - static char newStr[NAMEDATALEN]; /* array type names < NAMEDATALEN - * long */ + static char newStr[NAMEDATALEN]; /* array type names < NAMEDATALEN long */ if (s == NULL || s[0] == '\0') return s; @@ -1095,8 +1092,8 @@ FindStr(char *str, int length, hashnode *mderef) while (node != NULL) { /* - * We must differentiate between string constants that might have - * the same value as a identifier and the identifier itself. + * We must differentiate between string constants that might have the + * same value as a identifier and the identifier itself. */ if (!strcmp(str, strtable[node->strnum])) { @@ -1131,11 +1128,11 @@ AddStr(char *str, int strlength, int mderef) elog(FATAL, "bootstrap string table overflow"); /* - * Some of the utilites (eg, define type, create relation) assume that - * the string they're passed is a NAMEDATALEN. We get array bound - * read violations from purify if we don't allocate at least - * NAMEDATALEN bytes for strings of this sort. Because we're lazy, we - * allocate at least NAMEDATALEN bytes all the time. + * Some of the utilites (eg, define type, create relation) assume that the + * string they're passed is a NAMEDATALEN. We get array bound read + * violations from purify if we don't allocate at least NAMEDATALEN bytes + * for strings of this sort. Because we're lazy, we allocate at least + * NAMEDATALEN bytes all the time. */ if ((len = strlength + 1) < NAMEDATALEN) @@ -1191,8 +1188,8 @@ index_register(Oid heap, /* * XXX mao 10/31/92 -- don't gc index reldescs, associated info at - * bootstrap time. we'll declare the indices now, but want to create - * them later. + * bootstrap time. we'll declare the indices now, but want to create them + * later. */ if (nogc == NULL) diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 689a2ff8196..15a197af81b 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.119 2005/10/10 18:49:01 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.120 2005/10/15 02:49:12 momjian Exp $ * * NOTES * See acl.h. @@ -65,7 +65,7 @@ dumpacl(Acl *acl) for (i = 0; i < ACL_NUM(acl); ++i) elog(DEBUG2, " acl[%d]: %s", i, DatumGetCString(DirectFunctionCall1(aclitemout, - PointerGetDatum(aip + i)))); + PointerGetDatum(aip + i)))); } #endif /* ACLDEBUG */ @@ -101,9 +101,10 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, Acl *newer_acl; if (grantee->rolname) - aclitem.ai_grantee = get_roleid_checked(grantee->rolname); + aclitem. ai_grantee = get_roleid_checked(grantee->rolname); + else - aclitem.ai_grantee = ACL_ID_PUBLIC; + aclitem. ai_grantee = ACL_ID_PUBLIC; /* * Grant options can only be granted to individual roles, not PUBLIC. @@ -116,19 +117,18 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("grant options can only be granted to roles"))); - aclitem.ai_grantor = grantorId; + aclitem. ai_grantor = grantorId; /* * The asymmetry in the conditions here comes from the spec. In - * GRANT, the grant_option flag signals WITH GRANT OPTION, which - * means to grant both the basic privilege and its grant option. - * But in REVOKE, plain revoke revokes both the basic privilege - * and its grant option, while REVOKE GRANT OPTION revokes only - * the option. 
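The asymmetry described in this comment determines which of the two bitmask arguments receive the requested privileges when the ACL item is built: GRANT always sets the privilege bits and sets the grant-option bits only WITH GRANT OPTION, while REVOKE always clears the grant-option bits and clears the privilege bits only for a plain revoke. A sketch with a stand-in two-field ACL item in place of the real ACLITEM_SET_PRIVS_GOPTIONS macro:

#include <stdint.h>
#include <stdbool.h>

typedef struct
{
    uint32_t privs;     /* basic privilege bits */
    uint32_t goptions;  /* grant-option bits */
} AclItemBits;          /* simplified stand-in */

static AclItemBits
requested_bits(bool is_grant, bool grant_option, uint32_t privileges)
{
    AclItemBits bits;

    /* GRANT: always the privileges; options only WITH GRANT OPTION.   */
    /* REVOKE: always the options; privileges only for a plain revoke. */
    bits.privs    = (is_grant || !grant_option) ? privileges : 0;
    bits.goptions = (!is_grant || grant_option) ? privileges : 0;
    return bits;
}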
+ * GRANT, the grant_option flag signals WITH GRANT OPTION, which means + * to grant both the basic privilege and its grant option. But in + * REVOKE, plain revoke revokes both the basic privilege and its grant + * option, while REVOKE GRANT OPTION revokes only the option. */ ACLITEM_SET_PRIVS_GOPTIONS(aclitem, - (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS, - (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS); + (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS, + (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS); newer_acl = aclupdate(new_acl, &aclitem, modechg, ownerId, behavior); @@ -221,8 +221,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt) AclMode this_privileges; Acl *old_acl; Acl *new_acl; - Oid grantorId; - Oid ownerId; + Oid grantorId; + Oid ownerId; HeapTuple newtuple; Datum values[Natts_pg_class]; char nulls[Natts_pg_class]; @@ -257,8 +257,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt) relvar->relname))); /* - * Get owner ID and working copy of existing ACL. - * If there's no ACL, substitute the proper default. + * Get owner ID and working copy of existing ACL. If there's no ACL, + * substitute the proper default. */ ownerId = pg_class_tuple->relowner; aclDatum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relacl, @@ -275,8 +275,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt) /* * If we found no grant options, consider whether to issue a hard - * error. Per spec, having any privilege at all on the object - * will get you by here. + * error. Per spec, having any privilege at all on the object will + * get you by here. */ if (avail_goptions == ACL_NO_RIGHTS) { @@ -289,12 +289,12 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt) } /* - * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't - * quite what the spec says to do: the spec seems to want a - * warning only if no privilege bits actually change in the ACL. - * In practice that behavior seems much too noisy, as well as - * inconsistent with the GRANT case.) + * Restrict the operation to what we can actually grant or revoke, and + * issue a warning if appropriate. (For REVOKE this isn't quite what + * the spec says to do: the spec seems to want a warning only if no + * privilege bits actually change in the ACL. In practice that + * behavior seems much too noisy, as well as inconsistent with the + * GRANT case.) */ this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions); if (stmt->is_grant) @@ -323,8 +323,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt) /* * Generate new ACL. * - * We need the members of both old and new ACLs so we can correct - * the shared dependency information. + * We need the members of both old and new ACLs so we can correct the + * shared dependency information. */ noldmembers = aclmembers(old_acl, &oldmembers); @@ -411,8 +411,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt) AclMode this_privileges; Acl *old_acl; Acl *new_acl; - Oid grantorId; - Oid ownerId; + Oid grantorId; + Oid ownerId; HeapTuple newtuple; Datum values[Natts_pg_database]; char nulls[Natts_pg_database]; @@ -436,8 +436,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt) pg_database_tuple = (Form_pg_database) GETSTRUCT(tuple); /* - * Get owner ID and working copy of existing ACL. - * If there's no ACL, substitute the proper default. + * Get owner ID and working copy of existing ACL. If there's no ACL, + * substitute the proper default. 
*/ ownerId = pg_database_tuple->datdba; aclDatum = heap_getattr(tuple, Anum_pg_database_datacl, @@ -454,8 +454,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt) /* * If we found no grant options, consider whether to issue a hard - * error. Per spec, having any privilege at all on the object - * will get you by here. + * error. Per spec, having any privilege at all on the object will + * get you by here. */ if (avail_goptions == ACL_NO_RIGHTS) { @@ -468,12 +468,12 @@ ExecuteGrantStmt_Database(GrantStmt *stmt) } /* - * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't - * quite what the spec says to do: the spec seems to want a - * warning only if no privilege bits actually change in the ACL. - * In practice that behavior seems much too noisy, as well as - * inconsistent with the GRANT case.) + * Restrict the operation to what we can actually grant or revoke, and + * issue a warning if appropriate. (For REVOKE this isn't quite what + * the spec says to do: the spec seems to want a warning only if no + * privilege bits actually change in the ACL. In practice that + * behavior seems much too noisy, as well as inconsistent with the + * GRANT case.) */ this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions); if (stmt->is_grant) @@ -502,8 +502,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt) /* * Generate new ACL. * - * We need the members of both old and new ACLs so we can correct - * the shared dependency information. + * We need the members of both old and new ACLs so we can correct the + * shared dependency information. */ noldmembers = aclmembers(old_acl, &oldmembers); @@ -589,8 +589,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt) AclMode this_privileges; Acl *old_acl; Acl *new_acl; - Oid grantorId; - Oid ownerId; + Oid grantorId; + Oid ownerId; HeapTuple newtuple; Datum values[Natts_pg_proc]; char nulls[Natts_pg_proc]; @@ -611,8 +611,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt) pg_proc_tuple = (Form_pg_proc) GETSTRUCT(tuple); /* - * Get owner ID and working copy of existing ACL. - * If there's no ACL, substitute the proper default. + * Get owner ID and working copy of existing ACL. If there's no ACL, + * substitute the proper default. */ ownerId = pg_proc_tuple->proowner; aclDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proacl, @@ -629,8 +629,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt) /* * If we found no grant options, consider whether to issue a hard - * error. Per spec, having any privilege at all on the object - * will get you by here. + * error. Per spec, having any privilege at all on the object will + * get you by here. */ if (avail_goptions == ACL_NO_RIGHTS) { @@ -643,12 +643,12 @@ ExecuteGrantStmt_Function(GrantStmt *stmt) } /* - * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't - * quite what the spec says to do: the spec seems to want a - * warning only if no privilege bits actually change in the ACL. - * In practice that behavior seems much too noisy, as well as - * inconsistent with the GRANT case.) + * Restrict the operation to what we can actually grant or revoke, and + * issue a warning if appropriate. (For REVOKE this isn't quite what + * the spec says to do: the spec seems to want a warning only if no + * privilege bits actually change in the ACL. In practice that + * behavior seems much too noisy, as well as inconsistent with the + * GRANT case.) 
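The "restrict and warn" behaviour repeated for every object kind in this diff is a simple bit intersection: the privileges actually granted or revoked are the requested ones masked by those the grantor holds grant options for, with a WARNING rather than an error when bits are dropped. A sketch using a plain bitmask in place of AclMode and the ACL_OPTION_TO_PRIVS conversion:

#include <stdint.h>
#include <stdio.h>

static uint32_t
clamp_to_grantable(uint32_t requested, uint32_t grantable_privs)
{
    uint32_t usable = requested & grantable_privs;

    if (usable != requested)
        fprintf(stderr,
                "WARNING: not all requested privileges could be granted/revoked\n");
    return usable;
}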
*/ this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions); if (stmt->is_grant) @@ -677,8 +677,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt) /* * Generate new ACL. * - * We need the members of both old and new ACLs so we can correct - * the shared dependency information. + * We need the members of both old and new ACLs so we can correct the + * shared dependency information. */ noldmembers = aclmembers(old_acl, &oldmembers); @@ -763,8 +763,8 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) AclMode this_privileges; Acl *old_acl; Acl *new_acl; - Oid grantorId; - Oid ownerId; + Oid grantorId; + Oid ownerId; HeapTuple newtuple; Datum values[Natts_pg_language]; char nulls[Natts_pg_language]; @@ -788,14 +788,14 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("language \"%s\" is not trusted", langname), - errhint("Only superusers may use untrusted languages."))); + errhint("Only superusers may use untrusted languages."))); /* - * Get owner ID and working copy of existing ACL. - * If there's no ACL, substitute the proper default. + * Get owner ID and working copy of existing ACL. If there's no ACL, + * substitute the proper default. * - * Note: for now, languages are treated as owned by the bootstrap - * user. We should add an owner column to pg_language instead. + * Note: for now, languages are treated as owned by the bootstrap user. + * We should add an owner column to pg_language instead. */ ownerId = BOOTSTRAP_SUPERUSERID; aclDatum = SysCacheGetAttr(LANGNAME, tuple, Anum_pg_language_lanacl, @@ -812,8 +812,8 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) /* * If we found no grant options, consider whether to issue a hard - * error. Per spec, having any privilege at all on the object - * will get you by here. + * error. Per spec, having any privilege at all on the object will + * get you by here. */ if (avail_goptions == ACL_NO_RIGHTS) { @@ -826,12 +826,12 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) } /* - * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't - * quite what the spec says to do: the spec seems to want a - * warning only if no privilege bits actually change in the ACL. - * In practice that behavior seems much too noisy, as well as - * inconsistent with the GRANT case.) + * Restrict the operation to what we can actually grant or revoke, and + * issue a warning if appropriate. (For REVOKE this isn't quite what + * the spec says to do: the spec seems to want a warning only if no + * privilege bits actually change in the ACL. In practice that + * behavior seems much too noisy, as well as inconsistent with the + * GRANT case.) */ this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions); if (stmt->is_grant) @@ -860,8 +860,8 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) /* * Generate new ACL. * - * We need the members of both old and new ACLs so we can correct - * the shared dependency information. + * We need the members of both old and new ACLs so we can correct the + * shared dependency information. 
*/ noldmembers = aclmembers(old_acl, &oldmembers); @@ -946,8 +946,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt) AclMode this_privileges; Acl *old_acl; Acl *new_acl; - Oid grantorId; - Oid ownerId; + Oid grantorId; + Oid ownerId; HeapTuple newtuple; Datum values[Natts_pg_namespace]; char nulls[Natts_pg_namespace]; @@ -968,8 +968,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt) pg_namespace_tuple = (Form_pg_namespace) GETSTRUCT(tuple); /* - * Get owner ID and working copy of existing ACL. - * If there's no ACL, substitute the proper default. + * Get owner ID and working copy of existing ACL. If there's no ACL, + * substitute the proper default. */ ownerId = pg_namespace_tuple->nspowner; aclDatum = SysCacheGetAttr(NAMESPACENAME, tuple, @@ -987,8 +987,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt) /* * If we found no grant options, consider whether to issue a hard - * error. Per spec, having any privilege at all on the object - * will get you by here. + * error. Per spec, having any privilege at all on the object will + * get you by here. */ if (avail_goptions == ACL_NO_RIGHTS) { @@ -1001,12 +1001,12 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt) } /* - * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't - * quite what the spec says to do: the spec seems to want a - * warning only if no privilege bits actually change in the ACL. - * In practice that behavior seems much too noisy, as well as - * inconsistent with the GRANT case.) + * Restrict the operation to what we can actually grant or revoke, and + * issue a warning if appropriate. (For REVOKE this isn't quite what + * the spec says to do: the spec seems to want a warning only if no + * privilege bits actually change in the ACL. In practice that + * behavior seems much too noisy, as well as inconsistent with the + * GRANT case.) */ this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions); if (stmt->is_grant) @@ -1035,8 +1035,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt) /* * Generate new ACL. * - * We need the members of both old and new ACLs so we can correct - * the shared dependency information. + * We need the members of both old and new ACLs so we can correct the + * shared dependency information. */ noldmembers = aclmembers(old_acl, &oldmembers); @@ -1103,8 +1103,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) if (priv & ~((AclMode) ACL_ALL_RIGHTS_TABLESPACE)) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), - errmsg("invalid privilege type %s for tablespace", - privilege_to_string(priv)))); + errmsg("invalid privilege type %s for tablespace", + privilege_to_string(priv)))); privileges |= priv; } } @@ -1123,8 +1123,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) AclMode this_privileges; Acl *old_acl; Acl *new_acl; - Oid grantorId; - Oid ownerId; + Oid grantorId; + Oid ownerId; HeapTuple newtuple; Datum values[Natts_pg_tablespace]; char nulls[Natts_pg_tablespace]; @@ -1144,12 +1144,12 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace \"%s\" does not exist", spcname))); + errmsg("tablespace \"%s\" does not exist", spcname))); pg_tablespace_tuple = (Form_pg_tablespace) GETSTRUCT(tuple); /* - * Get owner ID and working copy of existing ACL. - * If there's no ACL, substitute the proper default. + * Get owner ID and working copy of existing ACL. If there's no ACL, + * substitute the proper default. 
*/ ownerId = pg_tablespace_tuple->spcowner; aclDatum = heap_getattr(tuple, Anum_pg_tablespace_spcacl, @@ -1166,8 +1166,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) /* * If we found no grant options, consider whether to issue a hard - * error. Per spec, having any privilege at all on the object - * will get you by here. + * error. Per spec, having any privilege at all on the object will + * get you by here. */ if (avail_goptions == ACL_NO_RIGHTS) { @@ -1180,12 +1180,12 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) } /* - * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't - * quite what the spec says to do: the spec seems to want a - * warning only if no privilege bits actually change in the ACL. - * In practice that behavior seems much too noisy, as well as - * inconsistent with the GRANT case.) + * Restrict the operation to what we can actually grant or revoke, and + * issue a warning if appropriate. (For REVOKE this isn't quite what + * the spec says to do: the spec seems to want a warning only if no + * privilege bits actually change in the ACL. In practice that + * behavior seems much too noisy, as well as inconsistent with the + * GRANT case.) */ this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions); if (stmt->is_grant) @@ -1214,8 +1214,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) /* * Generate new ACL. * - * We need the members of both old and new ACLs so we can correct - * the shared dependency information. + * We need the members of both old and new ACLs so we can correct the + * shared dependency information. */ noldmembers = aclmembers(old_acl, &oldmembers); @@ -1449,7 +1449,7 @@ pg_class_aclmask(Oid table_oid, Oid roleid, Datum aclDatum; bool isNull; Acl *acl; - Oid ownerId; + Oid ownerId; /* * Must get the relation's tuple from pg_class @@ -1467,8 +1467,7 @@ pg_class_aclmask(Oid table_oid, Oid roleid, /* * Deny anyone permission to update a system catalog unless * pg_authid.rolcatupdate is set. (This is to let superusers protect - * themselves from themselves.) Also allow it if - * allowSystemTableMods. + * themselves from themselves.) Also allow it if allowSystemTableMods. * * As of 7.4 we have some updatable system views; those shouldn't be * protected in this way. Assume the view rules can take care of @@ -1543,7 +1542,7 @@ pg_database_aclmask(Oid db_oid, Oid roleid, Datum aclDatum; bool isNull; Acl *acl; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -1607,7 +1606,7 @@ pg_proc_aclmask(Oid proc_oid, Oid roleid, Datum aclDatum; bool isNull; Acl *acl; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -1622,7 +1621,7 @@ pg_proc_aclmask(Oid proc_oid, Oid roleid, if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("function with OID %u does not exist", proc_oid))); + errmsg("function with OID %u does not exist", proc_oid))); ownerId = ((Form_pg_proc) GETSTRUCT(tuple))->proowner; @@ -1663,7 +1662,7 @@ pg_language_aclmask(Oid lang_oid, Oid roleid, Datum aclDatum; bool isNull; Acl *acl; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. 
*/ if (superuser_arg(roleid)) @@ -1678,7 +1677,7 @@ pg_language_aclmask(Oid lang_oid, Oid roleid, if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("language with OID %u does not exist", lang_oid))); + errmsg("language with OID %u does not exist", lang_oid))); /* XXX pg_language should have an owner column, but doesn't */ ownerId = BOOTSTRAP_SUPERUSERID; @@ -1720,30 +1719,30 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid, Datum aclDatum; bool isNull; Acl *acl; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) return mask; /* - * If we have been assigned this namespace as a temp namespace, check - * to make sure we have CREATE TEMP permission on the database, and if - * so act as though we have all standard (but not GRANT OPTION) - * permissions on the namespace. If we don't have CREATE TEMP, act as - * though we have only USAGE (and not CREATE) rights. + * If we have been assigned this namespace as a temp namespace, check to + * make sure we have CREATE TEMP permission on the database, and if so act + * as though we have all standard (but not GRANT OPTION) permissions on + * the namespace. If we don't have CREATE TEMP, act as though we have + * only USAGE (and not CREATE) rights. * - * This may seem redundant given the check in InitTempTableNamespace, but - * it really isn't since current user ID may have changed since then. - * The upshot of this behavior is that a SECURITY DEFINER function can - * create temp tables that can then be accessed (if permission is - * granted) by code in the same session that doesn't have permissions - * to create temp tables. + * This may seem redundant given the check in InitTempTableNamespace, but it + * really isn't since current user ID may have changed since then. The + * upshot of this behavior is that a SECURITY DEFINER function can create + * temp tables that can then be accessed (if permission is granted) by + * code in the same session that doesn't have permissions to create temp + * tables. * * XXX Would it be safe to ereport a special error message as * InitTempTableNamespace does? Returning zero here means we'll get a - * generic "permission denied for schema pg_temp_N" message, which is - * not remarkably user-friendly. + * generic "permission denied for schema pg_temp_N" message, which is not + * remarkably user-friendly. */ if (isTempNamespace(nsp_oid)) { @@ -1807,7 +1806,7 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid, Datum aclDatum; bool isNull; Acl *acl; - Oid ownerId; + Oid ownerId; /* * Only shared relations can be stored in global space; don't let even @@ -1835,7 +1834,7 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid, if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace with OID %u does not exist", spc_oid))); + errmsg("tablespace with OID %u does not exist", spc_oid))); ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner; @@ -1951,7 +1950,7 @@ bool pg_class_ownercheck(Oid class_oid, Oid roleid) { HeapTuple tuple; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. 
*/ if (superuser_arg(roleid)) @@ -1963,7 +1962,7 @@ pg_class_ownercheck(Oid class_oid, Oid roleid) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("relation with OID %u does not exist", class_oid))); + errmsg("relation with OID %u does not exist", class_oid))); ownerId = ((Form_pg_class) GETSTRUCT(tuple))->relowner; @@ -1979,7 +1978,7 @@ bool pg_type_ownercheck(Oid type_oid, Oid roleid) { HeapTuple tuple; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -2007,7 +2006,7 @@ bool pg_oper_ownercheck(Oid oper_oid, Oid roleid) { HeapTuple tuple; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -2019,7 +2018,7 @@ pg_oper_ownercheck(Oid oper_oid, Oid roleid) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("operator with OID %u does not exist", oper_oid))); + errmsg("operator with OID %u does not exist", oper_oid))); ownerId = ((Form_pg_operator) GETSTRUCT(tuple))->oprowner; @@ -2035,7 +2034,7 @@ bool pg_proc_ownercheck(Oid proc_oid, Oid roleid) { HeapTuple tuple; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -2047,7 +2046,7 @@ pg_proc_ownercheck(Oid proc_oid, Oid roleid) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("function with OID %u does not exist", proc_oid))); + errmsg("function with OID %u does not exist", proc_oid))); ownerId = ((Form_pg_proc) GETSTRUCT(tuple))->proowner; @@ -2063,7 +2062,7 @@ bool pg_namespace_ownercheck(Oid nsp_oid, Oid roleid) { HeapTuple tuple; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -2094,7 +2093,7 @@ pg_tablespace_ownercheck(Oid spc_oid, Oid roleid) ScanKeyData entry[1]; HeapScanDesc scan; HeapTuple spctuple; - Oid spcowner; + Oid spcowner; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -2113,7 +2112,7 @@ pg_tablespace_ownercheck(Oid spc_oid, Oid roleid) if (!HeapTupleIsValid(spctuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace with OID %u does not exist", spc_oid))); + errmsg("tablespace with OID %u does not exist", spc_oid))); spcowner = ((Form_pg_tablespace) GETSTRUCT(spctuple))->spcowner; @@ -2130,7 +2129,7 @@ bool pg_opclass_ownercheck(Oid opc_oid, Oid roleid) { HeapTuple tuple; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -2162,7 +2161,7 @@ pg_database_ownercheck(Oid db_oid, Oid roleid) ScanKeyData entry[1]; HeapScanDesc scan; HeapTuple dbtuple; - Oid dba; + Oid dba; /* Superusers bypass all permission checking. */ if (superuser_arg(roleid)) @@ -2198,7 +2197,7 @@ bool pg_conversion_ownercheck(Oid conv_oid, Oid roleid) { HeapTuple tuple; - Oid ownerId; + Oid ownerId; /* Superusers bypass all permission checking. 
*/ if (superuser_arg(roleid)) @@ -2210,7 +2209,7 @@ pg_conversion_ownercheck(Oid conv_oid, Oid roleid) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("conversion with OID %u does not exist", conv_oid))); + errmsg("conversion with OID %u does not exist", conv_oid))); ownerId = ((Form_pg_conversion) GETSTRUCT(tuple))->conowner; diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c index 0648b578e9f..69313ea86a2 100644 --- a/src/backend/catalog/catalog.c +++ b/src/backend/catalog/catalog.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.63 2005/08/12 01:35:56 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.64 2005/10/15 02:49:12 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -233,7 +233,7 @@ IsReservedName(const char *name) * Since the OID is not immediately inserted into the table, there is a * race condition here; but a problem could occur only if someone else * managed to cycle through 2^32 OIDs and generate the same OID before we - * finish inserting our row. This seems unlikely to be a problem. Note + * finish inserting our row. This seems unlikely to be a problem. Note * that if we had to *commit* the row to end the race condition, the risk * would be rather higher; therefore we use SnapshotDirty in the test, * so that we will see uncommitted rows. @@ -259,9 +259,9 @@ GetNewOid(Relation relation) if (!OidIsValid(oidIndex)) { /* - * System catalogs that have OIDs should *always* have a unique - * OID index; we should only take this path for user tables. - * Give a warning if it looks like somebody forgot an index. + * System catalogs that have OIDs should *always* have a unique OID + * index; we should only take this path for user tables. Give a + * warning if it looks like somebody forgot an index. */ if (IsSystemRelation(relation)) elog(WARNING, "generating possibly-non-unique OID for \"%s\"", @@ -338,7 +338,7 @@ GetNewOidWithIndex(Relation relation, Relation indexrel) Oid GetNewRelFileNode(Oid reltablespace, bool relisshared, Relation pg_class) { - RelFileNode rnode; + RelFileNode rnode; char *rpath; int fd; bool collides; @@ -369,14 +369,14 @@ GetNewRelFileNode(Oid reltablespace, bool relisshared, Relation pg_class) { /* * Here we have a little bit of a dilemma: if errno is something - * other than ENOENT, should we declare a collision and loop? - * In particular one might think this advisable for, say, EPERM. + * other than ENOENT, should we declare a collision and loop? In + * particular one might think this advisable for, say, EPERM. * However there really shouldn't be any unreadable files in a * tablespace directory, and if the EPERM is actually complaining * that we can't read the directory itself, we'd be in an infinite * loop. In practice it seems best to go ahead regardless of the - * errno. If there is a colliding file we will get an smgr failure - * when we attempt to create the new relation file. + * errno. If there is a colliding file we will get an smgr + * failure when we attempt to create the new relation file. 
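The GetNewRelFileNode hunk above reflows a comment about probing for an already-existing file under the candidate relfilenode and treating any open() failure, not just ENOENT, as "no collision", leaving a later smgr create to fail if the guess was wrong. Below is a minimal standalone sketch of that probe-and-retry idea; pick_candidate(), choose_filenode() and the flat path layout are made up for illustration and are not the PostgreSQL code.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for the OID generator: just produces increasing ids. */
static unsigned int pick_candidate(void)
{
    static unsigned int next = 16384;
    return next++;
}

/*
 * Model of the probe loop: a candidate id is rejected only when a file with
 * that name can actually be opened; any open() failure (ENOENT, EPERM, ...)
 * is treated as "no collision", and the later create step is expected to
 * fail cleanly if the guess turns out to be wrong.
 */
static unsigned int choose_filenode(const char *dir)
{
    for (;;)
    {
        unsigned int cand = pick_candidate();
        char path[256];
        int fd;

        snprintf(path, sizeof(path), "%s/%u", dir, cand);
        fd = open(path, O_RDONLY);
        if (fd >= 0)
        {
            close(fd);          /* file exists: collision, try another id */
            continue;
        }
        return cand;            /* any error, not just ENOENT, means "use it" */
    }
}

int main(void)
{
    printf("chose %u\n", choose_filenode("/tmp"));
    return 0;
}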
*/ collides = false; } diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 8060055ff72..92d72af0f9c 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.46 2005/10/02 23:50:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.47 2005/10/15 02:49:12 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -155,9 +155,9 @@ performDeletion(const ObjectAddress *object, /* * Construct a list of objects that are reachable by AUTO or INTERNAL - * dependencies from the target object. These should be deleted - * silently, even if the actual deletion pass first reaches one of - * them via a non-auto dependency. + * dependencies from the target object. These should be deleted silently, + * even if the actual deletion pass first reaches one of them via a + * non-auto dependency. */ init_object_addresses(&oktodelete); @@ -167,9 +167,9 @@ performDeletion(const ObjectAddress *object, NULL, &oktodelete, depRel)) ereport(ERROR, (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), - errmsg("cannot drop %s because other objects depend on it", - objDescription), - errhint("Use DROP ... CASCADE to drop the dependent objects too."))); + errmsg("cannot drop %s because other objects depend on it", + objDescription), + errhint("Use DROP ... CASCADE to drop the dependent objects too."))); term_object_addresses(&oktodelete); @@ -209,17 +209,17 @@ deleteWhatDependsOn(const ObjectAddress *object, /* * Construct a list of objects that are reachable by AUTO or INTERNAL - * dependencies from the target object. These should be deleted - * silently, even if the actual deletion pass first reaches one of - * them via a non-auto dependency. + * dependencies from the target object. These should be deleted silently, + * even if the actual deletion pass first reaches one of them via a + * non-auto dependency. */ init_object_addresses(&oktodelete); findAutoDeletableObjects(object, &oktodelete, depRel); /* - * Now invoke only step 2 of recursiveDeletion: just recurse to the - * stuff dependent on the given object. + * Now invoke only step 2 of recursiveDeletion: just recurse to the stuff + * dependent on the given object. */ if (!deleteDependentObjects(object, objDescription, DROP_CASCADE, @@ -263,9 +263,9 @@ findAutoDeletableObjects(const ObjectAddress *object, ObjectAddress otherObject; /* - * If this object is already in oktodelete, then we already visited - * it; don't do so again (this prevents infinite recursion if there's - * a loop in pg_depend). Otherwise, add it. + * If this object is already in oktodelete, then we already visited it; + * don't do so again (this prevents infinite recursion if there's a loop + * in pg_depend). Otherwise, add it. */ if (object_address_present(object, oktodelete)) return; @@ -273,11 +273,11 @@ findAutoDeletableObjects(const ObjectAddress *object, /* * Scan pg_depend records that link to this object, showing the things - * that depend on it. For each one that is AUTO or INTERNAL, visit - * the referencing object. + * that depend on it. For each one that is AUTO or INTERNAL, visit the + * referencing object. * - * When dropping a whole object (subId = 0), find pg_depend records for - * its sub-objects too. + * When dropping a whole object (subId = 0), find pg_depend records for its + * sub-objects too. 
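The findAutoDeletableObjects comments reflowed above describe walking pg_depend for AUTO and INTERNAL edges while recording visited objects so a dependency loop cannot recurse forever. The toy program below models only that visited-set guard over a hard-coded adjacency matrix; the names and the graph are illustrative, not PostgreSQL's data structures.

#include <stdbool.h>
#include <stdio.h>

#define NOBJ 5

typedef enum { DEP_NORMAL, DEP_AUTO, DEP_INTERNAL } DepType;

/* edge[i][j]: dependency type of object j on object i, or -1 for "no edge" */
static int edge[NOBJ][NOBJ] = {
    { -1, DEP_AUTO, -1, -1, -1 },
    { -1, -1, DEP_INTERNAL, -1, -1 },
    { -1, DEP_AUTO, -1, -1, -1 },   /* 2 -> 1 closes a loop on purpose */
    { -1, -1, -1, -1, -1 },
    { -1, -1, -1, -1, -1 },
};

static bool visited[NOBJ];

/*
 * Collect everything reachable from 'obj' through AUTO/INTERNAL edges.
 * The visited[] test is what keeps a loop in the dependency graph from
 * recursing forever, mirroring the oktodelete check in the comment.
 */
static void collect_auto_deletable(int obj)
{
    if (visited[obj])
        return;
    visited[obj] = true;

    for (int other = 0; other < NOBJ; other++)
        if (edge[obj][other] == DEP_AUTO || edge[obj][other] == DEP_INTERNAL)
            collect_auto_deletable(other);
}

int main(void)
{
    collect_auto_deletable(0);
    for (int i = 0; i < NOBJ; i++)
        if (visited[i])
            printf("object %d may be dropped silently\n", i);
    return 0;
}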
*/ ScanKeyInit(&key[0], Anum_pg_depend_refclassid, @@ -322,8 +322,8 @@ findAutoDeletableObjects(const ObjectAddress *object, /* * For a PIN dependency we just ereport immediately; there - * won't be any others to examine, and we aren't ever - * going to let the user delete it. + * won't be any others to examine, and we aren't ever going to + * let the user delete it. */ ereport(ERROR, (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), @@ -406,13 +406,13 @@ recursiveDeletion(const ObjectAddress *object, objDescription = getObjectDescription(object); /* - * Step 1: find and remove pg_depend records that link from this - * object to others. We have to do this anyway, and doing it first - * ensures that we avoid infinite recursion in the case of cycles. - * Also, some dependency types require extra processing here. + * Step 1: find and remove pg_depend records that link from this object to + * others. We have to do this anyway, and doing it first ensures that we + * avoid infinite recursion in the case of cycles. Also, some dependency + * types require extra processing here. * - * When dropping a whole object (subId = 0), remove all pg_depend records - * for its sub-objects too. + * When dropping a whole object (subId = 0), remove all pg_depend records for + * its sub-objects too. */ ScanKeyInit(&key[0], Anum_pg_depend_classid, @@ -456,41 +456,41 @@ recursiveDeletion(const ObjectAddress *object, * This object is part of the internal implementation of * another object. We have three cases: * - * 1. At the outermost recursion level, disallow the DROP. - * (We just ereport here, rather than proceeding, since no - * other dependencies are likely to be interesting.) + * 1. At the outermost recursion level, disallow the DROP. (We + * just ereport here, rather than proceeding, since no other + * dependencies are likely to be interesting.) */ if (callingObject == NULL) { char *otherObjDesc = getObjectDescription(&otherObject); ereport(ERROR, - (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), - errmsg("cannot drop %s because %s requires it", - objDescription, otherObjDesc), - errhint("You may drop %s instead.", - otherObjDesc))); + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("cannot drop %s because %s requires it", + objDescription, otherObjDesc), + errhint("You may drop %s instead.", + otherObjDesc))); } /* - * 2. When recursing from the other end of this - * dependency, it's okay to continue with the deletion. - * This holds when recursing from a whole object that - * includes the nominal other end as a component, too. + * 2. When recursing from the other end of this dependency, + * it's okay to continue with the deletion. This holds when + * recursing from a whole object that includes the nominal + * other end as a component, too. */ if (callingObject->classId == otherObject.classId && callingObject->objectId == otherObject.objectId && - (callingObject->objectSubId == otherObject.objectSubId || - callingObject->objectSubId == 0)) + (callingObject->objectSubId == otherObject.objectSubId || + callingObject->objectSubId == 0)) break; /* * 3. When recursing from anyplace else, transform this - * deletion request into a delete of the other object. - * (This will be an error condition iff RESTRICT mode.) In - * this case we finish deleting my dependencies except for - * the INTERNAL link, which will be needed to cause the - * owning object to recurse back to me. + * deletion request into a delete of the other object. (This + * will be an error condition iff RESTRICT mode.) 
In this case + * we finish deleting my dependencies except for the INTERNAL + * link, which will be needed to cause the owning object to + * recurse back to me. */ if (amOwned) /* shouldn't happen */ elog(ERROR, "multiple INTERNAL dependencies for %s", @@ -502,8 +502,8 @@ recursiveDeletion(const ObjectAddress *object, case DEPENDENCY_PIN: /* - * Should not happen; PIN dependencies should have zeroes - * in the depender fields... + * Should not happen; PIN dependencies should have zeroes in + * the depender fields... */ elog(ERROR, "incorrect use of PIN dependency with %s", objDescription); @@ -521,10 +521,10 @@ recursiveDeletion(const ObjectAddress *object, systable_endscan(scan); /* - * CommandCounterIncrement here to ensure that preceding changes are - * all visible; in particular, that the above deletions of pg_depend - * entries are visible. That prevents infinite recursion in case of a - * dependency loop (which is perfectly legal). + * CommandCounterIncrement here to ensure that preceding changes are all + * visible; in particular, that the above deletions of pg_depend entries + * are visible. That prevents infinite recursion in case of a dependency + * loop (which is perfectly legal). */ CommandCounterIncrement(); @@ -562,11 +562,11 @@ recursiveDeletion(const ObjectAddress *object, } /* - * Step 2: scan pg_depend records that link to this object, showing - * the things that depend on it. Recursively delete those things. - * Note it's important to delete the dependent objects before the - * referenced one, since the deletion routines might do things like - * try to update the pg_class record when deleting a check constraint. + * Step 2: scan pg_depend records that link to this object, showing the + * things that depend on it. Recursively delete those things. Note it's + * important to delete the dependent objects before the referenced one, + * since the deletion routines might do things like try to update the + * pg_class record when deleting a check constraint. */ if (!deleteDependentObjects(object, objDescription, behavior, msglevel, @@ -584,23 +584,21 @@ recursiveDeletion(const ObjectAddress *object, doDeletion(object); /* - * Delete any comments associated with this object. (This is a - * convenient place to do it instead of having every object type know - * to do it.) + * Delete any comments associated with this object. (This is a convenient + * place to do it instead of having every object type know to do it.) */ DeleteComments(object->objectId, object->classId, object->objectSubId); /* - * Delete shared dependency references related to this object. - * Sub-objects (columns) don't have dependencies on global objects, - * so skip them. + * Delete shared dependency references related to this object. Sub-objects + * (columns) don't have dependencies on global objects, so skip them. */ if (object->objectSubId == 0) deleteSharedDependencyRecordsFor(object->classId, object->objectId); /* - * CommandCounterIncrement here to ensure that preceding changes are - * all visible. + * CommandCounterIncrement here to ensure that preceding changes are all + * visible. */ CommandCounterIncrement(); @@ -691,10 +689,10 @@ deleteDependentObjects(const ObjectAddress *object, case DEPENDENCY_NORMAL: /* - * Perhaps there was another dependency path that would - * have allowed silent deletion of the otherObject, had we - * only taken that path first. In that case, act like this - * link is AUTO, too. 
+ * Perhaps there was another dependency path that would have + * allowed silent deletion of the otherObject, had we only + * taken that path first. In that case, act like this link is + * AUTO, too. */ if (object_address_present(&otherObject, oktodelete)) ereport(DEBUG2, @@ -1023,7 +1021,7 @@ find_expr_references_walker(Node *node, var->varattno > list_length(rte->joinaliasvars)) elog(ERROR, "invalid varattno %d", var->varattno); find_expr_references_walker((Node *) list_nth(rte->joinaliasvars, - var->varattno - 1), + var->varattno - 1), context); list_free(context->rtables); context->rtables = save_rtables; @@ -1037,9 +1035,9 @@ find_expr_references_walker(Node *node, /* * If it's a regclass or similar literal referring to an existing - * object, add a reference to that object. (Currently, only the - * regclass case has any likely use, but we may as well handle all - * the OID-alias datatypes consistently.) + * object, add a reference to that object. (Currently, only the + * regclass case has any likely use, but we may as well handle all the + * OID-alias datatypes consistently.) */ if (!con->constisnull) { @@ -1156,11 +1154,10 @@ find_expr_references_walker(Node *node, bool result; /* - * Add whole-relation refs for each plain relation mentioned in - * the subquery's rtable. (Note: query_tree_walker takes care of - * recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need - * to do that here. But keep it from looking at join alias - * lists.) + * Add whole-relation refs for each plain relation mentioned in the + * subquery's rtable. (Note: query_tree_walker takes care of + * recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need to do + * that here. But keep it from looking at join alias lists.) */ foreach(rtable, query->rtable) { @@ -1215,11 +1212,11 @@ eliminate_duplicate_dependencies(ObjectAddresses *addrs) continue; /* identical, so drop thisobj */ /* - * If we have a whole-object reference and a reference to a - * part of the same object, we don't need the whole-object - * reference (for example, we don't need to reference both - * table foo and column foo.bar). The whole-object reference - * will always appear first in the sorted list. + * If we have a whole-object reference and a reference to a part + * of the same object, we don't need the whole-object reference + * (for example, we don't need to reference both table foo and + * column foo.bar). The whole-object reference will always appear + * first in the sorted list. 
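The eliminate_duplicate_dependencies comment just above explains that after sorting, a whole-object reference (objectSubId 0) appears before column-level references to the same object and makes them redundant. The following self-contained sketch shows that sort-then-subsume pass on a small struct of the same shape; it is a simplified model, not the backend routine.

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    int classId;
    int objectId;
    int objectSubId;            /* 0 means the whole object */
} ObjAddr;

/* Sort so entries for the same object are adjacent, whole-object first. */
static int cmp(const void *a, const void *b)
{
    const ObjAddr *x = a, *y = b;

    if (x->classId != y->classId) return x->classId - y->classId;
    if (x->objectId != y->objectId) return x->objectId - y->objectId;
    return x->objectSubId - y->objectSubId;
}

/*
 * One pass over the sorted array: drop exact duplicates, and drop a
 * column-level reference when a whole-object reference (subId 0) to the
 * same object precedes it, which is the rule the reflowed comment states.
 */
static int dedupe(ObjAddr *a, int n)
{
    int out = 0;

    for (int i = 0; i < n; i++)
    {
        if (out > 0 &&
            a[out - 1].classId == a[i].classId &&
            a[out - 1].objectId == a[i].objectId &&
            (a[out - 1].objectSubId == a[i].objectSubId ||
             a[out - 1].objectSubId == 0))
            continue;           /* duplicate, or subsumed by whole object */
        a[out++] = a[i];
    }
    return out;
}

int main(void)
{
    ObjAddr refs[] = {
        {1259, 42, 3},          /* column 3 of relation 42 */
        {1259, 42, 0},          /* whole relation 42 */
        {1259, 42, 3},          /* duplicate column reference */
        {1247, 77, 0},          /* some other object */
    };
    int n = sizeof(refs) / sizeof(refs[0]);

    qsort(refs, n, sizeof(ObjAddr), cmp);
    n = dedupe(refs, n);
    for (int i = 0; i < n; i++)
        printf("%d/%d/%d\n", refs[i].classId, refs[i].objectId, refs[i].objectSubId);
    return 0;
}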
*/ if (priorobj->objectSubId == 0) { @@ -1469,8 +1466,8 @@ getObjectDescription(const ObjectAddress *object) getRelationDescription(&buffer, object->objectId); if (object->objectSubId != 0) appendStringInfo(&buffer, _(" column %s"), - get_relid_attribute_name(object->objectId, - object->objectSubId)); + get_relid_attribute_name(object->objectId, + object->objectSubId)); break; case OCLASS_PROC: @@ -1566,13 +1563,13 @@ getObjectDescription(const ObjectAddress *object) HeapTuple conTup; conTup = SearchSysCache(CONOID, - ObjectIdGetDatum(object->objectId), + ObjectIdGetDatum(object->objectId), 0, 0, 0); if (!HeapTupleIsValid(conTup)) elog(ERROR, "cache lookup failed for conversion %u", object->objectId); appendStringInfo(&buffer, _("conversion %s"), - NameStr(((Form_pg_conversion) GETSTRUCT(conTup))->conname)); + NameStr(((Form_pg_conversion) GETSTRUCT(conTup))->conname)); ReleaseSysCache(conTup); break; } @@ -1621,13 +1618,13 @@ getObjectDescription(const ObjectAddress *object) HeapTuple langTup; langTup = SearchSysCache(LANGOID, - ObjectIdGetDatum(object->objectId), + ObjectIdGetDatum(object->objectId), 0, 0, 0); if (!HeapTupleIsValid(langTup)) elog(ERROR, "cache lookup failed for language %u", object->objectId); appendStringInfo(&buffer, _("language %s"), - NameStr(((Form_pg_language) GETSTRUCT(langTup))->lanname)); + NameStr(((Form_pg_language) GETSTRUCT(langTup))->lanname)); ReleaseSysCache(langTup); break; } @@ -1646,7 +1643,7 @@ getObjectDescription(const ObjectAddress *object) char *nspname; opcTup = SearchSysCache(CLAOID, - ObjectIdGetDatum(object->objectId), + ObjectIdGetDatum(object->objectId), 0, 0, 0); if (!HeapTupleIsValid(opcTup)) elog(ERROR, "cache lookup failed for opclass %u", @@ -1669,7 +1666,7 @@ getObjectDescription(const ObjectAddress *object) appendStringInfo(&buffer, _("operator class %s for access method %s"), quote_qualified_identifier(nspname, - NameStr(opcForm->opcname)), + NameStr(opcForm->opcname)), NameStr(amForm->amname)); ReleaseSysCache(amTup); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index f5f030695be..15c0129c613 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.290 2005/08/26 03:07:12 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.291 2005/10/15 02:49:12 momjian Exp $ * * * INTERFACE ROUTINES @@ -67,7 +67,7 @@ static void AddNewRelationTuple(Relation pg_class_desc, Oid new_rel_oid, Oid new_type_oid, Oid relowner, char relkind); -static Oid AddNewRelationType(const char *typeName, +static Oid AddNewRelationType(const char *typeName, Oid typeNamespace, Oid new_rel_oid, char new_rel_kind); @@ -217,23 +217,24 @@ heap_create(const char *relname, * sanity checks */ if (!allow_system_table_mods && - (IsSystemNamespace(relnamespace) || IsToastNamespace(relnamespace)) && + (IsSystemNamespace(relnamespace) || IsToastNamespace(relnamespace)) && IsNormalProcessingMode()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to create \"%s.%s\"", get_namespace_name(relnamespace), relname), - errdetail("System catalog modifications are currently disallowed."))); + errdetail("System catalog modifications are currently disallowed."))); /* - * Decide if we need storage or not, and handle a couple other - * special cases for particular relkinds. + * Decide if we need storage or not, and handle a couple other special + * cases for particular relkinds. 
*/ switch (relkind) { case RELKIND_VIEW: case RELKIND_COMPOSITE_TYPE: create_storage = false; + /* * Force reltablespace to zero if the relation has no physical * storage. This is mainly just for cleanliness' sake. @@ -242,6 +243,7 @@ heap_create(const char *relname, break; case RELKIND_SEQUENCE: create_storage = true; + /* * Force reltablespace to zero for sequences, since we don't * support moving them around into different tablespaces. @@ -257,8 +259,8 @@ heap_create(const char *relname, * Never allow a pg_class entry to explicitly specify the database's * default tablespace in reltablespace; force it to zero instead. This * ensures that if the database is cloned with a different default - * tablespace, the pg_class entry will still match where CREATE - * DATABASE will put the physically copied relation. + * tablespace, the pg_class entry will still match where CREATE DATABASE + * will put the physically copied relation. * * Yes, this is a bit of a hack. */ @@ -276,8 +278,7 @@ heap_create(const char *relname, shared_relation); /* - * have the storage manager create the relation's disk file, if - * needed. + * have the storage manager create the relation's disk file, if needed. */ if (create_storage) { @@ -453,8 +454,8 @@ AddNewAttributeTuples(Oid new_rel_oid, indstate = CatalogOpenIndexes(rel); /* - * First we add the user attributes. This is also a convenient place - * to add dependencies on their datatypes. + * First we add the user attributes. This is also a convenient place to + * add dependencies on their datatypes. */ dpp = tupdesc->attrs; for (i = 0; i < natts; i++) @@ -488,10 +489,9 @@ AddNewAttributeTuples(Oid new_rel_oid, } /* - * Next we add the system attributes. Skip OID if rel has no OIDs. - * Skip all for a view or type relation. We don't bother with making - * datatype dependencies here, since presumably all these types are - * pinned. + * Next we add the system attributes. Skip OID if rel has no OIDs. Skip + * all for a view or type relation. We don't bother with making datatype + * dependencies here, since presumably all these types are pinned. */ if (relkind != RELKIND_VIEW && relkind != RELKIND_COMPOSITE_TYPE) { @@ -563,8 +563,8 @@ AddNewRelationTuple(Relation pg_class_desc, HeapTuple tup; /* - * first we update some of the information in our uncataloged - * relation's relation descriptor. + * first we update some of the information in our uncataloged relation's + * relation descriptor. */ new_rel_reltup = new_rel_desc->rd_rel; @@ -632,28 +632,28 @@ AddNewRelationType(const char *typeName, char new_rel_kind) { return - TypeCreate(typeName, /* type name */ - typeNamespace, /* type namespace */ - new_rel_oid, /* relation oid */ + TypeCreate(typeName, /* type name */ + typeNamespace, /* type namespace */ + new_rel_oid, /* relation oid */ new_rel_kind, /* relation kind */ - -1, /* internal size (varlena) */ - 'c', /* type-type (complex) */ - ',', /* default array delimiter */ - F_RECORD_IN, /* input procedure */ + -1, /* internal size (varlena) */ + 'c', /* type-type (complex) */ + ',', /* default array delimiter */ + F_RECORD_IN, /* input procedure */ F_RECORD_OUT, /* output procedure */ - F_RECORD_RECV, /* receive procedure */ - F_RECORD_SEND, /* send procedure */ - InvalidOid, /* analyze procedure - default */ - InvalidOid, /* array element type - irrelevant */ - InvalidOid, /* domain base type - irrelevant */ - NULL, /* default value - none */ - NULL, /* default binary representation */ - false, /* passed by reference */ - 'd', /* alignment - must be the largest! 
*/ - 'x', /* fully TOASTable */ - -1, /* typmod */ - 0, /* array dimensions for typBaseType */ - false); /* Type NOT NULL */ + F_RECORD_RECV, /* receive procedure */ + F_RECORD_SEND, /* send procedure */ + InvalidOid, /* analyze procedure - default */ + InvalidOid, /* array element type - irrelevant */ + InvalidOid, /* domain base type - irrelevant */ + NULL, /* default value - none */ + NULL, /* default binary representation */ + false, /* passed by reference */ + 'd', /* alignment - must be the largest! */ + 'x', /* fully TOASTable */ + -1, /* typmod */ + 0, /* array dimensions for typBaseType */ + false); /* Type NOT NULL */ } /* -------------------------------- @@ -697,17 +697,17 @@ heap_create_with_catalog(const char *relname, /* * Allocate an OID for the relation, unless we were told what to use. * - * The OID will be the relfilenode as well, so make sure it doesn't - * collide with either pg_class OIDs or existing physical files. + * The OID will be the relfilenode as well, so make sure it doesn't collide + * with either pg_class OIDs or existing physical files. */ if (!OidIsValid(relid)) relid = GetNewRelFileNode(reltablespace, shared_relation, pg_class_desc); /* - * Create the relcache entry (mostly dummy at this point) and the - * physical disk file. (If we fail further down, it's the smgr's - * responsibility to remove the disk file again.) + * Create the relcache entry (mostly dummy at this point) and the physical + * disk file. (If we fail further down, it's the smgr's responsibility to + * remove the disk file again.) */ new_rel_desc = heap_create(relname, relnamespace, @@ -724,8 +724,8 @@ heap_create_with_catalog(const char *relname, * since defining a relation also defines a complex type, we add a new * system type corresponding to the new relation. * - * NOTE: we could get a unique-index failure here, in case the same name - * has already been used for a type. + * NOTE: we could get a unique-index failure here, in case the same name has + * already been used for a type. */ new_type_oid = AddNewRelationType(relname, relnamespace, @@ -735,9 +735,9 @@ heap_create_with_catalog(const char *relname, /* * now create an entry in pg_class for the relation. * - * NOTE: we could get a unique-index failure here, in case someone else - * is creating the same relation name in parallel but hadn't committed - * yet when we checked for a duplicate name above. + * NOTE: we could get a unique-index failure here, in case someone else is + * creating the same relation name in parallel but hadn't committed yet + * when we checked for a duplicate name above. */ AddNewRelationTuple(pg_class_desc, new_rel_desc, @@ -747,8 +747,7 @@ heap_create_with_catalog(const char *relname, relkind); /* - * now add tuples to pg_attribute for the attributes in our new - * relation. + * now add tuples to pg_attribute for the attributes in our new relation. */ AddNewAttributeTuples(relid, new_rel_desc->rd_att, relkind, oidislocal, oidinhcount); @@ -779,10 +778,9 @@ heap_create_with_catalog(const char *relname, /* * store constraints and defaults passed in the tupdesc, if any. * - * NB: this may do a CommandCounterIncrement and rebuild the relcache - * entry, so the relation must be valid and self-consistent at this - * point. In particular, there are not yet constraints and defaults - * anywhere. + * NB: this may do a CommandCounterIncrement and rebuild the relcache entry, + * so the relation must be valid and self-consistent at this point. In + * particular, there are not yet constraints and defaults anywhere. 
*/ StoreConstraints(new_rel_desc, tupdesc); @@ -793,8 +791,8 @@ heap_create_with_catalog(const char *relname, register_on_commit_action(relid, oncommit); /* - * ok, the relation has been cataloged, so close our relations and - * return the OID of the newly created relation. + * ok, the relation has been cataloged, so close our relations and return + * the OID of the newly created relation. */ heap_close(new_rel_desc, NoLock); /* do not unlock till end of xact */ heap_close(pg_class_desc, RowExclusiveLock); @@ -923,11 +921,11 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) char newattname[NAMEDATALEN]; /* - * Grab an exclusive lock on the target table, which we will NOT - * release until end of transaction. (In the simple case where we are - * directly dropping this column, AlterTableDropColumn already did - * this ... but when cascading from a drop of some other object, we - * may not have any lock.) + * Grab an exclusive lock on the target table, which we will NOT release + * until end of transaction. (In the simple case where we are directly + * dropping this column, AlterTableDropColumn already did this ... but + * when cascading from a drop of some other object, we may not have any + * lock.) */ rel = relation_open(relid, AccessExclusiveLock); @@ -957,12 +955,12 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) /* * Set the type OID to invalid. A dropped attribute's type link - * cannot be relied on (once the attribute is dropped, the type - * might be too). Fortunately we do not need the type row --- the - * only really essential information is the type's typlen and - * typalign, which are preserved in the attribute's attlen and - * attalign. We set atttypid to zero here as a means of catching - * code that incorrectly expects it to be valid. + * cannot be relied on (once the attribute is dropped, the type might + * be too). Fortunately we do not need the type row --- the only + * really essential information is the type's typlen and typalign, + * which are preserved in the attribute's attlen and attalign. We set + * atttypid to zero here as a means of catching code that incorrectly + * expects it to be valid. */ attStruct->atttypid = InvalidOid; @@ -973,8 +971,7 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) attStruct->attstattarget = 0; /* - * Change the column name to something that isn't likely to - * conflict + * Change the column name to something that isn't likely to conflict */ snprintf(newattname, sizeof(newattname), "........pg.dropped.%d........", attnum); @@ -987,9 +984,9 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) } /* - * Because updating the pg_attribute row will trigger a relcache flush - * for the target relation, we need not do anything else to notify - * other backends of the change. + * Because updating the pg_attribute row will trigger a relcache flush for + * the target relation, we need not do anything else to notify other + * backends of the change. */ heap_close(attr_rel, RowExclusiveLock); @@ -1118,8 +1115,8 @@ RemoveAttrDefaultById(Oid attrdefId) CatalogUpdateIndexes(attr_rel, tuple); /* - * Our update of the pg_attribute row will force a relcache rebuild, - * so there's nothing else to do here. + * Our update of the pg_attribute row will force a relcache rebuild, so + * there's nothing else to do here. */ heap_close(attr_rel, RowExclusiveLock); @@ -1157,9 +1154,9 @@ heap_drop_with_catalog(Oid relid) } /* - * Close relcache entry, but *keep* AccessExclusiveLock on the - * relation until transaction commit. 
This ensures no one else will - * try to do something with the doomed relation. + * Close relcache entry, but *keep* AccessExclusiveLock on the relation + * until transaction commit. This ensures no one else will try to do + * something with the doomed relation. */ relation_close(rel, NoLock); @@ -1170,10 +1167,10 @@ heap_drop_with_catalog(Oid relid) /* * Flush the relation from the relcache. We want to do this before - * starting to remove catalog entries, just to be certain that no - * relcache entry rebuild will happen partway through. (That should - * not really matter, since we don't do CommandCounterIncrement here, - * but let's be safe.) + * starting to remove catalog entries, just to be certain that no relcache + * entry rebuild will happen partway through. (That should not really + * matter, since we don't do CommandCounterIncrement here, but let's be + * safe.) */ RelationForgetRelation(relid); @@ -1228,8 +1225,8 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin) * deparse it */ adsrc = deparse_expression(expr, - deparse_context_for(RelationGetRelationName(rel), - RelationGetRelid(rel)), + deparse_context_for(RelationGetRelationName(rel), + RelationGetRelid(rel)), false, false); /* @@ -1238,9 +1235,9 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin) values[Anum_pg_attrdef_adrelid - 1] = RelationGetRelid(rel); values[Anum_pg_attrdef_adnum - 1] = attnum; values[Anum_pg_attrdef_adbin - 1] = DirectFunctionCall1(textin, - CStringGetDatum(adbin)); + CStringGetDatum(adbin)); values[Anum_pg_attrdef_adsrc - 1] = DirectFunctionCall1(textin, - CStringGetDatum(adsrc)); + CStringGetDatum(adsrc)); adrel = heap_open(AttrDefaultRelationId, RowExclusiveLock); @@ -1285,8 +1282,8 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin) heap_freetuple(atttup); /* - * Make a dependency so that the pg_attrdef entry goes away if the - * column (or whole table) is deleted. + * Make a dependency so that the pg_attrdef entry goes away if the column + * (or whole table) is deleted. */ colobject.classId = RelationRelationId; colobject.objectId = RelationGetRelid(rel); @@ -1325,16 +1322,15 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin) * deparse it */ ccsrc = deparse_expression(expr, - deparse_context_for(RelationGetRelationName(rel), - RelationGetRelid(rel)), + deparse_context_for(RelationGetRelationName(rel), + RelationGetRelid(rel)), false, false); /* * Find columns of rel that are used in ccbin * - * NB: pull_var_clause is okay here only because we don't allow - * subselects in check constraints; it would fail to examine the - * contents of subselects. + * NB: pull_var_clause is okay here only because we don't allow subselects in + * check constraints; it would fail to examine the contents of subselects. */ varList = pull_var_clause(expr, false); keycount = list_length(varList); @@ -1405,10 +1401,9 @@ StoreConstraints(Relation rel, TupleDesc tupdesc) return; /* nothing to do */ /* - * Deparsing of constraint expressions will fail unless the - * just-created pg_attribute tuples for this relation are made - * visible. So, bump the command counter. CAUTION: this will cause a - * relcache entry rebuild. + * Deparsing of constraint expressions will fail unless the just-created + * pg_attribute tuples for this relation are made visible. So, bump the + * command counter. CAUTION: this will cause a relcache entry rebuild. 
*/ CommandCounterIncrement(); @@ -1483,8 +1478,8 @@ AddRelationRawConstraints(Relation rel, } /* - * Create a dummy ParseState and insert the target relation as its - * sole rangetable entry. We need a ParseState for transformExpr. + * Create a dummy ParseState and insert the target relation as its sole + * rangetable entry. We need a ParseState for transformExpr. */ pstate = make_parsestate(NULL); rte = addRangeTableEntryForRelation(pstate, @@ -1546,8 +1541,8 @@ AddRelationRawConstraints(Relation rel, if (list_length(pstate->p_rtable) != 1) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("only table \"%s\" can be referenced in check constraint", - RelationGetRelationName(rel)))); + errmsg("only table \"%s\" can be referenced in check constraint", + RelationGetRelationName(rel)))); /* * No subplans or aggregates, either... @@ -1559,7 +1554,7 @@ AddRelationRawConstraints(Relation rel, if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in check constraint"))); + errmsg("cannot use aggregate function in check constraint"))); /* * Check name uniqueness, or generate a name if none was given. @@ -1576,8 +1571,8 @@ AddRelationRawConstraints(Relation rel, ccname)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("constraint \"%s\" for relation \"%s\" already exists", - ccname, RelationGetRelationName(rel)))); + errmsg("constraint \"%s\" for relation \"%s\" already exists", + ccname, RelationGetRelationName(rel)))); /* Check against other new constraints */ /* Needed because we don't do CommandCounterIncrement in loop */ foreach(cell2, checknames) @@ -1585,20 +1580,19 @@ AddRelationRawConstraints(Relation rel, if (strcmp((char *) lfirst(cell2), ccname) == 0) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("check constraint \"%s\" already exists", - ccname))); + errmsg("check constraint \"%s\" already exists", + ccname))); } } else { /* - * When generating a name, we want to create "tab_col_check" - * for a column constraint and "tab_check" for a table - * constraint. We no longer have any info about the syntactic - * positioning of the constraint phrase, so we approximate - * this by seeing whether the expression references more than - * one column. (If the user played by the rules, the result - * is the same...) + * When generating a name, we want to create "tab_col_check" for a + * column constraint and "tab_check" for a table constraint. We + * no longer have any info about the syntactic positioning of the + * constraint phrase, so we approximate this by seeing whether the + * expression references more than one column. (If the user + * played by the rules, the result is the same...) * * Note: pull_var_clause() doesn't descend into sublinks, but we * eliminated those above; and anyway this only needs to be an @@ -1644,11 +1638,11 @@ AddRelationRawConstraints(Relation rel, } /* - * Update the count of constraints in the relation's pg_class tuple. - * We do this even if there was no change, in order to ensure that an - * SI update message is sent out for the pg_class tuple, which will - * force other backends to rebuild their relcache entries for the rel. - * (This is critical if we added defaults but not constraints.) + * Update the count of constraints in the relation's pg_class tuple. We do + * this even if there was no change, in order to ensure that an SI update + * message is sent out for the pg_class tuple, which will force other + * backends to rebuild their relcache entries for the rel. 
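The AddRelationRawConstraints hunk a little earlier describes generating "tab_col_check" for a single-column check constraint and "tab_check" otherwise, judged by how many columns the expression references. A tiny standalone sketch of just that naming rule follows; make_check_name() is an invented helper, and the uniqueness handling the real code needs is deliberately left out.

#include <stdio.h>

/*
 * Toy version of the naming rule: a constraint whose expression references
 * exactly one column becomes "<table>_<column>_check", anything else just
 * "<table>_check".  Making the result unique is omitted here.
 */
static void make_check_name(char *buf, size_t len,
                            const char *table, const char *onlycol)
{
    if (onlycol != NULL)
        snprintf(buf, len, "%s_%s_check", table, onlycol);
    else
        snprintf(buf, len, "%s_check", table);
}

int main(void)
{
    char name[128];

    make_check_name(name, sizeof(name), "orders", "qty");
    printf("%s\n", name);       /* orders_qty_check */
    make_check_name(name, sizeof(name), "orders", NULL);
    printf("%s\n", name);       /* orders_check */
    return 0;
}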
(This is + * critical if we added defaults but not constraints.) */ SetRelationNumChecks(rel, numchecks); @@ -1734,7 +1728,7 @@ cookDefault(ParseState *pstate, if (contain_var_clause(expr)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("cannot use column references in default expression"))); + errmsg("cannot use column references in default expression"))); /* * It can't return a set either. @@ -1754,12 +1748,12 @@ cookDefault(ParseState *pstate, if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in default expression"))); + errmsg("cannot use aggregate function in default expression"))); /* - * Coerce the expression to the correct type and typmod, if given. - * This should match the parser's processing of non-defaulted - * expressions --- see updateTargetListEntry(). + * Coerce the expression to the correct type and typmod, if given. This + * should match the parser's processing of non-defaulted expressions --- + * see updateTargetListEntry(). */ if (OidIsValid(atttypid)) { @@ -1777,7 +1771,7 @@ cookDefault(ParseState *pstate, attname, format_type_be(atttypid), format_type_be(type_id)), - errhint("You will need to rewrite or cast the expression."))); + errhint("You will need to rewrite or cast the expression."))); } return expr; @@ -1930,9 +1924,9 @@ RelationTruncateIndexes(Oid heapId) index_build(heapRelation, currentIndex, indexInfo); /* - * index_build will close both the heap and index relations (but - * not give up the locks we hold on them). We're done with this - * index, but we must re-open the heap rel. + * index_build will close both the heap and index relations (but not + * give up the locks we hold on them). We're done with this index, + * but we must re-open the heap rel. */ heapRelation = heap_open(heapId, NoLock); } @@ -1947,7 +1941,7 @@ RelationTruncateIndexes(Oid heapId) * This routine deletes all data within all the specified relations. * * This is not transaction-safe! There is another, transaction-safe - * implementation in commands/tablecmds.c. We now use this only for + * implementation in commands/tablecmds.c. We now use this only for * ON COMMIT truncation of temporary tables, where it doesn't matter. */ void @@ -2039,8 +2033,8 @@ heap_truncate_check_FKs(List *relations, bool tempTables) return; /* - * Otherwise, must scan pg_constraint. Right now, it is a seqscan - * because there is no available index on confrelid. + * Otherwise, must scan pg_constraint. Right now, it is a seqscan because + * there is no available index on confrelid. */ fkeyRel = heap_open(ConstraintRelationId, AccessShareLock); @@ -2056,16 +2050,16 @@ heap_truncate_check_FKs(List *relations, bool tempTables) continue; /* Not for one of our list of tables */ - if (! list_member_oid(oids, con->confrelid)) + if (!list_member_oid(oids, con->confrelid)) continue; /* The referencer should be in our list too */ - if (! 
list_member_oid(oids, con->conrelid)) + if (!list_member_oid(oids, con->conrelid)) { if (tempTables) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unsupported ON COMMIT and foreign key combination"), + errmsg("unsupported ON COMMIT and foreign key combination"), errdetail("Table \"%s\" references \"%s\" via foreign key constraint \"%s\", but they do not have the same ON COMMIT setting.", get_rel_name(con->conrelid), get_rel_name(con->confrelid), diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 3d543fa06c6..a25f34b85e0 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.260 2005/08/26 03:07:12 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.261 2005/10/15 02:49:12 momjian Exp $ * * * INTERFACE ROUTINES @@ -91,9 +91,9 @@ ConstructTupleDescriptor(Relation heapRelation, indexTupDesc = CreateTemplateTupleDesc(numatts, false); /* - * For simple index columns, we copy the pg_attribute row from the - * parent relation and modify it as necessary. For expressions we - * have to cons up a pg_attribute row the hard way. + * For simple index columns, we copy the pg_attribute row from the parent + * relation and modify it as necessary. For expressions we have to cons + * up a pg_attribute row the hard way. */ for (i = 0; i < numatts; i++) { @@ -114,7 +114,7 @@ ConstructTupleDescriptor(Relation heapRelation, * here we are indexing on a system attribute (-1...-n) */ from = SystemAttributeDefinition(atnum, - heapRelation->rd_rel->relhasoids); + heapRelation->rd_rel->relhasoids); } else { @@ -127,8 +127,8 @@ ConstructTupleDescriptor(Relation heapRelation, } /* - * now that we've determined the "from", let's copy the tuple - * desc data... + * now that we've determined the "from", let's copy the tuple desc + * data... */ memcpy(to, from, ATTRIBUTE_TUPLE_SIZE); @@ -158,14 +158,13 @@ ConstructTupleDescriptor(Relation heapRelation, indexpr_item = lnext(indexpr_item); /* - * Make the attribute's name "pg_expresssion_nnn" (maybe think - * of something better later) + * Make the attribute's name "pg_expresssion_nnn" (maybe think of + * something better later) */ sprintf(NameStr(to->attname), "pg_expression_%d", i + 1); /* - * Lookup the expression type in pg_type for the type length - * etc. + * Lookup the expression type in pg_type for the type length etc. */ keyType = exprType(indexkey); tuple = SearchSysCache(TYPEOID, @@ -193,15 +192,15 @@ ConstructTupleDescriptor(Relation heapRelation, } /* - * We do not yet have the correct relation OID for the index, so - * just set it invalid for now. InitializeAttributeOids() will - * fix it later. + * We do not yet have the correct relation OID for the index, so just + * set it invalid for now. InitializeAttributeOids() will fix it + * later. */ to->attrelid = InvalidOid; /* - * Check the opclass to see if it provides a keytype (overriding - * the attribute type). + * Check the opclass to see if it provides a keytype (overriding the + * attribute type). */ tuple = SearchSysCache(CLAOID, ObjectIdGetDatum(classObjectId[i]), @@ -311,8 +310,8 @@ AppendAttributeTuples(Relation indexRelation, int numatts) for (i = 0; i < numatts; i++) { /* - * There used to be very grotty code here to set these fields, but - * I think it's unnecessary. They should be set already. + * There used to be very grotty code here to set these fields, but I + * think it's unnecessary. They should be set already. 
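The ConstructTupleDescriptor hunks above note that expression index columns get synthetic "pg_expression_N" names and that attrelid is left invalid until the index OID is known. The short sketch below models only that naming and deferred-OID convention with an invented FakeAttr struct; it is not the catalog code.

#include <stdio.h>

#define NAMEDATALEN 64

typedef struct
{
    char attname[NAMEDATALEN];
    int  attnum;
    int  attrelid;              /* 0 = "not known yet", fixed up later */
} FakeAttr;

/*
 * Toy version of the rule in the comment: simple columns keep their heap
 * name, expression columns are called "pg_expression_N", and attrelid
 * stays 0 until the enclosing index's OID has been assigned.
 */
static void fill_index_attr(FakeAttr *to, int position, const char *heapname)
{
    if (heapname != NULL)
        snprintf(to->attname, sizeof(to->attname), "%s", heapname);
    else
        snprintf(to->attname, sizeof(to->attname), "pg_expression_%d", position);
    to->attnum = position;
    to->attrelid = 0;
}

int main(void)
{
    FakeAttr attrs[2];

    fill_index_attr(&attrs[0], 1, "id");    /* plain column */
    fill_index_attr(&attrs[1], 2, NULL);    /* expression column */
    for (int i = 0; i < 2; i++)
        printf("attnum %d -> %s\n", attrs[i].attnum, attrs[i].attname);
    return 0;
}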
*/ Assert(indexTupDesc->attrs[i]->attnum == i + 1); Assert(indexTupDesc->attrs[i]->attcacheoff == -1); @@ -380,8 +379,8 @@ UpdateIndexRelation(Oid indexoid, exprsDatum = (Datum) 0; /* - * Convert the index predicate (if any) to a text datum. Note we - * convert implicit-AND format to normal explicit-AND for storage. + * Convert the index predicate (if any) to a text datum. Note we convert + * implicit-AND format to normal explicit-AND for storage. */ if (indexInfo->ii_Predicate != NIL) { @@ -442,7 +441,7 @@ UpdateIndexRelation(Oid indexoid, * index_create * * indexRelationId is normally InvalidOid to let this routine - * generate an OID for the index. During bootstrap it may be + * generate an OID for the index. During bootstrap it may be * nonzero to specify a preselected OID. * * Returns OID of the created index. @@ -500,15 +499,14 @@ index_create(Oid heapRelationId, * We cannot allow indexing a shared relation after initdb (because * there's no way to make the entry in other databases' pg_class). * Unfortunately we can't distinguish initdb from a manually started - * standalone backend (toasting of shared rels happens after the - * bootstrap phase, so checking IsBootstrapProcessingMode() won't - * work). However, we can at least prevent this mistake under normal - * multi-user operation. + * standalone backend (toasting of shared rels happens after the bootstrap + * phase, so checking IsBootstrapProcessingMode() won't work). However, + * we can at least prevent this mistake under normal multi-user operation. */ if (shared_relation && IsUnderPostmaster) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("shared indexes cannot be created after initdb"))); + errmsg("shared indexes cannot be created after initdb"))); if (get_relname_relid(indexRelationName, namespaceId)) ereport(ERROR, @@ -526,17 +524,17 @@ index_create(Oid heapRelationId, /* * Allocate an OID for the index, unless we were told what to use. * - * The OID will be the relfilenode as well, so make sure it doesn't - * collide with either pg_class OIDs or existing physical files. + * The OID will be the relfilenode as well, so make sure it doesn't collide + * with either pg_class OIDs or existing physical files. */ if (!OidIsValid(indexRelationId)) indexRelationId = GetNewRelFileNode(tableSpaceId, shared_relation, pg_class); /* - * create the index relation's relcache entry and physical disk file. - * (If we fail further down, it's the smgr's responsibility to remove - * the disk file again.) + * create the index relation's relcache entry and physical disk file. (If + * we fail further down, it's the smgr's responsibility to remove the disk + * file again.) */ indexRelation = heap_create(indexRelationName, namespaceId, @@ -557,8 +555,8 @@ index_create(Oid heapRelationId, LockRelation(indexRelation, AccessExclusiveLock); /* - * Fill in fields of the index's pg_class entry that are not set - * correctly by heap_create. + * Fill in fields of the index's pg_class entry that are not set correctly + * by heap_create. * * XXX should have a cleaner way to create cataloged indexes */ @@ -602,16 +600,16 @@ index_create(Oid heapRelationId, /* * Register constraint and dependencies for the index. * - * If the index is from a CONSTRAINT clause, construct a pg_constraint - * entry. The index is then linked to the constraint, which in turn - * is linked to the table. If it's not a CONSTRAINT, make the - * dependency directly on the table. + * If the index is from a CONSTRAINT clause, construct a pg_constraint entry. 
+ * The index is then linked to the constraint, which in turn is linked to + * the table. If it's not a CONSTRAINT, make the dependency directly on + * the table. * * We don't need a dependency on the namespace, because there'll be an * indirect dependency via our parent table. * - * During bootstrap we can't register any dependencies, and we don't try - * to make a constraint either. + * During bootstrap we can't register any dependencies, and we don't try to + * make a constraint either. */ if (!IsBootstrapProcessingMode()) { @@ -697,7 +695,7 @@ index_create(Oid heapRelationId, if (indexInfo->ii_Expressions) { recordDependencyOnSingleRelExpr(&myself, - (Node *) indexInfo->ii_Expressions, + (Node *) indexInfo->ii_Expressions, heapRelationId, DEPENDENCY_NORMAL, DEPENDENCY_AUTO); @@ -707,7 +705,7 @@ index_create(Oid heapRelationId, if (indexInfo->ii_Predicate) { recordDependencyOnSingleRelExpr(&myself, - (Node *) indexInfo->ii_Predicate, + (Node *) indexInfo->ii_Predicate, heapRelationId, DEPENDENCY_NORMAL, DEPENDENCY_AUTO); @@ -721,10 +719,10 @@ index_create(Oid heapRelationId, CommandCounterIncrement(); /* - * In bootstrap mode, we have to fill in the index strategy structure - * with information from the catalogs. If we aren't bootstrapping, - * then the relcache entry has already been rebuilt thanks to sinval - * update during CommandCounterIncrement. + * In bootstrap mode, we have to fill in the index strategy structure with + * information from the catalogs. If we aren't bootstrapping, then the + * relcache entry has already been rebuilt thanks to sinval update during + * CommandCounterIncrement. */ if (IsBootstrapProcessingMode()) RelationInitIndexAccessInfo(indexRelation); @@ -732,17 +730,16 @@ index_create(Oid heapRelationId, Assert(indexRelation->rd_indexcxt != NULL); /* - * If this is bootstrap (initdb) time, then we don't actually fill in - * the index yet. We'll be creating more indexes and classes later, - * so we delay filling them in until just before we're done with - * bootstrapping. Similarly, if the caller specified skip_build then - * filling the index is delayed till later (ALTER TABLE can save work - * in some cases with this). Otherwise, we call the AM routine that - * constructs the index. + * If this is bootstrap (initdb) time, then we don't actually fill in the + * index yet. We'll be creating more indexes and classes later, so we + * delay filling them in until just before we're done with bootstrapping. + * Similarly, if the caller specified skip_build then filling the index is + * delayed till later (ALTER TABLE can save work in some cases with this). + * Otherwise, we call the AM routine that constructs the index. * - * In normal processing mode, the heap and index relations are closed, - * but we continue to hold the ShareLock on the heap and the exclusive - * lock on the index that we acquired above, until end of transaction. + * In normal processing mode, the heap and index relations are closed, but we + * continue to hold the ShareLock on the heap and the exclusive lock on + * the index that we acquired above, until end of transaction. */ if (IsBootstrapProcessingMode()) { @@ -784,13 +781,12 @@ index_drop(Oid indexId) * To drop an index safely, we must grab exclusive lock on its parent * table; otherwise there could be other backends using the index! * Exclusive lock on the index alone is insufficient because another - * backend might be in the midst of devising a query plan that will - * use the index. 
The parser and planner take care to hold an - * appropriate lock on the parent table while working, but having them - * hold locks on all the indexes too seems overly expensive. We do grab - * exclusive lock on the index too, just to be safe. Both locks must - * be held till end of transaction, else other backends will still see - * this index in pg_index. + * backend might be in the midst of devising a query plan that will use + * the index. The parser and planner take care to hold an appropriate + * lock on the parent table while working, but having them hold locks on + * all the indexes too seems overly expensive. We do grab exclusive lock + * on the index too, just to be safe. Both locks must be held till end of + * transaction, else other backends will still see this index in pg_index. */ heapId = IndexGetRelation(indexId); userHeapRelation = heap_open(heapId, AccessExclusiveLock); @@ -806,9 +802,9 @@ index_drop(Oid indexId) userIndexRelation->rd_istemp); /* - * Close and flush the index's relcache entry, to ensure relcache - * doesn't try to rebuild it while we're deleting catalog entries. We - * keep the lock though. + * Close and flush the index's relcache entry, to ensure relcache doesn't + * try to rebuild it while we're deleting catalog entries. We keep the + * lock though. */ index_close(userIndexRelation); @@ -833,8 +829,8 @@ index_drop(Oid indexId) heap_close(indexRelation, RowExclusiveLock); /* - * if it has any expression columns, we might have stored statistics - * about them. + * if it has any expression columns, we might have stored statistics about + * them. */ if (hasexprs) RemoveStatistics(indexId, 0); @@ -850,12 +846,11 @@ index_drop(Oid indexId) DeleteRelationTuple(indexId); /* - * We are presently too lazy to attempt to compute the new correct - * value of relhasindex (the next VACUUM will fix it if necessary). So - * there is no need to update the pg_class tuple for the owning - * relation. But we must send out a shared-cache-inval notice on the - * owning relation to ensure other backends update their relcache - * lists of indexes. + * We are presently too lazy to attempt to compute the new correct value + * of relhasindex (the next VACUUM will fix it if necessary). So there is + * no need to update the pg_class tuple for the owning relation. But we + * must send out a shared-cache-inval notice on the owning relation to + * ensure other backends update their relcache lists of indexes. */ CacheInvalidateRelcache(userHeapRelation); @@ -926,7 +921,7 @@ BuildIndexInfo(Relation index) * context must point to the heap tuple passed in. * * Notice we don't actually call index_form_tuple() here; we just prepare - * its input arrays values[] and isnull[]. This is because the index AM + * its input arrays values[] and isnull[]. This is because the index AM * may wish to alter the data before storage. * ---------------- */ @@ -974,7 +969,7 @@ FormIndexDatum(IndexInfo *indexInfo, if (indexpr_item == NULL) elog(ERROR, "wrong number of index expressions"); iDatum = ExecEvalExprSwitchContext((ExprState *) lfirst(indexpr_item), - GetPerTupleExprContext(estate), + GetPerTupleExprContext(estate), &isNull, NULL); indexpr_item = lnext(indexpr_item); @@ -1013,9 +1008,9 @@ setRelhasindex(Oid relid, bool hasindex, bool isprimary, Oid reltoastidxid) HeapScanDesc pg_class_scan = NULL; /* - * Find the tuple to update in pg_class. In bootstrap mode we can't - * use heap_update, so cheat and overwrite the tuple in-place. In - * normal processing, make a copy to scribble on. 
+ * Find the tuple to update in pg_class. In bootstrap mode we can't use + * heap_update, so cheat and overwrite the tuple in-place. In normal + * processing, make a copy to scribble on. */ pg_class = heap_open(RelationRelationId, RowExclusiveLock); @@ -1135,7 +1130,7 @@ setNewRelfilenode(Relation relation) pg_class = heap_open(RelationRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, - ObjectIdGetDatum(RelationGetRelid(relation)), + ObjectIdGetDatum(RelationGetRelid(relation)), 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "could not find tuple for relation %u", @@ -1178,7 +1173,7 @@ setNewRelfilenode(Relation relation) * advantage of the opportunity to update pg_class to ensure that the * planner takes advantage of the index we just created. But, only * update statistics during normal index definitions, not for indices - * on system catalogs created during bootstrap processing. We must + * on system catalogs created during bootstrap processing. We must * close the relations before updating statistics to guarantee that * the relcache entries are flushed when we increment the command * counter in UpdateStats(). But we do not release any locks on the @@ -1188,8 +1183,8 @@ void IndexCloseAndUpdateStats(Relation heap, double heapTuples, Relation index, double indexTuples) { - Oid hrelid = RelationGetRelid(heap); - Oid irelid = RelationGetRelid(index); + Oid hrelid = RelationGetRelid(heap); + Oid irelid = RelationGetRelid(index); if (!IsNormalProcessingMode()) return; @@ -1222,9 +1217,9 @@ UpdateStats(Oid relid, double reltuples) /* * This routine handles updates for both the heap and index relation - * statistics. In order to guarantee that we're able to *see* the - * index relation tuple, we bump the command counter id here. The - * index relation tuple was created in the current transaction. + * statistics. In order to guarantee that we're able to *see* the index + * relation tuple, we bump the command counter id here. The index + * relation tuple was created in the current transaction. */ CommandCounterIncrement(); @@ -1232,8 +1227,8 @@ UpdateStats(Oid relid, double reltuples) * CommandCounterIncrement() flushes invalid cache entries, including * those for the heap and index relations for which we're updating * statistics. Now that the cache is flushed, it's safe to open the - * relation again. We need the relation open in order to figure out - * how many blocks it contains. + * relation again. We need the relation open in order to figure out how + * many blocks it contains. */ /* @@ -1242,17 +1237,17 @@ UpdateStats(Oid relid, double reltuples) whichRel = relation_open(relid, ShareLock); /* - * Find the tuple to update in pg_class. Normally we make a copy of - * the tuple using the syscache, modify it, and apply heap_update. But - * in bootstrap mode we can't use heap_update, so we cheat and - * overwrite the tuple in-place. (Note: as of PG 8.0 this isn't called - * during bootstrap, but leave the code here for possible future use.) + * Find the tuple to update in pg_class. Normally we make a copy of the + * tuple using the syscache, modify it, and apply heap_update. But in + * bootstrap mode we can't use heap_update, so we cheat and overwrite the + * tuple in-place. (Note: as of PG 8.0 this isn't called during + * bootstrap, but leave the code here for possible future use.) 
* - * We also must cheat if reindexing pg_class itself, because the target - * index may presently not be part of the set of indexes that - * CatalogUpdateIndexes would update (see reindex_relation). In this - * case the stats updates will not be WAL-logged and so could be lost - * in a crash. This seems OK considering VACUUM does the same thing. + * We also must cheat if reindexing pg_class itself, because the target index + * may presently not be part of the set of indexes that + * CatalogUpdateIndexes would update (see reindex_relation). In this case + * the stats updates will not be WAL-logged and so could be lost in a + * crash. This seems OK considering VACUUM does the same thing. */ pg_class = heap_open(RelationRelationId, RowExclusiveLock); @@ -1284,9 +1279,9 @@ UpdateStats(Oid relid, double reltuples) /* * Update statistics in pg_class, if they changed. (Avoiding an - * unnecessary update is not just a tiny performance improvement; it - * also reduces the window wherein concurrent CREATE INDEX commands - * may conflict.) + * unnecessary update is not just a tiny performance improvement; it also + * reduces the window wherein concurrent CREATE INDEX commands may + * conflict.) */ relpages = RelationGetNumberOfBlocks(whichRel); @@ -1320,10 +1315,10 @@ UpdateStats(Oid relid, double reltuples) heap_freetuple(tuple); /* - * We shouldn't have to do this, but we do... Modify the reldesc in - * place with the new values so that the cache contains the latest - * copy. (XXX is this really still necessary? The relcache will get - * fixed at next CommandCounterIncrement, so why bother here?) + * We shouldn't have to do this, but we do... Modify the reldesc in place + * with the new values so that the cache contains the latest copy. (XXX + * is this really still necessary? The relcache will get fixed at next + * CommandCounterIncrement, so why bother here?) */ whichRel->rd_rel->relpages = (int32) relpages; whichRel->rd_rel->reltuples = (float4) reltuples; @@ -1405,8 +1400,8 @@ IndexBuildHeapScan(Relation heapRelation, Assert(OidIsValid(indexRelation->rd_rel->relam)); /* - * Need an EState for evaluation of index expressions and - * partial-index predicates. Also a slot to hold the current tuple. + * Need an EState for evaluation of index expressions and partial-index + * predicates. Also a slot to hold the current tuple. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -1421,9 +1416,8 @@ IndexBuildHeapScan(Relation heapRelation, estate); /* - * Ok, begin our scan of the base relation. We use SnapshotAny - * because we must retrieve all tuples and do our own time qual - * checks. + * Ok, begin our scan of the base relation. We use SnapshotAny because we + * must retrieve all tuples and do our own time qual checks. */ if (IsBootstrapProcessingMode()) { @@ -1487,16 +1481,16 @@ IndexBuildHeapScan(Relation heapRelation, case HEAPTUPLE_INSERT_IN_PROGRESS: /* - * Since caller should hold ShareLock or better, we - * should not see any tuples inserted by open - * transactions --- unless it's our own transaction. - * (Consider INSERT followed by CREATE INDEX within a - * transaction.) An exception occurs when reindexing - * a system catalog, because we often release lock on - * system catalogs before committing. + * Since caller should hold ShareLock or better, we should + * not see any tuples inserted by open transactions --- + * unless it's our own transaction. (Consider INSERT + * followed by CREATE INDEX within a transaction.) 
An + * exception occurs when reindexing a system catalog, + * because we often release lock on system catalogs before + * committing. */ if (!TransactionIdIsCurrentTransactionId( - HeapTupleHeaderGetXmin(heapTuple->t_data)) + HeapTupleHeaderGetXmin(heapTuple->t_data)) && !IsSystemRelation(heapRelation)) elog(ERROR, "concurrent insert in progress"); indexIt = true; @@ -1505,17 +1499,17 @@ IndexBuildHeapScan(Relation heapRelation, case HEAPTUPLE_DELETE_IN_PROGRESS: /* - * Since caller should hold ShareLock or better, we - * should not see any tuples deleted by open - * transactions --- unless it's our own transaction. - * (Consider DELETE followed by CREATE INDEX within a - * transaction.) An exception occurs when reindexing - * a system catalog, because we often release lock on - * system catalogs before committing. + * Since caller should hold ShareLock or better, we should + * not see any tuples deleted by open transactions --- + * unless it's our own transaction. (Consider DELETE + * followed by CREATE INDEX within a transaction.) An + * exception occurs when reindexing a system catalog, + * because we often release lock on system catalogs before + * committing. */ Assert(!(heapTuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI)); if (!TransactionIdIsCurrentTransactionId( - HeapTupleHeaderGetXmax(heapTuple->t_data)) + HeapTupleHeaderGetXmax(heapTuple->t_data)) && !IsSystemRelation(heapRelation)) elog(ERROR, "concurrent delete in progress"); indexIt = true; @@ -1547,9 +1541,8 @@ IndexBuildHeapScan(Relation heapRelation, /* * In a partial index, discard tuples that don't satisfy the - * predicate. We can also discard recently-dead tuples, since - * VACUUM doesn't complain about tuple count mismatch for partial - * indexes. + * predicate. We can also discard recently-dead tuples, since VACUUM + * doesn't complain about tuple count mismatch for partial indexes. */ if (predicate != NIL) { @@ -1560,9 +1553,9 @@ IndexBuildHeapScan(Relation heapRelation, } /* - * For the current heap tuple, extract all the attributes we use - * in this index, and note which are null. This also performs - * evaluation of any expressions needed. + * For the current heap tuple, extract all the attributes we use in + * this index, and note which are null. This also performs evaluation + * of any expressions needed. */ FormIndexDatum(indexInfo, slot, @@ -1571,9 +1564,9 @@ IndexBuildHeapScan(Relation heapRelation, isnull); /* - * You'd think we should go ahead and build the index tuple here, - * but some index AMs want to do further processing on the data - * first. So pass the values[] and isnull[] arrays, instead. + * You'd think we should go ahead and build the index tuple here, but + * some index AMs want to do further processing on the data first. So + * pass the values[] and isnull[] arrays, instead. */ /* Call the AM's callback routine to process the tuple */ @@ -1631,27 +1624,27 @@ reindex_index(Oid indexId) bool inplace; /* - * Open and lock the parent heap relation. ShareLock is sufficient - * since we only need to be sure no schema or data changes are going on. + * Open and lock the parent heap relation. ShareLock is sufficient since + * we only need to be sure no schema or data changes are going on. */ heapId = IndexGetRelation(indexId); heapRelation = heap_open(heapId, ShareLock); /* - * Open the target index relation and get an exclusive lock on it, - * to ensure that no one else is touching this particular index. 
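The comments above lay out the per-tuple work of the build scan: skip tuples that fail a partial-index predicate, extract the indexed columns, and hand the raw values to the AM-specific callback rather than forming an index tuple. A hedged sketch of that loop body; ExecStoreTuple, ExecQual and the callback/tupleIsAlive variables are assumptions about the surrounding executor and build-callback plumbing, not text shown in these hunks:

/* One heap tuple per iteration of the scan loop (sketch). */
ExecStoreTuple(heapTuple, slot, InvalidBuffer, false);

/* Partial index: discard tuples that do not satisfy the predicate. */
if (predicate != NIL &&
    !ExecQual(predicate, econtext, false))
    continue;

/* Compute values[]/isnull[], evaluating any index expressions. */
FormIndexDatum(indexInfo, slot, estate, values, isnull);

/* Let the AM's build callback process the raw column values. */
callback(indexRelation, heapTuple, values, isnull, tupleIsAlive,
         callback_state);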
+ * Open the target index relation and get an exclusive lock on it, to + * ensure that no one else is touching this particular index. */ iRel = index_open(indexId); LockRelation(iRel, AccessExclusiveLock); /* - * If it's a shared index, we must do inplace processing (because we - * have no way to update relfilenode in other databases). Otherwise - * we can do it the normal transaction-safe way. + * If it's a shared index, we must do inplace processing (because we have + * no way to update relfilenode in other databases). Otherwise we can do + * it the normal transaction-safe way. * * Since inplace processing isn't crash-safe, we only allow it in a - * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE - * cases, the caller should have detected this.) + * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases, + * the caller should have detected this.) */ inplace = iRel->rd_rel->relisshared; @@ -1688,8 +1681,8 @@ reindex_index(Oid indexId) index_build(heapRelation, iRel, indexInfo); /* - * index_build will close both the heap and index relations (but - * not give up the locks we hold on them). So we're done. + * index_build will close both the heap and index relations (but not + * give up the locks we hold on them). So we're done. */ } PG_CATCH(); @@ -1721,8 +1714,8 @@ reindex_relation(Oid relid, bool toast_too) ListCell *indexId; /* - * Open and lock the relation. ShareLock is sufficient since we only - * need to prevent schema and data changes in it. + * Open and lock the relation. ShareLock is sufficient since we only need + * to prevent schema and data changes in it. */ rel = heap_open(relid, ShareLock); @@ -1736,26 +1729,25 @@ reindex_relation(Oid relid, bool toast_too) indexIds = RelationGetIndexList(rel); /* - * reindex_index will attempt to update the pg_class rows for the - * relation and index. If we are processing pg_class itself, we want - * to make sure that the updates do not try to insert index entries - * into indexes we have not processed yet. (When we are trying to - * recover from corrupted indexes, that could easily cause a crash.) - * We can accomplish this because CatalogUpdateIndexes will use the - * relcache's index list to know which indexes to update. We just - * force the index list to be only the stuff we've processed. + * reindex_index will attempt to update the pg_class rows for the relation + * and index. If we are processing pg_class itself, we want to make sure + * that the updates do not try to insert index entries into indexes we + * have not processed yet. (When we are trying to recover from corrupted + * indexes, that could easily cause a crash.) We can accomplish this + * because CatalogUpdateIndexes will use the relcache's index list to know + * which indexes to update. We just force the index list to be only the + * stuff we've processed. * - * It is okay to not insert entries into the indexes we have not - * processed yet because all of this is transaction-safe. If we fail - * partway through, the updated rows are dead and it doesn't matter - * whether they have index entries. Also, a new pg_class index will - * be created with an entry for its own pg_class row because we do - * setNewRelfilenode() before we do index_build(). + * It is okay to not insert entries into the indexes we have not processed + * yet because all of this is transaction-safe. If we fail partway + * through, the updated rows are dead and it doesn't matter whether they + * have index entries. 
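The reindex_index comments above distinguish in-place processing (required for shared indexes, and allowed only in a standalone backend because it is not crash-safe) from the normal transaction-safe rebuild. A sketch of that decision; the error wording is an assumption, while relisshared, ereport and errcode are used as they appear elsewhere in this patch:

inplace = iRel->rd_rel->relisshared;

if (inplace && IsUnderPostmaster)
    ereport(ERROR,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("shared index \"%s\" can only be reindexed in stand-alone mode",
                    RelationGetRelationName(iRel))));

In the non-inplace case the code can simply assign a new relfilenode (setNewRelfilenode, shown earlier in this patch) and build into the new file, so a failed rebuild is undone by transaction rollback.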
Also, a new pg_class index will be created with an + * entry for its own pg_class row because we do setNewRelfilenode() before + * we do index_build(). * - * Note that we also clear pg_class's rd_oidindex until the loop is done, - * so that that index can't be accessed either. This means we cannot - * safely generate new relation OIDs while in the loop; shouldn't be a - * problem. + * Note that we also clear pg_class's rd_oidindex until the loop is done, so + * that that index can't be accessed either. This means we cannot safely + * generate new relation OIDs while in the loop; shouldn't be a problem. */ is_pg_class = (RelationGetRelid(rel) == RelationRelationId); doneIndexes = NIL; @@ -1787,8 +1779,8 @@ reindex_relation(Oid relid, bool toast_too) result = (indexIds != NIL); /* - * If the relation has a secondary toast rel, reindex that too while - * we still hold the lock on the master table. + * If the relation has a secondary toast rel, reindex that too while we + * still hold the lock on the master table. */ if (toast_too && OidIsValid(toast_relid)) result |= reindex_relation(toast_relid, false); diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c index 905d99d9469..ad193dd7ffb 100644 --- a/src/backend/catalog/indexing.c +++ b/src/backend/catalog/indexing.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/indexing.c,v 1.109 2005/03/21 01:24:01 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/indexing.c,v 1.110 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -109,8 +109,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) Assert(indexInfo->ii_Predicate == NIL); /* - * FormIndexDatum fills in its values and isnull parameters with - * the appropriate values for the column(s) of the index. + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. */ FormIndexDatum(indexInfo, slot, @@ -122,8 +122,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) * The index AM does the rest. */ index_insert(relationDescs[i], /* index relation */ - values, /* array of index Datums */ - isnull, /* is-null flags */ + values, /* array of index Datums */ + isnull, /* is-null flags */ &(heapTuple->t_self), /* tid of heap tuple */ heapRelation, relationDescs[i]->rd_index->indisunique); diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index dc627e42880..0cafa9f9faf 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -13,7 +13,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.78 2005/10/06 22:43:16 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.79 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -108,7 +108,7 @@ static bool namespaceSearchPathValid = true; * command is first executed). Thereafter it's the OID of the temp namespace. * * myTempNamespaceSubID shows whether we've created the TEMP namespace in the - * current subtransaction. The flag propagates up the subtransaction tree, + * current subtransaction. The flag propagates up the subtransaction tree, * so the main transaction will correctly recognize the flag if all * intermediate subtransactions commit. 
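The reindex_relation comments above describe forcing pg_class's relcache index list to contain only the indexes already rebuilt, so CatalogUpdateIndexes cannot touch one that is still pending. A sketch of the loop those comments introduce; RelationSetIndexList and its third (OID-index) argument are assumptions about the helper the comments allude to when they mention clearing rd_oidindex:

is_pg_class = (RelationGetRelid(rel) == RelationRelationId);
doneIndexes = NIL;

foreach(indexId, indexIds)
{
    Oid     indexOid = lfirst_oid(indexId);

    if (is_pg_class)
        RelationSetIndexList(rel, doneIndexes, InvalidOid);

    reindex_index(indexOid);

    CommandCounterIncrement();

    if (is_pg_class)
        doneIndexes = lappend_oid(doneIndexes, indexOid);
}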
When it is InvalidSubTransactionId, * we either haven't made the TEMP namespace yet, or have successfully @@ -225,7 +225,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cross-database references are not implemented: \"%s.%s.%s\"", - newRelation->catalogname, newRelation->schemaname, + newRelation->catalogname, newRelation->schemaname, newRelation->relname))); } @@ -235,7 +235,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation) if (newRelation->schemaname) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("temporary tables may not specify a schema name"))); + errmsg("temporary tables may not specify a schema name"))); /* Initialize temp namespace if first time through */ if (!OidIsValid(myTempNamespace)) InitTempTableNamespace(); @@ -246,7 +246,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation) { /* use exact schema given */ namespaceId = GetSysCacheOid(NAMESPACENAME, - CStringGetDatum(newRelation->schemaname), + CStringGetDatum(newRelation->schemaname), 0, 0, 0); if (!OidIsValid(namespaceId)) ereport(ERROR, @@ -322,9 +322,9 @@ RelationIsVisible(Oid relid) recomputeNamespacePath(); /* - * Quick check: if it ain't in the path at all, it ain't visible. - * Items in the system namespace are surely in the path and so we - * needn't even do list_member_oid() for them. + * Quick check: if it ain't in the path at all, it ain't visible. Items in + * the system namespace are surely in the path and so we needn't even do + * list_member_oid() for them. */ relnamespace = relform->relnamespace; if (relnamespace != PG_CATALOG_NAMESPACE && @@ -333,9 +333,9 @@ RelationIsVisible(Oid relid) else { /* - * If it is in the path, it might still not be visible; it could - * be hidden by another relation of the same name earlier in the - * path. So we must do a slow check for conflicting relations. + * If it is in the path, it might still not be visible; it could be + * hidden by another relation of the same name earlier in the path. So + * we must do a slow check for conflicting relations. */ char *relname = NameStr(relform->relname); ListCell *l; @@ -420,9 +420,9 @@ TypeIsVisible(Oid typid) recomputeNamespacePath(); /* - * Quick check: if it ain't in the path at all, it ain't visible. - * Items in the system namespace are surely in the path and so we - * needn't even do list_member_oid() for them. + * Quick check: if it ain't in the path at all, it ain't visible. Items in + * the system namespace are surely in the path and so we needn't even do + * list_member_oid() for them. */ typnamespace = typform->typnamespace; if (typnamespace != PG_CATALOG_NAMESPACE && @@ -431,9 +431,9 @@ TypeIsVisible(Oid typid) else { /* - * If it is in the path, it might still not be visible; it could - * be hidden by another type of the same name earlier in the path. - * So we must do a slow check for conflicting types. + * If it is in the path, it might still not be visible; it could be + * hidden by another type of the same name earlier in the path. So we + * must do a slow check for conflicting types. */ char *typname = NameStr(typform->typname); ListCell *l; @@ -545,14 +545,14 @@ FuncnameGetCandidates(List *names, int nargs) /* * Okay, it's in the search path, but does it have the same - * arguments as something we already accepted? If so, keep - * only the one that appears earlier in the search path. + * arguments as something we already accepted? 
If so, keep only + * the one that appears earlier in the search path. * * If we have an ordered list from SearchSysCacheList (the normal - * case), then any conflicting proc must immediately adjoin - * this one in the list, so we only need to look at the newest - * result item. If we have an unordered list, we have to scan - * the whole result list. + * case), then any conflicting proc must immediately adjoin this + * one in the list, so we only need to look at the newest result + * item. If we have an unordered list, we have to scan the whole + * result list. */ if (resultList) { @@ -575,9 +575,9 @@ FuncnameGetCandidates(List *names, int nargs) prevResult = prevResult->next) { if (pronargs == prevResult->nargs && - memcmp(procform->proargtypes.values, - prevResult->args, - pronargs * sizeof(Oid)) == 0) + memcmp(procform->proargtypes.values, + prevResult->args, + pronargs * sizeof(Oid)) == 0) break; } } @@ -640,9 +640,9 @@ FunctionIsVisible(Oid funcid) recomputeNamespacePath(); /* - * Quick check: if it ain't in the path at all, it ain't visible. - * Items in the system namespace are surely in the path and so we - * needn't even do list_member_oid() for them. + * Quick check: if it ain't in the path at all, it ain't visible. Items in + * the system namespace are surely in the path and so we needn't even do + * list_member_oid() for them. */ pronamespace = procform->pronamespace; if (pronamespace != PG_CATALOG_NAMESPACE && @@ -651,10 +651,10 @@ FunctionIsVisible(Oid funcid) else { /* - * If it is in the path, it might still not be visible; it could - * be hidden by another proc of the same name and arguments - * earlier in the path. So we must do a slow check to see if this - * is the same proc that would be found by FuncnameGetCandidates. + * If it is in the path, it might still not be visible; it could be + * hidden by another proc of the same name and arguments earlier in + * the path. So we must do a slow check to see if this is the same + * proc that would be found by FuncnameGetCandidates. */ char *proname = NameStr(procform->proname); int nargs = procform->pronargs; @@ -733,13 +733,12 @@ OpernameGetCandidates(List *names, char oprkind) /* * In typical scenarios, most if not all of the operators found by the - * catcache search will end up getting returned; and there can be - * quite a few, for common operator names such as '=' or '+'. To - * reduce the time spent in palloc, we allocate the result space as an - * array large enough to hold all the operators. The original coding - * of this routine did a separate palloc for each operator, but - * profiling revealed that the pallocs used an unreasonably large - * fraction of parsing time. + * catcache search will end up getting returned; and there can be quite a + * few, for common operator names such as '=' or '+'. To reduce the time + * spent in palloc, we allocate the result space as an array large enough + * to hold all the operators. The original coding of this routine did a + * separate palloc for each operator, but profiling revealed that the + * pallocs used an unreasonably large fraction of parsing time. */ #define SPACE_PER_OP MAXALIGN(sizeof(struct _FuncCandidateList) + sizeof(Oid)) @@ -780,14 +779,14 @@ OpernameGetCandidates(List *names, char oprkind) /* * Okay, it's in the search path, but does it have the same - * arguments as something we already accepted? If so, keep - * only the one that appears earlier in the search path. + * arguments as something we already accepted? 
If so, keep only + * the one that appears earlier in the search path. * * If we have an ordered list from SearchSysCacheList (the normal - * case), then any conflicting oper must immediately adjoin - * this one in the list, so we only need to look at the newest - * result item. If we have an unordered list, we have to scan - * the whole result list. + * case), then any conflicting oper must immediately adjoin this + * one in the list, so we only need to look at the newest result + * item. If we have an unordered list, we have to scan the whole + * result list. */ if (resultList) { @@ -870,9 +869,9 @@ OperatorIsVisible(Oid oprid) recomputeNamespacePath(); /* - * Quick check: if it ain't in the path at all, it ain't visible. - * Items in the system namespace are surely in the path and so we - * needn't even do list_member_oid() for them. + * Quick check: if it ain't in the path at all, it ain't visible. Items in + * the system namespace are surely in the path and so we needn't even do + * list_member_oid() for them. */ oprnamespace = oprform->oprnamespace; if (oprnamespace != PG_CATALOG_NAMESPACE && @@ -881,11 +880,10 @@ OperatorIsVisible(Oid oprid) else { /* - * If it is in the path, it might still not be visible; it could - * be hidden by another operator of the same name and arguments - * earlier in the path. So we must do a slow check to see if this - * is the same operator that would be found by - * OpernameGetCandidates. + * If it is in the path, it might still not be visible; it could be + * hidden by another operator of the same name and arguments earlier + * in the path. So we must do a slow check to see if this is the same + * operator that would be found by OpernameGetCandidates. */ char *oprname = NameStr(oprform->oprname); FuncCandidateList clist; @@ -956,15 +954,14 @@ OpclassGetCandidates(Oid amid) continue; /* opclass is not in search path */ /* - * Okay, it's in the search path, but does it have the same name - * as something we already accepted? If so, keep only the one - * that appears earlier in the search path. + * Okay, it's in the search path, but does it have the same name as + * something we already accepted? If so, keep only the one that + * appears earlier in the search path. * - * If we have an ordered list from SearchSysCacheList (the normal - * case), then any conflicting opclass must immediately adjoin - * this one in the list, so we only need to look at the newest - * result item. If we have an unordered list, we have to scan the - * whole result list. + * If we have an ordered list from SearchSysCacheList (the normal case), + * then any conflicting opclass must immediately adjoin this one in + * the list, so we only need to look at the newest result item. If we + * have an unordered list, we have to scan the whole result list. */ if (resultList) { @@ -1083,9 +1080,9 @@ OpclassIsVisible(Oid opcid) recomputeNamespacePath(); /* - * Quick check: if it ain't in the path at all, it ain't visible. - * Items in the system namespace are surely in the path and so we - * needn't even do list_member_oid() for them. + * Quick check: if it ain't in the path at all, it ain't visible. Items in + * the system namespace are surely in the path and so we needn't even do + * list_member_oid() for them. 
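The quick-check/slow-check idiom repeated in the *IsVisible hunks above can be made concrete for the relation case. A sketch; the namespaceSearchPath list name and the use of get_relname_relid for the conflict probe are assumptions consistent with the fragments shown here:

/* Quick check: not in the path at all (and not a system catalog) => not visible. */
relnamespace = relform->relnamespace;
if (relnamespace != PG_CATALOG_NAMESPACE &&
    !list_member_oid(namespaceSearchPath, relnamespace))
    visible = false;
else
{
    /*
     * Slow check: walk the path front to back.  The relation is visible
     * only if we reach its own namespace before any namespace holding a
     * different relation of the same name.
     */
    char       *relname = NameStr(relform->relname);
    ListCell   *l;

    visible = false;
    foreach(l, namespaceSearchPath)
    {
        Oid     nsp = lfirst_oid(l);

        if (nsp == relnamespace)
        {
            visible = true;     /* found our own namespace first */
            break;
        }
        if (OidIsValid(get_relname_relid(relname, nsp)))
            break;              /* hidden by an earlier relation */
    }
}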
*/ opcnamespace = opcform->opcnamespace; if (opcnamespace != PG_CATALOG_NAMESPACE && @@ -1094,10 +1091,10 @@ OpclassIsVisible(Oid opcid) else { /* - * If it is in the path, it might still not be visible; it could - * be hidden by another opclass of the same name earlier in the - * path. So we must do a slow check to see if this opclass would - * be found by OpclassnameGetOpcid. + * If it is in the path, it might still not be visible; it could be + * hidden by another opclass of the same name earlier in the path. So + * we must do a slow check to see if this opclass would be found by + * OpclassnameGetOpcid. */ char *opcname = NameStr(opcform->opcname); @@ -1164,9 +1161,9 @@ ConversionIsVisible(Oid conid) recomputeNamespacePath(); /* - * Quick check: if it ain't in the path at all, it ain't visible. - * Items in the system namespace are surely in the path and so we - * needn't even do list_member_oid() for them. + * Quick check: if it ain't in the path at all, it ain't visible. Items in + * the system namespace are surely in the path and so we needn't even do + * list_member_oid() for them. */ connamespace = conform->connamespace; if (connamespace != PG_CATALOG_NAMESPACE && @@ -1175,10 +1172,10 @@ ConversionIsVisible(Oid conid) else { /* - * If it is in the path, it might still not be visible; it could - * be hidden by another conversion of the same name earlier in the - * path. So we must do a slow check to see if this conversion - * would be found by ConversionGetConid. + * If it is in the path, it might still not be visible; it could be + * hidden by another conversion of the same name earlier in the path. + * So we must do a slow check to see if this conversion would be found + * by ConversionGetConid. */ char *conname = NameStr(conform->conname); @@ -1226,14 +1223,14 @@ DeconstructQualifiedName(List *names, if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cross-database references are not implemented: %s", - NameListToString(names)))); + errmsg("cross-database references are not implemented: %s", + NameListToString(names)))); break; default: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("improper qualified name (too many dotted names): %s", - NameListToString(names)))); + errmsg("improper qualified name (too many dotted names): %s", + NameListToString(names)))); break; } @@ -1373,8 +1370,8 @@ makeRangeVarFromNameList(List *names) default: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("improper relation name (too many dotted names): %s", - NameListToString(names)))); + errmsg("improper relation name (too many dotted names): %s", + NameListToString(names)))); break; } @@ -1574,7 +1571,7 @@ FindDefaultConversionProc(int4 for_encoding, int4 to_encoding) static void recomputeNamespacePath(void) { - Oid roleid = GetUserId(); + Oid roleid = GetUserId(); char *rawname; List *namelist; List *oidlist; @@ -1602,9 +1599,9 @@ recomputeNamespacePath(void) /* * Convert the list of names to a list of OIDs. If any names are not - * recognizable or we don't have read access, just leave them out of - * the list. (We can't raise an error, since the search_path setting - * has already been accepted.) Don't make duplicate entries, either. + * recognizable or we don't have read access, just leave them out of the + * list. (We can't raise an error, since the search_path setting has + * already been accepted.) Don't make duplicate entries, either. 
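The recomputeNamespacePath comment that closes above (skip unrecognizable or unreadable schemas, avoid duplicates) is only partially visible as code in this patch. A consolidated sketch of the conversion loop under those rules; the $user expansion is elided, and pg_namespace_aclcheck/ACL_USAGE and lappend_oid are assumptions consistent with how the rest of this file handles ACLs and OID lists (roleid is the GetUserId() value shown above):

oidlist = NIL;
foreach(l, namelist)
{
    char       *curname = (char *) lfirst(l);
    Oid         namespaceId;

    /* ("$user" is first expanded to the current role name; elided here.) */
    namespaceId = GetSysCacheOid(NAMESPACENAME,
                                 CStringGetDatum(curname),
                                 0, 0, 0);

    /* Silently drop unknown schemas, schemas we may not use, and duplicates. */
    if (OidIsValid(namespaceId) &&
        !list_member_oid(oidlist, namespaceId) &&
        pg_namespace_aclcheck(namespaceId, roleid,
                              ACL_USAGE) == ACLCHECK_OK)
        oidlist = lappend_oid(oidlist, namespaceId);
}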
*/ oidlist = NIL; foreach(l, namelist) @@ -1659,8 +1656,8 @@ recomputeNamespacePath(void) firstNS = linitial_oid(oidlist); /* - * Add any implicitly-searched namespaces to the list. Note these go - * on the front, not the back; also notice that we do not check USAGE + * Add any implicitly-searched namespaces to the list. Note these go on + * the front, not the back; also notice that we do not check USAGE * permissions for these. */ if (!list_member_oid(oidlist, PG_CATALOG_NAMESPACE)) @@ -1675,8 +1672,8 @@ recomputeNamespacePath(void) oidlist = lcons_oid(mySpecialNamespace, oidlist); /* - * Now that we've successfully built the new list of namespace OIDs, - * save it in permanent storage. + * Now that we've successfully built the new list of namespace OIDs, save + * it in permanent storage. */ oldcxt = MemoryContextSwitchTo(TopMemoryContext); newpath = list_copy(oidlist); @@ -1717,14 +1714,13 @@ InitTempTableNamespace(void) /* * First, do permission check to see if we are authorized to make temp - * tables. We use a nonstandard error message here since - * "databasename: permission denied" might be a tad cryptic. + * tables. We use a nonstandard error message here since "databasename: + * permission denied" might be a tad cryptic. * - * Note that ACL_CREATE_TEMP rights are rechecked in - * pg_namespace_aclmask; that's necessary since current user ID could - * change during the session. But there's no need to make the - * namespace in the first place until a temp table creation request is - * made by someone with appropriate rights. + * Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask; + * that's necessary since current user ID could change during the session. + * But there's no need to make the namespace in the first place until a + * temp table creation request is made by someone with appropriate rights. */ if (pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE_TEMP) != ACLCHECK_OK) @@ -1741,13 +1737,12 @@ InitTempTableNamespace(void) if (!OidIsValid(namespaceId)) { /* - * First use of this temp namespace in this database; create it. - * The temp namespaces are always owned by the superuser. We - * leave their permissions at default --- i.e., no access except - * to superuser --- to ensure that unprivileged users can't peek - * at other backends' temp tables. This works because the places - * that access the temp namespace for my own backend skip - * permissions checks on it. + * First use of this temp namespace in this database; create it. The + * temp namespaces are always owned by the superuser. We leave their + * permissions at default --- i.e., no access except to superuser --- + * to ensure that unprivileged users can't peek at other backends' + * temp tables. This works because the places that access the temp + * namespace for my own backend skip permissions checks on it. */ namespaceId = NamespaceCreate(namespaceName, BOOTSTRAP_SUPERUSERID); /* Advance command counter to make namespace visible */ @@ -1756,16 +1751,16 @@ InitTempTableNamespace(void) else { /* - * If the namespace already exists, clean it out (in case the - * former owner crashed without doing so). + * If the namespace already exists, clean it out (in case the former + * owner crashed without doing so). */ RemoveTempRelations(namespaceId); } /* - * Okay, we've prepared the temp namespace ... but it's not committed - * yet, so all our work could be undone by transaction rollback. Set - * flag for AtEOXact_Namespace to know what to do. + * Okay, we've prepared the temp namespace ... 
but it's not committed yet, + * so all our work could be undone by transaction rollback. Set flag for + * AtEOXact_Namespace to know what to do. */ myTempNamespace = namespaceId; @@ -1784,11 +1779,11 @@ AtEOXact_Namespace(bool isCommit) { /* * If we abort the transaction in which a temp namespace was selected, - * we'll have to do any creation or cleanout work over again. So, - * just forget the namespace entirely until next time. On the other - * hand, if we commit then register an exit callback to clean out the - * temp tables at backend shutdown. (We only want to register the - * callback once per session, so this is a good place to do it.) + * we'll have to do any creation or cleanout work over again. So, just + * forget the namespace entirely until next time. On the other hand, if + * we commit then register an exit callback to clean out the temp tables + * at backend shutdown. (We only want to register the callback once per + * session, so this is a good place to do it.) */ if (myTempNamespaceSubID != InvalidSubTransactionId) { @@ -1852,9 +1847,9 @@ RemoveTempRelations(Oid tempNamespaceId) ObjectAddress object; /* - * We want to get rid of everything in the target namespace, but not - * the namespace itself (deleting it only to recreate it later would - * be a waste of cycles). We do this by finding everything that has a + * We want to get rid of everything in the target namespace, but not the + * namespace itself (deleting it only to recreate it later would be a + * waste of cycles). We do this by finding everything that has a * dependency on the namespace. */ object.classId = NamespaceRelationId; @@ -1916,15 +1911,13 @@ assign_search_path(const char *newval, bool doit, GucSource source) /* * Verify that all the names are either valid namespace names or * "$user". We do not require $user to correspond to a valid - * namespace. We do not check for USAGE rights, either; should - * we? + * namespace. We do not check for USAGE rights, either; should we? * - * When source == PGC_S_TEST, we are checking the argument of an - * ALTER DATABASE SET or ALTER USER SET command. It could be that - * the intended use of the search path is for some other database, - * so we should not error out if it mentions schemas not present - * in the current database. We reduce the message to NOTICE - * instead. + * When source == PGC_S_TEST, we are checking the argument of an ALTER + * DATABASE SET or ALTER USER SET command. It could be that the + * intended use of the search path is for some other database, so we + * should not error out if it mentions schemas not present in the + * current database. We reduce the message to NOTICE instead. */ foreach(l, namelist) { @@ -1937,7 +1930,7 @@ assign_search_path(const char *newval, bool doit, GucSource source) 0, 0, 0)) ereport((source == PGC_S_TEST) ? NOTICE : ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("schema \"%s\" does not exist", curname))); + errmsg("schema \"%s\" does not exist", curname))); } } @@ -1945,9 +1938,9 @@ assign_search_path(const char *newval, bool doit, GucSource source) list_free(namelist); /* - * We mark the path as needing recomputation, but don't do anything - * until it's needed. This avoids trying to do database access during - * GUC initialization. + * We mark the path as needing recomputation, but don't do anything until + * it's needed. This avoids trying to do database access during GUC + * initialization. 
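The assign_search_path comments above explain that each listed name must be a valid schema or "$user", and that with source == PGC_S_TEST a missing schema only earns a NOTICE. A sketch of that verification loop; SearchSysCacheExists and the "$user" shortcut are assumptions, while the ereport call matches the fragment shown in this hunk:

foreach(l, namelist)
{
    char   *curname = (char *) lfirst(l);

    if (strcmp(curname, "$user") == 0)
        continue;               /* need not correspond to a real schema */

    if (!SearchSysCacheExists(NAMESPACENAME,
                              CStringGetDatum(curname),
                              0, 0, 0))
        ereport((source == PGC_S_TEST) ? NOTICE : ERROR,
                (errcode(ERRCODE_UNDEFINED_SCHEMA),
                 errmsg("schema \"%s\" does not exist", curname)));
}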
*/ if (doit) namespaceSearchPathValid = false; @@ -1967,8 +1960,7 @@ InitializeSearchPath(void) { /* * In bootstrap mode, the search path must be 'pg_catalog' so that - * tables are created in the proper namespace; ignore the GUC - * setting. + * tables are created in the proper namespace; ignore the GUC setting. */ MemoryContext oldcxt; @@ -1983,8 +1975,8 @@ InitializeSearchPath(void) else { /* - * In normal mode, arrange for a callback on any syscache - * invalidation of pg_namespace rows. + * In normal mode, arrange for a callback on any syscache invalidation + * of pg_namespace rows. */ CacheRegisterSyscacheCallback(NAMESPACEOID, NamespaceCallback, diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index 26491e22a15..fb7562e3062 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.75 2005/04/14 20:03:23 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.76 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -76,16 +76,16 @@ AggregateCreate(const char *aggName, elog(ERROR, "aggregate must have a transition function"); /* - * If transtype is polymorphic, basetype must be polymorphic also; - * else we will have no way to deduce the actual transtype. + * If transtype is polymorphic, basetype must be polymorphic also; else we + * will have no way to deduce the actual transtype. */ if ((aggTransType == ANYARRAYOID || aggTransType == ANYELEMENTOID) && !(aggBaseType == ANYARRAYOID || aggBaseType == ANYELEMENTOID)) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot determine transition data type"), - errdetail("An aggregate using \"anyarray\" or \"anyelement\" as " - "transition type must have one of them as its base type."))); + errdetail("An aggregate using \"anyarray\" or \"anyelement\" as " + "transition type must have one of them as its base type."))); /* handle transfn */ fnArgs[0] = aggTransType; @@ -101,13 +101,13 @@ AggregateCreate(const char *aggName, /* * Return type of transfn (possibly after refinement by - * enforce_generic_type_consistency, if transtype isn't polymorphic) - * must exactly match declared transtype. + * enforce_generic_type_consistency, if transtype isn't polymorphic) must + * exactly match declared transtype. * - * In the non-polymorphic-transtype case, it might be okay to allow a - * rettype that's binary-coercible to transtype, but I'm not quite - * convinced that it's either safe or useful. When transtype is - * polymorphic we *must* demand exact equality. + * In the non-polymorphic-transtype case, it might be okay to allow a rettype + * that's binary-coercible to transtype, but I'm not quite convinced that + * it's either safe or useful. When transtype is polymorphic we *must* + * demand exact equality. */ if (rettype != aggTransType) ereport(ERROR, @@ -124,10 +124,9 @@ AggregateCreate(const char *aggName, proc = (Form_pg_proc) GETSTRUCT(tup); /* - * If the transfn is strict and the initval is NULL, make sure input - * type and transtype are the same (or at least binary-compatible), so - * that it's OK to use the first input value as the initial - * transValue. + * If the transfn is strict and the initval is NULL, make sure input type + * and transtype are the same (or at least binary-compatible), so that + * it's OK to use the first input value as the initial transValue. 
*/ if (proc->proisstrict && agginitval == NULL) { @@ -155,20 +154,20 @@ AggregateCreate(const char *aggName, Assert(OidIsValid(finaltype)); /* - * If finaltype (i.e. aggregate return type) is polymorphic, basetype - * must be polymorphic also, else parser will fail to deduce result - * type. (Note: given the previous test on transtype and basetype, - * this cannot happen, unless someone has snuck a finalfn definition - * into the catalogs that itself violates the rule against polymorphic - * result with no polymorphic input.) + * If finaltype (i.e. aggregate return type) is polymorphic, basetype must + * be polymorphic also, else parser will fail to deduce result type. + * (Note: given the previous test on transtype and basetype, this cannot + * happen, unless someone has snuck a finalfn definition into the catalogs + * that itself violates the rule against polymorphic result with no + * polymorphic input.) */ if ((finaltype == ANYARRAYOID || finaltype == ANYELEMENTOID) && !(aggBaseType == ANYARRAYOID || aggBaseType == ANYELEMENTOID)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot determine result data type"), - errdetail("An aggregate returning \"anyarray\" or \"anyelement\" " - "must have one of them as its base type."))); + errdetail("An aggregate returning \"anyarray\" or \"anyelement\" " + "must have one of them as its base type."))); /* handle sortop, if supplied */ if (aggsortopName) @@ -178,8 +177,7 @@ AggregateCreate(const char *aggName, /* * Everything looks okay. Try to create the pg_proc entry for the - * aggregate. (This could fail if there's already a conflicting - * entry.) + * aggregate. (This could fail if there's already a conflicting entry.) */ fnArgs[0] = aggBaseType; @@ -198,7 +196,7 @@ AggregateCreate(const char *aggName, false, /* isStrict (not needed for agg) */ PROVOLATILE_IMMUTABLE, /* volatility (not * needed for agg) */ - buildoidvector(fnArgs, 1), /* paramTypes */ + buildoidvector(fnArgs, 1), /* paramTypes */ PointerGetDatum(NULL), /* allParamTypes */ PointerGetDatum(NULL), /* parameterModes */ PointerGetDatum(NULL)); /* parameterNames */ @@ -235,10 +233,9 @@ AggregateCreate(const char *aggName, heap_close(aggdesc, RowExclusiveLock); /* - * Create dependencies for the aggregate (above and beyond those - * already made by ProcedureCreate). Note: we don't need an explicit - * dependency on aggTransType since we depend on it indirectly through - * transfn. + * Create dependencies for the aggregate (above and beyond those already + * made by ProcedureCreate). Note: we don't need an explicit dependency + * on aggTransType since we depend on it indirectly through transfn. */ myself.classId = ProcedureRelationId; myself.objectId = procOid; @@ -288,8 +285,8 @@ lookup_agg_function(List *fnName, * func_get_detail looks up the function in the catalogs, does * disambiguation for polymorphic functions, handles inheritance, and * returns the funcid and type and set or singleton status of the - * function's return value. it also returns the true argument types - * to the function. + * function's return value. it also returns the true argument types to + * the function. 
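The strict-transition-function rule whose if condition opens this hunk (a strict transfn with a NULL initval forces the first input to become the initial transValue, so input type and transtype must be binary-compatible) can be sketched as follows; IsBinaryCoercible and the error wording are assumptions, since the hunk shows only the condition:

if (proc->proisstrict && agginitval == NULL)
{
    /* First input value will be used as the initial transValue. */
    if (!IsBinaryCoercible(aggBaseType, aggTransType))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                 errmsg("must not omit initial value when transition function is strict and transition type is not compatible with input type")));
}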
*/ fdresult = func_get_detail(fnName, NIL, nargs, input_types, &fnOid, rettype, &retset, @@ -300,21 +297,20 @@ lookup_agg_function(List *fnName, ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("function %s does not exist", - func_signature_string(fnName, nargs, input_types)))); + func_signature_string(fnName, nargs, input_types)))); if (retset) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("function %s returns a set", - func_signature_string(fnName, nargs, input_types)))); + func_signature_string(fnName, nargs, input_types)))); /* - * If the given type(s) are all polymorphic, there's nothing we can - * check. Otherwise, enforce consistency, and possibly refine the - * result type. + * If the given type(s) are all polymorphic, there's nothing we can check. + * Otherwise, enforce consistency, and possibly refine the result type. */ if ((input_types[0] == ANYARRAYOID || input_types[0] == ANYELEMENTOID) && (nargs == 1 || - (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID))) + (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID))) { /* nothing to check here */ } @@ -327,8 +323,8 @@ lookup_agg_function(List *fnName, } /* - * func_get_detail will find functions requiring run-time argument - * type coercion, but nodeAgg.c isn't prepared to deal with that + * func_get_detail will find functions requiring run-time argument type + * coercion, but nodeAgg.c isn't prepared to deal with that */ if (true_oid_array[0] != ANYARRAYOID && true_oid_array[0] != ANYELEMENTOID && @@ -336,7 +332,7 @@ lookup_agg_function(List *fnName, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("function %s requires run-time type coercion", - func_signature_string(fnName, nargs, true_oid_array)))); + func_signature_string(fnName, nargs, true_oid_array)))); if (nargs == 2 && true_oid_array[1] != ANYARRAYOID && @@ -345,7 +341,7 @@ lookup_agg_function(List *fnName, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("function %s requires run-time type coercion", - func_signature_string(fnName, nargs, true_oid_array)))); + func_signature_string(fnName, nargs, true_oid_array)))); /* Check aggregate creator has permission to call the function */ aclresult = pg_proc_aclcheck(fnOid, GetUserId(), ACL_EXECUTE); diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index b2cc3d5c474..cf18051f52d 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.26 2005/08/01 04:03:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.27 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -140,7 +140,7 @@ CreateConstraintEntry(const char *constraintName, */ if (conBin) values[Anum_pg_constraint_conbin - 1] = DirectFunctionCall1(textin, - CStringGetDatum(conBin)); + CStringGetDatum(conBin)); else nulls[Anum_pg_constraint_conbin - 1] = 'n'; @@ -149,7 +149,7 @@ CreateConstraintEntry(const char *constraintName, */ if (conSrc) values[Anum_pg_constraint_consrc - 1] = DirectFunctionCall1(textin, - CStringGetDatum(conSrc)); + CStringGetDatum(conSrc)); else nulls[Anum_pg_constraint_consrc - 1] = 'n'; @@ -169,8 +169,8 @@ CreateConstraintEntry(const char *constraintName, if (OidIsValid(relId)) { /* - * Register auto dependency from constraint to owning relation, or - * to specific column(s) if any are mentioned. 
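The dependency-registration comments above all follow the same ObjectAddress pattern: fill in (classId, objectId, objectSubId) for the referenced object and record the edge with the desired dependency type. A sketch of the whole-relation auto dependency described here; conobject is assumed to already hold the new constraint's address, and per-column dependencies would instead set objectSubId to each attribute number:

ObjectAddress relobject;

relobject.classId = RelationRelationId;
relobject.objectId = relId;
relobject.objectSubId = 0;          /* whole relation, not a single column */

/* Constraint is dropped automatically if the relation goes away. */
recordDependencyOn(&conobject, &relobject, DEPENDENCY_AUTO);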
+ * Register auto dependency from constraint to owning relation, or to + * specific column(s) if any are mentioned. */ ObjectAddress relobject; @@ -210,8 +210,8 @@ CreateConstraintEntry(const char *constraintName, if (OidIsValid(foreignRelId)) { /* - * Register normal dependency from constraint to foreign relation, - * or to specific column(s) if any are mentioned. + * Register normal dependency from constraint to foreign relation, or + * to specific column(s) if any are mentioned. */ ObjectAddress relobject; @@ -252,8 +252,8 @@ CreateConstraintEntry(const char *constraintName, if (conExpr != NULL) { /* - * Register dependencies from constraint to objects mentioned in - * CHECK expression. + * Register dependencies from constraint to objects mentioned in CHECK + * expression. */ recordDependencyOnSingleRelExpr(&conobject, conExpr, relId, DEPENDENCY_NORMAL, @@ -450,15 +450,15 @@ RemoveConstraintById(Oid conId) Relation rel; /* - * If the constraint is for a relation, open and exclusive-lock - * the relation it's for. + * If the constraint is for a relation, open and exclusive-lock the + * relation it's for. */ rel = heap_open(con->conrelid, AccessExclusiveLock); /* - * We need to update the relcheck count if it is a check - * constraint being dropped. This update will force backends to - * rebuild relcache entries when we commit. + * We need to update the relcheck count if it is a check constraint + * being dropped. This update will force backends to rebuild relcache + * entries when we commit. */ if (con->contype == CONSTRAINT_CHECK) { @@ -495,11 +495,10 @@ RemoveConstraintById(Oid conId) else if (OidIsValid(con->contypid)) { /* - * XXX for now, do nothing special when dropping a domain - * constraint + * XXX for now, do nothing special when dropping a domain constraint * - * Probably there should be some form of locking on the domain type, - * but we have no such concept at the moment. + * Probably there should be some form of locking on the domain type, but + * we have no such concept at the moment. */ } else @@ -531,9 +530,9 @@ GetConstraintNameForTrigger(Oid triggerId) HeapTuple tup; /* - * We must grovel through pg_depend to find the owning constraint. - * Perhaps pg_trigger should have a column for the owning constraint ... - * but right now this is not performance-critical code. + * We must grovel through pg_depend to find the owning constraint. Perhaps + * pg_trigger should have a column for the owning constraint ... but right + * now this is not performance-critical code. 
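The "grovel through pg_depend" that the GetConstraintNameForTrigger comment above describes is a plain systable scan over the dependency catalog, keyed by the trigger's address and filtered to the internal dependency that points at its owning constraint. A sketch under that reading; DependDependerIndexId, TriggerRelationId and the deptype filter are assumptions about code not shown in this hunk:

ScanKeyData key[2];
SysScanDesc scan;
HeapTuple   tup;
Oid         constraintId = InvalidOid;

ScanKeyInit(&key[0],
            Anum_pg_depend_classid,
            BTEqualStrategyNumber, F_OIDEQ,
            ObjectIdGetDatum(TriggerRelationId));
ScanKeyInit(&key[1],
            Anum_pg_depend_objid,
            BTEqualStrategyNumber, F_OIDEQ,
            ObjectIdGetDatum(triggerId));

scan = systable_beginscan(depRel, DependDependerIndexId, true,
                          SnapshotNow, 2, key);

while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
    Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);

    if (deprec->refclassid == ConstraintRelationId &&
        deprec->deptype == DEPENDENCY_INTERNAL)
    {
        constraintId = deprec->refobjid;    /* the owning constraint */
        break;
    }
}
systable_endscan(scan);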
*/ depRel = heap_open(DependRelationId, AccessShareLock); @@ -567,7 +566,7 @@ GetConstraintNameForTrigger(Oid triggerId) heap_close(depRel, AccessShareLock); if (!OidIsValid(constraintId)) - return NULL; /* no owning constraint found */ + return NULL; /* no owning constraint found */ conRel = heap_open(ConstraintRelationId, AccessShareLock); @@ -611,10 +610,10 @@ void AlterConstraintNamespaces(Oid ownerId, Oid oldNspId, Oid newNspId, bool isType) { - Relation conRel; - ScanKeyData key[1]; - SysScanDesc scan; - HeapTuple tup; + Relation conRel; + ScanKeyData key[1]; + SysScanDesc scan; + HeapTuple tup; conRel = heap_open(ConstraintRelationId, RowExclusiveLock); diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c index 0cdca75f3ad..21adfbcf94a 100644 --- a/src/backend/catalog/pg_conversion.c +++ b/src/backend/catalog/pg_conversion.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.26 2005/09/24 17:53:12 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.27 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -67,17 +67,17 @@ ConversionCreate(const char *conname, Oid connamespace, if (def) { /* - * make sure there is no existing default <for encoding><to - * encoding> pair in this name space + * make sure there is no existing default <for encoding><to encoding> + * pair in this name space */ if (FindDefaultConversion(connamespace, conforencoding, contoencoding)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("default conversion for %s to %s already exists", - pg_encoding_to_char(conforencoding), - pg_encoding_to_char(contoencoding)))); + errmsg("default conversion for %s to %s already exists", + pg_encoding_to_char(conforencoding), + pg_encoding_to_char(contoencoding)))); } /* open pg_conversion */ @@ -150,7 +150,7 @@ ConversionDrop(Oid conversionOid, DropBehavior behavior) if (!superuser() && ((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId()) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION, - NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname)); + NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname)); ReleaseSysCache(tuple); @@ -330,9 +330,8 @@ pg_convert_using(PG_FUNCTION_ARGS) ReleaseSysCache(tuple); /* - * build text result structure. we cannot use textin() here, since - * textin assumes that input string encoding is same as database - * encoding. + * build text result structure. we cannot use textin() here, since textin + * assumes that input string encoding is same as database encoding. */ len = strlen(result) + VARHDRSZ; retval = palloc(len); diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index bf910d09a5d..c8f9e53212d 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.14 2005/08/01 04:03:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.15 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,8 +62,8 @@ recordMultipleDependencies(const ObjectAddress *depender, return; /* nothing to do */ /* - * During bootstrap, do nothing since pg_depend may not exist yet. - * initdb will fill in appropriate pg_depend entries after bootstrap. + * During bootstrap, do nothing since pg_depend may not exist yet. 
initdb + * will fill in appropriate pg_depend entries after bootstrap. */ if (IsBootstrapProcessingMode()) return; @@ -78,9 +78,9 @@ recordMultipleDependencies(const ObjectAddress *depender, for (i = 0; i < nreferenced; i++, referenced++) { /* - * If the referenced object is pinned by the system, there's no - * real need to record dependencies on it. This saves lots of - * space in pg_depend, so it's worth the time taken to check. + * If the referenced object is pinned by the system, there's no real + * need to record dependencies on it. This saves lots of space in + * pg_depend, so it's worth the time taken to check. */ if (!isObjectPinned(referenced, dependDesc)) { @@ -190,11 +190,10 @@ changeDependencyFor(Oid classId, Oid objectId, depRel = heap_open(DependRelationId, RowExclusiveLock); /* - * If oldRefObjectId is pinned, there won't be any dependency entries - * on it --- we can't cope in that case. (This isn't really worth - * expending code to fix, in current usage; it just means you can't - * rename stuff out of pg_catalog, which would likely be a bad move - * anyway.) + * If oldRefObjectId is pinned, there won't be any dependency entries on + * it --- we can't cope in that case. (This isn't really worth expending + * code to fix, in current usage; it just means you can't rename stuff out + * of pg_catalog, which would likely be a bad move anyway.) */ objAddr.classId = refClassId; objAddr.objectId = oldRefObjectId; @@ -203,12 +202,12 @@ changeDependencyFor(Oid classId, Oid objectId, if (isObjectPinned(&objAddr, depRel)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot remove dependency on %s because it is a system object", - getObjectDescription(&objAddr)))); + errmsg("cannot remove dependency on %s because it is a system object", + getObjectDescription(&objAddr)))); /* - * We can handle adding a dependency on something pinned, though, - * since that just means deleting the dependency entry. + * We can handle adding a dependency on something pinned, though, since + * that just means deleting the dependency entry. */ objAddr.objectId = newRefObjectId; @@ -293,9 +292,9 @@ isObjectPinned(const ObjectAddress *object, Relation rel) /* * Since we won't generate additional pg_depend entries for pinned - * objects, there can be at most one entry referencing a pinned - * object. Hence, it's sufficient to look at the first returned - * tuple; we don't need to loop. + * objects, there can be at most one entry referencing a pinned object. + * Hence, it's sufficient to look at the first returned tuple; we don't + * need to loop. */ tup = systable_getnext(scan); if (HeapTupleIsValid(tup)) diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index 903a46ac0f0..8dea69a234f 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.93 2005/07/07 20:39:57 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.94 2005/10/15 02:49:14 momjian Exp $ * * NOTES * these routines moved here from commands/define.c and somewhat cleaned up. @@ -90,10 +90,10 @@ validOperatorName(const char *name) /* * For SQL92 compatibility, '+' and '-' cannot be the last char of a - * multi-char operator unless the operator contains chars that are not - * in SQL92 operators. The idea is to lex '=-' as two operators, but - * not to forbid operator names like '?-' that could not be sequences - * of SQL92 operators. 
+ * multi-char operator unless the operator contains chars that are not in + * SQL92 operators. The idea is to lex '=-' as two operators, but not to + * forbid operator names like '?-' that could not be sequences of SQL92 + * operators. */ if (len > 1 && (name[len - 1] == '+' || @@ -228,14 +228,14 @@ OperatorShellMake(const char *operatorName, } /* - * initialize values[] with the operator name and input data types. - * Note that oprcode is set to InvalidOid, indicating it's a shell. + * initialize values[] with the operator name and input data types. Note + * that oprcode is set to InvalidOid, indicating it's a shell. */ i = 0; namestrcpy(&oname, operatorName); values[i++] = NameGetDatum(&oname); /* oprname */ values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */ - values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */ + values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */ values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */ values[i++] = BoolGetDatum(false); /* oprcanhash */ values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */ @@ -410,7 +410,7 @@ OperatorCreate(const char *operatorName, if (!OidIsValid(leftTypeId) && !OidIsValid(rightTypeId)) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("at least one of leftarg or rightarg must be specified"))); + errmsg("at least one of leftarg or rightarg must be specified"))); if (!(OidIsValid(leftTypeId) && OidIsValid(rightTypeId))) { @@ -418,11 +418,11 @@ OperatorCreate(const char *operatorName, if (commutatorName) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("only binary operators can have commutators"))); + errmsg("only binary operators can have commutators"))); if (joinName) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("only binary operators can have join selectivity"))); + errmsg("only binary operators can have join selectivity"))); if (canHash) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), @@ -451,9 +451,9 @@ OperatorCreate(const char *operatorName, */ /* - * Look up registered procedures -- find the return type of - * procedureName to place in "result" field. Do this before shells are - * created so we don't have to worry about deleting them later. + * Look up registered procedures -- find the return type of procedureName + * to place in "result" field. Do this before shells are created so we + * don't have to worry about deleting them later. */ if (!OidIsValid(leftTypeId)) { @@ -519,7 +519,7 @@ OperatorCreate(const char *operatorName, namestrcpy(&oname, operatorName); values[i++] = NameGetDatum(&oname); /* oprname */ values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */ - values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */ + values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */ values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */ values[i++] = BoolGetDatum(canHash); /* oprcanhash */ values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */ @@ -660,14 +660,14 @@ OperatorCreate(const char *operatorName, /* * If a commutator and/or negator link is provided, update the other - * operator(s) to point at this one, if they don't already have a - * link. This supports an alternate style of operator definition - * wherein the user first defines one operator without giving negator - * or commutator, then defines the other operator of the pair with the - * proper commutator or negator attribute. 
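The SQL92 rule restated above ('+' or '-' may not end a multi-character operator unless the name contains a character that cannot appear in an SQL92 operator, so '=-' lexes as two operators while '?-' stays legal) is easy to model outside the backend. A standalone sketch; the character set of SQL92 operators ('+', '-', '*', '/', '<', '>', '=') is the only assumption:

#include <stdbool.h>
#include <string.h>

/* Model of the trailing '+'/'-' restriction described above. */
static bool
trailing_sign_ok(const char *name)
{
    size_t  len = strlen(name);

    if (len > 1 && (name[len - 1] == '+' || name[len - 1] == '-'))
    {
        /* Allowed only if some character is not an SQL92 operator char. */
        if (strspn(name, "+-*/<>=") == len)
            return false;       /* e.g. "=-" must lex as two operators */
    }
    return true;                /* e.g. "?-" is fine, '?' is not SQL92 */
}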
That style doesn't require - * creation of a shell, and it's the only style that worked right - * before Postgres version 6.5. This code also takes care of the - * situation where the new operator is its own commutator. + * operator(s) to point at this one, if they don't already have a link. + * This supports an alternate style of operator definition wherein the + * user first defines one operator without giving negator or commutator, + * then defines the other operator of the pair with the proper commutator + * or negator attribute. That style doesn't require creation of a shell, + * and it's the only style that worked right before Postgres version 6.5. + * This code also takes care of the situation where the new operator is + * its own commutator. */ if (selfCommutator) commutatorId = operatorObjectId; @@ -721,7 +721,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId, if (!isCommutator) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("operator cannot be its own negator or sort operator"))); + errmsg("operator cannot be its own negator or sort operator"))); return InvalidOid; } @@ -780,9 +780,9 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId) 0, 0, 0); /* - * if the commutator and negator are the same operator, do one update. - * XXX this is probably useless code --- I doubt it ever makes sense - * for commutator and negator to be the same thing... + * if the commutator and negator are the same operator, do one update. XXX + * this is probably useless code --- I doubt it ever makes sense for + * commutator and negator to be the same thing... */ if (commId == negId) { @@ -931,10 +931,10 @@ makeOperatorDependencies(HeapTuple tuple) * NOTE: we do not consider the operator to depend on the associated * operators oprcom, oprnegate, oprlsortop, oprrsortop, oprltcmpop, * oprgtcmpop. We would not want to delete this operator if those go - * away, but only reset the link fields; which is not a function that - * the dependency code can presently handle. (Something could perhaps - * be done with objectSubId though.) For now, it's okay to let those - * links dangle if a referenced operator is removed. + * away, but only reset the link fields; which is not a function that the + * dependency code can presently handle. (Something could perhaps be done + * with objectSubId though.) For now, it's okay to let those links dangle + * if a referenced operator is removed. */ /* Dependency on implementation function */ diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 691be63dc75..ab3de4ed4be 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.133 2005/09/24 22:54:35 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.134 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -115,9 +115,9 @@ ProcedureCreate(const char *procedureName, if (allParameterTypes != PointerGetDatum(NULL)) { /* - * We expect the array to be a 1-D OID array; verify that. We - * don't need to use deconstruct_array() since the array data is - * just going to look like a C array of OID values. + * We expect the array to be a 1-D OID array; verify that. We don't + * need to use deconstruct_array() since the array data is just going + * to look like a C array of OID values. 
*/ allParamCount = ARR_DIMS(DatumGetPointer(allParameterTypes))[0]; if (ARR_NDIM(DatumGetPointer(allParameterTypes)) != 1 || @@ -136,8 +136,8 @@ ProcedureCreate(const char *procedureName, /* * Do not allow return type ANYARRAY or ANYELEMENT unless at least one - * input argument is ANYARRAY or ANYELEMENT. Also, do not allow - * return type INTERNAL unless at least one input argument is INTERNAL. + * input argument is ANYARRAY or ANYELEMENT. Also, do not allow return + * type INTERNAL unless at least one input argument is INTERNAL. */ for (i = 0; i < parameterCount; i++) { @@ -158,9 +158,9 @@ ProcedureCreate(const char *procedureName, for (i = 0; i < allParamCount; i++) { /* - * We don't bother to distinguish input and output params here, - * so if there is, say, just an input INTERNAL param then we will - * still set internalOutParam. This is OK since we don't really + * We don't bother to distinguish input and output params here, so + * if there is, say, just an input INTERNAL param then we will + * still set internalOutParam. This is OK since we don't really * care. */ switch (allParams[i]) @@ -240,9 +240,9 @@ ProcedureCreate(const char *procedureName, else nulls[Anum_pg_proc_proargnames - 1] = 'n'; values[Anum_pg_proc_prosrc - 1] = DirectFunctionCall1(textin, - CStringGetDatum(prosrc)); + CStringGetDatum(prosrc)); values[Anum_pg_proc_probin - 1] = DirectFunctionCall1(textin, - CStringGetDatum(probin)); + CStringGetDatum(probin)); /* start out with empty permissions */ nulls[Anum_pg_proc_proacl - 1] = 'n'; @@ -264,8 +264,8 @@ ProcedureCreate(const char *procedureName, if (!replace) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), - errmsg("function \"%s\" already exists with same argument types", - procedureName))); + errmsg("function \"%s\" already exists with same argument types", + procedureName))); if (!pg_proc_ownercheck(HeapTupleGetOid(oldtup), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, procedureName); @@ -295,14 +295,14 @@ ProcedureCreate(const char *procedureName, parameterModes, parameterNames); if (olddesc == NULL && newdesc == NULL) - /* ok, both are runtime-defined RECORDs */ ; + /* ok, both are runtime-defined RECORDs */ ; else if (olddesc == NULL || newdesc == NULL || !equalTupleDescs(olddesc, newdesc)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("cannot change return type of existing function"), - errdetail("Row type defined by OUT parameters is different."), - errhint("Use DROP FUNCTION first."))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("cannot change return type of existing function"), + errdetail("Row type defined by OUT parameters is different."), + errhint("Use DROP FUNCTION first."))); } /* Can't change aggregate status, either */ @@ -422,8 +422,8 @@ fmgr_internal_validator(PG_FUNCTION_ARGS) char *prosrc; /* - * We do not honor check_function_bodies since it's unlikely the - * function name will be found later if it isn't there now. + * We do not honor check_function_bodies since it's unlikely the function + * name will be found later if it isn't there now. */ tuple = SearchSysCache(PROCOID, @@ -471,10 +471,9 @@ fmgr_c_validator(PG_FUNCTION_ARGS) char *probin; /* - * It'd be most consistent to skip the check if - * !check_function_bodies, but the purpose of that switch is to be - * helpful for pg_dump loading, and for pg_dump loading it's much - * better if we *do* check. 
+ * It'd be most consistent to skip the check if !check_function_bodies, + * but the purpose of that switch is to be helpful for pg_dump loading, + * and for pg_dump loading it's much better if we *do* check. */ tuple = SearchSysCache(PROCOID, @@ -554,8 +553,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("SQL functions cannot have arguments of type %s", - format_type_be(proc->proargtypes.values[i])))); + errmsg("SQL functions cannot have arguments of type %s", + format_type_be(proc->proargtypes.values[i])))); } } @@ -577,13 +576,13 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) error_context_stack = &sqlerrcontext; /* - * We can't do full prechecking of the function definition if - * there are any polymorphic input types, because actual datatypes - * of expression results will be unresolvable. The check will be - * done at runtime instead. + * We can't do full prechecking of the function definition if there + * are any polymorphic input types, because actual datatypes of + * expression results will be unresolvable. The check will be done at + * runtime instead. * - * We can run the text through the raw parser though; this will at - * least catch silly syntactic errors. + * We can run the text through the raw parser though; this will at least + * catch silly syntactic errors. */ if (!haspolyarg) { @@ -652,8 +651,8 @@ function_parse_error_transpose(const char *prosrc) * Nothing to do unless we are dealing with a syntax error that has a * cursor position. * - * Some PLs may prefer to report the error position as an internal error - * to begin with, so check that too. + * Some PLs may prefer to report the error position as an internal error to + * begin with, so check that too. */ origerrposition = geterrposition(); if (origerrposition <= 0) @@ -703,10 +702,10 @@ match_prosrc_to_query(const char *prosrc, const char *queryText, int cursorpos) { /* - * Rather than fully parsing the CREATE FUNCTION command, we just scan - * the command looking for $prosrc$ or 'prosrc'. This could be fooled - * (though not in any very probable scenarios), so fail if we find - * more than one match. + * Rather than fully parsing the CREATE FUNCTION command, we just scan the + * command looking for $prosrc$ or 'prosrc'. This could be fooled (though + * not in any very probable scenarios), so fail if we find more than one + * match. */ int prosrclen = strlen(prosrc); int querylen = strlen(queryText); @@ -722,8 +721,8 @@ match_prosrc_to_query(const char *prosrc, const char *queryText, { /* * Found a $foo$ match. Since there are no embedded quoting - * characters in a dollar-quoted literal, we don't have to do - * any fancy arithmetic; just offset by the starting position. + * characters in a dollar-quoted literal, we don't have to do any + * fancy arithmetic; just offset by the starting position. */ if (matchpos) return 0; /* multiple matches, fail */ @@ -735,9 +734,8 @@ match_prosrc_to_query(const char *prosrc, const char *queryText, cursorpos, &newcursorpos)) { /* - * Found a 'foo' match. match_prosrc_to_literal() has - * adjusted for any quotes or backslashes embedded in the - * literal. + * Found a 'foo' match. match_prosrc_to_literal() has adjusted + * for any quotes or backslashes embedded in the literal. */ if (matchpos) return 0; /* multiple matches, fail */ @@ -769,8 +767,8 @@ match_prosrc_to_literal(const char *prosrc, const char *literal, * string literal. It does not handle the SQL syntax for literals * continued across line boundaries. 
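[Aside: the "fail if we find more than one match" rule used by match_prosrc_to_query can be illustrated with a minimal standalone sketch; the helper name is hypothetical and the quoting/escaping details of the real code are omitted.]

    #include <string.h>

    /*
     * Return the offset of "needle" in "haystack" if it occurs exactly once,
     * or -1 if it occurs zero times or more than once (ambiguous match).
     */
    static int
    find_unique_match(const char *haystack, const char *needle)
    {
        const char *first = strstr(haystack, needle);

        if (first == NULL)
            return -1;
        if (strstr(first + 1, needle) != NULL)
            return -1;          /* second occurrence: give up */
        return (int) (first - haystack);
    }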
* - * We do the comparison a character at a time, not a byte at a time, so - * that we can do the correct cursorpos math. + * We do the comparison a character at a time, not a byte at a time, so that + * we can do the correct cursorpos math. */ while (*prosrc) { diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index bd326b876b3..4cce7ba13cf 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.2 2005/08/30 01:07:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.3 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -36,22 +36,22 @@ typedef enum } objectType; static int getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2, - Oid **diff); -static Oid classIdGetDbId(Oid classId); + Oid **diff); +static Oid classIdGetDbId(Oid classId); static void shdepLockAndCheckObject(Oid classId, Oid objectId); static void shdepChangeDep(Relation sdepRel, Oid classid, Oid objid, - Oid refclassid, Oid refobjid, - SharedDependencyType deptype); + Oid refclassid, Oid refobjid, + SharedDependencyType deptype); static void shdepAddDependency(Relation sdepRel, Oid classId, Oid objectId, - Oid refclassId, Oid refobjId, - SharedDependencyType deptype); + Oid refclassId, Oid refobjId, + SharedDependencyType deptype); static void shdepDropDependency(Relation sdepRel, Oid classId, Oid objectId, - Oid refclassId, Oid refobjId, - SharedDependencyType deptype); + Oid refclassId, Oid refobjId, + SharedDependencyType deptype); static void storeObjectDescription(StringInfo descs, objectType type, - ObjectAddress *object, - SharedDependencyType deptype, - int count); + ObjectAddress *object, + SharedDependencyType deptype, + int count); static bool isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel); @@ -70,7 +70,7 @@ static bool isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel); */ void recordSharedDependencyOn(ObjectAddress *depender, - ObjectAddress *referenced, + ObjectAddress *referenced, SharedDependencyType deptype) { Relation sdepRel; @@ -95,7 +95,7 @@ recordSharedDependencyOn(ObjectAddress *depender, sdepRel)) { shdepAddDependency(sdepRel, depender->classId, depender->objectId, - referenced->classId, referenced->objectId, + referenced->classId, referenced->objectId, deptype); } @@ -132,11 +132,11 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner) * shdepChangeDep * * Update shared dependency records to account for an updated referenced - * object. This is an internal workhorse for operations such as changing + * object. This is an internal workhorse for operations such as changing * an object's owner. * * There must be no more than one existing entry for the given dependent - * object and dependency type! So in practice this can only be used for + * object and dependency type! So in practice this can only be used for * updating SHARED_DEPENDENCY_OWNER entries, which should have that property. * * If there is no previous entry, we assume it was referencing a PINned @@ -154,12 +154,12 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid, Oid dbid = classIdGetDbId(classid); HeapTuple oldtup = NULL; HeapTuple scantup; - ScanKeyData key[3]; - SysScanDesc scan; + ScanKeyData key[3]; + SysScanDesc scan; /* - * Make sure the new referenced object doesn't go away while we record - * the dependency. 
+ * Make sure the new referenced object doesn't go away while we record the + * dependency. */ shdepLockAndCheckObject(refclassid, refobjid); @@ -167,11 +167,11 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid, * Look for a previous entry */ ScanKeyInit(&key[0], - Anum_pg_shdepend_dbid, + Anum_pg_shdepend_dbid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(dbid)); ScanKeyInit(&key[1], - Anum_pg_shdepend_classid, + Anum_pg_shdepend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(classid)); ScanKeyInit(&key[2], @@ -181,7 +181,7 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid, scan = systable_beginscan(sdepRel, SharedDependDependerIndexId, true, SnapshotNow, 3, key); - + while ((scantup = systable_getnext(scan)) != NULL) { /* Ignore if not of the target dependency type */ @@ -220,8 +220,8 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid, else { /* Need to insert new entry */ - Datum values[Natts_pg_shdepend]; - bool nulls[Natts_pg_shdepend]; + Datum values[Natts_pg_shdepend]; + bool nulls[Natts_pg_shdepend]; memset(nulls, 0, sizeof(nulls)); @@ -234,8 +234,8 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid, values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(deptype); /* - * we are reusing oldtup just to avoid declaring a new variable, - * but it's certainly a new tuple + * we are reusing oldtup just to avoid declaring a new variable, but + * it's certainly a new tuple */ oldtup = heap_form_tuple(RelationGetDescr(sdepRel), values, nulls); simple_heap_insert(sdepRel, oldtup); @@ -271,7 +271,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId) * was previously granted some rights to the object. * * This step is analogous to aclnewowner's removal of duplicate entries - * in the ACL. We have to do it to handle this scenario: + * in the ACL. We have to do it to handle this scenario: * A grants some rights on an object to B * ALTER OWNER changes the object's owner to B * ALTER OWNER changes the object's owner to C @@ -296,7 +296,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId) * Helper for updateAclDependencies. * * Takes two Oid arrays and returns elements from the first not found in the - * second. We assume both arrays are sorted and de-duped, and that the + * second. We assume both arrays are sorted and de-duped, and that the * second array does not contain any values not found in the first. * * NOTE: Both input arrays are pfreed. @@ -304,17 +304,17 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId) static int getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2, Oid **diff) { - Oid *result; - int i, - j, - k = 0; + Oid *result; + int i, + j, + k = 0; AssertArg(nlist1 >= nlist2 && nlist2 >= 0); result = palloc(sizeof(Oid) * (nlist1 - nlist2)); *diff = result; - for (i = 0, j = 0; i < nlist1 && j < nlist2; ) + for (i = 0, j = 0; i < nlist1 && j < nlist2;) { if (list1[i] == list2[j]) { @@ -350,7 +350,7 @@ getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2, Oid **diff) /* * updateAclDependencies - * Update the pg_shdepend info for an object's ACL during GRANT/REVOKE. + * Update the pg_shdepend info for an object's ACL during GRANT/REVOKE. 
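[Aside: the sorted-list difference that getOidListDiff performs can be shown as a standalone sketch. Plain unsigned ints stand in for Oids and the function name is hypothetical; both inputs are assumed sorted ascending and de-duplicated, with the second a subset of the first, so one merge-style pass suffices.]

    #include <assert.h>

    /*
     * Write into "diff" the elements of list1 that are absent from list2.
     * Returns the number of elements written.
     */
    static int
    sorted_diff(const unsigned *list1, int n1,
                const unsigned *list2, int n2,
                unsigned *diff)
    {
        int i = 0, j = 0, k = 0;

        assert(n1 >= n2 && n2 >= 0);

        while (i < n1 && j < n2)
        {
            if (list1[i] == list2[j])
            {
                i++;            /* present in both lists: skip it */
                j++;
            }
            else
                diff[k++] = list1[i++];     /* only in list1: keep it */
        }
        while (i < n1)          /* remaining tail of list1 is all "missing" */
            diff[k++] = list1[i++];
        return k;
    }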
* * classId, objectId: identify the object whose ACL this is * ownerId: role owning the object @@ -398,12 +398,12 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant, /* Add or drop the respective dependency */ for (i = 0; i < ndiff; i++) { - Oid roleid = diff[i]; + Oid roleid = diff[i]; /* - * Skip the owner: he has an OWNER shdep entry instead. - * (This is not just a space optimization; it makes ALTER OWNER - * easier. See notes in changeDependencyOnOwner.) + * Skip the owner: he has an OWNER shdep entry instead. (This is + * not just a space optimization; it makes ALTER OWNER easier. + * See notes in changeDependencyOnOwner.) */ if (roleid == ownerId) continue; @@ -416,7 +416,7 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant, shdepAddDependency(sdepRel, classId, objectId, AuthIdRelationId, roleid, SHARED_DEPENDENCY_ACL); - else + else shdepDropDependency(sdepRel, classId, objectId, AuthIdRelationId, roleid, SHARED_DEPENDENCY_ACL); @@ -433,15 +433,15 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant, */ typedef struct { - Oid dbOid; - int count; + Oid dbOid; + int count; } remoteDep; /* * checkSharedDependencies * * Check whether there are shared dependency entries for a given shared - * object. Returns a string containing a newline-separated list of object + * object. Returns a string containing a newline-separated list of object * descriptions that depend on the shared object, or NULL if none is found. * * We can find three different kinds of dependencies: dependencies on objects @@ -456,20 +456,20 @@ char * checkSharedDependencies(Oid classId, Oid objectId) { Relation sdepRel; - ScanKeyData key[2]; - SysScanDesc scan; + ScanKeyData key[2]; + SysScanDesc scan; HeapTuple tup; int totalDeps = 0; int numLocalDeps = 0; int numSharedDeps = 0; List *remDeps = NIL; ListCell *cell; - ObjectAddress object; + ObjectAddress object; StringInfoData descs; /* - * We try to limit the number of reported dependencies to something - * sane, both for the user's sake and to avoid blowing out memory. + * We try to limit the number of reported dependencies to something sane, + * both for the user's sake and to avoid blowing out memory. */ #define MAX_REPORTED_DEPS 100 @@ -478,20 +478,20 @@ checkSharedDependencies(Oid classId, Oid objectId) sdepRel = heap_open(SharedDependRelationId, AccessShareLock); ScanKeyInit(&key[0], - Anum_pg_shdepend_refclassid, + Anum_pg_shdepend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(classId)); ScanKeyInit(&key[1], - Anum_pg_shdepend_refobjid, + Anum_pg_shdepend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objectId)); - + scan = systable_beginscan(sdepRel, SharedDependReferenceIndexId, true, SnapshotNow, 2, key); while (HeapTupleIsValid(tup = systable_getnext(scan))) { - Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tup); + Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tup); /* This case can be dispatched quickly */ if (sdepForm->deptype == SHARED_DEPENDENCY_PIN) @@ -502,7 +502,7 @@ checkSharedDependencies(Oid classId, Oid objectId) ereport(ERROR, (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), errmsg("cannot drop %s because it is required by the database system", - getObjectDescription(&object)))); + getObjectDescription(&object)))); } object.classId = sdepForm->classid; @@ -513,8 +513,8 @@ checkSharedDependencies(Oid classId, Oid objectId) * If it's a dependency local to this database or it's a shared * object, describe it. 
* - * If it's a remote dependency, keep track of it so we can report - * the number of them later. + * If it's a remote dependency, keep track of it so we can report the + * number of them later. */ if (sdepForm->dbid == MyDatabaseId) { @@ -537,10 +537,10 @@ checkSharedDependencies(Oid classId, Oid objectId) bool stored = false; /* - * XXX this info is kept on a simple List. Maybe it's not good + * XXX this info is kept on a simple List. Maybe it's not good * for performance, but using a hash table seems needlessly - * complex. The expected number of databases is not high - * anyway, I suppose. + * complex. The expected number of databases is not high anyway, + * I suppose. */ foreach(cell, remDeps) { @@ -572,8 +572,8 @@ checkSharedDependencies(Oid classId, Oid objectId) /* * Report seems unreasonably long, so reduce it to per-database info * - * Note: we don't ever suppress per-database totals, which should - * be OK as long as there aren't too many databases ... + * Note: we don't ever suppress per-database totals, which should be OK + * as long as there aren't too many databases ... */ descs.len = 0; /* reset to empty */ descs.data[0] = '\0'; @@ -592,7 +592,7 @@ checkSharedDependencies(Oid classId, Oid objectId) foreach(cell, remDeps) { - remoteDep *dep = lfirst(cell); + remoteDep *dep = lfirst(cell); object.classId = DatabaseRelationId; object.objectId = dep->dbOid; @@ -624,8 +624,8 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId) { Relation sdepRel; TupleDesc sdepDesc; - ScanKeyData key[1]; - SysScanDesc scan; + ScanKeyData key[1]; + SysScanDesc scan; HeapTuple tup; CatalogIndexState indstate; Datum values[Natts_pg_shdepend]; @@ -655,11 +655,11 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId) values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(newDbId); /* - * Copy the entries of the original database, changing the database Id - * to that of the new database. Note that because we are not copying - * rows with dbId == 0 (ie, rows describing dependent shared objects) - * we won't copy the ownership dependency of the template database - * itself; this is what we want. + * Copy the entries of the original database, changing the database Id to + * that of the new database. Note that because we are not copying rows + * with dbId == 0 (ie, rows describing dependent shared objects) we won't + * copy the ownership dependency of the template database itself; this is + * what we want. */ while (HeapTupleIsValid(tup = systable_getnext(scan))) { @@ -690,15 +690,15 @@ void dropDatabaseDependencies(Oid databaseId) { Relation sdepRel; - ScanKeyData key[1]; - SysScanDesc scan; + ScanKeyData key[1]; + SysScanDesc scan; HeapTuple tup; sdepRel = heap_open(SharedDependRelationId, RowExclusiveLock); /* - * First, delete all the entries that have the database Oid in the - * dbid field. + * First, delete all the entries that have the database Oid in the dbid + * field. */ ScanKeyInit(&key[0], Anum_pg_shdepend_dbid, @@ -747,7 +747,7 @@ deleteSharedDependencyRecordsFor(Oid classId, Oid objectId) /* * shdepAddDependency - * Internal workhorse for inserting into pg_shdepend + * Internal workhorse for inserting into pg_shdepend * * sdepRel must be the pg_shdepend relation, already opened and suitably * locked. @@ -762,9 +762,9 @@ shdepAddDependency(Relation sdepRel, Oid classId, Oid objectId, bool nulls[Natts_pg_shdepend]; /* - * Make sure the object doesn't go away while we record the dependency - * on it. 
DROP routines should lock the object exclusively before they - * check shared dependencies. + * Make sure the object doesn't go away while we record the dependency on + * it. DROP routines should lock the object exclusively before they check + * shared dependencies. */ shdepLockAndCheckObject(refclassId, refobjId); @@ -794,7 +794,7 @@ shdepAddDependency(Relation sdepRel, Oid classId, Oid objectId, /* * shdepDropDependency - * Internal workhorse for deleting entries from pg_shdepend. + * Internal workhorse for deleting entries from pg_shdepend. * * We drop entries having the following properties: * dependent object is the one identified by classId/objectId @@ -810,17 +810,17 @@ shdepDropDependency(Relation sdepRel, Oid classId, Oid objectId, Oid refclassId, Oid refobjId, SharedDependencyType deptype) { - ScanKeyData key[3]; - SysScanDesc scan; + ScanKeyData key[3]; + SysScanDesc scan; HeapTuple tup; /* Scan for entries matching the dependent object */ ScanKeyInit(&key[0], - Anum_pg_shdepend_dbid, + Anum_pg_shdepend_dbid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(classIdGetDbId(classId))); ScanKeyInit(&key[1], - Anum_pg_shdepend_classid, + Anum_pg_shdepend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(classId)); ScanKeyInit(&key[2], @@ -899,8 +899,8 @@ shdepLockAndCheckObject(Oid classId, Oid objectId) LockSharedObject(classId, objectId, 0, AccessShareLock); /* - * We have to recognize sinval updates here, else our local syscache - * may still contain the object even if it was just dropped. + * We have to recognize sinval updates here, else our local syscache may + * still contain the object even if it was just dropped. */ AcceptInvalidationMessages(); @@ -916,25 +916,26 @@ shdepLockAndCheckObject(Oid classId, Oid objectId) objectId))); break; - /* - * Currently, this routine need not support any other shared object - * types besides roles. If we wanted to record explicit dependencies - * on databases or tablespaces, we'd need code along these lines: - */ + /* + * Currently, this routine need not support any other shared + * object types besides roles. 
If we wanted to record explicit + * dependencies on databases or tablespaces, we'd need code along + * these lines: + */ #ifdef NOT_USED case TableSpaceRelationId: - { - /* For lack of a syscache on pg_tablespace, do this: */ - char *tablespace = get_tablespace_name(objectId); - - if (tablespace == NULL) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace %u was concurrently dropped", - objectId))); - pfree(tablespace); - break; - } + { + /* For lack of a syscache on pg_tablespace, do this: */ + char *tablespace = get_tablespace_name(objectId); + + if (tablespace == NULL) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("tablespace %u was concurrently dropped", + objectId))); + pfree(tablespace); + break; + } #endif default: @@ -963,13 +964,13 @@ storeObjectDescription(StringInfo descs, objectType type, SharedDependencyType deptype, int count) { - char *objdesc = getObjectDescription(object); + char *objdesc = getObjectDescription(object); /* separate entries with a newline */ if (descs->len != 0) appendStringInfoChar(descs, '\n'); - switch (type) + switch (type) { case LOCAL_OBJECT: case SHARED_OBJECT: @@ -1006,16 +1007,16 @@ static bool isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel) { bool result = false; - ScanKeyData key[2]; - SysScanDesc scan; + ScanKeyData key[2]; + SysScanDesc scan; HeapTuple tup; ScanKeyInit(&key[0], - Anum_pg_shdepend_refclassid, + Anum_pg_shdepend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(classId)); ScanKeyInit(&key[1], - Anum_pg_shdepend_refobjid, + Anum_pg_shdepend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objectId)); @@ -1024,9 +1025,9 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel) /* * Since we won't generate additional pg_shdepend entries for pinned - * objects, there can be at most one entry referencing a pinned - * object. Hence, it's sufficient to look at the first returned - * tuple; we don't need to loop. + * objects, there can be at most one entry referencing a pinned object. + * Hence, it's sufficient to look at the first returned tuple; we don't + * need to loop. */ tup = systable_getnext(scan); if (HeapTupleIsValid(tup)) diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index d84bc2c1ac8..ab250b02ea9 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.103 2005/08/12 01:35:57 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.104 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -75,7 +75,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace) namestrcpy(&name, typeName); values[i++] = NameGetDatum(&name); /* typname */ values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */ - values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */ + values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */ values[i++] = Int16GetDatum(0); /* typlen */ values[i++] = BoolGetDatum(false); /* typbyval */ values[i++] = CharGetDatum(0); /* typtype */ @@ -180,8 +180,8 @@ TypeCreate(const char *typeName, int i; /* - * We assume that the caller validated the arguments individually, but - * did not check for bad combinations. + * We assume that the caller validated the arguments individually, but did + * not check for bad combinations. 
* * Validate size specifications: either positive (fixed-length) or -1 * (varlena) or -2 (cstring). Pass-by-value types must have a fixed @@ -198,8 +198,8 @@ TypeCreate(const char *typeName, (internalSize <= 0 || internalSize > (int16) sizeof(Datum))) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("internal size %d is invalid for passed-by-value type", - internalSize))); + errmsg("internal size %d is invalid for passed-by-value type", + internalSize))); /* Only varlena types can be toasted */ if (storage != 'p' && internalSize != -1) @@ -224,7 +224,7 @@ TypeCreate(const char *typeName, namestrcpy(&name, typeName); values[i++] = NameGetDatum(&name); /* typname */ values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */ - values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */ + values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */ values[i++] = Int16GetDatum(internalSize); /* typlen */ values[i++] = BoolGetDatum(passedByValue); /* typbyval */ values[i++] = CharGetDatum(typeType); /* typtype */ @@ -245,8 +245,8 @@ TypeCreate(const char *typeName, values[i++] = Int32GetDatum(typNDims); /* typndims */ /* - * initialize the default binary value for this type. Check for nulls - * of course. + * initialize the default binary value for this type. Check for nulls of + * course. */ if (defaultTypeBin) values[i] = DirectFunctionCall1(textin, @@ -260,7 +260,7 @@ TypeCreate(const char *typeName, */ if (defaultTypeValue) values[i] = DirectFunctionCall1(textin, - CStringGetDatum(defaultTypeValue)); + CStringGetDatum(defaultTypeValue)); else nulls[i] = 'n'; i++; /* typdefault */ @@ -356,8 +356,7 @@ TypeCreate(const char *typeName, void GenerateTypeDependencies(Oid typeNamespace, Oid typeObjectId, - Oid relationOid, /* only for 'c'atalog - * types */ + Oid relationOid, /* only for 'c'atalog types */ char relationKind, /* ditto */ Oid owner, Oid inputProcedure, @@ -436,13 +435,12 @@ GenerateTypeDependencies(Oid typeNamespace, /* * If the type is a rowtype for a relation, mark it as internally - * dependent on the relation, *unless* it is a stand-alone composite - * type relation. For the latter case, we have to reverse the - * dependency. + * dependent on the relation, *unless* it is a stand-alone composite type + * relation. For the latter case, we have to reverse the dependency. * * In the former case, this allows the type to be auto-dropped when the - * relation is, and not otherwise. And in the latter, of course we get - * the opposite effect. + * relation is, and not otherwise. And in the latter, of course we get the + * opposite effect. */ if (OidIsValid(relationOid)) { @@ -457,11 +455,10 @@ GenerateTypeDependencies(Oid typeNamespace, } /* - * If the type is an array type, mark it auto-dependent on the base - * type. (This is a compromise between the typical case where the - * array type is automatically generated and the case where it is - * manually created: we'd prefer INTERNAL for the former case and - * NORMAL for the latter.) + * If the type is an array type, mark it auto-dependent on the base type. + * (This is a compromise between the typical case where the array type is + * automatically generated and the case where it is manually created: we'd + * prefer INTERNAL for the former case and NORMAL for the latter.) 
*/ if (OidIsValid(elementType)) { diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index e3efde249d0..160cd8e488a 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.29 2005/08/22 17:38:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30 2005/10/15 02:49:14 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -72,8 +72,8 @@ DefineAggregate(List *names, List *parameters) DefElem *defel = (DefElem *) lfirst(pl); /* - * sfunc1, stype1, and initcond1 are accepted as obsolete - * spellings for sfunc, stype, initcond. + * sfunc1, stype1, and initcond1 are accepted as obsolete spellings + * for sfunc, stype, initcond. */ if (pg_strcasecmp(defel->defname, "sfunc") == 0) transfuncName = defGetQualifiedName(defel); @@ -119,11 +119,11 @@ DefineAggregate(List *names, List *parameters) /* * look up the aggregate's base type (input datatype) and transtype. * - * We have historically allowed the command to look like basetype = 'ANY' - * so we must do a case-insensitive comparison for the name ANY. Ugh. + * We have historically allowed the command to look like basetype = 'ANY' so + * we must do a case-insensitive comparison for the name ANY. Ugh. * - * basetype can be a pseudo-type, but transtype can't, since we need to - * be able to store values of the transtype. However, we can allow + * basetype can be a pseudo-type, but transtype can't, since we need to be + * able to store values of the transtype. However, we can allow * polymorphic transtype in some cases (AggregateCreate will check). */ if (pg_strcasecmp(TypeNameToString(baseType), "ANY") == 0) @@ -169,11 +169,11 @@ RemoveAggregate(RemoveAggrStmt *stmt) ObjectAddress object; /* - * if a basetype is passed in, then attempt to find an aggregate for - * that specific type. + * if a basetype is passed in, then attempt to find an aggregate for that + * specific type. * - * else attempt to find an aggregate with a basetype of ANYOID. This - * means that the aggregate is to apply to all basetypes (eg, COUNT). + * else attempt to find an aggregate with a basetype of ANYOID. This means + * that the aggregate is to apply to all basetypes (eg, COUNT). */ if (aggType) basetypeID = typenameTypeId(aggType); @@ -193,8 +193,8 @@ RemoveAggregate(RemoveAggrStmt *stmt) /* Permission check: must own agg or its namespace */ if (!pg_proc_ownercheck(procOid, GetUserId()) && - !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace, - GetUserId())) + !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace, + GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, NameListToString(aggName)); @@ -225,10 +225,10 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname) AclResult aclresult; /* - * if a basetype is passed in, then attempt to find an aggregate for - * that specific type; else attempt to find an aggregate with a - * basetype of ANYOID. This means that the aggregate applies to all - * basetypes (eg, COUNT). + * if a basetype is passed in, then attempt to find an aggregate for that + * specific type; else attempt to find an aggregate with a basetype of + * ANYOID. This means that the aggregate applies to all basetypes (eg, + * COUNT). 
*/ if (basetype) basetypeOid = typenameTypeId(basetype); @@ -258,16 +258,16 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname) if (basetypeOid == ANYOID) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), - errmsg("function %s(*) already exists in schema \"%s\"", - newname, - get_namespace_name(namespaceOid)))); + errmsg("function %s(*) already exists in schema \"%s\"", + newname, + get_namespace_name(namespaceOid)))); else ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), errmsg("function %s already exists in schema \"%s\"", funcname_signature_string(newname, procForm->pronargs, - procForm->proargtypes.values), + procForm->proargtypes.values), get_namespace_name(namespaceOid)))); } @@ -305,10 +305,10 @@ AlterAggregateOwner(List *name, TypeName *basetype, Oid newOwnerId) AclResult aclresult; /* - * if a basetype is passed in, then attempt to find an aggregate for - * that specific type; else attempt to find an aggregate with a - * basetype of ANYOID. This means that the aggregate applies to all - * basetypes (eg, COUNT). + * if a basetype is passed in, then attempt to find an aggregate for that + * specific type; else attempt to find an aggregate with a basetype of + * ANYOID. This means that the aggregate applies to all basetypes (eg, + * COUNT). */ if (basetype) basetypeOid = typenameTypeId(basetype); @@ -353,8 +353,7 @@ AlterAggregateOwner(List *name, TypeName *basetype, Oid newOwnerId) } /* - * Modify the owner --- okay to scribble on tup because it's a - * copy + * Modify the owner --- okay to scribble on tup because it's a copy */ procForm->proowner = newOwnerId; diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 996d70e1632..102dafb8a2a 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.14 2005/08/01 04:03:55 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.15 2005/10/15 02:49:14 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -102,8 +102,8 @@ ExecRenameStmt(RenameStmt *stmt) { /* * RENAME TABLE requires that we (still) hold - * CREATE rights on the containing namespace, - * as well as ownership of the table. + * CREATE rights on the containing namespace, as + * well as ownership of the table. */ Oid namespaceId = get_rel_namespace(relid); AclResult aclresult; @@ -113,7 +113,7 @@ ExecRenameStmt(RenameStmt *stmt) ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_NAMESPACE, - get_namespace_name(namespaceId)); + get_namespace_name(namespaceId)); renamerel(relid, stmt->newname); break; @@ -122,7 +122,7 @@ ExecRenameStmt(RenameStmt *stmt) renameatt(relid, stmt->subname, /* old att name */ stmt->newname, /* new att name */ - interpretInhOption(stmt->relation->inhOpt), /* recursive? */ + interpretInhOption(stmt->relation->inhOpt), /* recursive? */ false); /* recursing already? 
*/ break; case OBJECT_TRIGGER: @@ -156,18 +156,18 @@ ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt) AlterFunctionNamespace(stmt->object, stmt->objarg, stmt->newschema); break; - + case OBJECT_SEQUENCE: case OBJECT_TABLE: CheckRelationOwnership(stmt->relation, true); AlterTableNamespace(stmt->relation, stmt->newschema); break; - + case OBJECT_TYPE: case OBJECT_DOMAIN: AlterTypeNamespace(stmt->object, stmt->newschema); break; - + default: elog(ERROR, "unrecognized AlterObjectSchemaStmt type: %d", (int) stmt->objectType); @@ -181,7 +181,7 @@ ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt) void ExecAlterOwnerStmt(AlterOwnerStmt *stmt) { - Oid newowner = get_roleid_checked(stmt->newowner); + Oid newowner = get_roleid_checked(stmt->newowner); switch (stmt->objectType) { diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index bd32c8c841e..431e39f3b07 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.88 2005/07/29 19:30:03 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -119,9 +119,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) elevel = DEBUG2; /* - * Use the current context for storing analysis info. vacuum.c - * ensures that this context will be cleared when I return, thus - * releasing the memory allocated here. + * Use the current context for storing analysis info. vacuum.c ensures + * that this context will be cleared when I return, thus releasing the + * memory allocated here. */ anl_context = CurrentMemoryContext; @@ -132,8 +132,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) CHECK_FOR_INTERRUPTS(); /* - * Race condition -- if the pg_class tuple has gone away since the - * last time we saw it, we don't need to process it. + * Race condition -- if the pg_class tuple has gone away since the last + * time we saw it, we don't need to process it. */ if (!SearchSysCacheExists(RELOID, ObjectIdGetDatum(relid), @@ -141,8 +141,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) return; /* - * Open the class, getting only a read lock on it, and check - * permissions. Permissions check should match vacuum's check! + * Open the class, getting only a read lock on it, and check permissions. + * Permissions check should match vacuum's check! */ onerel = relation_open(relid, AccessShareLock); @@ -159,8 +159,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * Check that it's a plain table; we used to do this in get_rel_oids() - * but seems safer to check after we've locked the relation. + * Check that it's a plain table; we used to do this in get_rel_oids() but + * seems safer to check after we've locked the relation. */ if (onerel->rd_rel->relkind != RELKIND_RELATION) { @@ -175,10 +175,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* * Silently ignore tables that are temp tables of other backends --- - * trying to analyze these is rather pointless, since their contents - * are probably not up-to-date on disk. (We don't throw a warning - * here; it would just lead to chatter during a database-wide - * ANALYZE.) + * trying to analyze these is rather pointless, since their contents are + * probably not up-to-date on disk. (We don't throw a warning here; it + * would just lead to chatter during a database-wide ANALYZE.) 
*/ if (isOtherTempNamespace(RelationGetNamespace(onerel))) { @@ -239,10 +238,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * Open all indexes of the relation, and see if there are any - * analyzable columns in the indexes. We do not analyze index columns - * if there was an explicit column list in the ANALYZE command, - * however. + * Open all indexes of the relation, and see if there are any analyzable + * columns in the indexes. We do not analyze index columns if there was + * an explicit column list in the ANALYZE command, however. */ vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel); hasindex = (nindexes > 0); @@ -280,13 +278,12 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) indexpr_item = lnext(indexpr_item); /* - * Can't analyze if the opclass uses a storage - * type different from the expression result type. - * We'd get confused because the type shown in - * pg_attribute for the index column doesn't match - * what we are getting from the expression. - * Perhaps this can be fixed someday, but for now, - * punt. + * Can't analyze if the opclass uses a storage type + * different from the expression result type. We'd get + * confused because the type shown in pg_attribute for + * the index column doesn't match what we are getting + * from the expression. Perhaps this can be fixed + * someday, but for now, punt. */ if (exprType(indexkey) != Irel[ind]->rd_att->attrs[i]->atttypid) @@ -313,13 +310,13 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) { /* * We report that the table is empty; this is just so that the - * autovacuum code doesn't go nuts trying to get stats about - * a zero-column table. + * autovacuum code doesn't go nuts trying to get stats about a + * zero-column table. */ if (!vacstmt->vacuum) pgstat_report_analyze(RelationGetRelid(onerel), onerel->rd_rel->relisshared, - 0, 0); + 0, 0); vac_close_indexes(nindexes, Irel, AccessShareLock); relation_close(onerel, AccessShareLock); @@ -327,9 +324,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * Determine how many rows we need to sample, using the worst case - * from all analyzable columns. We use a lower bound of 100 rows to - * avoid possible overflow in Vitter's algorithm. + * Determine how many rows we need to sample, using the worst case from + * all analyzable columns. We use a lower bound of 100 rows to avoid + * possible overflow in Vitter's algorithm. */ targrows = 100; for (i = 0; i < attr_cnt; i++) @@ -356,10 +353,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) &totalrows, &totaldeadrows); /* - * Compute the statistics. Temporary results during the calculations - * for each column are stored in a child context. The calc routines - * are responsible to make sure that whatever they store into the - * VacAttrStats structure is allocated in anl_context. + * Compute the statistics. Temporary results during the calculations for + * each column are stored in a child context. The calc routines are + * responsible to make sure that whatever they store into the VacAttrStats + * structure is allocated in anl_context. */ if (numrows > 0) { @@ -397,9 +394,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* * Emit the completed stats rows into pg_statistic, replacing any - * previous statistics for the target columns. (If there are - * stats in pg_statistic for columns we didn't process, we leave - * them alone.) + * previous statistics for the target columns. (If there are stats in + * pg_statistic for columns we didn't process, we leave them alone.) 
*/ update_attstats(relid, attr_cnt, vacattrstats); @@ -413,11 +409,11 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * If we are running a standalone ANALYZE, update pages/tuples stats - * in pg_class. We know the accurate page count from the smgr, but - * only an approximate number of tuples; therefore, if we are part of - * VACUUM ANALYZE do *not* overwrite the accurate count already - * inserted by VACUUM. The same consideration applies to indexes. + * If we are running a standalone ANALYZE, update pages/tuples stats in + * pg_class. We know the accurate page count from the smgr, but only an + * approximate number of tuples; therefore, if we are part of VACUUM + * ANALYZE do *not* overwrite the accurate count already inserted by + * VACUUM. The same consideration applies to indexes. */ if (!vacstmt->vacuum) { @@ -440,7 +436,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* report results to the stats collector, too */ pgstat_report_analyze(RelationGetRelid(onerel), onerel->rd_rel->relisshared, - totalrows, totaldeadrows); + totalrows, totaldeadrows); } /* Done with indexes */ @@ -448,8 +444,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* * Close source relation now, but keep lock so that no one deletes it - * before we commit. (If someone did, they'd fail to clean up the - * entries we made in pg_statistic.) + * before we commit. (If someone did, they'd fail to clean up the entries + * we made in pg_statistic.) */ relation_close(onerel, NoLock); } @@ -499,8 +495,8 @@ compute_index_stats(Relation onerel, double totalrows, /* * Need an EState for evaluation of index expressions and - * partial-index predicates. Create it in the per-index context - * to be sure it gets cleaned up at the bottom of the loop. + * partial-index predicates. Create it in the per-index context to be + * sure it gets cleaned up at the bottom of the loop. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -539,8 +535,7 @@ compute_index_stats(Relation onerel, double totalrows, { /* * Evaluate the index row to compute expression values. We - * could do this by hand, but FormIndexDatum is - * convenient. + * could do this by hand, but FormIndexDatum is convenient. */ FormIndexDatum(indexInfo, slot, @@ -564,9 +559,8 @@ compute_index_stats(Relation onerel, double totalrows, } /* - * Having counted the number of rows that pass the predicate in - * the sample, we can estimate the total number of rows in the - * index. + * Having counted the number of rows that pass the predicate in the + * sample, we can estimate the total number of rows in the index. */ thisdata->tupleFract = (double) numindexrows / (double) numrows; totalindexrows = ceil(thisdata->tupleFract * totalrows); @@ -644,8 +638,8 @@ examine_attribute(Relation onerel, int attnum) stats->tupattnum = attnum; /* - * Call the type-specific typanalyze function. If none is specified, - * use std_typanalyze(). + * Call the type-specific typanalyze function. If none is specified, use + * std_typanalyze(). */ if (OidIsValid(stats->attrtype->typanalyze)) ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze, @@ -683,8 +677,8 @@ BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize) bs->N = nblocks; /* measured table size */ /* - * If we decide to reduce samplesize for tables that have less or not - * much more than samplesize blocks, here is the place to do it. + * If we decide to reduce samplesize for tables that have less or not much + * more than samplesize blocks, here is the place to do it. 
*/ bs->n = samplesize; bs->t = 0; /* blocks scanned so far */ @@ -815,12 +809,11 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, vacuum_delay_point(); /* - * We must maintain a pin on the target page's buffer to ensure - * that the maxoffset value stays good (else concurrent VACUUM - * might delete tuples out from under us). Hence, pin the page - * until we are done looking at it. We don't maintain a lock on - * the page, so tuples could get added to it, but we ignore such - * tuples. + * We must maintain a pin on the target page's buffer to ensure that + * the maxoffset value stays good (else concurrent VACUUM might delete + * tuples out from under us). Hence, pin the page until we are done + * looking at it. We don't maintain a lock on the page, so tuples + * could get added to it, but we ignore such tuples. */ targbuffer = ReadBuffer(onerel, targblock); LockBuffer(targbuffer, BUFFER_LOCK_SHARE); @@ -842,24 +835,24 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, /* * The first targrows live rows are simply copied into the * reservoir. Then we start replacing tuples in the sample - * until we reach the end of the relation. This algorithm - * is from Jeff Vitter's paper (see full citation below). - * It works by repeatedly computing the number of tuples - * to skip before selecting a tuple, which replaces a - * randomly chosen element of the reservoir (current set - * of tuples). At all times the reservoir is a true - * random sample of the tuples we've passed over so far, - * so when we fall off the end of the relation we're done. + * until we reach the end of the relation. This algorithm is + * from Jeff Vitter's paper (see full citation below). It + * works by repeatedly computing the number of tuples to skip + * before selecting a tuple, which replaces a randomly chosen + * element of the reservoir (current set of tuples). At all + * times the reservoir is a true random sample of the tuples + * we've passed over so far, so when we fall off the end of + * the relation we're done. */ if (numrows < targrows) rows[numrows++] = heap_copytuple(&targtuple); else { /* - * t in Vitter's paper is the number of records - * already processed. If we need to compute a new S - * value, we must use the not-yet-incremented value of - * liverows as t. + * t in Vitter's paper is the number of records already + * processed. If we need to compute a new S value, we + * must use the not-yet-incremented value of liverows as + * t. */ if (rowstoskip < 0) rowstoskip = get_next_S(liverows, targrows, &rstate); @@ -867,8 +860,8 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, if (rowstoskip <= 0) { /* - * Found a suitable tuple, so save it, replacing - * one old tuple at random + * Found a suitable tuple, so save it, replacing one + * old tuple at random */ int k = (int) (targrows * random_fract()); @@ -895,12 +888,12 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, } /* - * If we didn't find as many tuples as we wanted then we're done. No - * sort is needed, since they're already in order. + * If we didn't find as many tuples as we wanted then we're done. No sort + * is needed, since they're already in order. * - * Otherwise we need to sort the collected tuples by position - * (itempointer). It's not worth worrying about corner cases where - * the tuples are already sorted. + * Otherwise we need to sort the collected tuples by position (itempointer). 
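[Aside: the reservoir idea described in the comment above, as a minimal standalone sketch. This is the simple "Algorithm R" form with a hypothetical function name; the actual code uses Vitter's skip-based refinement (get_next_S) so it does not draw a random number for every row, and modulo bias in rand() is ignored here.]

    #include <stdlib.h>

    /*
     * Keep a uniform random sample of size k over a stream of n values:
     * the first k items fill the reservoir, and each later item t (0-based)
     * replaces a random slot with probability k/(t+1).
     */
    static void
    reservoir_sample(const int *stream, int n, int *reservoir, int k)
    {
        int t;

        for (t = 0; t < n; t++)
        {
            if (t < k)
                reservoir[t] = stream[t];   /* fill phase */
            else
            {
                int slot = rand() % (t + 1);    /* pick a slot in [0, t] */

                if (slot < k)
                    reservoir[slot] = stream[t];
            }
        }
    }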
+ * It's not worth worrying about corner cases where the tuples are already + * sorted. */ if (numrows == targrows) qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows); @@ -1455,8 +1448,7 @@ compute_minimal_stats(VacAttrStatsP stats, StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data; /* - * We track up to 2*n values for an n-element MCV list; but at least - * 10 + * We track up to 2*n values for an n-element MCV list; but at least 10 */ track_max = 2 * num_mcv; if (track_max < 10) @@ -1488,9 +1480,9 @@ compute_minimal_stats(VacAttrStatsP stats, /* * If it's a variable-width field, add up widths for average width - * calculation. Note that if the value is toasted, we use the - * toasted width. We don't bother with this calculation if it's a - * fixed-width type. + * calculation. Note that if the value is toasted, we use the toasted + * width. We don't bother with this calculation if it's a fixed-width + * type. */ if (is_varlena) { @@ -1498,10 +1490,10 @@ compute_minimal_stats(VacAttrStatsP stats, /* * If the value is toasted, we want to detoast it just once to - * avoid repeated detoastings and resultant excess memory - * usage during the comparisons. Also, check to see if the - * value is excessively wide, and if so don't detoast at all - * --- just ignore the value. + * avoid repeated detoastings and resultant excess memory usage + * during the comparisons. Also, check to see if the value is + * excessively wide, and if so don't detoast at all --- just + * ignore the value. */ if (toast_raw_datum_size(value) > WIDTH_THRESHOLD) { @@ -1594,9 +1586,9 @@ compute_minimal_stats(VacAttrStatsP stats, nmultiple == track_cnt) { /* - * Our track list includes every value in the sample, and - * every value appeared more than once. Assume the column has - * just these values. + * Our track list includes every value in the sample, and every + * value appeared more than once. Assume the column has just + * these values. */ stats->stadistinct = track_cnt; } @@ -1641,22 +1633,22 @@ compute_minimal_stats(VacAttrStatsP stats, } /* - * If we estimated the number of distinct values at more than 10% - * of the total row count (a very arbitrary limit), then assume - * that stadistinct should scale with the row count rather than be - * a fixed value. + * If we estimated the number of distinct values at more than 10% of + * the total row count (a very arbitrary limit), then assume that + * stadistinct should scale with the row count rather than be a fixed + * value. */ if (stats->stadistinct > 0.1 * totalrows) stats->stadistinct = -(stats->stadistinct / totalrows); /* - * Decide how many values are worth storing as most-common values. - * If we are able to generate a complete MCV list (all the values - * in the sample will fit, and we think these are all the ones in - * the table), then do so. Otherwise, store only those values - * that are significantly more common than the (estimated) - * average. We set the threshold rather arbitrarily at 25% more - * than average, with at least 2 instances in the sample. + * Decide how many values are worth storing as most-common values. If + * we are able to generate a complete MCV list (all the values in the + * sample will fit, and we think these are all the ones in the table), + * then do so. Otherwise, store only those values that are + * significantly more common than the (estimated) average. We set the + * threshold rather arbitrarily at 25% more than average, with at + * least 2 instances in the sample. 
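[Aside: a worked sketch of the most-common-values cutoff just described, with a hypothetical helper name; the patch's own formula may differ in detail. A value qualifies for the MCV list when its sample count is at least 25% above the average count per distinct value, and never below 2.]

    /*
     * Minimum sample count for a value to be stored as a most-common value,
     * given the number of sampled rows and the estimated number of distinct
     * values.
     */
    static double
    mcv_min_count(int samplerows, double ndistinct)
    {
        double mincount = 1.25 * ((double) samplerows / ndistinct);

        return (mincount < 2.0) ? 2.0 : mincount;
    }

    /* e.g. 3000 sampled rows and ~200 distinct values give an average of 15
     * occurrences per value, so a value must appear at least 19 times
     * (count >= 18.75) to be kept in the MCV list. */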
*/ if (track_cnt < track_max && toowide_cnt == 0 && stats->stadistinct > 0 && @@ -1725,10 +1717,10 @@ compute_minimal_stats(VacAttrStatsP stats, stats->stats_valid = true; stats->stanullfrac = 1.0; if (is_varwidth) - stats->stawidth = 0; /* "unknown" */ + stats->stawidth = 0; /* "unknown" */ else stats->stawidth = stats->attrtype->typlen; - stats->stadistinct = 0.0; /* "unknown" */ + stats->stadistinct = 0.0; /* "unknown" */ } /* We don't need to bother cleaning up any of our temporary palloc's */ @@ -1802,9 +1794,9 @@ compute_scalar_stats(VacAttrStatsP stats, /* * If it's a variable-width field, add up widths for average width - * calculation. Note that if the value is toasted, we use the - * toasted width. We don't bother with this calculation if it's a - * fixed-width type. + * calculation. Note that if the value is toasted, we use the toasted + * width. We don't bother with this calculation if it's a fixed-width + * type. */ if (is_varlena) { @@ -1812,10 +1804,10 @@ compute_scalar_stats(VacAttrStatsP stats, /* * If the value is toasted, we want to detoast it just once to - * avoid repeated detoastings and resultant excess memory - * usage during the comparisons. Also, check to see if the - * value is excessively wide, and if so don't detoast at all - * --- just ignore the value. + * avoid repeated detoastings and resultant excess memory usage + * during the comparisons. Also, check to see if the value is + * excessively wide, and if so don't detoast at all --- just + * ignore the value. */ if (toast_raw_datum_size(value) > WIDTH_THRESHOLD) { @@ -1854,24 +1846,23 @@ compute_scalar_stats(VacAttrStatsP stats, sizeof(ScalarItem), compare_scalars); /* - * Now scan the values in order, find the most common ones, and - * also accumulate ordering-correlation statistics. + * Now scan the values in order, find the most common ones, and also + * accumulate ordering-correlation statistics. * - * To determine which are most common, we first have to count the - * number of duplicates of each value. The duplicates are - * adjacent in the sorted list, so a brute-force approach is to - * compare successive datum values until we find two that are not - * equal. However, that requires N-1 invocations of the datum - * comparison routine, which are completely redundant with work - * that was done during the sort. (The sort algorithm must at - * some point have compared each pair of items that are adjacent - * in the sorted order; otherwise it could not know that it's - * ordered the pair correctly.) We exploit this by having + * To determine which are most common, we first have to count the number + * of duplicates of each value. The duplicates are adjacent in the + * sorted list, so a brute-force approach is to compare successive + * datum values until we find two that are not equal. However, that + * requires N-1 invocations of the datum comparison routine, which are + * completely redundant with work that was done during the sort. (The + * sort algorithm must at some point have compared each pair of items + * that are adjacent in the sorted order; otherwise it could not know + * that it's ordered the pair correctly.) We exploit this by having * compare_scalars remember the highest tupno index that each * ScalarItem has been found equal to. At the end of the sort, a - * ScalarItem's tupnoLink will still point to itself if and only - * if it is the last item of its group of duplicates (since the - * group will be ordered by tupno). 
+ * ScalarItem's tupnoLink will still point to itself if and only if it + * is the last item of its group of duplicates (since the group will + * be ordered by tupno). */ corr_xysum = 0; ndistinct = 0; @@ -1895,9 +1886,9 @@ compute_scalar_stats(VacAttrStatsP stats, { /* * Found a new item for the mcv list; find its - * position, bubbling down old items if needed. - * Loop invariant is that j points at an empty/ - * replaceable slot. + * position, bubbling down old items if needed. Loop + * invariant is that j points at an empty/ replaceable + * slot. */ int j; @@ -1934,8 +1925,8 @@ compute_scalar_stats(VacAttrStatsP stats, else if (toowide_cnt == 0 && nmultiple == ndistinct) { /* - * Every value in the sample appeared more than once. Assume - * the column has just these values. + * Every value in the sample appeared more than once. Assume the + * column has just these values. */ stats->stadistinct = ndistinct; } @@ -1976,26 +1967,25 @@ compute_scalar_stats(VacAttrStatsP stats, } /* - * If we estimated the number of distinct values at more than 10% - * of the total row count (a very arbitrary limit), then assume - * that stadistinct should scale with the row count rather than be - * a fixed value. + * If we estimated the number of distinct values at more than 10% of + * the total row count (a very arbitrary limit), then assume that + * stadistinct should scale with the row count rather than be a fixed + * value. */ if (stats->stadistinct > 0.1 * totalrows) stats->stadistinct = -(stats->stadistinct / totalrows); /* - * Decide how many values are worth storing as most-common values. - * If we are able to generate a complete MCV list (all the values - * in the sample will fit, and we think these are all the ones in - * the table), then do so. Otherwise, store only those values - * that are significantly more common than the (estimated) - * average. We set the threshold rather arbitrarily at 25% more - * than average, with at least 2 instances in the sample. Also, - * we won't suppress values that have a frequency of at least 1/K - * where K is the intended number of histogram bins; such values - * might otherwise cause us to emit duplicate histogram bin - * boundaries. + * Decide how many values are worth storing as most-common values. If + * we are able to generate a complete MCV list (all the values in the + * sample will fit, and we think these are all the ones in the table), + * then do so. Otherwise, store only those values that are + * significantly more common than the (estimated) average. We set the + * threshold rather arbitrarily at 25% more than average, with at + * least 2 instances in the sample. Also, we won't suppress values + * that have a frequency of at least 1/K where K is the intended + * number of histogram bins; such values might otherwise cause us to + * emit duplicate histogram bin boundaries. */ if (track_cnt == ndistinct && toowide_cnt == 0 && stats->stadistinct > 0 && @@ -2065,9 +2055,9 @@ compute_scalar_stats(VacAttrStatsP stats, } /* - * Generate a histogram slot entry if there are at least two - * distinct values not accounted for in the MCV list. (This - * ensures the histogram won't collapse to empty or a singleton.) + * Generate a histogram slot entry if there are at least two distinct + * values not accounted for in the MCV list. (This ensures the + * histogram won't collapse to empty or a singleton.) 
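
The most-common-value cutoff described above boils down to a little arithmetic: a candidate must be at least 25% more frequent than an "average" value would be if the ndistinct estimate is right, must appear at least twice in the sample, and must never be suppressed once its frequency reaches 1/K of the sample (K being the number of histogram bins), since suppressing such a value would produce duplicate histogram boundaries. A hedged sketch with illustrative names (the real test lives inline in compute_scalar_stats and works from the sample counters directly):

/*
 * Sketch of the MCV admission test described above.  Assumes
 * ndistinct > 0 and num_bins > 0.
 */
static int
mcv_is_worth_storing(double count, double samplerows,
                     double ndistinct, int num_bins)
{
    double  avgcount = samplerows / ndistinct;  /* frequency of an "average" value */
    double  mincount = avgcount * 1.25;         /* 25% above average */
    double  maxmincount = samplerows / num_bins;        /* the 1/K frequency */

    if (mincount < 2)
        mincount = 2;
    if (mincount > maxmincount)
        mincount = maxmincount; /* never suppress values at or above 1/K */

    return count >= mincount;
}
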
*/ num_hist = ndistinct - num_mcv; if (num_hist > num_bins) @@ -2085,10 +2075,9 @@ compute_scalar_stats(VacAttrStatsP stats, /* * Collapse out the MCV items from the values[] array. * - * Note we destroy the values[] array here... but we don't need - * it for anything more. We do, however, still need - * values_cnt. nvals will be the number of remaining entries - * in values[]. + * Note we destroy the values[] array here... but we don't need it + * for anything more. We do, however, still need values_cnt. + * nvals will be the number of remaining entries in values[]. */ if (num_mcv > 0) { @@ -2193,10 +2182,10 @@ compute_scalar_stats(VacAttrStatsP stats, stats->stats_valid = true; stats->stanullfrac = 1.0; if (is_varwidth) - stats->stawidth = 0; /* "unknown" */ + stats->stawidth = 0; /* "unknown" */ else stats->stawidth = stats->attrtype->typlen; - stats->stadistinct = 0.0; /* "unknown" */ + stats->stadistinct = 0.0; /* "unknown" */ } /* We don't need to bother cleaning up any of our temporary palloc's */ diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index da133788960..69d97d09237 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.125 2005/10/06 21:30:32 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.126 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,8 +106,7 @@ */ static List *pendingNotifies = NIL; -static List *upperPendingNotifies = NIL; /* list of upper-xact - * lists */ +static List *upperPendingNotifies = NIL; /* list of upper-xact lists */ /* * State for inbound notifies consists of two flags: one saying whether @@ -158,8 +157,8 @@ Async_Notify(const char *relname) if (!AsyncExistsPendingNotify(relname)) { /* - * The name list needs to live until end of transaction, so store - * it in the transaction context. + * The name list needs to live until end of transaction, so store it + * in the transaction context. */ MemoryContext oldcontext; @@ -208,7 +207,7 @@ Async_Listen(const char *relname) Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple); if (listener->listenerpid == MyProcPid && - strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0) + strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0) { alreadyListener = true; /* No need to scan the rest of the table */ @@ -298,14 +297,14 @@ Async_Unlisten(const char *relname) Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple); if (listener->listenerpid == MyProcPid && - strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0) + strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0) { /* Found the matching tuple, delete it */ simple_heap_delete(lRel, &tuple->t_self); /* - * We assume there can be only one match, so no need to scan - * the rest of the table + * We assume there can be only one match, so no need to scan the + * rest of the table */ break; } @@ -387,10 +386,10 @@ static void Async_UnlistenOnExit(int code, Datum arg) { /* - * We need to start/commit a transaction for the unlisten, but if - * there is already an active transaction we had better abort that one - * first. Otherwise we'd end up committing changes that probably - * ought to be discarded. 
+ * We need to start/commit a transaction for the unlisten, but if there is + * already an active transaction we had better abort that one first. + * Otherwise we'd end up committing changes that probably ought to be + * discarded. */ AbortOutOfAnyTransaction(); /* Now we can do the unlisten */ @@ -404,14 +403,14 @@ Async_UnlistenOnExit(int code, Datum arg) *-------------------------------------------------------------- * AtPrepare_Notify * - * This is called at the prepare phase of a two-phase + * This is called at the prepare phase of a two-phase * transaction. Save the state for possible commit later. *-------------------------------------------------------------- */ void AtPrepare_Notify(void) { - ListCell *p; + ListCell *p; foreach(p, pendingNotifies) { @@ -423,8 +422,8 @@ AtPrepare_Notify(void) /* * We can clear the state immediately, rather than needing a separate - * PostPrepare call, because if the transaction fails we'd just - * discard the state anyway. + * PostPrepare call, because if the transaction fails we'd just discard + * the state anyway. */ ClearPendingNotifies(); } @@ -464,12 +463,11 @@ AtCommit_Notify(void) nulls[Natts_pg_listener]; if (pendingNotifies == NIL) - return; /* no NOTIFY statements in this - * transaction */ + return; /* no NOTIFY statements in this transaction */ /* - * NOTIFY is disabled if not normal processing mode. This test used to - * be in xact.c, but it seems cleaner to do it here. + * NOTIFY is disabled if not normal processing mode. This test used to be + * in xact.c, but it seems cleaner to do it here. */ if (!IsNormalProcessingMode()) { @@ -503,10 +501,10 @@ AtCommit_Notify(void) if (listenerPID == MyProcPid) { /* - * Self-notify: no need to bother with table update. Indeed, - * we *must not* clear the notification field in this path, or - * we could lose an outside notify, which'd be bad for - * applications that ignore self-notify messages. + * Self-notify: no need to bother with table update. Indeed, we + * *must not* clear the notification field in this path, or we + * could lose an outside notify, which'd be bad for applications + * that ignore self-notify messages. */ if (Trace_notify) @@ -521,27 +519,27 @@ AtCommit_Notify(void) listenerPID); /* - * If someone has already notified this listener, we don't - * bother modifying the table, but we do still send a SIGUSR2 - * signal, just in case that backend missed the earlier signal - * for some reason. It's OK to send the signal first, because - * the other guy can't read pg_listener until we unlock it. + * If someone has already notified this listener, we don't bother + * modifying the table, but we do still send a SIGUSR2 signal, + * just in case that backend missed the earlier signal for some + * reason. It's OK to send the signal first, because the other + * guy can't read pg_listener until we unlock it. */ if (kill(listenerPID, SIGUSR2) < 0) { /* - * Get rid of pg_listener entry if it refers to a PID that - * no longer exists. Presumably, that backend crashed - * without deleting its pg_listener entries. This code - * used to only delete the entry if errno==ESRCH, but as - * far as I can see we should just do it for any failure - * (certainly at least for EPERM too...) + * Get rid of pg_listener entry if it refers to a PID that no + * longer exists. Presumably, that backend crashed without + * deleting its pg_listener entries. 
This code used to only + * delete the entry if errno==ESRCH, but as far as I can see + * we should just do it for any failure (certainly at least + * for EPERM too...) */ simple_heap_delete(lRel, &lTuple->t_self); } else if (listener->notification == 0) { - HTSU_Result result; + HTSU_Result result; ItemPointerData update_ctid; TransactionId update_xmax; @@ -551,17 +549,16 @@ AtCommit_Notify(void) /* * We cannot use simple_heap_update here because the tuple * could have been modified by an uncommitted transaction; - * specifically, since UNLISTEN releases exclusive lock on - * the table before commit, the other guy could already - * have tried to unlisten. There are no other cases where - * we should be able to see an uncommitted update or - * delete. Therefore, our response to a - * HeapTupleBeingUpdated result is just to ignore it. We - * do *not* wait for the other guy to commit --- that - * would risk deadlock, and we don't want to block while - * holding the table lock anyway for performance reasons. - * We also ignore HeapTupleUpdated, which could occur if - * the other guy commits between our heap_getnext and + * specifically, since UNLISTEN releases exclusive lock on the + * table before commit, the other guy could already have tried + * to unlisten. There are no other cases where we should be + * able to see an uncommitted update or delete. Therefore, our + * response to a HeapTupleBeingUpdated result is just to + * ignore it. We do *not* wait for the other guy to commit + * --- that would risk deadlock, and we don't want to block + * while holding the table lock anyway for performance + * reasons. We also ignore HeapTupleUpdated, which could occur + * if the other guy commits between our heap_getnext and * heap_update calls. */ result = heap_update(lRel, &lTuple->t_self, rTuple, @@ -603,10 +600,10 @@ AtCommit_Notify(void) /* * We do NOT release the lock on pg_listener here; we need to hold it - * until end of transaction (which is about to happen, anyway) to - * ensure that notified backends see our tuple updates when they look. - * Else they might disregard the signal, which would make the - * application programmer very unhappy. + * until end of transaction (which is about to happen, anyway) to ensure + * that notified backends see our tuple updates when they look. Else they + * might disregard the signal, which would make the application programmer + * very unhappy. */ heap_close(lRel, NoLock); @@ -676,8 +673,7 @@ AtSubCommit_Notify(void) GetCurrentTransactionNestLevel() - 2); /* - * We could try to eliminate duplicates here, but it seems not - * worthwhile. + * We could try to eliminate duplicates here, but it seems not worthwhile. */ pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies); } @@ -695,10 +691,10 @@ AtSubAbort_Notify(void) * subxact are no longer interesting, and the space will be freed when * CurTransactionContext is recycled. * - * This routine could be called more than once at a given nesting level - * if there is trouble during subxact abort. Avoid dumping core by - * using GetCurrentTransactionNestLevel as the indicator of how far - * we need to prune the list. + * This routine could be called more than once at a given nesting level if + * there is trouble during subxact abort. Avoid dumping core by using + * GetCurrentTransactionNestLevel as the indicator of how far we need to + * prune the list. 
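
The pruning loop that follows the comment above is written to be idempotent: it trims the saved per-subtransaction lists down to whatever the current nesting level requires, so calling it a second time during the same abort does nothing further. Reduced to a standalone sketch (the fixed array and names are illustrative, not the backend's List machinery):

/*
 * entries[] holds one saved pending-notify list per open subtransaction
 * level (level 2 at index 0, level 3 at index 1, and so on).  Pruning
 * down to the current level, instead of popping exactly one entry,
 * keeps repeated calls harmless, which is the property the comment
 * above is after.
 */
static void *entries[64];
static int  entry_count = 0;

static void
prune_pending_lists(int current_nest_level)
{
    while (entry_count > current_nest_level - 2)
        entries[--entry_count] = NULL;  /* memory context reclaims the data */
}
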
*/ while (list_length(upperPendingNotifies) > my_level - 2) { @@ -731,9 +727,9 @@ NotifyInterruptHandler(SIGNAL_ARGS) /* * Note: this is a SIGNAL HANDLER. You must be very wary what you do - * here. Some helpful soul had this routine sprinkled with TPRINTFs, - * which would likely lead to corruption of stdio buffers if they were - * ever turned on. + * here. Some helpful soul had this routine sprinkled with TPRINTFs, which + * would likely lead to corruption of stdio buffers if they were ever + * turned on. */ /* Don't joggle the elbow of proc_exit */ @@ -745,19 +741,18 @@ NotifyInterruptHandler(SIGNAL_ARGS) bool save_ImmediateInterruptOK = ImmediateInterruptOK; /* - * We may be called while ImmediateInterruptOK is true; turn it - * off while messing with the NOTIFY state. (We would have to - * save and restore it anyway, because PGSemaphore operations - * inside ProcessIncomingNotify() might reset it.) + * We may be called while ImmediateInterruptOK is true; turn it off + * while messing with the NOTIFY state. (We would have to save and + * restore it anyway, because PGSemaphore operations inside + * ProcessIncomingNotify() might reset it.) */ ImmediateInterruptOK = false; /* * I'm not sure whether some flavors of Unix might allow another - * SIGUSR2 occurrence to recursively interrupt this routine. To - * cope with the possibility, we do the same sort of dance that - * EnableNotifyInterrupt must do --- see that routine for - * comments. + * SIGUSR2 occurrence to recursively interrupt this routine. To cope + * with the possibility, we do the same sort of dance that + * EnableNotifyInterrupt must do --- see that routine for comments. */ notifyInterruptEnabled = 0; /* disable any recursive signal */ notifyInterruptOccurred = 1; /* do at least one iteration */ @@ -781,8 +776,7 @@ NotifyInterruptHandler(SIGNAL_ARGS) } /* - * Restore ImmediateInterruptOK, and check for interrupts if - * needed. + * Restore ImmediateInterruptOK, and check for interrupts if needed. */ ImmediateInterruptOK = save_ImmediateInterruptOK; if (save_ImmediateInterruptOK) @@ -791,8 +785,7 @@ NotifyInterruptHandler(SIGNAL_ARGS) else { /* - * In this path it is NOT SAFE to do much of anything, except - * this: + * In this path it is NOT SAFE to do much of anything, except this: */ notifyInterruptOccurred = 1; } @@ -820,27 +813,25 @@ EnableNotifyInterrupt(void) return; /* not really idle */ /* - * This code is tricky because we are communicating with a signal - * handler that could interrupt us at any point. If we just checked - * notifyInterruptOccurred and then set notifyInterruptEnabled, we - * could fail to respond promptly to a signal that happens in between - * those two steps. (A very small time window, perhaps, but Murphy's - * Law says you can hit it...) Instead, we first set the enable flag, - * then test the occurred flag. If we see an unserviced interrupt has - * occurred, we re-clear the enable flag before going off to do the - * service work. (That prevents re-entrant invocation of - * ProcessIncomingNotify() if another interrupt occurs.) If an - * interrupt comes in between the setting and clearing of - * notifyInterruptEnabled, then it will have done the service work and - * left notifyInterruptOccurred zero, so we have to check again after - * clearing enable. The whole thing has to be in a loop in case - * another interrupt occurs while we're servicing the first. Once we - * get out of the loop, enable is set and we know there is no - * unserviced interrupt. 
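
The enable/occurred flag protocol spelled out in the comment above can be shown in isolation: set the enable flag before testing the occurred flag, clear enable again before servicing so the handler cannot start a second service run, and loop because another signal may arrive while the first is being serviced. A minimal sketch with volatile flags (illustrative names, not the backend's variables; the real SIGUSR2 handler also services the interrupt on the spot when the enable flag is set, which this sketch omits):

#include <signal.h>

/* Flags shared with a signal handler, hence volatile sig_atomic_t. */
static volatile sig_atomic_t interrupt_enabled = 0;
static volatile sig_atomic_t interrupt_occurred = 0;

/* Stand-in for ProcessIncomingNotify(). */
static void
service_interrupt(void)
{
}

/*
 * Sketch of the enable/occurred dance: on exit the enable flag is set
 * and no unserviced interrupt remains, no matter how many signals
 * arrive while we are in here.
 */
static void
enable_interrupts(void)
{
    for (;;)
    {
        interrupt_enabled = 1;
        if (!interrupt_occurred)
            break;
        interrupt_enabled = 0;      /* block a recursive service run */
        if (interrupt_occurred)
        {
            interrupt_occurred = 0;
            service_interrupt();
        }
    }
}

/*
 * Simplified handler: just record that a signal arrived so the next
 * enable_interrupts() call services it.
 */
static void
interrupt_handler(int signo)
{
    (void) signo;
    interrupt_occurred = 1;
}
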
+ * This code is tricky because we are communicating with a signal handler + * that could interrupt us at any point. If we just checked + * notifyInterruptOccurred and then set notifyInterruptEnabled, we could + * fail to respond promptly to a signal that happens in between those two + * steps. (A very small time window, perhaps, but Murphy's Law says you + * can hit it...) Instead, we first set the enable flag, then test the + * occurred flag. If we see an unserviced interrupt has occurred, we + * re-clear the enable flag before going off to do the service work. + * (That prevents re-entrant invocation of ProcessIncomingNotify() if + * another interrupt occurs.) If an interrupt comes in between the setting + * and clearing of notifyInterruptEnabled, then it will have done the + * service work and left notifyInterruptOccurred zero, so we have to check + * again after clearing enable. The whole thing has to be in a loop in + * case another interrupt occurs while we're servicing the first. Once we + * get out of the loop, enable is set and we know there is no unserviced + * interrupt. * - * NB: an overenthusiastic optimizing compiler could easily break this - * code. Hopefully, they all understand what "volatile" means these - * days. + * NB: an overenthusiastic optimizing compiler could easily break this code. + * Hopefully, they all understand what "volatile" means these days. */ for (;;) { @@ -960,8 +951,7 @@ ProcessIncomingNotify(void) * Rewrite the tuple with 0 in notification column. * * simple_heap_update is safe here because no one else would have - * tried to UNLISTEN us, so there can be no uncommitted - * changes. + * tried to UNLISTEN us, so there can be no uncommitted changes. */ rTuple = heap_modifytuple(lTuple, tdesc, value, nulls, repl); simple_heap_update(lRel, &lTuple->t_self, rTuple); @@ -975,18 +965,17 @@ ProcessIncomingNotify(void) /* * We do NOT release the lock on pg_listener here; we need to hold it - * until end of transaction (which is about to happen, anyway) to - * ensure that other backends see our tuple updates when they look. - * Otherwise, a transaction started after this one might mistakenly - * think it doesn't need to send this backend a new NOTIFY. + * until end of transaction (which is about to happen, anyway) to ensure + * that other backends see our tuple updates when they look. Otherwise, a + * transaction started after this one might mistakenly think it doesn't + * need to send this backend a new NOTIFY. */ heap_close(lRel, NoLock); CommitTransactionCommand(); /* - * Must flush the notify messages to ensure frontend gets them - * promptly. + * Must flush the notify messages to ensure frontend gets them promptly. */ pq_flush(); @@ -1022,8 +1011,7 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID) /* * NOTE: we do not do pq_flush() here. For a self-notify, it will * happen at the end of the transaction, and for incoming notifies - * ProcessIncomingNotify will do it after finding all the - * notifies. + * ProcessIncomingNotify will do it after finding all the notifies. */ } else @@ -1052,11 +1040,11 @@ static void ClearPendingNotifies(void) { /* - * We used to have to explicitly deallocate the list members and - * nodes, because they were malloc'd. Now, since we know they are - * palloc'd in CurTransactionContext, we need not do that --- they'll - * go away automatically at transaction exit. We need only reset the - * list head pointer. + * We used to have to explicitly deallocate the list members and nodes, + * because they were malloc'd. 
Now, since we know they are palloc'd in + * CurTransactionContext, we need not do that --- they'll go away + * automatically at transaction exit. We need only reset the list head + * pointer. */ pendingNotifies = NIL; } @@ -1071,11 +1059,10 @@ notify_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len) { /* - * Set up to issue the NOTIFY at the end of my own - * current transaction. (XXX this has some issues if my own - * transaction later rolls back, or if there is any significant - * delay before I commit. OK for now because we disallow - * COMMIT PREPARED inside a transaction block.) + * Set up to issue the NOTIFY at the end of my own current transaction. + * (XXX this has some issues if my own transaction later rolls back, or if + * there is any significant delay before I commit. OK for now because we + * disallow COMMIT PREPARED inside a transaction block.) */ Async_Notify((char *) recdata); } diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 1d5a916c544..35420a87c0b 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -11,7 +11,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.139 2005/08/26 03:07:16 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.140 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -144,8 +144,8 @@ cluster(ClusterStmt *stmt) if (!OidIsValid(indexOid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("index \"%s\" for table \"%s\" does not exist", - stmt->indexname, stmt->relation->relname))); + errmsg("index \"%s\" for table \"%s\" does not exist", + stmt->indexname, stmt->relation->relname))); } /* All other checks are done in cluster_rel() */ @@ -161,24 +161,24 @@ cluster(ClusterStmt *stmt) else { /* - * This is the "multi relation" case. We need to cluster all - * tables that have some index with indisclustered set. + * This is the "multi relation" case. We need to cluster all tables + * that have some index with indisclustered set. */ MemoryContext cluster_context; List *rvs; ListCell *rv; /* - * We cannot run this form of CLUSTER inside a user transaction - * block; we'd be holding locks way too long. + * We cannot run this form of CLUSTER inside a user transaction block; + * we'd be holding locks way too long. */ PreventTransactionChain((void *) stmt, "CLUSTER"); /* * Create special memory context for cross-transaction storage. * - * Since it is a child of PortalContext, it will go away even in case - * of error. + * Since it is a child of PortalContext, it will go away even in case of + * error. */ cluster_context = AllocSetContextCreate(PortalContext, "Cluster", @@ -187,8 +187,8 @@ cluster(ClusterStmt *stmt) ALLOCSET_DEFAULT_MAXSIZE); /* - * Build the list of relations to cluster. Note that this lives - * in cluster_context. + * Build the list of relations to cluster. Note that this lives in + * cluster_context. */ rvs = get_tables_to_cluster(cluster_context); @@ -239,12 +239,12 @@ cluster_rel(RelToCluster *rvtc, bool recheck) CHECK_FOR_INTERRUPTS(); /* - * Since we may open a new transaction for each relation, we have to - * check that the relation still is what we think it is. + * Since we may open a new transaction for each relation, we have to check + * that the relation still is what we think it is. * - * If this is a single-transaction CLUSTER, we can skip these tests. 
We - * *must* skip the one on indisclustered since it would reject an - * attempt to cluster a not-previously-clustered index. + * If this is a single-transaction CLUSTER, we can skip these tests. We *must* + * skip the one on indisclustered since it would reject an attempt to + * cluster a not-previously-clustered index. */ if (recheck) { @@ -284,10 +284,10 @@ cluster_rel(RelToCluster *rvtc, bool recheck) } /* - * We grab exclusive access to the target rel and index for the - * duration of the transaction. (This is redundant for the single- - * transaction case, since cluster() already did it.) The index lock - * is taken inside check_index_is_clusterable. + * We grab exclusive access to the target rel and index for the duration + * of the transaction. (This is redundant for the single- transaction + * case, since cluster() already did it.) The index lock is taken inside + * check_index_is_clusterable. */ OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock); @@ -328,26 +328,26 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck) RelationGetRelationName(OldHeap)))); /* - * Disallow clustering on incomplete indexes (those that might not - * index every row of the relation). We could relax this by making a - * separate seqscan pass over the table to copy the missing rows, but - * that seems expensive and tedious. + * Disallow clustering on incomplete indexes (those that might not index + * every row of the relation). We could relax this by making a separate + * seqscan pass over the table to copy the missing rows, but that seems + * expensive and tedious. */ if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot cluster on partial index \"%s\"", RelationGetRelationName(OldIndex)))); - + if (!OldIndex->rd_am->amindexnulls) { AttrNumber colno; /* - * If the AM doesn't index nulls, then it's a partial index unless - * we can prove all the rows are non-null. Note we only need look - * at the first column; multicolumn-capable AMs are *required* to - * index nulls in columns after the first. + * If the AM doesn't index nulls, then it's a partial index unless we + * can prove all the rows are non-null. Note we only need look at the + * first column; multicolumn-capable AMs are *required* to index nulls + * in columns after the first. */ colno = OldIndex->rd_index->indkey.values[0]; if (colno > 0) @@ -358,11 +358,11 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot cluster on index \"%s\" because access method\n" "does not handle null values", - RelationGetRelationName(OldIndex)), + RelationGetRelationName(OldIndex)), errhint("You may be able to work around this by marking column \"%s\" NOT NULL%s", - NameStr(OldHeap->rd_att->attrs[colno - 1]->attname), - recheck ? ",\nor use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster\n" - "specification from the table." : "."))); + NameStr(OldHeap->rd_att->attrs[colno - 1]->attname), + recheck ? ",\nor use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster\n" + "specification from the table." 
: "."))); } else if (colno < 0) { @@ -374,15 +374,15 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot cluster on expressional index \"%s\" because its index access\n" "method does not handle null values", - RelationGetRelationName(OldIndex)))); + RelationGetRelationName(OldIndex)))); } /* - * Disallow clustering system relations. This will definitely NOT - * work for shared relations (we have no way to update pg_class rows - * in other databases), nor for nailed-in-cache relations (the - * relfilenode values for those are hardwired, see relcache.c). It - * might work for other system relations, but I ain't gonna risk it. + * Disallow clustering system relations. This will definitely NOT work + * for shared relations (we have no way to update pg_class rows in other + * databases), nor for nailed-in-cache relations (the relfilenode values + * for those are hardwired, see relcache.c). It might work for other + * system relations, but I ain't gonna risk it. */ if (IsSystemRelation(OldHeap)) ereport(ERROR, @@ -391,13 +391,13 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck) RelationGetRelationName(OldHeap)))); /* - * Don't allow cluster on temp tables of other backends ... their - * local buffer manager is not going to cope. + * Don't allow cluster on temp tables of other backends ... their local + * buffer manager is not going to cope. */ if (isOtherTempNamespace(RelationGetNamespace(OldHeap))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot cluster temporary tables of other sessions"))); + errmsg("cannot cluster temporary tables of other sessions"))); /* Drop relcache refcnt on OldIndex, but keep lock */ index_close(OldIndex); @@ -454,8 +454,8 @@ mark_index_clustered(Relation rel, Oid indexOid) indexForm = (Form_pg_index) GETSTRUCT(indexTuple); /* - * Unset the bit if set. We know it's wrong because we checked - * this earlier. + * Unset the bit if set. We know it's wrong because we checked this + * earlier. */ if (indexForm->indisclustered) { @@ -503,20 +503,18 @@ rebuild_relation(Relation OldHeap, Oid indexOid) heap_close(OldHeap, NoLock); /* - * Create the new heap, using a temporary name in the same namespace - * as the existing table. NOTE: there is some risk of collision with - * user relnames. Working around this seems more trouble than it's - * worth; in particular, we can't create the new heap in a different - * namespace from the old, or we will have problems with the TEMP - * status of temp tables. + * Create the new heap, using a temporary name in the same namespace as + * the existing table. NOTE: there is some risk of collision with user + * relnames. Working around this seems more trouble than it's worth; in + * particular, we can't create the new heap in a different namespace from + * the old, or we will have problems with the TEMP status of temp tables. */ snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid); OIDNewHeap = make_new_heap(tableOid, NewHeapName, tableSpace); /* - * We don't need CommandCounterIncrement() because make_new_heap did - * it. + * We don't need CommandCounterIncrement() because make_new_heap did it. */ /* @@ -546,9 +544,9 @@ rebuild_relation(Relation OldHeap, Oid indexOid) /* performDeletion does CommandCounterIncrement at end */ /* - * Rebuild each index on the relation (but not the toast table, which - * is all-new at this point). 
We do not need - * CommandCounterIncrement() because reindex_relation does it. + * Rebuild each index on the relation (but not the toast table, which is + * all-new at this point). We do not need CommandCounterIncrement() + * because reindex_relation does it. */ reindex_relation(tableOid, false); } @@ -587,15 +585,15 @@ make_new_heap(Oid OIDOldHeap, const char *NewName, Oid NewTableSpace) allowSystemTableMods); /* - * Advance command counter so that the newly-created relation's - * catalog tuples will be visible to heap_open. + * Advance command counter so that the newly-created relation's catalog + * tuples will be visible to heap_open. */ CommandCounterIncrement(); /* * If necessary, create a TOAST table for the new relation. Note that - * AlterTableCreateToastTable ends with CommandCounterIncrement(), so - * that the TOAST table will be visible for insertion. + * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that + * the TOAST table will be visible for insertion. */ AlterTableCreateToastTable(OIDNewHeap, true); @@ -629,8 +627,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex) OldIndex = index_open(OIDOldIndex); /* - * Their tuple descriptors should be exactly alike, but here we only - * need assume that they have the same number of columns. + * Their tuple descriptors should be exactly alike, but here we only need + * assume that they have the same number of columns. */ oldTupDesc = RelationGetDescr(OldHeap); newTupDesc = RelationGetDescr(NewHeap); @@ -654,15 +652,14 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex) * We cannot simply pass the tuple to heap_insert(), for several * reasons: * - * 1. heap_insert() will overwrite the commit-status fields of the - * tuple it's handed. This would trash the source relation, which is - * bad news if we abort later on. (This was a bug in releases thru - * 7.0) + * 1. heap_insert() will overwrite the commit-status fields of the tuple + * it's handed. This would trash the source relation, which is bad + * news if we abort later on. (This was a bug in releases thru 7.0) * - * 2. We'd like to squeeze out the values of any dropped columns, - * both to save space and to ensure we have no corner-case failures. - * (It's possible for example that the new table hasn't got a TOAST - * table and so is unable to store any large values of dropped cols.) + * 2. We'd like to squeeze out the values of any dropped columns, both to + * save space and to ensure we have no corner-case failures. (It's + * possible for example that the new table hasn't got a TOAST table + * and so is unable to store any large values of dropped cols.) * * 3. The tuple might not even be legal for the new table; this is * currently only known to happen as an after-effect of ALTER TABLE @@ -784,19 +781,18 @@ swap_relation_files(Oid r1, Oid r2) CatalogCloseIndexes(indstate); /* - * If we have toast tables associated with the relations being - * swapped, change their dependency links to re-associate them with - * their new owning relations. Otherwise the wrong one will get - * dropped ... + * If we have toast tables associated with the relations being swapped, + * change their dependency links to re-associate them with their new + * owning relations. Otherwise the wrong one will get dropped ... * * NOTE: it is possible that only one table has a toast table; this can - * happen in CLUSTER if there were dropped columns in the old table, - * and in ALTER TABLE when adding or changing type of columns. 
+ * happen in CLUSTER if there were dropped columns in the old table, and + * in ALTER TABLE when adding or changing type of columns. * - * NOTE: at present, a TOAST table's only dependency is the one on its - * owning table. If more are ever created, we'd need to use something - * more selective than deleteDependencyRecordsFor() to get rid of only - * the link we want. + * NOTE: at present, a TOAST table's only dependency is the one on its owning + * table. If more are ever created, we'd need to use something more + * selective than deleteDependencyRecordsFor() to get rid of only the link + * we want. */ if (relform1->reltoastrelid || relform2->reltoastrelid) { @@ -845,16 +841,16 @@ swap_relation_files(Oid r1, Oid r2) /* * Blow away the old relcache entries now. We need this kluge because - * relcache.c keeps a link to the smgr relation for the physical file, - * and that will be out of date as soon as we do - * CommandCounterIncrement. Whichever of the rels is the second to be - * cleared during cache invalidation will have a dangling reference to - * an already-deleted smgr relation. Rather than trying to avoid this - * by ordering operations just so, it's easiest to not have the - * relcache entries there at all. (Fortunately, since one of the - * entries is local in our transaction, it's sufficient to clear out - * our own relcache this way; the problem cannot arise for other - * backends when they see our update on the non-local relation.) + * relcache.c keeps a link to the smgr relation for the physical file, and + * that will be out of date as soon as we do CommandCounterIncrement. + * Whichever of the rels is the second to be cleared during cache + * invalidation will have a dangling reference to an already-deleted smgr + * relation. Rather than trying to avoid this by ordering operations just + * so, it's easiest to not have the relcache entries there at all. + * (Fortunately, since one of the entries is local in our transaction, + * it's sufficient to clear out our own relcache this way; the problem + * cannot arise for other backends when they see our update on the + * non-local relation.) */ RelationForgetRelation(r1); RelationForgetRelation(r2); @@ -886,9 +882,9 @@ get_tables_to_cluster(MemoryContext cluster_context) /* * Get all indexes that have indisclustered set and are owned by - * appropriate user. System relations or nailed-in relations cannot - * ever have indisclustered set, because CLUSTER will refuse to set it - * when called with one of them as argument. + * appropriate user. System relations or nailed-in relations cannot ever + * have indisclustered set, because CLUSTER will refuse to set it when + * called with one of them as argument. 
*/ indRelation = heap_open(IndexRelationId, AccessShareLock); ScanKeyInit(&entry, @@ -904,8 +900,8 @@ get_tables_to_cluster(MemoryContext cluster_context) continue; /* - * We have to build the list in a different memory context so it - * will survive the cross-transaction processing + * We have to build the list in a different memory context so it will + * survive the cross-transaction processing */ old_context = MemoryContextSwitchTo(cluster_context); diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c index 8177e39c71c..cf7dc06fa7f 100644 --- a/src/backend/commands/comment.c +++ b/src/backend/commands/comment.c @@ -7,7 +7,7 @@ * Copyright (c) 1996-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.83 2005/04/14 20:03:23 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -310,10 +310,9 @@ CommentRelation(int objtype, List *relname, char *comment) tgtrel = makeRangeVarFromNameList(relname); /* - * Open the relation. We do this mainly to acquire a lock that - * ensures no one else drops the relation before we commit. (If they - * did, they'd fail to remove the entry we are about to make in - * pg_description.) + * Open the relation. We do this mainly to acquire a lock that ensures no + * one else drops the relation before we commit. (If they did, they'd + * fail to remove the entry we are about to make in pg_description.) */ relation = relation_openrv(tgtrel, AccessShareLock); @@ -441,17 +440,16 @@ CommentDatabase(List *qualname, char *comment) database = strVal(linitial(qualname)); /* - * We cannot currently support cross-database comments (since other - * DBs cannot see pg_description of this database). So, we reject - * attempts to comment on a database other than the current one. - * Someday this might be improved, but it would take a redesigned - * infrastructure. + * We cannot currently support cross-database comments (since other DBs + * cannot see pg_description of this database). So, we reject attempts to + * comment on a database other than the current one. Someday this might be + * improved, but it would take a redesigned infrastructure. * - * When loading a dump, we may see a COMMENT ON DATABASE for the old name - * of the database. Erroring out would prevent pg_restore from - * completing (which is really pg_restore's fault, but for now we will - * work around the problem here). Consensus is that the best fix is - * to treat wrong database name as a WARNING not an ERROR. + * When loading a dump, we may see a COMMENT ON DATABASE for the old name of + * the database. Erroring out would prevent pg_restore from completing + * (which is really pg_restore's fault, but for now we will work around + * the problem here). Consensus is that the best fix is to treat wrong + * database name as a WARNING not an ERROR. 
*/ /* First get the database OID */ @@ -467,8 +465,8 @@ CommentDatabase(List *qualname, char *comment) /* Only allow comments on the current database */ if (oid != MyDatabaseId) { - ereport(WARNING, /* throw just a warning so pg_restore - * doesn't fail */ + ereport(WARNING, /* throw just a warning so pg_restore doesn't + * fail */ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("database comments may only be applied to the current database"))); return; @@ -587,8 +585,8 @@ CommentRule(List *qualname, char *comment) ForwardScanDirection))) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("there are multiple rules named \"%s\"", rulename), - errhint("Specify a relation name as well as a rule name."))); + errmsg("there are multiple rules named \"%s\"", rulename), + errhint("Specify a relation name as well as a rule name."))); heap_endscan(scanDesc); heap_close(RewriteRelation, AccessShareLock); @@ -616,8 +614,8 @@ CommentRule(List *qualname, char *comment) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("rule \"%s\" for relation \"%s\" does not exist", - rulename, RelationGetRelationName(relation)))); + errmsg("rule \"%s\" for relation \"%s\" does not exist", + rulename, RelationGetRelationName(relation)))); Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class); ruleoid = HeapTupleGetOid(tuple); ReleaseSysCache(tuple); @@ -802,8 +800,8 @@ CommentTrigger(List *qualname, char *comment) RelationGetRelationName(relation)); /* - * Fetch the trigger tuple from pg_trigger. There can be only one - * because of the unique index. + * Fetch the trigger tuple from pg_trigger. There can be only one because + * of the unique index. */ pg_trigger = heap_open(TriggerRelationId, AccessShareLock); ScanKeyInit(&entry[0], @@ -879,9 +877,9 @@ CommentConstraint(List *qualname, char *comment) RelationGetRelationName(relation)); /* - * Fetch the constraint tuple from pg_constraint. There may be more - * than one match, because constraints are not required to have unique - * names; if so, error out. + * Fetch the constraint tuple from pg_constraint. There may be more than + * one match, because constraints are not required to have unique names; + * if so, error out. 
*/ pg_constraint = heap_open(ConstraintRelationId, AccessShareLock); @@ -902,8 +900,8 @@ CommentConstraint(List *qualname, char *comment) if (OidIsValid(conOid)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("table \"%s\" has multiple constraints named \"%s\"", - RelationGetRelationName(relation), conName))); + errmsg("table \"%s\" has multiple constraints named \"%s\"", + RelationGetRelationName(relation), conName))); conOid = HeapTupleGetOid(tuple); } } @@ -914,8 +912,8 @@ CommentConstraint(List *qualname, char *comment) if (!OidIsValid(conOid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("constraint \"%s\" for table \"%s\" does not exist", - conName, RelationGetRelationName(relation)))); + errmsg("constraint \"%s\" for table \"%s\" does not exist", + conName, RelationGetRelationName(relation)))); /* Call CreateComments() to create/drop the comments */ CreateComments(conOid, ConstraintRelationId, 0, comment); @@ -988,7 +986,7 @@ CommentLanguage(List *qualname, char *comment) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to comment on procedural language"))); + errmsg("must be superuser to comment on procedural language"))); /* Call CreateComments() to create/drop the comments */ CreateComments(oid, LanguageRelationId, 0, comment); @@ -1111,7 +1109,7 @@ CommentLargeObject(List *qualname, char *comment) * strings. */ loid = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(strVal(node)))); + CStringGetDatum(strVal(node)))); break; default: elog(ERROR, "unrecognized node type: %d", diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c index 912f35ea20b..53b3f854ce6 100644 --- a/src/backend/commands/conversioncmds.c +++ b/src/backend/commands/conversioncmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.22 2005/08/22 17:38:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.23 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -74,8 +74,8 @@ CreateConversionCommand(CreateConversionStmt *stmt) to_encoding_name))); /* - * Check the existence of the conversion function. Function name could - * be a qualified name. + * Check the existence of the conversion function. Function name could be + * a qualified name. 
*/ funcoid = LookupFuncName(func_name, sizeof(funcargs) / sizeof(Oid), funcargs, false); @@ -87,8 +87,8 @@ CreateConversionCommand(CreateConversionStmt *stmt) NameListToString(func_name)); /* - * All seem ok, go ahead (possible failure would be a duplicate - * conversion name) + * All seem ok, go ahead (possible failure would be a duplicate conversion + * name) */ ConversionCreate(conversion_name, namespaceId, GetUserId(), from_encoding, to_encoding, funcoid, stmt->def); @@ -148,11 +148,11 @@ RenameConversion(List *name, const char *newname) 0, 0)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("conversion \"%s\" already exists in schema \"%s\"", - newname, get_namespace_name(namespaceOid)))); + errmsg("conversion \"%s\" already exists in schema \"%s\"", + newname, get_namespace_name(namespaceOid)))); /* must be owner */ - if (!pg_conversion_ownercheck(conversionOid,GetUserId())) + if (!pg_conversion_ownercheck(conversionOid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION, NameListToString(name)); @@ -210,7 +210,7 @@ AlterConversionOwner(List *name, Oid newOwnerId) if (!superuser()) { /* Otherwise, must be owner of the existing object */ - if (!pg_conversion_ownercheck(HeapTupleGetOid(tup),GetUserId())) + if (!pg_conversion_ownercheck(HeapTupleGetOid(tup), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION, NameListToString(name)); @@ -227,8 +227,7 @@ AlterConversionOwner(List *name, Oid newOwnerId) } /* - * Modify the owner --- okay to scribble on tup because it's a - * copy + * Modify the owner --- okay to scribble on tup because it's a copy */ convForm->conowner = newOwnerId; diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index cd215cb4154..8ab402e6b74 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.252 2005/10/03 23:43:09 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.253 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -100,7 +100,7 @@ typedef struct CopyStateData bool fe_eof; /* true if detected end of copy data */ EolType eol_type; /* EOL type of input */ int client_encoding; /* remote side's character encoding */ - bool need_transcoding; /* client encoding diff from server? */ + bool need_transcoding; /* client encoding diff from server? */ bool client_only_encoding; /* encoding not valid on server? */ /* parameters from the COPY command */ @@ -111,12 +111,12 @@ typedef struct CopyStateData bool csv_mode; /* Comma Separated Value format? */ bool header_line; /* CSV header line? */ char *null_print; /* NULL marker string (server encoding!) */ - int null_print_len; /* length of same */ + int null_print_len; /* length of same */ char *delim; /* column delimiter (must be 1 byte) */ char *quote; /* CSV quote char (must be 1 byte) */ char *escape; /* CSV escape char (must be 1 byte) */ - List *force_quote_atts; /* integer list of attnums to FQ */ - List *force_notnull_atts; /* integer list of attnums to FNN */ + List *force_quote_atts; /* integer list of attnums to FQ */ + List *force_notnull_atts; /* integer list of attnums to FNN */ /* these are just for error messages, see copy_in_error_callback */ const char *cur_relname; /* table name for error messages */ @@ -127,26 +127,26 @@ typedef struct CopyStateData /* * These variables are used to reduce overhead in textual COPY FROM. 
* - * attribute_buf holds the separated, de-escaped text for each field of - * the current line. The CopyReadAttributes functions return arrays of + * attribute_buf holds the separated, de-escaped text for each field of the + * current line. The CopyReadAttributes functions return arrays of * pointers into this buffer. We avoid palloc/pfree overhead by re-using * the buffer on each cycle. */ StringInfoData attribute_buf; /* - * Similarly, line_buf holds the whole input line being processed. - * The input cycle is first to read the whole line into line_buf, - * convert it to server encoding there, and then extract the individual - * attribute fields into attribute_buf. line_buf is preserved unmodified - * so that we can display it in error messages if appropriate. + * Similarly, line_buf holds the whole input line being processed. The + * input cycle is first to read the whole line into line_buf, convert it + * to server encoding there, and then extract the individual attribute + * fields into attribute_buf. line_buf is preserved unmodified so that we + * can display it in error messages if appropriate. */ StringInfoData line_buf; - bool line_buf_converted; /* converted to server encoding? */ + bool line_buf_converted; /* converted to server encoding? */ /* * Finally, raw_buf holds raw data read from the data source (file or - * client connection). CopyReadLine parses this data sufficiently to + * client connection). CopyReadLine parses this data sufficiently to * locate line boundaries, then transfers the data to line_buf and * converts it. Note: we guarantee that there is a \0 at * raw_buf[raw_buf_len]. @@ -170,17 +170,17 @@ static void CopyFrom(CopyState cstate); static bool CopyReadLine(CopyState cstate); static bool CopyReadLineText(CopyState cstate); static bool CopyReadLineCSV(CopyState cstate); -static int CopyReadAttributesText(CopyState cstate, int maxfields, - char **fieldvals); -static int CopyReadAttributesCSV(CopyState cstate, int maxfields, - char **fieldvals); +static int CopyReadAttributesText(CopyState cstate, int maxfields, + char **fieldvals); +static int CopyReadAttributesCSV(CopyState cstate, int maxfields, + char **fieldvals); static Datum CopyReadBinaryAttribute(CopyState cstate, - int column_no, FmgrInfo *flinfo, - Oid typioparam, int32 typmod, - bool *isnull); + int column_no, FmgrInfo *flinfo, + Oid typioparam, int32 typmod, + bool *isnull); static void CopyAttributeOutText(CopyState cstate, char *server_string); static void CopyAttributeOutCSV(CopyState cstate, char *server_string, - bool use_quote); + bool use_quote); static List *CopyGetAttnums(Relation rel, List *attnamelist); static char *limit_printout_length(const char *str); @@ -192,8 +192,8 @@ static void CopySendData(CopyState cstate, void *databuf, int datasize); static void CopySendString(CopyState cstate, const char *str); static void CopySendChar(CopyState cstate, char c); static void CopySendEndOfRow(CopyState cstate); -static int CopyGetData(CopyState cstate, void *databuf, - int minread, int maxread); +static int CopyGetData(CopyState cstate, void *databuf, + int minread, int maxread); static void CopySendInt32(CopyState cstate, int32 val); static bool CopyGetInt32(CopyState cstate, int32 *val); static void CopySendInt16(CopyState cstate, int16 val); @@ -230,7 +230,7 @@ SendCopyBegin(CopyState cstate) if (cstate->binary) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY BINARY is not supported to stdout or from stdin"))); + errmsg("COPY BINARY is not supported to stdout or 
from stdin"))); pq_putemptymessage('H'); /* grottiness needed for old COPY OUT protocol */ pq_startcopyout(); @@ -242,7 +242,7 @@ SendCopyBegin(CopyState cstate) if (cstate->binary) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY BINARY is not supported to stdout or from stdin"))); + errmsg("COPY BINARY is not supported to stdout or from stdin"))); pq_putemptymessage('B'); /* grottiness needed for old COPY OUT protocol */ pq_startcopyout(); @@ -276,7 +276,7 @@ ReceiveCopyBegin(CopyState cstate) if (cstate->binary) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY BINARY is not supported to stdout or from stdin"))); + errmsg("COPY BINARY is not supported to stdout or from stdin"))); pq_putemptymessage('G'); cstate->copy_dest = COPY_OLD_FE; } @@ -286,7 +286,7 @@ ReceiveCopyBegin(CopyState cstate) if (cstate->binary) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY BINARY is not supported to stdout or from stdin"))); + errmsg("COPY BINARY is not supported to stdout or from stdin"))); pq_putemptymessage('D'); cstate->copy_dest = COPY_OLD_FE; } @@ -408,7 +408,7 @@ CopySendEndOfRow(CopyState cstate) * CopyGetData reads data from the source (file or frontend) * * We attempt to read at least minread, and at most maxread, bytes from - * the source. The actual number of bytes read is returned; if this is + * the source. The actual number of bytes read is returned; if this is * less than minread, EOF was detected. * * Note: when copying from the frontend, we expect a proper EOF mark per @@ -420,7 +420,7 @@ CopySendEndOfRow(CopyState cstate) static int CopyGetData(CopyState cstate, void *databuf, int minread, int maxread) { - int bytesread = 0; + int bytesread = 0; switch (cstate->copy_dest) { @@ -432,12 +432,13 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread) errmsg("could not read from COPY file: %m"))); break; case COPY_OLD_FE: + /* * We cannot read more than minread bytes (which in practice is 1) * because old protocol doesn't have any clear way of separating - * the COPY stream from following data. This is slow, but not - * any slower than the code path was originally, and we don't - * care much anymore about the performance of old protocol. + * the COPY stream from following data. This is slow, but not any + * slower than the code path was originally, and we don't care + * much anymore about the performance of old protocol. */ if (pq_getbytes((char *) databuf, minread)) { @@ -463,11 +464,11 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread) if (mtype == EOF) ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("unexpected EOF on client connection"))); + errmsg("unexpected EOF on client connection"))); if (pq_getmessage(cstate->fe_msgbuf, 0)) ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("unexpected EOF on client connection"))); + errmsg("unexpected EOF on client connection"))); switch (mtype) { case 'd': /* CopyData */ @@ -480,16 +481,16 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread) ereport(ERROR, (errcode(ERRCODE_QUERY_CANCELED), errmsg("COPY from stdin failed: %s", - pq_getmsgstring(cstate->fe_msgbuf)))); + pq_getmsgstring(cstate->fe_msgbuf)))); break; case 'H': /* Flush */ case 'S': /* Sync */ /* - * Ignore Flush/Sync for the convenience of - * client libraries (such as libpq) that may - * send those without noticing that the - * command they just sent was COPY. 
+ * Ignore Flush/Sync for the convenience of client + * libraries (such as libpq) that may send those + * without noticing that the command they just + * sent was COPY. */ goto readmessage; default: @@ -593,8 +594,8 @@ CopyGetInt16(CopyState cstate, int16 *val) static bool CopyLoadRawBuf(CopyState cstate) { - int nbytes; - int inbytes; + int nbytes; + int inbytes; if (cstate->raw_buf_index < cstate->raw_buf_len) { @@ -791,7 +792,7 @@ DoCopy(const CopyStmt *stmt) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("COPY delimiter must be a single character"))); - /* Check header */ + /* Check header */ if (!cstate->csv_mode && cstate->header_line) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -827,23 +828,23 @@ DoCopy(const CopyStmt *stmt) if (force_quote != NIL && is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force quote only available using COPY TO"))); + errmsg("COPY force quote only available using COPY TO"))); /* Check force_notnull */ if (!cstate->csv_mode && force_notnull != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force not null available only in CSV mode"))); + errmsg("COPY force not null available only in CSV mode"))); if (force_notnull != NIL && !is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force not null only available using COPY FROM"))); + errmsg("COPY force not null only available using COPY FROM"))); /* Don't allow the delimiter to appear in the null string. */ if (strchr(cstate->null_print, cstate->delim[0]) != NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY delimiter must not appear in the NULL specification"))); + errmsg("COPY delimiter must not appear in the NULL specification"))); /* Don't allow the CSV quote char to appear in the null string. */ if (cstate->csv_mode && @@ -874,7 +875,7 @@ DoCopy(const CopyStmt *stmt) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to COPY to or from a file"), errhint("Anyone can COPY to stdout or from stdin. 
" - "psql's \\copy command also works for anyone."))); + "psql's \\copy command also works for anyone."))); /* Don't allow COPY w/ OIDs to or from a table without them */ if (cstate->oids && !cstate->rel->rd_rel->relhasoids) @@ -902,8 +903,8 @@ DoCopy(const CopyStmt *stmt) if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("FORCE QUOTE column \"%s\" not referenced by COPY", - NameStr(attr[attnum - 1]->attname)))); + errmsg("FORCE QUOTE column \"%s\" not referenced by COPY", + NameStr(attr[attnum - 1]->attname)))); } } @@ -924,8 +925,8 @@ DoCopy(const CopyStmt *stmt) if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("FORCE NOT NULL column \"%s\" not referenced by COPY", - NameStr(attr[attnum - 1]->attname)))); + errmsg("FORCE NOT NULL column \"%s\" not referenced by COPY", + NameStr(attr[attnum - 1]->attname)))); } } @@ -960,8 +961,8 @@ DoCopy(const CopyStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy to non-table relation \"%s\"", - RelationGetRelationName(cstate->rel)))); + errmsg("cannot copy to non-table relation \"%s\"", + RelationGetRelationName(cstate->rel)))); } if (pipe) { @@ -979,8 +980,8 @@ DoCopy(const CopyStmt *stmt) if (cstate->copy_file == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open file \"%s\" for reading: %m", - filename))); + errmsg("could not open file \"%s\" for reading: %m", + filename))); fstat(fileno(cstate->copy_file), &st); if (S_ISDIR(st.st_mode)) @@ -1011,8 +1012,8 @@ DoCopy(const CopyStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from non-table relation \"%s\"", - RelationGetRelationName(cstate->rel)))); + errmsg("cannot copy from non-table relation \"%s\"", + RelationGetRelationName(cstate->rel)))); } if (pipe) { @@ -1027,13 +1028,13 @@ DoCopy(const CopyStmt *stmt) struct stat st; /* - * Prevent write to relative path ... too easy to shoot - * oneself in the foot by overwriting a database file ... + * Prevent write to relative path ... too easy to shoot oneself in + * the foot by overwriting a database file ... */ if (!is_absolute_path(filename)) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("relative path not allowed for COPY to file"))); + errmsg("relative path not allowed for COPY to file"))); oumask = umask((mode_t) 022); cstate->copy_file = AllocateFile(filename, PG_BINARY_W); @@ -1042,8 +1043,8 @@ DoCopy(const CopyStmt *stmt) if (cstate->copy_file == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open file \"%s\" for writing: %m", - filename))); + errmsg("could not open file \"%s\" for writing: %m", + filename))); fstat(fileno(cstate->copy_file), &st); if (S_ISDIR(st.st_mode)) @@ -1069,10 +1070,9 @@ DoCopy(const CopyStmt *stmt) } /* - * Close the relation. If reading, we can release the AccessShareLock - * we got; if writing, we should hold the lock until end of - * transaction to ensure that updates will be committed before lock is - * released. + * Close the relation. If reading, we can release the AccessShareLock we + * got; if writing, we should hold the lock until end of transaction to + * ensure that updates will be committed before lock is released. */ heap_close(cstate->rel, (is_from ? NoLock : AccessShareLock)); @@ -1105,8 +1105,8 @@ DoCopyTo(CopyState cstate) { /* * Make sure we turn off old-style COPY OUT mode upon error. 
It is - * okay to do this in all cases, since it does nothing if the mode - * is not on. + * okay to do this in all cases, since it does nothing if the mode is + * not on. */ pq_endcopyout(true); PG_RE_THROW(); @@ -1138,7 +1138,7 @@ CopyTo(CopyState cstate) attr = tupDesc->attrs; num_phys_attrs = tupDesc->natts; attr_count = list_length(cstate->attnumlist); - null_print_client = cstate->null_print; /* default */ + null_print_client = cstate->null_print; /* default */ /* Get info about the columns we need to process. */ out_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo)); @@ -1167,9 +1167,9 @@ CopyTo(CopyState cstate) /* * Create a temporary memory context that we can reset once per row to - * recover palloc'd memory. This avoids any problems with leaks - * inside datatype output routines, and should be faster than retail - * pfree's anyway. (We don't need a whole econtext as CopyFrom does.) + * recover palloc'd memory. This avoids any problems with leaks inside + * datatype output routines, and should be faster than retail pfree's + * anyway. (We don't need a whole econtext as CopyFrom does.) */ mycontext = AllocSetContextCreate(CurrentMemoryContext, "COPY TO", @@ -1206,12 +1206,12 @@ CopyTo(CopyState cstate) /* if a header has been requested send the line */ if (cstate->header_line) { - bool hdr_delim = false; - + bool hdr_delim = false; + foreach(cur, cstate->attnumlist) { int attnum = lfirst_int(cur); - char *colname; + char *colname; if (hdr_delim) CopySendChar(cstate, cstate->delim[0]); @@ -1258,7 +1258,7 @@ CopyTo(CopyState cstate) if (cstate->oids) { string = DatumGetCString(DirectFunctionCall1(oidout, - ObjectIdGetDatum(HeapTupleGetOid(tuple)))); + ObjectIdGetDatum(HeapTupleGetOid(tuple)))); CopySendString(cstate, string); need_delim = true; } @@ -1356,7 +1356,7 @@ copy_in_error_callback(void *arg) if (cstate->cur_attname && cstate->cur_attval) { /* error is relevant to a particular column */ - char *attval; + char *attval; attval = limit_printout_length(cstate->cur_attval); errcontext("COPY %s, line %d, column %s: \"%s\"", @@ -1369,7 +1369,7 @@ copy_in_error_callback(void *arg) /* error is relevant to a particular line */ if (cstate->line_buf_converted || !cstate->need_transcoding) { - char *lineval; + char *lineval; lineval = limit_printout_length(cstate->line_buf.data); errcontext("COPY %s, line %d: \"%s\"", @@ -1379,12 +1379,12 @@ copy_in_error_callback(void *arg) else { /* - * Here, the line buffer is still in a foreign encoding, - * and indeed it's quite likely that the error is precisely - * a failure to do encoding conversion (ie, bad data). We - * dare not try to convert it, and at present there's no way - * to regurgitate it without conversion. So we have to punt - * and just report the line number. + * Here, the line buffer is still in a foreign encoding, and + * indeed it's quite likely that the error is precisely a + * failure to do encoding conversion (ie, bad data). We dare + * not try to convert it, and at present there's no way to + * regurgitate it without conversion. So we have to punt and + * just report the line number. */ errcontext("COPY %s, line %d", cstate->cur_relname, cstate->cur_lineno); @@ -1474,8 +1474,8 @@ CopyFrom(CopyState cstate) /* * We need a ResultRelInfo so we can use the regular executor's - * index-entry-making machinery. (There used to be a huge amount of - * code here that basically duplicated execUtils.c ...) + * index-entry-making machinery. 
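The CopyTo hunk above reformats the comment about using a temporary per-row memory context instead of retail pfree's. Purely as an illustrative sketch (not the patch's code), the reset-per-row pattern it describes looks roughly like this; emit_rows_with_row_context, output_one_row and the context name are placeholders.

/*
 * Allocate a context once, switch into it for each row so the per-row
 * work can palloc freely, then reclaim everything with a single reset
 * instead of individual pfree calls.
 */
#include "postgres.h"
#include "utils/memutils.h"

static void
emit_rows_with_row_context(int nrows, void (*output_one_row) (int rownum))
{
	MemoryContext rowcontext;
	MemoryContext oldcontext;
	int			i;

	rowcontext = AllocSetContextCreate(CurrentMemoryContext,
									   "per-row work",
									   ALLOCSET_DEFAULT_MINSIZE,
									   ALLOCSET_DEFAULT_INITSIZE,
									   ALLOCSET_DEFAULT_MAXSIZE);

	for (i = 0; i < nrows; i++)
	{
		/* free everything the previous row palloc'd, in one cheap call */
		MemoryContextReset(rowcontext);
		oldcontext = MemoryContextSwitchTo(rowcontext);
		output_one_row(i);		/* placeholder; may palloc freely */
		MemoryContextSwitchTo(oldcontext);
	}

	MemoryContextDelete(rowcontext);
}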
(There used to be a huge amount of code + * here that basically duplicated execUtils.c ...) */ resultRelInfo = makeNode(ResultRelInfo); resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ @@ -1499,9 +1499,9 @@ CopyFrom(CopyState cstate) /* * Pick up the required catalog information for each attribute in the - * relation, including the input function, the element type (to pass - * to the input function), and info about defaults and constraints. - * (Which input function we use depends on text/binary format choice.) + * relation, including the input function, the element type (to pass to + * the input function), and info about defaults and constraints. (Which + * input function we use depends on text/binary format choice.) */ in_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo)); typioparams = (Oid *) palloc(num_phys_attrs * sizeof(Oid)); @@ -1519,7 +1519,7 @@ CopyFrom(CopyState cstate) /* Fetch the input function and typioparam info */ if (cstate->binary) getTypeBinaryInputInfo(attr[attnum - 1]->atttypid, - &in_func_oid, &typioparams[attnum - 1]); + &in_func_oid, &typioparams[attnum - 1]); else getTypeInputInfo(attr[attnum - 1]->atttypid, &in_func_oid, &typioparams[attnum - 1]); @@ -1553,12 +1553,12 @@ CopyFrom(CopyState cstate) Node *node; /* - * Easiest way to do this is to use parse_coerce.c to set up - * an expression that checks the constraints. (At present, - * the expression might contain a length-coercion-function - * call and/or CoerceToDomain nodes.) The bottom of the - * expression is a Param node so that we can fill in the - * actual datum during the data input loop. + * Easiest way to do this is to use parse_coerce.c to set up an + * expression that checks the constraints. (At present, the + * expression might contain a length-coercion-function call and/or + * CoerceToDomain nodes.) The bottom of the expression is a Param + * node so that we can fill in the actual datum during the data + * input loop. */ prm = makeNode(Param); prm->paramkind = PARAM_EXEC; @@ -1580,11 +1580,10 @@ CopyFrom(CopyState cstate) AfterTriggerBeginQuery(); /* - * Check BEFORE STATEMENT insertion triggers. It's debateable whether - * we should do this for COPY, since it's not really an "INSERT" - * statement as such. However, executing these triggers maintains - * consistency with the EACH ROW triggers that we already fire on - * COPY. + * Check BEFORE STATEMENT insertion triggers. It's debateable whether we + * should do this for COPY, since it's not really an "INSERT" statement as + * such. However, executing these triggers maintains consistency with the + * EACH ROW triggers that we already fire on COPY. 
*/ ExecBSInsertTriggers(estate, resultRelInfo); @@ -1612,20 +1611,20 @@ CopyFrom(CopyState cstate) if ((tmp >> 16) != 0) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("unrecognized critical flags in COPY file header"))); + errmsg("unrecognized critical flags in COPY file header"))); /* Header extension length */ if (!CopyGetInt32(cstate, &tmp) || tmp < 0) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("invalid COPY file header (missing length)"))); + errmsg("invalid COPY file header (missing length)"))); /* Skip extension header, if present */ while (tmp-- > 0) { if (CopyGetData(cstate, readSig, 1, 1) != 1) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("invalid COPY file header (wrong length)"))); + errmsg("invalid COPY file header (wrong length)"))); } } @@ -1700,9 +1699,8 @@ CopyFrom(CopyState cstate) /* * EOF at start of line means we're done. If we see EOF after - * some characters, we act as though it was newline followed - * by EOF, ie, process the line and then exit loop on next - * iteration. + * some characters, we act as though it was newline followed by + * EOF, ie, process the line and then exit loop on next iteration. */ if (done && cstate->line_buf.len == 0) break; @@ -1732,7 +1730,7 @@ CopyFrom(CopyState cstate) cstate->cur_attname = "oid"; cstate->cur_attval = string; loaded_oid = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(string))); + CStringGetDatum(string))); if (loaded_oid == InvalidOid) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), @@ -1768,8 +1766,8 @@ CopyFrom(CopyState cstate) cstate->cur_attval = string; values[m] = FunctionCall3(&in_functions[m], CStringGetDatum(string), - ObjectIdGetDatum(typioparams[m]), - Int32GetDatum(attr[m]->atttypmod)); + ObjectIdGetDatum(typioparams[m]), + Int32GetDatum(attr[m]->atttypmod)); nulls[m] = ' '; cstate->cur_attname = NULL; cstate->cur_attval = NULL; @@ -1834,9 +1832,9 @@ CopyFrom(CopyState cstate) } /* - * Now compute and insert any defaults available for the columns - * not provided by the input data. Anything not processed here or - * above will remain NULL. + * Now compute and insert any defaults available for the columns not + * provided by the input data. Anything not processed here or above + * will remain NULL. */ for (i = 0; i < num_defaults; i++) { @@ -1863,9 +1861,9 @@ CopyFrom(CopyState cstate) prmdata->isnull = (nulls[i] == 'n'); /* - * Execute the constraint expression. Allow the - * expression to replace the value (consider e.g. a - * timestamp precision restriction). + * Execute the constraint expression. Allow the expression to + * replace the value (consider e.g. a timestamp precision + * restriction). */ values[i] = ExecEvalExpr(exprstate, econtext, &isnull, NULL); @@ -1886,7 +1884,7 @@ CopyFrom(CopyState cstate) /* BEFORE ROW INSERT Triggers */ if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) { HeapTuple newtuple; @@ -1956,7 +1954,7 @@ CopyFrom(CopyState cstate) * server encoding. * * Result is true if read was terminated by EOF, false if terminated - * by newline. The terminating newline or EOF marker is not included + * by newline. The terminating newline or EOF marker is not included * in the final value of line_buf. */ static bool @@ -1981,12 +1979,13 @@ CopyReadLine(CopyState cstate) { /* * Reached EOF. In protocol version 3, we should ignore anything - * after \. up to the protocol end of copy data. 
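The header checks reindented above reject unrecognized critical flag bits and skip the self-describing header extension. For orientation, a client-side sketch of validating a COPY BINARY header under the same rules might look as follows; the stdio-based reading and the name check_copy_binary_header are assumptions of the sketch, and the 11-byte signature comes from the documented binary COPY format rather than from this diff.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <arpa/inet.h>			/* ntohl */

static bool
check_copy_binary_header(FILE *fp)
{
	static const char signature[11] = "PGCOPY\n\377\r\n";	/* 11th byte is '\0' */
	char		buf[11];
	uint32_t	rawval;
	int32_t		extlen;

	if (fread(buf, 1, 11, fp) != 11 || memcmp(buf, signature, 11) != 0)
		return false;			/* missing or corrupted signature */

	if (fread(&rawval, 4, 1, fp) != 1)
		return false;
	if ((ntohl(rawval) >> 16) != 0)
		return false;			/* unrecognized critical flag bits */

	if (fread(&rawval, 4, 1, fp) != 1)
		return false;
	extlen = (int32_t) ntohl(rawval);
	if (extlen < 0)
		return false;			/* invalid header extension length */
	while (extlen-- > 0)		/* skip extension data we don't understand */
	{
		if (fgetc(fp) == EOF)
			return false;		/* header claims more data than present */
	}

	return true;
}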
(XXX maybe - * better not to treat \. as special?) + * after \. up to the protocol end of copy data. (XXX maybe better + * not to treat \. as special?) */ if (cstate->copy_dest == COPY_NEW_FE) { - do { + do + { cstate->raw_buf_index = cstate->raw_buf_len; } while (CopyLoadRawBuf(cstate)); } @@ -2070,25 +2069,24 @@ CopyReadLineText(CopyState cstate) result = false; /* - * The objective of this loop is to transfer the entire next input - * line into line_buf. Hence, we only care for detecting newlines - * (\r and/or \n) and the end-of-copy marker (\.). + * The objective of this loop is to transfer the entire next input line + * into line_buf. Hence, we only care for detecting newlines (\r and/or + * \n) and the end-of-copy marker (\.). * * For backwards compatibility we allow backslashes to escape newline - * characters. Backslashes other than the end marker get put into the + * characters. Backslashes other than the end marker get put into the * line_buf, since CopyReadAttributesText does its own escape processing. * * These four characters, and only these four, are assumed the same in * frontend and backend encodings. * - * For speed, we try to move data to line_buf in chunks rather than - * one character at a time. raw_buf_ptr points to the next character - * to examine; any characters from raw_buf_index to raw_buf_ptr have - * been determined to be part of the line, but not yet transferred - * to line_buf. + * For speed, we try to move data to line_buf in chunks rather than one + * character at a time. raw_buf_ptr points to the next character to + * examine; any characters from raw_buf_index to raw_buf_ptr have been + * determined to be part of the line, but not yet transferred to line_buf. * - * For a little extra speed within the loop, we copy raw_buf and - * raw_buf_len into local variables. + * For a little extra speed within the loop, we copy raw_buf and raw_buf_len + * into local variables. */ copy_raw_buf = cstate->raw_buf; raw_buf_ptr = cstate->raw_buf_index; @@ -2098,31 +2096,33 @@ CopyReadLineText(CopyState cstate) for (;;) { - int prev_raw_ptr; - char c; + int prev_raw_ptr; + char c; /* Load more data if needed */ if (raw_buf_ptr >= copy_buf_len || need_data) { /* - * Transfer any approved data to line_buf; must do this to - * be sure there is some room in raw_buf. + * Transfer any approved data to line_buf; must do this to be sure + * there is some room in raw_buf. */ if (raw_buf_ptr > cstate->raw_buf_index) { appendBinaryStringInfo(&cstate->line_buf, - cstate->raw_buf + cstate->raw_buf_index, + cstate->raw_buf + cstate->raw_buf_index, raw_buf_ptr - cstate->raw_buf_index); cstate->raw_buf_index = raw_buf_ptr; } + /* - * Try to read some more data. This will certainly reset + * Try to read some more data. This will certainly reset * raw_buf_index to zero, and raw_buf_ptr must go with it. */ if (!CopyLoadRawBuf(cstate)) hit_eof = true; raw_buf_ptr = 0; copy_buf_len = cstate->raw_buf_len; + /* * If we are completely out of data, break out of the loop, * reporting EOF. @@ -2148,12 +2148,12 @@ CopyReadLineText(CopyState cstate) /* * If need more data, go back to loop top to load it. * - * Note that if we are at EOF, c will wind up as '\0' - * because of the guaranteed pad of raw_buf. + * Note that if we are at EOF, c will wind up as '\0' because of + * the guaranteed pad of raw_buf. 
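The comment block above describes what CopyReadLineText is after: find \n, \r or \r\n, let a backslash escape the following character, and rely on the '\0' pad at the end of raw_buf. A deliberately simplified sketch of just that scan (ignoring the end-of-copy marker, the chunked transfer to line_buf, and encoding concerns handled by the real function) could be:

/*
 * Walk a NUL-terminated buffer, letting a backslash quote the following
 * character, and stop at the first unescaped \n or \r.  Returns a pointer
 * to the terminator character, or NULL if no terminator was seen yet
 * (meaning more data is needed, or EOF).
 */
static const char *
find_line_end(const char *buf)
{
	const char *p = buf;

	for (;;)
	{
		char		c = *p;

		if (c == '\0')
			return NULL;		/* need more data (or EOF) */
		if (c == '\n' || c == '\r')
			return p;			/* caller also eats a \n following a \r */
		if (c == '\\' && p[1] != '\0')
			p++;				/* backslash quotes the next character */
		p++;
	}
}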
*/ if (raw_buf_ptr >= copy_buf_len && !hit_eof) { - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } @@ -2161,8 +2161,8 @@ CopyReadLineText(CopyState cstate) if (c == '\n') { - raw_buf_ptr++; /* eat newline */ - cstate->eol_type = EOL_CRNL; /* in case not set yet */ + raw_buf_ptr++; /* eat newline */ + cstate->eol_type = EOL_CRNL; /* in case not set yet */ } else { @@ -2170,11 +2170,12 @@ CopyReadLineText(CopyState cstate) if (cstate->eol_type == EOL_CRNL) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("literal carriage return found in data"), + errmsg("literal carriage return found in data"), errhint("Use \"\\r\" to represent carriage return."))); + /* - * if we got here, it is the first line and we didn't - * find \n, so don't consume the peeked character + * if we got here, it is the first line and we didn't find + * \n, so don't consume the peeked character */ cstate->eol_type = EOL_CR; } @@ -2183,7 +2184,7 @@ CopyReadLineText(CopyState cstate) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("literal carriage return found in data"), - errhint("Use \"\\r\" to represent carriage return."))); + errhint("Use \"\\r\" to represent carriage return."))); /* If reach here, we have found the line terminator */ break; } @@ -2195,7 +2196,7 @@ CopyReadLineText(CopyState cstate) (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("literal newline found in data"), errhint("Use \"\\n\" to represent newline."))); - cstate->eol_type = EOL_NL; /* in case not set yet */ + cstate->eol_type = EOL_NL; /* in case not set yet */ /* If reach here, we have found the line terminator */ break; } @@ -2219,8 +2220,8 @@ CopyReadLineText(CopyState cstate) } /* - * In non-CSV mode, backslash quotes the following character - * even if it's a newline, so we always advance to next character + * In non-CSV mode, backslash quotes the following character even + * if it's a newline, so we always advance to next character */ c = copy_raw_buf[raw_buf_ptr++]; @@ -2230,7 +2231,7 @@ CopyReadLineText(CopyState cstate) { if (raw_buf_ptr >= copy_buf_len && !hit_eof) { - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } @@ -2247,7 +2248,7 @@ CopyReadLineText(CopyState cstate) } if (raw_buf_ptr >= copy_buf_len && !hit_eof) { - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } @@ -2265,13 +2266,13 @@ CopyReadLineText(CopyState cstate) errmsg("end-of-copy marker does not match previous newline style"))); /* - * Transfer only the data before the \. into line_buf, - * then discard the data and the \. sequence. + * Transfer only the data before the \. into line_buf, then + * discard the data and the \. sequence. */ if (prev_raw_ptr > cstate->raw_buf_index) appendBinaryStringInfo(&cstate->line_buf, - cstate->raw_buf + cstate->raw_buf_index, - prev_raw_ptr - cstate->raw_buf_index); + cstate->raw_buf + cstate->raw_buf_index, + prev_raw_ptr - cstate->raw_buf_index); cstate->raw_buf_index = raw_buf_ptr; result = true; /* report EOF */ break; @@ -2280,10 +2281,10 @@ CopyReadLineText(CopyState cstate) /* * Do we need to be careful about trailing bytes of multibyte - * characters? (See note above about client_only_encoding) + * characters? (See note above about client_only_encoding) * - * We assume here that pg_encoding_mblen only looks at the first - * byte of the character! 
+ * We assume here that pg_encoding_mblen only looks at the first byte of + * the character! */ if (cstate->client_only_encoding) { @@ -2291,7 +2292,7 @@ CopyReadLineText(CopyState cstate) s[0] = c; mblen = pg_encoding_mblen(cstate->client_encoding, s); - if (raw_buf_ptr + (mblen-1) > copy_buf_len) + if (raw_buf_ptr + (mblen - 1) > copy_buf_len) { if (hit_eof) { @@ -2300,11 +2301,11 @@ CopyReadLineText(CopyState cstate) result = true; break; } - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } - raw_buf_ptr += mblen-1; + raw_buf_ptr += mblen - 1; } } /* end of outer loop */ @@ -2337,7 +2338,8 @@ CopyReadLineCSV(CopyState cstate) bool need_data; bool hit_eof; char s[2]; - bool in_quote = false, last_was_esc = false; + bool in_quote = false, + last_was_esc = false; char quotec = cstate->quote[0]; char escapec = cstate->escape[0]; @@ -2351,25 +2353,24 @@ CopyReadLineCSV(CopyState cstate) result = false; /* - * The objective of this loop is to transfer the entire next input - * line into line_buf. Hence, we only care for detecting newlines - * (\r and/or \n) and the end-of-copy marker (\.). + * The objective of this loop is to transfer the entire next input line + * into line_buf. Hence, we only care for detecting newlines (\r and/or + * \n) and the end-of-copy marker (\.). * - * In CSV mode, \r and \n inside a quoted field are just part of the - * data value and are put in line_buf. We keep just enough state - * to know if we are currently in a quoted field or not. + * In CSV mode, \r and \n inside a quoted field are just part of the data + * value and are put in line_buf. We keep just enough state to know if we + * are currently in a quoted field or not. * - * These four characters, and the CSV escape and quote characters, - * are assumed the same in frontend and backend encodings. + * These four characters, and the CSV escape and quote characters, are + * assumed the same in frontend and backend encodings. * - * For speed, we try to move data to line_buf in chunks rather than - * one character at a time. raw_buf_ptr points to the next character - * to examine; any characters from raw_buf_index to raw_buf_ptr have - * been determined to be part of the line, but not yet transferred - * to line_buf. + * For speed, we try to move data to line_buf in chunks rather than one + * character at a time. raw_buf_ptr points to the next character to + * examine; any characters from raw_buf_index to raw_buf_ptr have been + * determined to be part of the line, but not yet transferred to line_buf. * - * For a little extra speed within the loop, we copy raw_buf and - * raw_buf_len into local variables. + * For a little extra speed within the loop, we copy raw_buf and raw_buf_len + * into local variables. */ copy_raw_buf = cstate->raw_buf; raw_buf_ptr = cstate->raw_buf_index; @@ -2379,31 +2380,33 @@ CopyReadLineCSV(CopyState cstate) for (;;) { - int prev_raw_ptr; - char c; + int prev_raw_ptr; + char c; /* Load more data if needed */ if (raw_buf_ptr >= copy_buf_len || need_data) { /* - * Transfer any approved data to line_buf; must do this to - * be sure there is some room in raw_buf. + * Transfer any approved data to line_buf; must do this to be sure + * there is some room in raw_buf. 
*/ if (raw_buf_ptr > cstate->raw_buf_index) { appendBinaryStringInfo(&cstate->line_buf, - cstate->raw_buf + cstate->raw_buf_index, + cstate->raw_buf + cstate->raw_buf_index, raw_buf_ptr - cstate->raw_buf_index); cstate->raw_buf_index = raw_buf_ptr; } + /* - * Try to read some more data. This will certainly reset + * Try to read some more data. This will certainly reset * raw_buf_index to zero, and raw_buf_ptr must go with it. */ if (!CopyLoadRawBuf(cstate)) hit_eof = true; raw_buf_ptr = 0; copy_buf_len = cstate->raw_buf_len; + /* * If we are completely out of data, break out of the loop, * reporting EOF. @@ -2422,44 +2425,44 @@ CopyReadLineCSV(CopyState cstate) /* * If character is '\\' or '\r', we may need to look ahead below. - * Force fetch of the next character if we don't already have it. - * We need to do this before changing CSV state, in case one of - * these characters is also the quote or escape character. + * Force fetch of the next character if we don't already have it. We + * need to do this before changing CSV state, in case one of these + * characters is also the quote or escape character. * - * Note: old-protocol does not like forced prefetch, but it's OK - * here since we cannot validly be at EOF. + * Note: old-protocol does not like forced prefetch, but it's OK here + * since we cannot validly be at EOF. */ if (c == '\\' || c == '\r') { if (raw_buf_ptr >= copy_buf_len && !hit_eof) { - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } } - /* - * Dealing with quotes and escapes here is mildly tricky. If the - * quote char is also the escape char, there's no problem - we - * just use the char as a toggle. If they are different, we need - * to ensure that we only take account of an escape inside a quoted - * field and immediately preceding a quote char, and not the - * second in a escape-escape sequence. - */ + /* + * Dealing with quotes and escapes here is mildly tricky. If the quote + * char is also the escape char, there's no problem - we just use the + * char as a toggle. If they are different, we need to ensure that we + * only take account of an escape inside a quoted field and + * immediately preceding a quote char, and not the second in a + * escape-escape sequence. + */ if (in_quote && c == escapec) - last_was_esc = ! last_was_esc; - if (c == quotec && ! last_was_esc) - in_quote = ! in_quote; + last_was_esc = !last_was_esc; + if (c == quotec && !last_was_esc) + in_quote = !in_quote; if (c != escapec) last_was_esc = false; /* - * Updating the line count for embedded CR and/or LF chars is - * necessarily a little fragile - this test is probably about - * the best we can do. (XXX it's arguable whether we should - * do this at all --- is cur_lineno a physical or logical count?) - */ + * Updating the line count for embedded CR and/or LF chars is + * necessarily a little fragile - this test is probably about the best + * we can do. (XXX it's arguable whether we should do this at all --- + * is cur_lineno a physical or logical count?) + */ if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r')) cstate->cur_lineno++; @@ -2472,12 +2475,12 @@ CopyReadLineCSV(CopyState cstate) /* * If need more data, go back to loop top to load it. * - * Note that if we are at EOF, c will wind up as '\0' - * because of the guaranteed pad of raw_buf. + * Note that if we are at EOF, c will wind up as '\0' because of + * the guaranteed pad of raw_buf. 
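The quote/escape bookkeeping reindented above is easy to misread, so here it is restated as a standalone helper; the CsvQuoteState struct and the csv_track_quotes name are invented, but the three tests are the same ones shown in the hunk. Feeding every incoming byte through it is enough to decide whether a \r or \n is embedded data or a line terminator.

#include <stdbool.h>

typedef struct
{
	bool		in_quote;
	bool		last_was_esc;
} CsvQuoteState;

/*
 * When quote and escape characters differ, an escape only counts inside a
 * quoted field and only the first of an escape-escape pair; when they are
 * the same character, the quote simply toggles.
 */
static void
csv_track_quotes(CsvQuoteState *st, char c, char quotec, char escapec)
{
	if (st->in_quote && c == escapec)
		st->last_was_esc = !st->last_was_esc;
	if (c == quotec && !st->last_was_esc)
		st->in_quote = !st->in_quote;
	if (c != escapec)
		st->last_was_esc = false;
}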
*/ if (raw_buf_ptr >= copy_buf_len && !hit_eof) { - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } @@ -2485,8 +2488,8 @@ CopyReadLineCSV(CopyState cstate) if (c == '\n') { - raw_buf_ptr++; /* eat newline */ - cstate->eol_type = EOL_CRNL; /* in case not set yet */ + raw_buf_ptr++; /* eat newline */ + cstate->eol_type = EOL_CRNL; /* in case not set yet */ } else { @@ -2494,11 +2497,12 @@ CopyReadLineCSV(CopyState cstate) if (cstate->eol_type == EOL_CRNL) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("unquoted carriage return found in data"), + errmsg("unquoted carriage return found in data"), errhint("Use quoted CSV field to represent carriage return."))); + /* - * if we got here, it is the first line and we didn't - * find \n, so don't consume the peeked character + * if we got here, it is the first line and we didn't find + * \n, so don't consume the peeked character */ cstate->eol_type = EOL_CR; } @@ -2518,8 +2522,8 @@ CopyReadLineCSV(CopyState cstate) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("unquoted newline found in data"), - errhint("Use quoted CSV field to represent newline."))); - cstate->eol_type = EOL_NL; /* in case not set yet */ + errhint("Use quoted CSV field to represent newline."))); + cstate->eol_type = EOL_NL; /* in case not set yet */ /* If reach here, we have found the line terminator */ break; } @@ -2529,7 +2533,7 @@ CopyReadLineCSV(CopyState cstate) */ if (c == '\\' && cstate->line_buf.len == 0) { - char c2; + char c2; /* * If need more data, go back to loop top to load it. @@ -2548,25 +2552,25 @@ CopyReadLineCSV(CopyState cstate) } /* - * Note: we do not change c here since we aren't treating \ - * as escaping the next character. + * Note: we do not change c here since we aren't treating \ as + * escaping the next character. */ c2 = copy_raw_buf[raw_buf_ptr]; if (c2 == '.') { - raw_buf_ptr++; /* consume the '.' */ + raw_buf_ptr++; /* consume the '.' */ /* * Note: if we loop back for more data here, it does not - * matter that the CSV state change checks are re-executed; - * we will come back here with no important state changed. + * matter that the CSV state change checks are re-executed; we + * will come back here with no important state changed. */ if (cstate->eol_type == EOL_CRNL) { if (raw_buf_ptr >= copy_buf_len && !hit_eof) { - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } @@ -2583,7 +2587,7 @@ CopyReadLineCSV(CopyState cstate) } if (raw_buf_ptr >= copy_buf_len && !hit_eof) { - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } @@ -2601,12 +2605,12 @@ CopyReadLineCSV(CopyState cstate) errmsg("end-of-copy marker does not match previous newline style"))); /* - * Transfer only the data before the \. into line_buf, - * then discard the data and the \. sequence. + * Transfer only the data before the \. into line_buf, then + * discard the data and the \. sequence. */ if (prev_raw_ptr > cstate->raw_buf_index) appendBinaryStringInfo(&cstate->line_buf, cstate->raw_buf + cstate->raw_buf_index, - prev_raw_ptr - cstate->raw_buf_index); + prev_raw_ptr - cstate->raw_buf_index); cstate->raw_buf_index = raw_buf_ptr; result = true; /* report EOF */ break; @@ -2615,10 +2619,10 @@ CopyReadLineCSV(CopyState cstate) /* * Do we need to be careful about trailing bytes of multibyte - * characters? 
(See note above about client_only_encoding) + * characters? (See note above about client_only_encoding) * - * We assume here that pg_encoding_mblen only looks at the first - * byte of the character! + * We assume here that pg_encoding_mblen only looks at the first byte of + * the character! */ if (cstate->client_only_encoding) { @@ -2626,7 +2630,7 @@ CopyReadLineCSV(CopyState cstate) s[0] = c; mblen = pg_encoding_mblen(cstate->client_encoding, s); - if (raw_buf_ptr + (mblen-1) > copy_buf_len) + if (raw_buf_ptr + (mblen - 1) > copy_buf_len) { if (hit_eof) { @@ -2635,11 +2639,11 @@ CopyReadLineCSV(CopyState cstate) result = true; break; } - raw_buf_ptr = prev_raw_ptr; /* undo fetch */ + raw_buf_ptr = prev_raw_ptr; /* undo fetch */ need_data = true; continue; } - raw_buf_ptr += mblen-1; + raw_buf_ptr += mblen - 1; } } /* end of outer loop */ @@ -2684,7 +2688,7 @@ GetDecimalFromHex(char hex) * null_print is the null marker string. Note that this is compared to * the pre-de-escaped input string. * - * The return value is the number of fields actually read. (We error out + * The return value is the number of fields actually read. (We error out * if this would exceed maxfields, which is the length of fieldvals[].) */ static int @@ -2716,9 +2720,9 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals) /* * The de-escaped attributes will certainly not be longer than the input * data line, so we can just force attribute_buf to be large enough and - * then transfer data without any checks for enough space. We need to - * do it this way because enlarging attribute_buf mid-stream would - * invalidate pointers already stored into fieldvals[]. + * then transfer data without any checks for enough space. We need to do + * it this way because enlarging attribute_buf mid-stream would invalidate + * pointers already stored into fieldvals[]. */ if (cstate->attribute_buf.maxlen <= cstate->line_buf.len) enlargeStringInfo(&cstate->attribute_buf, cstate->line_buf.len); @@ -2750,7 +2754,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals) /* Scan data for field */ for (;;) { - char c; + char c; end_ptr = cur_ptr; if (cur_ptr >= line_end_ptr) @@ -2776,41 +2780,41 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals) case '5': case '6': case '7': - { - /* handle \013 */ - int val; - - val = OCTVALUE(c); - if (cur_ptr < line_end_ptr) { - c = *cur_ptr; - if (ISOCTAL(c)) + /* handle \013 */ + int val; + + val = OCTVALUE(c); + if (cur_ptr < line_end_ptr) { - cur_ptr++; - val = (val << 3) + OCTVALUE(c); - if (cur_ptr < line_end_ptr) + c = *cur_ptr; + if (ISOCTAL(c)) { - c = *cur_ptr; - if (ISOCTAL(c)) + cur_ptr++; + val = (val << 3) + OCTVALUE(c); + if (cur_ptr < line_end_ptr) { - cur_ptr++; - val = (val << 3) + OCTVALUE(c); + c = *cur_ptr; + if (ISOCTAL(c)) + { + cur_ptr++; + val = (val << 3) + OCTVALUE(c); + } } } } + c = val & 0377; } - c = val & 0377; - } - break; + break; case 'x': /* Handle \x3F */ if (cur_ptr < line_end_ptr) { - char hexchar = *cur_ptr; + char hexchar = *cur_ptr; if (isxdigit((unsigned char) hexchar)) { - int val = GetDecimalFromHex(hexchar); + int val = GetDecimalFromHex(hexchar); cur_ptr++; if (cur_ptr < line_end_ptr) @@ -2916,9 +2920,9 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals) /* * The de-escaped attributes will certainly not be longer than the input * data line, so we can just force attribute_buf to be large enough and - * then transfer data without any checks for enough space. 
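The reindented switch above handles \NNN octal and \xHH hex escapes while de-escaping a text-format field. Extracted into a self-contained sketch (hexval and decode_copy_escape are invented names; the real function also maps \b, \f, \n, \r, \t and \v and tracks field and line boundaries), the digit folding works like this:

#include <ctype.h>

#define ISOCTAL(c)	((c) >= '0' && (c) <= '7')
#define OCTVALUE(c) ((c) - '0')

static int
hexval(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return c - 'A' + 10;
}

/*
 * Decode one escape starting at *sp (just past the backslash); advances
 * *sp past the consumed digits and returns the resulting byte.
 */
static char
decode_copy_escape(const char **sp)
{
	const char *s = *sp;
	char		c = *s++;
	int			val;

	if (ISOCTAL(c))
	{
		/* up to three octal digits, e.g. \013 */
		val = OCTVALUE(c);
		if (ISOCTAL(*s))
		{
			val = (val << 3) + OCTVALUE(*s++);
			if (ISOCTAL(*s))
				val = (val << 3) + OCTVALUE(*s++);
		}
		c = val & 0377;
	}
	else if (c == 'x' && isxdigit((unsigned char) *s))
	{
		/* one or two hex digits, e.g. \x3F */
		val = hexval(*s++);
		if (isxdigit((unsigned char) *s))
			val = (val << 4) + hexval(*s++);
		c = val & 0xff;
	}
	/* other characters (\b, \f, \n, ...) would be mapped to controls here */

	*sp = s;
	return c;
}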
We need to - * do it this way because enlarging attribute_buf mid-stream would - * invalidate pointers already stored into fieldvals[]. + * then transfer data without any checks for enough space. We need to do + * it this way because enlarging attribute_buf mid-stream would invalidate + * pointers already stored into fieldvals[]. */ if (cstate->attribute_buf.maxlen <= cstate->line_buf.len) enlargeStringInfo(&cstate->attribute_buf, cstate->line_buf.len); @@ -2952,7 +2956,7 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals) /* Scan data for field */ for (;;) { - char c; + char c; end_ptr = cur_ptr; if (cur_ptr >= line_end_ptr) @@ -2980,7 +2984,7 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals) */ if (cur_ptr < line_end_ptr) { - char nextc = *cur_ptr; + char nextc = *cur_ptr; if (nextc == escapec || nextc == quotec) { @@ -2990,6 +2994,7 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals) } } } + /* * end of quoted field. Must do this test after testing for escape * in case quote char and escape char are the same (which is the @@ -3141,9 +3146,9 @@ CopyAttributeOutText(CopyState cstate, char *server_string) CopySendChar(cstate, '\\'); /* - * We can skip pg_encoding_mblen() overhead when encoding - * is safe, because in valid backend encodings, extra - * bytes of a multibyte character never look like ASCII. + * We can skip pg_encoding_mblen() overhead when encoding is + * safe, because in valid backend encodings, extra bytes of a + * multibyte character never look like ASCII. */ if (cstate->client_only_encoding) mblen = pg_encoding_mblen(cstate->client_encoding, string); diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 548648066b8..accbafc8486 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -15,7 +15,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.172 2005/10/10 20:02:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.173 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -202,11 +202,11 @@ createdb(const CreatedbStmt *stmt) datdba = GetUserId(); /* - * To create a database, must have createdb privilege and must be able - * to become the target role (this does not imply that the target role - * itself must have createdb privilege). The latter provision guards - * against "giveaway" attacks. Note that a superuser will always have - * both of these privileges a fortiori. + * To create a database, must have createdb privilege and must be able to + * become the target role (this does not imply that the target role itself + * must have createdb privilege). The latter provision guards against + * "giveaway" attacks. Note that a superuser will always have both of + * these privileges a fortiori. */ if (!have_createdb_privilege()) ereport(ERROR, @@ -218,10 +218,10 @@ createdb(const CreatedbStmt *stmt) /* * Check for db name conflict. There is a race condition here, since * another backend could create the same DB name before we commit. - * However, holding an exclusive lock on pg_database for the whole - * time we are copying the source database doesn't seem like a good - * idea, so accept possibility of race to create. We will check again - * after we grab the exclusive lock. 
+ * However, holding an exclusive lock on pg_database for the whole time we + * are copying the source database doesn't seem like a good idea, so + * accept possibility of race to create. We will check again after we + * grab the exclusive lock. */ if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)) @@ -240,7 +240,7 @@ createdb(const CreatedbStmt *stmt) &src_vacuumxid, &src_frozenxid, &src_deftablespace)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("template database \"%s\" does not exist", dbtemplate))); + errmsg("template database \"%s\" does not exist", dbtemplate))); /* * Permission check: to copy a DB that's not marked datistemplate, you @@ -264,8 +264,8 @@ createdb(const CreatedbStmt *stmt) if (DatabaseHasActiveBackends(src_dboid, true)) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), - errmsg("source database \"%s\" is being accessed by other users", - dbtemplate))); + errmsg("source database \"%s\" is being accessed by other users", + dbtemplate))); /* If encoding is defaulted, use source's encoding */ if (encoding < 0) @@ -300,7 +300,7 @@ createdb(const CreatedbStmt *stmt) /* * If we are trying to change the default tablespace of the template, * we require that the template not have any files in the new default - * tablespace. This is necessary because otherwise the copied + * tablespace. This is necessary because otherwise the copied * database would contain pg_class rows that refer to its default * tablespace both explicitly (by OID) and implicitly (as zero), which * would cause problems. For example another CREATE DATABASE using @@ -337,7 +337,7 @@ createdb(const CreatedbStmt *stmt) /* * Normally we mark the new database with the same datvacuumxid and - * datfrozenxid as the source. However, if the source is not allowing + * datfrozenxid as the source. However, if the source is not allowing * connections then we assume it is fully frozen, and we can set the * current transaction ID as the xid limits. This avoids immediately * starting to generate warnings after cloning template0. @@ -346,9 +346,9 @@ createdb(const CreatedbStmt *stmt) src_vacuumxid = src_frozenxid = GetCurrentTransactionId(); /* - * Preassign OID for pg_database tuple, so that we can compute db - * path. We have to open pg_database to do this, but we don't want - * to take ExclusiveLock yet, so just do it and close again. + * Preassign OID for pg_database tuple, so that we can compute db path. + * We have to open pg_database to do this, but we don't want to take + * ExclusiveLock yet, so just do it and close again. */ pg_database_rel = heap_open(DatabaseRelationId, AccessShareLock); dboid = GetNewOid(pg_database_rel); @@ -357,23 +357,23 @@ createdb(const CreatedbStmt *stmt) /* * Force dirty buffers out to disk, to ensure source database is - * up-to-date for the copy. (We really only need to flush buffers for - * the source database, but bufmgr.c provides no API for that.) + * up-to-date for the copy. (We really only need to flush buffers for the + * source database, but bufmgr.c provides no API for that.) */ BufferSync(); /* - * Once we start copying subdirectories, we need to be able to clean - * 'em up if we fail. Establish a TRY block to make sure this happens. - * (This is not a 100% solution, because of the possibility of failure - * during transaction commit after we leave this routine, but it should - * handle most scenarios.) + * Once we start copying subdirectories, we need to be able to clean 'em + * up if we fail. 
Establish a TRY block to make sure this happens. (This + * is not a 100% solution, because of the possibility of failure during + * transaction commit after we leave this routine, but it should handle + * most scenarios.) */ PG_TRY(); { /* - * Iterate through all tablespaces of the template database, - * and copy each one to the new database. + * Iterate through all tablespaces of the template database, and copy + * each one to the new database. */ rel = heap_open(TableSpaceRelationId, AccessShareLock); scan = heap_beginscan(rel, SnapshotNow, 0, NULL); @@ -478,8 +478,8 @@ createdb(const CreatedbStmt *stmt) tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls); - HeapTupleSetOid(tuple, dboid); /* override heap_insert's OID - * selection */ + HeapTupleSetOid(tuple, dboid); /* override heap_insert's OID + * selection */ simple_heap_insert(pg_database_rel, tuple); @@ -495,30 +495,31 @@ createdb(const CreatedbStmt *stmt) /* * We force a checkpoint before committing. This effectively means * that committed XLOG_DBASE_CREATE operations will never need to be - * replayed (at least not in ordinary crash recovery; we still have - * to make the XLOG entry for the benefit of PITR operations). - * This avoids two nasty scenarios: + * replayed (at least not in ordinary crash recovery; we still have to + * make the XLOG entry for the benefit of PITR operations). This + * avoids two nasty scenarios: * * #1: When PITR is off, we don't XLOG the contents of newly created * indexes; therefore the drop-and-recreate-whole-directory behavior * of DBASE_CREATE replay would lose such indexes. * * #2: Since we have to recopy the source database during DBASE_CREATE - * replay, we run the risk of copying changes in it that were committed - * after the original CREATE DATABASE command but before the system - * crash that led to the replay. This is at least unexpected and at - * worst could lead to inconsistencies, eg duplicate table names. + * replay, we run the risk of copying changes in it that were + * committed after the original CREATE DATABASE command but before the + * system crash that led to the replay. This is at least unexpected + * and at worst could lead to inconsistencies, eg duplicate table + * names. * * (Both of these were real bugs in releases 8.0 through 8.0.3.) * - * In PITR replay, the first of these isn't an issue, and the second - * is only a risk if the CREATE DATABASE and subsequent template - * database change both occur while a base backup is being taken. - * There doesn't seem to be much we can do about that except document - * it as a limitation. + * In PITR replay, the first of these isn't an issue, and the second is + * only a risk if the CREATE DATABASE and subsequent template database + * change both occur while a base backup is being taken. There doesn't + * seem to be much we can do about that except document it as a + * limitation. * - * Perhaps if we ever implement CREATE DATABASE in a less cheesy - * way, we can avoid this. + * Perhaps if we ever implement CREATE DATABASE in a less cheesy way, we + * can avoid this. */ RequestCheckpoint(true, false); @@ -569,16 +570,16 @@ dropdb(const char *dbname) errmsg("cannot drop the currently open database"))); /* - * Obtain exclusive lock on pg_database. We need this to ensure that - * no new backend starts up in the target database while we are - * deleting it. (Actually, a new backend might still manage to start - * up, because it isn't able to lock pg_database while starting. 
But - * it will detect its error in ReverifyMyDatabase and shut down before - * any serious damage is done. See postinit.c.) + * Obtain exclusive lock on pg_database. We need this to ensure that no + * new backend starts up in the target database while we are deleting it. + * (Actually, a new backend might still manage to start up, because it + * isn't able to lock pg_database while starting. But it will detect its + * error in ReverifyMyDatabase and shut down before any serious damage is + * done. See postinit.c.) * - * An ExclusiveLock, rather than AccessExclusiveLock, is sufficient - * since ReverifyMyDatabase takes RowShareLock. This allows ordinary - * readers of pg_database to proceed in parallel. + * An ExclusiveLock, rather than AccessExclusiveLock, is sufficient since + * ReverifyMyDatabase takes RowShareLock. This allows ordinary readers of + * pg_database to proceed in parallel. */ pgdbrel = heap_open(DatabaseRelationId, ExclusiveLock); @@ -594,8 +595,8 @@ dropdb(const char *dbname) /* * Disallow dropping a DB that is marked istemplate. This is just to - * prevent people from accidentally dropping template0 or template1; - * they can do so if they're really determined ... + * prevent people from accidentally dropping template0 or template1; they + * can do so if they're really determined ... */ if (db_istemplate) ereport(ERROR, @@ -608,8 +609,8 @@ dropdb(const char *dbname) if (DatabaseHasActiveBackends(db_id, false)) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), - errmsg("database \"%s\" is being accessed by other users", - dbname))); + errmsg("database \"%s\" is being accessed by other users", + dbname))); /* * Find the database's tuple by OID (should be unique). @@ -626,8 +627,8 @@ dropdb(const char *dbname) if (!HeapTupleIsValid(tup)) { /* - * This error should never come up since the existence of the - * database is checked earlier + * This error should never come up since the existence of the database + * is checked earlier */ elog(ERROR, "database \"%s\" doesn't exist despite earlier reports to the contrary", dbname); @@ -641,8 +642,8 @@ dropdb(const char *dbname) /* * Delete any comments associated with the database * - * NOTE: this is probably dead code since any such comments should have - * been in that database, not mine. + * NOTE: this is probably dead code since any such comments should have been + * in that database, not mine. */ DeleteComments(db_id, DatabaseRelationId, 0); @@ -652,9 +653,9 @@ dropdb(const char *dbname) dropDatabaseDependencies(db_id); /* - * Drop pages for this database that are in the shared buffer cache. - * This is important to ensure that no remaining backend tries to - * write out a dirty buffer to the dead database later... + * Drop pages for this database that are in the shared buffer cache. This + * is important to ensure that no remaining backend tries to write out a + * dirty buffer to the dead database later... */ DropBuffers(db_id); @@ -701,8 +702,8 @@ RenameDatabase(const char *oldname, const char *newname) key2; /* - * Obtain ExclusiveLock so that no new session gets started - * while the rename is in progress. + * Obtain ExclusiveLock so that no new session gets started while the + * rename is in progress. */ rel = heap_open(DatabaseRelationId, ExclusiveLock); @@ -720,10 +721,10 @@ RenameDatabase(const char *oldname, const char *newname) errmsg("database \"%s\" does not exist", oldname))); /* - * XXX Client applications probably store the current database - * somewhere, so renaming it could cause confusion. 
On the other - * hand, there may not be an actual problem besides a little - * confusion, so think about this and decide. + * XXX Client applications probably store the current database somewhere, + * so renaming it could cause confusion. On the other hand, there may not + * be an actual problem besides a little confusion, so think about this + * and decide. */ if (HeapTupleGetOid(tup) == MyDatabaseId) ereport(ERROR, @@ -737,8 +738,8 @@ RenameDatabase(const char *oldname, const char *newname) if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false)) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), - errmsg("database \"%s\" is being accessed by other users", - oldname))); + errmsg("database \"%s\" is being accessed by other users", + oldname))); /* make sure the new name doesn't exist */ ScanKeyInit(&key2, @@ -822,8 +823,7 @@ AlterDatabase(AlterDatabaseStmt *stmt) connlimit = intVal(dconnlimit->arg); /* - * We don't need ExclusiveLock since we aren't updating the - * flat file. + * We don't need ExclusiveLock since we aren't updating the flat file. */ rel = heap_open(DatabaseRelationId, RowExclusiveLock); ScanKeyInit(&scankey, @@ -868,8 +868,8 @@ AlterDatabase(AlterDatabaseStmt *stmt) heap_close(rel, NoLock); /* - * We don't bother updating the flat file since the existing options - * for ALTER DATABASE don't affect it. + * We don't bother updating the flat file since the existing options for + * ALTER DATABASE don't affect it. */ } @@ -893,8 +893,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt) valuestr = flatten_set_variable_args(stmt->variable, stmt->value); /* - * We don't need ExclusiveLock since we aren't updating the - * flat file. + * We don't need ExclusiveLock since we aren't updating the flat file. */ rel = heap_open(DatabaseRelationId, RowExclusiveLock); ScanKeyInit(&scankey, @@ -958,8 +957,8 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt) heap_close(rel, NoLock); /* - * We don't bother updating the flat file since ALTER DATABASE SET - * doesn't affect it. + * We don't bother updating the flat file since ALTER DATABASE SET doesn't + * affect it. */ } @@ -977,8 +976,7 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId) Form_pg_database datForm; /* - * We don't need ExclusiveLock since we aren't updating the - * flat file. + * We don't need ExclusiveLock since we aren't updating the flat file. */ rel = heap_open(DatabaseRelationId, RowExclusiveLock); ScanKeyInit(&scankey, @@ -1011,7 +1009,7 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId) HeapTuple newtuple; /* Otherwise, must be owner of the existing object */ - if (!pg_database_ownercheck(HeapTupleGetOid(tuple),GetUserId())) + if (!pg_database_ownercheck(HeapTupleGetOid(tuple), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, dbname); @@ -1019,18 +1017,18 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId) check_is_member_of_role(GetUserId(), newOwnerId); /* - * must have createdb rights + * must have createdb rights * - * NOTE: This is different from other alter-owner checks in - * that the current user is checked for createdb privileges - * instead of the destination owner. This is consistent - * with the CREATE case for databases. Because superusers - * will always have this right, we need no special case for them. + * NOTE: This is different from other alter-owner checks in that the + * current user is checked for createdb privileges instead of the + * destination owner. This is consistent with the CREATE case for + * databases. 
Because superusers will always have this right, we need + * no special case for them. */ if (!have_createdb_privilege()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to change owner of database"))); + errmsg("permission denied to change owner of database"))); memset(repl_null, ' ', sizeof(repl_null)); memset(repl_repl, ' ', sizeof(repl_repl)); @@ -1332,10 +1330,9 @@ dbase_redo(XLogRecPtr lsn, XLogRecord *record) dst_path = GetDatabasePath(xlrec->db_id, xlrec->tablespace_id); /* - * Our theory for replaying a CREATE is to forcibly drop the - * target subdirectory if present, then re-copy the source data. - * This may be more work than needed, but it is simple to - * implement. + * Our theory for replaying a CREATE is to forcibly drop the target + * subdirectory if present, then re-copy the source data. This may be + * more work than needed, but it is simple to implement. */ if (stat(dst_path, &st) == 0 && S_ISDIR(st.st_mode)) { @@ -1367,8 +1364,7 @@ dbase_redo(XLogRecPtr lsn, XLogRecord *record) dst_path = GetDatabasePath(xlrec->db_id, xlrec->tablespace_id); /* - * Drop pages for this database that are in the shared buffer - * cache + * Drop pages for this database that are in the shared buffer cache */ DropBuffers(xlrec->db_id); diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c index 68b1360bca0..da5a112bf10 100644 --- a/src/backend/commands/define.c +++ b/src/backend/commands/define.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.92 2004/12/31 21:59:41 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.93 2005/10/15 02:49:15 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -157,11 +157,11 @@ defGetInt64(DefElem *def) /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid - * int8 strings. + * constants by the lexer. Accept these if they are valid int8 + * strings. */ return DatumGetInt64(DirectFunctionCall1(int8in, - CStringGetDatum(strVal(def->arg)))); + CStringGetDatum(strVal(def->arg)))); default: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 0a330a2137e..d470990e942 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994-5, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.137 2005/06/04 02:07:09 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.138 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -75,12 +75,12 @@ ExplainQuery(ExplainStmt *stmt, DestReceiver *dest) ListCell *l; /* - * Because the planner is not cool about not scribbling on its input, - * we make a preliminary copy of the source querytree. This prevents + * Because the planner is not cool about not scribbling on its input, we + * make a preliminary copy of the source querytree. This prevents * problems in the case that the EXPLAIN is in a portal or plpgsql * function and is executed repeatedly. (See also the same hack in - * DECLARE CURSOR and PREPARE.) XXX the planner really shouldn't - * modify its input ... FIXME someday. + * DECLARE CURSOR and PREPARE.) XXX the planner really shouldn't modify + * its input ... FIXME someday. 
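The instrumentation output touched in the surrounding hunks prints times in milliseconds averaged over the number of loops a node (or trigger) was executed. A small sketch of that arithmetic, using a stand-in struct that mirrors only the fields involved (the caller is expected to have checked that nloops is greater than zero):

typedef struct
{
	double		startup;		/* total startup time, in seconds */
	double		total;			/* total run time, in seconds */
	double		ntuples;		/* total tuples produced */
	double		nloops;			/* number of times the node was run */
} InstrCounters;				/* stand-in for Instrumentation */

/* Derive the "actual time=A..B rows=R loops=N" figures from the totals. */
static void
format_actual_time(const InstrCounters *instr,
				   double *startup_ms, double *total_ms, double *rows_per_loop)
{
	double		nloops = instr->nloops;

	*startup_ms = 1000.0 * instr->startup / nloops;
	*total_ms = 1000.0 * instr->total / nloops;
	*rows_per_loop = instr->ntuples / nloops;
}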
*/ query = copyObject(query); @@ -219,7 +219,7 @@ void ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt, TupOutputState *tstate) { - instr_time starttime; + instr_time starttime; double totaltime = 0; ExplainState *es; StringInfo str; @@ -264,7 +264,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt, pfree(s); do_text_output_multiline(tstate, f); pfree(f); - do_text_output_oneline(tstate, ""); /* separator line */ + do_text_output_oneline(tstate, ""); /* separator line */ } } @@ -289,21 +289,21 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt, if (es->printAnalyze) { ResultRelInfo *rInfo; - int numrels = queryDesc->estate->es_num_result_relations; - int nr; + int numrels = queryDesc->estate->es_num_result_relations; + int nr; rInfo = queryDesc->estate->es_result_relations; for (nr = 0; nr < numrels; rInfo++, nr++) { - int nt; + int nt; if (!rInfo->ri_TrigDesc || !rInfo->ri_TrigInstrument) continue; for (nt = 0; nt < rInfo->ri_TrigDesc->numtriggers; nt++) { - Trigger *trig = rInfo->ri_TrigDesc->triggers + nt; + Trigger *trig = rInfo->ri_TrigDesc->triggers + nt; Instrumentation *instr = rInfo->ri_TrigInstrument + nt; - char *conname; + char *conname; /* Must clean up instrumentation state */ InstrEndLoop(instr); @@ -316,7 +316,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt, continue; if (trig->tgisconstraint && - (conname = GetConstraintNameForTrigger(trig->tgoid)) != NULL) + (conname = GetConstraintNameForTrigger(trig->tgoid)) != NULL) { appendStringInfo(str, "Trigger for constraint %s", conname); @@ -327,7 +327,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt, if (numrels > 1) appendStringInfo(str, " on %s", - RelationGetRelationName(rInfo->ri_RelationDesc)); + RelationGetRelationName(rInfo->ri_RelationDesc)); appendStringInfo(str, ": time=%.3f calls=%.0f\n", 1000.0 * instr->total, @@ -337,8 +337,8 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt, } /* - * Close down the query and free resources. Include time for this - * in the total runtime (although it should be pretty minimal). + * Close down the query and free resources. Include time for this in the + * total runtime (although it should be pretty minimal). */ INSTR_TIME_SET_CURRENT(starttime); @@ -366,7 +366,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt, static double elapsed_time(instr_time *starttime) { - instr_time endtime; + instr_time endtime; INSTR_TIME_SET_CURRENT(endtime); @@ -378,7 +378,7 @@ elapsed_time(instr_time *starttime) endtime.tv_usec += 1000000; endtime.tv_sec--; } -#else /* WIN32 */ +#else /* WIN32 */ endtime.QuadPart -= starttime->QuadPart; #endif @@ -583,7 +583,7 @@ explain_outNode(StringInfo str, if (ScanDirectionIsBackward(((IndexScan *) plan)->indexorderdir)) appendStringInfoString(str, " Backward"); appendStringInfo(str, " using %s", - quote_identifier(get_rel_name(((IndexScan *) plan)->indexid))); + quote_identifier(get_rel_name(((IndexScan *) plan)->indexid))); /* FALL THRU */ case T_SeqScan: case T_BitmapHeapScan: @@ -604,7 +604,7 @@ explain_outNode(StringInfo str, quote_identifier(relname)); if (strcmp(rte->eref->aliasname, relname) != 0) appendStringInfo(str, " %s", - quote_identifier(rte->eref->aliasname)); + quote_identifier(rte->eref->aliasname)); } break; case T_BitmapIndexScan: @@ -632,10 +632,10 @@ explain_outNode(StringInfo str, Assert(rte->rtekind == RTE_FUNCTION); /* - * If the expression is still a function call, we can get - * the real name of the function. 
Otherwise, punt (this - * can happen if the optimizer simplified away the - * function call, for example). + * If the expression is still a function call, we can get the + * real name of the function. Otherwise, punt (this can + * happen if the optimizer simplified away the function call, + * for example). */ if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr)) { @@ -652,20 +652,20 @@ explain_outNode(StringInfo str, quote_identifier(proname)); if (strcmp(rte->eref->aliasname, proname) != 0) appendStringInfo(str, " %s", - quote_identifier(rte->eref->aliasname)); + quote_identifier(rte->eref->aliasname)); } break; default: break; } - + appendStringInfo(str, " (cost=%.2f..%.2f rows=%.0f width=%d)", plan->startup_cost, plan->total_cost, plan->plan_rows, plan->plan_width); /* - * We have to forcibly clean up the instrumentation state because - * we haven't done ExecutorEnd yet. This is pretty grotty ... + * We have to forcibly clean up the instrumentation state because we + * haven't done ExecutorEnd yet. This is pretty grotty ... */ if (planstate->instrument) InstrEndLoop(planstate->instrument); @@ -675,8 +675,8 @@ explain_outNode(StringInfo str, double nloops = planstate->instrument->nloops; appendStringInfo(str, " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)", - 1000.0 * planstate->instrument->startup / nloops, - 1000.0 * planstate->instrument->total / nloops, + 1000.0 * planstate->instrument->startup / nloops, + 1000.0 * planstate->instrument->total / nloops, planstate->instrument->ntuples / nloops, planstate->instrument->nloops); } @@ -833,9 +833,10 @@ explain_outNode(StringInfo str, for (i = 0; i < indent; i++) appendStringInfo(str, " "); appendStringInfo(str, " -> "); + /* - * Ordinarily we don't pass down our own outer_plan value to our - * child nodes, but in bitmap scan trees we must, since the bottom + * Ordinarily we don't pass down our own outer_plan value to our child + * nodes, but in bitmap scan trees we must, since the bottom * BitmapIndexScan nodes may have outer references. */ explain_outNode(str, outerPlan(plan), @@ -882,7 +883,7 @@ explain_outNode(StringInfo str, if (IsA(plan, BitmapAnd)) { - BitmapAnd *bitmapandplan = (BitmapAnd *) plan; + BitmapAnd *bitmapandplan = (BitmapAnd *) plan; BitmapAndState *bitmapandstate = (BitmapAndState *) planstate; ListCell *lst; int j; @@ -898,7 +899,7 @@ explain_outNode(StringInfo str, explain_outNode(str, subnode, bitmapandstate->bitmapplans[j], - outer_plan, /* pass down same outer plan */ + outer_plan, /* pass down same outer plan */ indent + 3, es); j++; } @@ -906,7 +907,7 @@ explain_outNode(StringInfo str, if (IsA(plan, BitmapOr)) { - BitmapOr *bitmaporplan = (BitmapOr *) plan; + BitmapOr *bitmaporplan = (BitmapOr *) plan; BitmapOrState *bitmaporstate = (BitmapOrState *) planstate; ListCell *lst; int j; @@ -922,7 +923,7 @@ explain_outNode(StringInfo str, explain_outNode(str, subnode, bitmaporstate->bitmapplans[j], - outer_plan, /* pass down same outer plan */ + outer_plan, /* pass down same outer plan */ indent + 3, es); j++; } @@ -1008,9 +1009,9 @@ show_scan_qual(List *qual, const char *qlabel, scancontext = deparse_context_for_rte(rte); /* - * If we have an outer plan that is referenced by the qual, add it to - * the deparse context. If not, don't (so that we don't force - * prefixes unnecessarily). + * If we have an outer plan that is referenced by the qual, add it to the + * deparse context. If not, don't (so that we don't force prefixes + * unnecessarily). 
*/ if (outer_plan) { @@ -1018,7 +1019,7 @@ show_scan_qual(List *qual, const char *qlabel, if (bms_is_member(OUTER, varnos)) outercontext = deparse_context_for_subplan("outer", - outer_plan->targetlist, + outer_plan->targetlist, es->rtable); else outercontext = NULL; @@ -1111,11 +1112,10 @@ show_sort_keys(List *tlist, int nkeys, AttrNumber *keycols, /* * In this routine we expect that the plan node's tlist has not been - * processed by set_plan_references(). Normally, any Vars will - * contain valid varnos referencing the actual rtable. But we might - * instead be looking at a dummy tlist generated by prepunion.c; if - * there are Vars with zero varno, use the tlist itself to determine - * their names. + * processed by set_plan_references(). Normally, any Vars will contain + * valid varnos referencing the actual rtable. But we might instead be + * looking at a dummy tlist generated by prepunion.c; if there are Vars + * with zero varno, use the tlist itself to determine their names. */ varnos = pull_varnos((Node *) tlist); if (bms_is_member(0, varnos)) diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index a2a8f56e23c..f4d6164775e 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.68 2005/09/24 22:54:36 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.69 2005/10/15 02:49:15 momjian Exp $ * * DESCRIPTION * These routines take the parse tree and pick out the @@ -83,8 +83,8 @@ compute_return_type(TypeName *returnType, Oid languageOid, if (languageOid == SQLlanguageId) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("SQL function cannot return shell type %s", - TypeNameToString(returnType)))); + errmsg("SQL function cannot return shell type %s", + TypeNameToString(returnType)))); else ereport(NOTICE, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -158,7 +158,7 @@ examine_parameter_list(List *parameters, Oid languageOid, ListCell *x; int i; - *requiredResultType = InvalidOid; /* default result */ + *requiredResultType = InvalidOid; /* default result */ inTypes = (Oid *) palloc(parameterCount * sizeof(Oid)); allTypes = (Datum *) palloc(parameterCount * sizeof(Datum)); @@ -182,8 +182,8 @@ examine_parameter_list(List *parameters, Oid languageOid, if (languageOid == SQLlanguageId) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("SQL function cannot accept shell type %s", - TypeNameToString(t)))); + errmsg("SQL function cannot accept shell type %s", + TypeNameToString(t)))); else ereport(NOTICE, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -307,13 +307,13 @@ duplicate_error: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } static char interpret_func_volatility(DefElem *defel) { - char *str = strVal(defel->arg); + char *str = strVal(defel->arg); if (strcmp(str, "immutable") == 0) return PROVOLATILE_IMMUTABLE; @@ -324,7 +324,7 @@ interpret_func_volatility(DefElem *defel) else { elog(ERROR, "invalid volatility \"%s\"", str); - return 0; /* keep compiler quiet */ + return 0; /* keep compiler quiet */ } } @@ -445,8 +445,8 @@ compute_attributes_with_style(List *parameters, bool *isStrict_p, char *volatili else ereport(WARNING, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unrecognized function attribute \"%s\" ignored", - param->defname))); + 
errmsg("unrecognized function attribute \"%s\" ignored", + param->defname))); } } @@ -469,8 +469,8 @@ interpret_AS_clause(Oid languageOid, const char *languageName, List *as, if (languageOid == ClanguageId) { /* - * For "C" language, store the file name in probin and, when - * given, the link symbol name in prosrc. + * For "C" language, store the file name in probin and, when given, + * the link symbol name in prosrc. */ *probin_str_p = strVal(linitial(as)); if (list_length(as) == 1) @@ -541,7 +541,7 @@ CreateFunction(CreateFunctionStmt *stmt) /* override attributes from explicit list */ compute_attributes_sql_style(stmt->options, - &as_clause, &language, &volatility, &isStrict, &security); + &as_clause, &language, &volatility, &isStrict, &security); /* Convert language name to canonical case */ languageName = case_translate_language_name(language); @@ -630,10 +630,10 @@ CreateFunction(CreateFunctionStmt *stmt) /* * In PostgreSQL versions before 6.5, the SQL name of the created * function could not be different from the internal name, and - * "prosrc" wasn't used. So there is code out there that does - * CREATE FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some - * modicum of backwards compatibility, accept an empty "prosrc" - * value as meaning the supplied SQL function name. + * "prosrc" wasn't used. So there is code out there that does CREATE + * FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some modicum of + * backwards compatibility, accept an empty "prosrc" value as meaning + * the supplied SQL function name. */ if (strlen(prosrc_str) == 0) prosrc_str = funcname; @@ -647,8 +647,8 @@ CreateFunction(CreateFunctionStmt *stmt) } /* - * And now that we have all the parameters, and know we're permitted - * to do so, go ahead and create the function. + * And now that we have all the parameters, and know we're permitted to do + * so, go ahead and create the function. 
*/ ProcedureCreate(funcname, namespaceId, @@ -696,8 +696,8 @@ RemoveFunction(RemoveFuncStmt *stmt) /* Permission check: must own func or its namespace */ if (!pg_proc_ownercheck(funcOid, GetUserId()) && - !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace, - GetUserId())) + !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace, + GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, NameListToString(functionName)); @@ -706,7 +706,7 @@ RemoveFunction(RemoveFuncStmt *stmt) (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an aggregate function", NameListToString(functionName)), - errhint("Use DROP AGGREGATE to drop aggregate functions."))); + errhint("Use DROP AGGREGATE to drop aggregate functions."))); if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId) { @@ -812,7 +812,7 @@ RenameFunction(List *name, List *argtypes, const char *newname) (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an aggregate function", NameListToString(name)), - errhint("Use ALTER AGGREGATE to rename aggregate functions."))); + errhint("Use ALTER AGGREGATE to rename aggregate functions."))); namespaceOid = procForm->pronamespace; @@ -828,7 +828,7 @@ RenameFunction(List *name, List *argtypes, const char *newname) errmsg("function %s already exists in schema \"%s\"", funcname_signature_string(newname, procForm->pronargs, - procForm->proargtypes.values), + procForm->proargtypes.values), get_namespace_name(namespaceOid)))); } @@ -900,7 +900,7 @@ AlterFunctionOwner(List *name, List *argtypes, Oid newOwnerId) if (!superuser()) { /* Otherwise, must be owner of the existing object */ - if (!pg_proc_ownercheck(procOid,GetUserId())) + if (!pg_proc_ownercheck(procOid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, NameListToString(name)); @@ -960,14 +960,14 @@ AlterFunctionOwner(List *name, List *argtypes, Oid newOwnerId) void AlterFunction(AlterFunctionStmt *stmt) { - HeapTuple tup; - Oid funcOid; + HeapTuple tup; + Oid funcOid; Form_pg_proc procForm; - Relation rel; - ListCell *l; - DefElem *volatility_item = NULL; - DefElem *strict_item = NULL; - DefElem *security_def_item = NULL; + Relation rel; + ListCell *l; + DefElem *volatility_item = NULL; + DefElem *strict_item = NULL; + DefElem *security_def_item = NULL; rel = heap_open(ProcedureRelationId, RowExclusiveLock); @@ -995,9 +995,9 @@ AlterFunction(AlterFunctionStmt *stmt) NameListToString(stmt->func->funcname)))); /* Examine requested actions. 
*/ - foreach (l, stmt->actions) + foreach(l, stmt->actions) { - DefElem *defel = (DefElem *) lfirst(l); + DefElem *defel = (DefElem *) lfirst(l); if (compute_common_attribute(defel, &volatility_item, @@ -1182,27 +1182,27 @@ CreateCast(CreateCastStmt *stmt) if (nargs < 1 || nargs > 3) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("cast function must take one to three arguments"))); + errmsg("cast function must take one to three arguments"))); if (procstruct->proargtypes.values[0] != sourcetypeid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("argument of cast function must match source data type"))); + errmsg("argument of cast function must match source data type"))); if (nargs > 1 && procstruct->proargtypes.values[1] != INT4OID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("second argument of cast function must be type integer"))); + errmsg("second argument of cast function must be type integer"))); if (nargs > 2 && procstruct->proargtypes.values[2] != BOOLOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("third argument of cast function must be type boolean"))); + errmsg("third argument of cast function must be type boolean"))); if (procstruct->prorettype != targettypeid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("return data type of cast function must match target data type"))); /* - * Restricting the volatility of a cast function may or may not be - * a good idea in the abstract, but it definitely breaks many old + * Restricting the volatility of a cast function may or may not be a + * good idea in the abstract, but it definitely breaks many old * user-defined types. Disable this check --- tgl 2/1/03 */ #ifdef NOT_USED @@ -1214,7 +1214,7 @@ CreateCast(CreateCastStmt *stmt) if (procstruct->proisagg) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("cast function must not be an aggregate function"))); + errmsg("cast function must not be an aggregate function"))); if (procstruct->proretset) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -1242,13 +1242,13 @@ CreateCast(CreateCastStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to create a cast WITHOUT FUNCTION"))); + errmsg("must be superuser to create a cast WITHOUT FUNCTION"))); /* * Also, insist that the types match as to size, alignment, and - * pass-by-value attributes; this provides at least a crude check - * that they have similar representations. A pair of types that - * fail this test should certainly not be equated. + * pass-by-value attributes; this provides at least a crude check that + * they have similar representations. A pair of types that fail this + * test should certainly not be equated. */ get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align); get_typlenbyvalalign(targettypeid, &typ2len, &typ2byval, &typ2align); @@ -1267,7 +1267,7 @@ CreateCast(CreateCastStmt *stmt) if (sourcetypeid == targettypeid && nargs < 2) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("source data type and target data type are the same"))); + errmsg("source data type and target data type are the same"))); /* convert CoercionContext enum to char value for castcontext */ switch (stmt->context) @@ -1290,9 +1290,9 @@ CreateCast(CreateCastStmt *stmt) relation = heap_open(CastRelationId, RowExclusiveLock); /* - * Check for duplicate. 
This is just to give a friendly error - * message, the unique index would catch it anyway (so no need to - * sweat about race conditions). + * Check for duplicate. This is just to give a friendly error message, + * the unique index would catch it anyway (so no need to sweat about race + * conditions). */ tuple = SearchSysCache(CASTSOURCETARGET, ObjectIdGetDatum(sourcetypeid), @@ -1442,12 +1442,12 @@ DropCastById(Oid castOid) void AlterFunctionNamespace(List *name, List *argtypes, const char *newschema) { - Oid procOid; - Oid oldNspOid; - Oid nspOid; - HeapTuple tup; - Relation procRel; - Form_pg_proc proc; + Oid procOid; + Oid oldNspOid; + Oid nspOid; + HeapTuple tup; + Relation procRel; + Form_pg_proc proc; procRel = heap_open(ProcedureRelationId, RowExclusiveLock); @@ -1482,7 +1482,7 @@ AlterFunctionNamespace(List *name, List *argtypes, const char *newschema) if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move objects into or out of temporary schemas"))); + errmsg("cannot move objects into or out of temporary schemas"))); /* same for TOAST schema */ if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE) diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 6bfa8a04e24..07654e455ab 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.133 2005/06/22 21:14:29 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.134 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -145,10 +145,9 @@ DefineIndex(RangeVar *heapRelation, /* * Verify we (still) have CREATE rights in the rel's namespace. - * (Presumably we did when the rel was created, but maybe not - * anymore.) Skip check if caller doesn't want it. Also skip check - * if bootstrapping, since permissions machinery may not be working - * yet. + * (Presumably we did when the rel was created, but maybe not anymore.) + * Skip check if caller doesn't want it. Also skip check if + * bootstrapping, since permissions machinery may not be working yet. */ if (check_rights && !IsBootstrapProcessingMode()) { @@ -193,8 +192,8 @@ DefineIndex(RangeVar *heapRelation, } /* - * Force shared indexes into the pg_global tablespace. This is a bit of - * a hack but seems simpler than marking them in the BKI commands. + * Force shared indexes into the pg_global tablespace. This is a bit of a + * hack but seems simpler than marking them in the BKI commands. 
*/ if (rel->rd_rel->relisshared) tablespaceId = GLOBALTABLESPACE_OID; @@ -221,8 +220,7 @@ DefineIndex(RangeVar *heapRelation, } /* - * look up the access method, verify it can handle the requested - * features + * look up the access method, verify it can handle the requested features */ tuple = SearchSysCache(AMNAME, PointerGetDatum(accessMethodName), @@ -238,13 +236,13 @@ DefineIndex(RangeVar *heapRelation, if (unique && !accessMethodForm->amcanunique) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("access method \"%s\" does not support unique indexes", - accessMethodName))); + errmsg("access method \"%s\" does not support unique indexes", + accessMethodName))); if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("access method \"%s\" does not support multicolumn indexes", - accessMethodName))); + errmsg("access method \"%s\" does not support multicolumn indexes", + accessMethodName))); ReleaseSysCache(tuple); @@ -275,23 +273,23 @@ DefineIndex(RangeVar *heapRelation, ListCell *keys; /* - * If ALTER TABLE, check that there isn't already a PRIMARY KEY. - * In CREATE TABLE, we have faith that the parser rejected - * multiple pkey clauses; and CREATE INDEX doesn't have a way to - * say PRIMARY KEY, so it's no problem either. + * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In + * CREATE TABLE, we have faith that the parser rejected multiple pkey + * clauses; and CREATE INDEX doesn't have a way to say PRIMARY KEY, so + * it's no problem either. */ if (is_alter_table && relationHasPrimaryKey(rel)) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("multiple primary keys for table \"%s\" are not allowed", - RelationGetRelationName(rel)))); + errmsg("multiple primary keys for table \"%s\" are not allowed", + RelationGetRelationName(rel)))); } /* - * Check that all of the attributes in a primary key are marked as - * not null, otherwise attempt to ALTER TABLE .. SET NOT NULL + * Check that all of the attributes in a primary key are marked as not + * null, otherwise attempt to ALTER TABLE .. SET NOT NULL */ cmds = NIL; foreach(keys, attributeList) @@ -326,35 +324,35 @@ DefineIndex(RangeVar *heapRelation, else { /* - * This shouldn't happen during CREATE TABLE, but can - * happen during ALTER TABLE. Keep message in sync with + * This shouldn't happen during CREATE TABLE, but can happen + * during ALTER TABLE. Keep message in sync with * transformIndexConstraints() in parser/analyze.c. */ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" named in key does not exist", - key->name))); + errmsg("column \"%s\" named in key does not exist", + key->name))); } } /* * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child - * tables? Currently, since the PRIMARY KEY itself doesn't - * cascade, we don't cascade the notnull constraint(s) either; but - * this is pretty debatable. + * tables? Currently, since the PRIMARY KEY itself doesn't cascade, + * we don't cascade the notnull constraint(s) either; but this is + * pretty debatable. * - * XXX: possible future improvement: when being called from ALTER - * TABLE, it would be more efficient to merge this with the outer - * ALTER TABLE, so as to avoid two scans. But that seems to - * complicate DefineIndex's API unduly. + * XXX: possible future improvement: when being called from ALTER TABLE, + * it would be more efficient to merge this with the outer ALTER + * TABLE, so as to avoid two scans. 
But that seems to complicate + * DefineIndex's API unduly. */ if (cmds) AlterTableInternal(relationId, cmds, false); } /* - * Prepare arguments for index_create, primarily an IndexInfo - * structure. Note that ii_Predicate must be in implicit-AND format. + * Prepare arguments for index_create, primarily an IndexInfo structure. + * Note that ii_Predicate must be in implicit-AND format. */ indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = numberOfAttributes; @@ -372,15 +370,15 @@ DefineIndex(RangeVar *heapRelation, heap_close(rel, NoLock); /* - * Report index creation if appropriate (delay this till after most of - * the error checks) + * Report index creation if appropriate (delay this till after most of the + * error checks) */ if (isconstraint && !quiet) ereport(NOTICE, - (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"", - is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /", - primary ? "PRIMARY KEY" : "UNIQUE", - indexRelationName, RelationGetRelationName(rel)))); + (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"", + is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /", + primary ? "PRIMARY KEY" : "UNIQUE", + indexRelationName, RelationGetRelationName(rel)))); index_create(relationId, indexRelationName, indexRelationId, indexInfo, accessMethodId, tablespaceId, classObjectId, @@ -391,8 +389,8 @@ DefineIndex(RangeVar *heapRelation, * We update the relation's pg_class tuple even if it already has * relhasindex = true. This is needed to cause a shared-cache-inval * message to be sent for the pg_class tuple, which will cause other - * backends to flush their relcache entries and in particular their - * cached lists of the indexes for this relation. + * backends to flush their relcache entries and in particular their cached + * lists of the indexes for this relation. */ setRelhasindex(relationId, true, primary, InvalidOid); } @@ -414,8 +412,7 @@ CheckPredicate(Expr *predicate) { /* * We don't currently support generation of an actual query plan for a - * predicate, only simple scalar expressions; hence these - * restrictions. + * predicate, only simple scalar expressions; hence these restrictions. */ if (contain_subplans((Node *) predicate)) ereport(ERROR, @@ -433,7 +430,7 @@ CheckPredicate(Expr *predicate) if (contain_mutable_functions((Node *) predicate)) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("functions in index predicate must be marked IMMUTABLE"))); + errmsg("functions in index predicate must be marked IMMUTABLE"))); } static void @@ -470,8 +467,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo, if (isconstraint) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" named in key does not exist", - attribute->name))); + errmsg("column \"%s\" named in key does not exist", + attribute->name))); else ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), @@ -501,24 +498,23 @@ ComputeIndexAttrs(IndexInfo *indexInfo, atttype = exprType(attribute->expr); /* - * We don't currently support generation of an actual query - * plan for an index expression, only simple scalar - * expressions; hence these restrictions. + * We don't currently support generation of an actual query plan + * for an index expression, only simple scalar expressions; hence + * these restrictions. 
*/ if (contain_subplans(attribute->expr)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in index expression"))); + errmsg("cannot use subquery in index expression"))); if (contain_agg_clause(attribute->expr)) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in index expression"))); + errmsg("cannot use aggregate function in index expression"))); /* - * A expression using mutable functions is probably wrong, - * since if you aren't going to get the same result for the - * same data every time, it's not clear what the index entries - * mean at all. + * A expression using mutable functions is probably wrong, since + * if you aren't going to get the same result for the same data + * every time, it's not clear what the index entries mean at all. */ if (contain_mutable_functions(attribute->expr)) ereport(ERROR, @@ -548,16 +544,16 @@ GetIndexOpClass(List *opclass, Oid attrType, opInputType; /* - * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so - * we ignore those opclass names so the default *_ops is used. This - * can be removed in some later release. bjm 2000/02/07 + * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so we + * ignore those opclass names so the default *_ops is used. This can be + * removed in some later release. bjm 2000/02/07 * * Release 7.1 removes lztext_ops, so suppress that too for a while. tgl * 2000/07/30 * - * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that - * too for awhile. I'm starting to think we need a better approach. - * tgl 2000/10/01 + * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that too + * for awhile. I'm starting to think we need a better approach. tgl + * 2000/10/01 * * Release 8.0 removes bigbox_ops (which was dead code for a long while * anyway). tgl 2003/11/11 @@ -628,8 +624,8 @@ GetIndexOpClass(List *opclass, Oid attrType, NameListToString(opclass), accessMethodName))); /* - * Verify that the index operator class accepts this datatype. Note - * we will accept binary compatibility. + * Verify that the index operator class accepts this datatype. Note we + * will accept binary compatibility. */ opClassId = HeapTupleGetOid(tuple); opInputType = ((Form_pg_opclass) GETSTRUCT(tuple))->opcintype; @@ -637,8 +633,8 @@ GetIndexOpClass(List *opclass, Oid attrType, if (!IsBinaryCoercible(attrType, opInputType)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("operator class \"%s\" does not accept data type %s", - NameListToString(opclass), format_type_be(attrType)))); + errmsg("operator class \"%s\" does not accept data type %s", + NameListToString(opclass), format_type_be(attrType)))); ReleaseSysCache(tuple); @@ -663,8 +659,8 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId) * (either exactly or binary-compatibly, but prefer an exact match). * * We could find more than one binary-compatible match, in which case we - * require the user to specify which one he wants. If we find more - * than one exact match, then someone put bogus entries in pg_opclass. + * require the user to specify which one he wants. If we find more than + * one exact match, then someone put bogus entries in pg_opclass. * * The initial search is done by namespace.c so that we only consider * opclasses visible in the current namespace search path. 
(See also @@ -694,8 +690,8 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId) if (nexact != 0) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("there are multiple default operator classes for data type %s", - format_type_be(attrType)))); + errmsg("there are multiple default operator classes for data type %s", + format_type_be(attrType)))); if (ncompatible == 1) return compatibleOid; @@ -749,8 +745,8 @@ makeObjectName(const char *name1, const char *name2, const char *label) /* * If we must truncate, preferentially truncate the longer name. This - * logic could be expressed without a loop, but it's simple and - * obvious as a loop. + * logic could be expressed without a loop, but it's simple and obvious as + * a loop. */ while (name1chars + name2chars > availchars) { @@ -842,9 +838,9 @@ relationHasPrimaryKey(Relation rel) ListCell *indexoidscan; /* - * Get the list of index OIDs for the table from the relcache, and - * look up each one in the pg_index syscache until we find one marked - * primary key (hopefully there isn't more than one such). + * Get the list of index OIDs for the table from the relcache, and look up + * each one in the pg_index syscache until we find one marked primary key + * (hopefully there isn't more than one such). */ indexoidlist = RelationGetIndexList(rel); @@ -1004,16 +1000,16 @@ ReindexDatabase(const char *databaseName, bool do_system, bool do_user) /* * We cannot run inside a user transaction block; if we were inside a - * transaction, then our commit- and start-transaction-command calls - * would not have the intended effect! + * transaction, then our commit- and start-transaction-command calls would + * not have the intended effect! */ PreventTransactionChain((void *) databaseName, "REINDEX DATABASE"); /* - * Create a memory context that will survive forced transaction - * commits we do below. Since it is a child of PortalContext, it will - * go away eventually even if we suffer an error; there's no need for - * special abort cleanup logic. + * Create a memory context that will survive forced transaction commits we + * do below. Since it is a child of PortalContext, it will go away + * eventually even if we suffer an error; there's no need for special + * abort cleanup logic. */ private_context = AllocSetContextCreate(PortalContext, "ReindexDatabase", @@ -1022,10 +1018,10 @@ ReindexDatabase(const char *databaseName, bool do_system, bool do_user) ALLOCSET_DEFAULT_MAXSIZE); /* - * We always want to reindex pg_class first. This ensures that if - * there is any corruption in pg_class' indexes, they will be fixed - * before we process any other tables. This is critical because - * reindexing itself will try to update pg_class. + * We always want to reindex pg_class first. This ensures that if there + * is any corruption in pg_class' indexes, they will be fixed before we + * process any other tables. This is critical because reindexing itself + * will try to update pg_class. 
*/ if (do_system) { diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c index b4a87a4d39b..72a61ad9c27 100644 --- a/src/backend/commands/lockcmds.c +++ b/src/backend/commands/lockcmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.12 2004/12/31 21:59:41 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.13 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -31,8 +31,8 @@ LockTableCommand(LockStmt *lockstmt) ListCell *p; /* - * Iterate over the list and open, lock, and close the relations one - * at a time + * Iterate over the list and open, lock, and close the relations one at a + * time */ foreach(p, lockstmt->relations) @@ -43,8 +43,8 @@ LockTableCommand(LockStmt *lockstmt) Relation rel; /* - * We don't want to open the relation until we've checked - * privilege. So, manually get the relation OID. + * We don't want to open the relation until we've checked privilege. + * So, manually get the relation OID. */ reloid = RangeVarGetRelid(relation, false); diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 186fac96edb..ea8afcfccbf 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.37 2005/08/23 01:41:30 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.38 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -117,16 +117,16 @@ DefineOpClass(CreateOpClassStmt *stmt) ReleaseSysCache(tup); /* - * Currently, we require superuser privileges to create an opclass. - * This seems necessary because we have no way to validate that the - * offered set of operators and functions are consistent with the AM's - * expectations. It would be nice to provide such a check someday, if - * it can be done without solving the halting problem :-( + * Currently, we require superuser privileges to create an opclass. This + * seems necessary because we have no way to validate that the offered set + * of operators and functions are consistent with the AM's expectations. + * It would be nice to provide such a check someday, if it can be done + * without solving the halting problem :-( */ if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to create an operator class"))); + errmsg("must be superuser to create an operator class"))); /* Look up the datatype */ typeoid = typenameTypeId(stmt->datatype); @@ -223,7 +223,7 @@ DefineOpClass(CreateOpClassStmt *stmt) if (OidIsValid(storageoid)) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("storage type specified more than once"))); + errmsg("storage type specified more than once"))); storageoid = typenameTypeId(item->storedtype); break; default: @@ -244,8 +244,8 @@ DefineOpClass(CreateOpClassStmt *stmt) { /* * Currently, only GiST allows storagetype different from - * datatype. This hardcoded test should be eliminated in - * favor of adding another boolean column to pg_am ... + * datatype. This hardcoded test should be eliminated in favor of + * adding another boolean column to pg_am ... 
*/ if (amoid != GIST_AM_OID) ereport(ERROR, @@ -258,8 +258,8 @@ DefineOpClass(CreateOpClassStmt *stmt) rel = heap_open(OperatorClassRelationId, RowExclusiveLock); /* - * Make sure there is no existing opclass of this name (this is just - * to give a more friendly error message than "duplicate key"). + * Make sure there is no existing opclass of this name (this is just to + * give a more friendly error message than "duplicate key"). */ if (SearchSysCacheExists(CLAAMNAMENSP, ObjectIdGetDatum(amoid), @@ -272,10 +272,9 @@ DefineOpClass(CreateOpClassStmt *stmt) opcname, stmt->amname))); /* - * If we are creating a default opclass, check there isn't one - * already. (Note we do not restrict this test to visible opclasses; - * this ensures that typcache.c can find unique solutions to its - * questions.) + * If we are creating a default opclass, check there isn't one already. + * (Note we do not restrict this test to visible opclasses; this ensures + * that typcache.c can find unique solutions to its questions.) */ if (stmt->isDefault) { @@ -300,8 +299,8 @@ DefineOpClass(CreateOpClassStmt *stmt) errmsg("could not make operator class \"%s\" be default for type %s", opcname, TypeNameToString(stmt->datatype)), - errdetail("Operator class \"%s\" already is the default.", - NameStr(opclass->opcname)))); + errdetail("Operator class \"%s\" already is the default.", + NameStr(opclass->opcname)))); } systable_endscan(scan); @@ -321,7 +320,7 @@ DefineOpClass(CreateOpClassStmt *stmt) namestrcpy(&opcName, opcname); values[i++] = NameGetDatum(&opcName); /* opcname */ values[i++] = ObjectIdGetDatum(namespaceoid); /* opcnamespace */ - values[i++] = ObjectIdGetDatum(GetUserId()); /* opcowner */ + values[i++] = ObjectIdGetDatum(GetUserId()); /* opcowner */ values[i++] = ObjectIdGetDatum(typeoid); /* opcintype */ values[i++] = BoolGetDatum(stmt->isDefault); /* opcdefault */ values[i++] = ObjectIdGetDatum(storageoid); /* opckeytype */ @@ -342,8 +341,8 @@ DefineOpClass(CreateOpClassStmt *stmt) storeProcedures(opclassoid, procedures); /* - * Create dependencies. Note: we do not create a dependency link to - * the AM, because we don't currently support DROP ACCESS METHOD. + * Create dependencies. Note: we do not create a dependency link to the + * AM, because we don't currently support DROP ACCESS METHOD. */ myself.classId = OperatorClassRelationId; myself.objectId = opclassoid; @@ -424,8 +423,8 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid) opform = (Form_pg_operator) GETSTRUCT(optup); /* - * btree operators must be binary ops returning boolean, and the - * left-side input type must match the operator class' input type. + * btree operators must be binary ops returning boolean, and the left-side + * input type must match the operator class' input type. */ if (opform->oprkind != 'b') ereport(ERROR, @@ -438,11 +437,11 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid) if (opform->oprleft != typeoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree operators must have index type as left input"))); + errmsg("btree operators must have index type as left input"))); /* - * The subtype is "default" (0) if oprright matches the operator - * class, otherwise it is oprright. + * The subtype is "default" (0) if oprright matches the operator class, + * otherwise it is oprright. 
*/ if (opform->oprright == typeoid) subtype = InvalidOid; @@ -478,8 +477,8 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid) procform = (Form_pg_proc) GETSTRUCT(proctup); /* - * btree support procs must be 2-arg procs returning int4, and the - * first input type must match the operator class' input type. + * btree support procs must be 2-arg procs returning int4, and the first + * input type must match the operator class' input type. */ if (procform->pronargs != 2) ereport(ERROR, @@ -492,11 +491,11 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid) if (procform->proargtypes.values[0] != typeoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree procedures must have index type as first input"))); + errmsg("btree procedures must have index type as first input"))); /* - * The subtype is "default" (0) if second input type matches the - * operator class, otherwise it is the second input type. + * The subtype is "default" (0) if second input type matches the operator + * class, otherwise it is the second input type. */ if (procform->proargtypes.values[1] == typeoid) subtype = InvalidOid; @@ -525,13 +524,13 @@ addClassMember(List **list, OpClassMember *member, bool isProc) if (isProc) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("procedure number %d appears more than once", - member->number))); + errmsg("procedure number %d appears more than once", + member->number))); else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator number %d appears more than once", - member->number))); + errmsg("operator number %d appears more than once", + member->number))); } } *list = lappend(*list, member); @@ -688,7 +687,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("operator class \"%s\" does not exist for access method \"%s\"", - NameListToString(stmt->opclassname), stmt->amname))); + NameListToString(stmt->opclassname), stmt->amname))); opcID = HeapTupleGetOid(tuple); @@ -956,7 +955,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId) if (!superuser()) { /* Otherwise, must be owner of the existing object */ - if (!pg_opclass_ownercheck(HeapTupleGetOid(tup),GetUserId())) + if (!pg_opclass_ownercheck(HeapTupleGetOid(tup), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPCLASS, NameListToString(name)); @@ -972,8 +971,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId) } /* - * Modify the owner --- okay to scribble on tup because it's a - * copy + * Modify the owner --- okay to scribble on tup because it's a copy */ opcForm->opcowner = newOwnerId; diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index f9db742e844..07877962e3f 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.25 2005/08/22 17:38:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.26 2005/10/15 02:49:15 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -69,11 +69,9 @@ DefineOperator(List *names, List *parameters) TypeName *typeName2 = NULL; /* second type name */ Oid typeId1 = InvalidOid; /* types converted to OID */ Oid typeId2 = InvalidOid; - List *commutatorName = NIL; /* optional commutator operator - * name */ + List *commutatorName = NIL; /* optional commutator operator name */ List 
*negatorName = NIL; /* optional negator operator name */ - List *restrictionName = NIL; /* optional restrict. sel. - * procedure */ + List *restrictionName = NIL; /* optional restrict. sel. procedure */ List *joinName = NIL; /* optional join sel. procedure */ List *leftSortName = NIL; /* optional left sort operator */ List *rightSortName = NIL; /* optional right sort operator */ @@ -103,7 +101,7 @@ DefineOperator(List *names, List *parameters) if (typeName1->setof) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("setof type not allowed for operator argument"))); + errmsg("setof type not allowed for operator argument"))); } else if (pg_strcasecmp(defel->defname, "rightarg") == 0) { @@ -111,7 +109,7 @@ DefineOperator(List *names, List *parameters) if (typeName2->setof) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("setof type not allowed for operator argument"))); + errmsg("setof type not allowed for operator argument"))); } else if (pg_strcasecmp(defel->defname, "procedure") == 0) functionName = defGetQualifiedName(defel); @@ -157,8 +155,8 @@ DefineOperator(List *names, List *parameters) typeId2 = typenameTypeId(typeName2); /* - * If any of the mergejoin support operators were given, then canMerge - * is implicit. If canMerge is specified or implicit, fill in default + * If any of the mergejoin support operators were given, then canMerge is + * implicit. If canMerge is specified or implicit, fill in default * operator names for any missing mergejoin support operators. */ if (leftSortName || rightSortName || ltCompareName || gtCompareName) @@ -184,11 +182,9 @@ DefineOperator(List *names, List *parameters) typeId1, /* left type id */ typeId2, /* right type id */ functionName, /* function for operator */ - commutatorName, /* optional commutator operator - * name */ + commutatorName, /* optional commutator operator name */ negatorName, /* optional negator operator name */ - restrictionName, /* optional restrict. sel. - * procedure */ + restrictionName, /* optional restrict. sel. procedure */ joinName, /* optional join sel. 
procedure name */ canHash, /* operator hashes */ leftSortName, /* optional left sort operator */ @@ -300,7 +296,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, if (!superuser()) { /* Otherwise, must be owner of the existing object */ - if (!pg_oper_ownercheck(operOid,GetUserId())) + if (!pg_oper_ownercheck(operOid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, NameListToString(name)); @@ -317,8 +313,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, } /* - * Modify the owner --- okay to scribble on tup because it's a - * copy + * Modify the owner --- okay to scribble on tup because it's a copy */ oprForm->oprowner = newOwnerId; diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index 0ff53666136..e68d221f01d 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -14,7 +14,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.42 2005/06/03 23:05:28 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.43 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -54,27 +54,26 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params) errmsg("invalid cursor name: must not be empty"))); /* - * If this is a non-holdable cursor, we require that this statement - * has been executed inside a transaction block (or else, it would - * have no user-visible effect). + * If this is a non-holdable cursor, we require that this statement has + * been executed inside a transaction block (or else, it would have no + * user-visible effect). */ if (!(stmt->options & CURSOR_OPT_HOLD)) RequireTransactionChain((void *) stmt, "DECLARE CURSOR"); /* - * Because the planner is not cool about not scribbling on its input, - * we make a preliminary copy of the source querytree. This prevents + * Because the planner is not cool about not scribbling on its input, we + * make a preliminary copy of the source querytree. This prevents * problems in the case that the DECLARE CURSOR is in a portal and is - * executed repeatedly. XXX the planner really shouldn't modify its - * input ... FIXME someday. + * executed repeatedly. XXX the planner really shouldn't modify its input + * ... FIXME someday. */ query = copyObject(stmt->query); /* * The query has been through parse analysis, but not rewriting or - * planning as yet. Note that the grammar ensured we have a SELECT - * query, so we are not expecting rule rewriting to do anything - * strange. + * planning as yet. Note that the grammar ensured we have a SELECT query, + * so we are not expecting rule rewriting to do anything strange. */ AcquireRewriteLocks(query); rewritten = QueryRewrite(query); @@ -91,14 +90,13 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params) if (query->rowMarks != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DECLARE CURSOR ... FOR UPDATE/SHARE is not supported"), + errmsg("DECLARE CURSOR ... FOR UPDATE/SHARE is not supported"), errdetail("Cursors must be READ ONLY."))); plan = planner(query, true, stmt->options, NULL); /* - * Create a portal and copy the query and plan into its memory - * context. + * Create a portal and copy the query and plan into its memory context. 
*/ portal = CreatePortal(stmt->portalname, false, false); @@ -116,11 +114,10 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params) /* * Also copy the outer portal's parameter list into the inner portal's - * memory context. We want to pass down the parameter values in case - * we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo = - * $1 This will have been parsed using the outer parameter set and the - * parameter value needs to be preserved for use when the cursor is - * executed. + * memory context. We want to pass down the parameter values in case we + * had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 This + * will have been parsed using the outer parameter set and the parameter + * value needs to be preserved for use when the cursor is executed. */ params = copyParamList(params); @@ -130,8 +127,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params) * Set up options for portal. * * If the user didn't specify a SCROLL type, allow or disallow scrolling - * based on whether it would require any additional runtime overhead - * to do so. + * based on whether it would require any additional runtime overhead to do + * so. */ portal->cursorOptions = stmt->options; if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL))) @@ -150,8 +147,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params) Assert(portal->strategy == PORTAL_ONE_SELECT); /* - * We're done; the query won't actually be run until - * PerformPortalFetch is called. + * We're done; the query won't actually be run until PerformPortalFetch is + * called. */ } @@ -189,7 +186,7 @@ PerformPortalFetch(FetchStmt *stmt, { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_CURSOR), - errmsg("cursor \"%s\" does not exist", stmt->portalname))); + errmsg("cursor \"%s\" does not exist", stmt->portalname))); return; /* keep compiler happy */ } @@ -264,10 +261,9 @@ PortalCleanup(Portal portal) AssertArg(portal->cleanup == PortalCleanup); /* - * Shut down executor, if still running. We skip this during error - * abort, since other mechanisms will take care of releasing executor - * resources, and we can't be sure that ExecutorEnd itself wouldn't - * fail. + * Shut down executor, if still running. We skip this during error abort, + * since other mechanisms will take care of releasing executor resources, + * and we can't be sure that ExecutorEnd itself wouldn't fail. */ queryDesc = PortalGetQueryDesc(portal); if (queryDesc) @@ -367,9 +363,8 @@ PersistHoldablePortal(Portal portal) MemoryContextSwitchTo(PortalContext); /* - * Rewind the executor: we need to store the entire result set in - * the tuplestore, so that subsequent backward FETCHs can be - * processed. + * Rewind the executor: we need to store the entire result set in the + * tuplestore, so that subsequent backward FETCHs can be processed. */ ExecutorRewind(queryDesc); @@ -391,10 +386,10 @@ PersistHoldablePortal(Portal portal) /* * Reset the position in the result set: ideally, this could be - * implemented by just skipping straight to the tuple # that we - * need to be at, but the tuplestore API doesn't support that. So - * we start at the beginning of the tuplestore and iterate through - * it until we reach where we need to be. FIXME someday? + * implemented by just skipping straight to the tuple # that we need + * to be at, but the tuplestore API doesn't support that. So we start + * at the beginning of the tuplestore and iterate through it until we + * reach where we need to be. FIXME someday? 
*/ MemoryContextSwitchTo(portal->holdContext); @@ -404,8 +399,8 @@ PersistHoldablePortal(Portal portal) if (portal->posOverflow) /* oops, cannot trust portalPos */ ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not reposition held cursor"))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not reposition held cursor"))); tuplestore_rescan(portal->holdStore); @@ -453,10 +448,10 @@ PersistHoldablePortal(Portal portal) QueryContext = saveQueryContext; /* - * We can now release any subsidiary memory of the portal's heap - * context; we'll never use it again. The executor already dropped - * its context, but this will clean up anything that glommed onto the - * portal's heap via PortalContext. + * We can now release any subsidiary memory of the portal's heap context; + * we'll never use it again. The executor already dropped its context, + * but this will clean up anything that glommed onto the portal's heap via + * PortalContext. */ MemoryContextDeleteChildren(PortalGetHeapMemory(portal)); } diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index dec3d249dfa..5420da4a626 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -10,7 +10,7 @@ * Copyright (c) 2002-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.40 2005/06/22 17:45:45 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.41 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -84,17 +84,17 @@ PrepareQuery(PrepareStmt *stmt) } /* - * Parse analysis is already done, but we must still rewrite and plan - * the query. + * Parse analysis is already done, but we must still rewrite and plan the + * query. */ /* - * Because the planner is not cool about not scribbling on its input, - * we make a preliminary copy of the source querytree. This prevents + * Because the planner is not cool about not scribbling on its input, we + * make a preliminary copy of the source querytree. This prevents * problems in the case that the PREPARE is in a portal or plpgsql * function and is executed repeatedly. (See also the same hack in - * DECLARE CURSOR and EXPLAIN.) XXX the planner really shouldn't - * modify its input ... FIXME someday. + * DECLARE CURSOR and EXPLAIN.) XXX the planner really shouldn't modify + * its input ... FIXME someday. */ query = copyObject(stmt->query); @@ -106,8 +106,8 @@ PrepareQuery(PrepareStmt *stmt) plan_list = pg_plan_queries(query_list, NULL, false); /* - * Save the results. We don't have the query string for this PREPARE, - * but we do have the string we got from the client, so use that. + * Save the results. We don't have the query string for this PREPARE, but + * we do have the string we got from the client, so use that. */ StorePreparedStatement(stmt->name, debug_query_string, @@ -146,8 +146,8 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest, char *completionTag) if (entry->argtype_list != NIL) { /* - * Need an EState to evaluate parameters; must not delete it till - * end of query, in case parameters are pass-by-reference. + * Need an EState to evaluate parameters; must not delete it till end + * of query, in case parameters are pass-by-reference. 
*/ estate = CreateExecutorState(); paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list); @@ -159,10 +159,10 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest, char *completionTag) portal = CreateNewPortal(); /* - * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so - * that we can modify its destination (yech, but this has always been - * ugly). For regular EXECUTE we can just use the stored query where - * it sits, since the executor is read-only. + * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so that + * we can modify its destination (yech, but this has always been ugly). + * For regular EXECUTE we can just use the stored query where it sits, + * since the executor is read-only. */ if (stmt->into) { @@ -245,7 +245,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes) bool isNull; paramLI[i].value = ExecEvalExprSwitchContext(n, - GetPerTupleExprContext(estate), + GetPerTupleExprContext(estate), &isNull, NULL); paramLI[i].kind = PARAM_NUM; @@ -333,8 +333,8 @@ StorePreparedStatement(const char *stmt_name, /* * We need to copy the data so that it is stored in the correct memory * context. Do this before making hashtable entry, so that an - * out-of-memory failure only wastes memory and doesn't leave us with - * an incomplete (ie corrupt) hashtable entry. + * out-of-memory failure only wastes memory and doesn't leave us with an + * incomplete (ie corrupt) hashtable entry. */ qstring = query_string ? pstrdup(query_string) : NULL; query_list = (List *) copyObject(query_list); @@ -380,9 +380,9 @@ FetchPreparedStatement(const char *stmt_name, bool throwError) if (prepared_queries) { /* - * We can't just use the statement name as supplied by the user: - * the hash package is picky enough that it needs to be - * NULL-padded out to the appropriate length to work correctly. + * We can't just use the statement name as supplied by the user: the + * hash package is picky enough that it needs to be NULL-padded out to + * the appropriate length to work correctly. */ StrNCpy(key, stmt_name, sizeof(key)); @@ -447,7 +447,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt) /* * Given a prepared statement that returns tuples, extract the query - * targetlist. Returns NIL if the statement doesn't have a determinable + * targetlist. Returns NIL if the statement doesn't have a determinable * targetlist. * * Note: do not modify the result. 
@@ -464,31 +464,31 @@ FetchPreparedStatementTargetList(PreparedStatement *stmt) return ((Query *) linitial(stmt->query_list))->targetList; if (strategy == PORTAL_UTIL_SELECT) { - Node *utilityStmt; + Node *utilityStmt; utilityStmt = ((Query *) linitial(stmt->query_list))->utilityStmt; switch (nodeTag(utilityStmt)) { case T_FetchStmt: - { - FetchStmt *substmt = (FetchStmt *) utilityStmt; - Portal subportal; + { + FetchStmt *substmt = (FetchStmt *) utilityStmt; + Portal subportal; - Assert(!substmt->ismove); - subportal = GetPortalByName(substmt->portalname); - Assert(PortalIsValid(subportal)); - return FetchPortalTargetList(subportal); - } + Assert(!substmt->ismove); + subportal = GetPortalByName(substmt->portalname); + Assert(PortalIsValid(subportal)); + return FetchPortalTargetList(subportal); + } case T_ExecuteStmt: - { - ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt; - PreparedStatement *entry; + { + ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt; + PreparedStatement *entry; - Assert(!substmt->into); - entry = FetchPreparedStatement(substmt->name, true); - return FetchPreparedStatementTargetList(entry); - } + Assert(!substmt->into); + entry = FetchPreparedStatement(substmt->name, true); + return FetchPreparedStatementTargetList(entry); + } default: break; @@ -564,8 +564,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate) if (entry->argtype_list != NIL) { /* - * Need an EState to evaluate parameters; must not delete it till - * end of query, in case parameters are pass-by-reference. + * Need an EState to evaluate parameters; must not delete it till end + * of query, in case parameters are pass-by-reference. */ estate = CreateExecutorState(); paramLI = EvaluateParams(estate, execstmt->params, @@ -597,7 +597,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate) if (query->commandType != CMD_SELECT) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("prepared statement is not a SELECT"))); + errmsg("prepared statement is not a SELECT"))); /* Copy the query so we can modify it */ query = copyObject(query); diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 4155dc179ad..b13f7234dba 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.62 2005/09/08 20:07:42 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.63 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -44,7 +44,7 @@ typedef struct } PLTemplate; static void create_proc_lang(const char *languageName, - Oid handlerOid, Oid valOid, bool trusted); + Oid handlerOid, Oid valOid, bool trusted); static PLTemplate *find_language_template(const char *languageName); @@ -68,7 +68,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to create procedural language"))); + errmsg("must be superuser to create procedural language"))); /* * Translate the language name and check that this language doesn't @@ -89,7 +89,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) */ if ((pltemplate = find_language_template(languageName)) != NULL) { - List *funcname; + List *funcname; /* * Give a notice if we are ignoring supplied parameters. 
@@ -99,9 +99,9 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) (errmsg("using pg_pltemplate information instead of CREATE LANGUAGE parameters"))); /* - * Find or create the handler function, which we force to be in - * the pg_catalog schema. If already present, it must have the - * correct return type. + * Find or create the handler function, which we force to be in the + * pg_catalog schema. If already present, it must have the correct + * return type. */ funcname = SystemFuncName(pltemplate->tmplhandler); handlerOid = LookupFuncName(funcname, 0, funcargtypes, true); @@ -111,23 +111,23 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) if (funcrettype != LANGUAGE_HANDLEROID) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type \"language_handler\"", - NameListToString(funcname)))); + errmsg("function %s must return type \"language_handler\"", + NameListToString(funcname)))); } else { handlerOid = ProcedureCreate(pltemplate->tmplhandler, PG_CATALOG_NAMESPACE, - false, /* replace */ - false, /* returnsSet */ + false, /* replace */ + false, /* returnsSet */ LANGUAGE_HANDLEROID, ClanguageId, F_FMGR_C_VALIDATOR, pltemplate->tmplhandler, pltemplate->tmpllibrary, - false, /* isAgg */ - false, /* security_definer */ - false, /* isStrict */ + false, /* isAgg */ + false, /* security_definer */ + false, /* isStrict */ PROVOLATILE_VOLATILE, buildoidvector(funcargtypes, 0), PointerGetDatum(NULL), @@ -148,16 +148,16 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) { valOid = ProcedureCreate(pltemplate->tmplvalidator, PG_CATALOG_NAMESPACE, - false, /* replace */ - false, /* returnsSet */ + false, /* replace */ + false, /* returnsSet */ VOIDOID, ClanguageId, F_FMGR_C_VALIDATOR, pltemplate->tmplvalidator, pltemplate->tmpllibrary, - false, /* isAgg */ - false, /* security_definer */ - false, /* isStrict */ + false, /* isAgg */ + false, /* security_definer */ + false, /* isStrict */ PROVOLATILE_VOLATILE, buildoidvector(funcargtypes, 1), PointerGetDatum(NULL), @@ -175,9 +175,9 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) else { /* - * No template, so use the provided information. If there's - * no handler clause, the user is trying to rely on a template - * that we don't have, so complain accordingly. + * No template, so use the provided information. If there's no + * handler clause, the user is trying to rely on a template that we + * don't have, so complain accordingly. 
*/ if (!stmt->plhandler) ereport(ERROR, @@ -210,8 +210,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type \"language_handler\"", - NameListToString(stmt->plhandler)))); + errmsg("function %s must return type \"language_handler\"", + NameListToString(stmt->plhandler)))); } /* validate the validator function */ @@ -385,7 +385,7 @@ DropProceduralLanguage(DropPLangStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to drop procedural language"))); + errmsg("must be superuser to drop procedural language"))); /* * Translate the language name, check that the language exists @@ -471,7 +471,7 @@ RenameLanguage(const char *oldname, const char *newname) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to rename procedural language"))); + errmsg("must be superuser to rename procedural language"))); /* rename */ namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname); diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index f0ae06f15c6..56a3359a532 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.34 2005/08/22 17:38:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.35 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -42,8 +42,8 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) Oid namespaceId; List *parsetree_list; ListCell *parsetree_item; - Oid owner_uid; - Oid saved_uid; + Oid owner_uid; + Oid saved_uid; AclResult aclresult; saved_uid = GetUserId(); @@ -60,8 +60,8 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) * To create a schema, must have schema-create privilege on the current * database and must be able to become the target role (this does not * imply that the target role itself must have create-schema privilege). - * The latter provision guards against "giveaway" attacks. Note that - * a superuser will always have both of these privileges a fortiori. + * The latter provision guards against "giveaway" attacks. Note that a + * superuser will always have both of these privileges a fortiori. */ aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE); if (aclresult != ACLCHECK_OK) @@ -75,15 +75,15 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable schema name \"%s\"", schemaName), - errdetail("The prefix \"pg_\" is reserved for system schemas."))); + errdetail("The prefix \"pg_\" is reserved for system schemas."))); /* * If the requested authorization is different from the current user, - * temporarily set the current user so that the object(s) will be - * created with the correct ownership. + * temporarily set the current user so that the object(s) will be created + * with the correct ownership. * - * (The setting will revert to session user on error or at the end of - * this routine.) + * (The setting will revert to session user on error or at the end of this + * routine.) */ if (saved_uid != owner_uid) SetUserId(owner_uid); @@ -95,19 +95,18 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) CommandCounterIncrement(); /* - * Temporarily make the new namespace be the front of the search path, - * as well as the default creation target namespace. 
This will be - * undone at the end of this routine, or upon error. + * Temporarily make the new namespace be the front of the search path, as + * well as the default creation target namespace. This will be undone at + * the end of this routine, or upon error. */ PushSpecialNamespace(namespaceId); /* - * Examine the list of commands embedded in the CREATE SCHEMA command, - * and reorganize them into a sequentially executable order with no - * forward references. Note that the result is still a list of raw - * parsetrees in need of parse analysis --- we cannot, in general, run - * analyze.c on one statement until we have actually executed the - * prior ones. + * Examine the list of commands embedded in the CREATE SCHEMA command, and + * reorganize them into a sequentially executable order with no forward + * references. Note that the result is still a list of raw parsetrees in + * need of parse analysis --- we cannot, in general, run analyze.c on one + * statement until we have actually executed the prior ones. */ parsetree_list = analyzeCreateSchemaStmt(stmt); @@ -174,8 +173,8 @@ RemoveSchema(List *names, DropBehavior behavior) namespaceName); /* - * Do the deletion. Objects contained in the schema are removed by - * means of their dependency links to the schema. + * Do the deletion. Objects contained in the schema are removed by means + * of their dependency links to the schema. */ object.classId = NamespaceRelationId; object.objectId = namespaceId; @@ -254,7 +253,7 @@ RenameSchema(const char *oldname, const char *newname) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable schema name \"%s\"", newname), - errdetail("The prefix \"pg_\" is reserved for system schemas."))); + errdetail("The prefix \"pg_\" is reserved for system schemas."))); /* rename */ namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname); @@ -302,21 +301,21 @@ AlterSchemaOwner(const char *name, Oid newOwnerId) AclResult aclresult; /* Otherwise, must be owner of the existing object */ - if (!pg_namespace_ownercheck(HeapTupleGetOid(tup),GetUserId())) + if (!pg_namespace_ownercheck(HeapTupleGetOid(tup), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, name); /* Must be able to become new owner */ - check_is_member_of_role(GetUserId(),newOwnerId); + check_is_member_of_role(GetUserId(), newOwnerId); /* * must have create-schema rights * - * NOTE: This is different from other alter-owner checks in - * that the current user is checked for create privileges - * instead of the destination owner. This is consistent - * with the CREATE case for schemas. Because superusers - * will always have this right, we need no special case for them. + * NOTE: This is different from other alter-owner checks in that the + * current user is checked for create privileges instead of the + * destination owner. This is consistent with the CREATE case for + * schemas. Because superusers will always have this right, we need + * no special case for them. 
*/ aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE); diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 9bf801f2308..201fcbf0c6b 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.124 2005/10/02 23:50:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.125 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -219,17 +219,17 @@ DefineSequence(CreateSeqStmt *seq) /* * Two special hacks here: * - * 1. Since VACUUM does not process sequences, we have to force the tuple - * to have xmin = FrozenTransactionId now. Otherwise it would become + * 1. Since VACUUM does not process sequences, we have to force the tuple to + * have xmin = FrozenTransactionId now. Otherwise it would become * invisible to SELECTs after 2G transactions. It is okay to do this * because if the current transaction aborts, no other xact will ever * examine the sequence tuple anyway. * - * 2. Even though heap_insert emitted a WAL log record, we have to emit - * an XLOG_SEQ_LOG record too, since (a) the heap_insert record will - * not have the right xmin, and (b) REDO of the heap_insert record - * would re-init page and sequence magic number would be lost. This - * means two log records instead of one :-( + * 2. Even though heap_insert emitted a WAL log record, we have to emit an + * XLOG_SEQ_LOG record too, since (a) the heap_insert record will not have + * the right xmin, and (b) REDO of the heap_insert record would re-init + * page and sequence magic number would be lost. This means two log + * records instead of one :-( */ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); @@ -237,12 +237,11 @@ DefineSequence(CreateSeqStmt *seq) { /* - * Note that the "tuple" structure is still just a local tuple - * record created by heap_formtuple; its t_data pointer doesn't - * point at the disk buffer. To scribble on the disk buffer we - * need to fetch the item pointer. But do the same to the local - * tuple, since that will be the source for the WAL log record, - * below. + * Note that the "tuple" structure is still just a local tuple record + * created by heap_formtuple; its t_data pointer doesn't point at the + * disk buffer. To scribble on the disk buffer we need to fetch the + * item pointer. But do the same to the local tuple, since that will + * be the source for the WAL log record, below. */ ItemId itemId; Item item; @@ -334,8 +333,8 @@ AlterSequence(AlterSeqStmt *stmt) /* Clear local cache so that we don't think we have cached numbers */ elm->last = new.last_value; /* last returned number */ - elm->cached = new.last_value; /* last cached number (forget - * cached values) */ + elm->cached = new.last_value; /* last cached number (forget cached + * values) */ START_CRIT_SECTION(); @@ -456,14 +455,14 @@ nextval_internal(Oid relid) } /* - * Decide whether we should emit a WAL log record. If so, force up - * the fetch count to grab SEQ_LOG_VALS more values than we actually - * need to cache. (These will then be usable without logging.) + * Decide whether we should emit a WAL log record. If so, force up the + * fetch count to grab SEQ_LOG_VALS more values than we actually need to + * cache. (These will then be usable without logging.) 
* - * If this is the first nextval after a checkpoint, we must force a new - * WAL record to be written anyway, else replay starting from the - * checkpoint would fail to advance the sequence past the logged - * values. In this case we may as well fetch extra values. + * If this is the first nextval after a checkpoint, we must force a new WAL + * record to be written anyway, else replay starting from the checkpoint + * would fail to advance the sequence past the logged values. In this + * case we may as well fetch extra values. */ if (log < fetch) { @@ -486,8 +485,8 @@ nextval_internal(Oid relid) while (fetch) /* try to fetch cache [+ log ] numbers */ { /* - * Check MAXVALUE for ascending sequences and MINVALUE for - * descending sequences + * Check MAXVALUE for ascending sequences and MINVALUE for descending + * sequences */ if (incby > 0) { @@ -503,9 +502,9 @@ nextval_internal(Oid relid) snprintf(buf, sizeof(buf), INT64_FORMAT, maxv); ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("nextval: reached maximum value of sequence \"%s\" (%s)", - RelationGetRelationName(seqrel), buf))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("nextval: reached maximum value of sequence \"%s\" (%s)", + RelationGetRelationName(seqrel), buf))); } next = minv; } @@ -526,9 +525,9 @@ nextval_internal(Oid relid) snprintf(buf, sizeof(buf), INT64_FORMAT, minv); ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("nextval: reached minimum value of sequence \"%s\" (%s)", - RelationGetRelationName(seqrel), buf))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("nextval: reached minimum value of sequence \"%s\" (%s)", + RelationGetRelationName(seqrel), buf))); } next = maxv; } @@ -721,8 +720,7 @@ do_setval(Oid relid, int64 next, bool iscalled) /* save info in local cache */ elm->last = next; /* last returned number */ - elm->cached = next; /* last cached number (forget cached - * values) */ + elm->cached = next; /* last cached number (forget cached values) */ START_CRIT_SECTION(); @@ -805,7 +803,7 @@ setval3_oid(PG_FUNCTION_ARGS) /* * If we haven't touched the sequence already in this transaction, - * we need to acquire AccessShareLock. We arrange for the lock to + * we need to acquire AccessShareLock. We arrange for the lock to * be owned by the top transaction, so that we don't need to do it * more than once per xact. */ @@ -869,15 +867,15 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel) /* * Allocate new seqtable entry if we didn't find one. * - * NOTE: seqtable entries remain in the list for the life of a backend. - * If the sequence itself is deleted then the entry becomes wasted - * memory, but it's small enough that this should not matter. + * NOTE: seqtable entries remain in the list for the life of a backend. If + * the sequence itself is deleted then the entry becomes wasted memory, + * but it's small enough that this should not matter. */ if (elm == NULL) { /* - * Time to make a new seqtable entry. These entries live as long - * as the backend does, so we use plain malloc for them. + * Time to make a new seqtable entry. These entries live as long as + * the backend does, so we use plain malloc for them. 
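/*
 * Illustrative sketch, not part of the commit above: a self-contained toy
 * model of the cache-versus-WAL arithmetic that the nextval_internal()
 * comments describe.  ToySequence, toy_nextval() and wal_log() are made-up
 * names; the real code works on the sequence relation and emits genuine
 * WAL records, and also forces a fresh record after a checkpoint, which is
 * omitted here.
 */
#include <stdint.h>
#include <stdio.h>

#define SEQ_LOG_VALS 32         /* extra values covered by each "WAL" record */

typedef struct ToySequence
{
    int64_t     last_value;     /* last value handed out, as stored on "disk" */
    int64_t     log_cnt;        /* values past last_value still covered by the last record */
    int64_t     increment;
    int64_t     cache;          /* values to prefetch into the backend-local cache */
    int64_t     cached_last;    /* analogous to SeqTableData.last */
    int64_t     cached_max;     /* analogous to SeqTableData.cached */
    int         have_cache;
} ToySequence;

static void
wal_log(int64_t value_reached)
{
    printf("  log record: sequence advanced to %lld\n", (long long) value_reached);
}

static int64_t
toy_nextval(ToySequence *seq)
{
    int64_t     fetch = seq->cache;
    int64_t     log = seq->log_cnt;

    /* serve from the local cache when possible: no disk or log work at all */
    if (seq->have_cache && seq->cached_last < seq->cached_max)
        return seq->cached_last += seq->increment;

    /*
     * If the values we want to hand out are not all covered by the last
     * logged horizon, log SEQ_LOG_VALS values beyond what we need, so that
     * the following calls can be served without logging again.
     */
    if (log < fetch)
    {
        log = fetch + SEQ_LOG_VALS;
        wal_log(seq->last_value + log * seq->increment);
    }

    seq->cached_last = seq->last_value + seq->increment;
    seq->cached_max = seq->last_value + fetch * seq->increment;
    seq->last_value += fetch * seq->increment;
    seq->log_cnt = log - fetch;
    seq->have_cache = 1;

    return seq->cached_last;
}

int
main(void)
{
    ToySequence seq = {0, 0, 1, 5, 0, 0, 0};
    int         i;

    for (i = 0; i < 12; i++)
        printf("nextval -> %lld\n", (long long) toy_nextval(&seq));
    return 0;
}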
*/ elm = (SeqTable) malloc(sizeof(SeqTableData)); if (elm == NULL) @@ -1094,8 +1092,8 @@ init_params(List *options, Form_pg_sequence new, bool isInit) snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value); ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("START value (%s) can't be less than MINVALUE (%s)", - bufs, bufm))); + errmsg("START value (%s) can't be less than MINVALUE (%s)", + bufs, bufm))); } if (new->last_value > new->max_value) { @@ -1106,8 +1104,8 @@ init_params(List *options, Form_pg_sequence new, bool isInit) snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value); ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("START value (%s) can't be greater than MAXVALUE (%s)", - bufs, bufm))); + errmsg("START value (%s) can't be greater than MAXVALUE (%s)", + bufs, bufm))); } /* CACHE */ @@ -1152,7 +1150,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record) buffer = XLogReadBuffer(true, reln, 0); if (!BufferIsValid(buffer)) elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u", - xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); + xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); page = (Page) BufferGetPage(buffer); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 7df645af9d3..abec1a835d1 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.173 2005/10/03 02:45:12 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.174 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -164,10 +164,10 @@ static int findAttrByName(const char *attributeName, List *schema); static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass); static bool needs_toast_table(Relation rel); static void AlterIndexNamespaces(Relation classRel, Relation rel, - Oid oldNspOid, Oid newNspOid); + Oid oldNspOid, Oid newNspOid); static void AlterSeqNamespaces(Relation classRel, Relation rel, - Oid oldNspOid, Oid newNspOid, - const char *newNspName); + Oid oldNspOid, Oid newNspOid, + const char *newNspName); static int transformColumnNameList(Oid relId, List *colList, int16 *attnums, Oid *atttypids); static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, @@ -238,14 +238,14 @@ static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab); static void ATPostAlterTypeParse(char *cmd, List **wqueue); static void ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing); static void change_owner_recurse_to_sequences(Oid relationOid, - Oid newOwnerId); + Oid newOwnerId); static void ATExecClusterOn(Relation rel, const char *indexName); static void ATExecDropCluster(Relation rel); static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename); static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace); static void ATExecEnableDisableTrigger(Relation rel, char *trigname, - bool enable, bool skip_system); + bool enable, bool skip_system); static void copy_relation_data(Relation rel, SMgrRelation dst); static void update_ri_trigger_args(Oid relid, const char *oldname, @@ -281,8 +281,8 @@ DefineRelation(CreateStmt *stmt, char relkind) AttrNumber attnum; /* - * Truncate relname to appropriate length (probably a waste of time, - * as parser should have done this already). 
+ * Truncate relname to appropriate length (probably a waste of time, as + * parser should have done this already). */ StrNCpy(relname, stmt->relation->relname, NAMEDATALEN); @@ -292,12 +292,12 @@ DefineRelation(CreateStmt *stmt, char relkind) if (stmt->oncommit != ONCOMMIT_NOOP && !stmt->relation->istemp) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("ON COMMIT can only be used on temporary tables"))); + errmsg("ON COMMIT can only be used on temporary tables"))); /* - * Look up the namespace in which we are supposed to create the - * relation. Check we have permission to create there. Skip check if - * bootstrapping, since permissions machinery may not be working yet. + * Look up the namespace in which we are supposed to create the relation. + * Check we have permission to create there. Skip check if bootstrapping, + * since permissions machinery may not be working yet. */ namespaceId = RangeVarGetCreationNamespace(stmt->relation); @@ -344,19 +344,19 @@ DefineRelation(CreateStmt *stmt, char relkind) } /* - * Look up inheritance ancestors and generate relation schema, - * including inherited attributes. + * Look up inheritance ancestors and generate relation schema, including + * inherited attributes. */ schema = MergeAttributes(schema, stmt->inhRelations, stmt->relation->istemp, - &inheritOids, &old_constraints, &parentOidCount); + &inheritOids, &old_constraints, &parentOidCount); /* - * Create a relation descriptor from the relation schema and create - * the relation. Note that in this stage only inherited (pre-cooked) - * defaults and constraints will be included into the new relation. - * (BuildDescForRelation takes care of the inherited defaults, but we - * have to copy inherited constraints here.) + * Create a relation descriptor from the relation schema and create the + * relation. Note that in this stage only inherited (pre-cooked) defaults + * and constraints will be included into the new relation. + * (BuildDescForRelation takes care of the inherited defaults, but we have + * to copy inherited constraints here.) */ descriptor = BuildDescForRelation(schema); @@ -380,11 +380,10 @@ DefineRelation(CreateStmt *stmt, char relkind) Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL); /* - * In multiple-inheritance situations, it's possible to - * inherit the same grandparent constraint through multiple - * parents. Hence, discard inherited constraints that match as - * to both name and expression. Otherwise, gripe if the names - * conflict. + * In multiple-inheritance situations, it's possible to inherit + * the same grandparent constraint through multiple parents. + * Hence, discard inherited constraints that match as to both name + * and expression. Otherwise, gripe if the names conflict. */ for (i = 0; i < ncheck; i++) { @@ -444,25 +443,24 @@ DefineRelation(CreateStmt *stmt, char relkind) /* * Open the new relation and acquire exclusive lock on it. This isn't - * really necessary for locking out other backends (since they can't - * see the new rel anyway until we commit), but it keeps the lock - * manager from complaining about deadlock risks. + * really necessary for locking out other backends (since they can't see + * the new rel anyway until we commit), but it keeps the lock manager from + * complaining about deadlock risks. */ rel = relation_open(relationId, AccessExclusiveLock); /* - * Now add any newly specified column default values and CHECK - * constraints to the new relation. 
These are passed to us in the - * form of raw parsetrees; we need to transform them to executable - * expression trees before they can be added. The most convenient way - * to do that is to apply the parser's transformExpr routine, but - * transformExpr doesn't work unless we have a pre-existing relation. - * So, the transformation has to be postponed to this final step of - * CREATE TABLE. + * Now add any newly specified column default values and CHECK constraints + * to the new relation. These are passed to us in the form of raw + * parsetrees; we need to transform them to executable expression trees + * before they can be added. The most convenient way to do that is to + * apply the parser's transformExpr routine, but transformExpr doesn't + * work unless we have a pre-existing relation. So, the transformation has + * to be postponed to this final step of CREATE TABLE. * - * Another task that's conveniently done at this step is to add - * dependency links between columns and supporting relations (such as - * SERIAL sequences). + * Another task that's conveniently done at this step is to add dependency + * links between columns and supporting relations (such as SERIAL + * sequences). * * First, scan schema to find new column defaults. */ @@ -528,7 +526,7 @@ RemoveRelation(const RangeVar *relation, DropBehavior behavior) /* * ExecuteTruncate - * Executes a TRUNCATE command. + * Executes a TRUNCATE command. * * This is a multi-relation truncate. It first opens and grabs exclusive * locks on all relations involved, checking permissions and otherwise @@ -540,8 +538,8 @@ RemoveRelation(const RangeVar *relation, DropBehavior behavior) void ExecuteTruncate(List *relations) { - List *rels = NIL; - ListCell *cell; + List *rels = NIL; + ListCell *cell; foreach(cell, relations) { @@ -556,18 +554,18 @@ ExecuteTruncate(List *relations) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a table", - RelationGetRelationName(rel)))); + RelationGetRelationName(rel)))); /* Permissions checks */ if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, - RelationGetRelationName(rel)); + RelationGetRelationName(rel)); if (!allowSystemTableMods && IsSystemRelation(rel)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied: \"%s\" is a system catalog", - RelationGetRelationName(rel)))); + RelationGetRelationName(rel)))); /* * We can never allow truncation of shared or nailed-in-cache @@ -578,7 +576,7 @@ ExecuteTruncate(List *relations) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot truncate system relation \"%s\"", - RelationGetRelationName(rel)))); + RelationGetRelationName(rel)))); /* * Don't allow truncate on temp tables of other backends ... their @@ -587,7 +585,7 @@ ExecuteTruncate(List *relations) if (isOtherTempNamespace(RelationGetNamespace(rel))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot truncate temporary tables of other sessions"))); + errmsg("cannot truncate temporary tables of other sessions"))); /* Save it into the list of rels to truncate */ rels = lappend(rels, rel); @@ -704,20 +702,19 @@ MergeAttributes(List *schema, List *supers, bool istemp, List *constraints = NIL; int parentsWithOids = 0; bool have_bogus_defaults = false; - char *bogus_marker = "Bogus!"; /* marks conflicting - * defaults */ + char *bogus_marker = "Bogus!"; /* marks conflicting defaults */ int child_attno; /* - * Check for and reject tables with too many columns. 
We perform - * this check relatively early for two reasons: (a) we don't run - * the risk of overflowing an AttrNumber in subsequent code (b) an - * O(n^2) algorithm is okay if we're processing <= 1600 columns, - * but could take minutes to execute if the user attempts to - * create a table with hundreds of thousands of columns. + * Check for and reject tables with too many columns. We perform this + * check relatively early for two reasons: (a) we don't run the risk of + * overflowing an AttrNumber in subsequent code (b) an O(n^2) algorithm is + * okay if we're processing <= 1600 columns, but could take minutes to + * execute if the user attempts to create a table with hundreds of + * thousands of columns. * - * Note that we also need to check that any we do not exceed this - * figure after including columns from inherited relations. + * Note that we also need to check that any we do not exceed this figure + * after including columns from inherited relations. */ if (list_length(schema) > MaxHeapAttributeNumber) ereport(ERROR, @@ -728,9 +725,9 @@ MergeAttributes(List *schema, List *supers, bool istemp, /* * Check for duplicate names in the explicit list of attributes. * - * Although we might consider merging such entries in the same way that - * we handle name conflicts for inherited attributes, it seems to make - * more sense to assume such conflicts are errors. + * Although we might consider merging such entries in the same way that we + * handle name conflicts for inherited attributes, it seems to make more + * sense to assume such conflicts are errors. */ foreach(entry, schema) { @@ -750,9 +747,9 @@ MergeAttributes(List *schema, List *supers, bool istemp, } /* - * Scan the parents left-to-right, and merge their attributes to form - * a list of inherited attributes (inhSchema). Also check to see if - * we need to inherit an OID column. + * Scan the parents left-to-right, and merge their attributes to form a + * list of inherited attributes (inhSchema). Also check to see if we need + * to inherit an OID column. */ child_attno = 0; foreach(entry, supers) @@ -775,8 +772,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, if (!istemp && isTempNamespace(RelationGetNamespace(relation))) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot inherit from temporary relation \"%s\"", - parent->relname))); + errmsg("cannot inherit from temporary relation \"%s\"", + parent->relname))); /* * We should have an UNDER permission flag for this, but for now, @@ -804,10 +801,9 @@ MergeAttributes(List *schema, List *supers, bool istemp, constr = tupleDesc->constr; /* - * newattno[] will contain the child-table attribute numbers for - * the attributes of this parent table. (They are not the same - * for parents after the first one, nor if we have dropped - * columns.) + * newattno[] will contain the child-table attribute numbers for the + * attributes of this parent table. (They are not the same for + * parents after the first one, nor if we have dropped columns.) */ newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber)); @@ -828,8 +824,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, { /* * change_varattnos_of_a_node asserts that this is greater - * than zero, so if anything tries to use it, we should - * find out. + * than zero, so if anything tries to use it, we should find + * out. 
*/ newattno[parent_attno - 1] = 0; continue; @@ -853,11 +849,11 @@ MergeAttributes(List *schema, List *supers, bool istemp, def->typename->typmod != attribute->atttypmod) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("inherited column \"%s\" has a type conflict", - attributeName), + errmsg("inherited column \"%s\" has a type conflict", + attributeName), errdetail("%s versus %s", TypeNameToString(def->typename), - format_type_be(attribute->atttypid)))); + format_type_be(attribute->atttypid)))); def->inhcount++; /* Merge of NOT NULL constraints = OR 'em together */ def->is_not_null |= attribute->attnotnull; @@ -909,15 +905,14 @@ MergeAttributes(List *schema, List *supers, bool istemp, Assert(this_default != NULL); /* - * If default expr could contain any vars, we'd need to - * fix 'em, but it can't; so default is ready to apply to - * child. + * If default expr could contain any vars, we'd need to fix + * 'em, but it can't; so default is ready to apply to child. * - * If we already had a default from some prior parent, check - * to see if they are the same. If so, no problem; if - * not, mark the column as having a bogus default. Below, - * we will complain if the bogus default isn't overridden - * by the child schema. + * If we already had a default from some prior parent, check to + * see if they are the same. If so, no problem; if not, mark + * the column as having a bogus default. Below, we will + * complain if the bogus default isn't overridden by the child + * schema. */ Assert(def->raw_default == NULL); if (def->cooked_default == NULL) @@ -931,8 +926,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, } /* - * Now copy the constraints of this parent, adjusting attnos using - * the completed newattno[] map + * Now copy the constraints of this parent, adjusting attnos using the + * completed newattno[] map */ if (constr && constr->num_check > 0) { @@ -958,17 +953,17 @@ MergeAttributes(List *schema, List *supers, bool istemp, pfree(newattno); /* - * Close the parent rel, but keep our AccessShareLock on it until - * xact commit. That will prevent someone else from deleting or - * ALTERing the parent before the child is committed. + * Close the parent rel, but keep our AccessShareLock on it until xact + * commit. That will prevent someone else from deleting or ALTERing + * the parent before the child is committed. */ heap_close(relation, NoLock); } /* * If we had no inherited attributes, the result schema is just the - * explicitly declared columns. Otherwise, we need to merge the - * declared columns into the inherited schema list. + * explicitly declared columns. Otherwise, we need to merge the declared + * columns into the inherited schema list. */ if (inhSchema != NIL) { @@ -991,8 +986,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, * have the same type and typmod. 
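/*
 * Illustrative sketch, not part of the commit above: a tiny standalone
 * example of the renumbering idea behind newattno[] in MergeAttributes().
 * Parent attribute numbers map to the child's numbering and dropped parent
 * columns map to 0, so that any accidental use is caught by the Assert the
 * comments above mention.  The column list below is made up.
 */
#include <stdio.h>

typedef short AttrNumber;       /* same idea as PostgreSQL's AttrNumber */

int
main(void)
{
    /* parent has four columns; the third one has been dropped */
    const char *parent_cols[] = {"id", "name", NULL, "created"};
    AttrNumber  newattno[4];
    AttrNumber  child_attno = 0;
    int         i;

    for (i = 0; i < 4; i++)
    {
        if (parent_cols[i] == NULL)
        {
            newattno[i] = 0;    /* dropped: must never be referenced */
            continue;
        }
        newattno[i] = ++child_attno;    /* next child attribute number */
    }

    for (i = 0; i < 4; i++)
        printf("parent attnum %d -> child attnum %d\n", i + 1, newattno[i]);
    return 0;
}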
*/ ereport(NOTICE, - (errmsg("merging column \"%s\" with inherited definition", - attributeName))); + (errmsg("merging column \"%s\" with inherited definition", + attributeName))); def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1); if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) || def->typename->typmod != newdef->typename->typmod) @@ -1002,7 +997,7 @@ MergeAttributes(List *schema, List *supers, bool istemp, attributeName), errdetail("%s versus %s", TypeNameToString(def->typename), - TypeNameToString(newdef->typename)))); + TypeNameToString(newdef->typename)))); /* Mark the column as locally defined */ def->is_local = true; /* Merge of NOT NULL constraints = OR 'em together */ @@ -1026,8 +1021,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, schema = inhSchema; /* - * Check that we haven't exceeded the legal # of columns after - * merging in inherited columns. + * Check that we haven't exceeded the legal # of columns after merging + * in inherited columns. */ if (list_length(schema) > MaxHeapAttributeNumber) ereport(ERROR, @@ -1037,8 +1032,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, } /* - * If we found any conflicting parent default values, check to make - * sure they were overridden by the child. + * If we found any conflicting parent default values, check to make sure + * they were overridden by the child. */ if (have_bogus_defaults) { @@ -1049,8 +1044,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, if (def->cooked_default == bogus_marker) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_DEFINITION), - errmsg("column \"%s\" inherits conflicting default values", - def->colname), + errmsg("column \"%s\" inherits conflicting default values", + def->colname), errhint("To resolve the conflict, specify a default explicitly."))); } } @@ -1083,9 +1078,9 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno) var->varattno > 0) { /* - * ??? the following may be a problem when the node is - * multiply referenced though stringToNode() doesn't create - * such a node currently. + * ??? the following may be a problem when the node is multiply + * referenced though stringToNode() doesn't create such a node + * currently. */ Assert(newattno[var->varattno - 1] > 0); var->varattno = newattno[var->varattno - 1]; @@ -1126,13 +1121,13 @@ StoreCatalogInheritance(Oid relationId, List *supers) return; /* - * Store INHERITS information in pg_inherits using direct ancestors - * only. Also enter dependencies on the direct ancestors, and make - * sure they are marked with relhassubclass = true. + * Store INHERITS information in pg_inherits using direct ancestors only. + * Also enter dependencies on the direct ancestors, and make sure they are + * marked with relhassubclass = true. * - * (Once upon a time, both direct and indirect ancestors were found here - * and then entered into pg_ipl. Since that catalog doesn't exist - * anymore, there's no need to look for indirect ancestors.) + * (Once upon a time, both direct and indirect ancestors were found here and + * then entered into pg_ipl. Since that catalog doesn't exist anymore, + * there's no need to look for indirect ancestors.) */ relation = heap_open(InheritsRelationId, RowExclusiveLock); desc = RelationGetDescr(relation); @@ -1222,8 +1217,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass) /* * Fetch a modifiable copy of the tuple, modify it, update pg_class. 
* - * If the tuple already has the right relhassubclass setting, we don't - * need to update it, but we still need to issue an SI inval message. + * If the tuple already has the right relhassubclass setting, we don't need + * to update it, but we still need to issue an SI inval message. */ relationRelation = heap_open(RelationRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, @@ -1282,14 +1277,14 @@ renameatt(Oid myrelid, ListCell *indexoidscan; /* - * Grab an exclusive lock on the target table, which we will NOT - * release until end of transaction. + * Grab an exclusive lock on the target table, which we will NOT release + * until end of transaction. */ targetrelation = relation_open(myrelid, AccessExclusiveLock); /* - * permissions checking. this would normally be done in utility.c, - * but this particular routine is recursive. + * permissions checking. this would normally be done in utility.c, but + * this particular routine is recursive. * * normally, only the owner of a class can change its schema. */ @@ -1307,9 +1302,8 @@ renameatt(Oid myrelid, * attribute in all classes that inherit from 'relname' (as well as in * 'relname'). * - * any permissions or problems with duplicate attributes will cause the - * whole transaction to abort, which is what we want -- all or - * nothing. + * any permissions or problems with duplicate attributes will cause the whole + * transaction to abort, which is what we want -- all or nothing. */ if (recurse) { @@ -1320,9 +1314,9 @@ renameatt(Oid myrelid, children = find_all_inheritors(myrelid); /* - * find_all_inheritors does the recursive search of the - * inheritance hierarchy, so all we have to do is process all of - * the relids in the list that it returns. + * find_all_inheritors does the recursive search of the inheritance + * hierarchy, so all we have to do is process all of the relids in the + * list that it returns. */ foreach(child, children) { @@ -1337,8 +1331,8 @@ renameatt(Oid myrelid, else { /* - * If we are told not to recurse, there had better not be any - * child tables; else the rename would put them out of step. + * If we are told not to recurse, there had better not be any child + * tables; else the rename would put them out of step. */ if (!recursing && find_inheritance_children(myrelid) != NIL) @@ -1384,7 +1378,7 @@ renameatt(Oid myrelid, ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("column \"%s\" of relation \"%s\" already exists", - newattname, RelationGetRelationName(targetrelation)))); + newattname, RelationGetRelationName(targetrelation)))); namestrcpy(&(attform->attname), newattname); @@ -1396,8 +1390,7 @@ renameatt(Oid myrelid, heap_freetuple(atttup); /* - * Update column names of indexes that refer to the column being - * renamed. + * Update column names of indexes that refer to the column being renamed. */ indexoidlist = RelationGetIndexList(targetrelation); @@ -1494,8 +1487,8 @@ renamerel(Oid myrelid, const char *newrelname) bool relhastriggers; /* - * Grab an exclusive lock on the target table or index, which we will - * NOT release until end of transaction. + * Grab an exclusive lock on the target table or index, which we will NOT + * release until end of transaction. */ targetrelation = relation_open(myrelid, AccessExclusiveLock); @@ -1512,8 +1505,7 @@ renamerel(Oid myrelid, const char *newrelname) relhastriggers = (targetrelation->rd_rel->reltriggers > 0); /* - * Find relation's pg_class tuple, and make sure newrelname isn't in - * use. 
+ * Find relation's pg_class tuple, and make sure newrelname isn't in use. */ relrelation = heap_open(RelationRelationId, RowExclusiveLock); @@ -1530,8 +1522,8 @@ renamerel(Oid myrelid, const char *newrelname) newrelname))); /* - * Update pg_class tuple with new relname. (Scribbling on reltup is - * OK because it's a copy...) + * Update pg_class tuple with new relname. (Scribbling on reltup is OK + * because it's a copy...) */ namestrcpy(&(((Form_pg_class) GETSTRUCT(reltup))->relname), newrelname); @@ -1641,8 +1633,8 @@ update_ri_trigger_args(Oid relid, /* * It is an RI trigger, so parse the tgargs bytea. * - * NB: we assume the field will never be compressed or moved out of - * line; so does trigger.c ... + * NB: we assume the field will never be compressed or moved out of line; + * so does trigger.c ... */ tgnargs = pg_trigger->tgnargs; val = (bytea *) @@ -1663,11 +1655,11 @@ update_ri_trigger_args(Oid relid, } /* - * Figure out which item(s) to look at. If the trigger is - * primary-key type and attached to my rel, I should look at the - * PK fields; if it is foreign-key type and attached to my rel, I - * should look at the FK fields. But the opposite rule holds when - * examining triggers found by tgconstrrel search. + * Figure out which item(s) to look at. If the trigger is primary-key + * type and attached to my rel, I should look at the PK fields; if it + * is foreign-key type and attached to my rel, I should look at the FK + * fields. But the opposite rule holds when examining triggers found + * by tgconstrrel search. */ examine_pk = (tg_type == RI_TRIGGER_PK) == (!fk_scan); @@ -1763,9 +1755,9 @@ update_ri_trigger_args(Oid relid, heap_close(tgrel, RowExclusiveLock); /* - * Increment cmd counter to make updates visible; this is needed in - * case the same tuple has to be updated again by next pass (can - * happen in case of a self-referential FK relationship). + * Increment cmd counter to make updates visible; this is needed in case + * the same tuple has to be updated again by next pass (can happen in case + * of a self-referential FK relationship). */ CommandCounterIncrement(); } @@ -1870,14 +1862,14 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* * Copy the original subcommand for each table. This avoids conflicts * when different child tables need to make different parse - * transformations (for example, the same column may have different - * column numbers in different children). + * transformations (for example, the same column may have different column + * numbers in different children). */ cmd = copyObject(cmd); /* - * Do permissions checking, recursion to child tables if needed, and - * any additional phase-1 processing needed. + * Do permissions checking, recursion to child tables if needed, and any + * additional phase-1 processing needed. */ switch (cmd->subtype) { @@ -1890,8 +1882,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */ /* - * We allow defaults on views so that INSERT into a view can - * have default-ish behavior. This works because the rewriter + * We allow defaults on views so that INSERT into a view can have + * default-ish behavior. This works because the rewriter * substitutes default values into INSERTs before it expands * rules. */ @@ -1943,8 +1935,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* * Currently we recurse only for CHECK constraints, never for - * foreign-key constraints. UNIQUE/PKEY constraints won't be - * seen here. 
+ * foreign-key constraints. UNIQUE/PKEY constraints won't be seen + * here. */ if (IsA(cmd->def, Constraint)) ATSimpleRecursion(wqueue, rel, cmd, recurse); @@ -2042,11 +2034,11 @@ ATRewriteCatalogs(List **wqueue) ListCell *ltab; /* - * We process all the tables "in parallel", one pass at a time. This - * is needed because we may have to propagate work from one table to - * another (specifically, ALTER TYPE on a foreign key's PK has to - * dispatch the re-adding of the foreign key constraint to the other - * table). Work can only be propagated into later passes, however. + * We process all the tables "in parallel", one pass at a time. This is + * needed because we may have to propagate work from one table to another + * (specifically, ALTER TYPE on a foreign key's PK has to dispatch the + * re-adding of the foreign key constraint to the other table). Work can + * only be propagated into later passes, however. */ for (pass = 0; pass < AT_NUM_PASSES; pass++) { @@ -2062,8 +2054,7 @@ ATRewriteCatalogs(List **wqueue) continue; /* - * Exclusive lock was obtained by phase 1, needn't get it - * again + * Exclusive lock was obtained by phase 1, needn't get it again */ rel = relation_open(tab->relid, NoLock); @@ -2071,9 +2062,9 @@ ATRewriteCatalogs(List **wqueue) ATExecCmd(tab, rel, (AlterTableCmd *) lfirst(lcmd)); /* - * After the ALTER TYPE pass, do cleanup work (this is not - * done in ATExecAlterColumnType since it should be done only - * once if multiple columns of a table are altered). + * After the ALTER TYPE pass, do cleanup work (this is not done in + * ATExecAlterColumnType since it should be done only once if + * multiple columns of a table are altered). */ if (pass == AT_PASS_ALTER_TYPE) ATPostAlterTypeCleanup(wqueue, tab); @@ -2083,8 +2074,8 @@ ATRewriteCatalogs(List **wqueue) } /* - * Do an implicit CREATE TOAST TABLE if we executed any subcommands - * that might have added a column or changed column storage. + * Do an implicit CREATE TOAST TABLE if we executed any subcommands that + * might have added a column or changed column storage. */ foreach(ltab, *wqueue) { @@ -2190,7 +2181,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) case AT_EnableTrigUser: /* ENABLE TRIGGER USER */ ATExecEnableDisableTrigger(rel, NULL, true, true); break; - case AT_DisableTrigUser: /* DISABLE TRIGGER USER */ + case AT_DisableTrigUser: /* DISABLE TRIGGER USER */ ATExecEnableDisableTrigger(rel, NULL, false, true); break; default: /* oops */ @@ -2200,8 +2191,8 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) } /* - * Bump the command counter to ensure the next subcommand in the - * sequence can see the changes so far + * Bump the command counter to ensure the next subcommand in the sequence + * can see the changes so far */ CommandCounterIncrement(); } @@ -2220,8 +2211,8 @@ ATRewriteTables(List **wqueue) AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab); /* - * We only need to rewrite the table if at least one column needs - * to be recomputed. + * We only need to rewrite the table if at least one column needs to + * be recomputed. */ if (tab->newvals != NIL) { @@ -2236,8 +2227,8 @@ ATRewriteTables(List **wqueue) /* * We can never allow rewriting of shared or nailed-in-cache - * relations, because we can't support changing their - * relfilenode values. + * relations, because we can't support changing their relfilenode + * values. 
*/ if (OldHeap->rd_rel->relisshared || OldHeap->rd_isnailed) ereport(ERROR, @@ -2246,13 +2237,13 @@ ATRewriteTables(List **wqueue) RelationGetRelationName(OldHeap)))); /* - * Don't allow rewrite on temp tables of other backends ... - * their local buffer manager is not going to cope. + * Don't allow rewrite on temp tables of other backends ... their + * local buffer manager is not going to cope. */ if (isOtherTempNamespace(RelationGetNamespace(OldHeap))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot rewrite temporary tables of other sessions"))); + errmsg("cannot rewrite temporary tables of other sessions"))); /* * Select destination tablespace (same as original unless user @@ -2267,12 +2258,11 @@ ATRewriteTables(List **wqueue) /* * Create the new heap, using a temporary name in the same - * namespace as the existing table. NOTE: there is some risk - * of collision with user relnames. Working around this seems - * more trouble than it's worth; in particular, we can't - * create the new heap in a different namespace from the old, - * or we will have problems with the TEMP status of temp - * tables. + * namespace as the existing table. NOTE: there is some risk of + * collision with user relnames. Working around this seems more + * trouble than it's worth; in particular, we can't create the new + * heap in a different namespace from the old, or we will have + * problems with the TEMP status of temp tables. */ snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tab->relid); @@ -2304,8 +2294,8 @@ ATRewriteTables(List **wqueue) /* performDeletion does CommandCounterIncrement at end */ /* - * Rebuild each index on the relation (but not the toast - * table, which is all-new anyway). We do not need + * Rebuild each index on the relation (but not the toast table, + * which is all-new anyway). We do not need * CommandCounterIncrement() because reindex_relation does it. */ reindex_relation(tab->relid, false); @@ -2313,16 +2303,15 @@ ATRewriteTables(List **wqueue) else { /* - * Test the current data within the table against new - * constraints generated by ALTER TABLE commands, but don't - * rebuild data. + * Test the current data within the table against new constraints + * generated by ALTER TABLE commands, but don't rebuild data. */ if (tab->constraints != NIL) ATRewriteTable(tab, InvalidOid); /* - * If we had SET TABLESPACE but no reason to reconstruct - * tuples, just do a block-by-block copy. + * If we had SET TABLESPACE but no reason to reconstruct tuples, + * just do a block-by-block copy. */ if (tab->newTableSpace) ATExecSetTableSpace(tab->relid, tab->newTableSpace); @@ -2331,10 +2320,10 @@ ATRewriteTables(List **wqueue) /* * Foreign key constraints are checked in a final pass, since (a) it's - * generally best to examine each one separately, and (b) it's at - * least theoretically possible that we have changed both relations of - * the foreign key, and we'd better have finished both rewrites before - * we try to read the tables. + * generally best to examine each one separately, and (b) it's at least + * theoretically possible that we have changed both relations of the + * foreign key, and we'd better have finished both rewrites before we try + * to read the tables. */ foreach(ltab, *wqueue) { @@ -2401,12 +2390,12 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) newrel = NULL; /* - * If we need to rewrite the table, the operation has to be propagated - * to tables that use this table's rowtype as a column type. 
+ * If we need to rewrite the table, the operation has to be propagated to + * tables that use this table's rowtype as a column type. * - * (Eventually this will probably become true for scans as well, but at - * the moment a composite type does not enforce any constraints, so - * it's not necessary/appropriate to enforce them just during ALTER.) + * (Eventually this will probably become true for scans as well, but at the + * moment a composite type does not enforce any constraints, so it's not + * necessary/appropriate to enforce them just during ALTER.) */ if (newrel) find_composite_type_dependencies(oldrel->rd_rel->reltype, @@ -2461,15 +2450,15 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) HeapScanDesc scan; HeapTuple tuple; MemoryContext oldCxt; - List *dropped_attrs = NIL; - ListCell *lc; + List *dropped_attrs = NIL; + ListCell *lc; econtext = GetPerTupleExprContext(estate); /* - * Make tuple slots for old and new tuples. Note that even when - * the tuples are the same, the tupDescs might not be (consider - * ADD COLUMN without a default). + * Make tuple slots for old and new tuples. Note that even when the + * tuples are the same, the tupDescs might not be (consider ADD COLUMN + * without a default). */ oldslot = MakeSingleTupleTableSlot(oldTupDesc); newslot = MakeSingleTupleTableSlot(newTupDesc); @@ -2483,9 +2472,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) /* * Any attributes that are dropped according to the new tuple - * descriptor can be set to NULL. We precompute the list of - * dropped attributes to avoid needing to do so in the - * per-tuple loop. + * descriptor can be set to NULL. We precompute the list of dropped + * attributes to avoid needing to do so in the per-tuple loop. */ for (i = 0; i < newTupDesc->natts; i++) { @@ -2500,8 +2488,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) scan = heap_beginscan(oldrel, SnapshotNow, 0, NULL); /* - * Switch to per-tuple memory context and reset it for each - * tuple produced, so we don't leak memory. + * Switch to per-tuple memory context and reset it for each tuple + * produced, so we don't leak memory. */ oldCxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -2509,7 +2497,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) { if (newrel) { - Oid tupOid = InvalidOid; + Oid tupOid = InvalidOid; /* Extract data from old tuple */ heap_deform_tuple(tuple, oldTupDesc, values, isnull); @@ -2517,12 +2505,12 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) tupOid = HeapTupleGetOid(tuple); /* Set dropped attributes to null in new tuple */ - foreach (lc, dropped_attrs) + foreach(lc, dropped_attrs) isnull[lfirst_int(lc)] = true; /* - * Process supplied expressions to replace selected - * columns. Expression inputs come from the old tuple. + * Process supplied expressions to replace selected columns. + * Expression inputs come from the old tuple. */ ExecStoreTuple(tuple, oldslot, InvalidBuffer, false); econtext->ecxt_scantuple = oldslot; @@ -2533,14 +2521,13 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, - &isnull[ex->attnum - 1], + &isnull[ex->attnum - 1], NULL); } /* - * Form the new tuple. Note that we don't explicitly - * pfree it, since the per-tuple memory context will - * be reset shortly. + * Form the new tuple. Note that we don't explicitly pfree it, + * since the per-tuple memory context will be reset shortly. 
*/ tuple = heap_form_tuple(newTupDesc, values, isnull); @@ -2575,10 +2562,10 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) &isnull); if (isnull) ereport(ERROR, - (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" contains null values", - get_attname(tab->relid, - con->attnum)))); + (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" contains null values", + get_attname(tab->relid, + con->attnum)))); } break; case CONSTR_FOREIGN: @@ -2706,9 +2693,9 @@ ATSimpleRecursion(List **wqueue, Relation rel, children = find_all_inheritors(relid); /* - * find_all_inheritors does the recursive search of the - * inheritance hierarchy, so all we have to do is process all of - * the relids in the list that it returns. + * find_all_inheritors does the recursive search of the inheritance + * hierarchy, so all we have to do is process all of the relids in the + * list that it returns. */ foreach(child, children) { @@ -2775,8 +2762,8 @@ find_composite_type_dependencies(Oid typeOid, const char *origTblName) HeapTuple depTup; /* - * We scan pg_depend to find those things that depend on the rowtype. - * (We assume we can ignore refobjsubid for a rowtype.) + * We scan pg_depend to find those things that depend on the rowtype. (We + * assume we can ignore refobjsubid for a rowtype.) */ depRel = heap_open(DependRelationId, AccessShareLock); @@ -2819,9 +2806,8 @@ find_composite_type_dependencies(Oid typeOid, const char *origTblName) else if (OidIsValid(rel->rd_rel->reltype)) { /* - * A view or composite type itself isn't a problem, but we - * must recursively check for indirect dependencies via its - * rowtype. + * A view or composite type itself isn't a problem, but we must + * recursively check for indirect dependencies via its rowtype. */ find_composite_type_dependencies(rel->rd_rel->reltype, origTblName); @@ -2851,9 +2837,9 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, /* * Recurse to add the column to child classes, if requested. * - * We must recurse one level at a time, so that multiply-inheriting - * children are visited the right number of times and end up with the - * right attinhcount. + * We must recurse one level at a time, so that multiply-inheriting children + * are visited the right number of times and end up with the right + * attinhcount. */ if (recurse) { @@ -2871,8 +2857,8 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, else { /* - * If we are told not to recurse, there had better not be any - * child tables; else the addition would put them out of step. + * If we are told not to recurse, there had better not be any child + * tables; else the addition would put them out of step. */ if (find_inheritance_children(RelationGetRelid(rel)) != NIL) ereport(ERROR, @@ -2903,8 +2889,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, attrdesc = heap_open(AttributeRelationId, RowExclusiveLock); /* - * Are we adding the column to a recursion child? If so, check - * whether to merge with an existing definition for the column. + * Are we adding the column to a recursion child? If so, check whether to + * merge with an existing definition for the column. 
*/ if (colDef->inhcount > 0) { @@ -2922,7 +2908,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("child table \"%s\" has different type for column \"%s\"", - RelationGetRelationName(rel), colDef->colname))); + RelationGetRelationName(rel), colDef->colname))); /* Bump the existing child att's inhcount */ childatt->attinhcount++; @@ -2933,8 +2919,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, /* Inform the user about the merge */ ereport(NOTICE, - (errmsg("merging definition of column \"%s\" for child \"%s\"", - colDef->colname, RelationGetRelationName(rel)))); + (errmsg("merging definition of column \"%s\" for child \"%s\"", + colDef->colname, RelationGetRelationName(rel)))); heap_close(attrdesc, RowExclusiveLock); return; @@ -2950,9 +2936,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, elog(ERROR, "cache lookup failed for relation %u", myrelid); /* - * this test is deliberately not attisdropped-aware, since if one - * tries to add a column matching a dropped column name, it's gonna - * fail anyway. + * this test is deliberately not attisdropped-aware, since if one tries to + * add a column matching a dropped column name, it's gonna fail anyway. */ if (SearchSysCacheExists(ATTNAME, ObjectIdGetDatum(myrelid), @@ -3054,30 +3039,30 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, /* * Tell Phase 3 to fill in the default expression, if there is one. * - * If there is no default, Phase 3 doesn't have to do anything, because - * that effectively means that the default is NULL. The heap tuple - * access routines always check for attnum > # of attributes in tuple, - * and return NULL if so, so without any modification of the tuple - * data we will get the effect of NULL values in the new column. + * If there is no default, Phase 3 doesn't have to do anything, because that + * effectively means that the default is NULL. The heap tuple access + * routines always check for attnum > # of attributes in tuple, and return + * NULL if so, so without any modification of the tuple data we will get + * the effect of NULL values in the new column. * - * An exception occurs when the new column is of a domain type: the - * domain might have a NOT NULL constraint, or a check constraint that - * indirectly rejects nulls. If there are any domain constraints then - * we construct an explicit NULL default value that will be passed through - * CoerceToDomain processing. (This is a tad inefficient, since it - * causes rewriting the table which we really don't have to do, but - * the present design of domain processing doesn't offer any simple way - * of checking the constraints more directly.) + * An exception occurs when the new column is of a domain type: the domain + * might have a NOT NULL constraint, or a check constraint that indirectly + * rejects nulls. If there are any domain constraints then we construct + * an explicit NULL default value that will be passed through + * CoerceToDomain processing. (This is a tad inefficient, since it causes + * rewriting the table which we really don't have to do, but the present + * design of domain processing doesn't offer any simple way of checking + * the constraints more directly.) * * Note: we use build_column_default, and not just the cooked default - * returned by AddRelationRawConstraints, so that the right thing - * happens when a datatype's default applies. + * returned by AddRelationRawConstraints, so that the right thing happens + * when a datatype's default applies. 
*/ defval = (Expr *) build_column_default(rel, attribute->attnum); if (!defval && GetDomainConstraints(typeOid) != NIL) { - Oid basetype = getBaseType(typeOid); + Oid basetype = getBaseType(typeOid); defval = (Expr *) makeNullConst(basetype); defval = (Expr *) coerce_to_target_type(NULL, @@ -3355,8 +3340,8 @@ ATPrepSetStatistics(Relation rel, const char *colName, Node *flagValue) { /* * We do our own permission checking because (a) we want to allow SET - * STATISTICS on indexes (for expressional index columns), and (b) we - * want to allow SET STATISTICS on system catalogs without requiring + * STATISTICS on indexes (for expressional index columns), and (b) we want + * to allow SET STATISTICS on system catalogs without requiring * allowSystemTableMods to be turned on. */ if (rel->rd_rel->relkind != RELKIND_RELATION && @@ -3481,8 +3466,8 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue) colName))); /* - * safety check: do not allow toasted storage modes unless column - * datatype is TOAST-aware. + * safety check: do not allow toasted storage modes unless column datatype + * is TOAST-aware. */ if (newstorage == 'p' || TypeIsToastable(attrtuple->atttypid)) attrtuple->attstorage = newstorage; @@ -3560,8 +3545,8 @@ ATExecDropColumn(Relation rel, const char *colName, /* * Propagate to children as appropriate. Unlike most other ALTER - * routines, we have to do this one level of recursion at a time; we - * can't use find_all_inheritors to do it in one pass. + * routines, we have to do this one level of recursion at a time; we can't + * use find_all_inheritors to do it in one pass. */ children = find_inheritance_children(RelationGetRelid(rel)); @@ -3593,8 +3578,8 @@ ATExecDropColumn(Relation rel, const char *colName, { /* * If the child column has other definition sources, just - * decrement its inheritance count; if not, recurse to - * delete it. + * decrement its inheritance count; if not, recurse to delete + * it. */ if (childatt->attinhcount == 1 && !childatt->attislocal) { @@ -3618,9 +3603,9 @@ ATExecDropColumn(Relation rel, const char *colName, else { /* - * If we were told to drop ONLY in this table (no - * recursion), we need to mark the inheritors' attribute - * as locally defined rather than inherited. + * If we were told to drop ONLY in this table (no recursion), + * we need to mark the inheritors' attribute as locally + * defined rather than inherited. */ childatt->attinhcount--; childatt->attislocal = true; @@ -3661,7 +3646,7 @@ ATExecDropColumn(Relation rel, const char *colName, class_rel = heap_open(RelationRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, - ObjectIdGetDatum(RelationGetRelid(rel)), + ObjectIdGetDatum(RelationGetRelid(rel)), 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", @@ -3734,8 +3719,8 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint) /* * Currently, we only expect to see CONSTR_CHECK nodes * arriving here (see the preprocessing done in - * parser/analyze.c). Use a switch anyway to make it - * easier to add more code later. + * parser/analyze.c). Use a switch anyway to make it easier + * to add more code later. */ switch (constr->contype) { @@ -3745,12 +3730,11 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint) ListCell *lcon; /* - * Call AddRelationRawConstraints to do the - * work. It returns a list of cooked - * constraints. + * Call AddRelationRawConstraints to do the work. + * It returns a list of cooked constraints. 
*/ newcons = AddRelationRawConstraints(rel, NIL, - list_make1(constr)); + list_make1(constr)); /* Add each constraint to Phase 3's queue */ foreach(lcon, newcons) { @@ -3798,7 +3782,7 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint) else fkconstraint->constr_name = ChooseConstraintName(RelationGetRelationName(rel), - strVal(linitial(fkconstraint->fk_attrs)), + strVal(linitial(fkconstraint->fk_attrs)), "fkey", RelationGetNamespace(rel), NIL); @@ -3838,19 +3822,19 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, Oid constrOid; /* - * Grab an exclusive lock on the pk table, so that someone doesn't - * delete rows out from under us. (Although a lesser lock would do for - * that purpose, we'll need exclusive lock anyway to add triggers to - * the pk table; trying to start with a lesser lock will just create a - * risk of deadlock.) + * Grab an exclusive lock on the pk table, so that someone doesn't delete + * rows out from under us. (Although a lesser lock would do for that + * purpose, we'll need exclusive lock anyway to add triggers to the pk + * table; trying to start with a lesser lock will just create a risk of + * deadlock.) */ pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock); /* * Validity and permissions checks * - * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER, - * but we may as well error out sooner instead of later. + * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER, but + * we may as well error out sooner instead of later. */ if (pkrel->rd_rel->relkind != RELKIND_RELATION) ereport(ERROR, @@ -3877,12 +3861,12 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, RelationGetRelationName(rel)); /* - * Disallow reference from permanent table to temp table or vice - * versa. (The ban on perm->temp is for fairly obvious reasons. The - * ban on temp->perm is because other backends might need to run the - * RI triggers on the perm table, but they can't reliably see tuples - * the owning backend has created in the temp table, because - * non-shared buffers are used for temp tables.) + * Disallow reference from permanent table to temp table or vice versa. + * (The ban on perm->temp is for fairly obvious reasons. The ban on + * temp->perm is because other backends might need to run the RI triggers + * on the perm table, but they can't reliably see tuples the owning + * backend has created in the temp table, because non-shared buffers are + * used for temp tables.) */ if (isTempNamespace(RelationGetNamespace(pkrel))) { @@ -3900,8 +3884,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, } /* - * Look up the referencing attributes to make sure they exist, and - * record their attnums and type OIDs. + * Look up the referencing attributes to make sure they exist, and record + * their attnums and type OIDs. */ MemSet(pkattnum, 0, sizeof(pkattnum)); MemSet(fkattnum, 0, sizeof(fkattnum)); @@ -3914,11 +3898,10 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkattnum, fktypoid); /* - * If the attribute list for the referenced table was omitted, lookup - * the definition of the primary key and use it. Otherwise, validate - * the supplied attribute list. In either case, discover the index - * OID and index opclasses, and the attnums and type OIDs of the - * attributes. + * If the attribute list for the referenced table was omitted, lookup the + * definition of the primary key and use it. Otherwise, validate the + * supplied attribute list. 
In either case, discover the index OID and + * index opclasses, and the attnums and type OIDs of the attributes. */ if (fkconstraint->pk_attrs == NIL) { @@ -3946,15 +3929,15 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, for (i = 0; i < numpks; i++) { /* - * pktypoid[i] is the primary key table's i'th key's type - * fktypoid[i] is the foreign key table's i'th key's type + * pktypoid[i] is the primary key table's i'th key's type fktypoid[i] + * is the foreign key table's i'th key's type * - * Note that we look for an operator with the PK type on the left; - * when the types are different this is critical because the PK - * index will need operators with the indexkey on the left. - * (Ordinarily both commutator operators will exist if either - * does, but we won't get the right answer from the test below on - * opclass membership unless we select the proper operator.) + * Note that we look for an operator with the PK type on the left; when + * the types are different this is critical because the PK index will + * need operators with the indexkey on the left. (Ordinarily both + * commutator operators will exist if either does, but we won't get + * the right answer from the test below on opclass membership unless + * we select the proper operator.) */ Operator o = oper(list_make1(makeString("=")), pktypoid[i], fktypoid[i], true); @@ -3967,15 +3950,15 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkconstraint->constr_name), errdetail("Key columns \"%s\" and \"%s\" " "are of incompatible types: %s and %s.", - strVal(list_nth(fkconstraint->fk_attrs, i)), - strVal(list_nth(fkconstraint->pk_attrs, i)), + strVal(list_nth(fkconstraint->fk_attrs, i)), + strVal(list_nth(fkconstraint->pk_attrs, i)), format_type_be(fktypoid[i]), format_type_be(pktypoid[i])))); /* - * Check that the found operator is compatible with the PK index, - * and generate a warning if not, since otherwise costly seqscans - * will be incurred to check FK validity. + * Check that the found operator is compatible with the PK index, and + * generate a warning if not, since otherwise costly seqscans will be + * incurred to check FK validity. */ if (!op_in_opclass(oprid(o), opclasses[i])) ereport(WARNING, @@ -3984,8 +3967,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkconstraint->constr_name), errdetail("Key columns \"%s\" and \"%s\" " "are of different types: %s and %s.", - strVal(list_nth(fkconstraint->fk_attrs, i)), - strVal(list_nth(fkconstraint->pk_attrs, i)), + strVal(list_nth(fkconstraint->fk_attrs, i)), + strVal(list_nth(fkconstraint->pk_attrs, i)), format_type_be(fktypoid[i]), format_type_be(pktypoid[i])))); @@ -3993,8 +3976,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, } /* - * Tell Phase 3 to check that the constraint is satisfied by existing - * rows (we can skip this during table creation). + * Tell Phase 3 to check that the constraint is satisfied by existing rows + * (we can skip this during table creation). 
*/ if (!fkconstraint->skip_validation) { @@ -4072,8 +4055,8 @@ transformColumnNameList(Oid relId, List *colList, if (attnum >= INDEX_MAX_KEYS) ereport(ERROR, (errcode(ERRCODE_TOO_MANY_COLUMNS), - errmsg("cannot have more than %d keys in a foreign key", - INDEX_MAX_KEYS))); + errmsg("cannot have more than %d keys in a foreign key", + INDEX_MAX_KEYS))); attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum; atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid; ReleaseSysCache(atttuple); @@ -4111,9 +4094,9 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, int i; /* - * Get the list of index OIDs for the table from the relcache, and - * look up each one in the pg_index syscache until we find one marked - * primary key (hopefully there isn't more than one such). + * Get the list of index OIDs for the table from the relcache, and look up + * each one in the pg_index syscache until we find one marked primary key + * (hopefully there isn't more than one such). */ *indexOid = InvalidOid; @@ -4145,8 +4128,8 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, if (!OidIsValid(*indexOid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("there is no primary key for referenced table \"%s\"", - RelationGetRelationName(pkrel)))); + errmsg("there is no primary key for referenced table \"%s\"", + RelationGetRelationName(pkrel)))); /* Must get indclass the hard way */ indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple, @@ -4167,7 +4150,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, atttypids[i] = attnumTypeId(pkrel, pkattno); opclasses[i] = indclass->values[i]; *attnamelist = lappend(*attnamelist, - makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno))))); + makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno))))); } ReleaseSysCache(indexTuple); @@ -4194,9 +4177,9 @@ transformFkeyCheckAttrs(Relation pkrel, ListCell *indexoidscan; /* - * Get the list of index OIDs for the table from the relcache, and - * look up each one in the pg_index syscache, and match unique indexes - * to the list of attnums we are given. + * Get the list of index OIDs for the table from the relcache, and look up + * each one in the pg_index syscache, and match unique indexes to the list + * of attnums we are given. */ indexoidlist = RelationGetIndexList(pkrel); @@ -4235,8 +4218,8 @@ transformFkeyCheckAttrs(Relation pkrel, indclass = (oidvector *) DatumGetPointer(indclassDatum); /* - * The given attnum list may match the index columns in any - * order. Check that each list is a subset of the other. + * The given attnum list may match the index columns in any order. + * Check that each list is a subset of the other. */ for (i = 0; i < numattrs; i++) { @@ -4312,9 +4295,9 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint, return; /* - * Scan through each tuple, calling RI_FKey_check_ins (insert trigger) - * as if that tuple had just been inserted. If any of those fail, it - * should ereport(ERROR) and that's that. + * Scan through each tuple, calling RI_FKey_check_ins (insert trigger) as + * if that tuple had just been inserted. If any of those fail, it should + * ereport(ERROR) and that's that. 
*/ MemSet(&trig, 0, sizeof(trig)); trig.tgoid = InvalidOid; @@ -4326,8 +4309,8 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint, trig.tginitdeferred = FALSE; trig.tgargs = (char **) palloc(sizeof(char *) * - (4 + list_length(fkconstraint->fk_attrs) - + list_length(fkconstraint->pk_attrs))); + (4 + list_length(fkconstraint->fk_attrs) + + list_length(fkconstraint->pk_attrs))); trig.tgargs[0] = trig.tgname; trig.tgargs[1] = RelationGetRelationName(rel); @@ -4426,9 +4409,9 @@ CreateFKCheckTrigger(RangeVar *myRel, FkConstraint *fkconstraint, fk_trigger->args = lappend(fk_trigger->args, makeString(myRel->relname)); fk_trigger->args = lappend(fk_trigger->args, - makeString(fkconstraint->pktable->relname)); + makeString(fkconstraint->pktable->relname)); fk_trigger->args = lappend(fk_trigger->args, - makeString(fkMatchTypeToString(fkconstraint->fk_matchtype))); + makeString(fkMatchTypeToString(fkconstraint->fk_matchtype))); if (list_length(fkconstraint->fk_attrs) != list_length(fkconstraint->pk_attrs)) ereport(ERROR, (errcode(ERRCODE_INVALID_FOREIGN_KEY), @@ -4465,8 +4448,7 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, constrobj; /* - * Reconstruct a RangeVar for my relation (not passed in, - * unfortunately). + * Reconstruct a RangeVar for my relation (not passed in, unfortunately). */ myRel = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)), pstrdup(RelationGetRelationName(rel))); @@ -4484,8 +4466,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, CommandCounterIncrement(); /* - * Build and execute a CREATE CONSTRAINT TRIGGER statement for the - * CHECK action for both INSERTs and UPDATEs on the referencing table. + * Build and execute a CREATE CONSTRAINT TRIGGER statement for the CHECK + * action for both INSERTs and UPDATEs on the referencing table. 
*/ CreateFKCheckTrigger(myRel, fkconstraint, &constrobj, &trigobj, true); CreateFKCheckTrigger(myRel, fkconstraint, &constrobj, &trigobj, false); @@ -4543,9 +4525,9 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, fk_trigger->args = lappend(fk_trigger->args, makeString(myRel->relname)); fk_trigger->args = lappend(fk_trigger->args, - makeString(fkconstraint->pktable->relname)); + makeString(fkconstraint->pktable->relname)); fk_trigger->args = lappend(fk_trigger->args, - makeString(fkMatchTypeToString(fkconstraint->fk_matchtype))); + makeString(fkMatchTypeToString(fkconstraint->fk_matchtype))); forboth(fk_attr, fkconstraint->fk_attrs, pk_attr, fkconstraint->pk_attrs) { @@ -4613,9 +4595,9 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, fk_trigger->args = lappend(fk_trigger->args, makeString(myRel->relname)); fk_trigger->args = lappend(fk_trigger->args, - makeString(fkconstraint->pktable->relname)); + makeString(fkconstraint->pktable->relname)); fk_trigger->args = lappend(fk_trigger->args, - makeString(fkMatchTypeToString(fkconstraint->fk_matchtype))); + makeString(fkMatchTypeToString(fkconstraint->fk_matchtype))); forboth(fk_attr, fkconstraint->fk_attrs, pk_attr, fkconstraint->pk_attrs) { @@ -4690,8 +4672,8 @@ ATExecDropConstraint(Relation rel, const char *constrName, /* Otherwise if more than one constraint deleted, notify */ else if (deleted > 1) ereport(NOTICE, - (errmsg("multiple constraints named \"%s\" were dropped", - constrName))); + (errmsg("multiple constraints named \"%s\" were dropped", + constrName))); } } @@ -4750,12 +4732,12 @@ ATPrepAlterColumnType(List **wqueue, CheckAttributeType(colName, targettype); /* - * Set up an expression to transform the old data value to the new - * type. If a USING option was given, transform and use that - * expression, else just take the old value and try to coerce it. We - * do this first so that type incompatibility can be detected before - * we waste effort, and because we need the expression to be parsed - * against the original table rowtype. + * Set up an expression to transform the old data value to the new type. + * If a USING option was given, transform and use that expression, else + * just take the old value and try to coerce it. We do this first so that + * type incompatibility can be detected before we waste effort, and + * because we need the expression to be parsed against the original table + * rowtype. */ if (cmd->transform) { @@ -4775,17 +4757,17 @@ ATPrepAlterColumnType(List **wqueue, if (expression_returns_set(transform)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("transform expression must not return a set"))); + errmsg("transform expression must not return a set"))); /* No subplans or aggregates, either... */ if (pstate->p_hasSubLinks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in transform expression"))); + errmsg("cannot use subquery in transform expression"))); if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in transform expression"))); + errmsg("cannot use aggregate function in transform expression"))); } else { @@ -4818,9 +4800,9 @@ ATPrepAlterColumnType(List **wqueue, ReleaseSysCache(tuple); /* - * The recursion case is handled by ATSimpleRecursion. However, if we - * are told not to recurse, there had better not be any child tables; - * else the alter would put them out of step. + * The recursion case is handled by ATSimpleRecursion. 
However, if we are + * told not to recurse, there had better not be any child tables; else the + * alter would put them out of step. */ if (recurse) ATSimpleRecursion(wqueue, rel, cmd, recurse); @@ -4875,17 +4857,16 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, targettype = HeapTupleGetOid(typeTuple); /* - * If there is a default expression for the column, get it and ensure - * we can coerce it to the new datatype. (We must do this before - * changing the column type, because build_column_default itself will - * try to coerce, and will not issue the error message we want if it - * fails.) + * If there is a default expression for the column, get it and ensure we + * can coerce it to the new datatype. (We must do this before changing + * the column type, because build_column_default itself will try to + * coerce, and will not issue the error message we want if it fails.) * - * We remove any implicit coercion steps at the top level of the old - * default expression; this has been agreed to satisfy the principle - * of least surprise. (The conversion to the new column type should - * act like it started from what the user sees as the stored expression, - * and the implicit coercions aren't going to be shown.) + * We remove any implicit coercion steps at the top level of the old default + * expression; this has been agreed to satisfy the principle of least + * surprise. (The conversion to the new column type should act like it + * started from what the user sees as the stored expression, and the + * implicit coercions aren't going to be shown.) */ if (attTup->atthasdef) { @@ -4893,32 +4874,32 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, Assert(defaultexpr); defaultexpr = strip_implicit_coercions(defaultexpr); defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */ - defaultexpr, exprType(defaultexpr), + defaultexpr, exprType(defaultexpr), targettype, typename->typmod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST); if (defaultexpr == NULL) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("default for column \"%s\" cannot be cast to type \"%s\"", - colName, TypeNameToString(typename)))); + errmsg("default for column \"%s\" cannot be cast to type \"%s\"", + colName, TypeNameToString(typename)))); } else defaultexpr = NULL; /* - * Find everything that depends on the column (constraints, indexes, - * etc), and record enough information to let us recreate the objects. + * Find everything that depends on the column (constraints, indexes, etc), + * and record enough information to let us recreate the objects. * * The actual recreation does not happen here, but only after we have - * performed all the individual ALTER TYPE operations. We have to - * save the info before executing ALTER TYPE, though, else the - * deparser will get confused. + * performed all the individual ALTER TYPE operations. We have to save + * the info before executing ALTER TYPE, though, else the deparser will + * get confused. * - * There could be multiple entries for the same object, so we must check - * to ensure we process each one only once. Note: we assume that an - * index that implements a constraint will not show a direct - * dependency on the column. + * There could be multiple entries for the same object, so we must check to + * ensure we process each one only once. Note: we assume that an index + * that implements a constraint will not show a direct dependency on the + * column. 
*/ depRel = heap_open(DependRelationId, RowExclusiveLock); @@ -4963,16 +4944,16 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) { tab->changedIndexOids = lappend_oid(tab->changedIndexOids, - foundObject.objectId); + foundObject.objectId); tab->changedIndexDefs = lappend(tab->changedIndexDefs, - pg_get_indexdef_string(foundObject.objectId)); + pg_get_indexdef_string(foundObject.objectId)); } } else if (relKind == RELKIND_SEQUENCE) { /* - * This must be a SERIAL column's sequence. We - * need not do anything to it. + * This must be a SERIAL column's sequence. We need + * not do anything to it. */ Assert(foundObject.objectSubId == 0); } @@ -4990,9 +4971,9 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId)) { tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, - foundObject.objectId); + foundObject.objectId); tab->changedConstraintDefs = lappend(tab->changedConstraintDefs, - pg_get_constraintdef_string(foundObject.objectId)); + pg_get_constraintdef_string(foundObject.objectId)); } break; @@ -5009,8 +4990,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, case OCLASS_DEFAULT: /* - * Ignore the column's default expression, since we will - * fix it below. + * Ignore the column's default expression, since we will fix + * it below. */ Assert(defaultexpr); break; @@ -5026,8 +5007,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, case OCLASS_SCHEMA: /* - * We don't expect any of these sorts of objects to depend - * on a column. + * We don't expect any of these sorts of objects to depend on + * a column. */ elog(ERROR, "unexpected object depending on column: %s", getObjectDescription(&foundObject)); @@ -5043,8 +5024,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, /* * Now scan for dependencies of this column on other things. The only - * thing we should find is the dependency on the column datatype, - * which we want to remove. + * thing we should find is the dependency on the column datatype, which we + * want to remove. */ ScanKeyInit(&key[0], Anum_pg_depend_classid, @@ -5105,17 +5086,16 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype); /* - * Drop any pg_statistic entry for the column, since it's now wrong - * type + * Drop any pg_statistic entry for the column, since it's now wrong type */ RemoveStatistics(RelationGetRelid(rel), attnum); /* - * Update the default, if present, by brute force --- remove and - * re-add the default. Probably unsafe to take shortcuts, since the - * new version may well have additional dependencies. (It's okay to - * do this now, rather than after other ALTER TYPE commands, since the - * default won't depend on other column types.) + * Update the default, if present, by brute force --- remove and re-add + * the default. Probably unsafe to take shortcuts, since the new version + * may well have additional dependencies. (It's okay to do this now, + * rather than after other ALTER TYPE commands, since the default won't + * depend on other column types.) */ if (defaultexpr) { @@ -5123,8 +5103,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, CommandCounterIncrement(); /* - * We use RESTRICT here for safety, but at present we do not - * expect anything to depend on the default. 
+ * We use RESTRICT here for safety, but at present we do not expect + * anything to depend on the default. */ RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true); @@ -5147,12 +5127,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab) ListCell *l; /* - * Re-parse the index and constraint definitions, and attach them to - * the appropriate work queue entries. We do this before dropping - * because in the case of a FOREIGN KEY constraint, we might not yet - * have exclusive lock on the table the constraint is attached to, and - * we need to get that before dropping. It's safe because the parser - * won't actually look at the catalogs to detect the existing entry. + * Re-parse the index and constraint definitions, and attach them to the + * appropriate work queue entries. We do this before dropping because in + * the case of a FOREIGN KEY constraint, we might not yet have exclusive + * lock on the table the constraint is attached to, and we need to get + * that before dropping. It's safe because the parser won't actually look + * at the catalogs to detect the existing entry. */ foreach(l, tab->changedIndexDefs) ATPostAlterTypeParse((char *) lfirst(l), wqueue); @@ -5160,10 +5140,10 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab) ATPostAlterTypeParse((char *) lfirst(l), wqueue); /* - * Now we can drop the existing constraints and indexes --- - * constraints first, since some of them might depend on the indexes. - * It should be okay to use DROP_RESTRICT here, since nothing else - * should be depending on these objects. + * Now we can drop the existing constraints and indexes --- constraints + * first, since some of them might depend on the indexes. It should be + * okay to use DROP_RESTRICT here, since nothing else should be depending + * on these objects. */ foreach(l, tab->changedConstraintOids) { @@ -5182,8 +5162,8 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab) } /* - * The objects will get recreated during subsequent passes over the - * work queue. + * The objects will get recreated during subsequent passes over the work + * queue. */ } @@ -5195,8 +5175,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) ListCell *list_item; /* - * We expect that we only have to do raw parsing and parse analysis, - * not any rule rewriting, since these will all be utility statements. + * We expect that we only have to do raw parsing and parse analysis, not + * any rule rewriting, since these will all be utility statements. */ raw_parsetree_list = raw_parser(cmd); querytree_list = NIL; @@ -5209,9 +5189,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) } /* - * Attach each generated command to the proper place in the work - * queue. Note this could result in creation of entirely new - * work-queue entries. + * Attach each generated command to the proper place in the work queue. + * Note this could result in creation of entirely new work-queue entries. */ foreach(list_item, querytree_list) { @@ -5294,8 +5273,8 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing) Form_pg_class tuple_class; /* - * Get exclusive lock till end of transaction on the target table. - * Use relation_open so that we can work on indexes and sequences. + * Get exclusive lock till end of transaction on the target table. Use + * relation_open so that we can work on indexes and sequences. 
*/ target_rel = relation_open(relationOid, AccessExclusiveLock); @@ -5368,11 +5347,11 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing) /* Superusers can always do it */ if (!superuser()) { - Oid namespaceOid = tuple_class->relnamespace; + Oid namespaceOid = tuple_class->relnamespace; AclResult aclresult; /* Otherwise, must be owner of the existing object */ - if (!pg_class_ownercheck(relationOid,GetUserId())) + if (!pg_class_ownercheck(relationOid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(target_rel)); @@ -5426,9 +5405,9 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing) AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId); /* - * If we are operating on a table, also change the ownership of - * any indexes and sequences that belong to the table, as well as - * the table's toast table (if it has one) + * If we are operating on a table, also change the ownership of any + * indexes and sequences that belong to the table, as well as the + * table's toast table (if it has one) */ if (tuple_class->relkind == RELKIND_RELATION || tuple_class->relkind == RELKIND_TOASTVALUE) @@ -5475,23 +5454,23 @@ change_owner_recurse_to_sequences(Oid relationOid, Oid newOwnerId) { Relation depRel; SysScanDesc scan; - ScanKeyData key[2]; + ScanKeyData key[2]; HeapTuple tup; /* - * SERIAL sequences are those having an internal dependency on one - * of the table's columns (we don't care *which* column, exactly). + * SERIAL sequences are those having an internal dependency on one of the + * table's columns (we don't care *which* column, exactly). */ depRel = heap_open(DependRelationId, AccessShareLock); ScanKeyInit(&key[0], - Anum_pg_depend_refclassid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationRelationId)); + Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); ScanKeyInit(&key[1], - Anum_pg_depend_refobjid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(relationOid)); + Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relationOid)); /* we leave refobjsubid unspecified */ scan = systable_beginscan(depRel, DependReferenceIndexId, true, @@ -5605,7 +5584,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) if (!OidIsValid(tablespaceId)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace \"%s\" does not exist", tablespacename))); + errmsg("tablespace \"%s\" does not exist", tablespacename))); /* Check its permissions */ aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); @@ -5616,7 +5595,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) if (OidIsValid(tab->newTableSpace)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("cannot have multiple SET TABLESPACE subcommands"))); + errmsg("cannot have multiple SET TABLESPACE subcommands"))); tab->newTableSpace = tablespaceId; } @@ -5650,13 +5629,13 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace) RelationGetRelationName(rel)))); /* - * Don't allow moving temp tables of other backends ... their local - * buffer manager is not going to cope. + * Don't allow moving temp tables of other backends ... their local buffer + * manager is not going to cope. 
*/ if (isOtherTempNamespace(RelationGetNamespace(rel))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move temporary tables of other sessions"))); + errmsg("cannot move temporary tables of other sessions"))); /* * No work if no change in tablespace. @@ -5738,16 +5717,16 @@ copy_relation_data(Relation rel, SMgrRelation dst) Page page = (Page) buf; /* - * Since we copy the file directly without looking at the shared - * buffers, we'd better first flush out any pages of the source - * relation that are in shared buffers. We assume no new changes - * will be made while we are holding exclusive lock on the rel. + * Since we copy the file directly without looking at the shared buffers, + * we'd better first flush out any pages of the source relation that are + * in shared buffers. We assume no new changes will be made while we are + * holding exclusive lock on the rel. */ FlushRelationBuffers(rel); /* - * We need to log the copied data in WAL iff WAL archiving is enabled - * AND it's not a temp rel. + * We need to log the copied data in WAL iff WAL archiving is enabled AND + * it's not a temp rel. */ use_wal = XLogArchivingActive() && !rel->rd_istemp; @@ -5791,27 +5770,26 @@ copy_relation_data(Relation rel, SMgrRelation dst) } /* - * Now write the page. We say isTemp = true even if it's not a - * temp rel, because there's no need for smgr to schedule an fsync - * for this write; we'll do it ourselves below. + * Now write the page. We say isTemp = true even if it's not a temp + * rel, because there's no need for smgr to schedule an fsync for this + * write; we'll do it ourselves below. */ smgrwrite(dst, blkno, buf, true); } /* - * If the rel isn't temp, we must fsync it down to disk before it's - * safe to commit the transaction. (For a temp rel we don't care - * since the rel will be uninteresting after a crash anyway.) + * If the rel isn't temp, we must fsync it down to disk before it's safe + * to commit the transaction. (For a temp rel we don't care since the rel + * will be uninteresting after a crash anyway.) * - * It's obvious that we must do this when not WAL-logging the copy. It's - * less obvious that we have to do it even if we did WAL-log the - * copied pages. The reason is that since we're copying outside - * shared buffers, a CHECKPOINT occurring during the copy has no way - * to flush the previously written data to disk (indeed it won't know - * the new rel even exists). A crash later on would replay WAL from - * the checkpoint, therefore it wouldn't replay our earlier WAL - * entries. If we do not fsync those pages here, they might still not - * be on disk when the crash occurs. + * It's obvious that we must do this when not WAL-logging the copy. It's less + * obvious that we have to do it even if we did WAL-log the copied pages. + * The reason is that since we're copying outside shared buffers, a + * CHECKPOINT occurring during the copy has no way to flush the previously + * written data to disk (indeed it won't know the new rel even exists). A + * crash later on would replay WAL from the checkpoint, therefore it + * wouldn't replay our earlier WAL entries. If we do not fsync those pages + * here, they might still not be on disk when the crash occurs. */ if (!rel->rd_istemp) smgrimmedsync(dst); @@ -5855,21 +5833,21 @@ AlterTableCreateToastTable(Oid relOid, bool silent) toastobject; /* - * Grab an exclusive lock on the target table, which we will NOT - * release until end of transaction. (This is probably redundant in - * all present uses...) 
+ * Grab an exclusive lock on the target table, which we will NOT release + * until end of transaction. (This is probably redundant in all present + * uses...) */ rel = heap_open(relOid, AccessExclusiveLock); /* * Toast table is shared if and only if its parent is. * - * We cannot allow toasting a shared relation after initdb (because - * there's no way to mark it toasted in other databases' pg_class). - * Unfortunately we can't distinguish initdb from a manually started - * standalone backend (toasting happens after the bootstrap phase, so - * checking IsBootstrapProcessingMode() won't work). However, we can - * at least prevent this mistake under normal multi-user operation. + * We cannot allow toasting a shared relation after initdb (because there's + * no way to mark it toasted in other databases' pg_class). Unfortunately + * we can't distinguish initdb from a manually started standalone backend + * (toasting happens after the bootstrap phase, so checking + * IsBootstrapProcessingMode() won't work). However, we can at least + * prevent this mistake under normal multi-user operation. */ shared_relation = rel->rd_rel->relisshared; if (shared_relation && IsUnderPostmaster) @@ -5944,11 +5922,10 @@ AlterTableCreateToastTable(Oid relOid, bool silent) tupdesc->attrs[2]->attstorage = 'p'; /* - * Note: the toast relation is placed in the regular pg_toast - * namespace even if its master relation is a temp table. There - * cannot be any naming collision, and the toast rel will be destroyed - * when its master is, so there's no need to handle the toast rel as - * temp. + * Note: the toast relation is placed in the regular pg_toast namespace + * even if its master relation is a temp table. There cannot be any + * naming collision, and the toast rel will be destroyed when its master + * is, so there's no need to handle the toast rel as temp. */ toast_relid = heap_create_with_catalog(toast_relname, PG_TOAST_NAMESPACE, @@ -5971,11 +5948,11 @@ AlterTableCreateToastTable(Oid relOid, bool silent) * * NOTE: the normal TOAST access routines could actually function with a * single-column index on chunk_id only. However, the slice access - * routines use both columns for faster access to an individual chunk. - * In addition, we want it to be unique as a check against the - * possibility of duplicate TOAST chunk OIDs. The index might also be - * a little more efficient this way, since btree isn't all that happy - * with large numbers of equal keys. + * routines use both columns for faster access to an individual chunk. In + * addition, we want it to be unique as a check against the possibility of + * duplicate TOAST chunk OIDs. The index might also be a little more + * efficient this way, since btree isn't all that happy with large numbers + * of equal keys. */ indexInfo = makeNode(IndexInfo); @@ -6000,8 +5977,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent) /* * Update toast rel's pg_class entry to show that it has an index. The - * index OID is stored into the reltoastidxid field for easy access by - * the tuple toaster. + * index OID is stored into the reltoastidxid field for easy access by the + * tuple toaster. 
*/ setRelhasindex(toast_relid, true, true, toast_idxid); @@ -6142,7 +6119,7 @@ AlterTableNamespace(RangeVar *relation, const char *newschema) if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move objects into or out of temporary schemas"))); + errmsg("cannot move objects into or out of temporary schemas"))); /* same for TOAST schema */ if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE) @@ -6182,7 +6159,7 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid, Oid oldNspOid, Oid newNspOid, bool hasDependEntry) { - HeapTuple classTup; + HeapTuple classTup; Form_pg_class classForm; classTup = SearchSysCacheCopy(RELOID, @@ -6236,12 +6213,12 @@ AlterIndexNamespaces(Relation classRel, Relation rel, foreach(l, indexList) { - Oid indexOid = lfirst_oid(l); + Oid indexOid = lfirst_oid(l); /* - * Note: currently, the index will not have its own dependency - * on the namespace, so we don't need to do changeDependencyFor(). - * There's no rowtype in pg_type, either. + * Note: currently, the index will not have its own dependency on the + * namespace, so we don't need to do changeDependencyFor(). There's no + * rowtype in pg_type, either. */ AlterRelationNamespaceInternal(classRel, indexOid, oldNspOid, newNspOid, @@ -6264,12 +6241,12 @@ AlterSeqNamespaces(Relation classRel, Relation rel, { Relation depRel; SysScanDesc scan; - ScanKeyData key[2]; + ScanKeyData key[2]; HeapTuple tup; /* - * SERIAL sequences are those having an internal dependency on one - * of the table's columns (we don't care *which* column, exactly). + * SERIAL sequences are those having an internal dependency on one of the + * table's columns (we don't care *which* column, exactly). */ depRel = heap_open(DependRelationId, AccessShareLock); @@ -6313,9 +6290,10 @@ AlterSeqNamespaces(Relation classRel, Relation rel, AlterRelationNamespaceInternal(classRel, depForm->objid, oldNspOid, newNspOid, true); + /* - * Sequences have entries in pg_type. We need to be careful - * to move them to the new namespace, too. + * Sequences have entries in pg_type. We need to be careful to move + * them to the new namespace, too. */ AlterTypeNamespaceInternal(RelationGetForm(seqRel)->reltype, newNspOid, false); @@ -6348,8 +6326,8 @@ register_on_commit_action(Oid relid, OnCommitAction action) MemoryContext oldcxt; /* - * We needn't bother registering the relation unless there is an ON - * COMMIT action we need to take. + * We needn't bother registering the relation unless there is an ON COMMIT + * action we need to take. */ if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS) return; @@ -6429,8 +6407,8 @@ PreCommit_on_commit_actions(void) /* * Note that table deletion will call - * remove_on_commit_action, so the entry should get - * marked as deleted. + * remove_on_commit_action, so the entry should get marked + * as deleted. */ Assert(oc->deleting_subid != InvalidSubTransactionId); break; @@ -6440,7 +6418,7 @@ PreCommit_on_commit_actions(void) if (oids_to_truncate != NIL) { heap_truncate(oids_to_truncate); - CommandCounterIncrement(); /* XXX needed? */ + CommandCounterIncrement(); /* XXX needed? 
*/ } } diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 4bf2a4777f3..f83d1ab8843 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -37,7 +37,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.27 2005/08/30 01:08:47 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.28 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -67,7 +67,7 @@ /* GUC variable */ -char *default_tablespace = NULL; +char *default_tablespace = NULL; static bool remove_tablespace_directories(Oid tablespaceoid, bool redo); @@ -118,9 +118,9 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) if (errno == ENOENT) { /* - * Acquire ExclusiveLock on pg_tablespace to ensure that no - * DROP TABLESPACE or TablespaceCreateDbspace is running - * concurrently. Simple reads from pg_tablespace are OK. + * Acquire ExclusiveLock on pg_tablespace to ensure that no DROP + * TABLESPACE or TablespaceCreateDbspace is running concurrently. + * Simple reads from pg_tablespace are OK. */ Relation rel; @@ -130,8 +130,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) rel = NULL; /* - * Recheck to see if someone created the directory while we - * were waiting for lock. + * Recheck to see if someone created the directory while we were + * waiting for lock. */ if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode)) { @@ -147,22 +147,22 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) if (errno != ENOENT || !isRedo) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - dir))); + errmsg("could not create directory \"%s\": %m", + dir))); /* Try to make parent directory too */ parentdir = pstrdup(dir); get_parent_directory(parentdir); if (mkdir(parentdir, S_IRWXU) < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - parentdir))); + errmsg("could not create directory \"%s\": %m", + parentdir))); pfree(parentdir); if (mkdir(dir, S_IRWXU) < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - dir))); + errmsg("could not create directory \"%s\": %m", + dir))); } } @@ -209,7 +209,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) Oid tablespaceoid; char *location; char *linkloc; - Oid ownerId; + Oid ownerId; /* validate */ @@ -238,7 +238,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) if (strchr(location, '\'')) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("tablespace location may not contain single quotes"))); + errmsg("tablespace location may not contain single quotes"))); /* * Allowing relative paths seems risky @@ -251,9 +251,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) errmsg("tablespace location must be an absolute path"))); /* - * Check that location isn't too long. Remember that we're going to - * append '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole - * path explicitly? This may be overly conservative.) + * Check that location isn't too long. Remember that we're going to append + * '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole path + * explicitly? This may be overly conservative.) 
*/ if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10)) ereport(ERROR, @@ -270,7 +270,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable tablespace name \"%s\"", stmt->tablespacename), - errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); + errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); /* * Check that there is no other tablespace by this name. (The unique @@ -284,9 +284,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) stmt->tablespacename))); /* - * Insert tuple into pg_tablespace. The purpose of doing this first - * is to lock the proposed tablename against other would-be creators. - * The insertion will roll back if we find problems below. + * Insert tuple into pg_tablespace. The purpose of doing this first is to + * lock the proposed tablename against other would-be creators. The + * insertion will roll back if we find problems below. */ rel = heap_open(TableSpaceRelationId, RowExclusiveLock); @@ -312,14 +312,14 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) recordDependencyOnOwner(TableSpaceRelationId, tablespaceoid, ownerId); /* - * Attempt to coerce target directory to safe permissions. If this - * fails, it doesn't exist or has the wrong owner. + * Attempt to coerce target directory to safe permissions. If this fails, + * it doesn't exist or has the wrong owner. */ if (chmod(location, 0700) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not set permissions on directory \"%s\": %m", - location))); + errmsg("could not set permissions on directory \"%s\": %m", + location))); /* * Check the target directory is empty. @@ -331,11 +331,11 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) location))); /* - * Create the PG_VERSION file in the target directory. This has - * several purposes: to make sure we can write in the directory, to - * prevent someone from creating another tablespace pointing at the - * same directory (the emptiness check above will fail), and to label - * tablespace directories by PG version. + * Create the PG_VERSION file in the target directory. This has several + * purposes: to make sure we can write in the directory, to prevent + * someone from creating another tablespace pointing at the same directory + * (the emptiness check above will fail), and to label tablespace + * directories by PG version. */ set_short_version(location); @@ -375,7 +375,6 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) /* We keep the lock on pg_tablespace until commit */ heap_close(rel, NoLock); - #else /* !HAVE_SYMLINK */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -403,9 +402,8 @@ DropTableSpace(DropTableSpaceStmt *stmt) PreventTransactionChain((void *) stmt, "DROP TABLESPACE"); /* - * Acquire ExclusiveLock on pg_tablespace to ensure that no one else - * is trying to do DROP TABLESPACE or TablespaceCreateDbspace - * concurrently. + * Acquire ExclusiveLock on pg_tablespace to ensure that no one else is + * trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently. 
*/ rel = heap_open(TableSpaceRelationId, ExclusiveLock); @@ -439,8 +437,7 @@ DropTableSpace(DropTableSpaceStmt *stmt) tablespacename); /* - * Remove the pg_tablespace tuple (this will roll back if we fail - * below) + * Remove the pg_tablespace tuple (this will roll back if we fail below) */ simple_heap_delete(rel, &tuple->t_self); @@ -476,7 +473,6 @@ DropTableSpace(DropTableSpaceStmt *stmt) /* We keep the lock on pg_tablespace until commit */ heap_close(rel, NoLock); - #else /* !HAVE_SYMLINK */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -504,17 +500,17 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo) sprintf(location, "pg_tblspc/%u", tablespaceoid); /* - * Check if the tablespace still contains any files. We try to rmdir - * each per-database directory we find in it. rmdir failure implies - * there are still files in that subdirectory, so give up. (We do not - * have to worry about undoing any already completed rmdirs, since the - * next attempt to use the tablespace from that database will simply - * recreate the subdirectory via TablespaceCreateDbspace.) + * Check if the tablespace still contains any files. We try to rmdir each + * per-database directory we find in it. rmdir failure implies there are + * still files in that subdirectory, so give up. (We do not have to worry + * about undoing any already completed rmdirs, since the next attempt to + * use the tablespace from that database will simply recreate the + * subdirectory via TablespaceCreateDbspace.) * * Since we hold exclusive lock, no one else should be creating any fresh - * subdirectories in parallel. It is possible that new files are - * being created within subdirectories, though, so the rmdir call - * could fail. Worst consequence is a less friendly error message. + * subdirectories in parallel. It is possible that new files are being + * created within subdirectories, though, so the rmdir call could fail. + * Worst consequence is a less friendly error message. */ dirdesc = AllocateDir(location); if (dirdesc == NULL) @@ -558,8 +554,8 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo) FreeDir(dirdesc); /* - * Okay, try to unlink PG_VERSION (we allow it to not be there, even - * in non-REDO case, for robustness). + * Okay, try to unlink PG_VERSION (we allow it to not be there, even in + * non-REDO case, for robustness). */ subfile = palloc(strlen(location) + 11 + 1); sprintf(subfile, "%s/PG_VERSION", location); @@ -577,9 +573,9 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo) /* * Okay, try to remove the symlink. We must however deal with the - * possibility that it's a directory instead of a symlink --- this - * could happen during WAL replay (see TablespaceCreateDbspace), and - * it is also the normal case on Windows. + * possibility that it's a directory instead of a symlink --- this could + * happen during WAL replay (see TablespaceCreateDbspace), and it is also + * the normal case on Windows. 
*/ if (lstat(location, &st) == 0 && S_ISDIR(st.st_mode)) { @@ -725,7 +721,7 @@ RenameTableSpace(const char *oldname, const char *newname) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable tablespace name \"%s\"", newname), - errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); + errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); /* Make sure the new name doesn't exist */ ScanKeyInit(&entry[0], @@ -802,13 +798,13 @@ AlterTableSpaceOwner(const char *name, Oid newOwnerId) check_is_member_of_role(GetUserId(), newOwnerId); /* - * Normally we would also check for create permissions here, - * but there are none for tablespaces so we follow what rename - * tablespace does and omit the create permissions check. + * Normally we would also check for create permissions here, but there + * are none for tablespaces so we follow what rename tablespace does + * and omit the create permissions check. * - * NOTE: Only superusers may create tablespaces to begin with and - * so initially only a superuser would be able to change its - * ownership anyway. + * NOTE: Only superusers may create tablespaces to begin with and so + * initially only a superuser would be able to change its ownership + * anyway. */ memset(repl_null, ' ', sizeof(repl_null)); @@ -860,7 +856,7 @@ assign_default_tablespace(const char *newval, bool doit, GucSource source) { /* * If we aren't inside a transaction, we cannot do database access so - * cannot verify the name. Must accept the value on faith. + * cannot verify the name. Must accept the value on faith. */ if (IsTransactionState()) { @@ -895,15 +891,16 @@ GetDefaultTablespace(void) /* Fast path for default_tablespace == "" */ if (default_tablespace == NULL || default_tablespace[0] == '\0') return InvalidOid; + /* * It is tempting to cache this lookup for more speed, but then we would - * fail to detect the case where the tablespace was dropped since the - * GUC variable was set. Note also that we don't complain if the value - * fails to refer to an existing tablespace; we just silently return - * InvalidOid, causing the new object to be created in the database's - * tablespace. + * fail to detect the case where the tablespace was dropped since the GUC + * variable was set. Note also that we don't complain if the value fails + * to refer to an existing tablespace; we just silently return InvalidOid, + * causing the new object to be created in the database's tablespace. */ result = get_tablespace_oid(default_tablespace); + /* * Allow explicit specification of database's default tablespace in * default_tablespace without triggering permissions checks. @@ -1001,14 +998,14 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record) char *linkloc; /* - * Attempt to coerce target directory to safe permissions. If - * this fails, it doesn't exist or has the wrong owner. + * Attempt to coerce target directory to safe permissions. If this + * fails, it doesn't exist or has the wrong owner. 
*/ if (chmod(location, 0700) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not set permissions on directory \"%s\": %m", - location))); + errmsg("could not set permissions on directory \"%s\": %m", + location))); /* Create or re-create the PG_VERSION file in the target directory */ set_short_version(location); @@ -1022,8 +1019,8 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record) if (errno != EEXIST) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create symbolic link \"%s\": %m", - linkloc))); + errmsg("could not create symbolic link \"%s\": %m", + linkloc))); } pfree(linkloc); diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index b3caaa4ce3c..a3f7c37dc28 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.194 2005/08/24 17:38:35 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.195 2005/10/15 02:49:15 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -52,7 +52,7 @@ static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata, Instrumentation *instr, MemoryContext per_tuple_context); static void AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event, - bool row_trigger, HeapTuple oldtup, HeapTuple newtup); + bool row_trigger, HeapTuple oldtup, HeapTuple newtup); /* @@ -98,15 +98,14 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) { /* * If this trigger is a constraint (and a foreign key one) then we - * really need a constrrelid. Since we don't have one, we'll try - * to generate one from the argument information. + * really need a constrrelid. Since we don't have one, we'll try to + * generate one from the argument information. * * This is really just a workaround for a long-ago pg_dump bug that * omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER - * commands. We don't want to bomb out completely here if we - * can't determine the correct relation, because that would - * prevent loading the dump file. Instead, NOTICE here and ERROR - * in the trigger. + * commands. We don't want to bomb out completely here if we can't + * determine the correct relation, because that would prevent loading + * the dump file. Instead, NOTICE here and ERROR in the trigger. */ bool needconstrrelid = false; void *elem = NULL; @@ -181,8 +180,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) } /* - * Generate the trigger's OID now, so that we can use it in the name - * if needed. + * Generate the trigger's OID now, so that we can use it in the name if + * needed. */ tgrel = heap_open(TriggerRelationId, RowExclusiveLock); @@ -190,9 +189,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) /* * If trigger is an RI constraint, use specified trigger name as - * constraint name and build a unique trigger name instead. This is - * mainly for backwards compatibility with CREATE CONSTRAINT TRIGGER - * commands. + * constraint name and build a unique trigger name instead. This is mainly + * for backwards compatibility with CREATE CONSTRAINT TRIGGER commands. */ if (stmt->isconstraint) { @@ -246,10 +244,10 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) } /* - * Scan pg_trigger for existing triggers on relation. 
We do this - * mainly because we must count them; a secondary benefit is to give a - * nice error message if there's already a trigger of the same name. - * (The unique index on tgrelid/tgname would complain anyway.) + * Scan pg_trigger for existing triggers on relation. We do this mainly + * because we must count them; a secondary benefit is to give a nice error + * message if there's already a trigger of the same name. (The unique + * index on tgrelid/tgname would complain anyway.) * * NOTE that this is cool only because we have AccessExclusiveLock on the * relation, so the trigger set won't be changing underneath us. @@ -267,8 +265,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) if (namestrcmp(&(pg_trigger->tgname), trigname) == 0) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("trigger \"%s\" for relation \"%s\" already exists", - trigname, stmt->relation->relname))); + errmsg("trigger \"%s\" for relation \"%s\" already exists", + trigname, stmt->relation->relname))); found++; } systable_endscan(tgscan); @@ -281,8 +279,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) if (funcrettype != TRIGGEROID) { /* - * We allow OPAQUE just so we can load old dump files. When we - * see a trigger function declared OPAQUE, change it to TRIGGER. + * We allow OPAQUE just so we can load old dump files. When we see a + * trigger function declared OPAQUE, change it to TRIGGER. */ if (funcrettype == OPAQUEOID) { @@ -305,13 +303,13 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel)); values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein, - CStringGetDatum(trigname)); + CStringGetDatum(trigname)); values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid); values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype); values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true); values[Anum_pg_trigger_tgisconstraint - 1] = BoolGetDatum(stmt->isconstraint); values[Anum_pg_trigger_tgconstrname - 1] = DirectFunctionCall1(namein, - CStringGetDatum(constrname)); + CStringGetDatum(constrname)); values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid); values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable); values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred); @@ -351,13 +349,13 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) } values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs); values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain, - CStringGetDatum(args)); + CStringGetDatum(args)); } else { values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0); values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain, - CStringGetDatum("")); + CStringGetDatum("")); } /* tgattr is currently always a zero-length array */ tgattr = buildint2vector(NULL, 0); @@ -386,9 +384,9 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1])); /* - * Update relation's pg_class entry. Crucial side-effect: other - * backends (and this one too!) are sent SI message to make them - * rebuild relcache entries. + * Update relation's pg_class entry. Crucial side-effect: other backends + * (and this one too!) are sent SI message to make them rebuild relcache + * entries. 
*/ pgrel = heap_open(RelationRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, @@ -409,19 +407,18 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint) /* * We used to try to update the rel's relcache entry here, but that's - * fairly pointless since it will happen as a byproduct of the - * upcoming CommandCounterIncrement... + * fairly pointless since it will happen as a byproduct of the upcoming + * CommandCounterIncrement... */ /* - * Record dependencies for trigger. Always place a normal dependency - * on the function. If we are doing this in response to an explicit - * CREATE TRIGGER command, also make trigger be auto-dropped if its - * relation is dropped or if the FK relation is dropped. (Auto drop - * is compatible with our pre-7.3 behavior.) If the trigger is being - * made for a constraint, we can skip the relation links; the - * dependency on the constraint will indirectly depend on the - * relations. + * Record dependencies for trigger. Always place a normal dependency on + * the function. If we are doing this in response to an explicit CREATE + * TRIGGER command, also make trigger be auto-dropped if its relation is + * dropped or if the FK relation is dropped. (Auto drop is compatible + * with our pre-7.3 behavior.) If the trigger is being made for a + * constraint, we can skip the relation links; the dependency on the + * constraint will indirectly depend on the relations. */ referenced.classId = ProcedureRelationId; referenced.objectId = funcoid; @@ -565,13 +562,12 @@ RemoveTriggerById(Oid trigOid) heap_close(tgrel, RowExclusiveLock); /* - * Update relation's pg_class entry. Crucial side-effect: other - * backends (and this one too!) are sent SI message to make them - * rebuild relcache entries. + * Update relation's pg_class entry. Crucial side-effect: other backends + * (and this one too!) are sent SI message to make them rebuild relcache + * entries. * - * Note this is OK only because we have AccessExclusiveLock on the rel, - * so no one else is creating/deleting triggers on this rel at the - * same time. + * Note this is OK only because we have AccessExclusiveLock on the rel, so no + * one else is creating/deleting triggers on this rel at the same time. */ pgrel = heap_open(RelationRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, @@ -623,16 +619,16 @@ renametrig(Oid relid, ScanKeyData key[2]; /* - * Grab an exclusive lock on the target table, which we will NOT - * release until end of transaction. + * Grab an exclusive lock on the target table, which we will NOT release + * until end of transaction. */ targetrel = heap_open(relid, AccessExclusiveLock); /* - * Scan pg_trigger twice for existing triggers on relation. We do - * this in order to ensure a trigger does not exist with newname (The - * unique index on tgrelid/tgname would complain anyway) and to ensure - * a trigger does exist with oldname. + * Scan pg_trigger twice for existing triggers on relation. We do this in + * order to ensure a trigger does not exist with newname (The unique index + * on tgrelid/tgname would complain anyway) and to ensure a trigger does + * exist with oldname. * * NOTE that this is cool only because we have AccessExclusiveLock on the * relation, so the trigger set won't be changing underneath us. 
@@ -655,8 +651,8 @@ renametrig(Oid relid, if (HeapTupleIsValid(tuple = systable_getnext(tgscan))) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("trigger \"%s\" for relation \"%s\" already exists", - newname, RelationGetRelationName(targetrel)))); + errmsg("trigger \"%s\" for relation \"%s\" already exists", + newname, RelationGetRelationName(targetrel)))); systable_endscan(tgscan); /* @@ -687,10 +683,9 @@ renametrig(Oid relid, CatalogUpdateIndexes(tgrel, tuple); /* - * Invalidate relation's relcache entry so that other backends - * (and this one too!) are sent SI message to make them rebuild - * relcache entries. (Ideally this should happen - * automatically...) + * Invalidate relation's relcache entry so that other backends (and + * this one too!) are sent SI message to make them rebuild relcache + * entries. (Ideally this should happen automatically...) */ CacheInvalidateRelcache(targetrel); } @@ -732,13 +727,13 @@ void EnableDisableTrigger(Relation rel, const char *tgname, bool enable, bool skip_system) { - Relation tgrel; - int nkeys; + Relation tgrel; + int nkeys; ScanKeyData keys[2]; SysScanDesc tgscan; - HeapTuple tuple; - bool found; - bool changed; + HeapTuple tuple; + bool found; + bool changed; /* Scan the relevant entries in pg_triggers */ tgrel = heap_open(TriggerRelationId, RowExclusiveLock); @@ -775,8 +770,8 @@ EnableDisableTrigger(Relation rel, const char *tgname, if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system trigger", - NameStr(oldtrig->tgname)))); + errmsg("permission denied: \"%s\" is a system trigger", + NameStr(oldtrig->tgname)))); } found = true; @@ -784,7 +779,7 @@ EnableDisableTrigger(Relation rel, const char *tgname, if (oldtrig->tgenabled != enable) { /* need to change this one ... make a copy to scribble on */ - HeapTuple newtup = heap_copytuple(tuple); + HeapTuple newtup = heap_copytuple(tuple); Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup); newtrig->tgenabled = enable; @@ -848,10 +843,10 @@ RelationBuildTriggers(Relation relation) triggers = (Trigger *) palloc(ntrigs * sizeof(Trigger)); /* - * Note: since we scan the triggers using TriggerRelidNameIndexId, we - * will be reading the triggers in name order, except possibly during - * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This - * in turn ensures that triggers will be fired in name order. + * Note: since we scan the triggers using TriggerRelidNameIndexId, we will + * be reading the triggers in name order, except possibly during + * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in + * turn ensures that triggers will be fired in name order. */ ScanKeyInit(&skey, Anum_pg_trigger_tgrelid, @@ -874,7 +869,7 @@ RelationBuildTriggers(Relation relation) build->tgoid = HeapTupleGetOid(htup); build->tgname = DatumGetCString(DirectFunctionCall1(nameout, - NameGetDatum(&pg_trigger->tgname))); + NameGetDatum(&pg_trigger->tgname))); build->tgfoid = pg_trigger->tgfoid; build->tgtype = pg_trigger->tgtype; build->tgenabled = pg_trigger->tgenabled; @@ -1183,12 +1178,12 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2) j; /* - * We need not examine the "index" data, just the trigger array - * itself; if we have the same triggers with the same types, the - * derived index data should match. + * We need not examine the "index" data, just the trigger array itself; if + * we have the same triggers with the same types, the derived index data + * should match. 
* - * As of 7.3 we assume trigger set ordering is significant in the - * comparison; so we just compare corresponding slots of the two sets. + * As of 7.3 we assume trigger set ordering is significant in the comparison; + * so we just compare corresponding slots of the two sets. */ if (trigdesc1 != NULL) { @@ -1279,9 +1274,9 @@ ExecCallTriggerFunc(TriggerData *trigdata, /* * Do the function evaluation in the per-tuple memory context, so that - * leaked memory will be reclaimed once per tuple. Note in particular - * that any new tuple created by the trigger function will live till - * the end of the tuple cycle. + * leaked memory will be reclaimed once per tuple. Note in particular that + * any new tuple created by the trigger function will live till the end of + * the tuple cycle. */ oldContext = MemoryContextSwitchTo(per_tuple_context); @@ -1295,8 +1290,8 @@ ExecCallTriggerFunc(TriggerData *trigdata, MemoryContextSwitchTo(oldContext); /* - * Trigger protocol allows function to return a null pointer, but NOT - * to set the isnull result flag. + * Trigger protocol allows function to return a null pointer, but NOT to + * set the isnull result flag. */ if (fcinfo.isnull) ereport(ERROR, @@ -1305,8 +1300,8 @@ ExecCallTriggerFunc(TriggerData *trigdata, fcinfo.flinfo->fn_oid))); /* - * If doing EXPLAIN ANALYZE, stop charging time to this trigger, - * and count one "tuple returned" (really the number of firings). + * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count + * one "tuple returned" (really the number of firings). */ if (instr) InstrStopNode(instr + tgindx, true); @@ -1359,7 +1354,7 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo) if (newtuple) ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - errmsg("BEFORE STATEMENT trigger cannot return a value"))); + errmsg("BEFORE STATEMENT trigger cannot return a value"))); } } @@ -1470,7 +1465,7 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo) if (newtuple) ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - errmsg("BEFORE STATEMENT trigger cannot return a value"))); + errmsg("BEFORE STATEMENT trigger cannot return a value"))); } } @@ -1601,7 +1596,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo) if (newtuple) ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - errmsg("BEFORE STATEMENT trigger cannot return a value"))); + errmsg("BEFORE STATEMENT trigger cannot return a value"))); } } @@ -1703,7 +1698,7 @@ GetTupleForTrigger(EState *estate, ResultRelInfo *relinfo, if (newSlot != NULL) { - HTSU_Result test; + HTSU_Result test; ItemPointerData update_ctid; TransactionId update_xmax; @@ -1751,8 +1746,8 @@ ltrmark:; } /* - * if tuple was deleted or PlanQual failed for updated - * tuple - we have not process this tuple! + * if tuple was deleted or PlanQual failed for updated tuple - + * we have not process this tuple! */ return NULL; @@ -1799,7 +1794,7 @@ ltrmark:; * they will easily go away during subtransaction abort. * * Because the list of pending events can grow large, we go to some effort - * to minimize memory consumption. We do not use the generic List mechanism + * to minimize memory consumption. We do not use the generic List mechanism * but thread the events manually. 
* * XXX We need to be able to save the per-event data in a file if it grows too @@ -1832,7 +1827,7 @@ typedef struct SetConstraintStateData bool all_isdeferred; int numstates; /* number of trigstates[] entries in use */ int numalloc; /* allocated size of trigstates[] */ - SetConstraintTriggerData trigstates[1]; /* VARIABLE LENGTH ARRAY */ + SetConstraintTriggerData trigstates[1]; /* VARIABLE LENGTH ARRAY */ } SetConstraintStateData; typedef SetConstraintStateData *SetConstraintState; @@ -1849,12 +1844,12 @@ typedef struct AfterTriggerEventData *AfterTriggerEvent; typedef struct AfterTriggerEventData { - AfterTriggerEvent ate_next; /* list link */ - TriggerEvent ate_event; /* event type and status bits */ - CommandId ate_firing_id; /* ID for firing cycle */ - Oid ate_tgoid; /* the trigger's ID */ - Oid ate_relid; /* the relation it's on */ - ItemPointerData ate_oldctid; /* specific tuple(s) involved */ + AfterTriggerEvent ate_next; /* list link */ + TriggerEvent ate_event; /* event type and status bits */ + CommandId ate_firing_id; /* ID for firing cycle */ + Oid ate_tgoid; /* the trigger's ID */ + Oid ate_relid; /* the relation it's on */ + ItemPointerData ate_oldctid; /* specific tuple(s) involved */ ItemPointerData ate_newctid; } AfterTriggerEventData; @@ -1873,7 +1868,7 @@ typedef struct AfterTriggerEventList * * firing_counter is incremented for each call of afterTriggerInvokeEvents. * We mark firable events with the current firing cycle's ID so that we can - * tell which ones to work on. This ensures sane behavior if a trigger + * tell which ones to work on. This ensures sane behavior if a trigger * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will * only fire those events that weren't already scheduled for firing. * @@ -1881,7 +1876,7 @@ typedef struct AfterTriggerEventList * This is saved and restored across failed subtransactions. * * events is the current list of deferred events. This is global across - * all subtransactions of the current transaction. In a subtransaction + * all subtransactions of the current transaction. In a subtransaction * abort, we know that the events added by the subtransaction are at the * end of the list, so it is relatively easy to discard them. * @@ -1908,31 +1903,31 @@ typedef struct AfterTriggerEventList * which we similarly use to clean up at subtransaction abort. * * firing_stack is a stack of copies of subtransaction-start-time - * firing_counter. We use this to recognize which deferred triggers were + * firing_counter. We use this to recognize which deferred triggers were * fired (or marked for firing) within an aborted subtransaction. * * We use GetCurrentTransactionNestLevel() to determine the correct array * index in these stacks. maxtransdepth is the number of allocated entries in - * each stack. (By not keeping our own stack pointer, we can avoid trouble + * each stack. (By not keeping our own stack pointer, we can avoid trouble * in cases where errors during subxact abort cause multiple invocations * of AfterTriggerEndSubXact() at the same nesting depth.) 
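The comments above describe how pending after-trigger events are threaded manually through their ate_next links instead of using the generic List mechanism, with head and tail kept in AfterTriggerEventList so that appends are cheap and a subtransaction's additions sit contiguously at the end, where they are easy to discard on abort. A minimal standalone sketch of that list style follows; the reduced field set and names are hypothetical, and malloc/free stand in for the backend's memory contexts.

#include <stdlib.h>

/* Simplified stand-ins for AfterTriggerEventData/AfterTriggerEventList. */
typedef struct EventData
{
    struct EventData *next;          /* manual list link (cf. ate_next) */
    unsigned int      event;         /* event type and status bits */
    unsigned int      firing_id;     /* firing cycle that claimed it */
} EventData;

typedef struct EventList
{
    EventData *head;
    EventData *tail;                 /* kept so appends are O(1) */
} EventList;

/* Append one event at the tail; new events always go at the end, which is
 * what makes a subtransaction's additions contiguous and easy to discard. */
static void
event_list_append(EventList *list, unsigned int event)
{
    EventData *ev = calloc(1, sizeof(EventData));

    if (ev == NULL)
        abort();                     /* error handling simplified */
    ev->event = event;
    if (list->tail)
        list->tail->next = ev;
    else
        list->head = ev;
    list->tail = ev;
}

/* Discard everything added after 'saved_tail', which was remembered at
 * subtransaction start (NULL means the list was empty at that point). */
static void
event_list_truncate(EventList *list, EventData *saved_tail)
{
    EventData *ev = saved_tail ? saved_tail->next : list->head;

    while (ev)
    {
        EventData *next = ev->next;

        free(ev);
        ev = next;
    }
    if (saved_tail)
        saved_tail->next = NULL;
    else
        list->head = NULL;
    list->tail = saved_tail;
}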
*/ typedef struct AfterTriggersData { - CommandId firing_counter; /* next firing ID to assign */ - SetConstraintState state; /* the active S C state */ + CommandId firing_counter; /* next firing ID to assign */ + SetConstraintState state; /* the active S C state */ AfterTriggerEventList events; /* deferred-event list */ - int query_depth; /* current query list index */ - AfterTriggerEventList *query_stack; /* events pending from each query */ - int maxquerydepth; /* allocated len of above array */ + int query_depth; /* current query list index */ + AfterTriggerEventList *query_stack; /* events pending from each query */ + int maxquerydepth; /* allocated len of above array */ /* these fields are just for resetting at subtrans abort: */ SetConstraintState *state_stack; /* stacked S C states */ - AfterTriggerEventList *events_stack; /* stacked list pointers */ - int *depth_stack; /* stacked query_depths */ - CommandId *firing_stack; /* stacked firing_counters */ - int maxtransdepth; /* allocated len of above arrays */ + AfterTriggerEventList *events_stack; /* stacked list pointers */ + int *depth_stack; /* stacked query_depths */ + CommandId *firing_stack; /* stacked firing_counters */ + int maxtransdepth; /* allocated len of above arrays */ } AfterTriggersData; typedef AfterTriggersData *AfterTriggers; @@ -1941,14 +1936,14 @@ static AfterTriggers afterTriggers; static void AfterTriggerExecute(AfterTriggerEvent event, - Relation rel, TriggerDesc *trigdesc, - FmgrInfo *finfo, - Instrumentation *instr, - MemoryContext per_tuple_context); + Relation rel, TriggerDesc *trigdesc, + FmgrInfo *finfo, + Instrumentation *instr, + MemoryContext per_tuple_context); static SetConstraintState SetConstraintStateCreate(int numalloc); static SetConstraintState SetConstraintStateCopy(SetConstraintState state); static SetConstraintState SetConstraintStateAddItem(SetConstraintState state, - Oid tgoid, bool tgisdeferred); + Oid tgoid, bool tgisdeferred); /* ---------- @@ -2075,8 +2070,8 @@ AfterTriggerExecute(AfterTriggerEvent event, elog(ERROR, "could not find trigger %u", tgoid); /* - * If doing EXPLAIN ANALYZE, start charging time to this trigger. - * We want to include time spent re-fetching tuples in the trigger cost. + * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want + * to include time spent re-fetching tuples in the trigger cost. */ if (instr) InstrStartNode(instr + tgindx); @@ -2133,8 +2128,8 @@ AfterTriggerExecute(AfterTriggerEvent event, MemoryContextReset(per_tuple_context); /* - * Call the trigger and throw away any possibly returned updated - * tuple. (Don't let ExecCallTriggerFunc measure EXPLAIN time.) + * Call the trigger and throw away any possibly returned updated tuple. + * (Don't let ExecCallTriggerFunc measure EXPLAIN time.) */ rettuple = ExecCallTriggerFunc(&LocTriggerData, tgindx, @@ -2153,8 +2148,8 @@ AfterTriggerExecute(AfterTriggerEvent event, ReleaseBuffer(newbuffer); /* - * If doing EXPLAIN ANALYZE, stop charging time to this trigger, - * and count one "tuple returned" (really the number of firings). + * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count + * one "tuple returned" (really the number of firings). */ if (instr) InstrStopNode(instr + tgindx, true); @@ -2264,7 +2259,7 @@ afterTriggerMarkEvents(AfterTriggerEventList *events, * * If estate isn't NULL, then we expect that all the firable events are * for triggers of the relations included in the estate's result relation - * array. 
This allows us to re-use the estate's open relations and + * array. This allows us to re-use the estate's open relations and * trigger cache info. When estate is NULL, we have to find the relations * the hard way. * @@ -2308,8 +2303,8 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, event->ate_firing_id == firing_id) { /* - * So let's fire it... but first, open the correct - * relation if this is not the same relation as before. + * So let's fire it... but first, open the correct relation if + * this is not the same relation as before. */ if (rel == NULL || rel->rd_id != event->ate_relid) { @@ -2317,7 +2312,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, { /* Find target relation among estate's result rels */ ResultRelInfo *rInfo; - int nr; + int nr; rInfo = estate->es_result_relations; nr = estate->es_num_result_relations; @@ -2328,7 +2323,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, rInfo++; nr--; } - if (nr <= 0) /* should not happen */ + if (nr <= 0) /* should not happen */ elog(ERROR, "could not find relation %u among query result relations", event->ate_relid); rel = rInfo->ri_RelationDesc; @@ -2345,17 +2340,17 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, FreeTriggerDesc(trigdesc); if (finfo) pfree(finfo); - Assert(instr == NULL); /* never used in this case */ + Assert(instr == NULL); /* never used in this case */ /* - * We assume that an appropriate lock is still held by - * the executor, so grab no new lock here. + * We assume that an appropriate lock is still held by the + * executor, so grab no new lock here. */ rel = heap_open(event->ate_relid, NoLock); /* - * Copy relation's trigger info so that we have a - * stable copy no matter what the called triggers do. + * Copy relation's trigger info so that we have a stable + * copy no matter what the called triggers do. */ trigdesc = CopyTriggerDesc(rel->trigdesc); @@ -2364,8 +2359,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, event->ate_relid); /* - * Allocate space to cache fmgr lookup info for - * triggers. + * Allocate space to cache fmgr lookup info for triggers. */ finfo = (FmgrInfo *) palloc0(trigdesc->numtriggers * sizeof(FmgrInfo)); @@ -2376,8 +2370,8 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, /* * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is still - * set, so recursive examinations of the event list won't try - * to re-fire it. + * set, so recursive examinations of the event list won't try to + * re-fire it. */ AfterTriggerExecute(event, rel, trigdesc, finfo, instr, per_tuple_context); @@ -2393,9 +2387,9 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, * If it's now done, throw it away, if allowed. * * NB: it's possible the trigger call above added more events to the - * queue, or that calls we will do later will want to add more, so - * we have to be careful about maintaining list validity at all - * points here. + * queue, or that calls we will do later will want to add more, so we + * have to be careful about maintaining list validity at all points + * here. 
*/ next_event = event->ate_next; @@ -2499,7 +2493,7 @@ AfterTriggerBeginQuery(void) if (afterTriggers->query_depth >= afterTriggers->maxquerydepth) { /* repalloc will keep the stack in the same context */ - int new_alloc = afterTriggers->maxquerydepth * 2; + int new_alloc = afterTriggers->maxquerydepth * 2; afterTriggers->query_stack = (AfterTriggerEventList *) repalloc(afterTriggers->query_stack, @@ -2537,21 +2531,21 @@ AfterTriggerEndQuery(EState *estate) Assert(afterTriggers->query_depth >= 0); /* - * Process all immediate-mode triggers queued by the query, and move - * the deferred ones to the main list of deferred events. + * Process all immediate-mode triggers queued by the query, and move the + * deferred ones to the main list of deferred events. * - * Notice that we decide which ones will be fired, and put the deferred - * ones on the main list, before anything is actually fired. This - * ensures reasonably sane behavior if a trigger function does - * SET CONSTRAINTS ... IMMEDIATE: all events we have decided to defer - * will be available for it to fire. + * Notice that we decide which ones will be fired, and put the deferred ones + * on the main list, before anything is actually fired. This ensures + * reasonably sane behavior if a trigger function does SET CONSTRAINTS ... + * IMMEDIATE: all events we have decided to defer will be available for it + * to fire. * * If we find no firable events, we don't have to increment firing_counter. */ events = &afterTriggers->query_stack[afterTriggers->query_depth]; if (afterTriggerMarkEvents(events, &afterTriggers->events, true)) { - CommandId firing_id = afterTriggers->firing_counter++; + CommandId firing_id = afterTriggers->firing_counter++; /* OK to delete the immediate events after processing them */ afterTriggerInvokeEvents(events, firing_id, estate, true); @@ -2584,21 +2578,21 @@ AfterTriggerFireDeferred(void) Assert(afterTriggers->query_depth == -1); /* - * If there are any triggers to fire, make sure we have set a snapshot - * for them to use. (Since PortalRunUtility doesn't set a snap for - * COMMIT, we can't assume ActiveSnapshot is valid on entry.) + * If there are any triggers to fire, make sure we have set a snapshot for + * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we + * can't assume ActiveSnapshot is valid on entry.) */ events = &afterTriggers->events; if (events->head != NULL) ActiveSnapshot = CopySnapshot(GetTransactionSnapshot()); /* - * Run all the remaining triggers. Loop until they are all gone, - * just in case some trigger queues more for us to do. + * Run all the remaining triggers. Loop until they are all gone, just in + * case some trigger queues more for us to do. */ while (afterTriggerMarkEvents(events, NULL, false)) { - CommandId firing_id = afterTriggers->firing_counter++; + CommandId firing_id = afterTriggers->firing_counter++; afterTriggerInvokeEvents(events, firing_id, NULL, true); } @@ -2643,7 +2637,7 @@ AfterTriggerBeginSubXact(void) int my_level = GetCurrentTransactionNestLevel(); /* - * Ignore call if the transaction is in aborted state. (Probably + * Ignore call if the transaction is in aborted state. (Probably * shouldn't happen?) 
*/ if (afterTriggers == NULL) @@ -2676,7 +2670,7 @@ AfterTriggerBeginSubXact(void) else { /* repalloc will keep the stacks in the same context */ - int new_alloc = afterTriggers->maxtransdepth * 2; + int new_alloc = afterTriggers->maxtransdepth * 2; afterTriggers->state_stack = (SetConstraintState *) repalloc(afterTriggers->state_stack, @@ -2695,8 +2689,8 @@ AfterTriggerBeginSubXact(void) } /* - * Push the current information into the stack. The SET CONSTRAINTS - * state is not saved until/unless changed. + * Push the current information into the stack. The SET CONSTRAINTS state + * is not saved until/unless changed. */ afterTriggers->state_stack[my_level] = NULL; afterTriggers->events_stack[my_level] = afterTriggers->events; @@ -2718,7 +2712,8 @@ AfterTriggerEndSubXact(bool isCommit) CommandId subxact_firing_id; /* - * Ignore call if the transaction is in aborted state. (Probably unneeded) + * Ignore call if the transaction is in aborted state. (Probably + * unneeded) */ if (afterTriggers == NULL) return; @@ -2759,8 +2754,8 @@ AfterTriggerEndSubXact(bool isCommit) */ /* - * Restore the trigger state. If the saved state is NULL, then - * this subxact didn't save it, so it doesn't need restoring. + * Restore the trigger state. If the saved state is NULL, then this + * subxact didn't save it, so it doesn't need restoring. */ state = afterTriggers->state_stack[my_level]; if (state != NULL) @@ -2772,12 +2767,12 @@ AfterTriggerEndSubXact(bool isCommit) afterTriggers->state_stack[my_level] = NULL; /* - * Scan for any remaining deferred events that were marked DONE - * or IN PROGRESS by this subxact or a child, and un-mark them. - * We can recognize such events because they have a firing ID - * greater than or equal to the firing_counter value we saved at - * subtransaction start. (This essentially assumes that the - * current subxact includes all subxacts started after it.) + * Scan for any remaining deferred events that were marked DONE or IN + * PROGRESS by this subxact or a child, and un-mark them. We can + * recognize such events because they have a firing ID greater than or + * equal to the firing_counter value we saved at subtransaction start. + * (This essentially assumes that the current subxact includes all + * subxacts started after it.) */ subxact_firing_id = afterTriggers->firing_stack[my_level]; for (event = afterTriggers->events.head; @@ -2813,7 +2808,7 @@ SetConstraintStateCreate(int numalloc) state = (SetConstraintState) MemoryContextAllocZero(TopTransactionContext, sizeof(SetConstraintStateData) + - (numalloc - 1) *sizeof(SetConstraintTriggerData)); + (numalloc - 1) *sizeof(SetConstraintTriggerData)); state->numalloc = numalloc; @@ -2840,7 +2835,7 @@ SetConstraintStateCopy(SetConstraintState origstate) } /* - * Add a per-trigger item to a SetConstraintState. Returns possibly-changed + * Add a per-trigger item to a SetConstraintState. Returns possibly-changed * pointer to the state object (it will change if we have to repalloc). */ static SetConstraintState @@ -2885,9 +2880,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) return; /* - * If in a subtransaction, and we didn't save the current state - * already, save it so it can be restored if the subtransaction - * aborts. + * If in a subtransaction, and we didn't save the current state already, + * save it so it can be restored if the subtransaction aborts. 
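SetConstraintStateData above declares its per-trigger entries as trigstates[1] marked VARIABLE LENGTH ARRAY, SetConstraintStateCreate() sizes the allocation as the struct header plus (numalloc - 1) further elements, and SetConstraintStateAddItem() may repalloc, which is why it returns a possibly-changed pointer. A minimal standalone sketch of that pre-C99 struct-hack idiom, with hypothetical names and calloc/realloc in place of the memory-context allocators:

#include <stdlib.h>

/* Hypothetical stand-ins for SetConstraintTriggerData/SetConstraintStateData. */
typedef struct TrigState
{
    unsigned int tgoid;
    int          isdeferred;
} TrigState;

typedef struct ConstraintState
{
    int       numstates;             /* trigstates[] entries in use */
    int       numalloc;              /* allocated size of trigstates[] */
    TrigState trigstates[1];         /* VARIABLE LENGTH ARRAY */
} ConstraintState;

/* Size the allocation as header + numalloc elements; the "- 1" accounts
 * for the one element already declared inside the struct. */
static ConstraintState *
constraint_state_create(int numalloc)
{
    ConstraintState *state;

    state = calloc(1, sizeof(ConstraintState) +
                   (numalloc - 1) * sizeof(TrigState));
    if (state == NULL)
        abort();                     /* error handling simplified */
    state->numalloc = numalloc;
    return state;
}

/* Add one item, growing the array when full.  The whole struct may move,
 * so callers must keep using the returned pointer. */
static ConstraintState *
constraint_state_add(ConstraintState *state,
                     unsigned int tgoid, int isdeferred)
{
    if (state->numstates >= state->numalloc)
    {
        int newalloc = state->numalloc > 0 ? state->numalloc * 2 : 8;

        state = realloc(state, sizeof(ConstraintState) +
                        (newalloc - 1) * sizeof(TrigState));
        if (state == NULL)
            abort();                 /* error handling simplified */
        state->numalloc = newalloc;
    }
    state->trigstates[state->numstates].tgoid = tgoid;
    state->trigstates[state->numstates].isdeferred = isdeferred;
    state->numstates++;
    return state;
}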
*/ if (my_level > 1 && afterTriggers->state_stack[my_level] == NULL) @@ -2939,7 +2933,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) if (strlen(cname) == 0) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("unnamed constraints cannot be set explicitly"))); + errmsg("unnamed constraints cannot be set explicitly"))); /* * Setup to scan pg_trigger by tgconstrname ... @@ -2962,9 +2956,9 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup); /* - * If we found some, check that they fit the deferrability - * but skip referential action ones, since they are - * silently never deferrable. + * If we found some, check that they fit the deferrability but + * skip referential action ones, since they are silently never + * deferrable. */ if (pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD && pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL && @@ -3026,15 +3020,15 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) } /* - * SQL99 requires that when a constraint is set to IMMEDIATE, any - * deferred checks against that constraint must be made when the SET - * CONSTRAINTS command is executed -- i.e. the effects of the SET - * CONSTRAINTS command apply retroactively. We've updated the - * constraints state, so scan the list of previously deferred events - * to fire any that have now become immediate. + * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred + * checks against that constraint must be made when the SET CONSTRAINTS + * command is executed -- i.e. the effects of the SET CONSTRAINTS command + * apply retroactively. We've updated the constraints state, so scan the + * list of previously deferred events to fire any that have now become + * immediate. * - * Obviously, if this was SET ... DEFERRED then it can't have converted - * any unfired events to immediate, so we need do nothing in that case. + * Obviously, if this was SET ... DEFERRED then it can't have converted any + * unfired events to immediate, so we need do nothing in that case. */ if (!stmt->deferred) { @@ -3042,12 +3036,12 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) if (afterTriggerMarkEvents(events, NULL, true)) { - CommandId firing_id = afterTriggers->firing_counter++; + CommandId firing_id = afterTriggers->firing_counter++; /* - * We can delete fired events if we are at top transaction - * level, but we'd better not if inside a subtransaction, since - * the subtransaction could later get rolled back. + * We can delete fired events if we are at top transaction level, + * but we'd better not if inside a subtransaction, since the + * subtransaction could later get rolled back. */ afterTriggerInvokeEvents(events, firing_id, NULL, !IsSubTransaction()); @@ -3116,9 +3110,9 @@ AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger, continue; /* - * If this is an UPDATE of a PK table or FK table that does - * not change the PK or FK respectively, we can skip queuing - * the event: there is no need to fire the trigger. + * If this is an UPDATE of a PK table or FK table that does not change + * the PK or FK respectively, we can skip queuing the event: there is + * no need to fire the trigger. 
*/ if ((event & TRIGGER_EVENT_OPMASK) == TRIGGER_EVENT_UPDATE) { @@ -3134,17 +3128,17 @@ AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger, break; case RI_TRIGGER_FK: + /* * Update on FK table * - * There is one exception when updating FK tables: - * if the updated row was inserted by our own - * transaction and the FK is deferred, we still - * need to fire the trigger. This is because our - * UPDATE will invalidate the INSERT so the - * end-of-transaction INSERT RI trigger will not - * do anything, so we have to do the check for the - * UPDATE anyway. + * There is one exception when updating FK tables: if the + * updated row was inserted by our own transaction and the + * FK is deferred, we still need to fire the trigger. This + * is because our UPDATE will invalidate the INSERT so the + * end-of-transaction INSERT RI trigger will not do + * anything, so we have to do the check for the UPDATE + * anyway. */ if (HeapTupleHeaderGetXmin(oldtup->t_data) != GetCurrentTransactionId() && diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index ee69821bcfb..7caacdacd2f 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.80 2005/08/22 17:38:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.81 2005/10/15 02:49:16 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -130,8 +130,7 @@ DefineType(List *names, List *parameters) /* * Type names must be one character shorter than other names, allowing - * room to create the corresponding array type name with prepended - * "_". + * room to create the corresponding array type name with prepended "_". */ if (strlen(typeName) > (NAMEDATALEN - 2)) ereport(ERROR, @@ -183,10 +182,9 @@ DefineType(List *names, List *parameters) char *a = defGetString(defel); /* - * Note: if argument was an unquoted identifier, parser will - * have applied translations to it, so be prepared to - * recognize translated type names as well as the nominal - * form. + * Note: if argument was an unquoted identifier, parser will have + * applied translations to it, so be prepared to recognize + * translated type names as well as the nominal form. */ if (pg_strcasecmp(a, "double") == 0 || pg_strcasecmp(a, "float8") == 0 || @@ -303,8 +301,8 @@ DefineType(List *names, List *parameters) else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type output function %s must return type \"cstring\"", - NameListToString(outputName)))); + errmsg("type output function %s must return type \"cstring\"", + NameListToString(outputName)))); } if (receiveOid) { @@ -312,8 +310,8 @@ DefineType(List *names, List *parameters) if (resulttype != typoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type receive function %s must return type %s", - NameListToString(receiveName), typeName))); + errmsg("type receive function %s must return type %s", + NameListToString(receiveName), typeName))); } if (sendOid) { @@ -321,14 +319,13 @@ DefineType(List *names, List *parameters) if (resulttype != BYTEAOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type send function %s must return type \"bytea\"", - NameListToString(sendName)))); + errmsg("type send function %s must return type \"bytea\"", + NameListToString(sendName)))); } /* - * Convert analysis function proc name to an OID. 
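The DefineType() comment above gives the reason base type names are capped at NAMEDATALEN - 2 bytes: the corresponding array type is named by prepending "_", and that longer name must still fit in a Name column. A small standalone sketch of the length check and of building the prefixed shadow name follows; NAMEDATALEN is assumed to be at its usual default of 64, and the helper is a hypothetical illustration rather than the backend's makeArrayTypeName().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAMEDATALEN 64               /* usual default; counts the trailing NUL */

/* Reject base type names that leave no room for the "_"-prefixed array
 * type name, then build that shadow name. */
static char *
make_array_type_name_sketch(const char *typeName)
{
    char *shadow;

    if (strlen(typeName) > NAMEDATALEN - 2)
    {
        fprintf(stderr, "type names must be %d characters or less\n",
                NAMEDATALEN - 2);
        return NULL;
    }
    shadow = malloc(NAMEDATALEN);
    if (shadow == NULL)
        return NULL;
    snprintf(shadow, NAMEDATALEN, "_%s", typeName);
    return shadow;
}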
If no analysis - * function is specified, we'll use zero to select the built-in - * default algorithm. + * Convert analysis function proc name to an OID. If no analysis function + * is specified, we'll use zero to select the built-in default algorithm. */ if (analyzeName) analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid); @@ -361,8 +358,8 @@ DefineType(List *names, List *parameters) false); /* Type NOT NULL */ /* - * When we create a base type (as opposed to a complex type) we need - * to have an array entry for it in pg_type as well. + * When we create a base type (as opposed to a complex type) we need to + * have an array entry for it in pg_type as well. */ shadow_type = makeArrayTypeName(typeName); @@ -430,8 +427,8 @@ RemoveType(List *names, DropBehavior behavior) /* Permission check: must own type or its namespace */ if (!pg_type_ownercheck(typeoid, GetUserId()) && - !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace, - GetUserId())) + !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace, + GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE, TypeNameToString(typename)); @@ -522,12 +519,11 @@ DefineDomain(CreateDomainStmt *stmt) get_namespace_name(domainNamespace)); /* - * Domainnames, unlike typenames don't need to account for the '_' - * prefix. So they can be one character longer. (This test is - * presently useless since the parser will have truncated the name to - * fit. But leave it here since we may someday support arrays of - * domains, in which case we'll be back to needing to enforce - * NAMEDATALEN-2.) + * Domainnames, unlike typenames don't need to account for the '_' prefix. + * So they can be one character longer. (This test is presently useless + * since the parser will have truncated the name to fit. But leave it + * here since we may someday support arrays of domains, in which case + * we'll be back to needing to enforce NAMEDATALEN-2.) */ if (strlen(domainName) > (NAMEDATALEN - 1)) ereport(ERROR, @@ -544,10 +540,9 @@ DefineDomain(CreateDomainStmt *stmt) basetypeoid = HeapTupleGetOid(typeTup); /* - * Base type must be a plain base type. Domains over pseudo types - * would create a security hole. Domains of domains might be made to - * work in the future, but not today. Ditto for domains over complex - * types. + * Base type must be a plain base type. Domains over pseudo types would + * create a security hole. Domains of domains might be made to work in + * the future, but not today. Ditto for domains over complex types. */ typtype = baseType->typtype; if (typtype != 'b') @@ -613,7 +608,7 @@ DefineDomain(CreateDomainStmt *stmt) if (IsA(newConstraint, FkConstraint)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("foreign key constraints not possible for domains"))); + errmsg("foreign key constraints not possible for domains"))); /* otherwise it should be a plain Constraint */ if (!IsA(newConstraint, Constraint)) @@ -627,8 +622,8 @@ DefineDomain(CreateDomainStmt *stmt) case CONSTR_DEFAULT: /* - * The inherited default value may be overridden by the - * user with the DEFAULT <expr> statement. + * The inherited default value may be overridden by the user + * with the DEFAULT <expr> statement. */ if (defaultExpr) ereport(ERROR, @@ -639,8 +634,8 @@ DefineDomain(CreateDomainStmt *stmt) pstate = make_parsestate(NULL); /* - * Cook the constr->raw_expr into an expression. Note: - * Name is strictly for error message + * Cook the constr->raw_expr into an expression. 
Note: Name is + * strictly for error message */ defaultExpr = cookDefault(pstate, constr->raw_expr, basetypeoid, @@ -648,13 +643,13 @@ DefineDomain(CreateDomainStmt *stmt) domainName); /* - * Expression must be stored as a nodeToString result, but - * we also require a valid textual representation (mainly - * to make life easier for pg_dump). + * Expression must be stored as a nodeToString result, but we + * also require a valid textual representation (mainly to make + * life easier for pg_dump). */ defaultValue = deparse_expression(defaultExpr, - deparse_context_for(domainName, - InvalidOid), + deparse_context_for(domainName, + InvalidOid), false, false); defaultValueBin = nodeToString(defaultExpr); break; @@ -663,7 +658,7 @@ DefineDomain(CreateDomainStmt *stmt) if (nullDefined && !typNotNull) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("conflicting NULL/NOT NULL constraints"))); + errmsg("conflicting NULL/NOT NULL constraints"))); typNotNull = true; nullDefined = true; break; @@ -672,7 +667,7 @@ DefineDomain(CreateDomainStmt *stmt) if (nullDefined && typNotNull) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("conflicting NULL/NOT NULL constraints"))); + errmsg("conflicting NULL/NOT NULL constraints"))); typNotNull = false; nullDefined = true; break; @@ -691,13 +686,13 @@ DefineDomain(CreateDomainStmt *stmt) case CONSTR_UNIQUE: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unique constraints not possible for domains"))); + errmsg("unique constraints not possible for domains"))); break; case CONSTR_PRIMARY: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("primary key constraints not possible for domains"))); + errmsg("primary key constraints not possible for domains"))); break; case CONSTR_ATTR_DEFERRABLE: @@ -744,8 +739,7 @@ DefineDomain(CreateDomainStmt *stmt) typNotNull); /* Type NOT NULL */ /* - * Process constraints which refer to the domain ID returned by - * TypeCreate + * Process constraints which refer to the domain ID returned by TypeCreate */ foreach(listptr, schema) { @@ -815,8 +809,8 @@ RemoveDomain(List *names, DropBehavior behavior) /* Permission check: must own type or its namespace */ if (!pg_type_ownercheck(typeoid, GetUserId()) && - !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace, - GetUserId())) + !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace, + GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE, TypeNameToString(typename)); @@ -856,11 +850,11 @@ findTypeInputFunction(List *procname, Oid typeOid) Oid procOid; /* - * Input functions can take a single argument of type CSTRING, or - * three arguments (string, typioparam OID, typmod). + * Input functions can take a single argument of type CSTRING, or three + * arguments (string, typioparam OID, typmod). * - * For backwards compatibility we allow OPAQUE in place of CSTRING; if we - * see this, we issue a warning and fix up the pg_proc entry. + * For backwards compatibility we allow OPAQUE in place of CSTRING; if we see + * this, we issue a warning and fix up the pg_proc entry. */ argList[0] = CSTRINGOID; @@ -897,8 +891,8 @@ findTypeInputFunction(List *procname, Oid typeOid) SetFunctionArgType(procOid, 0, CSTRINGOID); /* - * Need CommandCounterIncrement since DefineType will likely try - * to alter the pg_proc tuple again. + * Need CommandCounterIncrement since DefineType will likely try to + * alter the pg_proc tuple again. 
*/ CommandCounterIncrement(); @@ -925,9 +919,8 @@ findTypeOutputFunction(List *procname, Oid typeOid) /* * Output functions can take a single argument of the type. * - * For backwards compatibility we allow OPAQUE in place of the actual - * type name; if we see this, we issue a warning and fix up the - * pg_proc entry. + * For backwards compatibility we allow OPAQUE in place of the actual type + * name; if we see this, we issue a warning and fix up the pg_proc entry. */ argList[0] = typeOid; @@ -944,13 +937,13 @@ findTypeOutputFunction(List *procname, Oid typeOid) { /* Found, but must complain and fix the pg_proc entry */ ereport(WARNING, - (errmsg("changing argument type of function %s from \"opaque\" to %s", - NameListToString(procname), format_type_be(typeOid)))); + (errmsg("changing argument type of function %s from \"opaque\" to %s", + NameListToString(procname), format_type_be(typeOid)))); SetFunctionArgType(procOid, 0, typeOid); /* - * Need CommandCounterIncrement since DefineType will likely try - * to alter the pg_proc tuple again. + * Need CommandCounterIncrement since DefineType will likely try to + * alter the pg_proc tuple again. */ CommandCounterIncrement(); @@ -975,8 +968,8 @@ findTypeReceiveFunction(List *procname, Oid typeOid) Oid procOid; /* - * Receive functions can take a single argument of type INTERNAL, or - * three arguments (internal, typioparam OID, typmod). + * Receive functions can take a single argument of type INTERNAL, or three + * arguments (internal, typioparam OID, typmod). */ argList[0] = INTERNALOID; @@ -1029,8 +1022,7 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) Oid procOid; /* - * Analyze functions always take one INTERNAL argument and return - * bool. + * Analyze functions always take one INTERNAL argument and return bool. */ argList[0] = INTERNALOID; @@ -1044,8 +1036,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) if (get_func_rettype(procOid) != BOOLOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type analyze function %s must return type \"boolean\"", - NameListToString(procname)))); + errmsg("type analyze function %s must return type \"boolean\"", + NameListToString(procname)))); return procOid; } @@ -1073,7 +1065,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist) if (coldeflist == NIL) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("composite type must have at least one attribute"))); + errmsg("composite type must have at least one attribute"))); /* * now set the parameters for keys/inheritance etc. All of these are @@ -1165,28 +1157,28 @@ AlterDomainDefault(List *names, Node *defaultRaw) /* * Expression must be stored as a nodeToString result, but we also - * require a valid textual representation (mainly to make life - * easier for pg_dump). + * require a valid textual representation (mainly to make life easier + * for pg_dump). */ defaultValue = deparse_expression(defaultExpr, - deparse_context_for(NameStr(typTup->typname), - InvalidOid), + deparse_context_for(NameStr(typTup->typname), + InvalidOid), false, false); /* * Form an updated tuple with the new default and write it back. 
*/ new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin, - CStringGetDatum( - nodeToString(defaultExpr))); + CStringGetDatum( + nodeToString(defaultExpr))); new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r'; new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin, - CStringGetDatum(defaultValue)); + CStringGetDatum(defaultValue)); new_record_repl[Anum_pg_type_typdefault - 1] = 'r'; } else - /* Default is NULL, drop it */ + /* Default is NULL, drop it */ { new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n'; new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r'; @@ -1305,8 +1297,8 @@ AlterDomainNotNull(List *names, bool notNull) ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains null values", - NameStr(tupdesc->attrs[attnum - 1]->attname), - RelationGetRelationName(testrel)))); + NameStr(tupdesc->attrs[attnum - 1]->attname), + RelationGetRelationName(testrel)))); } } heap_endscan(scan); @@ -1317,8 +1309,8 @@ AlterDomainNotNull(List *names, bool notNull) } /* - * Okay to update pg_type row. We can scribble on typTup because it's - * a copy. + * Okay to update pg_type row. We can scribble on typTup because it's a + * copy. */ typTup->typnotnull = notNull; @@ -1467,7 +1459,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) if (IsA(newConstraint, FkConstraint)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("foreign key constraints not possible for domains"))); + errmsg("foreign key constraints not possible for domains"))); /* otherwise it should be a plain Constraint */ if (!IsA(newConstraint, Constraint)) @@ -1485,13 +1477,13 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) case CONSTR_UNIQUE: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unique constraints not possible for domains"))); + errmsg("unique constraints not possible for domains"))); break; case CONSTR_PRIMARY: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("primary key constraints not possible for domains"))); + errmsg("primary key constraints not possible for domains"))); break; case CONSTR_ATTR_DEFERRABLE: @@ -1511,8 +1503,8 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) /* * Since all other constraint types throw errors, this must be a check - * constraint. First, process the constraint expression and add an - * entry to pg_constraint. + * constraint. First, process the constraint expression and add an entry + * to pg_constraint. */ ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace, @@ -1572,7 +1564,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) ereport(ERROR, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint", - NameStr(tupdesc->attrs[attnum - 1]->attname), + NameStr(tupdesc->attrs[attnum - 1]->attname), RelationGetRelationName(testrel)))); } @@ -1626,8 +1618,8 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode) HeapTuple depTup; /* - * We scan pg_depend to find those things that depend on the domain. - * (We assume we can ignore refobjsubid for a domain.) + * We scan pg_depend to find those things that depend on the domain. (We + * assume we can ignore refobjsubid for a domain.) */ depRel = heap_open(DependRelationId, AccessShareLock); @@ -1693,10 +1685,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode) } /* - * Confirm column has not been dropped, and is of the expected - * type. 
This defends against an ALTER DROP COLUMN occuring just - * before we acquired lock ... but if the whole table were - * dropped, we'd still have a problem. + * Confirm column has not been dropped, and is of the expected type. + * This defends against an ALTER DROP COLUMN occuring just before we + * acquired lock ... but if the whole table were dropped, we'd still + * have a problem. */ if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel)) continue; @@ -1705,9 +1697,9 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode) continue; /* - * Okay, add column to result. We store the columns in - * column-number order; this is just a hack to improve - * predictability of regression test output ... + * Okay, add column to result. We store the columns in column-number + * order; this is just a hack to improve predictability of regression + * test output ... */ Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel)); @@ -1777,8 +1769,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, constr->name)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("constraint \"%s\" for domain \"%s\" already exists", - constr->name, domainName))); + errmsg("constraint \"%s\" for domain \"%s\" already exists", + constr->name, domainName))); } else constr->name = ChooseConstraintName(domainName, @@ -1793,11 +1785,11 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, pstate = make_parsestate(NULL); /* - * Set up a CoerceToDomainValue to represent the occurrence of VALUE - * in the expression. Note that it will appear to have the type of - * the base type, not the domain. This seems correct since within the - * check expression, we should not assume the input value can be - * considered a member of the domain. + * Set up a CoerceToDomainValue to represent the occurrence of VALUE in + * the expression. Note that it will appear to have the type of the base + * type, not the domain. This seems correct since within the check + * expression, we should not assume the input value can be considered a + * member of the domain. */ domVal = makeNode(CoerceToDomainValue); domVal->typeId = baseTypeOid; @@ -1818,7 +1810,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, if (list_length(pstate->p_rtable) != 0) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("cannot use table references in domain check constraint"))); + errmsg("cannot use table references in domain check constraint"))); /* * Domains don't allow var clauses (this should be redundant with the @@ -1827,7 +1819,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, if (contain_var_clause(expr)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("cannot use table references in domain check constraint"))); + errmsg("cannot use table references in domain check constraint"))); /* * No subplans or aggregates, either... @@ -1849,8 +1841,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, /* * Deparse it to produce text for consrc. * - * Since VARNOs aren't allowed in domain constraints, relation context - * isn't required as anything other than a shell. + * Since VARNOs aren't allowed in domain constraints, relation context isn't + * required as anything other than a shell. 
*/ ccsrc = deparse_expression(expr, deparse_context_for(domainName, @@ -1881,8 +1873,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, ccsrc); /* Source form check constraint */ /* - * Return the compiled constraint expression so the calling routine - * can perform any additional required tests. + * Return the compiled constraint expression so the calling routine can + * perform any additional required tests. */ return ccbin; } @@ -1956,8 +1948,7 @@ GetDomainConstraints(Oid typeOid) continue; /* - * Not expecting conbin to be NULL, but we'll test for it - * anyway + * Not expecting conbin to be NULL, but we'll test for it anyway */ val = fastgetattr(conTup, Anum_pg_constraint_conbin, conRel->rd_att, &isNull); @@ -1978,8 +1969,8 @@ GetDomainConstraints(Oid typeOid) r->check_expr = ExecInitExpr(check_expr, NULL); /* - * use lcons() here because constraints of lower domains - * should be applied earlier. + * use lcons() here because constraints of lower domains should be + * applied earlier. */ result = lcons(r, result); } @@ -1994,8 +1985,8 @@ GetDomainConstraints(Oid typeOid) heap_close(conRel, AccessShareLock); /* - * Only need to add one NOT NULL check regardless of how many domains - * in the stack request it. + * Only need to add one NOT NULL check regardless of how many domains in + * the stack request it. */ if (notNull) { @@ -2071,7 +2062,7 @@ AlterTypeOwner(List *names, Oid newOwnerId) if (!superuser()) { /* Otherwise, must be owner of the existing object */ - if (!pg_type_ownercheck(HeapTupleGetOid(tup),GetUserId())) + if (!pg_type_ownercheck(HeapTupleGetOid(tup), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE, TypeNameToString(typename)); @@ -2088,8 +2079,7 @@ AlterTypeOwner(List *names, Oid newOwnerId) } /* - * Modify the owner --- okay to scribble on typTup because it's a - * copy + * Modify the owner --- okay to scribble on typTup because it's a copy */ typTup->typowner = newOwnerId; @@ -2128,8 +2118,7 @@ AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId) typTup = (Form_pg_type) GETSTRUCT(tup); /* - * Modify the owner --- okay to scribble on typTup because it's a - * copy + * Modify the owner --- okay to scribble on typTup because it's a copy */ typTup->typowner = newOwnerId; @@ -2150,9 +2139,9 @@ AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId) void AlterTypeNamespace(List *names, const char *newschema) { - TypeName *typename; - Oid typeOid; - Oid nspOid; + TypeName *typename; + Oid typeOid; + Oid nspOid; /* get type OID */ typename = makeNode(TypeName); @@ -2221,7 +2210,7 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid, if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move objects into or out of temporary schemas"))); + errmsg("cannot move objects into or out of temporary schemas"))); /* same for TOAST schema */ if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE) @@ -2264,18 +2253,18 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid, /* * Composite types have pg_class entries. * - * We need to modify the pg_class tuple as well to - * reflect the change of schema. + * We need to modify the pg_class tuple as well to reflect the change of + * schema. */ if (isCompositeType) { - Relation classRel; + Relation classRel; classRel = heap_open(RelationRelationId, RowExclusiveLock); /* - * The dependency on the schema is listed under the pg_class entry, - * so tell AlterRelationNamespaceInternal to fix it. 
+ * The dependency on the schema is listed under the pg_class entry, so + * tell AlterRelationNamespaceInternal to fix it. */ AlterRelationNamespaceInternal(classRel, typform->typrelid, oldNspOid, nspOid, @@ -2284,8 +2273,8 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid, heap_close(classRel, RowExclusiveLock); /* - * Check for constraints associated with the composite type - * (we don't currently support this, but probably will someday). + * Check for constraints associated with the composite type (we don't + * currently support this, but probably will someday). */ AlterConstraintNamespaces(typform->typrelid, oldNspOid, nspOid, false); @@ -2297,12 +2286,12 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid, AlterConstraintNamespaces(typeOid, oldNspOid, nspOid, true); /* - * Update dependency on schema, if any --- a table rowtype has not - * got one. + * Update dependency on schema, if any --- a table rowtype has not got + * one. */ if (typform->typtype != 'c') if (changeDependencyFor(TypeRelationId, typeOid, - NamespaceRelationId, oldNspOid, nspOid) != 1) + NamespaceRelationId, oldNspOid, nspOid) != 1) elog(ERROR, "failed to change schema dependency for type %s", format_type_be(typeOid)); } diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 082ea0cf7a0..706e85dea5b 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.160 2005/07/31 17:19:17 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.161 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -34,11 +34,11 @@ extern bool Password_encryption; static List *roleNamesToIds(List *memberNames); static void AddRoleMems(const char *rolename, Oid roleid, - List *memberNames, List *memberIds, - Oid grantorId, bool admin_opt); + List *memberNames, List *memberIds, + Oid grantorId, bool admin_opt); static void DelRoleMems(const char *rolename, Oid roleid, - List *memberNames, List *memberIds, - bool admin_opt); + List *memberNames, List *memberIds, + bool admin_opt); /* Check if current user has createrole privileges */ @@ -78,16 +78,16 @@ CreateRole(CreateRoleStmt *stmt) Oid roleid; ListCell *item; ListCell *option; - char *password = NULL; /* user password */ + char *password = NULL; /* user password */ bool encrypt_password = Password_encryption; /* encrypt password? */ char encrypted_password[MD5_PASSWD_LEN + 1]; - bool issuper = false; /* Make the user a superuser? */ - bool inherit = true; /* Auto inherit privileges? */ + bool issuper = false; /* Make the user a superuser? */ + bool inherit = true; /* Auto inherit privileges? */ bool createrole = false; /* Can this user create roles? */ bool createdb = false; /* Can the user create databases? */ bool canlogin = false; /* Can this user login? 
*/ - int connlimit = -1; /* maximum connections allowed */ - List *addroleto = NIL; /* roles to make this a member of */ + int connlimit = -1; /* maximum connections allowed */ + List *addroleto = NIL; /* roles to make this a member of */ List *rolemembers = NIL; /* roles to be members of this role */ List *adminmembers = NIL; /* roles to be admins of this role */ char *validUntil = NULL; /* time the login is valid until */ @@ -272,9 +272,9 @@ CreateRole(CreateRoleStmt *stmt) stmt->role))); /* - * Check the pg_authid relation to be certain the role doesn't - * already exist. Note we secure exclusive lock because - * we need to protect our eventual update of the flat auth file. + * Check the pg_authid relation to be certain the role doesn't already + * exist. Note we secure exclusive lock because we need to protect our + * eventual update of the flat auth file. */ pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock); pg_authid_dsc = RelationGetDescr(pg_authid_rel); @@ -344,8 +344,8 @@ CreateRole(CreateRoleStmt *stmt) CatalogUpdateIndexes(pg_authid_rel, tuple); /* - * Advance command counter so we can see new record; else tests - * in AddRoleMems may fail. + * Advance command counter so we can see new record; else tests in + * AddRoleMems may fail. */ if (addroleto || adminmembers || rolemembers) CommandCounterIncrement(); @@ -355,8 +355,8 @@ CreateRole(CreateRoleStmt *stmt) */ foreach(item, addroleto) { - char *oldrolename = strVal(lfirst(item)); - Oid oldroleid = get_roleid_checked(oldrolename); + char *oldrolename = strVal(lfirst(item)); + Oid oldroleid = get_roleid_checked(oldrolename); AddRoleMems(oldrolename, oldroleid, list_make1(makeString(stmt->role)), @@ -365,8 +365,8 @@ CreateRole(CreateRoleStmt *stmt) } /* - * Add the specified members to this new role. adminmembers get the - * admin option, rolemembers don't. + * Add the specified members to this new role. adminmembers get the admin + * option, rolemembers don't. */ AddRoleMems(stmt->role, roleid, adminmembers, roleNamesToIds(adminmembers), @@ -406,15 +406,15 @@ AlterRole(AlterRoleStmt *stmt) HeapTuple tuple, new_tuple; ListCell *option; - char *password = NULL; /* user password */ + char *password = NULL; /* user password */ bool encrypt_password = Password_encryption; /* encrypt password? */ char encrypted_password[MD5_PASSWD_LEN + 1]; - int issuper = -1; /* Make the user a superuser? */ - int inherit = -1; /* Auto inherit privileges? */ - int createrole = -1; /* Can this user create roles? */ - int createdb = -1; /* Can the user create databases? */ - int canlogin = -1; /* Can this user login? */ - int connlimit = -1; /* maximum connections allowed */ + int issuper = -1; /* Make the user a superuser? */ + int inherit = -1; /* Auto inherit privileges? */ + int createrole = -1; /* Can this user create roles? */ + int createdb = -1; /* Can the user create databases? */ + int canlogin = -1; /* Can this user login? */ + int connlimit = -1; /* maximum connections allowed */ List *rolemembers = NIL; /* roles to be added/removed */ char *validUntil = NULL; /* time the login is valid until */ DefElem *dpassword = NULL; @@ -591,9 +591,9 @@ AlterRole(AlterRoleStmt *stmt) * issuper/createrole/catupdate/etc * * XXX It's rather unclear how to handle catupdate. It's probably best to - * keep it equal to the superuser status, otherwise you could end up - * with a situation where no existing superuser can alter the - * catalogs, including pg_authid! 
+ * keep it equal to the superuser status, otherwise you could end up with + * a situation where no existing superuser can alter the catalogs, + * including pg_authid! */ if (issuper >= 0) { @@ -673,8 +673,8 @@ AlterRole(AlterRoleStmt *stmt) heap_freetuple(new_tuple); /* - * Advance command counter so we can see new record; else tests - * in AddRoleMems may fail. + * Advance command counter so we can see new record; else tests in + * AddRoleMems may fail. */ if (rolemembers) CommandCounterIncrement(); @@ -801,7 +801,8 @@ AlterRoleSet(AlterRoleSetStmt *stmt) void DropRole(DropRoleStmt *stmt) { - Relation pg_authid_rel, pg_auth_members_rel; + Relation pg_authid_rel, + pg_auth_members_rel; ListCell *item; if (!have_createrole_privilege()) @@ -811,9 +812,9 @@ DropRole(DropRoleStmt *stmt) /* * Scan the pg_authid relation to find the Oid of the role(s) to be - * deleted. Note we secure exclusive lock on pg_authid, because we - * need to protect our update of the flat auth file. A regular - * writer's lock on pg_auth_members is sufficient though. + * deleted. Note we secure exclusive lock on pg_authid, because we need + * to protect our update of the flat auth file. A regular writer's lock + * on pg_auth_members is sufficient though. */ pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock); pg_auth_members_rel = heap_open(AuthMemRelationId, RowExclusiveLock); @@ -823,7 +824,7 @@ DropRole(DropRoleStmt *stmt) const char *role = strVal(lfirst(item)); HeapTuple tuple, tmp_tuple; - ScanKeyData scankey; + ScanKeyData scankey; char *detail; SysScanDesc sscan; Oid roleid; @@ -865,7 +866,7 @@ DropRole(DropRoleStmt *stmt) /* * Lock the role, so nobody can add dependencies to her while we drop * her. We keep the lock until the end of transaction. - */ + */ LockSharedObject(AuthIdRelationId, roleid, 0, AccessExclusiveLock); /* Check for pg_shdepend entries depending on this role */ @@ -873,7 +874,7 @@ DropRole(DropRoleStmt *stmt) ereport(ERROR, (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), errmsg("role \"%s\" cannot be dropped because some objects depend on it", - role), + role), errdetail("%s", detail))); /* @@ -884,10 +885,10 @@ DropRole(DropRoleStmt *stmt) ReleaseSysCache(tuple); /* - * Remove role from the pg_auth_members table. We have to remove - * all tuples that show it as either a role or a member. + * Remove role from the pg_auth_members table. We have to remove all + * tuples that show it as either a role or a member. * - * XXX what about grantor entries? Maybe we should do one heap scan. + * XXX what about grantor entries? Maybe we should do one heap scan. */ ScanKeyInit(&scankey, Anum_pg_auth_members_roleid, @@ -920,13 +921,13 @@ DropRole(DropRoleStmt *stmt) systable_endscan(sscan); /* - * Advance command counter so that later iterations of this loop - * will see the changes already made. This is essential if, for - * example, we are trying to drop both a role and one of its - * direct members --- we'll get an error if we try to delete the - * linking pg_auth_members tuple twice. (We do not need a CCI - * between the two delete loops above, because it's not allowed - * for a role to directly contain itself.) + * Advance command counter so that later iterations of this loop will + * see the changes already made. This is essential if, for example, + * we are trying to drop both a role and one of its direct members --- + * we'll get an error if we try to delete the linking pg_auth_members + * tuple twice. 
(We do not need a CCI between the two delete loops + * above, because it's not allowed for a role to directly contain + * itself.) */ CommandCounterIncrement(); } @@ -975,11 +976,11 @@ RenameRole(const char *oldname, const char *newname) errmsg("role \"%s\" does not exist", oldname))); /* - * XXX Client applications probably store the session user somewhere, - * so renaming it could cause confusion. On the other hand, there may - * not be an actual problem besides a little confusion, so think about - * this and decide. Same for SET ROLE ... we don't restrict renaming - * the current effective userid, though. + * XXX Client applications probably store the session user somewhere, so + * renaming it could cause confusion. On the other hand, there may not be + * an actual problem besides a little confusion, so think about this and + * decide. Same for SET ROLE ... we don't restrict renaming the current + * effective userid, though. */ roleid = HeapTupleGetOid(oldtuple); @@ -1032,7 +1033,7 @@ RenameRole(const char *oldname, const char *newname) repl_repl[Anum_pg_authid_rolname - 1] = 'r'; repl_val[Anum_pg_authid_rolname - 1] = DirectFunctionCall1(namein, - CStringGetDatum(newname)); + CStringGetDatum(newname)); repl_null[Anum_pg_authid_rolname - 1] = ' '; datum = heap_getattr(oldtuple, Anum_pg_authid_rolpassword, dsc, &isnull); @@ -1082,23 +1083,22 @@ GrantRole(GrantRoleStmt *stmt) grantee_ids = roleNamesToIds(stmt->grantee_roles); /* - * Even though this operation doesn't change pg_authid, we must - * secure exclusive lock on it to protect our update of the flat - * auth file. + * Even though this operation doesn't change pg_authid, we must secure + * exclusive lock on it to protect our update of the flat auth file. */ pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock); /* - * Step through all of the granted roles and add/remove - * entries for the grantees, or, if admin_opt is set, then - * just add/remove the admin option. + * Step through all of the granted roles and add/remove entries for the + * grantees, or, if admin_opt is set, then just add/remove the admin + * option. * * Note: Permissions checking is done by AddRoleMems/DelRoleMems */ foreach(item, stmt->granted_roles) { - char *rolename = strVal(lfirst(item)); - Oid roleid = get_roleid_checked(rolename); + char *rolename = strVal(lfirst(item)); + Oid roleid = get_roleid_checked(rolename); if (stmt->is_grant) AddRoleMems(rolename, roleid, @@ -1132,8 +1132,8 @@ roleNamesToIds(List *memberNames) foreach(l, memberNames) { - char *rolename = strVal(lfirst(l)); - Oid roleid = get_roleid_checked(rolename); + char *rolename = strVal(lfirst(l)); + Oid roleid = get_roleid_checked(rolename); result = lappend_oid(result, roleid); } @@ -1160,8 +1160,8 @@ AddRoleMems(const char *rolename, Oid roleid, { Relation pg_authmem_rel; TupleDesc pg_authmem_dsc; - ListCell *nameitem; - ListCell *iditem; + ListCell *nameitem; + ListCell *iditem; Assert(list_length(memberNames) == list_length(memberIds)); @@ -1170,9 +1170,8 @@ AddRoleMems(const char *rolename, Oid roleid, return; /* - * Check permissions: must have createrole or admin option on the - * role to be changed. To mess with a superuser role, you gotta - * be superuser. + * Check permissions: must have createrole or admin option on the role to + * be changed. To mess with a superuser role, you gotta be superuser. 
*/ if (superuser_arg(roleid)) { @@ -1207,32 +1206,32 @@ AddRoleMems(const char *rolename, Oid roleid, Oid memberid = lfirst_oid(iditem); HeapTuple authmem_tuple; HeapTuple tuple; - Datum new_record[Natts_pg_auth_members]; - char new_record_nulls[Natts_pg_auth_members]; - char new_record_repl[Natts_pg_auth_members]; + Datum new_record[Natts_pg_auth_members]; + char new_record_nulls[Natts_pg_auth_members]; + char new_record_repl[Natts_pg_auth_members]; /* * Refuse creation of membership loops, including the trivial case - * where a role is made a member of itself. We do this by checking - * to see if the target role is already a member of the proposed - * member role. + * where a role is made a member of itself. We do this by checking to + * see if the target role is already a member of the proposed member + * role. */ if (is_member_of_role(roleid, memberid)) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), - (errmsg("role \"%s\" is a member of role \"%s\"", - rolename, membername)))); + (errmsg("role \"%s\" is a member of role \"%s\"", + rolename, membername)))); /* - * Check if entry for this role/member already exists; - * if so, give warning unless we are adding admin option. + * Check if entry for this role/member already exists; if so, give + * warning unless we are adding admin option. */ authmem_tuple = SearchSysCache(AUTHMEMROLEMEM, ObjectIdGetDatum(roleid), ObjectIdGetDatum(memberid), 0, 0); if (HeapTupleIsValid(authmem_tuple) && - (!admin_opt || + (!admin_opt || ((Form_pg_auth_members) GETSTRUCT(authmem_tuple))->admin_option)) { ereport(NOTICE, @@ -1301,8 +1300,8 @@ DelRoleMems(const char *rolename, Oid roleid, { Relation pg_authmem_rel; TupleDesc pg_authmem_dsc; - ListCell *nameitem; - ListCell *iditem; + ListCell *nameitem; + ListCell *iditem; Assert(list_length(memberNames) == list_length(memberIds)); @@ -1311,9 +1310,8 @@ DelRoleMems(const char *rolename, Oid roleid, return; /* - * Check permissions: must have createrole or admin option on the - * role to be changed. To mess with a superuser role, you gotta - * be superuser. + * Check permissions: must have createrole or admin option on the role to + * be changed. To mess with a superuser role, you gotta be superuser. 
*/ if (superuser_arg(roleid)) { @@ -1366,9 +1364,9 @@ DelRoleMems(const char *rolename, Oid roleid, { /* Just turn off the admin option */ HeapTuple tuple; - Datum new_record[Natts_pg_auth_members]; - char new_record_nulls[Natts_pg_auth_members]; - char new_record_repl[Natts_pg_auth_members]; + Datum new_record[Natts_pg_auth_members]; + char new_record_nulls[Natts_pg_auth_members]; + char new_record_repl[Natts_pg_auth_members]; /* Build a tuple to update with */ MemSet(new_record, 0, sizeof(new_record)); diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 4f9eb192123..506eb23e707 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.316 2005/10/03 22:52:21 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.317 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -198,7 +198,7 @@ static TransactionId FreezeLimit; /* non-export function prototypes */ static List *get_rel_oids(List *relids, const RangeVar *vacrel, - const char *stmttype); + const char *stmttype); static void vac_update_dbstats(Oid dbid, TransactionId vacuumXID, TransactionId frozenXID); @@ -281,17 +281,16 @@ vacuum(VacuumStmt *vacstmt, List *relids) elevel = DEBUG2; /* - * We cannot run VACUUM inside a user transaction block; if we were - * inside a transaction, then our commit- and - * start-transaction-command calls would not have the intended effect! - * Furthermore, the forced commit that occurs before truncating the - * relation's file would have the effect of committing the rest of the - * user's transaction too, which would certainly not be the desired - * behavior. (This only applies to VACUUM FULL, though. We could in - * theory run lazy VACUUM inside a transaction block, but we choose to - * disallow that case because we'd rather commit as soon as possible - * after finishing the vacuum. This is mainly so that we can let go - * the AccessExclusiveLock that we may be holding.) + * We cannot run VACUUM inside a user transaction block; if we were inside + * a transaction, then our commit- and start-transaction-command calls + * would not have the intended effect! Furthermore, the forced commit that + * occurs before truncating the relation's file would have the effect of + * committing the rest of the user's transaction too, which would + * certainly not be the desired behavior. (This only applies to VACUUM + * FULL, though. We could in theory run lazy VACUUM inside a transaction + * block, but we choose to disallow that case because we'd rather commit + * as soon as possible after finishing the vacuum. This is mainly so that + * we can let go the AccessExclusiveLock that we may be holding.) * * ANALYZE (without VACUUM) can run either way. */ @@ -306,16 +305,16 @@ vacuum(VacuumStmt *vacstmt, List *relids) /* * Disallow the combination VACUUM FULL FREEZE; although it would mostly * work, VACUUM FULL's ability to move tuples around means that it is - * injecting its own XID into tuple visibility checks. We'd have to + * injecting its own XID into tuple visibility checks. We'd have to * guarantee that every moved tuple is properly marked XMIN_COMMITTED or * XMIN_INVALID before the end of the operation. There are corner cases - * where this does not happen, and getting rid of them all seems hard - * (not to mention fragile to maintain). 
On the whole it's not worth it + * where this does not happen, and getting rid of them all seems hard (not + * to mention fragile to maintain). On the whole it's not worth it * compared to telling people to use two operations. See pgsql-hackers * discussion of 27-Nov-2004, and comments below for update_hint_bits(). * - * Note: this is enforced here, and not in the grammar, since (a) we can - * give a better error message, and (b) we might want to allow it again + * Note: this is enforced here, and not in the grammar, since (a) we can give + * a better error message, and (b) we might want to allow it again * someday. */ if (vacstmt->vacuum && vacstmt->full && vacstmt->freeze) @@ -333,9 +332,8 @@ vacuum(VacuumStmt *vacstmt, List *relids) /* * Create special memory context for cross-transaction storage. * - * Since it is a child of PortalContext, it will go away eventually even - * if we suffer an error; there's no need for special abort cleanup - * logic. + * Since it is a child of PortalContext, it will go away eventually even if + * we suffer an error; there's no need for special abort cleanup logic. */ vac_context = AllocSetContextCreate(PortalContext, "Vacuum", @@ -347,8 +345,8 @@ vacuum(VacuumStmt *vacstmt, List *relids) all_rels = (relids == NIL && vacstmt->relation == NULL); /* - * Build list of relations to process, unless caller gave us one. - * (If we build one, we put it in vac_context for safekeeping.) + * Build list of relations to process, unless caller gave us one. (If we + * build one, we put it in vac_context for safekeeping.) */ relations = get_rel_oids(relids, vacstmt->relation, stmttype); @@ -357,21 +355,21 @@ vacuum(VacuumStmt *vacstmt, List *relids) /* * It's a database-wide VACUUM. * - * Compute the initially applicable OldestXmin and FreezeLimit XIDs, - * so that we can record these values at the end of the VACUUM. - * Note that individual tables may well be processed with newer - * values, but we can guarantee that no (non-shared) relations are - * processed with older ones. + * Compute the initially applicable OldestXmin and FreezeLimit XIDs, so + * that we can record these values at the end of the VACUUM. Note that + * individual tables may well be processed with newer values, but we + * can guarantee that no (non-shared) relations are processed with + * older ones. * - * It is okay to record non-shared values in pg_database, even though - * we may vacuum shared relations with older cutoffs, because only - * the minimum of the values present in pg_database matters. We - * can be sure that shared relations have at some time been - * vacuumed with cutoffs no worse than the global minimum; for, if - * there is a backend in some other DB with xmin = OLDXMIN that's - * determining the cutoff with which we vacuum shared relations, - * it is not possible for that database to have a cutoff newer - * than OLDXMIN recorded in pg_database. + * It is okay to record non-shared values in pg_database, even though we + * may vacuum shared relations with older cutoffs, because only the + * minimum of the values present in pg_database matters. We can be + * sure that shared relations have at some time been vacuumed with + * cutoffs no worse than the global minimum; for, if there is a + * backend in some other DB with xmin = OLDXMIN that's determining the + * cutoff with which we vacuum shared relations, it is not possible + * for that database to have a cutoff newer than OLDXMIN recorded in + * pg_database. 
*/ vacuum_set_xid_limits(vacstmt, false, &initialOldestXmin, @@ -381,16 +379,15 @@ vacuum(VacuumStmt *vacstmt, List *relids) /* * Decide whether we need to start/commit our own transactions. * - * For VACUUM (with or without ANALYZE): always do so, so that we can - * release locks as soon as possible. (We could possibly use the - * outer transaction for a one-table VACUUM, but handling TOAST tables - * would be problematic.) + * For VACUUM (with or without ANALYZE): always do so, so that we can release + * locks as soon as possible. (We could possibly use the outer + * transaction for a one-table VACUUM, but handling TOAST tables would be + * problematic.) * * For ANALYZE (no VACUUM): if inside a transaction block, we cannot - * start/commit our own transactions. Also, there's no need to do so - * if only processing one relation. For multiple relations when not - * within a transaction block, use own transactions so we can release - * locks sooner. + * start/commit our own transactions. Also, there's no need to do so if + * only processing one relation. For multiple relations when not within a + * transaction block, use own transactions so we can release locks sooner. */ if (vacstmt->vacuum) use_own_xacts = true; @@ -406,8 +403,8 @@ vacuum(VacuumStmt *vacstmt, List *relids) } /* - * If we are running ANALYZE without per-table transactions, we'll - * need a memory context with table lifetime. + * If we are running ANALYZE without per-table transactions, we'll need a + * memory context with table lifetime. */ if (!use_own_xacts) anl_context = AllocSetContextCreate(PortalContext, @@ -417,12 +414,12 @@ vacuum(VacuumStmt *vacstmt, List *relids) ALLOCSET_DEFAULT_MAXSIZE); /* - * vacuum_rel expects to be entered with no transaction active; it - * will start and commit its own transaction. But we are called by an - * SQL command, and so we are executing inside a transaction already. - * We commit the transaction started in PostgresMain() here, and start - * another one before exiting to match the commit waiting for us back - * in PostgresMain(). + * vacuum_rel expects to be entered with no transaction active; it will + * start and commit its own transaction. But we are called by an SQL + * command, and so we are executing inside a transaction already. We + * commit the transaction started in PostgresMain() here, and start + * another one before exiting to match the commit waiting for us back in + * PostgresMain(). */ if (use_own_xacts) { @@ -455,11 +452,11 @@ vacuum(VacuumStmt *vacstmt, List *relids) MemoryContext old_context = NULL; /* - * If using separate xacts, start one for analyze. - * Otherwise, we can use the outer transaction, but we - * still need to call analyze_rel in a memory context that - * will be cleaned up on return (else we leak memory while - * processing multiple tables). + * If using separate xacts, start one for analyze. Otherwise, + * we can use the outer transaction, but we still need to call + * analyze_rel in a memory context that will be cleaned up on + * return (else we leak memory while processing multiple + * tables). 
*/ if (use_own_xacts) { @@ -471,8 +468,8 @@ vacuum(VacuumStmt *vacstmt, List *relids) old_context = MemoryContextSwitchTo(anl_context); /* - * Tell the buffer replacement strategy that vacuum is - * causing the IO + * Tell the buffer replacement strategy that vacuum is causing + * the IO */ StrategyHintVacuum(true); @@ -518,16 +515,16 @@ vacuum(VacuumStmt *vacstmt, List *relids) if (vacstmt->vacuum) { /* - * If it was a database-wide VACUUM, print FSM usage statistics - * (we don't make you be superuser to see these). + * If it was a database-wide VACUUM, print FSM usage statistics (we + * don't make you be superuser to see these). */ if (all_rels) PrintFreeSpaceMapStatistics(elevel); /* * If we completed a database-wide VACUUM without skipping any - * relations, update the database's pg_database row with info - * about the transaction IDs used, and try to truncate pg_clog. + * relations, update the database's pg_database row with info about + * the transaction IDs used, and try to truncate pg_clog. */ if (all_rels) { @@ -539,8 +536,8 @@ vacuum(VacuumStmt *vacstmt, List *relids) /* * Clean up working storage --- note we must do this after - * StartTransactionCommand, else we might be trying to delete the - * active context! + * StartTransactionCommand, else we might be trying to delete the active + * context! */ MemoryContextDelete(vac_context); vac_context = NULL; @@ -725,10 +722,10 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples, LockBuffer(buffer, BUFFER_LOCK_UNLOCK); /* - * Invalidate the tuple in the catcaches; this also arranges to flush - * the relation's relcache entry. (If we fail to commit for some - * reason, no flush will occur, but no great harm is done since there - * are no noncritical state updates here.) + * Invalidate the tuple in the catcaches; this also arranges to flush the + * relation's relcache entry. (If we fail to commit for some reason, no + * flush will occur, but no great harm is done since there are no + * noncritical state updates here.) */ CacheInvalidateHeapTuple(rd, &rtup); @@ -878,8 +875,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID) heap_close(relation, AccessShareLock); /* - * Do not truncate CLOG if we seem to have suffered wraparound - * already; the computed minimum XID might be bogus. + * Do not truncate CLOG if we seem to have suffered wraparound already; + * the computed minimum XID might be bogus. */ if (vacuumAlreadyWrapped) { @@ -893,8 +890,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID) TruncateCLOG(vacuumXID); /* - * Do not update varsup.c if we seem to have suffered wraparound - * already; the computed XID might be bogus. + * Do not update varsup.c if we seem to have suffered wraparound already; + * the computed XID might be bogus. 
*/ if (frozenAlreadyWrapped) { @@ -911,11 +908,11 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID) age = (int32) (myXID - frozenXID); if (age > (int32) ((MaxTransactionId >> 3) * 3)) ereport(WARNING, - (errmsg("database \"%s\" must be vacuumed within %u transactions", - NameStr(oldest_datname), - (MaxTransactionId >> 1) - age), - errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".", - NameStr(oldest_datname)))); + (errmsg("database \"%s\" must be vacuumed within %u transactions", + NameStr(oldest_datname), + (MaxTransactionId >> 1) - age), + errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".", + NameStr(oldest_datname)))); } @@ -970,8 +967,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) CHECK_FOR_INTERRUPTS(); /* - * Race condition -- if the pg_class tuple has gone away since the - * last time we saw it, we don't need to vacuum it. + * Race condition -- if the pg_class tuple has gone away since the last + * time we saw it, we don't need to vacuum it. */ if (!SearchSysCacheExists(RELOID, ObjectIdGetDatum(relid), @@ -983,24 +980,22 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) } /* - * Determine the type of lock we want --- hard exclusive lock for a - * FULL vacuum, but just ShareUpdateExclusiveLock for concurrent - * vacuum. Either way, we can be sure that no other backend is - * vacuuming the same table. + * Determine the type of lock we want --- hard exclusive lock for a FULL + * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. + * Either way, we can be sure that no other backend is vacuuming the same + * table. */ lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock; /* - * Open the class, get an appropriate lock on it, and check - * permissions. + * Open the class, get an appropriate lock on it, and check permissions. * - * We allow the user to vacuum a table if he is superuser, the table - * owner, or the database owner (but in the latter case, only if it's - * not a shared relation). pg_class_ownercheck includes the superuser - * case. + * We allow the user to vacuum a table if he is superuser, the table owner, + * or the database owner (but in the latter case, only if it's not a + * shared relation). pg_class_ownercheck includes the superuser case. * - * Note we choose to treat permissions failure as a WARNING and keep - * trying to vacuum the rest of the DB --- is this appropriate? + * Note we choose to treat permissions failure as a WARNING and keep trying + * to vacuum the rest of the DB --- is this appropriate? */ onerel = relation_open(relid, lmode); @@ -1017,8 +1012,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) } /* - * Check that it's a plain table; we used to do this in get_rel_oids() - * but seems safer to check after we've locked the relation. + * Check that it's a plain table; we used to do this in get_rel_oids() but + * seems safer to check after we've locked the relation. */ if (onerel->rd_rel->relkind != expected_relkind) { @@ -1043,15 +1038,14 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) relation_close(onerel, lmode); StrategyHintVacuum(false); CommitTransactionCommand(); - return true; /* assume no long-lived data in temp - * tables */ + return true; /* assume no long-lived data in temp tables */ } /* * Get a session-level lock too. 
This will protect our access to the * relation across multiple transactions, so that we can vacuum the - * relation's TOAST table (if any) secure in the knowledge that no one - * is deleting the parent relation. + * relation's TOAST table (if any) secure in the knowledge that no one is + * deleting the parent relation. * * NOTE: this cannot block, even if someone else is waiting for access, * because the lock manager knows that both lock requests are from the @@ -1087,9 +1081,9 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) /* * If the relation has a secondary toast rel, vacuum that too while we * still hold the session lock on the master table. Note however that - * "analyze" will not get done on the toast table. This is good, - * because the toaster always uses hardcoded index access and - * statistics are totally unimportant for toast relations. + * "analyze" will not get done on the toast table. This is good, because + * the toaster always uses hardcoded index access and statistics are + * totally unimportant for toast relations. */ if (toast_relid != InvalidOid) { @@ -1128,8 +1122,8 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) { VacPageListData vacuum_pages; /* List of pages to vacuum and/or * clean indexes */ - VacPageListData fraged_pages; /* List of pages with space enough - * for re-using */ + VacPageListData fraged_pages; /* List of pages with space enough for + * re-using */ Relation *Irel; int nindexes, i; @@ -1198,7 +1192,7 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) /* report results to the stats collector, too */ pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared, - vacstmt->analyze, vacrelstats->rel_tuples); + vacstmt->analyze, vacrelstats->rel_tuples); } @@ -1275,11 +1269,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, /* * Since we are holding exclusive lock on the relation, no other - * backend can be accessing the page; however it is possible that - * the background writer will try to write the page if it's already - * marked dirty. To ensure that invalid data doesn't get written to - * disk, we must take exclusive buffer lock wherever we potentially - * modify pages. + * backend can be accessing the page; however it is possible that the + * background writer will try to write the page if it's already marked + * dirty. To ensure that invalid data doesn't get written to disk, we + * must take exclusive buffer lock wherever we potentially modify + * pages. */ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); @@ -1292,8 +1286,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, VacPage vacpagecopy; ereport(WARNING, - (errmsg("relation \"%s\" page %u is uninitialized --- fixing", - relname, blkno))); + (errmsg("relation \"%s\" page %u is uninitialized --- fixing", + relname, blkno))); PageInit(page, BufferGetPageSize(buf), 0); vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower; free_space += vacpage->free; @@ -1357,8 +1351,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, case HEAPTUPLE_LIVE: /* - * Tuple is good. Consider whether to replace its - * xmin value with FrozenTransactionId. + * Tuple is good. Consider whether to replace its xmin + * value with FrozenTransactionId. 
*/ if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) && TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data), @@ -1381,15 +1375,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, case HEAPTUPLE_RECENTLY_DEAD: /* - * If tuple is recently deleted then we must not - * remove it from relation. + * If tuple is recently deleted then we must not remove it + * from relation. */ nkeep += 1; /* - * If we do shrinking and this tuple is updated one - * then remember it to construct updated tuple - * dependencies. + * If we do shrinking and this tuple is updated one then + * remember it to construct updated tuple dependencies. */ if (do_shrinking && !(ItemPointerEquals(&(tuple.t_self), @@ -1399,8 +1392,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, { free_vtlinks = 1000; vtlinks = (VTupleLink) repalloc(vtlinks, - (free_vtlinks + num_vtlinks) * - sizeof(VTupleLinkData)); + (free_vtlinks + num_vtlinks) * + sizeof(VTupleLinkData)); } vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid; vtlinks[num_vtlinks].this_tid = tuple.t_self; @@ -1411,10 +1404,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, case HEAPTUPLE_INSERT_IN_PROGRESS: /* - * This should not happen, since we hold exclusive - * lock on the relation; shouldn't we raise an error? - * (Actually, it can happen in system catalogs, since - * we tend to release write lock before commit there.) + * This should not happen, since we hold exclusive lock on + * the relation; shouldn't we raise an error? (Actually, + * it can happen in system catalogs, since we tend to + * release write lock before commit there.) */ ereport(NOTICE, (errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation", @@ -1424,10 +1417,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, case HEAPTUPLE_DELETE_IN_PROGRESS: /* - * This should not happen, since we hold exclusive - * lock on the relation; shouldn't we raise an error? - * (Actually, it can happen in system catalogs, since - * we tend to release write lock before commit there.) + * This should not happen, since we hold exclusive lock on + * the relation; shouldn't we raise an error? (Actually, + * it can happen in system catalogs, since we tend to + * release write lock before commit there.) */ ereport(NOTICE, (errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation", @@ -1444,12 +1437,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ItemId lpp; /* - * Here we are building a temporary copy of the page with - * dead tuples removed. Below we will apply + * Here we are building a temporary copy of the page with dead + * tuples removed. Below we will apply * PageRepairFragmentation to the copy, so that we can - * determine how much space will be available after - * removal of dead tuples. But note we are NOT changing - * the real page yet... + * determine how much space will be available after removal of + * dead tuples. But note we are NOT changing the real page + * yet... */ if (tempPage == NULL) { @@ -1499,8 +1492,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, /* * Add the page to fraged_pages if it has a useful amount of free * space. "Useful" means enough for a minimal-sized tuple. But we - * don't know that accurately near the start of the relation, so - * add pages unconditionally if they have >= BLCKSZ/10 free space. + * don't know that accurately near the start of the relation, so add + * pages unconditionally if they have >= BLCKSZ/10 free space. 
*/ do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10); @@ -1516,8 +1509,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, /* * Include the page in empty_end_pages if it will be empty after - * vacuuming; this is to keep us from using it as a move - * destination. + * vacuuming; this is to keep us from using it as a move destination. */ if (notup) { @@ -1588,11 +1580,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, RelationGetRelationName(onerel), tups_vacuumed, num_tuples, nblocks), errdetail("%.0f dead row versions cannot be removed yet.\n" - "Nonremovable row versions range from %lu to %lu bytes long.\n" + "Nonremovable row versions range from %lu to %lu bytes long.\n" "There were %.0f unused item pointers.\n" - "Total free space (including removable row versions) is %.0f bytes.\n" + "Total free space (including removable row versions) is %.0f bytes.\n" "%u pages are or will become empty, including %u at the end of the table.\n" - "%u pages containing %.0f free bytes are potential move destinations.\n" + "%u pages containing %.0f free bytes are potential move destinations.\n" "%s.", nkeep, (unsigned long) min_tlen, (unsigned long) max_tlen, @@ -1663,14 +1655,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, vacpage->offsets_used = vacpage->offsets_free = 0; /* - * Scan pages backwards from the last nonempty page, trying to move - * tuples down to lower pages. Quit when we reach a page that we have - * moved any tuples onto, or the first page if we haven't moved - * anything, or when we find a page we cannot completely empty (this - * last condition is handled by "break" statements within the loop). + * Scan pages backwards from the last nonempty page, trying to move tuples + * down to lower pages. Quit when we reach a page that we have moved any + * tuples onto, or the first page if we haven't moved anything, or when we + * find a page we cannot completely empty (this last condition is handled + * by "break" statements within the loop). * - * NB: this code depends on the vacuum_pages and fraged_pages lists being - * in order by blkno. + * NB: this code depends on the vacuum_pages and fraged_pages lists being in + * order by blkno. */ nblocks = vacrelstats->rel_pages; for (blkno = nblocks - vacuum_pages->empty_end_pages - 1; @@ -1688,18 +1680,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, vacuum_delay_point(); /* - * Forget fraged_pages pages at or after this one; they're no - * longer useful as move targets, since we only want to move down. - * Note that since we stop the outer loop at last_move_dest_block, - * pages removed here cannot have had anything moved onto them - * already. + * Forget fraged_pages pages at or after this one; they're no longer + * useful as move targets, since we only want to move down. Note that + * since we stop the outer loop at last_move_dest_block, pages removed + * here cannot have had anything moved onto them already. * - * Also note that we don't change the stored fraged_pages list, only - * our local variable num_fraged_pages; so the forgotten pages are - * still available to be loaded into the free space map later. + * Also note that we don't change the stored fraged_pages list, only our + * local variable num_fraged_pages; so the forgotten pages are still + * available to be loaded into the free space map later. 
*/ while (num_fraged_pages > 0 && - fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno) + fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno) { Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0); --num_fraged_pages; @@ -1752,8 +1743,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, else Assert(!isempty); - chain_tuple_moved = false; /* no one chain-tuple was moved - * off this page, yet */ + chain_tuple_moved = false; /* no one chain-tuple was moved off + * this page, yet */ vacpage->blkno = blkno; maxoff = PageGetMaxOffsetNumber(page); for (offnum = FirstOffsetNumber; @@ -1807,9 +1798,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, elog(ERROR, "invalid XVAC in tuple header"); /* - * If this (chain) tuple is moved by me already then I - * have to check is it in vacpage or not - i.e. is it - * moved while cleaning this page or some previous one. + * If this (chain) tuple is moved by me already then I have to + * check is it in vacpage or not - i.e. is it moved while + * cleaning this page or some previous one. */ /* Can't we Assert(keep_tuples > 0) here? */ @@ -1839,34 +1830,33 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, } /* - * If this tuple is in a chain of tuples created in updates - * by "recent" transactions then we have to move the whole chain - * of tuples to other places, so that we can write new t_ctid - * links that preserve the chain relationship. + * If this tuple is in a chain of tuples created in updates by + * "recent" transactions then we have to move the whole chain of + * tuples to other places, so that we can write new t_ctid links + * that preserve the chain relationship. * * This test is complicated. Read it as "if tuple is a recently - * created updated version, OR if it is an obsoleted version". - * (In the second half of the test, we needn't make any check - * on XMAX --- it must be recently obsoleted, else scan_heap - * would have deemed it removable.) + * created updated version, OR if it is an obsoleted version". (In + * the second half of the test, we needn't make any check on XMAX + * --- it must be recently obsoleted, else scan_heap would have + * deemed it removable.) * - * NOTE: this test is not 100% accurate: it is possible for a - * tuple to be an updated one with recent xmin, and yet not - * match any new_tid entry in the vtlinks list. Presumably - * there was once a parent tuple with xmax matching the xmin, - * but it's possible that that tuple has been removed --- for - * example, if it had xmin = xmax and wasn't itself an updated - * version, then HeapTupleSatisfiesVacuum would deem it removable - * as soon as the xmin xact completes. + * NOTE: this test is not 100% accurate: it is possible for a tuple + * to be an updated one with recent xmin, and yet not match any + * new_tid entry in the vtlinks list. Presumably there was once a + * parent tuple with xmax matching the xmin, but it's possible + * that that tuple has been removed --- for example, if it had + * xmin = xmax and wasn't itself an updated version, then + * HeapTupleSatisfiesVacuum would deem it removable as soon as the + * xmin xact completes. * - * To be on the safe side, we abandon the repair_frag process if - * we cannot find the parent tuple in vtlinks. This may be - * overly conservative; AFAICS it would be safe to move the - * chain. + * To be on the safe side, we abandon the repair_frag process if we + * cannot find the parent tuple in vtlinks. 
This may be overly + * conservative; AFAICS it would be safe to move the chain. */ if (((tuple.t_data->t_infomask & HEAP_UPDATED) && - !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data), - OldestXmin)) || + !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data), + OldestXmin)) || (!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) && !(ItemPointerEquals(&(tuple.t_self), @@ -1899,10 +1889,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, } /* - * If this tuple is in the begin/middle of the chain then - * we have to move to the end of chain. As with any - * t_ctid chase, we have to verify that each new tuple - * is really the descendant of the tuple we came from. + * If this tuple is in the begin/middle of the chain then we + * have to move to the end of chain. As with any t_ctid + * chase, we have to verify that each new tuple is really the + * descendant of the tuple we came from. */ while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) && @@ -1963,9 +1953,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, free_vtmove = 100; /* - * Now, walk backwards up the chain (towards older tuples) - * and check if all items in chain can be moved. We record - * all the moves that need to be made in the vtmove array. + * Now, walk backwards up the chain (towards older tuples) and + * check if all items in chain can be moved. We record all + * the moves that need to be made in the vtmove array. */ for (;;) { @@ -2020,9 +2010,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* Done if at beginning of chain */ if (!(tp.t_data->t_infomask & HEAP_UPDATED) || - TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data), - OldestXmin)) - break; /* out of check-all-items loop */ + TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data), + OldestXmin)) + break; /* out of check-all-items loop */ /* Move to tuple with prior row version */ vtld.new_tid = tp.t_self; @@ -2041,10 +2031,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, } tp.t_self = vtlp->this_tid; Pbuf = ReadBuffer(onerel, - ItemPointerGetBlockNumber(&(tp.t_self))); + ItemPointerGetBlockNumber(&(tp.t_self))); Ppage = BufferGetPage(Pbuf); Pitemid = PageGetItemId(Ppage, - ItemPointerGetOffsetNumber(&(tp.t_self))); + ItemPointerGetOffsetNumber(&(tp.t_self))); /* this can't happen since we saw tuple earlier: */ if (!ItemIdIsUsed(Pitemid)) elog(ERROR, "parent itemid marked as unused"); @@ -2056,19 +2046,18 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* * Read above about cases when !ItemIdIsUsed(nextItemid) - * (child item is removed)... Due to the fact that at - * the moment we don't remove unuseful part of - * update-chain, it's possible to get non-matching parent - * row here. Like as in the case which caused this - * problem, we stop shrinking here. I could try to - * find real parent row but want not to do it because - * of real solution will be implemented anyway, later, - * and we are too close to 6.5 release. - vadim - * 06/11/99 + * (child item is removed)... Due to the fact that at the + * moment we don't remove unuseful part of update-chain, + * it's possible to get non-matching parent row here. Like + * as in the case which caused this problem, we stop + * shrinking here. I could try to find real parent row but + * want not to do it because of real solution will be + * implemented anyway, later, and we are too close to 6.5 + * release. 
- vadim 06/11/99 */ if ((PTdata->t_infomask & HEAP_XMAX_IS_MULTI) || !(TransactionIdEquals(HeapTupleHeaderGetXmax(PTdata), - HeapTupleHeaderGetXmin(tp.t_data)))) + HeapTupleHeaderGetXmin(tp.t_data)))) { ReleaseBuffer(Pbuf); elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag"); @@ -2091,9 +2080,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, if (chain_move_failed) { /* - * Undo changes to offsets_used state. We don't - * bother cleaning up the amount-free state, since - * we're not going to do any further tuple motion. + * Undo changes to offsets_used state. We don't bother + * cleaning up the amount-free state, since we're not + * going to do any further tuple motion. */ for (i = 0; i < num_vtmove; i++) { @@ -2119,7 +2108,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* Get page to move from */ tuple.t_self = vtmove[ti].tid; Cbuf = ReadBuffer(onerel, - ItemPointerGetBlockNumber(&(tuple.t_self))); + ItemPointerGetBlockNumber(&(tuple.t_self))); /* Get page to move to */ dst_buffer = ReadBuffer(onerel, destvacpage->blkno); @@ -2132,7 +2121,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, Cpage = BufferGetPage(Cbuf); Citemid = PageGetItemId(Cpage, - ItemPointerGetOffsetNumber(&(tuple.t_self))); + ItemPointerGetOffsetNumber(&(tuple.t_self))); tuple.t_datamcxt = NULL; tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid); tuple_len = tuple.t_len = ItemIdGetLength(Citemid); @@ -2211,18 +2200,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, } /* walk along page */ /* - * If we broke out of the walk-along-page loop early (ie, still - * have offnum <= maxoff), then we failed to move some tuple off - * this page. No point in shrinking any more, so clean up and - * exit the per-page loop. + * If we broke out of the walk-along-page loop early (ie, still have + * offnum <= maxoff), then we failed to move some tuple off this page. + * No point in shrinking any more, so clean up and exit the per-page + * loop. */ if (offnum < maxoff && keep_tuples > 0) { OffsetNumber off; /* - * Fix vacpage state for any unvisited tuples remaining on - * page + * Fix vacpage state for any unvisited tuples remaining on page */ for (off = OffsetNumberNext(offnum); off <= maxoff; @@ -2238,8 +2226,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, continue; /* - * See comments in the walk-along-page loop above about - * why only MOVED_OFF tuples should be found here. + * See comments in the walk-along-page loop above about why + * only MOVED_OFF tuples should be found here. */ if (htup->t_infomask & HEAP_MOVED_IN) elog(ERROR, "HEAP_MOVED_IN was not expected"); @@ -2307,20 +2295,20 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, * We have to commit our tuple movings before we truncate the * relation. Ideally we should do Commit/StartTransactionCommand * here, relying on the session-level table lock to protect our - * exclusive access to the relation. However, that would require - * a lot of extra code to close and re-open the relation, indexes, - * etc. For now, a quick hack: record status of current - * transaction as committed, and continue. + * exclusive access to the relation. However, that would require a + * lot of extra code to close and re-open the relation, indexes, etc. + * For now, a quick hack: record status of current transaction as + * committed, and continue. 
*/ RecordTransactionCommit(); } /* * We are not going to move any more tuples across pages, but we still - * need to apply vacuum_page to compact free space in the remaining - * pages in vacuum_pages list. Note that some of these pages may also - * be in the fraged_pages list, and may have had tuples moved onto - * them; if so, we already did vacuum_page and needn't do it again. + * need to apply vacuum_page to compact free space in the remaining pages + * in vacuum_pages list. Note that some of these pages may also be in the + * fraged_pages list, and may have had tuples moved onto them; if so, we + * already did vacuum_page and needn't do it again. */ for (i = 0, curpage = vacuum_pages->pagedesc; i < vacuumed_pages; @@ -2354,17 +2342,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, last_move_dest_block, num_moved); /* - * It'd be cleaner to make this report at the bottom of this routine, - * but then the rusage would double-count the second pass of index - * vacuuming. So do it here and ignore the relatively small amount of - * processing that occurs below. + * It'd be cleaner to make this report at the bottom of this routine, but + * then the rusage would double-count the second pass of index vacuuming. + * So do it here and ignore the relatively small amount of processing that + * occurs below. */ ereport(elevel, - (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages", - RelationGetRelationName(onerel), - num_moved, nblocks, blkno), - errdetail("%s.", - pg_rusage_show(&ru0)))); + (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages", + RelationGetRelationName(onerel), + num_moved, nblocks, blkno), + errdetail("%s.", + pg_rusage_show(&ru0)))); /* * Reflect the motion of system tuples to catalog cache here. @@ -2382,7 +2370,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* re-sort Nvacpagelist.pagedesc */ for (vpleft = Nvacpagelist.pagedesc, - vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1; + vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1; vpleft < vpright; vpleft++, vpright--) { vpsave = *vpleft; @@ -2391,11 +2379,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, } /* - * keep_tuples is the number of tuples that have been moved - * off a page during chain moves but not been scanned over - * subsequently. The tuple ids of these tuples are not - * recorded as free offsets for any VacPage, so they will not - * be cleared from the indexes. + * keep_tuples is the number of tuples that have been moved off a + * page during chain moves but not been scanned over subsequently. + * The tuple ids of these tuples are not recorded as free offsets + * for any VacPage, so they will not be cleared from the indexes. */ Assert(keep_tuples >= 0); for (i = 0; i < nindexes; i++) @@ -2406,9 +2393,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* * Clean moved-off tuples from last page in Nvacpagelist list. * - * We need only do this in this one page, because higher-numbered - * pages are going to be truncated from the relation entirely. - * But see comments for update_hint_bits(). + * We need only do this in this one page, because higher-numbered pages + * are going to be truncated from the relation entirely. But see + * comments for update_hint_bits(). */ if (vacpage->blkno == (blkno - 1) && vacpage->offsets_free > 0) @@ -2439,8 +2426,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, continue; /* - * See comments in the walk-along-page loop above about - * why only MOVED_OFF tuples should be found here. 
+ * See comments in the walk-along-page loop above about why + * only MOVED_OFF tuples should be found here. */ if (htup->t_infomask & HEAP_MOVED_IN) elog(ERROR, "HEAP_MOVED_IN was not expected"); @@ -2470,8 +2457,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, else { /* - * No XLOG record, but still need to flag that XID exists - * on disk + * No XLOG record, but still need to flag that XID exists on + * disk */ MyXactMadeTempRelUpdate = true; } @@ -2554,20 +2541,20 @@ move_chain_tuple(Relation rel, /* * If this page was not used before - clean it. * - * NOTE: a nasty bug used to lurk here. It is possible for the source - * and destination pages to be the same (since this tuple-chain member - * can be on a page lower than the one we're currently processing in - * the outer loop). If that's true, then after vacuum_page() the - * source tuple will have been moved, and tuple.t_data will be - * pointing at garbage. Therefore we must do everything that uses - * old_tup->t_data BEFORE this step!! + * NOTE: a nasty bug used to lurk here. It is possible for the source and + * destination pages to be the same (since this tuple-chain member can be + * on a page lower than the one we're currently processing in the outer + * loop). If that's true, then after vacuum_page() the source tuple will + * have been moved, and tuple.t_data will be pointing at garbage. + * Therefore we must do everything that uses old_tup->t_data BEFORE this + * step!! * - * This path is different from the other callers of vacuum_page, because - * we have already incremented the vacpage's offsets_used field to - * account for the tuple(s) we expect to move onto the page. Therefore - * vacuum_page's check for offsets_used == 0 is wrong. But since - * that's a good debugging check for all other callers, we work around - * it here rather than remove it. + * This path is different from the other callers of vacuum_page, because we + * have already incremented the vacpage's offsets_used field to account + * for the tuple(s) we expect to move onto the page. Therefore + * vacuum_page's check for offsets_used == 0 is wrong. But since that's a + * good debugging check for all other callers, we work around it here + * rather than remove it. */ if (!PageIsEmpty(dst_page) && cleanVpd) { @@ -2579,8 +2566,8 @@ move_chain_tuple(Relation rel, } /* - * Update the state of the copied tuple, and store it on the - * destination page. + * Update the state of the copied tuple, and store it on the destination + * page. */ newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | @@ -2601,9 +2588,9 @@ move_chain_tuple(Relation rel, ItemPointerSet(&(newtup.t_self), dst_vacpage->blkno, newoff); /* - * Set new tuple's t_ctid pointing to itself if last tuple in chain, - * and to next tuple in chain otherwise. (Since we move the chain - * in reverse order, this is actually the previously processed tuple.) + * Set new tuple's t_ctid pointing to itself if last tuple in chain, and + * to next tuple in chain otherwise. (Since we move the chain in reverse + * order, this is actually the previously processed tuple.) */ if (!ItemPointerIsValid(ctid)) newtup.t_data->t_ctid = newtup.t_self; @@ -2678,8 +2665,8 @@ move_plain_tuple(Relation rel, * register invalidation of source tuple in catcaches. * * (Note: we do not need to register the copied tuple, because we are not - * changing the tuple contents and so there cannot be any need to - * flush negative catcache entries.) 
+ * changing the tuple contents and so there cannot be any need to flush + * negative catcache entries.) */ CacheInvalidateHeapTuple(rel, old_tup); @@ -2957,9 +2944,9 @@ scan_index(Relation indrel, double num_tuples) /* * Even though we're not planning to delete anything, we use the - * ambulkdelete call, because (a) the scan happens within the index AM - * for more speed, and (b) it may want to pass private statistics to - * the amvacuumcleanup call. + * ambulkdelete call, because (a) the scan happens within the index AM for + * more speed, and (b) it may want to pass private statistics to the + * amvacuumcleanup call. */ stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL); @@ -2978,18 +2965,18 @@ scan_index(Relation indrel, double num_tuples) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%u index pages have been deleted, %u are currently reusable.\n" - "%s.", - stats->pages_deleted, stats->pages_free, - pg_rusage_show(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%u index pages have been deleted, %u are currently reusable.\n" + "%s.", + stats->pages_deleted, stats->pages_free, + pg_rusage_show(&ru0)))); /* - * Check for tuple count mismatch. If the index is partial, then it's - * OK for it to have fewer tuples than the heap; else we got trouble. + * Check for tuple count mismatch. If the index is partial, then it's OK + * for it to have fewer tuples than the heap; else we got trouble. */ if (stats->num_index_tuples != num_tuples) { @@ -3045,20 +3032,20 @@ vacuum_index(VacPageList vacpagelist, Relation indrel, false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%.0f index row versions were removed.\n" - "%u index pages have been deleted, %u are currently reusable.\n" - "%s.", - stats->tuples_removed, - stats->pages_deleted, stats->pages_free, - pg_rusage_show(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%.0f index row versions were removed.\n" + "%u index pages have been deleted, %u are currently reusable.\n" + "%s.", + stats->tuples_removed, + stats->pages_deleted, stats->pages_free, + pg_rusage_show(&ru0)))); /* - * Check for tuple count mismatch. If the index is partial, then it's - * OK for it to have fewer tuples than the heap; else we got trouble. + * Check for tuple count mismatch. If the index is partial, then it's OK + * for it to have fewer tuples than the heap; else we got trouble. 
*/ if (stats->num_index_tuples != num_tuples + keep_tuples) { @@ -3067,7 +3054,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel, ereport(WARNING, (errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions", RelationGetRelationName(indrel), - stats->num_index_tuples, num_tuples + keep_tuples), + stats->num_index_tuples, num_tuples + keep_tuples), errhint("Rebuild the index with REINDEX."))); } @@ -3152,14 +3139,13 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages, /* * We only report pages with free space at least equal to the average - * request size --- this avoids cluttering FSM with uselessly-small - * bits of space. Although FSM would discard pages with little free - * space anyway, it's important to do this prefiltering because (a) it - * reduces the time spent holding the FSM lock in - * RecordRelationFreeSpace, and (b) FSM uses the number of pages - * reported as a statistic for guiding space management. If we didn't - * threshold our reports the same way vacuumlazy.c does, we'd be - * skewing that statistic. + * request size --- this avoids cluttering FSM with uselessly-small bits + * of space. Although FSM would discard pages with little free space + * anyway, it's important to do this prefiltering because (a) it reduces + * the time spent holding the FSM lock in RecordRelationFreeSpace, and (b) + * FSM uses the number of pages reported as a statistic for guiding space + * management. If we didn't threshold our reports the same way + * vacuumlazy.c does, we'd be skewing that statistic. */ threshold = GetAvgFSMRequestSize(&onerel->rd_node); @@ -3170,9 +3156,9 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages, for (i = 0; i < nPages; i++) { /* - * fraged_pages may contain entries for pages that we later - * decided to truncate from the relation; don't enter them into - * the free space map! + * fraged_pages may contain entries for pages that we later decided to + * truncate from the relation; don't enter them into the free space + * map! */ if (pagedesc[i]->blkno >= rel_pages) break; @@ -3198,7 +3184,7 @@ copy_vac_page(VacPage vacpage) /* allocate a VacPageData entry */ newvacpage = (VacPage) palloc(sizeof(VacPageData) + - vacpage->offsets_free * sizeof(OffsetNumber)); + vacpage->offsets_free * sizeof(OffsetNumber)); /* fill it in */ if (vacpage->offsets_free > 0) @@ -3368,7 +3354,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode, } /* - * Release the resources acquired by vac_open_indexes. Optionally release + * Release the resources acquired by vac_open_indexes. Optionally release * the locks (say NoLock to keep 'em). 
*/ void @@ -3396,8 +3382,7 @@ bool vac_is_partial_index(Relation indrel) { /* - * If the index's AM doesn't support nulls, it's partial for our - * purposes + * If the index's AM doesn't support nulls, it's partial for our purposes */ if (!indrel->rd_am->amindexnulls) return true; diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 8a109237efc..7f276199015 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -31,7 +31,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.60 2005/10/03 22:52:22 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.61 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -67,7 +67,7 @@ typedef struct LVRelStats /* Overall statistics about rel */ BlockNumber rel_pages; double rel_tuples; - BlockNumber pages_removed; + BlockNumber pages_removed; double tuples_deleted; BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */ Size threshold; /* minimum interesting free space */ @@ -97,9 +97,9 @@ static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats); static void lazy_scan_index(Relation indrel, LVRelStats *vacrelstats); static void lazy_vacuum_index(Relation indrel, - double *index_tups_vacuumed, - BlockNumber *index_pages_removed, - LVRelStats *vacrelstats); + double *index_tups_vacuumed, + BlockNumber *index_pages_removed, + LVRelStats *vacrelstats); static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats); static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats); @@ -167,7 +167,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) */ possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages; if (possibly_freeable >= REL_TRUNCATE_MINIMUM || - possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) + possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) lazy_truncate_heap(onerel, vacrelstats); /* Update shared free space map with final free space info */ @@ -181,7 +181,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) /* report results to the stats collector, too */ pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared, - vacstmt->analyze, vacrelstats->rel_tuples); + vacstmt->analyze, vacrelstats->rel_tuples); } @@ -228,7 +228,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * track of the total number of rows and pages removed from each index. * index_tups_vacuumed[i] is the number removed so far from the i'th * index. (For partial indexes this could well be different from - * tups_vacuumed.) Likewise for index_pages_removed[i]. + * tups_vacuumed.) Likewise for index_pages_removed[i]. */ index_tups_vacuumed = (double *) palloc0(nindexes * sizeof(double)); index_pages_removed = (BlockNumber *) palloc0(nindexes * sizeof(BlockNumber)); @@ -253,9 +253,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, vacuum_delay_point(); /* - * If we are close to overrunning the available space for - * dead-tuple TIDs, pause and do a cycle of vacuuming before we - * tackle this page. + * If we are close to overrunning the available space for dead-tuple + * TIDs, pause and do a cycle of vacuuming before we tackle this page. 
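/*
 * [Editor's aside -- illustrative sketch only, not part of this commit.]
 *
 * The comment just above describes a general batching pattern: dead-tuple
 * references are collected into a fixed-size array, and the array is
 * flushed (indexes vacuumed, then the collected heap items reclaimed)
 * whenever fewer than one page's worth of slots remain, so a single page
 * can never overflow it.  The stand-alone sketch below uses hypothetical
 * names (ItemRef, flush_items, MAX_ITEMS_PER_PAGE); it is not the actual
 * PostgreSQL code.
 */
#define MAX_ITEMS_PER_PAGE 300	/* stand-in for the per-page maximum */

typedef struct
{
	unsigned int blkno;			/* page number */
	unsigned short offnum;		/* line pointer within the page */
} ItemRef;

/* Stand-in for the expensive pass over the indexes and heap. */
static void
flush_items(ItemRef *items, int nitems)
{
	(void) items;
	(void) nitems;
}

/* Call before scanning each page. */
static void
maybe_flush_before_page(ItemRef *items, int *nitems, int max_items)
{
	if (max_items - *nitems < MAX_ITEMS_PER_PAGE && *nitems > 0)
	{
		flush_items(items, *nitems);
		*nitems = 0;
	}
}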
*/ if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage && vacrelstats->num_dead_tuples > 0) @@ -283,25 +282,25 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, if (PageIsNew(page)) { /* - * An all-zeroes page could be left over if a backend extends - * the relation but crashes before initializing the page. - * Reclaim such pages for use. + * An all-zeroes page could be left over if a backend extends the + * relation but crashes before initializing the page. Reclaim such + * pages for use. * - * We have to be careful here because we could be looking at - * a page that someone has just added to the relation and not - * yet been able to initialize (see RelationGetBufferForTuple). - * To interlock against that, release the buffer read lock - * (which we must do anyway) and grab the relation extension - * lock before re-locking in exclusive mode. If the page is - * still uninitialized by then, it must be left over from a - * crashed backend, and we can initialize it. + * We have to be careful here because we could be looking at a page + * that someone has just added to the relation and not yet been + * able to initialize (see RelationGetBufferForTuple). To + * interlock against that, release the buffer read lock (which we + * must do anyway) and grab the relation extension lock before + * re-locking in exclusive mode. If the page is still + * uninitialized by then, it must be left over from a crashed + * backend, and we can initialize it. * - * We don't really need the relation lock when this is a new - * or temp relation, but it's probably not worth the code space - * to check that, since this surely isn't a critical path. + * We don't really need the relation lock when this is a new or temp + * relation, but it's probably not worth the code space to check + * that, since this surely isn't a critical path. * - * Note: the comparable code in vacuum.c need not worry - * because it's got exclusive lock on the whole relation. + * Note: the comparable code in vacuum.c need not worry because it's + * got exclusive lock on the whole relation. */ LockBuffer(buf, BUFFER_LOCK_UNLOCK); LockRelationForExtension(onerel, ExclusiveLock); @@ -310,8 +309,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, if (PageIsNew(page)) { ereport(WARNING, - (errmsg("relation \"%s\" page %u is uninitialized --- fixing", - relname, blkno))); + (errmsg("relation \"%s\" page %u is uninitialized --- fixing", + relname, blkno))); PageInit(page, BufferGetPageSize(buf), 0); empty_pages++; lazy_record_free_space(vacrelstats, blkno, @@ -365,15 +364,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, case HEAPTUPLE_LIVE: /* - * Tuple is good. Consider whether to replace its - * xmin value with FrozenTransactionId. + * Tuple is good. Consider whether to replace its xmin + * value with FrozenTransactionId. * - * NB: Since we hold only a shared buffer lock here, we - * are assuming that TransactionId read/write is - * atomic. This is not the only place that makes such - * an assumption. It'd be possible to avoid the - * assumption by momentarily acquiring exclusive lock, - * but for the moment I see no need to. + * NB: Since we hold only a shared buffer lock here, we are + * assuming that TransactionId read/write is atomic. This + * is not the only place that makes such an assumption. + * It'd be possible to avoid the assumption by momentarily + * acquiring exclusive lock, but for the moment I see no + * need to. 
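/*
 * [Editor's aside -- illustrative sketch only, not part of this commit.]
 *
 * The freeze test in the hunk below relies on a "precedes" comparison of
 * 32-bit transaction IDs.  For ordinary XIDs that comparison is circular
 * (modulo 2^32), so it stays meaningful across XID wraparound.  A generic
 * version of the idea, with uint32_t standing in for the TransactionId
 * typedef and ignoring the special reserved XIDs, might look like this:
 */
#include <stdint.h>
#include <stdbool.h>

static bool
xid_precedes(uint32_t id1, uint32_t id2)
{
	/*
	 * The signed difference splits the 2^32 circle into "up to 2^31 in the
	 * past" and "up to 2^31 in the future" relative to id2.
	 */
	int32_t		diff = (int32_t) (id1 - id2);

	return diff < 0;
}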
*/ if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) && TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data), @@ -396,8 +395,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, case HEAPTUPLE_RECENTLY_DEAD: /* - * If tuple is recently deleted then we must not - * remove it from relation. + * If tuple is recently deleted then we must not remove it + * from relation. */ nkeep += 1; break; @@ -426,9 +425,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, /* * If we remembered any tuples for deletion, then the page will be - * visited again by lazy_vacuum_heap, which will compute and - * record its post-compaction free space. If not, then we're done - * with this page, so remember its free space as-is. + * visited again by lazy_vacuum_heap, which will compute and record + * its post-compaction free space. If not, then we're done with this + * page, so remember its free space as-is. */ if (vacrelstats->num_dead_tuples == prev_dead_count) { @@ -608,8 +607,8 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats) pg_rusage_init(&ru0); /* - * Acquire appropriate type of lock on index: must be exclusive if - * index AM isn't concurrent-safe. + * Acquire appropriate type of lock on index: must be exclusive if index + * AM isn't concurrent-safe. */ if (indrel->rd_am->amconcurrent) LockRelation(indrel, RowExclusiveLock); @@ -618,9 +617,9 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats) /* * Even though we're not planning to delete anything, we use the - * ambulkdelete call, because (a) the scan happens within the index AM - * for more speed, and (b) it may want to pass private statistics to - * the amvacuumcleanup call. + * ambulkdelete call, because (a) the scan happens within the index AM for + * more speed, and (b) it may want to pass private statistics to the + * amvacuumcleanup call. */ stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL); @@ -648,14 +647,14 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%u index pages have been deleted, %u are currently reusable.\n" - "%s.", - stats->pages_deleted, stats->pages_free, - pg_rusage_show(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%u index pages have been deleted, %u are currently reusable.\n" + "%s.", + stats->pages_deleted, stats->pages_free, + pg_rusage_show(&ru0)))); pfree(stats); } @@ -685,8 +684,8 @@ lazy_vacuum_index(Relation indrel, pg_rusage_init(&ru0); /* - * Acquire appropriate type of lock on index: must be exclusive if - * index AM isn't concurrent-safe. + * Acquire appropriate type of lock on index: must be exclusive if index + * AM isn't concurrent-safe. 
*/ if (indrel->rd_am->amconcurrent) LockRelation(indrel, RowExclusiveLock); @@ -724,16 +723,16 @@ lazy_vacuum_index(Relation indrel, false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%.0f index row versions were removed.\n" - "%u index pages have been deleted, %u are currently reusable.\n" - "%s.", - stats->tuples_removed, - stats->pages_deleted, stats->pages_free, - pg_rusage_show(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%.0f index row versions were removed.\n" + "%u index pages have been deleted, %u are currently reusable.\n" + "%s.", + stats->tuples_removed, + stats->pages_deleted, stats->pages_free, + pg_rusage_show(&ru0)))); pfree(stats); } @@ -755,19 +754,18 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats) pg_rusage_init(&ru0); /* - * We need full exclusive lock on the relation in order to do - * truncation. If we can't get it, give up rather than waiting --- we - * don't want to block other backends, and we don't want to deadlock - * (which is quite possible considering we already hold a lower-grade - * lock). + * We need full exclusive lock on the relation in order to do truncation. + * If we can't get it, give up rather than waiting --- we don't want to + * block other backends, and we don't want to deadlock (which is quite + * possible considering we already hold a lower-grade lock). */ if (!ConditionalLockRelation(onerel, AccessExclusiveLock)) return; /* * Now that we have exclusive lock, look to see if the rel has grown - * whilst we were vacuuming with non-exclusive lock. If so, give up; - * the newly added pages presumably contain non-deletable tuples. + * whilst we were vacuuming with non-exclusive lock. If so, give up; the + * newly added pages presumably contain non-deletable tuples. */ new_rel_pages = RelationGetNumberOfBlocks(onerel); if (new_rel_pages != old_rel_pages) @@ -780,9 +778,9 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats) /* * Scan backwards from the end to verify that the end pages actually - * contain nothing we need to keep. This is *necessary*, not - * optional, because other backends could have added tuples to these - * pages whilst we were vacuuming. + * contain nothing we need to keep. This is *necessary*, not optional, + * because other backends could have added tuples to these pages whilst we + * were vacuuming. */ new_rel_pages = count_nondeletable_pages(onerel, vacrelstats); @@ -905,8 +903,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats) case HEAPTUPLE_RECENTLY_DEAD: /* - * If tuple is recently deleted then we must not - * remove it from relation. + * If tuple is recently deleted then we must not remove it + * from relation. */ break; case HEAPTUPLE_INSERT_IN_PROGRESS: @@ -938,8 +936,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats) /* * If we fall out of the loop, all the previously-thought-to-be-empty - * pages really are; we need not bother to look at the last - * known-nonempty page. + * pages really are; we need not bother to look at the last known-nonempty + * page. 
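/*
 * [Editor's aside -- illustrative sketch only, not part of this commit.]
 *
 * The backwards re-check that count_nondeletable_pages performs, reduced
 * to its shape: re-examine trailing pages from the end of the relation
 * down to the last page known to contain data, and return the page count
 * to truncate to.  find_truncation_point and page_is_still_empty are
 * hypothetical names, not PostgreSQL functions.
 */
#include <stdbool.h>

typedef unsigned int BlockNo;

static BlockNo
find_truncation_point(BlockNo rel_pages, BlockNo nonempty_pages,
					  bool (*page_is_still_empty) (BlockNo blkno))
{
	BlockNo		blkno = rel_pages;

	while (blkno > nonempty_pages)
	{
		/* Another backend may have added tuples since the forward scan. */
		if (!page_is_still_empty(blkno - 1))
			return blkno;		/* keep every page below this one */
		blkno--;
	}

	/* All trailing pages really are empty; truncate to the known boundary. */
	return nonempty_pages;
}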
*/ return vacrelstats->nonempty_pages; } @@ -1010,18 +1008,16 @@ lazy_record_free_space(LVRelStats *vacrelstats, /* * A page with less than stats->threshold free space will be forgotten * immediately, and never passed to the free space map. Removing the - * uselessly small entries early saves cycles, and in particular - * reduces the amount of time we spend holding the FSM lock when we - * finally call RecordRelationFreeSpace. Since the FSM will probably - * drop pages with little free space anyway, there's no point in - * making this really small. + * uselessly small entries early saves cycles, and in particular reduces + * the amount of time we spend holding the FSM lock when we finally call + * RecordRelationFreeSpace. Since the FSM will probably drop pages with + * little free space anyway, there's no point in making this really small. * - * XXX Is it worth trying to measure average tuple size, and using that - * to adjust the threshold? Would be worthwhile if FSM has no stats - * yet for this relation. But changing the threshold as we scan the - * rel might lead to bizarre behavior, too. Also, it's probably - * better if vacuum.c has the same thresholding behavior as we do - * here. + * XXX Is it worth trying to measure average tuple size, and using that to + * adjust the threshold? Would be worthwhile if FSM has no stats yet for + * this relation. But changing the threshold as we scan the rel might + * lead to bizarre behavior, too. Also, it's probably better if vacuum.c + * has the same thresholding behavior as we do here. */ if (avail < vacrelstats->threshold) return; @@ -1055,8 +1051,8 @@ lazy_record_free_space(LVRelStats *vacrelstats, { /* * Scan backwards through the array, "sift-up" each value into its - * correct position. We can start the scan at n/2-1 since each - * entry above that position has no children to worry about. + * correct position. We can start the scan at n/2-1 since each entry + * above that position has no children to worry about. */ int l = n / 2; @@ -1092,9 +1088,9 @@ lazy_record_free_space(LVRelStats *vacrelstats, { /* * Notionally, we replace the zero'th entry with the new data, and - * then sift-up to maintain the heap property. Physically, the - * new data doesn't get stored into the arrays until we find the - * right location for it. + * then sift-up to maintain the heap property. Physically, the new + * data doesn't get stored into the arrays until we find the right + * location for it. 
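/*
 * [Editor's aside -- illustrative sketch only, not part of this commit.]
 *
 * The two hunks above describe keeping only the N pages with the most free
 * space by arranging the page/avail arrays as a binary min-heap keyed on
 * free space: while the arrays are not yet full, new entries are appended
 * and sifted into place; once full, a new page is kept only if it beats
 * the current minimum (the root), which it then replaces before being
 * sifted back down.  The self-contained sketch below uses hypothetical
 * names (FreeSpaceHeap, fsh_record); it is not the PostgreSQL structure.
 */
#define FSH_CAPACITY 256

typedef struct
{
	unsigned int page[FSH_CAPACITY];	/* block numbers */
	unsigned int avail[FSH_CAPACITY];	/* free bytes, min-heap ordered */
	int			count;
} FreeSpaceHeap;

static void
fsh_record(FreeSpaceHeap *h, unsigned int blkno, unsigned int avail)
{
	int			i;

	if (h->count < FSH_CAPACITY)
	{
		/* Not full yet: append and sift the new entry up toward the root. */
		i = h->count++;
		while (i > 0 && h->avail[(i - 1) / 2] > avail)
		{
			h->page[i] = h->page[(i - 1) / 2];
			h->avail[i] = h->avail[(i - 1) / 2];
			i = (i - 1) / 2;
		}
	}
	else
	{
		/* Full: keep the new page only if it beats the current minimum. */
		if (avail <= h->avail[0])
			return;

		/* Replace the root, then sift the hole down to its proper place. */
		i = 0;
		for (;;)
		{
			int			child = 2 * i + 1;

			if (child >= h->count)
				break;
			if (child + 1 < h->count && h->avail[child + 1] < h->avail[child])
				child++;
			if (avail <= h->avail[child])
				break;
			h->page[i] = h->page[child];
			h->avail[i] = h->avail[child];
			i = child;
		}
	}

	h->page[i] = blkno;
	h->avail[i] = avail;
}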
*/ int i = 0; /* i is where the "hole" is */ diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 845c59625d6..31113fffe2d 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.113 2005/08/08 23:39:01 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.114 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -63,7 +63,7 @@ assign_datestyle(const char *value, bool doit, GucSource source) if (source >= PGC_S_INTERACTIVE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid list syntax for parameter \"datestyle\""))); + errmsg("invalid list syntax for parameter \"datestyle\""))); return NULL; } @@ -131,11 +131,11 @@ assign_datestyle(const char *value, bool doit, GucSource source) else if (pg_strcasecmp(tok, "DEFAULT") == 0) { /* - * Easiest way to get the current DEFAULT state is to fetch - * the DEFAULT string from guc.c and recursively parse it. + * Easiest way to get the current DEFAULT state is to fetch the + * DEFAULT string from guc.c and recursively parse it. * - * We can't simply "return assign_datestyle(...)" because we need - * to handle constructs like "DEFAULT, ISO". + * We can't simply "return assign_datestyle(...)" because we need to + * handle constructs like "DEFAULT, ISO". */ int saveDateStyle = DateStyle; int saveDateOrder = DateOrder; @@ -163,8 +163,8 @@ assign_datestyle(const char *value, bool doit, GucSource source) if (source >= PGC_S_INTERACTIVE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("unrecognized \"datestyle\" key word: \"%s\"", - tok))); + errmsg("unrecognized \"datestyle\" key word: \"%s\"", + tok))); ok = false; break; } @@ -224,8 +224,8 @@ assign_datestyle(const char *value, bool doit, GucSource source) } /* - * Finally, it's safe to assign to the global variables; the - * assignment cannot fail now. + * Finally, it's safe to assign to the global variables; the assignment + * cannot fail now. */ DateStyle = newDateStyle; DateOrder = newDateOrder; @@ -274,14 +274,14 @@ assign_timezone(const char *value, bool doit, GucSource source) /* * Try to parse it. XXX an invalid interval format will result in - * ereport, which is not desirable for GUC. We did what we could - * to guard against this in flatten_set_variable_args, but a - * string coming in from postgresql.conf might contain anything. + * ereport, which is not desirable for GUC. We did what we could to + * guard against this in flatten_set_variable_args, but a string + * coming in from postgresql.conf might contain anything. */ interval = DatumGetIntervalP(DirectFunctionCall3(interval_in, - CStringGetDatum(val), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(-1))); + CStringGetDatum(val), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1))); pfree(val); if (interval->month != 0) @@ -336,15 +336,14 @@ assign_timezone(const char *value, bool doit, GucSource source) * UNKNOWN is the value shown as the "default" for TimeZone in * guc.c. We interpret it as being a complete no-op; we don't * change the timezone setting. Note that if there is a known - * timezone setting, we will return that name rather than - * UNKNOWN as the canonical spelling. + * timezone setting, we will return that name rather than UNKNOWN + * as the canonical spelling. 
* - * During GUC initialization, since the timezone library isn't - * set up yet, pg_get_timezone_name will return NULL and we - * will leave the setting as UNKNOWN. If this isn't - * overridden from the config file then - * pg_timezone_initialize() will eventually select a default - * value from the environment. + * During GUC initialization, since the timezone library isn't set up + * yet, pg_get_timezone_name will return NULL and we will leave + * the setting as UNKNOWN. If this isn't overridden from the + * config file then pg_timezone_initialize() will eventually + * select a default value from the environment. */ if (doit) { @@ -359,7 +358,7 @@ assign_timezone(const char *value, bool doit, GucSource source) /* * Otherwise assume it is a timezone name, and try to load it. */ - pg_tz *new_tz; + pg_tz *new_tz; new_tz = pg_tzset(value); @@ -376,9 +375,9 @@ assign_timezone(const char *value, bool doit, GucSource source) { ereport((source >= PGC_S_INTERACTIVE) ? ERROR : LOG, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("time zone \"%s\" appears to use leap seconds", - value), - errdetail("PostgreSQL does not support leap seconds."))); + errmsg("time zone \"%s\" appears to use leap seconds", + value), + errdetail("PostgreSQL does not support leap seconds."))); return NULL; } @@ -406,7 +405,7 @@ assign_timezone(const char *value, bool doit, GucSource source) if (!result) return NULL; snprintf(result, 64, "%.5f", - (double) (-CTimeZone) / (double)SECS_PER_HOUR); + (double) (-CTimeZone) / (double) SECS_PER_HOUR); } else result = strdup(value); @@ -424,7 +423,7 @@ show_timezone(void) if (HasCTZSet) { - Interval interval; + Interval interval; interval.month = 0; interval.day = 0; @@ -435,7 +434,7 @@ show_timezone(void) #endif tzn = DatumGetCString(DirectFunctionCall1(interval_out, - IntervalPGetDatum(&interval))); + IntervalPGetDatum(&interval))); } else tzn = pg_get_timezone_name(global_timezone); @@ -559,18 +558,18 @@ assign_client_encoding(const char *value, bool doit, GucSource source) return NULL; /* - * Note: if we are in startup phase then SetClientEncoding may not be - * able to really set the encoding. In this case we will assume that - * the encoding is okay, and InitializeClientEncoding() will fix - * things once initialization is complete. + * Note: if we are in startup phase then SetClientEncoding may not be able + * to really set the encoding. In this case we will assume that the + * encoding is okay, and InitializeClientEncoding() will fix things once + * initialization is complete. 
*/ if (SetClientEncoding(encoding, doit) < 0) { if (source >= PGC_S_INTERACTIVE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("conversion between %s and %s is not supported", - value, GetDatabaseEncodingName()))); + errmsg("conversion between %s and %s is not supported", + value, GetDatabaseEncodingName()))); return NULL; } return value; @@ -594,7 +593,7 @@ extern char *session_authorization_string; /* in guc.c */ const char * assign_session_authorization(const char *value, bool doit, GucSource source) { - Oid roleid = InvalidOid; + Oid roleid = InvalidOid; bool is_superuser = false; const char *actual_rolename = NULL; char *result; @@ -603,7 +602,7 @@ assign_session_authorization(const char *value, bool doit, GucSource source) (value[NAMEDATALEN] == 'T' || value[NAMEDATALEN] == 'F')) { /* might be a saved userid string */ - Oid savedoid; + Oid savedoid; char *endptr; savedoid = (Oid) strtoul(value + NAMEDATALEN + 1, &endptr, 10); @@ -625,9 +624,9 @@ assign_session_authorization(const char *value, bool doit, GucSource source) if (!IsTransactionState()) { /* - * Can't do catalog lookups, so fail. The upshot of this is - * that session_authorization cannot be set in - * postgresql.conf, which seems like a good thing anyway. + * Can't do catalog lookups, so fail. The upshot of this is that + * session_authorization cannot be set in postgresql.conf, which + * seems like a good thing anyway. */ return NULL; } @@ -676,7 +675,7 @@ show_session_authorization(void) * assign_session_authorization */ const char *value = session_authorization_string; - Oid savedoid; + Oid savedoid; char *endptr; Assert(strspn(value, "x") == NAMEDATALEN && @@ -706,7 +705,7 @@ extern char *role_string; /* in guc.c */ const char * assign_role(const char *value, bool doit, GucSource source) { - Oid roleid = InvalidOid; + Oid roleid = InvalidOid; bool is_superuser = false; const char *actual_rolename = value; char *result; @@ -715,7 +714,7 @@ assign_role(const char *value, bool doit, GucSource source) (value[NAMEDATALEN] == 'T' || value[NAMEDATALEN] == 'F')) { /* might be a saved userid string */ - Oid savedoid; + Oid savedoid; char *endptr; savedoid = (Oid) strtoul(value + NAMEDATALEN + 1, &endptr, 10); @@ -738,9 +737,9 @@ assign_role(const char *value, bool doit, GucSource source) if (!IsTransactionState()) { /* - * Can't do catalog lookups, so fail. The upshot of this is - * that role cannot be set in postgresql.conf, which seems - * like a good thing anyway. + * Can't do catalog lookups, so fail. The upshot of this is that + * role cannot be set in postgresql.conf, which seems like a good + * thing anyway. */ return NULL; } @@ -797,11 +796,10 @@ const char * show_role(void) { /* - * Extract the role name from the stored string; see - * assign_role + * Extract the role name from the stored string; see assign_role */ const char *value = role_string; - Oid savedoid; + Oid savedoid; char *endptr; /* This special case only applies if no SET ROLE has been done */ @@ -816,11 +814,11 @@ show_role(void) Assert(endptr != value + NAMEDATALEN + 1 && *endptr == ','); /* - * Check that the stored string still matches the effective setting, - * else return "none". This is a kluge to deal with the fact that - * SET SESSION AUTHORIZATION logically resets SET ROLE to NONE, but - * we cannot set the GUC role variable from assign_session_authorization - * (because we haven't got enough info to call set_config_option). + * Check that the stored string still matches the effective setting, else + * return "none". 
This is a kluge to deal with the fact that SET SESSION + * AUTHORIZATION logically resets SET ROLE to NONE, but we cannot set the + * GUC role variable from assign_session_authorization (because we haven't + * got enough info to call set_config_option). */ if (savedoid != GetCurrentRoleId()) return "none"; diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index 6158b16654c..54030452f8a 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.90 2005/04/14 01:38:17 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.91 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -55,16 +55,18 @@ isViewOnTempTable_walker(Node *node, void *context) if (IsA(node, Query)) { - Query *query = (Query *) node; - ListCell *rtable; + Query *query = (Query *) node; + ListCell *rtable; - foreach (rtable, query->rtable) + foreach(rtable, query->rtable) { RangeTblEntry *rte = lfirst(rtable); + if (rte->rtekind == RTE_RELATION) { - Relation rel = heap_open(rte->relid, AccessShareLock); - bool istemp = rel->rd_istemp; + Relation rel = heap_open(rte->relid, AccessShareLock); + bool istemp = rel->rd_istemp; + heap_close(rel, AccessShareLock); if (istemp) return true; @@ -101,8 +103,8 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace) ListCell *t; /* - * create a list of ColumnDef nodes based on the names and types of - * the (non-junk) targetlist items from the view's SELECT list. + * create a list of ColumnDef nodes based on the names and types of the + * (non-junk) targetlist items from the view's SELECT list. */ attrList = NIL; foreach(t, tlist) @@ -167,15 +169,15 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace) RelationGetRelationName(rel)); /* - * Due to the namespace visibility rules for temporary - * objects, we should only end up replacing a temporary view - * with another temporary view, and vice versa. + * Due to the namespace visibility rules for temporary objects, we + * should only end up replacing a temporary view with another + * temporary view, and vice versa. */ Assert(relation->istemp == rel->rd_istemp); /* - * Create a tuple descriptor to compare against the existing view, - * and verify it matches. + * Create a tuple descriptor to compare against the existing view, and + * verify it matches. */ descriptor = BuildDescForRelation(attrList); checkViewTupleDesc(descriptor, rel->rd_att); @@ -190,8 +192,8 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace) else { /* - * now set the parameters for keys/inheritance etc. All of these - * are uninteresting for views... + * now set the parameters for keys/inheritance etc. All of these are + * uninteresting for views... */ createStmt->relation = (RangeVar *) relation; createStmt->tableElts = attrList; @@ -203,8 +205,8 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace) /* * finally create the relation (this will error out if there's an - * existing view, so we don't need more code to complain if - * "replace" is false). + * existing view, so we don't need more code to complain if "replace" + * is false). 
*/ return DefineRelation(createStmt, RELKIND_VIEW); } @@ -247,8 +249,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc) newattr->atttypmod != oldattr->atttypmod) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("cannot change data type of view column \"%s\"", - NameStr(oldattr->attname)))); + errmsg("cannot change data type of view column \"%s\"", + NameStr(oldattr->attname)))); /* We can ignore the remaining attributes of an attribute... */ } @@ -265,8 +267,8 @@ FormViewRetrieveRule(const RangeVar *view, Query *viewParse, bool replace) RuleStmt *rule; /* - * Create a RuleStmt that corresponds to the suitable rewrite rule - * args for DefineQueryRewrite(); + * Create a RuleStmt that corresponds to the suitable rewrite rule args + * for DefineQueryRewrite(); */ rule = makeNode(RuleStmt); rule->relation = copyObject((RangeVar *) view); @@ -336,11 +338,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse) /* * Make a copy of the given parsetree. It's not so much that we don't - * want to scribble on our input, it's that the parser has a bad habit - * of outputting multiple links to the same subtree for constructs - * like BETWEEN, and we mustn't have OffsetVarNodes increment the - * varno of a Var node twice. copyObject will expand any - * multiply-referenced subtree into multiple copies. + * want to scribble on our input, it's that the parser has a bad habit of + * outputting multiple links to the same subtree for constructs like + * BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a + * Var node twice. copyObject will expand any multiply-referenced subtree + * into multiple copies. */ viewParse = (Query *) copyObject(viewParse); @@ -348,8 +350,8 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse) viewRel = relation_open(viewOid, AccessShareLock); /* - * Create the 2 new range table entries and form the new range - * table... OLD first, then NEW.... + * Create the 2 new range table entries and form the new range table... + * OLD first, then NEW.... */ rt_entry1 = addRangeTableEntryForRelation(NULL, viewRel, makeAlias("*OLD*", NIL), @@ -393,8 +395,8 @@ DefineView(RangeVar *view, Query *viewParse, bool replace) Oid viewOid; /* - * If the user didn't explicitly ask for a temporary view, check - * whether we need one implicitly. + * If the user didn't explicitly ask for a temporary view, check whether + * we need one implicitly. */ if (!view->istemp) { @@ -404,25 +406,24 @@ DefineView(RangeVar *view, Query *viewParse, bool replace) (errmsg("view \"%s\" will be a temporary view", view->relname))); } - + /* * Create the view relation * - * NOTE: if it already exists and replace is false, the xact will be - * aborted. + * NOTE: if it already exists and replace is false, the xact will be aborted. */ viewOid = DefineVirtualRelation(view, viewParse->targetList, replace); /* - * The relation we have just created is not visible to any other - * commands running with the same transaction & command id. So, - * increment the command id counter (but do NOT pfree any memory!!!!) + * The relation we have just created is not visible to any other commands + * running with the same transaction & command id. So, increment the + * command id counter (but do NOT pfree any memory!!!!) */ CommandCounterIncrement(); /* - * The range table of 'viewParse' does not contain entries for the - * "OLD" and "NEW" relations. So... add them! + * The range table of 'viewParse' does not contain entries for the "OLD" + * and "NEW" relations. So... add them! 
*/ viewParse = UpdateRangeTableOfViewParse(viewOid, viewParse); diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index c2cb4b68835..06e4ab7b232 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.84 2005/05/15 21:19:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -251,10 +251,10 @@ ExecMarkPos(PlanState *node) * * NOTE: the semantics of this are that the first ExecProcNode following * the restore operation will yield the same tuple as the first one following - * the mark operation. It is unspecified what happens to the plan node's + * the mark operation. It is unspecified what happens to the plan node's * result TupleTableSlot. (In most cases the result slot is unchanged by * a restore, but the node may choose to clear it or to load it with the - * restored-to tuple.) Hence the caller should discard any previously + * restored-to tuple.) Hence the caller should discard any previously * returned TupleTableSlot after doing a restore. */ void @@ -398,15 +398,14 @@ ExecMayReturnRawTuples(PlanState *node) { /* * At a table scan node, we check whether ExecAssignScanProjectionInfo - * decided to do projection or not. Most non-scan nodes always - * project and so we can return "false" immediately. For nodes that - * don't project but just pass up input tuples, we have to recursively - * examine the input plan node. + * decided to do projection or not. Most non-scan nodes always project + * and so we can return "false" immediately. For nodes that don't project + * but just pass up input tuples, we have to recursively examine the input + * plan node. * - * Note: Hash and Material are listed here because they sometimes return - * an original input tuple, not a copy. But Sort and SetOp never - * return an original tuple, so they can be treated like projecting - * nodes. + * Note: Hash and Material are listed here because they sometimes return an + * original input tuple, not a copy. But Sort and SetOp never return an + * original tuple, so they can be treated like projecting nodes. */ switch (nodeTag(node)) { diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 1bf46d815cc..688e2157e8b 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.15 2005/05/29 04:23:03 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -66,11 +66,10 @@ execTuplesMatch(TupleTableSlot *slot1, oldContext = MemoryContextSwitchTo(evalContext); /* - * We cannot report a match without checking all the fields, but we - * can report a non-match as soon as we find unequal fields. So, - * start comparing at the last field (least significant sort key). - * That's the most likely to be different if we are dealing with - * sorted input. + * We cannot report a match without checking all the fields, but we can + * report a non-match as soon as we find unequal fields. 
So, start + * comparing at the last field (least significant sort key). That's the + * most likely to be different if we are dealing with sorted input. */ result = true; @@ -137,11 +136,10 @@ execTuplesUnequal(TupleTableSlot *slot1, oldContext = MemoryContextSwitchTo(evalContext); /* - * We cannot report a match without checking all the fields, but we - * can report a non-match as soon as we find unequal fields. So, - * start comparing at the last field (least significant sort key). - * That's the most likely to be different if we are dealing with - * sorted input. + * We cannot report a match without checking all the fields, but we can + * report a non-match as soon as we find unequal fields. So, start + * comparing at the last field (least significant sort key). That's the + * most likely to be different if we are dealing with sorted input. */ result = false; @@ -288,7 +286,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, Assert(entrysize >= sizeof(TupleHashEntryData)); hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt, - sizeof(TupleHashTableData)); + sizeof(TupleHashTableData)); hashtable->numCols = numCols; hashtable->keyColIdx = keyColIdx; @@ -297,7 +295,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, hashtable->tablecxt = tablecxt; hashtable->tempcxt = tempcxt; hashtable->entrysize = entrysize; - hashtable->tableslot = NULL; /* will be made on first lookup */ + hashtable->tableslot = NULL; /* will be made on first lookup */ hashtable->inputslot = NULL; MemSet(&hash_ctl, 0, sizeof(hash_ctl)); @@ -308,7 +306,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, hash_ctl.hcxt = tablecxt; hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets, &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); return hashtable; } @@ -341,6 +339,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, TupleDesc tupdesc; oldContext = MemoryContextSwitchTo(hashtable->tablecxt); + /* * We copy the input tuple descriptor just for safety --- we assume * all input tuples will have equivalent descriptors. @@ -382,9 +381,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, /* * created new entry * - * Zero any caller-requested space in the entry. (This zaps - * the "key data" dynahash.c copied into the new entry, but we - * don't care since we're about to overwrite it anyway.) + * Zero any caller-requested space in the entry. (This zaps the "key + * data" dynahash.c copied into the new entry, but we don't care + * since we're about to overwrite it anyway.) */ MemSet(entry, 0, hashtable->entrysize); @@ -482,6 +481,7 @@ static int TupleHashTableMatch(const void *key1, const void *key2, Size keysize) { HeapTuple tuple1 = ((const TupleHashEntryData *) key1)->firstTuple; + #ifdef USE_ASSERT_CHECKING HeapTuple tuple2 = ((const TupleHashEntryData *) key2)->firstTuple; #endif diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index 1cf403f88dd..2245c61e7fe 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.49 2005/04/06 16:34:04 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -55,7 +55,7 @@ * * Initialize the Junk filter. * - * The source targetlist is passed in. 
The output tuple descriptor is + * The source targetlist is passed in. The output tuple descriptor is * built from the non-junk tlist entries, plus the passed specification * of whether to include room for an OID or not. * An optional resultSlot can be passed as well. @@ -87,11 +87,11 @@ ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot) * Now calculate the mapping between the original tuple's attributes and * the "clean" tuple's attributes. * - * The "map" is an array of "cleanLength" attribute numbers, i.e. one - * entry for every attribute of the "clean" tuple. The value of this - * entry is the attribute number of the corresponding attribute of the - * "original" tuple. (Zero indicates a NULL output attribute, but we - * do not use that feature in this routine.) + * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry + * for every attribute of the "clean" tuple. The value of this entry is + * the attribute number of the corresponding attribute of the "original" + * tuple. (Zero indicates a NULL output attribute, but we do not use that + * feature in this routine.) */ cleanLength = cleanTupType->natts; if (cleanLength > 0) @@ -155,14 +155,14 @@ ExecInitJunkFilterConversion(List *targetList, slot = MakeSingleTupleTableSlot(cleanTupType); /* - * Calculate the mapping between the original tuple's attributes and - * the "clean" tuple's attributes. + * Calculate the mapping between the original tuple's attributes and the + * "clean" tuple's attributes. * - * The "map" is an array of "cleanLength" attribute numbers, i.e. one - * entry for every attribute of the "clean" tuple. The value of this - * entry is the attribute number of the corresponding attribute of the - * "original" tuple. We store zero for any deleted attributes, marking - * that a NULL is needed in the output tuple. + * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry + * for every attribute of the "clean" tuple. The value of this entry is + * the attribute number of the corresponding attribute of the "original" + * tuple. We store zero for any deleted attributes, marking that a NULL + * is needed in the output tuple. */ cleanLength = cleanTupType->natts; if (cleanLength > 0) @@ -220,8 +220,8 @@ ExecGetJunkAttribute(JunkFilter *junkfilter, ListCell *t; /* - * Look in the junkfilter's target list for an attribute with - * the given name + * Look in the junkfilter's target list for an attribute with the given + * name */ foreach(t, junkfilter->jf_targetList) { diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 05b4a48be29..2a96a161c81 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -26,7 +26,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.255 2005/08/26 03:07:25 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -208,8 +208,7 @@ ExecutorRun(QueryDesc *queryDesc, oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); /* - * extract information from the query descriptor and the query - * feature. + * extract information from the query descriptor and the query feature. */ operation = queryDesc->operation; dest = queryDesc->dest; @@ -352,15 +351,15 @@ ExecCheckRTEPerms(RangeTblEntry *rte) { AclMode requiredPerms; Oid relOid; - Oid userid; + Oid userid; /* - * Only plain-relation RTEs need to be checked here. 
Subquery RTEs - * are checked by ExecInitSubqueryScan if the subquery is still a - * separate subquery --- if it's been pulled up into our query level - * then the RTEs are in our rangetable and will be checked here. - * Function RTEs are checked by init_fcache when the function is - * prepared for execution. Join and special RTEs need no checks. + * Only plain-relation RTEs need to be checked here. Subquery RTEs are + * checked by ExecInitSubqueryScan if the subquery is still a separate + * subquery --- if it's been pulled up into our query level then the RTEs + * are in our rangetable and will be checked here. Function RTEs are + * checked by init_fcache when the function is prepared for execution. + * Join and special RTEs need no checks. */ if (rte->rtekind != RTE_RELATION) return; @@ -375,19 +374,17 @@ ExecCheckRTEPerms(RangeTblEntry *rte) relOid = rte->relid; /* - * userid to check as: current user unless we have a setuid - * indication. + * userid to check as: current user unless we have a setuid indication. * - * Note: GetUserId() is presently fast enough that there's no harm in - * calling it separately for each RTE. If that stops being true, we - * could call it once in ExecCheckRTPerms and pass the userid down - * from there. But for now, no need for the extra clutter. + * Note: GetUserId() is presently fast enough that there's no harm in calling + * it separately for each RTE. If that stops being true, we could call it + * once in ExecCheckRTPerms and pass the userid down from there. But for + * now, no need for the extra clutter. */ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); /* - * We must have *all* the requiredPerms bits, so use aclmask not - * aclcheck. + * We must have *all* the requiredPerms bits, so use aclmask not aclcheck. */ if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL) != requiredPerms) @@ -515,8 +512,7 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) else { /* - * Single result relation identified by - * parseTree->resultRelation + * Single result relation identified by parseTree->resultRelation */ numResultRelations = 1; resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo)); @@ -544,8 +540,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) /* * Detect whether we're doing SELECT INTO. If so, set the es_into_oids - * flag appropriately so that the plan tree will be initialized with - * the correct tuple descriptors. + * flag appropriately so that the plan tree will be initialized with the + * correct tuple descriptors. */ do_select_into = false; @@ -583,10 +579,10 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) } /* - * initialize the executor "tuple" table. We need slots for all the - * plan nodes, plus possibly output slots for the junkfilter(s). At - * this point we aren't sure if we need junkfilters, so just add slots - * for them unconditionally. + * initialize the executor "tuple" table. We need slots for all the plan + * nodes, plus possibly output slots for the junkfilter(s). At this point + * we aren't sure if we need junkfilters, so just add slots for them + * unconditionally. */ { int nSlots = ExecCountSlotsNode(plan); @@ -606,26 +602,26 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) estate->es_useEvalPlan = false; /* - * initialize the private state information for all the nodes in the - * query tree. This opens files, allocates storage and leaves us - * ready to start processing tuples. + * initialize the private state information for all the nodes in the query + * tree. 
This opens files, allocates storage and leaves us ready to start + * processing tuples. */ planstate = ExecInitNode(plan, estate); /* - * Get the tuple descriptor describing the type of tuples to return. - * (this is especially important if we are creating a relation with - * "SELECT INTO") + * Get the tuple descriptor describing the type of tuples to return. (this + * is especially important if we are creating a relation with "SELECT + * INTO") */ tupType = ExecGetResultType(planstate); /* - * Initialize the junk filter if needed. SELECT and INSERT queries - * need a filter if there are any junk attrs in the tlist. INSERT and - * SELECT INTO also need a filter if the plan may return raw disk - * tuples (else heap_insert will be scribbling on the source - * relation!). UPDATE and DELETE always need a filter, since there's - * always a junk 'ctid' attribute present --- no need to look first. + * Initialize the junk filter if needed. SELECT and INSERT queries need a + * filter if there are any junk attrs in the tlist. INSERT and SELECT + * INTO also need a filter if the plan may return raw disk tuples (else + * heap_insert will be scribbling on the source relation!). UPDATE and + * DELETE always need a filter, since there's always a junk 'ctid' + * attribute present --- no need to look first. */ { bool junk_filter_needed = false; @@ -661,10 +657,9 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) if (junk_filter_needed) { /* - * If there are multiple result relations, each one needs its - * own junk filter. Note this is only possible for - * UPDATE/DELETE, so we can't be fooled by some needing a - * filter and some not. + * If there are multiple result relations, each one needs its own + * junk filter. Note this is only possible for UPDATE/DELETE, so + * we can't be fooled by some needing a filter and some not. */ if (parseTree->resultRelations != NIL) { @@ -687,15 +682,15 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) JunkFilter *j; j = ExecInitJunkFilter(subplan->plan->targetlist, - resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, - ExecAllocTableSlot(estate->es_tupleTable)); + resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); resultRelInfo->ri_junkFilter = j; resultRelInfo++; } /* - * Set active junkfilter too; at this point ExecInitAppend - * has already selected an active result relation... + * Set active junkfilter too; at this point ExecInitAppend has + * already selected an active result relation... */ estate->es_junkFilter = estate->es_result_relation_info->ri_junkFilter; @@ -707,7 +702,7 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) j = ExecInitJunkFilter(planstate->plan->targetlist, tupType->tdhasoid, - ExecAllocTableSlot(estate->es_tupleTable)); + ExecAllocTableSlot(estate->es_tupleTable)); estate->es_junkFilter = j; if (estate->es_result_relation_info) estate->es_result_relation_info->ri_junkFilter = j; @@ -777,10 +772,9 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) CommandCounterIncrement(); /* - * If necessary, create a TOAST table for the into relation. Note - * that AlterTableCreateToastTable ends with - * CommandCounterIncrement(), so that the TOAST table will be - * visible for insertion. + * If necessary, create a TOAST table for the into relation. Note that + * AlterTableCreateToastTable ends with CommandCounterIncrement(), so + * that the TOAST table will be visible for insertion. 
*/ AlterTableCreateToastTable(intoRelationId, true); @@ -795,11 +789,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) /* * We can skip WAL-logging the insertions, unless PITR is in use. * - * Note that for a non-temp INTO table, this is safe only because - * we know that the catalog changes above will have been WAL-logged, - * and so RecordTransactionCommit will think it needs to WAL-log the - * eventual transaction commit. Else the commit might be lost, even - * though all the data is safely fsync'd ... + * Note that for a non-temp INTO table, this is safe only because we know + * that the catalog changes above will have been WAL-logged, and so + * RecordTransactionCommit will think it needs to WAL-log the eventual + * transaction commit. Else the commit might be lost, even though all + * the data is safely fsync'd ... */ estate->es_into_relation_use_wal = XLogArchivingActive(); } @@ -832,19 +826,19 @@ initResultRelInfo(ResultRelInfo *resultRelInfo, ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot change sequence \"%s\"", - RelationGetRelationName(resultRelationDesc)))); + RelationGetRelationName(resultRelationDesc)))); break; case RELKIND_TOASTVALUE: ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot change TOAST relation \"%s\"", - RelationGetRelationName(resultRelationDesc)))); + RelationGetRelationName(resultRelationDesc)))); break; case RELKIND_VIEW: ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot change view \"%s\"", - RelationGetRelationName(resultRelationDesc)))); + RelationGetRelationName(resultRelationDesc)))); break; } @@ -859,7 +853,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo, resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc); if (resultRelInfo->ri_TrigDesc) { - int n = resultRelInfo->ri_TrigDesc->numtriggers; + int n = resultRelInfo->ri_TrigDesc->numtriggers; resultRelInfo->ri_TrigFunctions = (FmgrInfo *) palloc0(n * sizeof(FmgrInfo)); @@ -878,9 +872,9 @@ initResultRelInfo(ResultRelInfo *resultRelInfo, /* * If there are indices on the result relation, open them and save - * descriptors in the result relation info, so that we can add new - * index entries for the tuples we add/update. We need not do this - * for a DELETE, however, since deletion doesn't affect indexes. + * descriptors in the result relation info, so that we can add new index + * entries for the tuples we add/update. We need not do this for a + * DELETE, however, since deletion doesn't affect indexes. */ if (resultRelationDesc->rd_rel->relhasindex && operation != CMD_DELETE) @@ -981,8 +975,7 @@ ExecEndPlan(PlanState *planstate, EState *estate) estate->es_tupleTable = NULL; /* - * close the result relation(s) if any, but hold locks until xact - * commit. + * close the result relation(s) if any, but hold locks until xact commit. */ resultRelInfo = estate->es_result_relations; for (i = estate->es_num_result_relations; i > 0; i--) @@ -999,10 +992,10 @@ ExecEndPlan(PlanState *planstate, EState *estate) if (estate->es_into_relation_descriptor != NULL) { /* - * If we skipped using WAL, and it's not a temp relation, - * we must force the relation down to disk before it's - * safe to commit the transaction. This requires forcing - * out any dirty buffers and then doing a forced fsync. + * If we skipped using WAL, and it's not a temp relation, we must + * force the relation down to disk before it's safe to commit the + * transaction. This requires forcing out any dirty buffers and then + * doing a forced fsync. 
*/ if (!estate->es_into_relation_use_wal && !estate->es_into_relation_descriptor->rd_istemp) @@ -1087,8 +1080,7 @@ ExecutePlan(EState *estate, } /* - * Loop until we've processed the proper number of tuples from the - * plan. + * Loop until we've processed the proper number of tuples from the plan. */ for (;;) @@ -1120,12 +1112,12 @@ lnext: ; } /* - * if we have a junk filter, then project a new tuple with the - * junk removed. + * if we have a junk filter, then project a new tuple with the junk + * removed. * * Store this new "clean" tuple in the junkfilter's resultSlot. - * (Formerly, we stored it back over the "dirty" tuple, which is - * WRONG because that tuple slot has the wrong descriptor.) + * (Formerly, we stored it back over the "dirty" tuple, which is WRONG + * because that tuple slot has the wrong descriptor.) * * Also, extract all the junk information we need. */ @@ -1151,10 +1143,10 @@ lnext: ; elog(ERROR, "ctid is NULL"); tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* make sure we don't free the - * ctid!! */ + tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */ tupleid = &tuple_ctid; } + /* * Process any FOR UPDATE or FOR SHARE locking requested. */ @@ -1171,8 +1163,8 @@ lnext: ; ItemPointerData update_ctid; TransactionId update_xmax; TupleTableSlot *newSlot; - LockTupleMode lockmode; - HTSU_Result test; + LockTupleMode lockmode; + HTSU_Result test; if (!ExecGetJunkAttribute(junkfilter, slot, @@ -1210,8 +1202,8 @@ lnext: ; case HeapTupleUpdated: if (IsXactIsoLevelSerializable) ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); if (!ItemPointerEquals(&update_ctid, &tuple.t_self)) { @@ -1230,8 +1222,7 @@ lnext: ; /* * if tuple was deleted or PlanQual failed for - * updated tuple - we must not return this - * tuple! + * updated tuple - we must not return this tuple! */ goto lnext; @@ -1251,9 +1242,9 @@ lnext: ; } /* - * now that we have a tuple, do the appropriate thing with it.. - * either return it to the user, add it to a relation someplace, - * delete it from a relation, or modify some of its attributes. + * now that we have a tuple, do the appropriate thing with it.. either + * return it to the user, add it to a relation someplace, delete it + * from a relation, or modify some of its attributes. */ switch (operation) { @@ -1287,9 +1278,9 @@ lnext: ; } /* - * check our tuple count.. if we've processed the proper number - * then quit, else loop again and process more tuples. Zero - * numberTuples means no limit. + * check our tuple count.. if we've processed the proper number then + * quit, else loop again and process more tuples. Zero numberTuples + * means no limit. 
*/ current_tuple_count++; if (numberTuples && numberTuples == current_tuple_count) @@ -1383,8 +1374,8 @@ ExecInsert(TupleTableSlot *slot, Oid newId; /* - * get the heap tuple out of the tuple table slot, making sure - * we have a writable copy + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy */ tuple = ExecMaterializeSlot(slot); @@ -1396,7 +1387,7 @@ ExecInsert(TupleTableSlot *slot, /* BEFORE ROW INSERT Triggers */ if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) { HeapTuple newtuple; @@ -1409,9 +1400,9 @@ ExecInsert(TupleTableSlot *slot, { /* * Insert modified tuple into tuple table slot, replacing the - * original. We assume that it was allocated in per-tuple - * memory context, and therefore will go away by itself. The - * tuple table slot should not try to clear it. + * original. We assume that it was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. */ ExecStoreTuple(newtuple, slot, InvalidBuffer, false); tuple = newtuple; @@ -1427,8 +1418,8 @@ ExecInsert(TupleTableSlot *slot, /* * insert the tuple * - * Note: heap_insert returns the tid (location) of the new tuple - * in the t_self field. + * Note: heap_insert returns the tid (location) of the new tuple in the + * t_self field. */ newId = heap_insert(resultRelationDesc, tuple, estate->es_snapshot->curcid, @@ -1463,7 +1454,7 @@ ExecDelete(TupleTableSlot *slot, { ResultRelInfo *resultRelInfo; Relation resultRelationDesc; - HTSU_Result result; + HTSU_Result result; ItemPointerData update_ctid; TransactionId update_xmax; @@ -1475,7 +1466,7 @@ ExecDelete(TupleTableSlot *slot, /* BEFORE ROW DELETE Triggers */ if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) { bool dodelete; @@ -1489,9 +1480,9 @@ ExecDelete(TupleTableSlot *slot, /* * delete the tuple * - * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that - * the row to be deleted is visible to that snapshot, and throw a can't- - * serialize error if not. This is a special-case behavior needed for + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the + * row to be deleted is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for * referential integrity updates in serializable transactions. */ ldelete:; @@ -1543,9 +1534,9 @@ ldelete:; * Note: Normally one would think that we have to delete index tuples * associated with the heap tuple now.. * - * ... but in POSTGRES, we have no need to do this because the vacuum - * daemon automatically opens an index scan and deletes index tuples - * when it finds deleted heap tuples. -cim 9/27/89 + * ... but in POSTGRES, we have no need to do this because the vacuum daemon + * automatically opens an index scan and deletes index tuples when it + * finds deleted heap tuples. 
-cim 9/27/89 */ /* AFTER ROW DELETE Triggers */ @@ -1571,7 +1562,7 @@ ExecUpdate(TupleTableSlot *slot, HeapTuple tuple; ResultRelInfo *resultRelInfo; Relation resultRelationDesc; - HTSU_Result result; + HTSU_Result result; ItemPointerData update_ctid; TransactionId update_xmax; @@ -1582,8 +1573,8 @@ ExecUpdate(TupleTableSlot *slot, elog(ERROR, "cannot UPDATE during bootstrap"); /* - * get the heap tuple out of the tuple table slot, making sure - * we have a writable copy + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy */ tuple = ExecMaterializeSlot(slot); @@ -1595,7 +1586,7 @@ ExecUpdate(TupleTableSlot *slot, /* BEFORE ROW UPDATE Triggers */ if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) { HeapTuple newtuple; @@ -1610,9 +1601,9 @@ ExecUpdate(TupleTableSlot *slot, { /* * Insert modified tuple into tuple table slot, replacing the - * original. We assume that it was allocated in per-tuple - * memory context, and therefore will go away by itself. The - * tuple table slot should not try to clear it. + * original. We assume that it was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. */ ExecStoreTuple(newtuple, slot, InvalidBuffer, false); tuple = newtuple; @@ -1622,11 +1613,11 @@ ExecUpdate(TupleTableSlot *slot, /* * Check the constraints of the tuple * - * If we generate a new candidate tuple after EvalPlanQual testing, we - * must loop back here and recheck constraints. (We don't need to - * redo triggers, however. If there are any BEFORE triggers then - * trigger.c will have done heap_lock_tuple to lock the correct tuple, - * so there's no need to do them again.) + * If we generate a new candidate tuple after EvalPlanQual testing, we must + * loop back here and recheck constraints. (We don't need to redo + * triggers, however. If there are any BEFORE triggers then trigger.c + * will have done heap_lock_tuple to lock the correct tuple, so there's no + * need to do them again.) */ lreplace:; if (resultRelationDesc->rd_att->constr) @@ -1635,9 +1626,9 @@ lreplace:; /* * replace the heap tuple * - * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that - * the row to be updated is visible to that snapshot, and throw a can't- - * serialize error if not. This is a special-case behavior needed for + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the + * row to be updated is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for * referential integrity updates in serializable transactions. */ result = heap_update(resultRelationDesc, tupleid, tuple, @@ -1687,18 +1678,18 @@ lreplace:; (estate->es_processed)++; /* - * Note: instead of having to update the old index tuples associated - * with the heap tuple, all we do is form and insert new index tuples. - * This is because UPDATEs are actually DELETEs and INSERTs, and index - * tuple deletion is done automagically by the vacuum daemon. All we - * do is insert new index tuples. -cim 9/27/89 + * Note: instead of having to update the old index tuples associated with + * the heap tuple, all we do is form and insert new index tuples. This is + * because UPDATEs are actually DELETEs and INSERTs, and index tuple + * deletion is done automagically by the vacuum daemon. All we do is + * insert new index tuples. 
-cim 9/27/89 */ /* * insert index entries for tuple * - * Note: heap_update returns the tid (location) of the new tuple - * in the t_self field. + * Note: heap_update returns the tid (location) of the new tuple in the + * t_self field. */ if (resultRelInfo->ri_NumIndices > 0) ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); @@ -1721,8 +1712,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo, /* * If first time through for this result relation, build expression - * nodetrees for rel's constraint expressions. Keep them in the - * per-query memory context so they'll survive throughout the query. + * nodetrees for rel's constraint expressions. Keep them in the per-query + * memory context so they'll survive throughout the query. */ if (resultRelInfo->ri_ConstraintExprs == NULL) { @@ -1740,8 +1731,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo, } /* - * We will use the EState's per-tuple context for evaluating - * constraint expressions (creating it if it's not already there). + * We will use the EState's per-tuple context for evaluating constraint + * expressions (creating it if it's not already there). */ econtext = GetPerTupleExprContext(estate); @@ -1787,7 +1778,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo, ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("null value in column \"%s\" violates not-null constraint", - NameStr(rel->rd_att->attrs[attrChk - 1]->attname)))); + NameStr(rel->rd_att->attrs[attrChk - 1]->attname)))); } } @@ -1870,9 +1861,9 @@ EvalPlanQual(EState *estate, Index rti, { /* * If xmin isn't what we're expecting, the slot must have been - * recycled and reused for an unrelated tuple. This implies - * that the latest version of the row was deleted, so we need - * do nothing. (Should be safe to examine xmin without getting + * recycled and reused for an unrelated tuple. This implies that + * the latest version of the row was deleted, so we need do + * nothing. (Should be safe to examine xmin without getting * buffer's content lock, since xmin never changes in an existing * tuple.) */ @@ -1888,8 +1879,8 @@ EvalPlanQual(EState *estate, Index rti, elog(ERROR, "t_xmin is uncommitted in tuple to be updated"); /* - * If tuple is being updated by other transaction then we have - * to wait for its commit/abort. + * If tuple is being updated by other transaction then we have to + * wait for its commit/abort. */ if (TransactionIdIsValid(SnapshotDirty->xmax)) { @@ -1907,8 +1898,8 @@ EvalPlanQual(EState *estate, Index rti, } /* - * If the referenced slot was actually empty, the latest version - * of the row must have been deleted, so we need do nothing. + * If the referenced slot was actually empty, the latest version of + * the row must have been deleted, so we need do nothing. */ if (tuple.t_data == NULL) { @@ -1928,15 +1919,15 @@ EvalPlanQual(EState *estate, Index rti, /* * If we get here, the tuple was found but failed SnapshotDirty. - * Assuming the xmin is either a committed xact or our own xact - * (as it certainly should be if we're trying to modify the tuple), - * this must mean that the row was updated or deleted by either - * a committed xact or our own xact. If it was deleted, we can - * ignore it; if it was updated then chain up to the next version - * and repeat the whole test. + * Assuming the xmin is either a committed xact or our own xact (as it + * certainly should be if we're trying to modify the tuple), this must + * mean that the row was updated or deleted by either a committed xact + * or our own xact. 
If it was deleted, we can ignore it; if it was + * updated then chain up to the next version and repeat the whole + * test. * - * As above, it should be safe to examine xmax and t_ctid without - * the buffer content lock, because they can't be changing. + * As above, it should be safe to examine xmax and t_ctid without the + * buffer content lock, because they can't be changing. */ if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid)) { @@ -1954,8 +1945,8 @@ EvalPlanQual(EState *estate, Index rti, } /* - * For UPDATE/DELETE we have to return tid of actual row we're - * executing PQ for. + * For UPDATE/DELETE we have to return tid of actual row we're executing + * PQ for. */ *tid = tuple.t_self; @@ -1974,10 +1965,10 @@ EvalPlanQual(EState *estate, Index rti, } /* - * If this is request for another RTE - Ra, - then we have to check - * wasn't PlanQual requested for Ra already and if so then Ra' row was - * updated again and we have to re-start old execution for Ra and - * forget all what we done after Ra was suspended. Cool? -:)) + * If this is request for another RTE - Ra, - then we have to check wasn't + * PlanQual requested for Ra already and if so then Ra' row was updated + * again and we have to re-start old execution for Ra and forget all what + * we done after Ra was suspended. Cool? -:)) */ if (epq != NULL && epq->rti != rti && epq->estate->es_evTuple[rti - 1] != NULL) @@ -1999,8 +1990,8 @@ EvalPlanQual(EState *estate, Index rti, } /* - * If we are requested for another RTE then we have to suspend - * execution of current PlanQual and start execution for new one. + * If we are requested for another RTE then we have to suspend execution + * of current PlanQual and start execution for new one. */ if (epq == NULL || epq->rti != rti) { @@ -2031,18 +2022,17 @@ EvalPlanQual(EState *estate, Index rti, Assert(epq->rti == rti); /* - * Ok - we're requested for the same RTE. Unfortunately we still have - * to end and restart execution of the plan, because ExecReScan - * wouldn't ensure that upper plan nodes would reset themselves. We - * could make that work if insertion of the target tuple were - * integrated with the Param mechanism somehow, so that the upper plan - * nodes know that their children's outputs have changed. + * Ok - we're requested for the same RTE. Unfortunately we still have to + * end and restart execution of the plan, because ExecReScan wouldn't + * ensure that upper plan nodes would reset themselves. We could make + * that work if insertion of the target tuple were integrated with the + * Param mechanism somehow, so that the upper plan nodes know that their + * children's outputs have changed. * * Note that the stack of free evalPlanQual nodes is quite useless at the * moment, since it only saves us from pallocing/releasing the - * evalPlanQual nodes themselves. But it will be useful once we - * implement ReScan instead of end/restart for re-using PlanQual - * nodes. + * evalPlanQual nodes themselves. But it will be useful once we implement + * ReScan instead of end/restart for re-using PlanQual nodes. */ if (endNode) { @@ -2055,15 +2045,14 @@ EvalPlanQual(EState *estate, Index rti, * * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to * instead copy down changeable state from the top plan (including - * es_result_relation_info, es_junkFilter) and reset locally - * changeable state in the epq (including es_param_exec_vals, - * es_evTupleNull). 
+ * es_result_relation_info, es_junkFilter) and reset locally changeable + * state in the epq (including es_param_exec_vals, es_evTupleNull). */ EvalPlanQualStart(epq, estate, epq->next); /* - * free old RTE' tuple, if any, and store target tuple where - * relation's scan node will see it + * free old RTE' tuple, if any, and store target tuple where relation's + * scan node will see it */ epqstate = epq->estate; if (epqstate->es_evTuple[rti - 1] != NULL) @@ -2171,10 +2160,10 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq) oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt); /* - * The epqstates share the top query's copy of unchanging state such - * as the snapshot, rangetable, result-rel info, and external Param - * info. They need their own copies of local state, including a tuple - * table, es_param_exec_vals, etc. + * The epqstates share the top query's copy of unchanging state such as + * the snapshot, rangetable, result-rel info, and external Param info. + * They need their own copies of local state, including a tuple table, + * es_param_exec_vals, etc. */ epqstate->es_direction = ForwardScanDirection; epqstate->es_snapshot = estate->es_snapshot; @@ -2199,9 +2188,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq) epqstate->es_topPlan = estate->es_topPlan; /* - * Each epqstate must have its own es_evTupleNull state, but all the - * stack entries share es_evTuple state. This allows sub-rechecks to - * inherit the value being examined by an outer recheck. + * Each epqstate must have its own es_evTupleNull state, but all the stack + * entries share es_evTuple state. This allows sub-rechecks to inherit + * the value being examined by an outer recheck. */ epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool)); if (priorepq == NULL) diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index 28f67a2562f..fe067086d3b 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -12,7 +12,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.50 2005/04/19 22:35:11 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.51 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -240,8 +240,8 @@ ExecInitNode(Plan *node, EState *estate) } /* - * Initialize any initPlans present in this node. The planner put - * them in a separate list for us. + * Initialize any initPlans present in this node. The planner put them in + * a separate list for us. */ subps = NIL; foreach(l, node->initPlan) @@ -258,9 +258,9 @@ ExecInitNode(Plan *node, EState *estate) /* * Initialize any subPlans present in this node. These were found by - * ExecInitExpr during initialization of the PlanState. Note we must - * do this after initializing initPlans, in case their arguments - * contain subPlans (is that actually possible? perhaps not). + * ExecInitExpr during initialization of the PlanState. Note we must do + * this after initializing initPlans, in case their arguments contain + * subPlans (is that actually possible? perhaps not). 
*/ foreach(l, result->subPlan) { @@ -422,7 +422,7 @@ ExecProcNode(PlanState *node) Node * MultiExecProcNode(PlanState *node) { - Node *result; + Node *result; CHECK_FOR_INTERRUPTS(); @@ -431,9 +431,9 @@ MultiExecProcNode(PlanState *node) switch (nodeTag(node)) { - /* - * Only node types that actually support multiexec will be listed - */ + /* + * Only node types that actually support multiexec will be listed + */ case T_HashState: result = MultiExecHash((HashState *) node); diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index 87fcf53bf05..d535e6453d5 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.180 2005/06/26 22:05:36 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.181 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -89,8 +89,8 @@ static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCaseTestExpr(ExprState *exprstate, @@ -106,8 +106,8 @@ static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalMinMax(MinMaxExprState *minmaxExpr, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); @@ -243,8 +243,8 @@ ExecEvalArrayRef(ArrayRefExprState *astate, isDone)); /* - * If refexpr yields NULL, and it's a fetch, then result is NULL. In - * the assignment case, we'll cons up something below. + * If refexpr yields NULL, and it's a fetch, then result is NULL. In the + * assignment case, we'll cons up something below. */ if (*isNull) { @@ -298,8 +298,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate, NULL)); /* - * If any index expr yields NULL, result is NULL or source - * array + * If any index expr yields NULL, result is NULL or source array */ if (eisnull) { @@ -326,13 +325,12 @@ ExecEvalArrayRef(ArrayRefExprState *astate, /* * Evaluate the value to be assigned into the array. * - * XXX At some point we'll need to look into making the old value of - * the array element available via CaseTestExpr, as is done by - * ExecEvalFieldStore. This is not needed now but will be needed - * to support arrays of composite types; in an assignment to a - * field of an array member, the parser would generate a - * FieldStore that expects to fetch its input tuple via - * CaseTestExpr. + * XXX At some point we'll need to look into making the old value of the + * array element available via CaseTestExpr, as is done by + * ExecEvalFieldStore. This is not needed now but will be needed to + * support arrays of composite types; in an assignment to a field of + * an array member, the parser would generate a FieldStore that + * expects to fetch its input tuple via CaseTestExpr. 
*/ sourceData = ExecEvalExpr(astate->refassgnexpr, econtext, @@ -340,19 +338,18 @@ ExecEvalArrayRef(ArrayRefExprState *astate, NULL); /* - * For now, can't cope with inserting NULL into an array, so make - * it a no-op per discussion above... + * For now, can't cope with inserting NULL into an array, so make it a + * no-op per discussion above... */ if (eisnull) return PointerGetDatum(array_source); /* - * For an assignment, if all the subscripts and the input - * expression are non-null but the original array is null, then - * substitute an empty (zero-dimensional) array and proceed with - * the assignment. This only works for varlena arrays, though; for - * fixed-length array types we punt and return the null input - * array. + * For an assignment, if all the subscripts and the input expression + * are non-null but the original array is null, then substitute an + * empty (zero-dimensional) array and proceed with the assignment. + * This only works for varlena arrays, though; for fixed-length array + * types we punt and return the null input array. */ if (*isNull) { @@ -379,7 +376,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate, else resultArray = array_set_slice(array_source, i, upper.indx, lower.indx, - (ArrayType *) DatumGetPointer(sourceData), + (ArrayType *) DatumGetPointer(sourceData), astate->refattrlength, astate->refelemlength, astate->refelembyval, @@ -451,10 +448,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, /* * Get the slot and attribute number we want * - * The asserts check that references to system attributes only appear at - * the level of a relation scan; at higher levels, system attributes - * must be treated as ordinary variables (since we no longer have - * access to the original tuple). + * The asserts check that references to system attributes only appear at the + * level of a relation scan; at higher levels, system attributes must be + * treated as ordinary variables (since we no longer have access to the + * original tuple). */ attnum = variable->varattno; @@ -477,6 +474,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, } #ifdef USE_ASSERT_CHECKING + /* * Some checks that are only applied for user attribute numbers (bogus * system attnums will be caught inside slot_getattr). @@ -491,11 +489,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, Assert(attnum <= tuple_type->natts); /* - * This assert checks that the datatype the plan expects to get - * (as told by our "variable" argument) is in fact the datatype of - * the attribute being fetched (as seen in the current context, - * identified by our "econtext" argument). Otherwise crashes are - * likely. + * This assert checks that the datatype the plan expects to get (as + * told by our "variable" argument) is in fact the datatype of the + * attribute being fetched (as seen in the current context, identified + * by our "econtext" argument). Otherwise crashes are likely. * * Note that we can't check dropped columns, since their atttypid has * been zeroed. 
@@ -503,7 +500,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid || tuple_type->attrs[attnum - 1]->attisdropped); } -#endif /* USE_ASSERT_CHECKING */ +#endif /* USE_ASSERT_CHECKING */ return slot_getattr(slot, attnum, isNull); } @@ -559,9 +556,8 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext, if (thisParamKind == PARAM_EXEC) { /* - * PARAM_EXEC params (internal executor parameters) are stored in - * the ecxt_param_exec_vals array, and can be accessed by array - * index. + * PARAM_EXEC params (internal executor parameters) are stored in the + * ecxt_param_exec_vals array, and can be accessed by array index. */ ParamExecData *prm; @@ -579,8 +575,7 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext, else { /* - * All other parameter types must be sought in - * ecxt_param_list_info. + * All other parameter types must be sought in ecxt_param_list_info. */ ParamListInfo paramInfo; @@ -641,9 +636,9 @@ GetAttributeByNum(HeapTupleHeader tuple, tupDesc = lookup_rowtype_tupdesc(tupType, tupTypmod); /* - * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set - * all the fields in the struct just in case user tries to inspect - * system columns. + * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all + * the fields in the struct just in case user tries to inspect system + * columns. */ tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple); ItemPointerSetInvalid(&(tmptup.t_self)); @@ -699,9 +694,9 @@ GetAttributeByName(HeapTupleHeader tuple, const char *attname, bool *isNull) elog(ERROR, "attribute \"%s\" does not exist", attname); /* - * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set - * all the fields in the struct just in case user tries to inspect - * system columns. + * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all + * the fields in the struct just in case user tries to inspect system + * columns. */ tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple); ItemPointerSetInvalid(&(tmptup.t_self)); @@ -730,9 +725,9 @@ init_fcache(Oid foid, FuncExprState *fcache, MemoryContext fcacheCxt) /* * Safety check on nargs. Under normal circumstances this should never - * fail, as parser should check sooner. But possibly it might fail - * if server has been compiled with FUNC_MAX_ARGS smaller than some - * functions declared in pg_proc? + * fail, as parser should check sooner. But possibly it might fail if + * server has been compiled with FUNC_MAX_ARGS smaller than some functions + * declared in pg_proc? */ if (list_length(fcache->args) > FUNC_MAX_ARGS) ereport(ERROR, @@ -793,10 +788,9 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo, if (thisArgIsDone != ExprSingleResult) { /* - * We allow only one argument to have a set value; we'd need - * much more complexity to keep track of multiple set - * arguments (cf. ExecTargetList) and it doesn't seem worth - * it. + * We allow only one argument to have a set value; we'd need much + * more complexity to keep track of multiple set arguments (cf. + * ExecTargetList) and it doesn't seem worth it. */ if (argIsDone != ExprSingleResult) ereport(ERROR, @@ -835,11 +829,10 @@ ExecMakeFunctionResult(FuncExprState *fcache, check_stack_depth(); /* - * arguments is a list of expressions to evaluate before passing to - * the function manager. We skip the evaluation if it was already - * done in the previous call (ie, we are continuing the evaluation of - * a set-valued function). 
Otherwise, collect the current argument - * values into fcinfo. + * arguments is a list of expressions to evaluate before passing to the + * function manager. We skip the evaluation if it was already done in the + * previous call (ie, we are continuing the evaluation of a set-valued + * function). Otherwise, collect the current argument values into fcinfo. */ if (!fcache->setArgsValid) { @@ -870,8 +863,7 @@ ExecMakeFunctionResult(FuncExprState *fcache, } /* - * If function returns set, prepare a resultinfo node for - * communication + * If function returns set, prepare a resultinfo node for communication */ if (fcache->func.fn_retset) { @@ -887,14 +879,14 @@ ExecMakeFunctionResult(FuncExprState *fcache, } /* - * now return the value gotten by calling the function manager, - * passing the function the evaluated parameter values. + * now return the value gotten by calling the function manager, passing + * the function the evaluated parameter values. */ if (fcache->func.fn_retset || hasSetArg) { /* - * We need to return a set result. Complain if caller not ready - * to accept one. + * We need to return a set result. Complain if caller not ready to + * accept one. */ if (isDone == NULL) ereport(ERROR, @@ -902,18 +894,18 @@ ExecMakeFunctionResult(FuncExprState *fcache, errmsg("set-valued function called in context that cannot accept a set"))); /* - * This loop handles the situation where we have both a set - * argument and a set-valued function. Once we have exhausted the - * function's value(s) for a particular argument value, we have to - * get the next argument value and start the function over again. - * We might have to do it more than once, if the function produces - * an empty result set for a particular input value. + * This loop handles the situation where we have both a set argument + * and a set-valued function. Once we have exhausted the function's + * value(s) for a particular argument value, we have to get the next + * argument value and start the function over again. We might have to + * do it more than once, if the function produces an empty result set + * for a particular input value. */ for (;;) { /* - * If function is strict, and there are any NULL arguments, - * skip calling the function (at least for this set of args). + * If function is strict, and there are any NULL arguments, skip + * calling the function (at least for this set of args). */ bool callit = true; @@ -948,8 +940,8 @@ ExecMakeFunctionResult(FuncExprState *fcache, { /* * Got a result from current argument. If function itself - * returns set, save the current argument values to re-use - * on the next call. + * returns set, save the current argument values to re-use on + * the next call. */ if (fcache->func.fn_retset && *isDone == ExprMultipleResult) { @@ -961,7 +953,7 @@ ExecMakeFunctionResult(FuncExprState *fcache, { RegisterExprContextCallback(econtext, ShutdownFuncExpr, - PointerGetDatum(fcache)); + PointerGetDatum(fcache)); fcache->shutdown_reg = true; } } @@ -992,8 +984,8 @@ ExecMakeFunctionResult(FuncExprState *fcache, } /* - * If we reach here, loop around to run the function on the - * new argument. + * If we reach here, loop around to run the function on the new + * argument. */ } } @@ -1003,9 +995,9 @@ ExecMakeFunctionResult(FuncExprState *fcache, * Non-set case: much easier. * * We change the ExprState function pointer to use the simpler - * ExecMakeFunctionResultNoSets on subsequent calls. This amounts - * to assuming that no argument can return a set if it didn't do - * so the first time. 
+ * ExecMakeFunctionResultNoSets on subsequent calls. This amounts to + * assuming that no argument can return a set if it didn't do so the + * first time. */ fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets; @@ -1074,8 +1066,8 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache, InitFunctionCallInfoData(fcinfo, &(fcache->func), i, NULL, NULL); /* - * If function is strict, and there are any NULL arguments, skip - * calling the function and return NULL. + * If function is strict, and there are any NULL arguments, skip calling + * the function and return NULL. */ if (fcache->func.fn_strict) { @@ -1100,7 +1092,7 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache, * ExecMakeTableFunctionResult * * Evaluate a table function, producing a materialized result in a Tuplestore - * object. *returnDesc is set to the tupledesc actually returned by the + * object. *returnDesc is set to the tupledesc actually returned by the * function, or NULL if it didn't provide one. */ Tuplestorestate * @@ -1130,11 +1122,11 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, get_typtype(funcrettype) == 'c'); /* - * Prepare a resultinfo node for communication. We always do this - * even if not expecting a set result, so that we can pass - * expectedDesc. In the generic-expression case, the expression - * doesn't actually get to see the resultinfo, but set it up anyway - * because we use some of the fields as our own state variables. + * Prepare a resultinfo node for communication. We always do this even if + * not expecting a set result, so that we can pass expectedDesc. In the + * generic-expression case, the expression doesn't actually get to see the + * resultinfo, but set it up anyway because we use some of the fields as + * our own state variables. */ InitFunctionCallInfoData(fcinfo, NULL, 0, NULL, (Node *) &rsinfo); rsinfo.type = T_ReturnSetInfo; @@ -1147,14 +1139,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, rsinfo.setDesc = NULL; /* - * Normally the passed expression tree will be a FuncExprState, since - * the grammar only allows a function call at the top level of a table - * function reference. However, if the function doesn't return set - * then the planner might have replaced the function call via - * constant-folding or inlining. So if we see any other kind of - * expression node, execute it via the general ExecEvalExpr() code; - * the only difference is that we don't get a chance to pass a special - * ReturnSetInfo to any functions buried in the expression. + * Normally the passed expression tree will be a FuncExprState, since the + * grammar only allows a function call at the top level of a table + * function reference. However, if the function doesn't return set then + * the planner might have replaced the function call via constant-folding + * or inlining. So if we see any other kind of expression node, execute + * it via the general ExecEvalExpr() code; the only difference is that we + * don't get a chance to pass a special ReturnSetInfo to any functions + * buried in the expression. */ if (funcexpr && IsA(funcexpr, FuncExprState) && IsA(funcexpr->expr, FuncExpr)) @@ -1182,9 +1174,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, * Evaluate the function's argument list. * * Note: ideally, we'd do this in the per-tuple context, but then the - * argument values would disappear when we reset the context in - * the inner loop. So do it in caller context. Perhaps we should - * make a separate context just to hold the evaluated arguments? 
+ * argument values would disappear when we reset the context in the + * inner loop. So do it in caller context. Perhaps we should make a + * separate context just to hold the evaluated arguments? */ fcinfo.flinfo = &(fcache->func); argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext); @@ -1217,8 +1209,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, } /* - * Switch to short-lived context for calling the function or - * expression. + * Switch to short-lived context for calling the function or expression. */ MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); @@ -1232,9 +1223,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, HeapTuple tuple; /* - * reset per-tuple memory context before each call of the function - * or expression. This cleans up any local memory the function may - * leak when called. + * reset per-tuple memory context before each call of the function or + * expression. This cleans up any local memory the function may leak + * when called. */ ResetExprContext(econtext); @@ -1261,12 +1252,12 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, break; /* - * Can't do anything very useful with NULL rowtype values. - * For a function returning set, we consider this a protocol - * violation (but another alternative would be to just ignore - * the result and "continue" to get another row). For a function - * not returning set, we fall out of the loop; we'll cons up - * an all-nulls result row below. + * Can't do anything very useful with NULL rowtype values. For a + * function returning set, we consider this a protocol violation + * (but another alternative would be to just ignore the result and + * "continue" to get another row). For a function not returning + * set, we fall out of the loop; we'll cons up an all-nulls result + * row below. */ if (returnsTuple && fcinfo.isnull) { @@ -1278,8 +1269,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, } /* - * If first time through, build tupdesc and tuplestore for - * result + * If first time through, build tupdesc and tuplestore for result */ if (first_time) { @@ -1287,15 +1277,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, if (returnsTuple) { /* - * Use the type info embedded in the rowtype Datum to - * look up the needed tupdesc. Make a copy for the - * query. + * Use the type info embedded in the rowtype Datum to look + * up the needed tupdesc. Make a copy for the query. */ HeapTupleHeader td; td = DatumGetHeapTupleHeader(result); tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td), - HeapTupleHeaderGetTypMod(td)); + HeapTupleHeaderGetTypMod(td)); tupdesc = CreateTupleDescCopy(tupdesc); } else @@ -1507,7 +1496,7 @@ ExecEvalDistinct(FuncExprState *fcache, if (argDone != ExprSingleResult) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("IS DISTINCT FROM does not support set arguments"))); + errmsg("IS DISTINCT FROM does not support set arguments"))); Assert(fcinfo.nargs == 2); if (fcinfo.argnull[0] && fcinfo.argnull[1]) @@ -1580,12 +1569,12 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate, if (argDone != ExprSingleResult) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("op ANY/ALL (array) does not support set arguments"))); + errmsg("op ANY/ALL (array) does not support set arguments"))); Assert(fcinfo.nargs == 2); /* - * If the array is NULL then we return NULL --- it's not very - * meaningful to do anything else, even if the operator isn't strict. 
+ * If the array is NULL then we return NULL --- it's not very meaningful + * to do anything else, even if the operator isn't strict. */ if (fcinfo.argnull[1]) { @@ -1598,18 +1587,17 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate, /* * If the array is empty, we return either FALSE or TRUE per the useOr * flag. This is correct even if the scalar is NULL; since we would - * evaluate the operator zero times, it matters not whether it would - * want to return NULL. + * evaluate the operator zero times, it matters not whether it would want + * to return NULL. */ nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr)); if (nitems <= 0) return BoolGetDatum(!useOr); /* - * If the scalar is NULL, and the function is strict, return NULL. - * This is just to avoid having to test for strictness inside the - * loop. (XXX but if arrays could have null elements, we'd need a - * test anyway.) + * If the scalar is NULL, and the function is strict, return NULL. This is + * just to avoid having to test for strictness inside the loop. (XXX but + * if arrays could have null elements, we'd need a test anyway.) */ if (fcinfo.argnull[0] && sstate->fxprstate.func.fn_strict) { @@ -1618,9 +1606,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate, } /* - * We arrange to look up info about the element type only once per - * series of calls, assuming the element type doesn't change - * underneath us. + * We arrange to look up info about the element type only once per series + * of calls, assuming the element type doesn't change underneath us. */ if (sstate->element_type != ARR_ELEMTYPE(arr)) { @@ -1711,15 +1698,15 @@ ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, expr_value = ExecEvalExpr(clause, econtext, isNull, NULL); /* - * if the expression evaluates to null, then we just cascade the null - * back to whoever called us. + * if the expression evaluates to null, then we just cascade the null back + * to whoever called us. */ if (*isNull) return expr_value; /* - * evaluation of 'not' is simple.. expr is false, then return 'true' - * and vice versa. + * evaluation of 'not' is simple.. expr is false, then return 'true' and + * vice versa. */ return BoolGetDatum(!DatumGetBool(expr_value)); } @@ -1742,18 +1729,17 @@ ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, AnyNull = false; /* - * If any of the clauses is TRUE, the OR result is TRUE regardless of - * the states of the rest of the clauses, so we can stop evaluating - * and return TRUE immediately. If none are TRUE and one or more is - * NULL, we return NULL; otherwise we return FALSE. This makes sense - * when you interpret NULL as "don't know": if we have a TRUE then the - * OR is TRUE even if we aren't sure about some of the other inputs. - * If all the known inputs are FALSE, but we have one or more "don't - * knows", then we have to report that we "don't know" what the OR's - * result should be --- perhaps one of the "don't knows" would have - * been TRUE if we'd known its value. Only when all the inputs are - * known to be FALSE can we state confidently that the OR's result is - * FALSE. + * If any of the clauses is TRUE, the OR result is TRUE regardless of the + * states of the rest of the clauses, so we can stop evaluating and return + * TRUE immediately. If none are TRUE and one or more is NULL, we return + * NULL; otherwise we return FALSE. This makes sense when you interpret + * NULL as "don't know": if we have a TRUE then the OR is TRUE even if we + * aren't sure about some of the other inputs. 
If all the known inputs are + * FALSE, but we have one or more "don't knows", then we have to report + * that we "don't know" what the OR's result should be --- perhaps one of + * the "don't knows" would have been TRUE if we'd known its value. Only + * when all the inputs are known to be FALSE can we state confidently that + * the OR's result is FALSE. */ foreach(clause, clauses) { @@ -1794,12 +1780,12 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, AnyNull = false; /* - * If any of the clauses is FALSE, the AND result is FALSE regardless - * of the states of the rest of the clauses, so we can stop evaluating - * and return FALSE immediately. If none are FALSE and one or more is - * NULL, we return NULL; otherwise we return TRUE. This makes sense - * when you interpret NULL as "don't know", using the same sort of - * reasoning as for OR, above. + * If any of the clauses is FALSE, the AND result is FALSE regardless of + * the states of the rest of the clauses, so we can stop evaluating and + * return FALSE immediately. If none are FALSE and one or more is NULL, + * we return NULL; otherwise we return TRUE. This makes sense when you + * interpret NULL as "don't know", using the same sort of reasoning as for + * OR, above. */ foreach(clause, clauses) @@ -1826,7 +1812,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, /* ---------------------------------------------------------------- * ExecEvalConvertRowtype * - * Evaluate a rowtype coercion operation. This may require + * Evaluate a rowtype coercion operation. This may require * rearranging field positions. * ---------------------------------------------------------------- */ @@ -1865,10 +1851,9 @@ ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate, tmptup.t_data = tuple; /* - * Extract all the values of the old tuple, offsetting the arrays - * so that invalues[0] is NULL and invalues[1] is the first - * source attribute; this exactly matches the numbering convention - * in attrMap. + * Extract all the values of the old tuple, offsetting the arrays so that + * invalues[0] is NULL and invalues[1] is the first source attribute; this + * exactly matches the numbering convention in attrMap. */ heap_deform_tuple(&tmptup, cstate->indesc, invalues + 1, inisnull + 1); invalues[0] = (Datum) 0; @@ -1915,10 +1900,10 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, *isDone = ExprSingleResult; /* - * If there's a test expression, we have to evaluate it and save the - * value where the CaseTestExpr placeholders can find it. We must save - * and restore prior setting of econtext's caseValue fields, in case - * this node is itself within a larger CASE. + * If there's a test expression, we have to evaluate it and save the value + * where the CaseTestExpr placeholders can find it. We must save and + * restore prior setting of econtext's caseValue fields, in case this node + * is itself within a larger CASE. */ save_datum = econtext->caseValue_datum; save_isNull = econtext->caseValue_isNull; @@ -1927,14 +1912,14 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, { econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg, econtext, - &econtext->caseValue_isNull, + &econtext->caseValue_isNull, NULL); } /* - * we evaluate each of the WHEN clauses in turn, as soon as one is - * true we return the corresponding result. If none are true then we - * return the value of the default clause, or NULL if there is none. 
+ * we evaluate each of the WHEN clauses in turn, as soon as one is true we + * return the corresponding result. If none are true then we return the + * value of the default clause, or NULL if there is none. */ foreach(clause, clauses) { @@ -1947,9 +1932,9 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, NULL); /* - * if we have a true test, then we return the result, since the - * case statement is satisfied. A NULL result from the test is - * not considered true. + * if we have a true test, then we return the result, since the case + * statement is satisfied. A NULL result from the test is not + * considered true. */ if (DatumGetBool(clause_value) && !*isNull) { @@ -2098,7 +2083,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot merge incompatible arrays"), errdetail("Array with element type %s cannot be " - "included in ARRAY construct with element type %s.", + "included in ARRAY construct with element type %s.", format_type_be(ARR_ELEMTYPE(array)), format_type_be(element_type)))); @@ -2110,8 +2095,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, if (ndims <= 0 || ndims > MAXDIM) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("number of array dimensions (%d) exceeds " \ - "the maximum allowed (%d)", ndims, MAXDIM))); + errmsg("number of array dimensions (%d) exceeds " \ + "the maximum allowed (%d)", ndims, MAXDIM))); elem_dims = (int *) palloc(elem_ndims * sizeof(int)); memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int)); @@ -2130,8 +2115,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, elem_ndims * sizeof(int)) != 0) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), - errmsg("multidimensional arrays must have array " - "expressions with matching dimensions"))); + errmsg("multidimensional arrays must have array " + "expressions with matching dimensions"))); } elem_ndatabytes = ARR_SIZE(array) - ARR_OVERHEAD(elem_ndims); @@ -2258,10 +2243,10 @@ static Datum ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone) { - Datum result = (Datum) 0; + Datum result = (Datum) 0; MinMaxOp op = ((MinMaxExpr *) minmaxExpr->xprstate.expr)->op; FunctionCallInfoData locfcinfo; - ListCell *arg; + ListCell *arg; if (isDone) *isDone = ExprSingleResult; @@ -2295,7 +2280,7 @@ ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext, locfcinfo.arg[1] = value; locfcinfo.isnull = false; cmpresult = DatumGetInt32(FunctionCallInvoke(&locfcinfo)); - if (locfcinfo.isnull) /* probably should not happen */ + if (locfcinfo.isnull) /* probably should not happen */ continue; if (cmpresult > 0 && op == IS_LEAST) result = value; @@ -2531,8 +2516,8 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext, if (*isNull) ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("domain %s does not allow null values", - format_type_be(ctest->resulttype)))); + errmsg("domain %s does not allow null values", + format_type_be(ctest->resulttype)))); break; case DOM_CONSTRAINT_CHECK: { @@ -2545,8 +2530,7 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext, * Set up value to be returned by CoerceToDomainValue * nodes. We must save and restore prior setting of * econtext's domainValue fields, in case this node is - * itself within a check expression for another - * domain. + * itself within a check expression for another domain. 
*/ save_datum = econtext->domainValue_datum; save_isNull = econtext->domainValue_isNull; @@ -2647,9 +2631,9 @@ ExecEvalFieldSelect(FieldSelectState *fstate, } /* - * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set - * all the fields in the struct just in case user tries to inspect - * system columns. + * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all + * the fields in the struct just in case user tries to inspect system + * columns. */ tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple); ItemPointerSetInvalid(&(tmptup.t_self)); @@ -2715,8 +2699,8 @@ ExecEvalFieldStore(FieldStoreState *fstate, if (!*isNull) { /* - * heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader. - * We set all the fields in the struct just in case. + * heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader. We + * set all the fields in the struct just in case. */ HeapTupleHeader tuphdr; HeapTupleData tmptup; @@ -2749,11 +2733,11 @@ ExecEvalFieldStore(FieldStoreState *fstate, Assert(fieldnum > 0 && fieldnum <= tupDesc->natts); /* - * Use the CaseTestExpr mechanism to pass down the old value of - * the field being replaced; this is useful in case we have a - * nested field update situation. It's safe to reuse the CASE - * mechanism because there cannot be a CASE between here and where - * the value would be needed. + * Use the CaseTestExpr mechanism to pass down the old value of the + * field being replaced; this is useful in case we have a nested field + * update situation. It's safe to reuse the CASE mechanism because + * there cannot be a CASE between here and where the value would be + * needed. */ econtext->caseValue_datum = values[fieldnum - 1]; econtext->caseValue_isNull = isnull[fieldnum - 1]; @@ -2895,8 +2879,8 @@ ExecInitExpr(Expr *node, PlanState *parent) /* * Complain if the aggregate's argument contains any * aggregates; nested agg functions are semantically - * nonsensical. (This should have been caught - * earlier, but we defend against it here anyway.) + * nonsensical. (This should have been caught earlier, + * but we defend against it here anyway.) */ if (naggs != aggstate->numaggs) ereport(ERROR, @@ -3020,9 +3004,8 @@ ExecInitExpr(Expr *node, PlanState *parent) elog(ERROR, "SubPlan found with no parent plan"); /* - * Here we just add the SubPlanState nodes to - * parent->subPlan. The subplans will be initialized - * later. + * Here we just add the SubPlanState nodes to parent->subPlan. + * The subplans will be initialized later. 
*/ parent->subPlan = lcons(sstate, parent->subPlan); sstate->sub_estate = NULL; @@ -3073,8 +3056,8 @@ ExecInitExpr(Expr *node, PlanState *parent) { ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; ConvertRowtypeExprState *cstate = makeNode(ConvertRowtypeExprState); - int i; - int n; + int i; + int n; cstate->xprstate.evalfunc = (ExprStateEvalFunc) ExecEvalConvertRowtype; cstate->arg = ExecInitExpr(convert->arg, parent); @@ -3095,7 +3078,7 @@ ExecInitExpr(Expr *node, PlanState *parent) int j; if (att->attisdropped) - continue; /* attrMap[i] is already 0 */ + continue; /* attrMap[i] is already 0 */ attname = NameStr(att->attname); atttypid = att->atttypid; atttypmod = att->atttypmod; @@ -3111,7 +3094,7 @@ ExecInitExpr(Expr *node, PlanState *parent) elog(ERROR, "attribute \"%s\" of type %s does not match corresponding attribute of type %s", attname, format_type_be(cstate->indesc->tdtypeid), - format_type_be(cstate->outdesc->tdtypeid)); + format_type_be(cstate->outdesc->tdtypeid)); cstate->attrMap[i] = (AttrNumber) (j + 1); break; } @@ -3217,24 +3200,24 @@ ExecInitExpr(Expr *node, PlanState *parent) if (!attrs[i]->attisdropped) { /* - * Guard against ALTER COLUMN TYPE on rowtype - * since the RowExpr was created. XXX should we - * check typmod too? Not sure we can be sure - * it'll be the same. + * Guard against ALTER COLUMN TYPE on rowtype since + * the RowExpr was created. XXX should we check + * typmod too? Not sure we can be sure it'll be the + * same. */ if (exprType((Node *) e) != attrs[i]->atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("ROW() column has type %s instead of type %s", - format_type_be(exprType((Node *) e)), - format_type_be(attrs[i]->atttypid)))); + format_type_be(exprType((Node *) e)), + format_type_be(attrs[i]->atttypid)))); } else { /* - * Ignore original expression and insert a NULL. - * We don't really care what type of NULL it is, - * so always make an int4 NULL. + * Ignore original expression and insert a NULL. We + * don't really care what type of NULL it is, so + * always make an int4 NULL. */ e = (Expr *) makeNullConst(INT4OID); } @@ -3485,16 +3468,16 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull) oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); /* - * Evaluate the qual conditions one at a time. If we find a FALSE - * result, we can stop evaluating and return FALSE --- the AND result - * must be FALSE. Also, if we find a NULL result when resultForNull - * is FALSE, we can stop and return FALSE --- the AND result must be - * FALSE or NULL in that case, and the caller doesn't care which. + * Evaluate the qual conditions one at a time. If we find a FALSE result, + * we can stop evaluating and return FALSE --- the AND result must be + * FALSE. Also, if we find a NULL result when resultForNull is FALSE, we + * can stop and return FALSE --- the AND result must be FALSE or NULL in + * that case, and the caller doesn't care which. * - * If we get to the end of the list, we can return TRUE. This will - * happen when the AND result is indeed TRUE, or when the AND result - * is NULL (one or more NULL subresult, with all the rest TRUE) and - * the caller has specified resultForNull = TRUE. + * If we get to the end of the list, we can return TRUE. This will happen + * when the AND result is indeed TRUE, or when the AND result is NULL (one + * or more NULL subresult, with all the rest TRUE) and the caller has + * specified resultForNull = TRUE. 
*/ result = true; @@ -3637,8 +3620,7 @@ ExecTargetList(List *targetlist, if (*isDone == ExprSingleResult) { /* - * all sets are done, so report that tlist expansion is - * complete. + * all sets are done, so report that tlist expansion is complete. */ *isDone = ExprEndResult; MemoryContextSwitchTo(oldContext); @@ -3647,8 +3629,8 @@ ExecTargetList(List *targetlist, else { /* - * We have some done and some undone sets. Restart the done - * ones so that we can deliver a tuple (if possible). + * We have some done and some undone sets. Restart the done ones + * so that we can deliver a tuple (if possible). */ foreach(tl, targetlist) { @@ -3666,8 +3648,8 @@ ExecTargetList(List *targetlist, if (itemIsDone[resind] == ExprEndResult) { /* - * Oh dear, this item is returning an empty set. - * Guess we can't make a tuple after all. + * Oh dear, this item is returning an empty set. Guess + * we can't make a tuple after all. */ *isDone = ExprEndResult; break; @@ -3676,9 +3658,9 @@ ExecTargetList(List *targetlist, } /* - * If we cannot make a tuple because some sets are empty, we - * still have to cycle the nonempty sets to completion, else - * resources will not be released from subplans etc. + * If we cannot make a tuple because some sets are empty, we still + * have to cycle the nonempty sets to completion, else resources + * will not be released from subplans etc. * * XXX is that still necessary? */ @@ -3741,8 +3723,8 @@ ExecVariableList(ProjectionInfo *projInfo, projInfo->pi_lastScanVar); /* - * Assign to result by direct extraction of fields from source - * slots ... a mite ugly, but fast ... + * Assign to result by direct extraction of fields from source slots ... a + * mite ugly, but fast ... */ for (i = list_length(projInfo->pi_targetlist) - 1; i >= 0; i--) { @@ -3784,10 +3766,9 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone) slot = projInfo->pi_slot; /* - * Clear any former contents of the result slot. This makes it - * safe for us to use the slot's Datum/isnull arrays as workspace. - * (Also, we can return the slot as-is if we decide no rows can - * be projected.) + * Clear any former contents of the result slot. This makes it safe for + * us to use the slot's Datum/isnull arrays as workspace. (Also, we can + * return the slot as-is if we decide no rows can be projected.) */ ExecClearTuple(slot); diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c index 843aa15101c..90ffda092a0 100644 --- a/src/backend/executor/execScan.c +++ b/src/backend/executor/execScan.c @@ -12,7 +12,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.36 2005/05/22 22:30:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.37 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -61,16 +61,16 @@ ExecScan(ScanState *node, projInfo = node->ps.ps_ProjInfo; /* - * If we have neither a qual to check nor a projection to do, - * just skip all the overhead and return the raw scan tuple. + * If we have neither a qual to check nor a projection to do, just skip + * all the overhead and return the raw scan tuple. */ if (!qual && !projInfo) return (*accessMtd) (node); /* - * Check to see if we're still projecting out tuples from a previous - * scan tuple (because there is a function-returning-set in the - * projection expressions). If so, try to project another one. 
+ * Check to see if we're still projecting out tuples from a previous scan + * tuple (because there is a function-returning-set in the projection + * expressions). If so, try to project another one. */ if (node->ps.ps_TupFromTlist) { @@ -84,15 +84,15 @@ ExecScan(ScanState *node, /* * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous tuple cycle. Note this can't - * happen until we're done projecting out tuples from a scan tuple. + * storage allocated in the previous tuple cycle. Note this can't happen + * until we're done projecting out tuples from a scan tuple. */ econtext = node->ps.ps_ExprContext; ResetExprContext(econtext); /* - * get a tuple from the access method loop until we obtain a tuple - * which passes the qualification. + * get a tuple from the access method loop until we obtain a tuple which + * passes the qualification. */ for (;;) { @@ -103,10 +103,10 @@ ExecScan(ScanState *node, slot = (*accessMtd) (node); /* - * if the slot returned by the accessMtd contains NULL, then it - * means there is nothing more to scan so we just return an empty - * slot, being careful to use the projection result slot so it has - * correct tupleDesc. + * if the slot returned by the accessMtd contains NULL, then it means + * there is nothing more to scan so we just return an empty slot, + * being careful to use the projection result slot so it has correct + * tupleDesc. */ if (TupIsNull(slot)) { @@ -125,8 +125,8 @@ ExecScan(ScanState *node, * check that the current tuple satisfies the qual-clause * * check for non-nil qual here to avoid a function call to ExecQual() - * when the qual is nil ... saves only a few cycles, but they add - * up ... + * when the qual is nil ... saves only a few cycles, but they add up + * ... */ if (!qual || ExecQual(qual, econtext, false)) { @@ -136,10 +136,9 @@ ExecScan(ScanState *node, if (projInfo) { /* - * Form a projection tuple, store it in the result tuple - * slot and return it --- unless we find we can project no - * tuples from this scan tuple, in which case continue - * scan. + * Form a projection tuple, store it in the result tuple slot + * and return it --- unless we find we can project no tuples + * from this scan tuple, in which case continue scan. */ resultSlot = ExecProject(projInfo, &isDone); if (isDone != ExprEndResult) @@ -226,8 +225,8 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc return false; /* tlist too long */ /* - * If the plan context requires a particular hasoid setting, then that - * has to match, too. + * If the plan context requires a particular hasoid setting, then that has + * to match, too. 
*/ if (ExecContextForcesOids(ps, &hasoid) && hasoid != tupdesc->tdhasoid) diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 1c82a3b64be..b38bcc44cb4 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -15,7 +15,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.87 2005/04/06 16:34:04 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -129,7 +129,7 @@ ExecCreateTupleTable(int tableSize) * allocate the table itself */ newtable = (TupleTable) palloc(sizeof(TupleTableData) + - (tableSize - 1) * sizeof(TupleTableSlot)); + (tableSize - 1) *sizeof(TupleTableSlot)); newtable->size = tableSize; newtable->next = 0; @@ -175,10 +175,9 @@ ExecDropTupleTable(TupleTable table, /* tuple table */ Assert(table != NULL); /* - * first free all the valid pointers in the tuple array and drop - * refcounts of any referenced buffers, if that's what the caller - * wants. (There is probably no good reason for the caller ever not - * to want it!) + * first free all the valid pointers in the tuple array and drop refcounts + * of any referenced buffers, if that's what the caller wants. (There is + * probably no good reason for the caller ever not to want it!) */ if (shouldFree) { @@ -288,9 +287,9 @@ ExecAllocTableSlot(TupleTable table) Assert(table != NULL); /* - * We expect that the table was made big enough to begin with. - * We cannot reallocate it on the fly since previous plan nodes - * have already got pointers to individual entries. + * We expect that the table was made big enough to begin with. We cannot + * reallocate it on the fly since previous plan nodes have already got + * pointers to individual entries. */ if (table->next >= table->size) elog(ERROR, "plan requires more slots than are available"); @@ -322,8 +321,8 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ ExecClearTuple(slot); /* - * Release any old descriptor. Also release old Datum/isnull arrays - * if present (we don't bother to check if they could be re-used). + * Release any old descriptor. Also release old Datum/isnull arrays if + * present (we don't bother to check if they could be re-used). */ if (slot->tts_shouldFreeDesc) FreeTupleDesc(slot->tts_tupleDescriptor); @@ -340,9 +339,8 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ slot->tts_shouldFreeDesc = shouldFree; /* - * Allocate Datum/isnull arrays of the appropriate size. These must - * have the same lifetime as the slot, so allocate in the slot's own - * context. + * Allocate Datum/isnull arrays of the appropriate size. These must have + * the same lifetime as the slot, so allocate in the slot's own context. */ slot->tts_values = (Datum *) MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(Datum)); @@ -417,8 +415,8 @@ ExecStoreTuple(HeapTuple tuple, slot->tts_tuple = tuple; /* - * If tuple is on a disk page, keep the page pinned as long as we hold - * a pointer into it. We assume the caller already has such a pin. + * If tuple is on a disk page, keep the page pinned as long as we hold a + * pointer into it. We assume the caller already has such a pin. */ slot->tts_buffer = buffer; if (BufferIsValid(buffer)) @@ -621,21 +619,20 @@ ExecMaterializeSlot(TupleTableSlot *slot) Assert(!slot->tts_isempty); /* - * If we have a physical tuple, and it's locally palloc'd, we have - * nothing to do. 
+ * If we have a physical tuple, and it's locally palloc'd, we have nothing + * to do. */ if (slot->tts_tuple && slot->tts_shouldFree) return slot->tts_tuple; /* * Otherwise, copy or build a tuple, and then store it as the new slot - * value. (Note: tts_nvalid will be reset to zero here. There are - * cases in which this could be optimized but it's probably not worth - * worrying about.) + * value. (Note: tts_nvalid will be reset to zero here. There are cases + * in which this could be optimized but it's probably not worth worrying + * about.) * - * We may be called in a context that is shorter-lived than the - * tuple slot, but we have to ensure that the materialized tuple - * will survive anyway. + * We may be called in a context that is shorter-lived than the tuple slot, + * but we have to ensure that the materialized tuple will survive anyway. */ oldContext = MemoryContextSwitchTo(slot->tts_mcxt); newTuple = ExecCopySlotTuple(slot); @@ -663,9 +660,9 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot) MemoryContext oldContext; /* - * There might be ways to optimize this when the source is virtual, - * but for now just always build a physical copy. Make sure it is - * in the right context. + * There might be ways to optimize this when the source is virtual, but + * for now just always build a physical copy. Make sure it is in the + * right context. */ oldContext = MemoryContextSwitchTo(dstslot->tts_mcxt); newTuple = ExecCopySlotTuple(srcslot); @@ -893,8 +890,7 @@ TupleDescGetAttInMetadata(TupleDesc tupdesc) attinmeta->tupdesc = BlessTupleDesc(tupdesc); /* - * Gather info needed later to call the "in" function for each - * attribute + * Gather info needed later to call the "in" function for each attribute */ attinfuncinfo = (FmgrInfo *) palloc0(natts * sizeof(FmgrInfo)); attioparams = (Oid *) palloc0(natts * sizeof(Oid)); @@ -974,8 +970,8 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values) tuple = heap_formtuple(tupdesc, dvalues, nulls); /* - * Release locally palloc'd space. XXX would probably be good to - * pfree values of pass-by-reference datums, as well. + * Release locally palloc'd space. XXX would probably be good to pfree + * values of pass-by-reference datums, as well. */ pfree(dvalues); pfree(nulls); diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index feeffe70520..05bfc08dc7d 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.125 2005/08/01 20:31:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -63,8 +63,8 @@ int NTupleReplaced; int NTupleAppended; int NTupleDeleted; int NIndexTupleInserted; -extern int NIndexTupleProcessed; /* have to be defined in the - * access method level so that the +extern int NIndexTupleProcessed; /* have to be defined in the access + * method level so that the * cinterface.a will link ok. */ @@ -166,8 +166,8 @@ CreateExecutorState(void) ALLOCSET_DEFAULT_MAXSIZE); /* - * Make the EState node within the per-query context. This way, we - * don't need a separate pfree() operation for it at shutdown. + * Make the EState node within the per-query context. This way, we don't + * need a separate pfree() operation for it at shutdown. 
*/ oldcontext = MemoryContextSwitchTo(qcontext); @@ -244,16 +244,16 @@ void FreeExecutorState(EState *estate) { /* - * Shut down and free any remaining ExprContexts. We do this - * explicitly to ensure that any remaining shutdown callbacks get - * called (since they might need to release resources that aren't - * simply memory within the per-query memory context). + * Shut down and free any remaining ExprContexts. We do this explicitly + * to ensure that any remaining shutdown callbacks get called (since they + * might need to release resources that aren't simply memory within the + * per-query memory context). */ while (estate->es_exprcontexts) { /* - * XXX: seems there ought to be a faster way to implement this - * than repeated list_delete(), no? + * XXX: seems there ought to be a faster way to implement this than + * repeated list_delete(), no? */ FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts)); /* FreeExprContext removed the list link for us */ @@ -324,10 +324,9 @@ CreateExprContext(EState *estate) econtext->ecxt_callbacks = NULL; /* - * Link the ExprContext into the EState to ensure it is shut down when - * the EState is freed. Because we use lcons(), shutdowns will occur - * in reverse order of creation, which may not be essential but can't - * hurt. + * Link the ExprContext into the EState to ensure it is shut down when the + * EState is freed. Because we use lcons(), shutdowns will occur in + * reverse order of creation, which may not be essential but can't hurt. */ estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts); @@ -471,9 +470,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate) } /* - * ExecTypeFromTL needs the parse-time representation of the tlist, - * not a list of ExprStates. This is good because some plan nodes - * don't bother to set up planstate->targetlist ... + * ExecTypeFromTL needs the parse-time representation of the tlist, not a + * list of ExprStates. This is good because some plan nodes don't bother + * to set up planstate->targetlist ... */ tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid); ExecAssignResultType(planstate, tupDesc, true); @@ -518,8 +517,8 @@ ExecBuildProjectionInfo(List *targetList, /* * Determine whether the target list consists entirely of simple Var - * references (ie, references to non-system attributes). If so, - * we can use the simpler ExecVariableList instead of ExecTargetList. + * references (ie, references to non-system attributes). If so, we can + * use the simpler ExecVariableList instead of ExecTargetList. */ isVarList = true; foreach(tl, targetList) @@ -545,18 +544,18 @@ ExecBuildProjectionInfo(List *targetList, AttrNumber lastOuterVar = 0; AttrNumber lastScanVar = 0; - projInfo->pi_itemIsDone = NULL; /* not needed */ + projInfo->pi_itemIsDone = NULL; /* not needed */ projInfo->pi_varSlotOffsets = varSlotOffsets = (int *) palloc0(len * sizeof(int)); projInfo->pi_varNumbers = varNumbers = (int *) palloc0(len * sizeof(int)); /* - * Set up the data needed by ExecVariableList. The slots in which - * the variables can be found at runtime are denoted by the offsets - * of their slot pointers within the econtext. This rather grotty - * representation is needed because the caller may not have given - * us the real econtext yet (see hacks in nodeSubplan.c). + * Set up the data needed by ExecVariableList. The slots in which the + * variables can be found at runtime are denoted by the offsets of + * their slot pointers within the econtext. 
This rather grotty + * representation is needed because the caller may not have given us + * the real econtext yet (see hacks in nodeSubplan.c). */ foreach(tl, targetList) { @@ -631,7 +630,7 @@ ExecAssignProjectionInfo(PlanState *planstate) * * However ... there is no particular need to do it during ExecEndNode, * because FreeExecutorState will free any remaining ExprContexts within - * the EState. Letting FreeExecutorState do it allows the ExprContexts to + * the EState. Letting FreeExecutorState do it allows the ExprContexts to * be freed in reverse order of creation, rather than order of creation as * will happen if we delete them here, which saves O(N^2) work in the list * cleanup inside FreeExprContext. @@ -641,8 +640,8 @@ void ExecFreeExprContext(PlanState *planstate) { /* - * Per above discussion, don't actually delete the ExprContext. - * We do unlink it from the plan node, though. + * Per above discussion, don't actually delete the ExprContext. We do + * unlink it from the plan node, though. */ planstate->ps_ExprContext = NULL; } @@ -774,13 +773,13 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo) * to a new tablespace. * * If the index AM is not safe for concurrent updates, obtain an - * exclusive lock on the index to lock out other updaters as well - * as readers (index_beginscan places AccessShareLock). + * exclusive lock on the index to lock out other updaters as well as + * readers (index_beginscan places AccessShareLock). * - * If there are multiple not-concurrent-safe indexes, all backends - * must lock the indexes in the same order or we will get deadlocks - * here. This is guaranteed by RelationGetIndexList(), which promises - * to return the index list in OID order. + * If there are multiple not-concurrent-safe indexes, all backends must + * lock the indexes in the same order or we will get deadlocks here. + * This is guaranteed by RelationGetIndexList(), which promises to + * return the index list in OID order. * * The locks will be released in ExecCloseIndices. */ @@ -876,9 +875,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot, heapRelation = resultRelInfo->ri_RelationDesc; /* - * We will use the EState's per-tuple context for evaluating - * predicates and index expressions (creating it if it's not already - * there). + * We will use the EState's per-tuple context for evaluating predicates + * and index expressions (creating it if it's not already there). */ econtext = GetPerTupleExprContext(estate); @@ -903,8 +901,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot, List *predicate; /* - * If predicate state not set up yet, create it (in the - * estate's per-query context) + * If predicate state not set up yet, create it (in the estate's + * per-query context) */ predicate = indexInfo->ii_PredicateState; if (predicate == NIL) @@ -921,8 +919,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot, } /* - * FormIndexDatum fills in its values and isnull parameters with - * the appropriate values for the column(s) of the index. + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. */ FormIndexDatum(indexInfo, slot, @@ -931,14 +929,14 @@ ExecInsertIndexTuples(TupleTableSlot *slot, isnull); /* - * The index AM does the rest. Note we suppress unique-index - * checks if we are being called from VACUUM, since VACUUM may - * need to move dead tuples that have the same keys as live ones. + * The index AM does the rest. 
Note we suppress unique-index checks + * if we are being called from VACUUM, since VACUUM may need to move + * dead tuples that have the same keys as live ones. */ index_insert(relationDescs[i], /* index relation */ - values, /* array of index Datums */ - isnull, /* null flags */ - tupleid, /* tid of heap tuple */ + values, /* array of index Datums */ + isnull, /* null flags */ + tupleid, /* tid of heap tuple */ heapRelation, relationDescs[i]->rd_index->indisunique && !is_vacuum); @@ -959,14 +957,14 @@ UpdateChangedParamSet(PlanState *node, Bitmapset *newchg) Bitmapset *parmset; /* - * The plan node only depends on params listed in its allParam set. - * Don't include anything else into its chgParam set. + * The plan node only depends on params listed in its allParam set. Don't + * include anything else into its chgParam set. */ parmset = bms_intersect(node->plan->allParam, newchg); /* - * Keep node->chgParam == NULL if there's not actually any members; - * this allows the simplest possible tests in executor node files. + * Keep node->chgParam == NULL if there's not actually any members; this + * allows the simplest possible tests in executor node files. */ if (!bms_is_empty(parmset)) node->chgParam = bms_join(node->chgParam, parmset); @@ -1049,8 +1047,8 @@ ShutdownExprContext(ExprContext *econtext) return; /* - * Call the callbacks in econtext's per-tuple context. This ensures - * that any memory they might leak will get cleaned up. + * Call the callbacks in econtext's per-tuple context. This ensures that + * any memory they might leak will get cleaned up. */ oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 893ef64f03f..24a8b9a493a 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.97 2005/04/10 18:04:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98 2005/10/15 02:49:16 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -82,7 +82,7 @@ typedef SQLFunctionCache *SQLFunctionCachePtr; /* non-export function prototypes */ static execution_state *init_execution_state(List *queryTree_list, - bool readonly_func); + bool readonly_func); static void init_sql_fcache(FmgrInfo *finfo); static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache); static TupleTableSlot *postquel_getnext(execution_state *es); @@ -115,14 +115,14 @@ init_execution_state(List *queryTree_list, bool readonly_func) IsA(queryTree->utilityStmt, TransactionStmt)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /* translator: %s is a SQL statement name */ + /* translator: %s is a SQL statement name */ errmsg("%s is not allowed in a SQL function", CreateQueryTag(queryTree)))); if (readonly_func && !QueryIsReadOnly(queryTree)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /* translator: %s is a SQL statement name */ + /* translator: %s is a SQL statement name */ errmsg("%s is not allowed in a non-volatile function", CreateQueryTag(queryTree)))); @@ -178,8 +178,8 @@ init_sql_fcache(FmgrInfo *finfo) procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple); /* - * get the result type from the procedure tuple, and check for - * polymorphic result type; if so, find out the actual result type. 
+ * get the result type from the procedure tuple, and check for polymorphic + * result type; if so, find out the actual result type. */ rettype = procedureStruct->prorettype; @@ -190,7 +190,7 @@ init_sql_fcache(FmgrInfo *finfo) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("could not determine actual result type for function declared to return type %s", - format_type_be(procedureStruct->prorettype)))); + format_type_be(procedureStruct->prorettype)))); } fcache->rettype = rettype; @@ -208,9 +208,9 @@ init_sql_fcache(FmgrInfo *finfo) typeStruct = (Form_pg_type) GETSTRUCT(typeTuple); /* - * get the type length and by-value flag from the type tuple; also do - * a preliminary check for returnsTuple (this may prove inaccurate, - * see below). + * get the type length and by-value flag from the type tuple; also do a + * preliminary check for returnsTuple (this may prove inaccurate, see + * below). */ fcache->typlen = typeStruct->typlen; fcache->typbyval = typeStruct->typbyval; @@ -218,8 +218,8 @@ init_sql_fcache(FmgrInfo *finfo) rettype == RECORDOID); /* - * Parse and rewrite the queries. We need the argument type info to - * pass to the parser. + * Parse and rewrite the queries. We need the argument type info to pass + * to the parser. */ nargs = procedureStruct->pronargs; haspolyarg = false; @@ -265,17 +265,17 @@ init_sql_fcache(FmgrInfo *finfo) queryTree_list = pg_parse_and_rewrite(src, argOidVect, nargs); /* - * If the function has any arguments declared as polymorphic types, - * then it wasn't type-checked at definition time; must do so now. + * If the function has any arguments declared as polymorphic types, then + * it wasn't type-checked at definition time; must do so now. * - * Also, force a type-check if the declared return type is a rowtype; we - * need to find out whether we are actually returning the whole tuple - * result, or just regurgitating a rowtype expression result. In the - * latter case we clear returnsTuple because we need not act different - * from the scalar result case. + * Also, force a type-check if the declared return type is a rowtype; we need + * to find out whether we are actually returning the whole tuple result, + * or just regurgitating a rowtype expression result. In the latter case + * we clear returnsTuple because we need not act different from the scalar + * result case. * - * In the returnsTuple case, check_sql_fn_retval will also construct - * a JunkFilter we can use to coerce the returned rowtype to the desired + * In the returnsTuple case, check_sql_fn_retval will also construct a + * JunkFilter we can use to coerce the returned rowtype to the desired * form. */ if (haspolyarg || fcache->returnsTuple) @@ -307,9 +307,9 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache) /* * In a read-only function, use the surrounding query's snapshot; * otherwise take a new snapshot for each query. The snapshot should - * include a fresh command ID so that all work to date in this - * transaction is visible. We copy in both cases so that postquel_end - * can unconditionally do FreeSnapshot. + * include a fresh command ID so that all work to date in this transaction + * is visible. We copy in both cases so that postquel_end can + * unconditionally do FreeSnapshot. */ if (fcache->readonly_func) snapshot = CopySnapshot(ActiveSnapshot); @@ -470,8 +470,8 @@ postquel_execute(execution_state *es, if (TupIsNull(slot)) { /* - * We fall out here for all cases except where we have obtained - * a row from a function's final SELECT. 
+ * We fall out here for all cases except where we have obtained a row + * from a function's final SELECT. */ postquel_end(es); fcinfo->isnull = true; @@ -479,34 +479,34 @@ postquel_execute(execution_state *es, } /* - * If we got a row from a command within the function it has to be - * the final command. All others shouldn't be returning anything. + * If we got a row from a command within the function it has to be the + * final command. All others shouldn't be returning anything. */ Assert(LAST_POSTQUEL_COMMAND(es)); /* - * Set up to return the function value. For pass-by-reference - * datatypes, be sure to allocate the result in resultcontext, - * not the current memory context (which has query lifespan). + * Set up to return the function value. For pass-by-reference datatypes, + * be sure to allocate the result in resultcontext, not the current memory + * context (which has query lifespan). */ oldcontext = MemoryContextSwitchTo(resultcontext); if (fcache->returnsTuple) { /* - * We are returning the whole tuple, so filter it and apply the - * proper labeling to make it a valid Datum. There are several - * reasons why we do this: + * We are returning the whole tuple, so filter it and apply the proper + * labeling to make it a valid Datum. There are several reasons why + * we do this: * - * 1. To copy the tuple out of the child execution context and - * into the desired result context. + * 1. To copy the tuple out of the child execution context and into the + * desired result context. * - * 2. To remove any junk attributes present in the raw subselect - * result. (This is probably not absolutely necessary, but it - * seems like good policy.) + * 2. To remove any junk attributes present in the raw subselect result. + * (This is probably not absolutely necessary, but it seems like good + * policy.) * - * 3. To insert dummy null columns if the declared result type - * has any attisdropped columns. + * 3. To insert dummy null columns if the declared result type has any + * attisdropped columns. */ HeapTuple newtup; HeapTupleHeader dtup; @@ -517,19 +517,18 @@ postquel_execute(execution_state *es, newtup = ExecRemoveJunk(fcache->junkFilter, slot); /* - * Compress out the HeapTuple header data. We assume that - * heap_form_tuple made the tuple with header and body in one - * palloc'd chunk. We want to return a pointer to the chunk - * start so that it will work if someone tries to free it. + * Compress out the HeapTuple header data. We assume that + * heap_form_tuple made the tuple with header and body in one palloc'd + * chunk. We want to return a pointer to the chunk start so that it + * will work if someone tries to free it. */ t_len = newtup->t_len; dtup = (HeapTupleHeader) newtup; memmove((char *) dtup, (char *) newtup->t_data, t_len); /* - * Use the declared return type if it's not RECORD; else take - * the type from the computed result, making sure a typmod has - * been assigned. + * Use the declared return type if it's not RECORD; else take the type + * from the computed result, making sure a typmod has been assigned. */ if (fcache->rettype != RECORDOID) { @@ -559,9 +558,8 @@ postquel_execute(execution_state *es, else { /* - * Returning a scalar, which we have to extract from the first - * column of the SELECT result, and then copy into result - * context if needed. + * Returning a scalar, which we have to extract from the first column + * of the SELECT result, and then copy into result context if needed. 
*/ value = slot_getattr(slot, 1, &(fcinfo->isnull)); @@ -617,8 +615,8 @@ fmgr_sql(PG_FUNCTION_ARGS) es = fcache->func_state; /* - * Convert params to appropriate format if starting a fresh execution. - * (If continuing execution, we can re-use prior params.) + * Convert params to appropriate format if starting a fresh execution. (If + * continuing execution, we can re-use prior params.) */ if (es && es->status == F_EXEC_START) postquel_sub_params(fcache, fcinfo); @@ -631,8 +629,7 @@ fmgr_sql(PG_FUNCTION_ARGS) /* * Execute each command in the function one after another until we're - * executing the final command and get a result or we run out of - * commands. + * executing the final command and get a result or we run out of commands. */ while (es) { @@ -691,8 +688,8 @@ fmgr_sql(PG_FUNCTION_ARGS) } /* - * If we got a result from a command within the function it has to be - * the final command. All others shouldn't be returning anything. + * If we got a result from a command within the function it has to be the + * final command. All others shouldn't be returning anything. */ Assert(LAST_POSTQUEL_COMMAND(es)); @@ -711,8 +708,8 @@ fmgr_sql(PG_FUNCTION_ARGS) errmsg("set-valued function called in context that cannot accept a set"))); /* - * Ensure we will get shut down cleanly if the exprcontext is not - * run to completion. + * Ensure we will get shut down cleanly if the exprcontext is not run + * to completion. */ if (!fcache->shutdown_reg) { @@ -754,8 +751,7 @@ sql_exec_error_callback(void *arg) fn_name = NameStr(functup->proname); /* - * If there is a syntax error position, convert to internal syntax - * error + * If there is a syntax error position, convert to internal syntax error */ syntaxerrposition = geterrposition(); if (syntaxerrposition > 0) @@ -776,11 +772,11 @@ sql_exec_error_callback(void *arg) } /* - * Try to determine where in the function we failed. If there is a - * query with non-null QueryDesc, finger it. (We check this rather - * than looking for F_EXEC_RUN state, so that errors during - * ExecutorStart or ExecutorEnd are blamed on the appropriate query; - * see postquel_start and postquel_end.) + * Try to determine where in the function we failed. If there is a query + * with non-null QueryDesc, finger it. (We check this rather than looking + * for F_EXEC_RUN state, so that errors during ExecutorStart or + * ExecutorEnd are blamed on the appropriate query; see postquel_start and + * postquel_end.) */ if (fcache) { @@ -888,9 +884,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (rettype != VOIDOID) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type mismatch in function declared to return %s", - format_type_be(rettype)), - errdetail("Function's final statement must be a SELECT."))); + errmsg("return type mismatch in function declared to return %s", + format_type_be(rettype)), + errdetail("Function's final statement must be a SELECT."))); return false; } @@ -901,17 +897,16 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, tlist = parse->targetList; /* - * The last query must be a SELECT if and only if return type isn't - * VOID. + * The last query must be a SELECT if and only if return type isn't VOID. 
*/ if (rettype == VOIDOID) { if (cmd == CMD_SELECT) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type mismatch in function declared to return %s", - format_type_be(rettype)), - errdetail("Function's final statement must not be a SELECT."))); + errmsg("return type mismatch in function declared to return %s", + format_type_be(rettype)), + errdetail("Function's final statement must not be a SELECT."))); return false; } @@ -919,9 +914,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (cmd != CMD_SELECT) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type mismatch in function declared to return %s", - format_type_be(rettype)), - errdetail("Function's final statement must be a SELECT."))); + errmsg("return type mismatch in function declared to return %s", + format_type_be(rettype)), + errdetail("Function's final statement must be a SELECT."))); /* * Count the non-junk entries in the result targetlist. @@ -934,22 +929,22 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, { /* * For base-type returns, the target list should have exactly one - * entry, and its type should agree with what the user declared. - * (As of Postgres 7.2, we accept binary-compatible types too.) + * entry, and its type should agree with what the user declared. (As + * of Postgres 7.2, we accept binary-compatible types too.) */ if (tlistlen != 1) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type mismatch in function declared to return %s", - format_type_be(rettype)), - errdetail("Final SELECT must return exactly one column."))); + errmsg("return type mismatch in function declared to return %s", + format_type_be(rettype)), + errdetail("Final SELECT must return exactly one column."))); restype = exprType((Node *) ((TargetEntry *) linitial(tlist))->expr); if (!IsBinaryCoercible(restype, rettype)) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type mismatch in function declared to return %s", - format_type_be(rettype)), + errmsg("return type mismatch in function declared to return %s", + format_type_be(rettype)), errdetail("Actual return type is %s.", format_type_be(restype)))); } @@ -957,16 +952,16 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, { /* Returns a rowtype */ TupleDesc tupdesc; - int tupnatts; /* physical number of columns in tuple */ - int tuplogcols; /* # of nondeleted columns in tuple */ - int colindex; /* physical column index */ + int tupnatts; /* physical number of columns in tuple */ + int tuplogcols; /* # of nondeleted columns in tuple */ + int colindex; /* physical column index */ /* - * If the target list is of length 1, and the type of the varnode - * in the target list matches the declared return type, this is - * okay. This can happen, for example, where the body of the - * function is 'SELECT func2()', where func2 has the same return - * type as the function that's calling it. + * If the target list is of length 1, and the type of the varnode in + * the target list matches the declared return type, this is okay. + * This can happen, for example, where the body of the function is + * 'SELECT func2()', where func2 has the same return type as the + * function that's calling it. */ if (tlistlen == 1) { @@ -979,9 +974,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (get_func_result_type(func_id, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { /* - * Assume we are returning the whole tuple. 
- * Crosschecking against what the caller expects will happen at - * runtime. + * Assume we are returning the whole tuple. Crosschecking against + * what the caller expects will happen at runtime. */ if (junkFilter) *junkFilter = ExecInitJunkFilter(tlist, false, NULL); @@ -990,9 +984,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, Assert(tupdesc); /* - * Verify that the targetlist matches the return tuple type. - * We scan the non-deleted attributes to ensure that they match the - * datatypes of the non-resjunk columns. + * Verify that the targetlist matches the return tuple type. We scan + * the non-deleted attributes to ensure that they match the datatypes + * of the non-resjunk columns. */ tupnatts = tupdesc->natts; tuplogcols = 0; /* we'll count nondeleted cols as we go */ @@ -1016,7 +1010,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("return type mismatch in function declared to return %s", format_type_be(rettype)), - errdetail("Final SELECT returns too many columns."))); + errdetail("Final SELECT returns too many columns."))); attr = tupdesc->attrs[colindex - 1]; } while (attr->attisdropped); tuplogcols++; @@ -1046,15 +1040,15 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (tlistlen != tuplogcols) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type mismatch in function declared to return %s", - format_type_be(rettype)), + errmsg("return type mismatch in function declared to return %s", + format_type_be(rettype)), errdetail("Final SELECT returns too few columns."))); /* Set up junk filter if needed */ if (junkFilter) *junkFilter = ExecInitJunkFilterConversion(tlist, - CreateTupleDescCopy(tupdesc), - NULL); + CreateTupleDescCopy(tupdesc), + NULL); /* Report that we are returning entire tuple result */ return true; @@ -1070,8 +1064,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, else ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type %s is not supported for SQL functions", - format_type_be(rettype)))); + errmsg("return type %s is not supported for SQL functions", + format_type_be(rettype)))); return false; } diff --git a/src/backend/executor/instrument.c b/src/backend/executor/instrument.c index c5b4a252d61..08d35c16163 100644 --- a/src/backend/executor/instrument.c +++ b/src/backend/executor/instrument.c @@ -7,7 +7,7 @@ * Copyright (c) 2001-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.12 2005/04/16 20:07:35 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.13 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -43,7 +43,7 @@ InstrStartNode(Instrumentation *instr) void InstrStopNode(Instrumentation *instr, bool returnedTuple) { - instr_time endtime; + instr_time endtime; /* count the returned tuples */ if (returnedTuple) @@ -72,7 +72,7 @@ InstrStopNode(Instrumentation *instr, bool returnedTuple) instr->counter.tv_usec -= 1000000; instr->counter.tv_sec++; } -#else /* WIN32 */ +#else /* WIN32 */ instr->counter.QuadPart += (endtime.QuadPart - instr->starttime.QuadPart); #endif diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index b7a0bc344ff..0403c9aca1b 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -53,7 +53,7 @@ * pass-by-ref inputs, but in the 
aggregate case we know the left input is * either the initial transition value or a previous function result, and * in either case its value need not be preserved. See int8inc() for an - * example. Notice that advance_transition_function() is coded to avoid a + * example. Notice that advance_transition_function() is coded to avoid a * data copy step when the previous transition value pointer is returned. * * @@ -61,7 +61,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.134 2005/06/28 05:08:55 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -109,8 +109,8 @@ typedef struct AggStatePerAggData /* * fmgr lookup data for transfer functions --- only valid when - * corresponding oid is not InvalidOid. Note in particular that - * fn_strict flags are kept here. + * corresponding oid is not InvalidOid. Note in particular that fn_strict + * flags are kept here. */ FmgrInfo transfn; FmgrInfo finalfn; @@ -124,8 +124,8 @@ typedef struct AggStatePerAggData Oid sortOperator; /* - * fmgr lookup data for input type's equality operator --- only - * set/used when aggregate has DISTINCT flag. + * fmgr lookup data for input type's equality operator --- only set/used + * when aggregate has DISTINCT flag. */ FmgrInfo equalfn; @@ -147,14 +147,14 @@ typedef struct AggStatePerAggData transtypeByVal; /* - * These values are working state that is initialized at the start of - * an input tuple group and updated for each input tuple. + * These values are working state that is initialized at the start of an + * input tuple group and updated for each input tuple. * * For a simple (non DISTINCT) aggregate, we just feed the input values * straight to the transition function. If it's DISTINCT, we pass the - * input values into a Tuplesort object; then at completion of the - * input tuple group, we scan the sorted values, eliminate duplicates, - * and run the transition function on the rest. + * input values into a Tuplesort object; then at completion of the input + * tuple group, we scan the sorted values, eliminate duplicates, and run + * the transition function on the rest. */ Tuplesortstate *sortstate; /* sort object, if a DISTINCT agg */ @@ -184,12 +184,11 @@ typedef struct AggStatePerGroupData bool noTransValue; /* true if transValue not set yet */ /* - * Note: noTransValue initially has the same value as - * transValueIsNull, and if true both are cleared to false at the same - * time. They are not the same though: if transfn later returns a - * NULL, we want to keep that NULL and not auto-replace it with a - * later input value. Only the first non-NULL input will be - * auto-substituted. + * Note: noTransValue initially has the same value as transValueIsNull, + * and if true both are cleared to false at the same time. They are not + * the same though: if transfn later returns a NULL, we want to keep that + * NULL and not auto-replace it with a later input value. Only the first + * non-NULL input will be auto-substituted. */ } AggStatePerGroupData; @@ -270,11 +269,11 @@ initialize_aggregates(AggState *aggstate, } /* - * If we are reinitializing after a group boundary, we have to - * free any prior transValue to avoid memory leakage. 
We must - * check not only the isnull flag but whether the pointer is NULL; - * since pergroupstate is initialized with palloc0, the initial - * condition has isnull = 0 and null pointer. + * If we are reinitializing after a group boundary, we have to free + * any prior transValue to avoid memory leakage. We must check not + * only the isnull flag but whether the pointer is NULL; since + * pergroupstate is initialized with palloc0, the initial condition + * has isnull = 0 and null pointer. */ if (!peraggstate->transtypeByVal && !pergroupstate->transValueIsNull && @@ -284,8 +283,8 @@ initialize_aggregates(AggState *aggstate, /* * (Re)set transValue to the initial value. * - * Note that when the initial value is pass-by-ref, we must copy it - * (into the aggcontext) since we will pfree the transValue later. + * Note that when the initial value is pass-by-ref, we must copy it (into + * the aggcontext) since we will pfree the transValue later. */ if (peraggstate->initValueIsNull) pergroupstate->transValue = peraggstate->initValue; @@ -295,18 +294,18 @@ initialize_aggregates(AggState *aggstate, oldContext = MemoryContextSwitchTo(aggstate->aggcontext); pergroupstate->transValue = datumCopy(peraggstate->initValue, - peraggstate->transtypeByVal, - peraggstate->transtypeLen); + peraggstate->transtypeByVal, + peraggstate->transtypeLen); MemoryContextSwitchTo(oldContext); } pergroupstate->transValueIsNull = peraggstate->initValueIsNull; /* - * If the initial value for the transition state doesn't exist in - * the pg_aggregate table then we will let the first non-NULL - * value returned from the outer procNode become the initial - * value. (This is useful for aggregates like max() and min().) - * The noTransValue flag signals that we still need to do this. + * If the initial value for the transition state doesn't exist in the + * pg_aggregate table then we will let the first non-NULL value + * returned from the outer procNode become the initial value. (This is + * useful for aggregates like max() and min().) The noTransValue flag + * signals that we still need to do this. */ pergroupstate->noTransValue = peraggstate->initValueIsNull; } @@ -337,20 +336,18 @@ advance_transition_function(AggState *aggstate, if (pergroupstate->noTransValue) { /* - * transValue has not been initialized. This is the first - * non-NULL input value. We use it as the initial value for - * transValue. (We already checked that the agg's input type - * is binary-compatible with its transtype, so straight copy - * here is OK.) + * transValue has not been initialized. This is the first non-NULL + * input value. We use it as the initial value for transValue. (We + * already checked that the agg's input type is binary-compatible + * with its transtype, so straight copy here is OK.) * - * We must copy the datum into aggcontext if it is pass-by-ref. - * We do not need to pfree the old transValue, since it's - * NULL. + * We must copy the datum into aggcontext if it is pass-by-ref. We do + * not need to pfree the old transValue, since it's NULL. */ oldContext = MemoryContextSwitchTo(aggstate->aggcontext); pergroupstate->transValue = datumCopy(newVal, - peraggstate->transtypeByVal, - peraggstate->transtypeLen); + peraggstate->transtypeByVal, + peraggstate->transtypeLen); pergroupstate->transValueIsNull = false; pergroupstate->noTransValue = false; MemoryContextSwitchTo(oldContext); @@ -360,10 +357,9 @@ advance_transition_function(AggState *aggstate, { /* * Don't call a strict function with NULL inputs. 
Note it is - * possible to get here despite the above tests, if the - * transfn is strict *and* returned a NULL on a prior cycle. - * If that happens we will propagate the NULL all the way to - * the end. + * possible to get here despite the above tests, if the transfn is + * strict *and* returned a NULL on a prior cycle. If that happens + * we will propagate the NULL all the way to the end. */ return; } @@ -385,12 +381,12 @@ advance_transition_function(AggState *aggstate, newVal = FunctionCallInvoke(&fcinfo); /* - * If pass-by-ref datatype, must copy the new value into aggcontext - * and pfree the prior transValue. But if transfn returned a pointer - * to its first input, we don't need to do anything. + * If pass-by-ref datatype, must copy the new value into aggcontext and + * pfree the prior transValue. But if transfn returned a pointer to its + * first input, we don't need to do anything. */ if (!peraggstate->transtypeByVal && - DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue)) + DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue)) { if (!fcinfo.isnull) { @@ -473,24 +469,24 @@ process_sorted_aggregate(AggState *aggstate, tuplesort_performsort(peraggstate->sortstate); /* - * Note: if input type is pass-by-ref, the datums returned by the sort - * are freshly palloc'd in the per-query context, so we must be - * careful to pfree them when they are no longer needed. + * Note: if input type is pass-by-ref, the datums returned by the sort are + * freshly palloc'd in the per-query context, so we must be careful to + * pfree them when they are no longer needed. */ while (tuplesort_getdatum(peraggstate->sortstate, true, &newVal, &isNull)) { /* - * DISTINCT always suppresses nulls, per SQL spec, regardless of - * the transition function's strictness. + * DISTINCT always suppresses nulls, per SQL spec, regardless of the + * transition function's strictness. */ if (isNull) continue; /* - * Clear and select the working context for evaluation of the - * equality function and transition function. + * Clear and select the working context for evaluation of the equality + * function and transition function. */ MemoryContextReset(workcontext); oldContext = MemoryContextSwitchTo(workcontext); @@ -726,8 +722,8 @@ agg_retrieve_direct(AggState *aggstate) while (!aggstate->agg_done) { /* - * If we don't already have the first tuple of the new group, - * fetch it from the outer plan. + * If we don't already have the first tuple of the new group, fetch it + * from the outer plan. */ if (aggstate->grp_firstTuple == NULL) { @@ -735,8 +731,8 @@ agg_retrieve_direct(AggState *aggstate) if (!TupIsNull(outerslot)) { /* - * Make a copy of the first input tuple; we will use this - * for comparisons (in group mode) and for projection. + * Make a copy of the first input tuple; we will use this for + * comparisons (in group mode) and for projection. */ aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot); } @@ -764,8 +760,8 @@ agg_retrieve_direct(AggState *aggstate) { /* * Store the copied first input tuple in the tuple table slot - * reserved for it. The tuple will be deleted when it is - * cleared from the slot. + * reserved for it. The tuple will be deleted when it is cleared + * from the slot. 
*/ ExecStoreTuple(aggstate->grp_firstTuple, firstSlot, @@ -807,7 +803,7 @@ agg_retrieve_direct(AggState *aggstate) outerslot, node->numCols, node->grpColIdx, aggstate->eqfunctions, - tmpcontext->ecxt_per_tuple_memory)) + tmpcontext->ecxt_per_tuple_memory)) { /* * Save the first input tuple of the next group. @@ -838,17 +834,16 @@ agg_retrieve_direct(AggState *aggstate) /* * If we have no first tuple (ie, the outerPlan didn't return * anything), create a dummy all-nulls input tuple for use by - * ExecQual/ExecProject. 99.44% of the time this is a waste of - * cycles, because ordinarily the projected output tuple's - * targetlist cannot contain any direct (non-aggregated) - * references to input columns, so the dummy tuple will not be - * referenced. However there are special cases where this isn't so - * --- in particular an UPDATE involving an aggregate will have a - * targetlist reference to ctid. We need to return a null for - * ctid in that situation, not coredump. + * ExecQual/ExecProject. 99.44% of the time this is a waste of cycles, + * because ordinarily the projected output tuple's targetlist cannot + * contain any direct (non-aggregated) references to input columns, so + * the dummy tuple will not be referenced. However there are special + * cases where this isn't so --- in particular an UPDATE involving an + * aggregate will have a targetlist reference to ctid. We need to + * return a null for ctid in that situation, not coredump. * - * The values returned for the aggregates will be the initial values - * of the transition functions. + * The values returned for the aggregates will be the initial values of + * the transition functions. */ if (TupIsNull(firstSlot)) { @@ -866,15 +861,15 @@ agg_retrieve_direct(AggState *aggstate) econtext->ecxt_scantuple = firstSlot; /* - * Check the qual (HAVING clause); if the group does not match, - * ignore it and loop back to try to process another group. + * Check the qual (HAVING clause); if the group does not match, ignore + * it and loop back to try to process another group. */ if (ExecQual(aggstate->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the aggregate - * results and the representative input tuple. Note we do not - * support aggregates returning sets ... + * Form and return a projection tuple using the aggregate results + * and the representative input tuple. Note we do not support + * aggregates returning sets ... */ return ExecProject(projInfo, NULL); } @@ -903,8 +898,8 @@ agg_fill_hash_table(AggState *aggstate) tmpcontext = aggstate->tmpcontext; /* - * Process each outer-plan tuple, and then fetch the next one, until - * we exhaust the outer plan. + * Process each outer-plan tuple, and then fetch the next one, until we + * exhaust the outer plan. */ for (;;) { @@ -979,8 +974,8 @@ agg_retrieve_hash_table(AggState *aggstate) ResetExprContext(econtext); /* - * Store the copied first input tuple in the tuple table slot - * reserved for it, so that it can be used in ExecProject. + * Store the copied first input tuple in the tuple table slot reserved + * for it, so that it can be used in ExecProject. */ ExecStoreTuple(entry->shared.firstTuple, firstSlot, @@ -1010,15 +1005,15 @@ agg_retrieve_hash_table(AggState *aggstate) econtext->ecxt_scantuple = firstSlot; /* - * Check the qual (HAVING clause); if the group does not match, - * ignore it and loop back to try to process another group. 
+ * Check the qual (HAVING clause); if the group does not match, ignore + * it and loop back to try to process another group. */ if (ExecQual(aggstate->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the aggregate - * results and the representative input tuple. Note we do not - * support aggregates returning sets ... + * Form and return a projection tuple using the aggregate results + * and the representative input tuple. Note we do not support + * aggregates returning sets ... */ return ExecProject(projInfo, NULL); } @@ -1065,8 +1060,8 @@ ExecInitAgg(Agg *node, EState *estate) /* * Create expression contexts. We need two, one for per-input-tuple - * processing and one for per-output-tuple processing. We cheat a - * little by using ExecAssignExprContext() to build both. + * processing and one for per-output-tuple processing. We cheat a little + * by using ExecAssignExprContext() to build both. */ ExecAssignExprContext(estate, &aggstate->ss.ps); aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext; @@ -1074,10 +1069,10 @@ ExecInitAgg(Agg *node, EState *estate) /* * We also need a long-lived memory context for holding hashtable data - * structures and transition values. NOTE: the details of what is - * stored in aggcontext and what is stored in the regular per-query - * memory context are driven by a simple decision: we want to reset - * the aggcontext in ExecReScanAgg to recover no-longer-wanted space. + * structures and transition values. NOTE: the details of what is stored + * in aggcontext and what is stored in the regular per-query memory + * context are driven by a simple decision: we want to reset the + * aggcontext in ExecReScanAgg to recover no-longer-wanted space. */ aggstate->aggcontext = AllocSetContextCreate(CurrentMemoryContext, @@ -1098,10 +1093,10 @@ ExecInitAgg(Agg *node, EState *estate) * initialize child expressions * * Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs - * contain other agg calls in their arguments. This would make no - * sense under SQL semantics anyway (and it's forbidden by the spec). - * Because that is true, we don't need to worry about evaluating the - * aggs in any particular order. + * contain other agg calls in their arguments. This would make no sense + * under SQL semantics anyway (and it's forbidden by the spec). Because + * that is true, we don't need to worry about evaluating the aggs in any + * particular order. */ aggstate->ss.ps.targetlist = (List *) ExecInitExpr((Expr *) node->plan.targetlist, @@ -1135,20 +1130,19 @@ ExecInitAgg(Agg *node, EState *estate) if (numaggs <= 0) { /* - * This is not an error condition: we might be using the Agg node - * just to do hash-based grouping. Even in the regular case, - * constant-expression simplification could optimize away all of - * the Aggrefs in the targetlist and qual. So keep going, but - * force local copy of numaggs positive so that palloc()s below - * don't choke. + * This is not an error condition: we might be using the Agg node just + * to do hash-based grouping. Even in the regular case, + * constant-expression simplification could optimize away all of the + * Aggrefs in the targetlist and qual. So keep going, but force local + * copy of numaggs positive so that palloc()s below don't choke. */ numaggs = 1; } /* - * If we are grouping, precompute fmgr lookup data for inner loop. We - * need both equality and hashing functions to do it by hashing, but - * only equality if not hashing. 
+ * If we are grouping, precompute fmgr lookup data for inner loop. We need + * both equality and hashing functions to do it by hashing, but only + * equality if not hashing. */ if (node->numCols > 0) { @@ -1166,8 +1160,8 @@ ExecInitAgg(Agg *node, EState *estate) } /* - * Set up aggregate-result storage in the output expr context, and - * also allocate my private per-agg working storage + * Set up aggregate-result storage in the output expr context, and also + * allocate my private per-agg working storage */ econtext = aggstate->ss.ps.ps_ExprContext; econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs); @@ -1192,10 +1186,10 @@ ExecInitAgg(Agg *node, EState *estate) /* * Perform lookups of aggregate function info, and initialize the * unchanging fields of the per-agg data. We also detect duplicate - * aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0"). - * When duplicates are detected, we only make an AggStatePerAgg struct - * for the first one. The clones are simply pointed at the same - * result entry by giving them duplicate aggno values. + * aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0"). When + * duplicates are detected, we only make an AggStatePerAgg struct for the + * first one. The clones are simply pointed at the same result entry by + * giving them duplicate aggno values. */ aggno = -1; foreach(l, aggstate->aggs) @@ -1243,9 +1237,9 @@ ExecInitAgg(Agg *node, EState *estate) peraggstate->aggref = aggref; /* - * Get actual datatype of the input. We need this because it may - * be different from the agg's declared input type, when the agg - * accepts ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT. + * Get actual datatype of the input. We need this because it may be + * different from the agg's declared input type, when the agg accepts + * ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT. */ inputType = exprType((Node *) aggref->target); @@ -1270,7 +1264,7 @@ ExecInitAgg(Agg *node, EState *estate) /* Check that aggregate owner has permission to call component fns */ { HeapTuple procTuple; - Oid aggOwner; + Oid aggOwner; procTuple = SearchSysCache(PROCOID, ObjectIdGetDatum(aggref->aggfnoid), @@ -1339,8 +1333,8 @@ ExecInitAgg(Agg *node, EState *estate) &peraggstate->transtypeByVal); /* - * initval is potentially null, so don't try to access it as a - * struct field. Must do it the hard way with SysCacheGetAttr. + * initval is potentially null, so don't try to access it as a struct + * field. Must do it the hard way with SysCacheGetAttr. */ textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple, Anum_pg_aggregate_agginitval, @@ -1353,11 +1347,11 @@ ExecInitAgg(Agg *node, EState *estate) aggtranstype); /* - * If the transfn is strict and the initval is NULL, make sure - * input type and transtype are the same (or at least binary- - * compatible), so that it's OK to use the first input value as - * the initial transValue. This should have been checked at agg - * definition time, but just in case... + * If the transfn is strict and the initval is NULL, make sure input + * type and transtype are the same (or at least binary- compatible), + * so that it's OK to use the first input value as the initial + * transValue. This should have been checked at agg definition time, + * but just in case... 
*/ if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull) { @@ -1463,18 +1457,18 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt) if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED) { /* - * In the hashed case, if we haven't yet built the hash table then - * we can just return; nothing done yet, so nothing to undo. If - * subnode's chgParam is not NULL then it will be re-scanned by - * ExecProcNode, else no reason to re-scan it at all. + * In the hashed case, if we haven't yet built the hash table then we + * can just return; nothing done yet, so nothing to undo. If subnode's + * chgParam is not NULL then it will be re-scanned by ExecProcNode, + * else no reason to re-scan it at all. */ if (!node->table_filled) return; /* * If we do have the hash table and the subplan does not have any - * parameter changes, then we can just rescan the existing hash - * table; no need to build it again. + * parameter changes, then we can just rescan the existing hash table; + * no need to build it again. */ if (((PlanState *) node)->lefttree->chgParam == NULL) { @@ -1516,8 +1510,7 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt) else { /* - * Reset the per-group state (in particular, mark transvalues - * null) + * Reset the per-group state (in particular, mark transvalues null) */ MemSet(node->pergroup, 0, sizeof(AggStatePerGroupData) * node->numaggs); diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index b88eec46a40..fc5c445db0e 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.64 2005/05/22 22:30:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.65 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -88,10 +88,9 @@ exec_append_initialize_next(AppendState *appendstate) if (whichplan < appendstate->as_firstplan) { /* - * if scanning in reverse, we start at the last scan in the list - * and then proceed back to the first.. in any case we inform - * ExecAppend that we are at the end of the line by returning - * FALSE + * if scanning in reverse, we start at the last scan in the list and + * then proceed back to the first.. in any case we inform ExecAppend + * that we are at the end of the line by returning FALSE */ appendstate->as_whichplan = appendstate->as_firstplan; return FALSE; @@ -99,8 +98,7 @@ exec_append_initialize_next(AppendState *appendstate) else if (whichplan > appendstate->as_lastplan) { /* - * as above, end the scan if we go beyond the last scan in our - * list.. + * as above, end the scan if we go beyond the last scan in our list.. */ appendstate->as_whichplan = appendstate->as_lastplan; return FALSE; @@ -110,8 +108,8 @@ exec_append_initialize_next(AppendState *appendstate) /* * initialize the scan * - * If we are controlling the target relation, select the proper - * active ResultRelInfo and junk filter for this target. + * If we are controlling the target relation, select the proper active + * ResultRelInfo and junk filter for this target. */ if (((Append *) appendstate->ps.plan)->isTarget) { @@ -168,9 +166,8 @@ ExecInitAppend(Append *node, EState *estate) appendstate->as_nplans = nplans; /* - * Do we want to scan just one subplan? (Special case for - * EvalPlanQual) XXX pretty dirty way of determining that this case - * applies ... + * Do we want to scan just one subplan? 
(Special case for EvalPlanQual) + * XXX pretty dirty way of determining that this case applies ... */ if (node->isTarget && estate->es_evTuple != NULL) { @@ -199,8 +196,8 @@ ExecInitAppend(Append *node, EState *estate) #define APPEND_NSLOTS 1 /* - * append nodes still have Result slots, which hold pointers to - * tuples, so we have to initialize them. + * append nodes still have Result slots, which hold pointers to tuples, so + * we have to initialize them. */ ExecInitResultTupleSlot(estate, &appendstate->ps); @@ -220,10 +217,10 @@ ExecInitAppend(Append *node, EState *estate) } /* - * Initialize tuple type. (Note: in an inherited UPDATE situation, - * the tuple type computed here corresponds to the parent table, which - * is really a lie since tuples returned from child subplans will not - * all look the same.) + * Initialize tuple type. (Note: in an inherited UPDATE situation, the + * tuple type computed here corresponds to the parent table, which is + * really a lie since tuples returned from child subplans will not all + * look the same.) */ ExecAssignResultTypeFromTL(&appendstate->ps); appendstate->ps.ps_ProjInfo = NULL; @@ -275,19 +272,19 @@ ExecAppend(AppendState *node) if (!TupIsNull(result)) { /* - * If the subplan gave us something then return it as-is. - * We do NOT make use of the result slot that was set up in - * ExecInitAppend, first because there's no reason to and - * second because it may have the wrong tuple descriptor in + * If the subplan gave us something then return it as-is. We do + * NOT make use of the result slot that was set up in + * ExecInitAppend, first because there's no reason to and second + * because it may have the wrong tuple descriptor in * inherited-UPDATE cases. */ return result; } /* - * Go on to the "next" subplan in the appropriate direction. - * If no more subplans, return the empty slot set up for us - * by ExecInitAppend. + * Go on to the "next" subplan in the appropriate direction. If no + * more subplans, return the empty slot set up for us by + * ExecInitAppend. */ if (ScanDirectionIsForward(node->ps.state->es_direction)) node->as_whichplan++; @@ -348,8 +345,8 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt) UpdateChangedParamSet(subnode, node->ps.chgParam); /* - * if chgParam of subnode is not null then plan will be re-scanned - * by first ExecProcNode. + * if chgParam of subnode is not null then plan will be re-scanned by + * first ExecProcNode. 
*/ if (subnode->chgParam == NULL) { diff --git a/src/backend/executor/nodeBitmapAnd.c b/src/backend/executor/nodeBitmapAnd.c index 939062d4d6c..a9e63cbfccb 100644 --- a/src/backend/executor/nodeBitmapAnd.c +++ b/src/backend/executor/nodeBitmapAnd.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.3 2005/08/28 22:47:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.4 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -16,7 +16,7 @@ * ExecInitBitmapAnd - initialize the BitmapAnd node * MultiExecBitmapAnd - retrieve the result bitmap from the node * ExecEndBitmapAnd - shut down the BitmapAnd node - * ExecReScanBitmapAnd - rescan the BitmapAnd node + * ExecReScanBitmapAnd - rescan the BitmapAnd node * * NOTES * BitmapAnd nodes don't make use of their left and right @@ -137,7 +137,7 @@ MultiExecBitmapAnd(BitmapAndState *node) elog(ERROR, "unrecognized result from subplan"); if (result == NULL) - result = subresult; /* first subplan */ + result = subresult; /* first subplan */ else { tbm_intersect(result, subresult); @@ -145,11 +145,11 @@ MultiExecBitmapAnd(BitmapAndState *node) } /* - * If at any stage we have a completely empty bitmap, we can fall - * out without evaluating the remaining subplans, since ANDing them - * can no longer change the result. (Note: the fact that indxpath.c - * orders the subplans by selectivity should make this case more - * likely to occur.) + * If at any stage we have a completely empty bitmap, we can fall out + * without evaluating the remaining subplans, since ANDing them can no + * longer change the result. (Note: the fact that indxpath.c orders + * the subplans by selectivity should make this case more likely to + * occur.) */ if (tbm_is_empty(result)) break; @@ -160,7 +160,7 @@ MultiExecBitmapAnd(BitmapAndState *node) /* must provide our own instrumentation support */ if (node->ps.instrument) - InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */); + InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ ); return (Node *) result; } diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 3c3c1fd96f1..5d92c19ea5e 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -5,7 +5,7 @@ * * NOTE: it is critical that this plan type only be used with MVCC-compliant * snapshots (ie, regular snapshots, not SnapshotNow or one of the other - * special snapshots). The reason is that since index and heap scans are + * special snapshots). The reason is that since index and heap scans are * decoupled, there can be no assurance that the index tuple prompting a * visit to a particular heap TID still exists when the visit is made. * Therefore the tuple might not exist anymore either (which is OK because @@ -21,7 +21,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.3 2005/10/06 02:29:16 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.4 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -76,11 +76,11 @@ BitmapHeapNext(BitmapHeapScanState *node) tbmres = node->tbmres; /* - * Clear any reference to the previously returned tuple. The idea - * here is to not have the tuple slot be the last holder of a pin on - * that tuple's buffer; if it is, we'll need a separate visit to the - * bufmgr to release the buffer. 
By clearing here, we get to have the - * release done by ReleaseAndReadBuffer, below. + * Clear any reference to the previously returned tuple. The idea here is + * to not have the tuple slot be the last holder of a pin on that tuple's + * buffer; if it is, we'll need a separate visit to the bufmgr to release + * the buffer. By clearing here, we get to have the release done by + * ReleaseAndReadBuffer, below. */ ExecClearTuple(slot); @@ -105,7 +105,7 @@ BitmapHeapNext(BitmapHeapScanState *node) ResetExprContext(econtext); if (!ExecQual(node->bitmapqualorig, econtext, false)) - ExecClearTuple(slot); /* would not be returned by scan */ + ExecClearTuple(slot); /* would not be returned by scan */ /* Flag for the next call that no more tuples */ estate->es_evTupleNull[scanrelid - 1] = true; @@ -114,8 +114,8 @@ BitmapHeapNext(BitmapHeapScanState *node) } /* - * If we haven't yet performed the underlying index scan, do it, - * and prepare the bitmap to be iterated over. + * If we haven't yet performed the underlying index scan, do it, and + * prepare the bitmap to be iterated over. */ if (tbm == NULL) { @@ -145,10 +145,10 @@ BitmapHeapNext(BitmapHeapScanState *node) } /* - * Ignore any claimed entries past what we think is the end of - * the relation. (This is probably not necessary given that we - * got AccessShareLock before performing any of the indexscans, - * but let's be safe.) + * Ignore any claimed entries past what we think is the end of the + * relation. (This is probably not necessary given that we got + * AccessShareLock before performing any of the indexscans, but + * let's be safe.) */ if (tbmres->blockno >= scandesc->rs_nblocks) { @@ -157,19 +157,18 @@ BitmapHeapNext(BitmapHeapScanState *node) } /* - * Acquire pin on the current heap page. We'll hold the pin - * until done looking at the page. We trade in any pin we - * held before. + * Acquire pin on the current heap page. We'll hold the pin until + * done looking at the page. We trade in any pin we held before. */ scandesc->rs_cbuf = ReleaseAndReadBuffer(scandesc->rs_cbuf, scandesc->rs_rd, tbmres->blockno); /* - * Determine how many entries we need to look at on this page. - * If the bitmap is lossy then we need to look at each physical - * item pointer; otherwise we just look through the offsets - * listed in tbmres. + * Determine how many entries we need to look at on this page. If + * the bitmap is lossy then we need to look at each physical item + * pointer; otherwise we just look through the offsets listed in + * tbmres. */ if (tbmres->ntuples >= 0) { @@ -180,7 +179,7 @@ BitmapHeapNext(BitmapHeapScanState *node) else { /* lossy case */ - Page dp; + Page dp; LockBuffer(scandesc->rs_cbuf, BUFFER_LOCK_SHARE); dp = (Page) BufferGetPage(scandesc->rs_cbuf); @@ -230,8 +229,8 @@ BitmapHeapNext(BitmapHeapScanState *node) ItemPointerSet(&scandesc->rs_ctup.t_self, tbmres->blockno, targoffset); /* - * Fetch the heap tuple and see if it matches the snapshot. - * We use heap_release_fetch to avoid useless bufmgr traffic. + * Fetch the heap tuple and see if it matches the snapshot. We use + * heap_release_fetch to avoid useless bufmgr traffic. */ if (heap_release_fetch(scandesc->rs_rd, scandesc->rs_snapshot, @@ -241,8 +240,8 @@ BitmapHeapNext(BitmapHeapScanState *node) &scandesc->rs_pgstat_info)) { /* - * Set up the result slot to point to this tuple. - * Note that the slot acquires a pin on the buffer. + * Set up the result slot to point to this tuple. Note that the + * slot acquires a pin on the buffer. 
*/ ExecStoreTuple(&scandesc->rs_ctup, slot, @@ -338,8 +337,8 @@ ExecBitmapHeapReScan(BitmapHeapScanState *node, ExprContext *exprCtxt) node->tbmres = NULL; /* - * Always rescan the input immediately, to ensure we can pass down - * any outer tuple that might be used in index quals. + * Always rescan the input immediately, to ensure we can pass down any + * outer tuple that might be used in index quals. */ ExecReScan(outerPlanState(node), exprCtxt); } @@ -391,9 +390,9 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) * close the heap relation. * * Currently, we do not release the AccessShareLock acquired by - * ExecInitBitmapHeapScan. This lock should be held till end of - * transaction. (There is a faction that considers this too much - * locking, however.) + * ExecInitBitmapHeapScan. This lock should be held till end of + * transaction. (There is a faction that considers this too much locking, + * however.) */ heap_close(relation, NoLock); } @@ -470,9 +469,9 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate) scanstate->ss.ss_currentRelation = currentRelation; /* - * Even though we aren't going to do a conventional seqscan, it is - * useful to create a HeapScanDesc --- this checks the relation size - * and sets up statistical infrastructure for us. + * Even though we aren't going to do a conventional seqscan, it is useful + * to create a HeapScanDesc --- this checks the relation size and sets up + * statistical infrastructure for us. */ scanstate->ss.ss_currentScanDesc = heap_beginscan(currentRelation, estate->es_snapshot, @@ -482,7 +481,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate) /* * One problem is that heap_beginscan counts a "sequential scan" start, * when we actually aren't doing any such thing. Reverse out the added - * scan count. (Eventually we may want to count bitmap scans separately.) + * scan count. (Eventually we may want to count bitmap scans separately.) */ pgstat_discount_heap_scan(&scanstate->ss.ss_currentScanDesc->rs_pgstat_info); diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 231c35b9560..49b63170d49 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.9 2005/05/06 17:24:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -54,17 +54,16 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node) scandesc = node->biss_ScanDesc; /* - * If we have runtime keys and they've not already been set up, do it - * now. + * If we have runtime keys and they've not already been set up, do it now. */ if (node->biss_RuntimeKeyInfo && !node->biss_RuntimeKeysReady) ExecReScan((PlanState *) node, NULL); /* * Prepare the result bitmap. Normally we just create a new one to pass - * back; however, our parent node is allowed to store a pre-made one - * into node->biss_result, in which case we just OR our tuple IDs into - * the existing bitmap. (This saves needing explicit UNION steps.) + * back; however, our parent node is allowed to store a pre-made one into + * node->biss_result, in which case we just OR our tuple IDs into the + * existing bitmap. (This saves needing explicit UNION steps.) 
*/ if (node->biss_result) { @@ -82,7 +81,7 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node) */ for (;;) { - bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids); + bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids); if (ntids > 0) { @@ -116,8 +115,7 @@ ExecBitmapIndexReScan(BitmapIndexScanState *node, ExprContext *exprCtxt) ExprContext *econtext; ExprState **runtimeKeyInfo; - econtext = node->biss_RuntimeContext; /* context for runtime - * keys */ + econtext = node->biss_RuntimeContext; /* context for runtime keys */ runtimeKeyInfo = node->biss_RuntimeKeyInfo; if (econtext) @@ -130,16 +128,16 @@ ExecBitmapIndexReScan(BitmapIndexScanState *node, ExprContext *exprCtxt) econtext->ecxt_outertuple = exprCtxt->ecxt_outertuple; /* - * Reset the runtime-key context so we don't leak memory as each - * outer tuple is scanned. Note this assumes that we will - * recalculate *all* runtime keys on each call. + * Reset the runtime-key context so we don't leak memory as each outer + * tuple is scanned. Note this assumes that we will recalculate *all* + * runtime keys on each call. */ ResetExprContext(econtext); } /* - * If we are doing runtime key calculations (ie, the index keys depend - * on data from an outer scan), compute the new key values + * If we are doing runtime key calculations (ie, the index keys depend on + * data from an outer scan), compute the new key values */ if (runtimeKeyInfo) { @@ -213,8 +211,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate) /* * Miscellaneous initialization * - * We do not need a standard exprcontext for this node, though we may - * decide below to create a runtime-key exprcontext + * We do not need a standard exprcontext for this node, though we may decide + * below to create a runtime-key exprcontext */ /* @@ -252,10 +250,10 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate) indexstate->biss_NumScanKeys = numScanKeys; /* - * If we have runtime keys, we need an ExprContext to evaluate them. - * We could just create a "standard" plan node exprcontext, but to - * keep the code looking similar to nodeIndexscan.c, it seems better - * to stick with the approach of using a separate ExprContext. + * If we have runtime keys, we need an ExprContext to evaluate them. We + * could just create a "standard" plan node exprcontext, but to keep the + * code looking similar to nodeIndexscan.c, it seems better to stick with + * the approach of using a separate ExprContext. */ if (have_runtime_keys) { @@ -272,17 +270,17 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate) /* * We do not open or lock the base relation here. We assume that an - * ancestor BitmapHeapScan node is holding AccessShareLock on the - * heap relation throughout the execution of the plan tree. + * ancestor BitmapHeapScan node is holding AccessShareLock on the heap + * relation throughout the execution of the plan tree. */ indexstate->ss.ss_currentRelation = NULL; indexstate->ss.ss_currentScanDesc = NULL; /* - * open the index relation and initialize relation and scan - * descriptors. Note we acquire no locks here; the index machinery - * does its own locks and unlocks. + * open the index relation and initialize relation and scan descriptors. + * Note we acquire no locks here; the index machinery does its own locks + * and unlocks. 
*/ indexstate->biss_RelationDesc = index_open(node->indexid); indexstate->biss_ScanDesc = diff --git a/src/backend/executor/nodeBitmapOr.c b/src/backend/executor/nodeBitmapOr.c index 9078855ec33..772b948cc52 100644 --- a/src/backend/executor/nodeBitmapOr.c +++ b/src/backend/executor/nodeBitmapOr.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.2 2005/04/20 15:48:36 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.3 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -133,13 +133,13 @@ MultiExecBitmapOr(BitmapOrState *node) TIDBitmap *subresult; /* - * We can special-case BitmapIndexScan children to avoid an - * explicit tbm_union step for each child: just pass down the - * current result bitmap and let the child OR directly into it. + * We can special-case BitmapIndexScan children to avoid an explicit + * tbm_union step for each child: just pass down the current result + * bitmap and let the child OR directly into it. */ if (IsA(subnode, BitmapIndexScanState)) { - if (result == NULL) /* first subplan */ + if (result == NULL) /* first subplan */ { /* XXX should we use less than work_mem for this? */ result = tbm_create(work_mem * 1024L); @@ -161,7 +161,7 @@ MultiExecBitmapOr(BitmapOrState *node) elog(ERROR, "unrecognized result from subplan"); if (result == NULL) - result = subresult; /* first subplan */ + result = subresult; /* first subplan */ else { tbm_union(result, subresult); @@ -176,7 +176,7 @@ MultiExecBitmapOr(BitmapOrState *node) /* must provide our own instrumentation support */ if (node->ps.instrument) - InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */); + InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ ); return (Node *) result; } diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c index 5cd6de45fda..a0178e8fa17 100644 --- a/src/backend/executor/nodeFunctionscan.c +++ b/src/backend/executor/nodeFunctionscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.34 2005/05/22 22:30:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.35 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -60,9 +60,8 @@ FunctionNext(FunctionScanState *node) tuplestorestate = node->tuplestorestate; /* - * If first time through, read all tuples from function and put them - * in a tuplestore. Subsequent calls just fetch tuples from - * tuplestore. + * If first time through, read all tuples from function and put them in a + * tuplestore. Subsequent calls just fetch tuples from tuplestore. */ if (tuplestorestate == NULL) { @@ -77,10 +76,10 @@ FunctionNext(FunctionScanState *node) /* * If function provided a tupdesc, cross-check it. We only really - * need to do this for functions returning RECORD, but might as - * well do it always. + * need to do this for functions returning RECORD, but might as well + * do it always. */ - if (funcTupdesc) + if (funcTupdesc) tupledesc_match(node->tupdesc, funcTupdesc); } @@ -174,8 +173,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate) Assert(rte->rtekind == RTE_FUNCTION); /* - * Now determine if the function returns a simple or composite type, - * and build an appropriate tupdesc. + * Now determine if the function returns a simple or composite type, and + * build an appropriate tupdesc. 
*/ functypclass = get_expr_result_type(rte->funcexpr, &funcrettype, @@ -213,8 +212,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate) /* * For RECORD results, make sure a typmod has been assigned. (The - * function should do this for itself, but let's cover things in case - * it doesn't.) + * function should do this for itself, but let's cover things in case it + * doesn't.) */ BlessTupleDesc(tupdesc); @@ -329,10 +328,10 @@ ExecFunctionReScan(FunctionScanState *node, ExprContext *exprCtxt) return; /* - * Here we have a choice whether to drop the tuplestore (and recompute - * the function outputs) or just rescan it. This should depend on - * whether the function expression contains parameters and/or is - * marked volatile. FIXME soon. + * Here we have a choice whether to drop the tuplestore (and recompute the + * function outputs) or just rescan it. This should depend on whether the + * function expression contains parameters and/or is marked volatile. + * FIXME soon. */ if (node->ss.ps.chgParam != NULL) { @@ -376,7 +375,7 @@ tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("function return row and query-specified return row do not match"), - errdetail("Returned type %s at ordinal position %d, but query expects %s.", + errdetail("Returned type %s at ordinal position %d, but query expects %s.", format_type_be(sattr->atttypid), i + 1, format_type_be(dattr->atttypid)))); diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c index e16a228fa15..91a08add4d9 100644 --- a/src/backend/executor/nodeGroup.c +++ b/src/backend/executor/nodeGroup.c @@ -15,7 +15,7 @@ * locate group boundaries. * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.61 2005/03/16 21:38:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.62 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -61,8 +61,8 @@ ExecGroup(GroupState *node) */ /* - * If first time through, acquire first input tuple and determine - * whether to return it or not. + * If first time through, acquire first input tuple and determine whether + * to return it or not. */ if (TupIsNull(firsttupleslot)) { @@ -76,15 +76,15 @@ ExecGroup(GroupState *node) /* Copy tuple, set up as input for qual test and projection */ ExecCopySlot(firsttupleslot, outerslot); econtext->ecxt_scantuple = firsttupleslot; + /* - * Check the qual (HAVING clause); if the group does not match, - * ignore it and fall into scan loop. + * Check the qual (HAVING clause); if the group does not match, ignore + * it and fall into scan loop. */ if (ExecQual(node->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the first input - * tuple. + * Form and return a projection tuple using the first input tuple. */ return ExecProject(node->ss.ps.ps_ProjInfo, NULL); } @@ -92,8 +92,8 @@ ExecGroup(GroupState *node) /* * This loop iterates once per input tuple group. At the head of the - * loop, we have finished processing the first tuple of the group and - * now need to scan over all the other group members. + * loop, we have finished processing the first tuple of the group and now + * need to scan over all the other group members. */ for (;;) { @@ -120,22 +120,23 @@ ExecGroup(GroupState *node) econtext->ecxt_per_tuple_memory)) break; } + /* - * We have the first tuple of the next input group. See if we - * want to return it. 
+ * We have the first tuple of the next input group. See if we want to + * return it. */ /* Copy tuple, set up as input for qual test and projection */ ExecCopySlot(firsttupleslot, outerslot); econtext->ecxt_scantuple = firsttupleslot; + /* - * Check the qual (HAVING clause); if the group does not match, - * ignore it and loop back to scan the rest of the group. + * Check the qual (HAVING clause); if the group does not match, ignore + * it and loop back to scan the rest of the group. */ if (ExecQual(node->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the first input - * tuple. + * Form and return a projection tuple using the first input tuple. */ return ExecProject(node->ss.ps.ps_ProjInfo, NULL); } diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 5e2be394d86..8c51e785b28 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.95 2005/09/25 19:37:34 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -100,11 +100,11 @@ MultiExecHash(HashState *node) InstrStopNodeMulti(node->ps.instrument, hashtable->totalTuples); /* - * We do not return the hash table directly because it's not a subtype - * of Node, and so would violate the MultiExecProcNode API. Instead, - * our parent Hashjoin node is expected to know how to fish it out - * of our node state. Ugly but not really worth cleaning up, since - * Hashjoin knows quite a bit more about Hash besides that. + * We do not return the hash table directly because it's not a subtype of + * Node, and so would violate the MultiExecProcNode API. Instead, our + * parent Hashjoin node is expected to know how to fish it out of our node + * state. Ugly but not really worth cleaning up, since Hashjoin knows + * quite a bit more about Hash besides that. */ return NULL; } @@ -161,8 +161,8 @@ ExecInitHash(Hash *node, EState *estate) outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate); /* - * initialize tuple type. no need to initialize projection info - * because this node doesn't do projections + * initialize tuple type. no need to initialize projection info because + * this node doesn't do projections */ ExecAssignResultTypeFromOuterPlan(&hashstate->ps); hashstate->ps.ps_ProjInfo = NULL; @@ -221,9 +221,9 @@ ExecHashTableCreate(Hash *node, List *hashOperators) MemoryContext oldcxt; /* - * Get information about the size of the relation to be hashed (it's - * the "outer" subtree of this node, but the inner relation of the - * hashjoin). Compute the appropriate size of the hash table. + * Get information about the size of the relation to be hashed (it's the + * "outer" subtree of this node, but the inner relation of the hashjoin). + * Compute the appropriate size of the hash table. */ outerNode = outerPlan(node); @@ -237,8 +237,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators) /* * Initialize the hash table control block. * - * The hashtable control block is just palloc'd from the executor's - * per-query memory context. + * The hashtable control block is just palloc'd from the executor's per-query + * memory context. 
*/ hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData)); hashtable->nbuckets = nbuckets; @@ -273,8 +273,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators) } /* - * Create temporary memory contexts in which to keep the hashtable - * working storage. See notes in executor/hashjoin.h. + * Create temporary memory contexts in which to keep the hashtable working + * storage. See notes in executor/hashjoin.h. */ hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext, "HashTableContext", @@ -353,9 +353,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, ntuples = 1000.0; /* - * Estimate tupsize based on footprint of tuple in hashtable... note - * this does not allow for any palloc overhead. The manipulations of - * spaceUsed don't count palloc overhead either. + * Estimate tupsize based on footprint of tuple in hashtable... note this + * does not allow for any palloc overhead. The manipulations of spaceUsed + * don't count palloc overhead either. */ tupsize = MAXALIGN(sizeof(HashJoinTupleData)) + MAXALIGN(sizeof(HeapTupleHeaderData)) + @@ -375,16 +375,16 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, if (inner_rel_bytes > hash_table_bytes) { /* We'll need multiple batches */ - long lbuckets; - double dbatch; - int minbatch; + long lbuckets; + double dbatch; + int minbatch; lbuckets = (hash_table_bytes / tupsize) / NTUP_PER_BUCKET; lbuckets = Min(lbuckets, INT_MAX); nbuckets = (int) lbuckets; dbatch = ceil(inner_rel_bytes / hash_table_bytes); - dbatch = Min(dbatch, INT_MAX/2); + dbatch = Min(dbatch, INT_MAX / 2); minbatch = (int) dbatch; nbatch = 2; while (nbatch < minbatch) @@ -393,7 +393,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, else { /* We expect the hashtable to fit in memory */ - double dbuckets; + double dbuckets; dbuckets = ceil(ntuples / NTUP_PER_BUCKET); dbuckets = Min(dbuckets, INT_MAX); @@ -406,8 +406,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, * We want nbuckets to be prime so as to avoid having bucket and batch * numbers depend on only some bits of the hash code. Choose the next * larger prime from the list in hprimes[]. (This also enforces that - * nbuckets is not very small, by the simple expedient of not putting - * any very small entries in hprimes[].) + * nbuckets is not very small, by the simple expedient of not putting any + * very small entries in hprimes[].) */ for (i = 0; i < (int) lengthof(hprimes); i++) { @@ -475,7 +475,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) return; /* safety check to avoid overflow */ - if (oldnbatch > INT_MAX/2) + if (oldnbatch > INT_MAX / 2) return; nbatch = oldnbatch * 2; @@ -514,8 +514,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) hashtable->nbatch = nbatch; /* - * Scan through the existing hash table entries and dump out any - * that are no longer of the current batch. + * Scan through the existing hash table entries and dump out any that are + * no longer of the current batch. */ ninmemory = nfreed = 0; @@ -571,12 +571,12 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) #endif /* - * If we dumped out either all or none of the tuples in the table, - * disable further expansion of nbatch. This situation implies that - * we have enough tuples of identical hashvalues to overflow spaceAllowed. - * Increasing nbatch will not fix it since there's no way to subdivide - * the group any more finely. - * We have to just gut it out and hope the server has enough RAM. 
+ * If we dumped out either all or none of the tuples in the table, disable + * further expansion of nbatch. This situation implies that we have + * enough tuples of identical hashvalues to overflow spaceAllowed. + * Increasing nbatch will not fix it since there's no way to subdivide the + * group any more finely. We have to just gut it out and hope the server + * has enough RAM. */ if (nfreed == 0 || nfreed == ninmemory) { @@ -663,8 +663,8 @@ ExecHashGetHashValue(HashJoinTable hashtable, MemoryContext oldContext; /* - * We reset the eval context each time to reclaim any memory leaked in - * the hashkey expressions. + * We reset the eval context each time to reclaim any memory leaked in the + * hashkey expressions. */ ResetExprContext(econtext); @@ -727,8 +727,8 @@ ExecHashGetBucketAndBatch(HashJoinTable hashtable, int *bucketno, int *batchno) { - uint32 nbuckets = (uint32) hashtable->nbuckets; - uint32 nbatch = (uint32) hashtable->nbatch; + uint32 nbuckets = (uint32) hashtable->nbuckets; + uint32 nbatch = (uint32) hashtable->nbatch; if (nbatch > 1) { @@ -759,8 +759,8 @@ ExecScanHashBucket(HashJoinState *hjstate, uint32 hashvalue = hjstate->hj_CurHashValue; /* - * hj_CurTuple is NULL to start scanning a new bucket, or the address - * of the last tuple returned from the current bucket. + * hj_CurTuple is NULL to start scanning a new bucket, or the address of + * the last tuple returned from the current bucket. */ if (hashTuple == NULL) hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo]; @@ -812,8 +812,8 @@ ExecHashTableReset(HashJoinTable hashtable) int nbuckets = hashtable->nbuckets; /* - * Release all the hash buckets and tuples acquired in the prior pass, - * and reinitialize the context for a new pass. + * Release all the hash buckets and tuples acquired in the prior pass, and + * reinitialize the context for a new pass. */ MemoryContextReset(hashtable->batchCxt); oldcxt = MemoryContextSwitchTo(hashtable->batchCxt); diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index 4b0f9377ba8..9f002dde9cf 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.73 2005/09/25 19:37:34 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.74 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -24,8 +24,8 @@ static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *outerNode, - HashJoinState *hjstate, - uint32 *hashvalue); + HashJoinState *hjstate, + uint32 *hashvalue); static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate, BufFile *file, uint32 *hashvalue, @@ -77,9 +77,9 @@ ExecHashJoin(HashJoinState *node) econtext = node->js.ps.ps_ExprContext; /* - * Check to see if we're still projecting out tuples from a previous - * join tuple (because there is a function-returning-set in the - * projection expressions). If so, try to project another one. + * Check to see if we're still projecting out tuples from a previous join + * tuple (because there is a function-returning-set in the projection + * expressions). If so, try to project another one. */ if (node->js.ps.ps_TupFromTlist) { @@ -93,17 +93,17 @@ ExecHashJoin(HashJoinState *node) } /* - * If we're doing an IN join, we want to return at most one row per - * outer tuple; so we can stop scanning the inner scan if we matched - * on the previous try. 
+ * If we're doing an IN join, we want to return at most one row per outer + * tuple; so we can stop scanning the inner scan if we matched on the + * previous try. */ if (node->js.jointype == JOIN_IN && node->hj_MatchedOuter) node->hj_NeedNewOuter = true; /* * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous tuple cycle. Note this can't - * happen until we're done projecting out tuples from a join tuple. + * storage allocated in the previous tuple cycle. Note this can't happen + * until we're done projecting out tuples from a join tuple. */ ResetExprContext(econtext); @@ -114,17 +114,17 @@ ExecHashJoin(HashJoinState *node) { /* * If the outer relation is completely empty, we can quit without - * building the hash table. However, for an inner join it is only - * a win to check this when the outer relation's startup cost is less - * than the projected cost of building the hash table. Otherwise - * it's best to build the hash table first and see if the inner - * relation is empty. (When it's an outer join, we should always - * make this check, since we aren't going to be able to skip the - * join on the strength of an empty inner relation anyway.) + * building the hash table. However, for an inner join it is only a + * win to check this when the outer relation's startup cost is less + * than the projected cost of building the hash table. Otherwise it's + * best to build the hash table first and see if the inner relation is + * empty. (When it's an outer join, we should always make this check, + * since we aren't going to be able to skip the join on the strength + * of an empty inner relation anyway.) * - * The only way to make the check is to try to fetch a tuple from - * the outer plan node. If we succeed, we have to stash it away - * for later consumption by ExecHashJoinOuterGetTuple. + * The only way to make the check is to try to fetch a tuple from the + * outer plan node. If we succeed, we have to stash it away for later + * consumption by ExecHashJoinOuterGetTuple. */ if (outerNode->plan->startup_cost < hashNode->ps.plan->total_cost || node->js.jointype == JOIN_LEFT) @@ -150,8 +150,8 @@ ExecHashJoin(HashJoinState *node) (void) MultiExecProcNode((PlanState *) hashNode); /* - * If the inner relation is completely empty, and we're not doing - * an outer join, we can quit without scanning the outer relation. + * If the inner relation is completely empty, and we're not doing an + * outer join, we can quit without scanning the outer relation. */ if (hashtable->totalTuples == 0 && node->js.jointype != JOIN_LEFT) { @@ -193,8 +193,8 @@ ExecHashJoin(HashJoinState *node) node->hj_MatchedOuter = false; /* - * now we have an outer tuple, find the corresponding bucket - * for this tuple from the hash table + * now we have an outer tuple, find the corresponding bucket for + * this tuple from the hash table */ node->hj_CurHashValue = hashvalue; ExecHashGetBucketAndBatch(hashtable, hashvalue, @@ -202,21 +202,21 @@ ExecHashJoin(HashJoinState *node) node->hj_CurTuple = NULL; /* - * Now we've got an outer tuple and the corresponding hash - * bucket, but this tuple may not belong to the current batch. + * Now we've got an outer tuple and the corresponding hash bucket, + * but this tuple may not belong to the current batch. */ if (batchno != hashtable->curbatch) { /* - * Need to postpone this outer tuple to a later batch. - * Save it in the corresponding outer-batch file. + * Need to postpone this outer tuple to a later batch. 
Save it + * in the corresponding outer-batch file. */ Assert(batchno > hashtable->curbatch); ExecHashJoinSaveTuple(ExecFetchSlotTuple(outerTupleSlot), hashvalue, &hashtable->outerBatchFile[batchno]); node->hj_NeedNewOuter = true; - continue; /* loop around for a new outer tuple */ + continue; /* loop around for a new outer tuple */ } } @@ -243,11 +243,11 @@ ExecHashJoin(HashJoinState *node) /* * if we pass the qual, then save state for next call and have - * ExecProject form the projection, store it in the tuple - * table, and return the slot. + * ExecProject form the projection, store it in the tuple table, + * and return the slot. * - * Only the joinquals determine MatchedOuter status, but all - * quals must pass to actually return the tuple. + * Only the joinquals determine MatchedOuter status, but all quals + * must pass to actually return the tuple. */ if (joinqual == NIL || ExecQual(joinqual, econtext, false)) { @@ -268,8 +268,7 @@ ExecHashJoin(HashJoinState *node) } /* - * If we didn't return a tuple, may need to set - * NeedNewOuter + * If we didn't return a tuple, may need to set NeedNewOuter */ if (node->js.jointype == JOIN_IN) { @@ -281,8 +280,8 @@ ExecHashJoin(HashJoinState *node) /* * Now the current outer tuple has run out of matches, so check - * whether to emit a dummy outer-join tuple. If not, loop around - * to get a new outer tuple. + * whether to emit a dummy outer-join tuple. If not, loop around to + * get a new outer tuple. */ node->hj_NeedNewOuter = true; @@ -290,19 +289,17 @@ ExecHashJoin(HashJoinState *node) node->js.jointype == JOIN_LEFT) { /* - * We are doing an outer join and there were no join matches - * for this outer tuple. Generate a fake join tuple with - * nulls for the inner tuple, and return it if it passes the - * non-join quals. + * We are doing an outer join and there were no join matches for + * this outer tuple. Generate a fake join tuple with nulls for + * the inner tuple, and return it if it passes the non-join quals. */ econtext->ecxt_innertuple = node->hj_NullInnerTupleSlot; if (ExecQual(otherqual, econtext, false)) { /* - * qualification was satisfied so we project and return - * the slot containing the result tuple using - * ExecProject(). + * qualification was satisfied so we project and return the + * slot containing the result tuple using ExecProject(). */ TupleTableSlot *result; @@ -392,7 +389,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate) case JOIN_LEFT: hjstate->hj_NullInnerTupleSlot = ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(hjstate))); + ExecGetResultType(innerPlanState(hjstate))); break; default: elog(ERROR, "unrecognized join type: %d", @@ -400,11 +397,11 @@ ExecInitHashJoin(HashJoin *node, EState *estate) } /* - * now for some voodoo. our temporary tuple slot is actually the - * result tuple slot of the Hash node (which is our inner plan). we - * do this because Hash nodes don't return tuples via ExecProcNode() - * -- instead the hash join node uses ExecScanHashBucket() to get at - * the contents of the hash table. -cim 6/9/91 + * now for some voodoo. our temporary tuple slot is actually the result + * tuple slot of the Hash node (which is our inner plan). we do this + * because Hash nodes don't return tuples via ExecProcNode() -- instead + * the hash join node uses ExecScanHashBucket() to get at the contents of + * the hash table. 
-cim 6/9/91 */ { HashState *hashstate = (HashState *) innerPlanState(hjstate); @@ -434,10 +431,10 @@ ExecInitHashJoin(HashJoin *node, EState *estate) hjstate->hj_CurTuple = NULL; /* - * Deconstruct the hash clauses into outer and inner argument values, - * so that we can evaluate those subexpressions separately. Also make - * a list of the hash operator OIDs, in preparation for looking up the - * hash functions to use. + * Deconstruct the hash clauses into outer and inner argument values, so + * that we can evaluate those subexpressions separately. Also make a list + * of the hash operator OIDs, in preparation for looking up the hash + * functions to use. */ lclauses = NIL; rclauses = NIL; @@ -536,6 +533,7 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode, if (curbatch == 0) { /* if it is the first pass */ + /* * Check to see if first outer tuple was already fetched by * ExecHashJoin() and not used yet. @@ -560,16 +558,16 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode, } /* - * We have just reached the end of the first pass. Try to switch - * to a saved batch. + * We have just reached the end of the first pass. Try to switch to a + * saved batch. */ curbatch = ExecHashJoinNewBatch(hjstate); } /* - * Try to read from a temp file. Loop allows us to advance to new - * batches as needed. NOTE: nbatch could increase inside - * ExecHashJoinNewBatch, so don't try to optimize this loop. + * Try to read from a temp file. Loop allows us to advance to new batches + * as needed. NOTE: nbatch could increase inside ExecHashJoinNewBatch, so + * don't try to optimize this loop. */ while (curbatch < hashtable->nbatch) { @@ -623,16 +621,16 @@ start_over: * sides. We can sometimes skip over batches that are empty on only one * side, but there are exceptions: * - * 1. In a LEFT JOIN, we have to process outer batches even if the - * inner batch is empty. + * 1. In a LEFT JOIN, we have to process outer batches even if the inner + * batch is empty. * - * 2. If we have increased nbatch since the initial estimate, we have - * to scan inner batches since they might contain tuples that need to - * be reassigned to later inner batches. + * 2. If we have increased nbatch since the initial estimate, we have to scan + * inner batches since they might contain tuples that need to be + * reassigned to later inner batches. * - * 3. Similarly, if we have increased nbatch since starting the outer - * scan, we have to rescan outer batches in case they contain tuples - * that need to be reassigned. + * 3. Similarly, if we have increased nbatch since starting the outer scan, + * we have to rescan outer batches in case they contain tuples that need + * to be reassigned. */ curbatch++; while (curbatch < nbatch && @@ -676,7 +674,7 @@ start_over: if (BufFileSeek(innerFile, 0, 0L, SEEK_SET)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rewind hash-join temporary file: %m"))); + errmsg("could not rewind hash-join temporary file: %m"))); while ((slot = ExecHashJoinGetSavedTuple(hjstate, innerFile, @@ -684,8 +682,8 @@ start_over: hjstate->hj_HashTupleSlot))) { /* - * NOTE: some tuples may be sent to future batches. Also, - * it is possible for hashtable->nbatch to be increased here! + * NOTE: some tuples may be sent to future batches. Also, it is + * possible for hashtable->nbatch to be increased here! 
*/ ExecHashTableInsert(hashtable, ExecFetchSlotTuple(slot), @@ -733,7 +731,7 @@ void ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue, BufFile **fileptr) { - BufFile *file = *fileptr; + BufFile *file = *fileptr; size_t written; if (file == NULL) @@ -764,7 +762,7 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue, /* * ExecHashJoinGetSavedTuple - * read the next tuple from a batch file. Return NULL if no more. + * read the next tuple from a batch file. Return NULL if no more. * * On success, *hashvalue is set to the tuple's hash value, and the tuple * itself is stored in the given slot. @@ -809,18 +807,18 @@ void ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt) { /* - * If we haven't yet built the hash table then we can just return; - * nothing done yet, so nothing to undo. + * If we haven't yet built the hash table then we can just return; nothing + * done yet, so nothing to undo. */ if (node->hj_HashTable == NULL) return; /* - * In a multi-batch join, we currently have to do rescans the hard - * way, primarily because batch temp files may have already been - * released. But if it's a single-batch join, and there is no - * parameter change for the inner subnode, then we can just re-use the - * existing hash table without rebuilding it. + * In a multi-batch join, we currently have to do rescans the hard way, + * primarily because batch temp files may have already been released. But + * if it's a single-batch join, and there is no parameter change for the + * inner subnode, then we can just re-use the existing hash table without + * rebuilding it. */ if (node->hj_HashTable->nbatch == 1 && ((PlanState *) node)->righttree->chgParam == NULL) @@ -835,8 +833,8 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt) node->hj_FirstOuterTupleSlot = NULL; /* - * if chgParam of subnode is not null then plan will be re-scanned - * by first ExecProcNode. + * if chgParam of subnode is not null then plan will be re-scanned by + * first ExecProcNode. */ if (((PlanState *) node)->righttree->chgParam == NULL) ExecReScan(((PlanState *) node)->righttree, exprCtxt); diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 2a10ef39c0d..94ab2223c75 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.103 2005/05/06 17:24:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -75,11 +75,11 @@ IndexNext(IndexScanState *node) scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid; /* - * Clear any reference to the previously returned tuple. The idea - * here is to not have the tuple slot be the last holder of a pin on - * that tuple's buffer; if it is, we'll need a separate visit to the - * bufmgr to release the buffer. By clearing here, we get to have the - * release done by ReleaseAndReadBuffer inside index_getnext. + * Clear any reference to the previously returned tuple. The idea here is + * to not have the tuple slot be the last holder of a pin on that tuple's + * buffer; if it is, we'll need a separate visit to the bufmgr to release + * the buffer. By clearing here, we get to have the release done by + * ReleaseAndReadBuffer inside index_getnext. 
*/ ExecClearTuple(slot); @@ -104,7 +104,7 @@ IndexNext(IndexScanState *node) ResetExprContext(econtext); if (!ExecQual(node->indexqualorig, econtext, false)) - ExecClearTuple(slot); /* would not be returned by scan */ + ExecClearTuple(slot); /* would not be returned by scan */ /* Flag for the next call that no more tuples */ estate->es_evTupleNull[scanrelid - 1] = true; @@ -118,22 +118,21 @@ IndexNext(IndexScanState *node) if ((tuple = index_getnext(scandesc, direction)) != NULL) { /* - * Store the scanned tuple in the scan tuple slot of the scan - * state. Note: we pass 'false' because tuples returned by - * amgetnext are pointers onto disk pages and must not be - * pfree()'d. + * Store the scanned tuple in the scan tuple slot of the scan state. + * Note: we pass 'false' because tuples returned by amgetnext are + * pointers onto disk pages and must not be pfree()'d. */ - ExecStoreTuple(tuple, /* tuple to store */ - slot, /* slot to store in */ - scandesc->xs_cbuf, /* buffer containing tuple */ - false); /* don't pfree */ + ExecStoreTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + scandesc->xs_cbuf, /* buffer containing tuple */ + false); /* don't pfree */ return slot; } /* - * if we get here it means the index scan failed so we are at the end - * of the scan.. + * if we get here it means the index scan failed so we are at the end of + * the scan.. */ return ExecClearTuple(slot); } @@ -146,8 +145,7 @@ TupleTableSlot * ExecIndexScan(IndexScanState *node) { /* - * If we have runtime keys and they've not already been set up, do it - * now. + * If we have runtime keys and they've not already been set up, do it now. */ if (node->iss_RuntimeKeyInfo && !node->iss_RuntimeKeysReady) ExecReScan((PlanState *) node, NULL); @@ -179,8 +177,7 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt) Index scanrelid; estate = node->ss.ps.state; - econtext = node->iss_RuntimeContext; /* context for runtime - * keys */ + econtext = node->iss_RuntimeContext; /* context for runtime keys */ scanKeys = node->iss_ScanKeys; runtimeKeyInfo = node->iss_RuntimeKeyInfo; numScanKeys = node->iss_NumScanKeys; @@ -203,16 +200,16 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt) } /* - * Reset the runtime-key context so we don't leak memory as each - * outer tuple is scanned. Note this assumes that we will - * recalculate *all* runtime keys on each call. + * Reset the runtime-key context so we don't leak memory as each outer + * tuple is scanned. Note this assumes that we will recalculate *all* + * runtime keys on each call. */ ResetExprContext(econtext); } /* - * If we are doing runtime key calculations (ie, the index keys depend - * on data from an outer scan), compute the new key values + * If we are doing runtime key calculations (ie, the index keys depend on + * data from an outer scan), compute the new key values */ if (runtimeKeyInfo) { @@ -251,16 +248,16 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext, for (j = 0; j < n_keys; j++) { /* - * If we have a run-time key, then extract the run-time - * expression and evaluate it with respect to the current - * outer tuple. We then stick the result into the scan key. + * If we have a run-time key, then extract the run-time expression and + * evaluate it with respect to the current outer tuple. We then stick + * the result into the scan key. * - * Note: the result of the eval could be a pass-by-ref value - * that's stored in the outer scan's tuple, not in - * econtext->ecxt_per_tuple_memory. 
We assume that the - * outer tuple will stay put throughout our scan. If this - * is wrong, we could copy the result into our context - * explicitly, but I think that's not necessary... + * Note: the result of the eval could be a pass-by-ref value that's + * stored in the outer scan's tuple, not in + * econtext->ecxt_per_tuple_memory. We assume that the outer tuple + * will stay put throughout our scan. If this is wrong, we could copy + * the result into our context explicitly, but I think that's not + * necessary... */ if (run_keys[j] != NULL) { @@ -323,9 +320,8 @@ ExecEndIndexScan(IndexScanState *node) * close the heap relation. * * Currently, we do not release the AccessShareLock acquired by - * ExecInitIndexScan. This lock should be held till end of - * transaction. (There is a faction that considers this too much - * locking, however.) + * ExecInitIndexScan. This lock should be held till end of transaction. + * (There is a faction that considers this too much locking, however.) */ heap_close(relation, NoLock); } @@ -392,11 +388,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate) * initialize child expressions * * Note: we don't initialize all of the indexqual expression, only the - * sub-parts corresponding to runtime keys (see below). The - * indexqualorig expression is always initialized even though it will - * only be used in some uncommon cases --- would be nice to improve - * that. (Problem is that any SubPlans present in the expression must - * be found now...) + * sub-parts corresponding to runtime keys (see below). The indexqualorig + * expression is always initialized even though it will only be used in + * some uncommon cases --- would be nice to improve that. (Problem is + * that any SubPlans present in the expression must be found now...) */ indexstate->ss.ps.targetlist = (List *) ExecInitExpr((Expr *) node->scan.plan.targetlist, @@ -440,10 +435,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate) indexstate->iss_NumScanKeys = numScanKeys; /* - * If we have runtime keys, we need an ExprContext to evaluate them. - * The node's standard context won't do because we want to reset that - * context for every tuple. So, build another context just like the - * other one... -tgl 7/11/00 + * If we have runtime keys, we need an ExprContext to evaluate them. The + * node's standard context won't do because we want to reset that context + * for every tuple. So, build another context just like the other one... + * -tgl 7/11/00 */ if (have_runtime_keys) { @@ -476,10 +471,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate) ExecAssignScanType(&indexstate->ss, RelationGetDescr(currentRelation), false); /* - * open the index relation and initialize relation and scan - * descriptors. Note we acquire no locks here; the index machinery - * does its own locks and unlocks. (We rely on having AccessShareLock - * on the parent table to ensure the index won't go away!) + * open the index relation and initialize relation and scan descriptors. + * Note we acquire no locks here; the index machinery does its own locks + * and unlocks. (We rely on having AccessShareLock on the parent table to + * ensure the index won't go away!) 
*/ indexstate->iss_RelationDesc = index_open(node->indexid); indexstate->iss_ScanDesc = index_beginscan(currentRelation, @@ -543,8 +538,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals, (ExprState **) palloc(n_keys * sizeof(ExprState *)); /* - * for each opclause in the given qual, convert each qual's - * opclause into a single scan key + * for each opclause in the given qual, convert each qual's opclause into + * a single scan key */ qual_cell = list_head(quals); strategy_cell = list_head(strategies); @@ -552,15 +547,15 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals, for (j = 0; j < n_keys; j++) { - OpExpr *clause; /* one clause of index qual */ - Expr *leftop; /* expr on lhs of operator */ - Expr *rightop; /* expr on rhs ... */ + OpExpr *clause; /* one clause of index qual */ + Expr *leftop; /* expr on lhs of operator */ + Expr *rightop; /* expr on rhs ... */ int flags = 0; - AttrNumber varattno; /* att number used in scan */ + AttrNumber varattno; /* att number used in scan */ StrategyNumber strategy; /* op's strategy number */ - Oid subtype; /* op's strategy subtype */ - RegProcedure opfuncid; /* operator proc id used in scan */ - Datum scanvalue; /* value used in scan (if const) */ + Oid subtype; /* op's strategy subtype */ + RegProcedure opfuncid; /* operator proc id used in scan */ + Datum scanvalue; /* value used in scan (if const) */ /* * extract clause information from the qualification @@ -578,18 +573,17 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals, opfuncid = clause->opfuncid; /* - * Here we figure out the contents of the index qual. The - * usual case is (var op const) which means we form a scan key - * for the attribute listed in the var node and use the value - * of the const as comparison data. + * Here we figure out the contents of the index qual. The usual case + * is (var op const) which means we form a scan key for the attribute + * listed in the var node and use the value of the const as comparison + * data. * - * If we don't have a const node, it means our scan key is a - * function of information obtained during the execution of - * the plan, in which case we need to recalculate the index - * scan key at run time. Hence, we set have_runtime_keys to - * true and place the appropriate subexpression in run_keys. - * The corresponding scan key values are recomputed at run - * time. + * If we don't have a const node, it means our scan key is a function of + * information obtained during the execution of the plan, in which + * case we need to recalculate the index scan key at run time. Hence, + * we set have_runtime_keys to true and place the appropriate + * subexpression in run_keys. The corresponding scan key values are + * recomputed at run time. */ run_keys[j] = NULL; @@ -622,8 +616,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals, if (IsA(rightop, Const)) { /* - * if the rightop is a const node then it means it - * identifies the value to place in our scan key. + * if the rightop is a const node then it means it identifies the + * value to place in our scan key. */ scanvalue = ((Const *) rightop)->constvalue; if (((Const *) rightop)->constisnull) @@ -632,9 +626,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals, else { /* - * otherwise, the rightop contains an expression evaluable - * at runtime to figure out the value to place in our scan - * key. + * otherwise, the rightop contains an expression evaluable at + * runtime to figure out the value to place in our scan key. 
*/ have_runtime_keys = true; run_keys[j] = ExecInitExpr(rightop, planstate); @@ -646,11 +639,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals, */ ScanKeyEntryInitialize(&scan_keys[j], flags, - varattno, /* attribute number to scan */ - strategy, /* op's strategy */ - subtype, /* strategy subtype */ - opfuncid, /* reg proc to use */ - scanvalue); /* constant */ + varattno, /* attribute number to scan */ + strategy, /* op's strategy */ + subtype, /* strategy subtype */ + opfuncid, /* reg proc to use */ + scanvalue); /* constant */ } /* If no runtime keys, get rid of speculatively-allocated array */ diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index 40e0283e86f..462db0aee9d 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.21 2005/03/16 21:38:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.22 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -61,10 +61,9 @@ ExecLimit(LimitState *node) return NULL; /* - * First call for this scan, so compute limit/offset. (We - * can't do this any earlier, because parameters from upper - * nodes may not be set until now.) This also sets position = - * 0. + * First call for this scan, so compute limit/offset. (We can't do + * this any earlier, because parameters from upper nodes may not + * be set until now.) This also sets position = 0. */ recompute_limits(node); @@ -86,8 +85,8 @@ ExecLimit(LimitState *node) if (TupIsNull(slot)) { /* - * The subplan returns too few tuples for us to - * produce any output at all. + * The subplan returns too few tuples for us to produce + * any output at all. */ node->lstate = LIMIT_EMPTY; return NULL; @@ -115,11 +114,10 @@ ExecLimit(LimitState *node) if (ScanDirectionIsForward(direction)) { /* - * Forwards scan, so check for stepping off end of window. - * If we are at the end of the window, return NULL without - * advancing the subplan or the position variable; but - * change the state machine state to record having done - * so. + * Forwards scan, so check for stepping off end of window. If + * we are at the end of the window, return NULL without + * advancing the subplan or the position variable; but change + * the state machine state to record having done so. */ if (!node->noCount && node->position >= node->offset + node->count) @@ -143,9 +141,8 @@ ExecLimit(LimitState *node) else { /* - * Backwards scan, so check for stepping off start of - * window. As above, change only state-machine status if - * so. + * Backwards scan, so check for stepping off start of window. + * As above, change only state-machine status if so. */ if (node->position <= node->offset + 1) { @@ -169,9 +166,8 @@ ExecLimit(LimitState *node) return NULL; /* - * Backing up from subplan EOF, so re-fetch previous tuple; - * there should be one! Note previous tuple must be in - * window. + * Backing up from subplan EOF, so re-fetch previous tuple; there + * should be one! Note previous tuple must be in window. 
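/*
 * Illustrative sketch (editor's addition, not part of the diff).  The
 * ExecLimit comments above describe a window test over a 1-based position:
 * rows are emitted only while offset < position <= offset + count, and
 * "noCount" means LIMIT ALL (no upper bound).  The names below are
 * hypothetical stand-ins for the node's state fields.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
row_in_window(long position, long offset, long count, bool noCount)
{
	if (position <= offset)
		return false;			/* still skipping the OFFSET rows */
	if (!noCount && position > offset + count)
		return false;			/* stepped off the end of the window */
	return true;
}

int
main(void)
{
	/* e.g. OFFSET 2 LIMIT 3 keeps positions 3, 4 and 5 */
	for (long pos = 1; pos <= 7; pos++)
		printf("pos %ld: %s\n", pos,
			   row_in_window(pos, 2, 3, false) ? "emit" : "skip");
	return 0;
}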
*/ slot = ExecProcNode(outerPlan); if (TupIsNull(slot)) @@ -328,8 +324,8 @@ ExecInitLimit(Limit *node, EState *estate) outerPlanState(limitstate) = ExecInitNode(outerPlan, estate); /* - * limit nodes do no projections, so initialize projection info for - * this node appropriately + * limit nodes do no projections, so initialize projection info for this + * node appropriately */ ExecAssignResultTypeFromOuterPlan(&limitstate->ps); limitstate->ps.ps_ProjInfo = NULL; diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index fe128595576..750f355b0ee 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.49 2005/03/16 21:38:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.50 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -68,8 +68,8 @@ ExecMaterial(MaterialState *node) } /* - * If we are not at the end of the tuplestore, or are going backwards, - * try to fetch a tuple from tuplestore. + * If we are not at the end of the tuplestore, or are going backwards, try + * to fetch a tuple from tuplestore. */ eof_tuplestore = tuplestore_ateof(tuplestorestate); @@ -79,9 +79,9 @@ ExecMaterial(MaterialState *node) { /* * When reversing direction at tuplestore EOF, the first - * getheaptuple call will fetch the last-added tuple; but we - * want to return the one before that, if possible. So do an - * extra fetch. + * getheaptuple call will fetch the last-added tuple; but we want + * to return the one before that, if possible. So do an extra + * fetch. */ heapTuple = tuplestore_getheaptuple(tuplestorestate, forward, @@ -106,10 +106,10 @@ ExecMaterial(MaterialState *node) /* * If necessary, try to fetch another row from the subplan. * - * Note: the eof_underlying state variable exists to short-circuit - * further subplan calls. It's not optional, unfortunately, because - * some plan node types are not robust about being called again when - * they've already returned NULL. + * Note: the eof_underlying state variable exists to short-circuit further + * subplan calls. It's not optional, unfortunately, because some plan + * node types are not robust about being called again when they've already + * returned NULL. */ if (eof_tuplestore && !node->eof_underlying) { @@ -117,8 +117,8 @@ ExecMaterial(MaterialState *node) TupleTableSlot *outerslot; /* - * We can only get here with forward==true, so no need to worry - * about which direction the subplan will go. + * We can only get here with forward==true, so no need to worry about + * which direction the subplan will go. */ outerNode = outerPlanState(node); outerslot = ExecProcNode(outerNode); @@ -132,8 +132,8 @@ ExecMaterial(MaterialState *node) /* * Append returned tuple to tuplestore, too. NOTE: because the - * tuplestore is certainly in EOF state, its read position will - * move forward over the added tuple. This is what we want. + * tuplestore is certainly in EOF state, its read position will move + * forward over the added tuple. This is what we want. */ tuplestore_puttuple(tuplestorestate, (void *) heapTuple); } @@ -192,8 +192,8 @@ ExecInitMaterial(Material *node, EState *estate) outerPlanState(matstate) = ExecInitNode(outerPlan, estate); /* - * initialize tuple type. no need to initialize projection info - * because this node doesn't do projections. + * initialize tuple type. 
no need to initialize projection info because + * this node doesn't do projections. */ ExecAssignResultTypeFromOuterPlan(&matstate->ss.ps); ExecAssignScanTypeFromOuterPlan(&matstate->ss); @@ -284,9 +284,9 @@ void ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt) { /* - * If we haven't materialized yet, just return. If outerplan' chgParam - * is not NULL then it will be re-scanned by ExecProcNode, else - no - * reason to re-scan it at all. + * If we haven't materialized yet, just return. If outerplan' chgParam is + * not NULL then it will be re-scanned by ExecProcNode, else - no reason + * to re-scan it at all. */ if (!node->tuplestorestate) return; @@ -294,11 +294,11 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); /* - * If subnode is to be rescanned then we forget previous stored - * results; we have to re-read the subplan and re-store. + * If subnode is to be rescanned then we forget previous stored results; + * we have to re-read the subplan and re-store. * - * Otherwise we can just rewind and rescan the stored output. The state - * of the subnode does not change. + * Otherwise we can just rewind and rescan the stored output. The state of + * the subnode does not change. */ if (((PlanState *) node)->lefttree->chgParam != NULL) { diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index fb279e8b68e..0d4eed4c9ba 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.74 2005/05/15 21:19:55 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -40,7 +40,7 @@ * matching tuple and so on. * * Therefore, when initializing the merge-join node, we look up the - * associated sort operators. We assume the planner has seen to it + * associated sort operators. We assume the planner has seen to it * that the inputs are correctly sorted by these operators. Rather * than directly executing the merge join clauses, we evaluate the * left and right key expressions separately and then compare the @@ -124,30 +124,33 @@ typedef enum typedef struct MergeJoinClauseData { /* Executable expression trees */ - ExprState *lexpr; /* left-hand (outer) input expression */ - ExprState *rexpr; /* right-hand (inner) input expression */ + ExprState *lexpr; /* left-hand (outer) input expression */ + ExprState *rexpr; /* right-hand (inner) input expression */ + /* * If we have a current left or right input tuple, the values of the * expressions are loaded into these fields: */ - Datum ldatum; /* current left-hand value */ - Datum rdatum; /* current right-hand value */ - bool lisnull; /* and their isnull flags */ - bool risnull; + Datum ldatum; /* current left-hand value */ + Datum rdatum; /* current right-hand value */ + bool lisnull; /* and their isnull flags */ + bool risnull; + /* * Remember whether mergejoin operator is strict (usually it will be). - * NOTE: if it's not strict, we still assume it cannot return true for - * one null and one non-null input. + * NOTE: if it's not strict, we still assume it cannot return true for one + * null and one non-null input. */ - bool mergestrict; + bool mergestrict; + /* - * The comparison strategy in use, and the lookup info to let us call - * the needed comparison routines. 
eqfinfo is the "=" operator itself. + * The comparison strategy in use, and the lookup info to let us call the + * needed comparison routines. eqfinfo is the "=" operator itself. * cmpfinfo is either the btree comparator or the "<" operator. */ MergeFunctionKind cmpstrategy; - FmgrInfo eqfinfo; - FmgrInfo cmpfinfo; + FmgrInfo eqfinfo; + FmgrInfo cmpfinfo; } MergeJoinClauseData; @@ -167,8 +170,8 @@ typedef struct MergeJoinClauseData * * The best, most efficient way to compare two expressions is to use a btree * comparison support routine, since that requires only one function call - * per comparison. Hence we try to find a btree opclass that matches the - * mergejoinable operator. If we cannot find one, we'll have to call both + * per comparison. Hence we try to find a btree opclass that matches the + * mergejoinable operator. If we cannot find one, we'll have to call both * the "=" and (often) the "<" operator for each comparison. */ static MergeJoinClause @@ -204,8 +207,8 @@ MJExamineQuals(List *qualList, PlanState *parent) clause->rexpr = ExecInitExpr((Expr *) lsecond(qual->args), parent); /* - * Check permission to call the mergejoinable operator. - * For predictability, we check this even if we end up not using it. + * Check permission to call the mergejoinable operator. For + * predictability, we check this even if we end up not using it. */ aclresult = pg_proc_aclcheck(qual->opfuncid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) @@ -220,7 +223,7 @@ MJExamineQuals(List *qualList, PlanState *parent) /* * Lookup the comparison operators that go with the mergejoinable - * top-level operator. (This will elog if the operator isn't + * top-level operator. (This will elog if the operator isn't * mergejoinable, which would be the planner's mistake.) */ op_mergejoin_crossops(qual->opno, @@ -232,13 +235,12 @@ MJExamineQuals(List *qualList, PlanState *parent) clause->cmpstrategy = MERGEFUNC_LT; /* - * Look for a btree opclass including all three operators. - * This is much like SelectSortFunction except we insist on - * matching all the operators provided, and it can be a cross-type - * opclass. + * Look for a btree opclass including all three operators. This is + * much like SelectSortFunction except we insist on matching all the + * operators provided, and it can be a cross-type opclass. * - * XXX for now, insist on forward sort so that NULLs can be counted - * on to be high. + * XXX for now, insist on forward sort so that NULLs can be counted on to + * be high. */ catlist = SearchSysCacheList(AMOPOPID, 1, ObjectIdGetDatum(qual->opno), @@ -255,13 +257,13 @@ MJExamineQuals(List *qualList, PlanState *parent) if (!opclass_is_btree(opcid)) continue; if (get_op_opclass_strategy(ltop, opcid) == BTLessStrategyNumber && - get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber) + get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber) { clause->cmpstrategy = MERGEFUNC_CMP; ltproc = get_opclass_proc(opcid, aform->amopsubtype, BTORDER_PROC); Assert(RegProcedureIsValid(ltproc)); - break; /* done looking */ + break; /* done looking */ } } @@ -325,7 +327,7 @@ MJEvalOuterValues(MergeJoinState *mergestate) /* * MJEvalInnerValues * - * Same as above, but for the inner tuple. Here, we have to be prepared + * Same as above, but for the inner tuple. Here, we have to be prepared * to load data from either the true current inner, or the marked inner, * so caller must tell us which slot to load from. 
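/*
 * Illustrative sketch (editor's addition, not part of the diff).  The
 * MJExamineQuals comments above contrast two comparison strategies: with a
 * btree support function (MERGEFUNC_CMP) a single call yields <0/0/>0,
 * while without one (MERGEFUNC_LT) the "=" operator and, on a miss, the
 * "<" operator must both be called.  int_cmp/int_eq/int_lt are hypothetical
 * stand-ins for the catalog functions the real code looks up.
 */
#include <stdio.h>

static int int_cmp(int a, int b) { return (a < b) ? -1 : (a > b) ? 1 : 0; }
static int int_eq(int a, int b)  { return a == b; }
static int int_lt(int a, int b)  { return a < b; }

static int
compare_cmp_strategy(int l, int r)
{
	return int_cmp(l, r);			/* one function call per comparison */
}

static int
compare_lt_strategy(int l, int r)
{
	if (int_eq(l, r))				/* first call: the "=" operator */
		return 0;
	return int_lt(l, r) ? -1 : 1;	/* second call: the "<" operator */
}

int
main(void)
{
	printf("%d %d\n", compare_cmp_strategy(1, 2), compare_lt_strategy(1, 2));
	return 0;
}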
*/ @@ -379,8 +381,8 @@ MJCompare(MergeJoinState *mergestate) FunctionCallInfoData fcinfo; /* - * Call the comparison functions in short-lived context, in case they - * leak memory. + * Call the comparison functions in short-lived context, in case they leak + * memory. */ ResetExprContext(econtext); @@ -394,11 +396,11 @@ MJCompare(MergeJoinState *mergestate) /* * Deal with null inputs. We treat NULL as sorting after non-NULL. * - * If both inputs are NULL, and the comparison function isn't - * strict, then we call it and check for a true result (this allows - * operators that behave like IS NOT DISTINCT to be mergejoinable). - * If the function is strict or returns false, we temporarily - * pretend NULL == NULL and contine checking remaining columns. + * If both inputs are NULL, and the comparison function isn't strict, + * then we call it and check for a true result (this allows operators + * that behave like IS NOT DISTINCT to be mergejoinable). If the + * function is strict or returns false, we temporarily pretend NULL == + * NULL and contine checking remaining columns. */ if (clause->lisnull) { @@ -477,7 +479,8 @@ MJCompare(MergeJoinState *mergestate) break; } } - else /* must be MERGEFUNC_CMP */ + else + /* must be MERGEFUNC_CMP */ { InitFunctionCallInfoData(fcinfo, &(clause->cmpfinfo), 2, NULL, NULL); @@ -512,10 +515,10 @@ MJCompare(MergeJoinState *mergestate) } /* - * If we had any null comparison results or NULL-vs-NULL inputs, - * we do not want to report that the tuples are equal. Instead, - * if result is still 0, change it to +1. This will result in - * advancing the inner side of the join. + * If we had any null comparison results or NULL-vs-NULL inputs, we do not + * want to report that the tuples are equal. Instead, if result is still + * 0, change it to +1. This will result in advancing the inner side of + * the join. */ if (nulleqnull && result == 0) result = 1; @@ -544,8 +547,8 @@ MJFillOuter(MergeJoinState *node) if (ExecQual(otherqual, econtext, false)) { /* - * qualification succeeded. now form the desired projection tuple - * and return the slot containing it. + * qualification succeeded. now form the desired projection tuple and + * return the slot containing it. */ TupleTableSlot *result; ExprDoneCond isDone; @@ -583,8 +586,8 @@ MJFillInner(MergeJoinState *node) if (ExecQual(otherqual, econtext, false)) { /* - * qualification succeeded. now form the desired projection tuple - * and return the slot containing it. + * qualification succeeded. now form the desired projection tuple and + * return the slot containing it. */ TupleTableSlot *result; ExprDoneCond isDone; @@ -696,9 +699,9 @@ ExecMergeJoin(MergeJoinState *node) doFillInner = node->mj_FillInner; /* - * Check to see if we're still projecting out tuples from a previous - * join tuple (because there is a function-returning-set in the - * projection expressions). If so, try to project another one. + * Check to see if we're still projecting out tuples from a previous join + * tuple (because there is a function-returning-set in the projection + * expressions). If so, try to project another one. */ if (node->js.ps.ps_TupFromTlist) { @@ -714,8 +717,8 @@ ExecMergeJoin(MergeJoinState *node) /* * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous tuple cycle. Note this can't - * happen until we're done projecting out tuples from a join tuple. + * storage allocated in the previous tuple cycle. 
Note this can't happen + * until we're done projecting out tuples from a join tuple. */ ResetExprContext(econtext); @@ -733,10 +736,10 @@ ExecMergeJoin(MergeJoinState *node) { /* * EXEC_MJ_INITIALIZE_OUTER means that this is the first time - * ExecMergeJoin() has been called and so we have to fetch - * the first matchable tuple for both outer and inner subplans. - * We do the outer side in INITIALIZE_OUTER state, then - * advance to INITIALIZE_INNER state for the inner subplan. + * ExecMergeJoin() has been called and so we have to fetch the + * first matchable tuple for both outer and inner subplans. We + * do the outer side in INITIALIZE_OUTER state, then advance + * to INITIALIZE_INNER state for the inner subplan. */ case EXEC_MJ_INITIALIZE_OUTER: MJ_printf("ExecMergeJoin: EXEC_MJ_INITIALIZE_OUTER\n"); @@ -749,9 +752,9 @@ ExecMergeJoin(MergeJoinState *node) if (doFillInner) { /* - * Need to emit right-join tuples for remaining - * inner tuples. We set MatchedInner = true to - * force the ENDOUTER state to advance inner. + * Need to emit right-join tuples for remaining inner + * tuples. We set MatchedInner = true to force the + * ENDOUTER state to advance inner. */ node->mj_JoinState = EXEC_MJ_ENDOUTER; node->mj_MatchedInner = true; @@ -797,11 +800,10 @@ ExecMergeJoin(MergeJoinState *node) if (doFillOuter) { /* - * Need to emit left-join tuples for all outer - * tuples, including the one we just fetched. We - * set MatchedOuter = false to force the ENDINNER - * state to emit first tuple before advancing - * outer. + * Need to emit left-join tuples for all outer tuples, + * including the one we just fetched. We set + * MatchedOuter = false to force the ENDINNER state to + * emit first tuple before advancing outer. */ node->mj_JoinState = EXEC_MJ_ENDINNER; node->mj_MatchedOuter = false; @@ -840,9 +842,9 @@ ExecMergeJoin(MergeJoinState *node) break; /* - * EXEC_MJ_JOINTUPLES means we have two tuples which - * satisfied the merge clause so we join them and then - * proceed to get the next inner tuple (EXEC_MJ_NEXTINNER). + * EXEC_MJ_JOINTUPLES means we have two tuples which satisfied + * the merge clause so we join them and then proceed to get + * the next inner tuple (EXEC_MJ_NEXTINNER). */ case EXEC_MJ_JOINTUPLES: MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTUPLES\n"); @@ -855,18 +857,18 @@ ExecMergeJoin(MergeJoinState *node) node->mj_JoinState = EXEC_MJ_NEXTINNER; /* - * Check the extra qual conditions to see if we actually - * want to return this join tuple. If not, can proceed - * with merge. We must distinguish the additional - * joinquals (which must pass to consider the tuples - * "matched" for outer-join logic) from the otherquals - * (which must pass before we actually return the tuple). + * Check the extra qual conditions to see if we actually want + * to return this join tuple. If not, can proceed with merge. + * We must distinguish the additional joinquals (which must + * pass to consider the tuples "matched" for outer-join logic) + * from the otherquals (which must pass before we actually + * return the tuple). * * We don't bother with a ResetExprContext here, on the - * assumption that we just did one while checking the - * merge qual. One per tuple should be sufficient. We - * do have to set up the econtext links to the tuples - * for ExecQual to use. + * assumption that we just did one while checking the merge + * qual. One per tuple should be sufficient. We do have to + * set up the econtext links to the tuples for ExecQual to + * use. 
*/ outerTupleSlot = node->mj_OuterTupleSlot; econtext->ecxt_outertuple = outerTupleSlot; @@ -896,8 +898,7 @@ ExecMergeJoin(MergeJoinState *node) { /* * qualification succeeded. now form the desired - * projection tuple and return the slot containing - * it. + * projection tuple and return the slot containing it. */ TupleTableSlot *result; ExprDoneCond isDone; @@ -918,9 +919,9 @@ ExecMergeJoin(MergeJoinState *node) break; /* - * EXEC_MJ_NEXTINNER means advance the inner scan to the - * next tuple. If the tuple is not nil, we then proceed to - * test it against the join qualification. + * EXEC_MJ_NEXTINNER means advance the inner scan to the next + * tuple. If the tuple is not nil, we then proceed to test it + * against the join qualification. * * Before advancing, we check to see if we must emit an * outer-join fill tuple for this inner tuple. @@ -932,8 +933,7 @@ ExecMergeJoin(MergeJoinState *node) { /* * Generate a fake join tuple with nulls for the outer - * tuple, and return it if it passes the non-join - * quals. + * tuple, and return it if it passes the non-join quals. */ TupleTableSlot *result; @@ -945,12 +945,12 @@ ExecMergeJoin(MergeJoinState *node) } /* - * now we get the next inner tuple, if any. If there's - * none, advance to next outer tuple (which may be able - * to join to previously marked tuples). + * now we get the next inner tuple, if any. If there's none, + * advance to next outer tuple (which may be able to join to + * previously marked tuples). * - * If we find one but it cannot join to anything, stay - * in NEXTINNER state to fetch the next one. + * If we find one but it cannot join to anything, stay in + * NEXTINNER state to fetch the next one. */ innerTupleSlot = ExecProcNode(innerPlan); node->mj_InnerTupleSlot = innerTupleSlot; @@ -969,8 +969,8 @@ ExecMergeJoin(MergeJoinState *node) /* * Test the new inner tuple to see if it matches outer. * - * If they do match, then we join them and move on to the - * next inner tuple (EXEC_MJ_JOINTUPLES). + * If they do match, then we join them and move on to the next + * inner tuple (EXEC_MJ_JOINTUPLES). * * If they do not match then advance to next outer tuple. */ @@ -1013,8 +1013,7 @@ ExecMergeJoin(MergeJoinState *node) { /* * Generate a fake join tuple with nulls for the inner - * tuple, and return it if it passes the non-join - * quals. + * tuple, and return it if it passes the non-join quals. */ TupleTableSlot *result; @@ -1034,8 +1033,8 @@ ExecMergeJoin(MergeJoinState *node) node->mj_MatchedOuter = false; /* - * if the outer tuple is null then we are done with the - * join, unless we have inner tuples we need to null-fill. + * if the outer tuple is null then we are done with the join, + * unless we have inner tuples we need to null-fill. */ if (TupIsNull(outerTupleSlot)) { @@ -1044,8 +1043,8 @@ ExecMergeJoin(MergeJoinState *node) if (doFillInner && !TupIsNull(innerTupleSlot)) { /* - * Need to emit right-join tuples for remaining - * inner tuples. + * Need to emit right-join tuples for remaining inner + * tuples. */ node->mj_JoinState = EXEC_MJ_ENDOUTER; break; @@ -1118,26 +1117,25 @@ ExecMergeJoin(MergeJoinState *node) if (compareResult == 0) { /* - * the merge clause matched so now we restore the - * inner scan position to the first mark, and go join - * that tuple (and any following ones) to the new outer. + * the merge clause matched so now we restore the inner + * scan position to the first mark, and go join that tuple + * (and any following ones) to the new outer. 
* - * NOTE: we do not need to worry about the MatchedInner - * state for the rescanned inner tuples. We know all - * of them will match this new outer tuple and - * therefore won't be emitted as fill tuples. This - * works *only* because we require the extra joinquals - * to be nil when doing a right or full join --- - * otherwise some of the rescanned tuples might fail - * the extra joinquals. + * NOTE: we do not need to worry about the MatchedInner state + * for the rescanned inner tuples. We know all of them + * will match this new outer tuple and therefore won't be + * emitted as fill tuples. This works *only* because we + * require the extra joinquals to be nil when doing a + * right or full join --- otherwise some of the rescanned + * tuples might fail the extra joinquals. */ ExecRestrPos(innerPlan); /* * ExecRestrPos probably should give us back a new Slot, * but since it doesn't, use the marked slot. (The - * previously returned mj_InnerTupleSlot cannot be - * assumed to hold the required tuple.) + * previously returned mj_InnerTupleSlot cannot be assumed + * to hold the required tuple.) */ node->mj_InnerTupleSlot = innerTupleSlot; /* we need not do MJEvalInnerValues again */ @@ -1159,7 +1157,7 @@ ExecMergeJoin(MergeJoinState *node) * which means that all subsequent outer tuples will be * larger than our marked inner tuples. So we need not * revisit any of the marked tuples but can proceed to - * look for a match to the current inner. If there's + * look for a match to the current inner. If there's * no more inners, we are done. * ---------------- */ @@ -1222,8 +1220,8 @@ ExecMergeJoin(MergeJoinState *node) /* * before we advance, make sure the current tuples do not - * satisfy the mergeclauses. If they do, then we update - * the marked tuple position and go join them. + * satisfy the mergeclauses. If they do, then we update the + * marked tuple position and go join them. */ compareResult = MJCompare(node); MJ_DEBUG_COMPARE(compareResult); @@ -1238,7 +1236,8 @@ ExecMergeJoin(MergeJoinState *node) } else if (compareResult < 0) node->mj_JoinState = EXEC_MJ_SKIPOUTER_ADVANCE; - else /* compareResult > 0 */ + else + /* compareResult > 0 */ node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE; break; @@ -1253,8 +1252,7 @@ ExecMergeJoin(MergeJoinState *node) { /* * Generate a fake join tuple with nulls for the inner - * tuple, and return it if it passes the non-join - * quals. + * tuple, and return it if it passes the non-join quals. */ TupleTableSlot *result; @@ -1274,8 +1272,8 @@ ExecMergeJoin(MergeJoinState *node) node->mj_MatchedOuter = false; /* - * if the outer tuple is null then we are done with the - * join, unless we have inner tuples we need to null-fill. + * if the outer tuple is null then we are done with the join, + * unless we have inner tuples we need to null-fill. */ if (TupIsNull(outerTupleSlot)) { @@ -1284,8 +1282,8 @@ ExecMergeJoin(MergeJoinState *node) if (doFillInner && !TupIsNull(innerTupleSlot)) { /* - * Need to emit right-join tuples for remaining - * inner tuples. + * Need to emit right-join tuples for remaining inner + * tuples. */ node->mj_JoinState = EXEC_MJ_ENDOUTER; break; @@ -1317,8 +1315,7 @@ ExecMergeJoin(MergeJoinState *node) { /* * Generate a fake join tuple with nulls for the outer - * tuple, and return it if it passes the non-join - * quals. + * tuple, and return it if it passes the non-join quals. 
*/ TupleTableSlot *result; @@ -1338,8 +1335,8 @@ ExecMergeJoin(MergeJoinState *node) node->mj_MatchedInner = false; /* - * if the inner tuple is null then we are done with the - * join, unless we have outer tuples we need to null-fill. + * if the inner tuple is null then we are done with the join, + * unless we have outer tuples we need to null-fill. */ if (TupIsNull(innerTupleSlot)) { @@ -1348,8 +1345,8 @@ ExecMergeJoin(MergeJoinState *node) if (doFillOuter && !TupIsNull(outerTupleSlot)) { /* - * Need to emit left-join tuples for remaining - * outer tuples. + * Need to emit left-join tuples for remaining outer + * tuples. */ node->mj_JoinState = EXEC_MJ_ENDINNER; break; @@ -1371,9 +1368,9 @@ ExecMergeJoin(MergeJoinState *node) break; /* - * EXEC_MJ_ENDOUTER means we have run out of outer tuples, - * but are doing a right/full join and therefore must - * null-fill any remaing unmatched inner tuples. + * EXEC_MJ_ENDOUTER means we have run out of outer tuples, but + * are doing a right/full join and therefore must null-fill + * any remaing unmatched inner tuples. */ case EXEC_MJ_ENDOUTER: MJ_printf("ExecMergeJoin: EXEC_MJ_ENDOUTER\n"); @@ -1384,8 +1381,7 @@ ExecMergeJoin(MergeJoinState *node) { /* * Generate a fake join tuple with nulls for the outer - * tuple, and return it if it passes the non-join - * quals. + * tuple, and return it if it passes the non-join quals. */ TupleTableSlot *result; @@ -1414,9 +1410,9 @@ ExecMergeJoin(MergeJoinState *node) break; /* - * EXEC_MJ_ENDINNER means we have run out of inner tuples, - * but are doing a left/full join and therefore must null- - * fill any remaing unmatched outer tuples. + * EXEC_MJ_ENDINNER means we have run out of inner tuples, but + * are doing a left/full join and therefore must null- fill + * any remaing unmatched outer tuples. */ case EXEC_MJ_ENDINNER: MJ_printf("ExecMergeJoin: EXEC_MJ_ENDINNER\n"); @@ -1427,8 +1423,7 @@ ExecMergeJoin(MergeJoinState *node) { /* * Generate a fake join tuple with nulls for the inner - * tuple, and return it if it passes the non-join - * quals. + * tuple, and return it if it passes the non-join quals. */ TupleTableSlot *result; @@ -1493,10 +1488,9 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate) ExecAssignExprContext(estate, &mergestate->js.ps); /* - * we need two additional econtexts in which we can compute the - * join expressions from the left and right input tuples. The - * node's regular econtext won't do because it gets reset too - * often. + * we need two additional econtexts in which we can compute the join + * expressions from the left and right input tuples. The node's regular + * econtext won't do because it gets reset too often. */ mergestate->mj_OuterEContext = CreateExprContext(estate); mergestate->mj_InnerEContext = CreateExprContext(estate); @@ -1546,18 +1540,18 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate) mergestate->mj_FillInner = false; mergestate->mj_NullInnerTupleSlot = ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(mergestate))); + ExecGetResultType(innerPlanState(mergestate))); break; case JOIN_RIGHT: mergestate->mj_FillOuter = false; mergestate->mj_FillInner = true; mergestate->mj_NullOuterTupleSlot = ExecInitNullTupleSlot(estate, - ExecGetResultType(outerPlanState(mergestate))); + ExecGetResultType(outerPlanState(mergestate))); /* - * Can't handle right or full join with non-nil extra - * joinclauses. This should have been caught by planner. + * Can't handle right or full join with non-nil extra joinclauses. 
+ * This should have been caught by planner. */ if (node->join.joinqual != NIL) ereport(ERROR, @@ -1569,14 +1563,13 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate) mergestate->mj_FillInner = true; mergestate->mj_NullOuterTupleSlot = ExecInitNullTupleSlot(estate, - ExecGetResultType(outerPlanState(mergestate))); + ExecGetResultType(outerPlanState(mergestate))); mergestate->mj_NullInnerTupleSlot = ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(mergestate))); + ExecGetResultType(innerPlanState(mergestate))); /* - * Can't handle right or full join with non-nil extra - * joinclauses. + * Can't handle right or full join with non-nil extra joinclauses. */ if (node->join.joinqual != NIL) ereport(ERROR, @@ -1675,8 +1668,8 @@ ExecReScanMergeJoin(MergeJoinState *node, ExprContext *exprCtxt) node->mj_InnerTupleSlot = NULL; /* - * if chgParam of subnodes is not null then plans will be re-scanned - * by first ExecProcNode. + * if chgParam of subnodes is not null then plans will be re-scanned by + * first ExecProcNode. */ if (((PlanState *) node)->lefttree->chgParam == NULL) ExecReScan(((PlanState *) node)->lefttree, exprCtxt); diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c index 8b48ceefd19..a497e9ac337 100644 --- a/src/backend/executor/nodeNestloop.c +++ b/src/backend/executor/nodeNestloop.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.38 2004/12/31 21:59:45 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -85,9 +85,9 @@ ExecNestLoop(NestLoopState *node) econtext->ecxt_outertuple = outerTupleSlot; /* - * Check to see if we're still projecting out tuples from a previous - * join tuple (because there is a function-returning-set in the - * projection expressions). If so, try to project another one. + * Check to see if we're still projecting out tuples from a previous join + * tuple (because there is a function-returning-set in the projection + * expressions). If so, try to project another one. */ if (node->js.ps.ps_TupFromTlist) { @@ -102,9 +102,9 @@ ExecNestLoop(NestLoopState *node) } /* - * If we're doing an IN join, we want to return at most one row per - * outer tuple; so we can stop scanning the inner scan if we matched - * on the previous try. + * If we're doing an IN join, we want to return at most one row per outer + * tuple; so we can stop scanning the inner scan if we matched on the + * previous try. */ if (node->js.jointype == JOIN_IN && node->nl_MatchedOuter) @@ -112,8 +112,8 @@ ExecNestLoop(NestLoopState *node) /* * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous tuple cycle. Note this can't - * happen until we're done projecting out tuples from a join tuple. + * storage allocated in the previous tuple cycle. Note this can't happen + * until we're done projecting out tuples from a join tuple. */ ResetExprContext(econtext); @@ -135,8 +135,7 @@ ExecNestLoop(NestLoopState *node) outerTupleSlot = ExecProcNode(outerPlan); /* - * if there are no more outer tuples, then the join is - * complete.. + * if there are no more outer tuples, then the join is complete.. */ if (TupIsNull(outerTupleSlot)) { @@ -157,8 +156,8 @@ ExecNestLoop(NestLoopState *node) /* * The scan key of the inner plan might depend on the current - * outer tuple (e.g. 
in index scans), that's why we pass our - * expr context. + * outer tuple (e.g. in index scans), that's why we pass our expr + * context. */ ExecReScan(innerPlan, econtext); } @@ -181,10 +180,10 @@ ExecNestLoop(NestLoopState *node) node->js.jointype == JOIN_LEFT) { /* - * We are doing an outer join and there were no join - * matches for this outer tuple. Generate a fake join - * tuple with nulls for the inner tuple, and return it if - * it passes the non-join quals. + * We are doing an outer join and there were no join matches + * for this outer tuple. Generate a fake join tuple with + * nulls for the inner tuple, and return it if it passes the + * non-join quals. */ econtext->ecxt_innertuple = node->nl_NullInnerTupleSlot; @@ -193,8 +192,8 @@ ExecNestLoop(NestLoopState *node) if (ExecQual(otherqual, econtext, false)) { /* - * qualification was satisfied so we project and - * return the slot containing the result tuple using + * qualification was satisfied so we project and return + * the slot containing the result tuple using * ExecProject(). */ TupleTableSlot *result; @@ -220,12 +219,12 @@ ExecNestLoop(NestLoopState *node) } /* - * at this point we have a new pair of inner and outer tuples so - * we test the inner and outer tuples to see if they satisfy the - * node's qualification. + * at this point we have a new pair of inner and outer tuples so we + * test the inner and outer tuples to see if they satisfy the node's + * qualification. * - * Only the joinquals determine MatchedOuter status, but all quals - * must pass to actually return the tuple. + * Only the joinquals determine MatchedOuter status, but all quals must + * pass to actually return the tuple. */ ENL1_printf("testing qualification"); @@ -236,9 +235,8 @@ ExecNestLoop(NestLoopState *node) if (otherqual == NIL || ExecQual(otherqual, econtext, false)) { /* - * qualification was satisfied so we project and return - * the slot containing the result tuple using - * ExecProject(). + * qualification was satisfied so we project and return the + * slot containing the result tuple using ExecProject(). */ TupleTableSlot *result; ExprDoneCond isDone; @@ -330,7 +328,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate) case JOIN_LEFT: nlstate->nl_NullInnerTupleSlot = ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(nlstate))); + ExecGetResultType(innerPlanState(nlstate))); break; default: elog(ERROR, "unrecognized join type: %d", @@ -408,10 +406,9 @@ ExecReScanNestLoop(NestLoopState *node, ExprContext *exprCtxt) /* * If outerPlan->chgParam is not null then plan will be automatically - * re-scanned by first ExecProcNode. innerPlan is re-scanned for each - * new outer tuple and MUST NOT be re-scanned from here or you'll get - * troubles from inner index scans when outer Vars are used as - * run-time keys... + * re-scanned by first ExecProcNode. innerPlan is re-scanned for each new + * outer tuple and MUST NOT be re-scanned from here or you'll get troubles + * from inner index scans when outer Vars are used as run-time keys... 
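/*
 * Illustrative sketch (editor's addition, not part of the diff).  It mirrors
 * the control flow the ExecNestLoop comments above describe: the inner input
 * is rescanned from the start for every outer row, and for a LEFT join an
 * unmatched outer row is emitted once with a NULL-filled inner side.  Plain
 * arrays stand in for the outer and inner plans.
 */
#include <stdbool.h>
#include <stdio.h>

static void
nestloop(const int *outer, int nouter, const int *inner, int ninner,
		 bool left_join)
{
	for (int o = 0; o < nouter; o++)
	{
		bool		matched = false;	/* analogue of nl_MatchedOuter */

		for (int i = 0; i < ninner; i++)	/* "rescan" of the inner plan */
		{
			if (outer[o] == inner[i])		/* the join qual */
			{
				printf("(%d,%d)\n", outer[o], inner[i]);
				matched = true;
			}
		}
		if (left_join && !matched)
			printf("(%d,NULL)\n", outer[o]);	/* null-filled join tuple */
	}
}

int
main(void)
{
	int			a[] = {1, 2, 3};
	int			b[] = {2, 2, 4};

	nestloop(a, 3, b, 3, true);
	return 0;
}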
*/ if (outerPlan->chgParam == NULL) ExecReScan(outerPlan, exprCtxt); diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c index 7c77dc07121..013c4e99794 100644 --- a/src/backend/executor/nodeResult.c +++ b/src/backend/executor/nodeResult.c @@ -38,7 +38,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.31 2005/04/24 15:32:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.32 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -92,9 +92,9 @@ ExecResult(ResultState *node) } /* - * Check to see if we're still projecting out tuples from a previous - * scan tuple (because there is a function-returning-set in the - * projection expressions). If so, try to project another one. + * Check to see if we're still projecting out tuples from a previous scan + * tuple (because there is a function-returning-set in the projection + * expressions). If so, try to project another one. */ if (node->ps.ps_TupFromTlist) { @@ -107,16 +107,16 @@ ExecResult(ResultState *node) /* * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous tuple cycle. Note this can't - * happen until we're done projecting out tuples from a scan tuple. + * storage allocated in the previous tuple cycle. Note this can't happen + * until we're done projecting out tuples from a scan tuple. */ ResetExprContext(econtext); /* * if rs_done is true then it means that we were asked to return a * constant tuple and we already did the last time ExecResult() was - * called, OR that we failed the constant qual check. Either way, now - * we are through. + * called, OR that we failed the constant qual check. Either way, now we + * are through. */ while (!node->rs_done) { @@ -125,8 +125,7 @@ ExecResult(ResultState *node) if (outerPlan != NULL) { /* - * retrieve tuples from the outer plan until there are no - * more. + * retrieve tuples from the outer plan until there are no more. */ outerTupleSlot = ExecProcNode(outerPlan); @@ -136,8 +135,7 @@ ExecResult(ResultState *node) node->ps.ps_OuterTupleSlot = outerTupleSlot; /* - * XXX gross hack. use outer tuple as scan tuple for - * projection + * XXX gross hack. use outer tuple as scan tuple for projection */ econtext->ecxt_outertuple = outerTupleSlot; econtext->ecxt_scantuple = outerTupleSlot; @@ -145,16 +143,16 @@ ExecResult(ResultState *node) else { /* - * if we don't have an outer plan, then we are just generating - * the results from a constant target list. Do it only once. + * if we don't have an outer plan, then we are just generating the + * results from a constant target list. Do it only once. */ node->rs_done = true; } /* - * form the result tuple using ExecProject(), and return it --- - * unless the projection produces an empty set, in which case we - * must loop back to see if there are more outerPlan tuples. + * form the result tuple using ExecProject(), and return it --- unless + * the projection produces an empty set, in which case we must loop + * back to see if there are more outerPlan tuples. 
*/ resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone); diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index fab526f399c..91e0c81e036 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.53 2005/05/15 21:19:55 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.54 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,11 +62,11 @@ SeqNext(SeqScanState *node) slot = node->ss_ScanTupleSlot; /* - * Clear any reference to the previously returned tuple. The idea - * here is to not have the tuple slot be the last holder of a pin on - * that tuple's buffer; if it is, we'll need a separate visit to the - * bufmgr to release the buffer. By clearing here, we get to have the - * release done by ReleaseAndReadBuffer inside heap_getnext. + * Clear any reference to the previously returned tuple. The idea here is + * to not have the tuple slot be the last holder of a pin on that tuple's + * buffer; if it is, we'll need a separate visit to the bufmgr to release + * the buffer. By clearing here, we get to have the release done by + * ReleaseAndReadBuffer inside heap_getnext. */ ExecClearTuple(slot); @@ -87,8 +87,8 @@ SeqNext(SeqScanState *node) /* * Note that unlike IndexScan, SeqScan never use keys in - * heap_beginscan (and this is very bad) - so, here we do not - * check are keys ok or not. + * heap_beginscan (and this is very bad) - so, here we do not check + * are keys ok or not. */ /* Flag for the next call that no more tuples */ @@ -102,20 +102,19 @@ SeqNext(SeqScanState *node) tuple = heap_getnext(scandesc, direction); /* - * save the tuple and the buffer returned to us by the access methods - * in our scan tuple slot and return the slot. Note: we pass 'false' - * because tuples returned by heap_getnext() are pointers onto disk - * pages and were not created with palloc() and so should not be - * pfree()'d. Note also that ExecStoreTuple will increment the - * refcount of the buffer; the refcount will not be dropped until the - * tuple table slot is cleared. + * save the tuple and the buffer returned to us by the access methods in + * our scan tuple slot and return the slot. Note: we pass 'false' because + * tuples returned by heap_getnext() are pointers onto disk pages and were + * not created with palloc() and so should not be pfree()'d. Note also + * that ExecStoreTuple will increment the refcount of the buffer; the + * refcount will not be dropped until the tuple table slot is cleared. */ if (tuple) - ExecStoreTuple(tuple, /* tuple to store */ - slot, /* slot to store in */ - scandesc->rs_cbuf, /* buffer associated with - * this tuple */ - false); /* don't pfree this pointer */ + ExecStoreTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + scandesc->rs_cbuf, /* buffer associated with this + * tuple */ + false); /* don't pfree this pointer */ return slot; } @@ -157,8 +156,8 @@ InitScanRelation(SeqScanState *node, EState *estate) HeapScanDesc currentScanDesc; /* - * get the relation object id from the relid'th entry in the range - * table, open that relation and initialize the scan state. + * get the relation object id from the relid'th entry in the range table, + * open that relation and initialize the scan state. * * We acquire AccessShareLock for the duration of the scan. 
*/ @@ -191,8 +190,8 @@ ExecInitSeqScan(SeqScan *node, EState *estate) SeqScanState *scanstate; /* - * Once upon a time it was possible to have an outerPlan of a SeqScan, - * but not any more. + * Once upon a time it was possible to have an outerPlan of a SeqScan, but + * not any more. */ Assert(outerPlan(node) == NULL); Assert(innerPlan(node) == NULL); @@ -291,9 +290,8 @@ ExecEndSeqScan(SeqScanState *node) * close the heap relation. * * Currently, we do not release the AccessShareLock acquired by - * InitScanRelation. This lock should be held till end of - * transaction. (There is a faction that considers this too much - * locking, however.) + * InitScanRelation. This lock should be held till end of transaction. + * (There is a faction that considers this too much locking, however.) */ heap_close(relation, NoLock); } @@ -359,10 +357,10 @@ ExecSeqRestrPos(SeqScanState *node) HeapScanDesc scan = node->ss_currentScanDesc; /* - * Clear any reference to the previously returned tuple. This is - * needed because the slot is simply pointing at scan->rs_cbuf, which - * heap_restrpos will change; we'd have an internally inconsistent - * slot if we didn't do this. + * Clear any reference to the previously returned tuple. This is needed + * because the slot is simply pointing at scan->rs_cbuf, which + * heap_restrpos will change; we'd have an internally inconsistent slot if + * we didn't do this. */ ExecClearTuple(node->ss_ScanTupleSlot); diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 6daadfd0b8c..a5ca58354c6 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -21,7 +21,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.17 2005/05/06 17:24:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.18 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -58,8 +58,8 @@ ExecSetOp(SetOpState *node) resultTupleSlot = node->ps.ps_ResultTupleSlot; /* - * If the previously-returned tuple needs to be returned more than - * once, keep returning it. + * If the previously-returned tuple needs to be returned more than once, + * keep returning it. */ if (node->numOutput > 0) { @@ -71,9 +71,9 @@ ExecSetOp(SetOpState *node) ExecClearTuple(resultTupleSlot); /* - * Absorb groups of duplicate tuples, counting them, and saving the - * first of each group as a possible return value. At the end of each - * group, decide whether to return anything. + * Absorb groups of duplicate tuples, counting them, and saving the first + * of each group as a possible return value. At the end of each group, + * decide whether to return anything. * * We assume that the tuples arrive in sorted order so we can detect * duplicates easily. @@ -177,8 +177,8 @@ ExecSetOp(SetOpState *node) else { /* - * Current tuple is member of same group as resultTuple. Count - * it in the appropriate counter. + * Current tuple is member of same group as resultTuple. Count it + * in the appropriate counter. */ int flag; bool isNull; @@ -232,8 +232,8 @@ ExecInitSetOp(SetOp *node, EState *estate) * Miscellaneous initialization * * SetOp nodes have no ExprContext initialization because they never call - * ExecQual or ExecProject. But they do need a per-tuple memory - * context anyway for calling execTuplesMatch. + * ExecQual or ExecProject. But they do need a per-tuple memory context + * anyway for calling execTuplesMatch. 
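/*
 * Illustrative sketch (editor's addition, not part of the diff).  The
 * ExecSetOp comments above describe absorbing groups of duplicate tuples
 * from sorted input, counting how many came from each side (the "flag"
 * column), and deciding at each group boundary what to emit.  The rule shown
 * here is INTERSECT-like (emit one copy when both sides saw the value); the
 * Row layout is a hypothetical stand-in for the real tuples.
 */
#include <stdio.h>

typedef struct
{
	int			value;
	int			flag;			/* 0 = left input, 1 = right input */
} Row;

static void
setop_intersect(const Row *rows, int n)
{
	int			i = 0;

	while (i < n)
	{
		int			value = rows[i].value;
		int			counts[2] = {0, 0};

		/* absorb the whole group of equal values, counting per input */
		while (i < n && rows[i].value == value)
		{
			counts[rows[i].flag]++;
			i++;
		}

		/* end of group: decide whether to return anything */
		if (counts[0] > 0 && counts[1] > 0)
			printf("%d\n", value);
	}
}

int
main(void)
{
	Row			in[] = {{1, 0}, {1, 1}, {2, 0}, {3, 0}, {3, 1}, {3, 1}};

	setop_intersect(in, 6);		/* prints 1 and 3 */
	return 0;
}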
*/ setopstate->tempContext = AllocSetContextCreate(CurrentMemoryContext, @@ -255,8 +255,8 @@ ExecInitSetOp(SetOp *node, EState *estate) outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate); /* - * setop nodes do no projections, so initialize projection info for - * this node appropriately + * setop nodes do no projections, so initialize projection info for this + * node appropriately */ ExecAssignResultTypeFromOuterPlan(&setopstate->ps); setopstate->ps.ps_ProjInfo = NULL; diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c index ef025374149..d3e4fb5e0b3 100644 --- a/src/backend/executor/nodeSort.c +++ b/src/backend/executor/nodeSort.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.50 2005/03/16 21:38:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.51 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -56,9 +56,8 @@ ExecSort(SortState *node) tuplesortstate = (Tuplesortstate *) node->tuplesortstate; /* - * If first time through, read all tuples from outer plan and pass - * them to tuplesort.c. Subsequent calls just fetch tuples from - * tuplesort. + * If first time through, read all tuples from outer plan and pass them to + * tuplesort.c. Subsequent calls just fetch tuples from tuplesort. */ if (!node->sort_Done) @@ -71,8 +70,8 @@ ExecSort(SortState *node) "sorting subplan"); /* - * Want to scan subplan in the forward direction while creating - * the sorted data. + * Want to scan subplan in the forward direction while creating the + * sorted data. */ estate->es_direction = ForwardScanDirection; @@ -191,8 +190,8 @@ ExecInitSort(Sort *node, EState *estate) outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate); /* - * initialize tuple type. no need to initialize projection info - * because this node doesn't do projections. + * initialize tuple type. no need to initialize projection info because + * this node doesn't do projections. */ ExecAssignResultTypeFromOuterPlan(&sortstate->ss.ps); ExecAssignScanTypeFromOuterPlan(&sortstate->ss); @@ -286,9 +285,9 @@ void ExecReScanSort(SortState *node, ExprContext *exprCtxt) { /* - * If we haven't sorted yet, just return. If outerplan' chgParam is - * not NULL then it will be re-scanned by ExecProcNode, else - no - * reason to re-scan it at all. + * If we haven't sorted yet, just return. If outerplan' chgParam is not + * NULL then it will be re-scanned by ExecProcNode, else - no reason to + * re-scan it at all. */ if (!node->sort_Done) return; @@ -296,8 +295,8 @@ ExecReScanSort(SortState *node, ExprContext *exprCtxt) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); /* - * If subnode is to be rescanned then we forget previous sort results; - * we have to re-read the subplan and re-sort. + * If subnode is to be rescanned then we forget previous sort results; we + * have to re-read the subplan and re-sort. * * Otherwise we can just rewind and rescan the sorted output. 
*/ diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 5bed87aea9b..0e7b6df7225 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.69 2005/05/06 17:24:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -86,15 +86,15 @@ ExecHashSubPlan(SubPlanState *node, elog(ERROR, "hashed subplan with direct correlation not supported"); /* - * If first time through or we need to rescan the subplan, build the - * hash table. + * If first time through or we need to rescan the subplan, build the hash + * table. */ if (node->hashtable == NULL || planstate->chgParam != NULL) buildSubPlanHash(node); /* - * The result for an empty subplan is always FALSE; no need to - * evaluate lefthand side. + * The result for an empty subplan is always FALSE; no need to evaluate + * lefthand side. */ *isNull = false; if (!node->havehashrows && !node->havenullrows) @@ -108,34 +108,32 @@ ExecHashSubPlan(SubPlanState *node, slot = ExecProject(node->projLeft, NULL); /* - * Note: because we are typically called in a per-tuple context, we - * have to explicitly clear the projected tuple before returning. - * Otherwise, we'll have a double-free situation: the per-tuple - * context will probably be reset before we're called again, and then - * the tuple slot will think it still needs to free the tuple. + * Note: because we are typically called in a per-tuple context, we have + * to explicitly clear the projected tuple before returning. Otherwise, + * we'll have a double-free situation: the per-tuple context will probably + * be reset before we're called again, and then the tuple slot will think + * it still needs to free the tuple. */ /* - * Since the hashtable routines will use innerecontext's per-tuple - * memory as working memory, be sure to reset it for each tuple. + * Since the hashtable routines will use innerecontext's per-tuple memory + * as working memory, be sure to reset it for each tuple. */ ResetExprContext(innerecontext); /* - * If the LHS is all non-null, probe for an exact match in the main - * hash table. If we find one, the result is TRUE. Otherwise, scan - * the partly-null table to see if there are any rows that aren't - * provably unequal to the LHS; if so, the result is UNKNOWN. (We - * skip that part if we don't care about UNKNOWN.) Otherwise, the - * result is FALSE. + * If the LHS is all non-null, probe for an exact match in the main hash + * table. If we find one, the result is TRUE. Otherwise, scan the + * partly-null table to see if there are any rows that aren't provably + * unequal to the LHS; if so, the result is UNKNOWN. (We skip that part + * if we don't care about UNKNOWN.) Otherwise, the result is FALSE. * - * Note: the reason we can avoid a full scan of the main hash table is - * that the combining operators are assumed never to yield NULL when - * both inputs are non-null. If they were to do so, we might need to - * produce UNKNOWN instead of FALSE because of an UNKNOWN result in - * comparing the LHS to some main-table entry --- which is a - * comparison we will not even make, unless there's a chance match of - * hash keys. 
+ * Note: the reason we can avoid a full scan of the main hash table is that + * the combining operators are assumed never to yield NULL when both + * inputs are non-null. If they were to do so, we might need to produce + * UNKNOWN instead of FALSE because of an UNKNOWN result in comparing the + * LHS to some main-table entry --- which is a comparison we will not even + * make, unless there's a chance match of hash keys. */ if (slotNoNulls(slot)) { @@ -157,14 +155,14 @@ ExecHashSubPlan(SubPlanState *node, } /* - * When the LHS is partly or wholly NULL, we can never return TRUE. If - * we don't care about UNKNOWN, just return FALSE. Otherwise, if the - * LHS is wholly NULL, immediately return UNKNOWN. (Since the - * combining operators are strict, the result could only be FALSE if - * the sub-select were empty, but we already handled that case.) - * Otherwise, we must scan both the main and partly-null tables to see - * if there are any rows that aren't provably unequal to the LHS; if - * so, the result is UNKNOWN. Otherwise, the result is FALSE. + * When the LHS is partly or wholly NULL, we can never return TRUE. If we + * don't care about UNKNOWN, just return FALSE. Otherwise, if the LHS is + * wholly NULL, immediately return UNKNOWN. (Since the combining + * operators are strict, the result could only be FALSE if the sub-select + * were empty, but we already handled that case.) Otherwise, we must scan + * both the main and partly-null tables to see if there are any rows that + * aren't provably unequal to the LHS; if so, the result is UNKNOWN. + * Otherwise, the result is FALSE. */ if (node->hashnulls == NULL) { @@ -217,9 +215,9 @@ ExecScanSubPlan(SubPlanState *node, ArrayBuildState *astate = NULL; /* - * We are probably in a short-lived expression-evaluation context. - * Switch to the child plan's per-query context for manipulating its - * chgParam, calling ExecProcNode on it, etc. + * We are probably in a short-lived expression-evaluation context. Switch + * to the child plan's per-query context for manipulating its chgParam, + * calling ExecProcNode on it, etc. */ oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt); @@ -245,24 +243,23 @@ ExecScanSubPlan(SubPlanState *node, ExecReScan(planstate, NULL); /* - * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the - * result is boolean as are the results of the combining operators. We - * combine results within a tuple (if there are multiple columns) - * using OR semantics if "useOr" is true, AND semantics if not. We - * then combine results across tuples (if the subplan produces more - * than one) using OR semantics for ANY_SUBLINK or AND semantics for - * ALL_SUBLINK. (MULTIEXPR_SUBLINK doesn't allow multiple tuples from - * the subplan.) NULL results from the combining operators are handled - * according to the usual SQL semantics for OR and AND. The result - * for no input tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, - * NULL for MULTIEXPR_SUBLINK. + * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the result + * is boolean as are the results of the combining operators. We combine + * results within a tuple (if there are multiple columns) using OR + * semantics if "useOr" is true, AND semantics if not. We then combine + * results across tuples (if the subplan produces more than one) using OR + * semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK. + * (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.) 
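The ExecHashSubPlan comments above enumerate how a hashed IN reaches each of its three answers: an exact match in the main table yields TRUE; otherwise a stored row that is not provably unequal to the LHS leaves the answer UNKNOWN, but only if the caller distinguishes UNKNOWN from FALSE; and a partly or wholly NULL LHS can never yield TRUE. A small self-contained sketch of just that decision logic, with hypothetical boolean flags standing in for the real hash-table probes (the empty-subplan case, always FALSE, is assumed handled earlier, as in the hunk above):

#include <stdio.h>
#include <stdbool.h>

typedef enum {TV_FALSE, TV_TRUE, TV_UNKNOWN} TriValue;

/*
 * Decide "lhs IN (hashed subplan)" along the lines of the comments above.
 * The booleans are hypothetical stand-ins for the actual probes.
 */
static TriValue
hashed_in_result(bool lhs_has_null,		/* at least one LHS column is NULL */
				 bool lhs_all_null,		/* every LHS column is NULL */
				 bool exact_match,		/* LHS found in the main hash table */
				 bool partial_match,	/* some stored row not provably unequal */
				 bool want_unknown)		/* caller distinguishes FALSE/UNKNOWN */
{
	if (!lhs_has_null)
	{
		if (exact_match)
			return TV_TRUE;
		if (want_unknown && partial_match)
			return TV_UNKNOWN;
		return TV_FALSE;
	}

	/* A partly or wholly NULL LHS can never produce TRUE. */
	if (!want_unknown)
		return TV_FALSE;
	if (lhs_all_null)
		return TV_UNKNOWN;
	return partial_match ? TV_UNKNOWN : TV_FALSE;
}

int
main(void)
{
	/* non-null LHS with an exact match -> TRUE (prints 1) */
	printf("%d\n", hashed_in_result(false, false, true, false, true));
	/* partly-null LHS, nothing comparable stored -> FALSE (prints 0) */
	printf("%d\n", hashed_in_result(true, false, false, false, true));
	return 0;
}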
+ * NULL results from the combining operators are handled according to the + * usual SQL semantics for OR and AND. The result for no input tuples is + * FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for + * MULTIEXPR_SUBLINK. * - * For EXPR_SUBLINK we require the subplan to produce no more than one - * tuple, else an error is raised. For ARRAY_SUBLINK we allow the - * subplan to produce more than one tuple. In either case, if zero - * tuples are produced, we return NULL. Assuming we get a tuple, we - * just use its first column (there can be only one non-junk column in - * this case). + * For EXPR_SUBLINK we require the subplan to produce no more than one tuple, + * else an error is raised. For ARRAY_SUBLINK we allow the subplan to + * produce more than one tuple. In either case, if zero tuples are + * produced, we return NULL. Assuming we get a tuple, we just use its + * first column (there can be only one non-junk column in this case). */ result = BoolGetDatum(subLinkType == ALL_SUBLINK); *isNull = false; @@ -294,12 +291,12 @@ ExecScanSubPlan(SubPlanState *node, found = true; /* - * We need to copy the subplan's tuple in case the result is - * of pass-by-ref type --- our return value will point into - * this copied tuple! Can't use the subplan's instance of the - * tuple since it won't still be valid after next - * ExecProcNode() call. node->curTuple keeps track of the - * copied tuple for eventual freeing. + * We need to copy the subplan's tuple in case the result is of + * pass-by-ref type --- our return value will point into this + * copied tuple! Can't use the subplan's instance of the tuple + * since it won't still be valid after next ExecProcNode() call. + * node->curTuple keeps track of the copied tuple for eventual + * freeing. */ MemoryContextSwitchTo(econtext->ecxt_per_query_memory); if (node->curTuple) @@ -350,8 +347,7 @@ ExecScanSubPlan(SubPlanState *node, bool expnull; /* - * Load up the Param representing this column of the - * sub-select. + * Load up the Param representing this column of the sub-select. */ prmdata = &(econtext->ecxt_param_exec_vals[paramid]); Assert(prmdata->execPlan == NULL); @@ -436,8 +432,8 @@ ExecScanSubPlan(SubPlanState *node, { /* * deal with empty subplan result. result/isNull were previously - * initialized correctly for all sublink types except EXPR, ARRAY, - * and MULTIEXPR; for those, return NULL. + * initialized correctly for all sublink types except EXPR, ARRAY, and + * MULTIEXPR; for those, return NULL. */ if (subLinkType == EXPR_SUBLINK || subLinkType == ARRAY_SUBLINK || @@ -478,19 +474,19 @@ buildSubPlanHash(SubPlanState *node) Assert(!subplan->useOr); /* - * If we already had any hash tables, destroy 'em; then create empty - * hash table(s). + * If we already had any hash tables, destroy 'em; then create empty hash + * table(s). * * If we need to distinguish accurately between FALSE and UNKNOWN (i.e., - * NULL) results of the IN operation, then we have to store subplan - * output rows that are partly or wholly NULL. We store such rows in - * a separate hash table that we expect will be much smaller than the - * main table. (We can use hashing to eliminate partly-null rows that - * are not distinct. We keep them separate to minimize the cost of - * the inevitable full-table searches; see findPartialMatch.) + * NULL) results of the IN operation, then we have to store subplan output + * rows that are partly or wholly NULL. We store such rows in a separate + * hash table that we expect will be much smaller than the main table. 
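The ExecScanSubPlan comment completed above also pins down the row-combining rules: per-row boolean results are folded with OR semantics for ANY_SUBLINK and AND semantics for ALL_SUBLINK, NULLs follow the usual SQL three-valued rules, and an empty subplan yields FALSE for ANY and TRUE for ALL. A standalone sketch of that fold over precomputed per-row results (names are invented; the real code also combines the columns within each row first, and can stop early):

#include <stdio.h>
#include <stdbool.h>

typedef enum {TV_FALSE, TV_TRUE, TV_UNKNOWN} TriValue;

/* SQL three-valued OR and AND. */
static TriValue
tv_or(TriValue a, TriValue b)
{
	if (a == TV_TRUE || b == TV_TRUE)
		return TV_TRUE;
	if (a == TV_UNKNOWN || b == TV_UNKNOWN)
		return TV_UNKNOWN;
	return TV_FALSE;
}

static TriValue
tv_and(TriValue a, TriValue b)
{
	if (a == TV_FALSE || b == TV_FALSE)
		return TV_FALSE;
	if (a == TV_UNKNOWN || b == TV_UNKNOWN)
		return TV_UNKNOWN;
	return TV_TRUE;
}

/*
 * Fold per-row comparison results for ANY (use_or = true) or ALL
 * (use_or = false).  With zero rows the result is FALSE for ANY and
 * TRUE for ALL, exactly as the comment above states.
 */
static TriValue
combine_rows(const TriValue *rows, int nrows, bool use_or)
{
	TriValue	result = use_or ? TV_FALSE : TV_TRUE;
	int			i;

	for (i = 0; i < nrows; i++)
		result = use_or ? tv_or(result, rows[i]) : tv_and(result, rows[i]);
	return result;
}

int
main(void)
{
	TriValue	rows[] = {TV_FALSE, TV_UNKNOWN, TV_TRUE};

	printf("ANY -> %d, ALL -> %d\n",
		   combine_rows(rows, 3, true),		/* TRUE */
		   combine_rows(rows, 3, false));	/* FALSE */
	return 0;
}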
+ * (We can use hashing to eliminate partly-null rows that are not + * distinct. We keep them separate to minimize the cost of the inevitable + * full-table searches; see findPartialMatch.) * - * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't - * need to store subplan output rows that contain NULL. + * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't need + * to store subplan output rows that contain NULL. */ MemoryContextReset(node->tablecxt); node->hashtable = NULL; @@ -532,9 +528,8 @@ buildSubPlanHash(SubPlanState *node) } /* - * We are probably in a short-lived expression-evaluation context. - * Switch to the child plan's per-query context for calling - * ExecProcNode. + * We are probably in a short-lived expression-evaluation context. Switch + * to the child plan's per-query context for calling ExecProcNode. */ oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt); @@ -544,9 +539,8 @@ buildSubPlanHash(SubPlanState *node) ExecReScan(planstate, NULL); /* - * Scan the subplan and load the hash table(s). Note that when there - * are duplicate rows coming out of the sub-select, only one copy is - * stored. + * Scan the subplan and load the hash table(s). Note that when there are + * duplicate rows coming out of the sub-select, only one copy is stored. */ for (slot = ExecProcNode(planstate); !TupIsNull(slot); @@ -557,8 +551,8 @@ buildSubPlanHash(SubPlanState *node) bool isnew; /* - * Load up the Params representing the raw sub-select outputs, - * then form the projection tuple to store in the hashtable. + * Load up the Params representing the raw sub-select outputs, then + * form the projection tuple to store in the hashtable. */ foreach(plst, subplan->paramIds) { @@ -588,18 +582,18 @@ buildSubPlanHash(SubPlanState *node) } /* - * Reset innerecontext after each inner tuple to free any memory - * used in hash computation or comparison routines. + * Reset innerecontext after each inner tuple to free any memory used + * in hash computation or comparison routines. */ ResetExprContext(innerecontext); } /* - * Since the projected tuples are in the sub-query's context and not - * the main context, we'd better clear the tuple slot before there's - * any chance of a reset of the sub-query's context. Else we will - * have the potential for a double free attempt. (XXX possibly - * no longer needed, but can't hurt.) + * Since the projected tuples are in the sub-query's context and not the + * main context, we'd better clear the tuple slot before there's any + * chance of a reset of the sub-query's context. Else we will have the + * potential for a double free attempt. (XXX possibly no longer needed, + * but can't hurt.) */ ExecClearTuple(node->projRight->pi_slot); @@ -710,10 +704,10 @@ ExecInitSubPlan(SubPlanState *node, EState *estate) /* * create an EState for the subplan * - * The subquery needs its own EState because it has its own rangetable. - * It shares our Param ID space, however. XXX if rangetable access - * were done differently, the subquery could share our EState, which - * would eliminate some thrashing about in this module... + * The subquery needs its own EState because it has its own rangetable. It + * shares our Param ID space, however. XXX if rangetable access were done + * differently, the subquery could share our EState, which would eliminate + * some thrashing about in this module... 
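Several hunks above lean on the same executor idiom: do each inner tuple's scratch work (hashing, comparisons) in a dedicated per-tuple memory context and reset that context once per tuple, so temporary garbage can never pile up across rows. A toy standalone analogue that uses a fixed bump allocator in place of a real MemoryContext (all names invented):

#include <stdio.h>
#include <string.h>

/* A trivial bump allocator standing in for a per-tuple memory context. */
typedef struct Arena
{
	char		buf[1024];
	size_t		used;
} Arena;

static void *
arena_alloc(Arena *a, size_t size)
{
	void	   *p;

	if (a->used + size > sizeof(a->buf))
		return NULL;			/* a real context would enlarge itself */
	p = a->buf + a->used;
	a->used += size;
	return p;
}

/* The analogue of resetting the context: throw everything away at once. */
static void
arena_reset(Arena *a)
{
	a->used = 0;
}

int
main(void)
{
	Arena		percall = {.used = 0};
	const char *tuples[] = {"alpha", "beta", "gamma"};
	int			i;

	for (i = 0; i < 3; i++)
	{
		/* scratch allocations for this tuple only */
		char	   *copy = arena_alloc(&percall, strlen(tuples[i]) + 1);

		if (copy == NULL)
			return 1;			/* out of per-tuple space (can't happen here) */
		strcpy(copy, tuples[i]);
		printf("processed %s (arena used: %zu bytes)\n", copy, percall.used);

		arena_reset(&percall);	/* per-tuple cleanup, as in the hunks above */
	}
	return 0;
}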
*/ sp_estate = CreateExecutorState(); node->sub_estate = sp_estate; @@ -739,13 +733,12 @@ ExecInitSubPlan(SubPlanState *node, EState *estate) MemoryContextSwitchTo(oldcontext); /* - * If this plan is un-correlated or undirect correlated one and want - * to set params for parent plan then mark parameters as needing - * evaluation. + * If this plan is un-correlated or undirect correlated one and want to + * set params for parent plan then mark parameters as needing evaluation. * * Note that in the case of un-correlated subqueries we don't care about - * setting parent->chgParam here: indices take care about it, for - * others - it doesn't matter... + * setting parent->chgParam here: indices take care about it, for others - + * it doesn't matter... */ if (subplan->setParam != NIL) { @@ -761,8 +754,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate) } /* - * If we are going to hash the subquery output, initialize relevant - * stuff. (We don't create the hashtable until needed, though.) + * If we are going to hash the subquery output, initialize relevant stuff. + * (We don't create the hashtable until needed, though.) */ if (subplan->useHashTable) { @@ -794,18 +787,17 @@ ExecInitSubPlan(SubPlanState *node, EState *estate) /* * We use ExecProject to evaluate the lefthand and righthand - * expression lists and form tuples. (You might think that we - * could use the sub-select's output tuples directly, but that is - * not the case if we had to insert any run-time coercions of the - * sub-select's output datatypes; anyway this avoids storing any - * resjunk columns that might be in the sub-select's output.) Run - * through the combining expressions to build tlists for the - * lefthand and righthand sides. We need both the ExprState list - * (for ExecProject) and the underlying parse Exprs (for - * ExecTypeFromTL). + * expression lists and form tuples. (You might think that we could + * use the sub-select's output tuples directly, but that is not the + * case if we had to insert any run-time coercions of the sub-select's + * output datatypes; anyway this avoids storing any resjunk columns + * that might be in the sub-select's output.) Run through the + * combining expressions to build tlists for the lefthand and + * righthand sides. We need both the ExprState list (for ExecProject) + * and the underlying parse Exprs (for ExecTypeFromTL). * - * We also extract the combining operators themselves to initialize - * the equality and hashing functions for the hash tables. + * We also extract the combining operators themselves to initialize the + * equality and hashing functions for the hash tables. */ lefttlist = righttlist = NIL; leftptlist = rightptlist = NIL; @@ -869,21 +861,21 @@ ExecInitSubPlan(SubPlanState *node, EState *estate) } /* - * Create a tupletable to hold these tuples. (Note: we never - * bother to free the tupletable explicitly; that's okay because - * it will never store raw disk tuples that might have associated - * buffer pins. The only resource involved is memory, which will - * be cleaned up by freeing the query context.) + * Create a tupletable to hold these tuples. (Note: we never bother + * to free the tupletable explicitly; that's okay because it will + * never store raw disk tuples that might have associated buffer pins. + * The only resource involved is memory, which will be cleaned up by + * freeing the query context.) */ tupTable = ExecCreateTupleTable(2); /* - * Construct tupdescs, slots and projection nodes for left and - * right sides. 
The lefthand expressions will be evaluated in the - * parent plan node's exprcontext, which we don't have access to - * here. Fortunately we can just pass NULL for now and fill it in - * later (hack alert!). The righthand expressions will be - * evaluated in our own innerecontext. + * Construct tupdescs, slots and projection nodes for left and right + * sides. The lefthand expressions will be evaluated in the parent + * plan node's exprcontext, which we don't have access to here. + * Fortunately we can just pass NULL for now and fill it in later + * (hack alert!). The righthand expressions will be evaluated in our + * own innerecontext. */ tupDesc = ExecTypeFromTL(leftptlist, false); slot = ExecAllocTableSlot(tupTable); @@ -983,11 +975,10 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext) found = true; /* - * We need to copy the subplan's tuple into our own context, in - * case any of the params are pass-by-ref type --- the pointers - * stored in the param structs will point at this copied tuple! - * node->curTuple keeps track of the copied tuple for eventual - * freeing. + * We need to copy the subplan's tuple into our own context, in case + * any of the params are pass-by-ref type --- the pointers stored in + * the param structs will point at this copied tuple! node->curTuple + * keeps track of the copied tuple for eventual freeing. */ MemoryContextSwitchTo(econtext->ecxt_per_query_memory); if (node->curTuple) diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c index 90e59f90f4d..9b1bd251435 100644 --- a/src/backend/executor/nodeSubqueryscan.c +++ b/src/backend/executor/nodeSubqueryscan.c @@ -12,7 +12,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.26 2005/05/22 22:30:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.27 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,13 +62,13 @@ SubqueryNext(SubqueryScanState *node) direction = estate->es_direction; /* - * We need not support EvalPlanQual here, since we are not scanning a - * real relation. + * We need not support EvalPlanQual here, since we are not scanning a real + * relation. */ /* - * Get the next tuple from the sub-query. We have to be careful to - * run it in its appropriate memory context. + * Get the next tuple from the sub-query. We have to be careful to run it + * in its appropriate memory context. */ node->sss_SubEState->es_direction = direction; @@ -170,11 +170,10 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate) ExecCheckRTPerms(rte->subquery->rtable); /* - * The subquery needs its own EState because it has its own - * rangetable. It shares our Param ID space, however. XXX if - * rangetable access were done differently, the subquery could share - * our EState, which would eliminate some thrashing about in this - * module... + * The subquery needs its own EState because it has its own rangetable. It + * shares our Param ID space, however. XXX if rangetable access were done + * differently, the subquery could share our EState, which would eliminate + * some thrashing about in this module... 
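ExecScanSubPlan and ExecSetParamPlan above both stress the same rule: before keeping a pointer to a pass-by-reference result, copy the result into storage that outlives the loop, and remember the copy (node->curTuple) so it can be freed when it is replaced. A stripped-down standalone version of that bookkeeping, with hypothetical names and strings standing in for tuples:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct ParamSlot
{
	const char *value;			/* points into cur_copy, never into the source */
} ParamSlot;

typedef struct NodeState
{
	char	   *cur_copy;		/* analogue of node->curTuple */
} NodeState;

/* Publish a freshly produced row into the param, copying it first. */
static void
set_param_from_row(NodeState *node, ParamSlot *param, const char *row)
{
	char	   *copy = malloc(strlen(row) + 1);

	strcpy(copy, row);
	free(node->cur_copy);		/* drop the previous copy, if any */
	node->cur_copy = copy;
	param->value = copy;		/* the param points at our copy, not at "row" */
}

int
main(void)
{
	NodeState	node = {NULL};
	ParamSlot	param = {NULL};
	char		scratch[32];
	int			i;

	for (i = 0; i < 3; i++)
	{
		/* the "subplan output" lives in scratch and is overwritten each time */
		snprintf(scratch, sizeof(scratch), "row-%d", i);
		set_param_from_row(&node, &param, scratch);
	}
	/* scratch has long since been reused, but the param is still valid */
	printf("param now points at: %s\n", param.value);
	free(node.cur_copy);
	return 0;
}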
*/ sp_estate = CreateExecutorState(); subquerystate->sss_SubEState = sp_estate; @@ -246,7 +245,7 @@ ExecEndSubqueryScan(SubqueryScanState *node) * clean out the upper tuple table */ ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */ + node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */ /* * close down subquery @@ -278,9 +277,8 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt) /* * ExecReScan doesn't know about my subplan, so I have to do - * changed-parameter signaling myself. This is just as well, because - * the subplan has its own memory context in which its chgParam state - * lives. + * changed-parameter signaling myself. This is just as well, because the + * subplan has its own memory context in which its chgParam state lives. */ if (node->ss.ps.chgParam != NULL) UpdateChangedParamSet(node->subplan, node->ss.ps.chgParam); diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c index 4cc1e4df148..c8708f58311 100644 --- a/src/backend/executor/nodeTidscan.c +++ b/src/backend/executor/nodeTidscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.42 2005/09/22 15:09:51 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.43 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -126,8 +126,8 @@ TidNext(TidScanState *node) return slot; /* return empty slot */ /* - * XXX shouldn't we check here to make sure tuple matches TID - * list? In runtime-key case this is not certain, is it? + * XXX shouldn't we check here to make sure tuple matches TID list? In + * runtime-key case this is not certain, is it? */ ExecStoreTuple(estate->es_evTuple[scanrelid - 1], @@ -150,9 +150,9 @@ TidNext(TidScanState *node) tuple = &(node->tss_htup); /* - * ok, now that we have what we need, fetch an tid tuple. if scanning - * this tid succeeded then return the appropriate heap tuple.. else - * return NULL. + * ok, now that we have what we need, fetch an tid tuple. if scanning this + * tid succeeded then return the appropriate heap tuple.. else return + * NULL. */ bBackward = ScanDirectionIsBackward(direction); if (bBackward) @@ -184,10 +184,10 @@ TidNext(TidScanState *node) /* * store the scanned tuple in the scan tuple slot of the scan - * state. Eventually we will only do this and not return a - * tuple. Note: we pass 'false' because tuples returned by - * amgetnext are pointers onto disk pages and were not created - * with palloc() and so should not be pfree()'d. + * state. Eventually we will only do this and not return a tuple. + * Note: we pass 'false' because tuples returned by amgetnext are + * pointers onto disk pages and were not created with palloc() and + * so should not be pfree()'d. */ ExecStoreTuple(tuple, /* tuple to store */ slot, /* slot to store in */ @@ -196,8 +196,7 @@ TidNext(TidScanState *node) /* * At this point we have an extra pin on the buffer, because - * ExecStoreTuple incremented the pin count. Drop our local - * pin. + * ExecStoreTuple incremented the pin count. Drop our local pin. */ ReleaseBuffer(buffer); @@ -229,8 +228,8 @@ TidNext(TidScanState *node) } /* - * if we get here it means the tid scan failed so we are at the end of - * the scan.. + * if we get here it means the tid scan failed so we are at the end of the + * scan.. 
*/ return ExecClearTuple(slot); } @@ -420,8 +419,8 @@ ExecInitTidScan(TidScan *node, EState *estate) tidstate->tss_TidPtr = -1; /* - * get the range table and direction information from the execution - * state (these are needed to open the relations). + * get the range table and direction information from the execution state + * (these are needed to open the relations). */ rangeTable = estate->es_range_table; diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index b00a572e14c..ab3879d7cc6 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.47 2005/05/06 17:24:54 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -56,10 +56,10 @@ ExecUnique(UniqueState *node) * now loop, returning only non-duplicate tuples. We assume that the * tuples arrive in sorted order so we can detect duplicates easily. * - * We return the first tuple from each group of duplicates (or the last - * tuple of each group, when moving backwards). At either end of the - * subplan, clear the result slot so that we correctly return the - * first/last tuple when reversing direction. + * We return the first tuple from each group of duplicates (or the last tuple + * of each group, when moving backwards). At either end of the subplan, + * clear the result slot so that we correctly return the first/last tuple + * when reversing direction. */ for (;;) { @@ -81,9 +81,9 @@ ExecUnique(UniqueState *node) break; /* - * Else test if the new tuple and the previously returned tuple - * match. If so then we loop back and fetch another new tuple - * from the subplan. + * Else test if the new tuple and the previously returned tuple match. + * If so then we loop back and fetch another new tuple from the + * subplan. */ if (!execTuplesMatch(slot, resultTupleSlot, plannode->numCols, plannode->uniqColIdx, @@ -93,10 +93,10 @@ ExecUnique(UniqueState *node) } /* - * We have a new tuple different from the previous saved tuple (if - * any). Save it and return it. We must copy it because the source - * subplan won't guarantee that this source tuple is still accessible - * after fetching the next source tuple. + * We have a new tuple different from the previous saved tuple (if any). + * Save it and return it. We must copy it because the source subplan + * won't guarantee that this source tuple is still accessible after + * fetching the next source tuple. */ return ExecCopySlot(resultTupleSlot, slot); } @@ -123,9 +123,9 @@ ExecInitUnique(Unique *node, EState *estate) /* * Miscellaneous initialization * - * Unique nodes have no ExprContext initialization because they never - * call ExecQual or ExecProject. But they do need a per-tuple memory - * context anyway for calling execTuplesMatch. + * Unique nodes have no ExprContext initialization because they never call + * ExecQual or ExecProject. But they do need a per-tuple memory context + * anyway for calling execTuplesMatch. 
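The ExecUnique comments above describe the textbook dedup-over-sorted-input loop: compare each incoming row against the last row returned, skip it on a match, and otherwise copy it before returning it, because the producer may reuse its buffer. A self-contained sketch over a sorted array of strings (names invented):

#include <stdio.h>
#include <string.h>

/*
 * Emit each distinct value from sorted input exactly once.  "last" plays
 * the role of the result slot: a private copy of the previously returned
 * value, compared against every new candidate.
 */
static void
unique_sorted(const char **rows, int nrows)
{
	char		last[64];
	int			have_last = 0;
	int			i;

	for (i = 0; i < nrows; i++)
	{
		if (have_last && strcmp(rows[i], last) == 0)
			continue;			/* duplicate of the previous result: skip it */

		/* copy before returning, since the source row may not stay valid */
		snprintf(last, sizeof(last), "%s", rows[i]);
		have_last = 1;
		printf("%s\n", last);
	}
}

int
main(void)
{
	const char *rows[] = {"apple", "apple", "pear", "pear", "pear", "plum"};

	unique_sorted(rows, 6);
	return 0;
}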
*/ uniquestate->tempContext = AllocSetContextCreate(CurrentMemoryContext, @@ -147,8 +147,8 @@ ExecInitUnique(Unique *node, EState *estate) outerPlanState(uniquestate) = ExecInitNode(outerPlan(node), estate); /* - * unique nodes do no projections, so initialize projection info for - * this node appropriately + * unique nodes do no projections, so initialize projection info for this + * node appropriately */ ExecAssignResultTypeFromOuterPlan(&uniquestate->ps); uniquestate->ps.ps_ProjInfo = NULL; diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index ff1b8932ea1..c4aef41a8d0 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.142 2005/10/01 18:43:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.143 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -38,11 +38,11 @@ static int _SPI_curid = -1; static void _SPI_prepare_plan(const char *src, _SPI_plan *plan); static int _SPI_execute_plan(_SPI_plan *plan, - Datum *Values, const char *Nulls, - Snapshot snapshot, Snapshot crosscheck_snapshot, - bool read_only, long tcount); + Datum *Values, const char *Nulls, + Snapshot snapshot, Snapshot crosscheck_snapshot, + bool read_only, long tcount); -static int _SPI_pquery(QueryDesc *queryDesc, long tcount); +static int _SPI_pquery(QueryDesc *queryDesc, long tcount); static void _SPI_error_callback(void *arg); @@ -66,8 +66,8 @@ SPI_connect(void) int newdepth; /* - * When procedure called by Executor _SPI_curid expected to be equal - * to _SPI_connected + * When procedure called by Executor _SPI_curid expected to be equal to + * _SPI_connected */ if (_SPI_curid != _SPI_connected) return SPI_ERROR_CONNECT; @@ -106,28 +106,28 @@ SPI_connect(void) _SPI_current->processed = 0; _SPI_current->lastoid = InvalidOid; _SPI_current->tuptable = NULL; - _SPI_current->procCxt = NULL; /* in case we fail to create 'em */ + _SPI_current->procCxt = NULL; /* in case we fail to create 'em */ _SPI_current->execCxt = NULL; _SPI_current->connectSubid = GetCurrentSubTransactionId(); /* * Create memory contexts for this procedure * - * XXX it would be better to use PortalContext as the parent context, but - * we may not be inside a portal (consider deferred-trigger - * execution). Perhaps CurTransactionContext would do? For now it - * doesn't matter because we clean up explicitly in AtEOSubXact_SPI(). + * XXX it would be better to use PortalContext as the parent context, but we + * may not be inside a portal (consider deferred-trigger execution). + * Perhaps CurTransactionContext would do? For now it doesn't matter + * because we clean up explicitly in AtEOSubXact_SPI(). */ _SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext, "SPI Proc", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); _SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext, "SPI Exec", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); /* ... and switch to procedure's context */ _SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt); @@ -161,9 +161,9 @@ SPI_finish(void) SPI_tuptable = NULL; /* - * After _SPI_begin_call _SPI_connected == _SPI_curid. 
Now we are - * closing connection to SPI and returning to upper Executor and so - * _SPI_connected must be equal to _SPI_curid. + * After _SPI_begin_call _SPI_connected == _SPI_curid. Now we are closing + * connection to SPI and returning to upper Executor and so _SPI_connected + * must be equal to _SPI_curid. */ _SPI_connected--; _SPI_curid--; @@ -182,9 +182,9 @@ void AtEOXact_SPI(bool isCommit) { /* - * Note that memory contexts belonging to SPI stack entries will be - * freed automatically, so we can ignore them here. We just need to - * restore our static variables to initial state. + * Note that memory contexts belonging to SPI stack entries will be freed + * automatically, so we can ignore them here. We just need to restore our + * static variables to initial state. */ if (isCommit && _SPI_connected != -1) ereport(WARNING, @@ -236,8 +236,8 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) /* * Pop the stack entry and reset global variables. Unlike - * SPI_finish(), we don't risk switching to memory contexts that - * might be already gone. + * SPI_finish(), we don't risk switching to memory contexts that might + * be already gone. */ _SPI_connected--; _SPI_curid = _SPI_connected; @@ -560,8 +560,8 @@ SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum, mtuple = heap_formtuple(rel->rd_att, v, n); /* - * copy the identification info of the old tuple: t_ctid, t_self, - * and OID (if any) + * copy the identification info of the old tuple: t_ctid, t_self, and + * OID (if any) */ mtuple->t_data->t_ctid = tuple->t_data->t_ctid; mtuple->t_self = tuple->t_self; @@ -658,8 +658,8 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber) getTypeOutputInfo(typoid, &foutoid, &typisvarlena); /* - * If we have a toasted datum, forcibly detoast it here to avoid - * memory leakage inside the type's output routine. + * If we have a toasted datum, forcibly detoast it here to avoid memory + * leakage inside the type's output routine. */ if (typisvarlena) val = PointerGetDatum(PG_DETOAST_DATUM(origval)); @@ -755,7 +755,7 @@ SPI_getrelname(Relation rel) char * SPI_getnspname(Relation rel) { - return get_namespace_name(RelationGetNamespace(rel)); + return get_namespace_name(RelationGetNamespace(rel)); } void * @@ -939,8 +939,8 @@ SPI_cursor_open(const char *name, void *plan, portal->cursorOptions |= CURSOR_OPT_NO_SCROLL; /* - * Set up the snapshot to use. (PortalStart will do CopySnapshot, - * so we skip that here.) + * Set up the snapshot to use. (PortalStart will do CopySnapshot, so we + * skip that here.) */ if (read_only) snapshot = ActiveSnapshot; @@ -1214,7 +1214,7 @@ spi_printtup(TupleTableSlot *slot, DestReceiver *self) tuptable->free = 256; tuptable->alloced += tuptable->free; tuptable->vals = (HeapTuple *) repalloc(tuptable->vals, - tuptable->alloced * sizeof(HeapTuple)); + tuptable->alloced * sizeof(HeapTuple)); } tuptable->vals[tuptable->alloced - tuptable->free] = @@ -1247,9 +1247,9 @@ _SPI_prepare_plan(const char *src, _SPI_plan *plan) int nargs = plan->nargs; /* - * Increment CommandCounter to see changes made by now. We must do - * this to be sure of seeing any schema changes made by a just-preceding - * SPI command. (But we don't bother advancing the snapshot, since the + * Increment CommandCounter to see changes made by now. We must do this + * to be sure of seeing any schema changes made by a just-preceding SPI + * command. (But we don't bother advancing the snapshot, since the * planner generally operates under SnapshotNow rules anyway.) 
*/ CommandCounterIncrement(); @@ -1270,9 +1270,9 @@ _SPI_prepare_plan(const char *src, _SPI_plan *plan) /* * Do parse analysis and rule rewrite for each raw parsetree. * - * We save the querytrees from each raw parsetree as a separate - * sublist. This allows _SPI_execute_plan() to know where the - * boundaries between original queries fall. + * We save the querytrees from each raw parsetree as a separate sublist. + * This allows _SPI_execute_plan() to know where the boundaries between + * original queries fall. */ query_list_list = NIL; plan_list = NIL; @@ -1316,7 +1316,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls, volatile int res = 0; volatile uint32 my_processed = 0; volatile Oid my_lastoid = InvalidOid; - SPITupleTable * volatile my_tuptable = NULL; + SPITupleTable *volatile my_tuptable = NULL; Snapshot saveActiveSnapshot; /* Be sure to restore ActiveSnapshot on error exit */ @@ -1407,9 +1407,10 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls, if (read_only && !QueryIsReadOnly(queryTree)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /* translator: %s is a SQL statement name */ - errmsg("%s is not allowed in a non-volatile function", - CreateQueryTag(queryTree)))); + /* translator: %s is a SQL statement name */ + errmsg("%s is not allowed in a non-volatile function", + CreateQueryTag(queryTree)))); + /* * If not read-only mode, advance the command counter before * each command. @@ -1462,6 +1463,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls, } FreeSnapshot(ActiveSnapshot); ActiveSnapshot = NULL; + /* * The last canSetTag query sets the auxiliary values returned * to the caller. Be careful to free any tuptables not @@ -1520,10 +1522,10 @@ _SPI_pquery(QueryDesc *queryDesc, long tcount) { case CMD_SELECT: res = SPI_OK_SELECT; - if (queryDesc->parsetree->into) /* select into table? */ + if (queryDesc->parsetree->into) /* select into table? */ { res = SPI_OK_SELINTO; - queryDesc->dest = None_Receiver; /* don't output results */ + queryDesc->dest = None_Receiver; /* don't output results */ } else if (queryDesc->dest->mydest != SPI) { @@ -1589,8 +1591,8 @@ _SPI_error_callback(void *arg) int syntaxerrposition; /* - * If there is a syntax error position, convert to internal syntax - * error; otherwise treat the query as an item of context stack + * If there is a syntax error position, convert to internal syntax error; + * otherwise treat the query as an item of context stack */ syntaxerrposition = geterrposition(); if (syntaxerrposition > 0) @@ -1635,13 +1637,12 @@ _SPI_cursor_operation(Portal portal, bool forward, long count, dest); /* - * Think not to combine this store with the preceding function call. - * If the portal contains calls to functions that use SPI, then - * SPI_stack is likely to move around while the portal runs. When - * control returns, _SPI_current will point to the correct stack - * entry... but the pointer may be different than it was beforehand. - * So we must be sure to re-fetch the pointer after the function call - * completes. + * Think not to combine this store with the preceding function call. If + * the portal contains calls to functions that use SPI, then SPI_stack is + * likely to move around while the portal runs. When control returns, + * _SPI_current will point to the correct stack entry... but the pointer + * may be different than it was beforehand. So we must be sure to re-fetch + * the pointer after the function call completes. 
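The _SPI_cursor_operation comment above is really a warning about pointer invalidation: a call that can grow the SPI stack may move it, so any pointer into the stack must be re-fetched after the call rather than cached across it. A minimal standalone illustration of the safe pattern, keeping an index across a call that may realloc the array (names invented; error handling omitted):

#include <stdio.h>
#include <stdlib.h>

typedef struct Entry
{
	long		processed;
} Entry;

static Entry *stack = NULL;
static int	depth = 0;

/* May grow the array with realloc(), which can move every existing Entry. */
static void
push_entry(void)
{
	stack = realloc(stack, (depth + 1) * sizeof(Entry));
	stack[depth].processed = 0;
	depth++;
}

/* Nested work that happens to push another entry, possibly moving "stack". */
static void
run_nested_work(void)
{
	push_entry();
}

int
main(void)
{
	int			my_slot;

	push_entry();
	my_slot = depth - 1;		/* remember an index, never a raw pointer */

	run_nested_work();			/* the array may have been moved by now */

	/* Re-fetch through the (possibly relocated) array after the call. */
	stack[my_slot].processed = 42;
	printf("processed = %ld, depth = %d\n", stack[my_slot].processed, depth);

	free(stack);
	return 0;
}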
*/ _SPI_current->processed = nfetched; @@ -1738,12 +1739,13 @@ _SPI_copy_plan(_SPI_plan *plan, int location) parentcxt = _SPI_current->procCxt; else if (location == _SPI_CPLAN_TOPCXT) parentcxt = TopMemoryContext; - else /* (this case not currently used) */ + else + /* (this case not currently used) */ parentcxt = CurrentMemoryContext; /* - * Create a memory context for the plan. We don't expect the plan to - * be very large, so use smaller-than-default alloc parameters. + * Create a memory context for the plan. We don't expect the plan to be + * very large, so use smaller-than-default alloc parameters. */ plancxt = AllocSetContextCreate(parentcxt, "SPI Plan", diff --git a/src/backend/lib/dllist.c b/src/backend/lib/dllist.c index 59708bee78c..95802a31eec 100644 --- a/src/backend/lib/dllist.c +++ b/src/backend/lib/dllist.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/lib/dllist.c,v 1.31 2005/01/18 22:59:32 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/lib/dllist.c,v 1.32 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -148,7 +148,7 @@ DLAddHead(Dllist *l, Dlelem *e) e->dle_prev = NULL; l->dll_head = e; - if (l->dll_tail == NULL) /* if this is first element added */ + if (l->dll_tail == NULL) /* if this is first element added */ l->dll_tail = e; } @@ -163,7 +163,7 @@ DLAddTail(Dllist *l, Dlelem *e) e->dle_next = NULL; l->dll_tail = e; - if (l->dll_head == NULL) /* if this is first element added */ + if (l->dll_head == NULL) /* if this is first element added */ l->dll_head = e; } diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c index 49d509e57b7..53e4913b4a8 100644 --- a/src/backend/lib/stringinfo.c +++ b/src/backend/lib/stringinfo.c @@ -9,7 +9,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.41 2004/12/31 21:59:48 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.42 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,8 +106,8 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args) Assert(str != NULL); /* - * If there's hardly any space, don't bother trying, just fail to make - * the caller enlarge the buffer first. + * If there's hardly any space, don't bother trying, just fail to make the + * caller enlarge the buffer first. */ avail = str->maxlen - str->len - 1; if (avail < 16) @@ -115,8 +115,8 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args) /* * Assert check here is to catch buggy vsnprintf that overruns the - * specified buffer length. Solaris 7 in 64-bit mode is an example of - * a platform with such a bug. + * specified buffer length. Solaris 7 in 64-bit mode is an example of a + * platform with such a bug. */ #ifdef USE_ASSERT_CHECKING str->data[str->maxlen - 1] = '\0'; @@ -127,9 +127,9 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args) Assert(str->data[str->maxlen - 1] == '\0'); /* - * Note: some versions of vsnprintf return the number of chars - * actually stored, but at least one returns -1 on failure. Be - * conservative about believing whether the print worked. + * Note: some versions of vsnprintf return the number of chars actually + * stored, but at least one returns -1 on failure. Be conservative about + * believing whether the print worked. 
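The appendStringInfoVA hunks above make two practical points about vsnprintf: don't bother attempting an append when the remaining space is tiny (just fail so the caller enlarges and retries), and treat the return value conservatively, since some old implementations return -1 on overflow instead of the needed length. A standalone sketch of that try/enlarge/retry shape (invented names; this is the general pattern, not the StringInfo API):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

typedef struct Buf
{
	char	   *data;
	int			len;			/* used bytes, not counting trailing '\0' */
	int			maxlen;			/* allocated bytes */
} Buf;

/* Try one formatted append; return false if the caller must enlarge first. */
static bool
try_append(Buf *str, const char *fmt, va_list args)
{
	int			avail = str->maxlen - str->len - 1;
	int			nprinted;

	if (avail < 16)				/* hardly any space: don't even try */
		return false;
	nprinted = vsnprintf(str->data + str->len, avail, fmt, args);

	/* Be conservative: only trust clearly-successful return values. */
	if (nprinted >= 0 && nprinted < avail - 1)
	{
		str->len += nprinted;
		return true;
	}
	return false;
}

static void
append(Buf *str, const char *fmt, ...)
{
	for (;;)
	{
		va_list		args;
		bool		ok;

		va_start(args, fmt);
		ok = try_append(str, fmt, args);
		va_end(args);
		if (ok)
			return;
		/* not enough room: double the buffer and retry */
		str->maxlen *= 2;
		str->data = realloc(str->data, str->maxlen);
	}
}

int
main(void)
{
	Buf			b = {malloc(32), 0, 32};

	b.data[0] = '\0';
	append(&b, "hello %s, %d times", "world", 42);
	printf("%s\n", b.data);
	free(b.data);
	return 0;
}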
*/ if (nprinted >= 0 && nprinted < avail - 1) { @@ -193,8 +193,8 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen) str->len += datalen; /* - * Keep a trailing null in place, even though it's probably useless - * for binary data... + * Keep a trailing null in place, even though it's probably useless for + * binary data... */ str->data[str->len] = '\0'; } @@ -222,9 +222,9 @@ enlargeStringInfo(StringInfo str, int needed) int newlen; /* - * Guard against ridiculous "needed" values, which can occur if we're - * fed bogus data. Without this, we can get an overflow or infinite - * loop in the following. + * Guard against ridiculous "needed" values, which can occur if we're fed + * bogus data. Without this, we can get an overflow or infinite loop in + * the following. */ if (needed < 0 || ((Size) needed) >= (MaxAllocSize - (Size) str->len)) @@ -239,19 +239,18 @@ enlargeStringInfo(StringInfo str, int needed) return; /* got enough space already */ /* - * We don't want to allocate just a little more space with each - * append; for efficiency, double the buffer size each time it - * overflows. Actually, we might need to more than double it if - * 'needed' is big... + * We don't want to allocate just a little more space with each append; + * for efficiency, double the buffer size each time it overflows. + * Actually, we might need to more than double it if 'needed' is big... */ newlen = 2 * str->maxlen; while (needed > newlen) newlen = 2 * newlen; /* - * Clamp to MaxAllocSize in case we went past it. Note we are - * assuming here that MaxAllocSize <= INT_MAX/2, else the above loop - * could overflow. We will still have newlen >= needed. + * Clamp to MaxAllocSize in case we went past it. Note we are assuming + * here that MaxAllocSize <= INT_MAX/2, else the above loop could + * overflow. We will still have newlen >= needed. */ if (newlen > (int) MaxAllocSize) newlen = (int) MaxAllocSize; diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 3bdddf86da4..240a02f0e93 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.129 2005/10/13 22:55:19 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.130 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -41,7 +41,7 @@ static char *recv_password_packet(Port *port); static int recv_and_check_password_packet(Port *port); char *pg_krb_server_keyfile; -char *pg_krb_srvnam; +char *pg_krb_srvnam; bool pg_krb_caseins_users; char *pg_krb_server_hostname = NULL; @@ -65,8 +65,8 @@ static struct pam_conv pam_passw_conv = { }; static char *pam_passwd = NULL; /* Workaround for Solaris 2.6 brokenness */ -static Port *pam_port_cludge; /* Workaround for passing "Port *port" - * into pam_passwd_conv_proc */ +static Port *pam_port_cludge; /* Workaround for passing "Port *port" into + * pam_passwd_conv_proc */ #endif /* USE_PAM */ #ifdef KRB5 @@ -119,7 +119,7 @@ static int pg_krb5_init(void) { krb5_error_code retval; - char *khostname; + char *khostname; if (pg_krb5_initialised) return STATUS_OK; @@ -147,8 +147,8 @@ pg_krb5_init(void) } /* - * If no hostname was specified, pg_krb_server_hostname is already - * NULL. If it's set to blank, force it to NULL. + * If no hostname was specified, pg_krb_server_hostname is already NULL. + * If it's set to blank, force it to NULL. 
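enlargeStringInfo, rewrapped above, grows the buffer by repeated doubling rather than by just enough, rejects "needed" values large enough to overflow the arithmetic, and clamps the final size to MaxAllocSize. A standalone sketch of only that sizing calculation (the clamp constant and names are placeholders):

#include <stdio.h>

#define MAX_ALLOC 0x3fffffff	/* placeholder clamp, analogous to MaxAllocSize */

/*
 * Given the current allocation and the number of additional bytes needed,
 * compute the new allocation size: reject bad inputs, double until large
 * enough, then clamp.
 */
static int
new_alloc_size(int maxlen, int len, int needed)
{
	int			newlen;

	/* guard against ridiculous requests that would overflow below */
	if (needed < 0 || needed >= MAX_ALLOC - len)
		return -1;				/* caller should report an error */

	needed += len + 1;			/* total space required, with trailing '\0' */
	if (needed <= maxlen)
		return maxlen;			/* already big enough */

	/* double (repeatedly, for big requests) instead of growing bit by bit */
	newlen = 2 * maxlen;
	while (needed > newlen)
		newlen = 2 * newlen;

	/* the guard above keeps "needed" <= MAX_ALLOC, so the loop can't overflow */
	if (newlen > MAX_ALLOC)
		newlen = MAX_ALLOC;
	return newlen;
}

int
main(void)
{
	printf("%d\n", new_alloc_size(1024, 1000, 5000));	/* prints 8192 */
	return 0;
}

Doubling keeps the amortized cost of repeated appends linear, which is exactly the point the rewrapped comment is making about not growing "just a little more space with each append".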
*/ khostname = pg_krb_server_hostname; if (khostname && khostname[0] == '\0') @@ -163,9 +163,9 @@ pg_krb5_init(void) { ereport(LOG, (errmsg("Kerberos sname_to_principal(\"%s\", \"%s\") returned error %d", - khostname ? khostname : "localhost", pg_krb_srvnam, retval))); + khostname ? khostname : "localhost", pg_krb_srvnam, retval))); com_err("postgres", retval, - "while getting server principal for server \"%s\" for service \"%s\"", + "while getting server principal for server \"%s\" for service \"%s\"", khostname ? khostname : "localhost", pg_krb_srvnam); krb5_kt_close(pg_krb5_context, pg_krb5_keytab); krb5_free_context(pg_krb5_context); @@ -260,7 +260,6 @@ pg_krb5_recvauth(Port *port) return ret; } - #else static int @@ -293,13 +292,13 @@ auth_failed(Port *port, int status) /* * If we failed due to EOF from client, just quit; there's no point in - * trying to send a message to the client, and not much point in - * logging the failure in the postmaster log. (Logging the failure - * might be desirable, were it not for the fact that libpq closes the - * connection unceremoniously if challenged for a password when it - * hasn't got one to send. We'll get a useless log entry for every - * psql connection under password auth, even if it's perfectly - * successful, if we log STATUS_EOF events.) + * trying to send a message to the client, and not much point in logging + * the failure in the postmaster log. (Logging the failure might be + * desirable, were it not for the fact that libpq closes the connection + * unceremoniously if challenged for a password when it hasn't got one to + * send. We'll get a useless log entry for every psql connection under + * password auth, even if it's perfectly successful, if we log STATUS_EOF + * events.) */ if (status == STATUS_EOF) proc_exit(0); @@ -351,9 +350,9 @@ ClientAuthentication(Port *port) /* * Get the authentication method to use for this frontend/database - * combination. Note: a failure return indicates a problem with the - * hba config file, not with the request. hba.c should have dropped - * an error message into the postmaster logfile if it failed. + * combination. Note: a failure return indicates a problem with the hba + * config file, not with the request. hba.c should have dropped an error + * message into the postmaster logfile if it failed. */ if (hba_getauthmethod(port) != STATUS_OK) ereport(FATAL, @@ -368,11 +367,11 @@ ClientAuthentication(Port *port) /* * This could have come from an explicit "reject" entry in * pg_hba.conf, but more likely it means there was no matching - * entry. Take pity on the poor user and issue a helpful - * error message. NOTE: this is not a security breach, - * because all the info reported here is known at the frontend - * and must be assumed known to bad guys. We're merely helping - * out the less clueful good guys. + * entry. Take pity on the poor user and issue a helpful error + * message. NOTE: this is not a security breach, because all the + * info reported here is known at the frontend and must be assumed + * known to bad guys. We're merely helping out the less clueful + * good guys. */ { char hostinfo[NI_MAXHOST]; @@ -384,14 +383,14 @@ ClientAuthentication(Port *port) #ifdef USE_SSL ereport(FATAL, - (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), - errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s", - hostinfo, port->user_name, port->database_name, - port->ssl ? 
_("SSL on") : _("SSL off")))); + (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), + errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s", + hostinfo, port->user_name, port->database_name, + port->ssl ? _("SSL on") : _("SSL off")))); #else ereport(FATAL, - (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), - errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"", + (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), + errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"", hostinfo, port->user_name, port->database_name))); #endif break; @@ -425,7 +424,7 @@ ClientAuthentication(Port *port) if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0) ereport(FATAL, (errcode_for_socket_access(), - errmsg("could not enable credential reception: %m"))); + errmsg("could not enable credential reception: %m"))); #endif sendAuthRequest(port, AUTH_REQ_SCM_CREDS); @@ -488,8 +487,8 @@ sendAuthRequest(Port *port, AuthRequest areq) pq_endmessage(&buf); /* - * Flush message so client will see it, except for AUTH_REQ_OK, which - * need not be sent until we are ready for queries. + * Flush message so client will see it, except for AUTH_REQ_OK, which need + * not be sent until we are ready for queries. */ if (areq != AUTH_REQ_OK) pq_flush(); @@ -526,15 +525,15 @@ pam_passwd_conv_proc(int num_msg, const struct pam_message ** msg, if (!appdata_ptr) { /* - * Workaround for Solaris 2.6 where the PAM library is broken and - * does not pass appdata_ptr to the conversation routine + * Workaround for Solaris 2.6 where the PAM library is broken and does + * not pass appdata_ptr to the conversation routine */ appdata_ptr = pam_passwd; } /* - * Password wasn't passed to PAM the first time around - let's go ask - * the client to send a password, which we then stuff into PAM. + * Password wasn't passed to PAM the first time around - let's go ask the + * client to send a password, which we then stuff into PAM. */ if (strlen(appdata_ptr) == 0) { @@ -695,15 +694,15 @@ recv_password_packet(Port *port) { /* * If the client just disconnects without offering a password, - * don't make a log entry. This is legal per protocol spec - * and in fact commonly done by psql, so complaining just - * clutters the log. + * don't make a log entry. This is legal per protocol spec and in + * fact commonly done by psql, so complaining just clutters the + * log. */ if (mtype != EOF) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("expected password response, got message type %d", - mtype))); + errmsg("expected password response, got message type %d", + mtype))); return NULL; /* EOF or bad message type */ } } @@ -723,8 +722,8 @@ recv_password_packet(Port *port) } /* - * Apply sanity check: password packet length should agree with length - * of contained string. Note it is safe to use strlen here because + * Apply sanity check: password packet length should agree with length of + * contained string. Note it is safe to use strlen here because * StringInfo is guaranteed to have an appended '\0'. */ if (strlen(buf.data) + 1 != buf.len) @@ -738,8 +737,8 @@ recv_password_packet(Port *port) /* * Return the received string. Note we do not attempt to do any - * character-set conversion on it; since we don't yet know the - * client's encoding, there wouldn't be much point. + * character-set conversion on it; since we don't yet know the client's + * encoding, there wouldn't be much point. 
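recv_password_packet above ends with a small but important sanity check: the declared packet length must agree with the length of the NUL-terminated string inside it, and strlen is only safe there because the buffer is guaranteed to end with a '\0'. A standalone sketch of the same check on a received payload (the framing here is hypothetical):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/*
 * "buf" holds a message body of "len" bytes that is supposed to contain a
 * single NUL-terminated string.  Accept it only if the lengths agree, so an
 * embedded NUL (or a missing terminator) cannot smuggle extra bytes past us.
 */
static bool
password_packet_ok(const char *buf, int len)
{
	if (len <= 0 || buf[len - 1] != '\0')
		return false;			/* ensure strlen below cannot run off the end */
	return strlen(buf) + 1 == (size_t) len;
}

int
main(void)
{
	const char	good[] = "secret";		/* 6 chars + '\0' = 7 bytes */
	const char	bad[] = "sec\0ret";		/* embedded NUL: lengths disagree */

	printf("%d %d\n",
		   password_packet_ok(good, sizeof(good)),	/* 1 */
		   password_packet_ok(bad, sizeof(bad)));	/* 0 */
	return 0;
}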
*/ return buf.data; } diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c index 016884e425f..139f8946dd8 100644 --- a/src/backend/libpq/be-fsstubs.c +++ b/src/backend/libpq/be-fsstubs.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.78 2005/06/13 02:26:48 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.79 2005/10/15 02:49:17 momjian Exp $ * * NOTES * This should be moved to a more appropriate place. It is here @@ -74,7 +74,7 @@ static MemoryContext fscxt = NULL; ALLOCSET_DEFAULT_INITSIZE, \ ALLOCSET_DEFAULT_MAXSIZE); \ } while (0) - + static int newLOfd(LargeObjectDesc *lobjCookie); static void deleteLOfd(int fd); @@ -198,8 +198,8 @@ lo_write(int fd, char *buf, int len) if ((cookies[fd]->flags & IFS_WRLOCK) == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("large object descriptor %d was not opened for writing", - fd))); + errmsg("large object descriptor %d was not opened for writing", + fd))); Assert(fscxt != NULL); currentContext = MemoryContextSwitchTo(fscxt); @@ -289,9 +289,8 @@ lo_tell(PG_FUNCTION_ARGS) } /* - * We assume we do not need to switch contexts for inv_tell. That is - * true for now, but is probably more than this module ought to - * assume... + * We assume we do not need to switch contexts for inv_tell. That is true + * for now, but is probably more than this module ought to assume... */ PG_RETURN_INT32(inv_tell(cookies[fd])); } @@ -322,9 +321,9 @@ lo_unlink(PG_FUNCTION_ARGS) } /* - * inv_drop does not need a context switch, indeed it doesn't touch - * any LO-specific data structures at all. (Again, that's probably - * more than this module ought to be assuming.) + * inv_drop does not need a context switch, indeed it doesn't touch any + * LO-specific data structures at all. (Again, that's probably more than + * this module ought to be assuming.) */ PG_RETURN_INT32(inv_drop(lobjId)); } @@ -388,13 +387,13 @@ lo_import(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to use server-side lo_import()"), + errmsg("must be superuser to use server-side lo_import()"), errhint("Anyone can use the client-side lo_import() provided by libpq."))); #endif /* - * We don't actually need to switch into fscxt, but create it anyway - * to ensure that AtEOXact_LargeObject knows there is state to clean up + * We don't actually need to switch into fscxt, but create it anyway to + * ensure that AtEOXact_LargeObject knows there is state to clean up */ CreateFSContext(); @@ -462,13 +461,13 @@ lo_export(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to use server-side lo_export()"), + errmsg("must be superuser to use server-side lo_export()"), errhint("Anyone can use the client-side lo_export() provided by libpq."))); #endif /* - * We don't actually need to switch into fscxt, but create it anyway - * to ensure that AtEOXact_LargeObject knows there is state to clean up + * We don't actually need to switch into fscxt, but create it anyway to + * ensure that AtEOXact_LargeObject knows there is state to clean up */ CreateFSContext(); @@ -480,9 +479,9 @@ lo_export(PG_FUNCTION_ARGS) /* * open the file to be written to * - * Note: we reduce backend's normal 077 umask to the slightly friendlier - * 022. This code used to drop it all the way to 0, but creating - * world-writable export files doesn't seem wise. 
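The lo_export hunk here (rewrapped just below) temporarily relaxes the backend's restrictive 077 umask to 022 around creating the export file, so the file comes out group/other readable but never world-writable. A small standalone sketch of that save/relax/restore dance around open() (the path is made up):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int
main(void)
{
	mode_t		oumask;
	int			fd;

	/* relax the process umask from 077 to 022 just for this create */
	oumask = umask(S_IWGRP | S_IWOTH);	/* 022 */
	fd = open("/tmp/lo_export_demo.dat",
			  O_CREAT | O_WRONLY | O_TRUNC,
			  0666);					/* becomes 0644 under umask 022 */
	umask(oumask);						/* restore the original umask */

	if (fd < 0)
	{
		perror("open");
		return 1;
	}
	(void) write(fd, "demo\n", 5);
	close(fd);
	return 0;
}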
+ * Note: we reduce backend's normal 077 umask to the slightly friendlier 022. + * This code used to drop it all the way to 0, but creating world-writable + * export files doesn't seem wise. */ nbytes = VARSIZE(filename) - VARHDRSZ; if (nbytes >= MAXPGPATH) @@ -533,8 +532,8 @@ AtEOXact_LargeObject(bool isCommit) currentContext = MemoryContextSwitchTo(fscxt); /* - * Close LO fds and clear cookies array so that LO fds are no longer - * good. On abort we skip the close step. + * Close LO fds and clear cookies array so that LO fds are no longer good. + * On abort we skip the close step. */ for (i = 0; i < cookies_size; i++) { @@ -587,8 +586,8 @@ AtEOSubXact_LargeObject(bool isCommit, SubTransactionId mySubid, else { /* - * Make sure we do not call inv_close twice if it errors - * out for some reason. Better a leak than a crash. + * Make sure we do not call inv_close twice if it errors out + * for some reason. Better a leak than a crash. */ deleteLOfd(i); inv_close(lo); diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index 01dc4f1af0d..a2404ebd38a 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -11,7 +11,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.58 2005/07/04 04:51:46 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.59 2005/10/15 02:49:17 momjian Exp $ * * Since the server static private key ($DataDir/server.key) * will normally be stored unencrypted so that the database @@ -103,7 +103,7 @@ #define ROOT_CERT_FILE "root.crt" #define SERVER_CERT_FILE "server.crt" -#define SERVER_PRIVATE_KEY_FILE "server.key" +#define SERVER_PRIVATE_KEY_FILE "server.key" static DH *load_dh_file(int keylength); static DH *load_dh_buffer(const char *, size_t); @@ -276,8 +276,8 @@ rloop: case SSL_ERROR_WANT_WRITE: #ifdef WIN32 pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl), - (err==SSL_ERROR_WANT_READ) ? - FD_READ|FD_CLOSE : FD_WRITE|FD_CLOSE); + (err == SSL_ERROR_WANT_READ) ? + FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE); #endif goto rloop; case SSL_ERROR_SYSCALL: @@ -353,7 +353,7 @@ secure_write(Port *port, void *ptr, size_t len) if (port->ssl->state != SSL_ST_OK) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("SSL failed to send renegotiation request"))); + errmsg("SSL failed to send renegotiation request"))); port->ssl->state |= SSL_ST_ACCEPT; SSL_do_handshake(port->ssl); if (port->ssl->state != SSL_ST_OK) @@ -375,8 +375,8 @@ wloop: case SSL_ERROR_WANT_WRITE: #ifdef WIN32 pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl), - (err==SSL_ERROR_WANT_READ) ? - FD_READ|FD_CLOSE : FD_WRITE|FD_CLOSE); + (err == SSL_ERROR_WANT_READ) ? 
+ FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE); #endif goto wloop; case SSL_ERROR_SYSCALL: @@ -439,12 +439,12 @@ wloop: static bool my_bio_initialized = false; static BIO_METHOD my_bio_methods; -static int (*std_sock_read) (BIO *h, char *buf, int size); +static int (*std_sock_read) (BIO *h, char *buf, int size); static int my_sock_read(BIO *h, char *buf, int size) { - int res; + int res; prepare_for_client_read(); @@ -472,21 +472,21 @@ my_BIO_s_socket(void) static int my_SSL_set_fd(SSL *s, int fd) { - int ret=0; - BIO *bio=NULL; + int ret = 0; + BIO *bio = NULL; - bio=BIO_new(my_BIO_s_socket()); + bio = BIO_new(my_BIO_s_socket()); if (bio == NULL) { - SSLerr(SSL_F_SSL_SET_FD,ERR_R_BUF_LIB); + SSLerr(SSL_F_SSL_SET_FD, ERR_R_BUF_LIB); goto err; } - BIO_set_fd(bio,fd,BIO_NOCLOSE); - SSL_set_bio(s,bio,bio); - ret=1; + BIO_set_fd(bio, fd, BIO_NOCLOSE); + SSL_set_bio(s, bio, bio); + ret = 1; err: - return(ret); + return (ret); } /* @@ -539,7 +539,7 @@ load_dh_file(int keylength) (codes & DH_CHECK_P_NOT_SAFE_PRIME)) { elog(LOG, - "DH error (%s): neither suitable generator or safe prime", + "DH error (%s): neither suitable generator or safe prime", fnbuf); return NULL; } @@ -640,8 +640,8 @@ tmp_dh_cb(SSL *s, int is_export, int keylength) if (r == NULL || 8 * DH_size(r) < keylength) { ereport(DEBUG2, - (errmsg_internal("DH: generating parameters (%d bits)....", - keylength))); + (errmsg_internal("DH: generating parameters (%d bits)....", + keylength))); r = DH_generate_parameters(keylength, DH_GENERATOR_2, NULL, NULL); } @@ -735,30 +735,30 @@ initialize_SSL(void) SSL_FILETYPE_PEM)) ereport(FATAL, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("could not load server certificate file \"%s\": %s", - SERVER_CERT_FILE, SSLerrmessage()))); + errmsg("could not load server certificate file \"%s\": %s", + SERVER_CERT_FILE, SSLerrmessage()))); if (stat(SERVER_PRIVATE_KEY_FILE, &buf) == -1) ereport(FATAL, (errcode_for_file_access(), - errmsg("could not access private key file \"%s\": %m", - SERVER_PRIVATE_KEY_FILE))); + errmsg("could not access private key file \"%s\": %m", + SERVER_PRIVATE_KEY_FILE))); /* * Require no public access to key file. * - * XXX temporarily suppress check when on Windows, because there may - * not be proper support for Unix-y file permissions. Need to - * think of a reasonable check to apply on Windows. (See also the - * data directory permission check in postmaster.c) + * XXX temporarily suppress check when on Windows, because there may not + * be proper support for Unix-y file permissions. Need to think of a + * reasonable check to apply on Windows. (See also the data directory + * permission check in postmaster.c) */ #if !defined(WIN32) && !defined(__CYGWIN__) if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) || buf.st_uid != geteuid()) ereport(FATAL, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("unsafe permissions on private key file \"%s\"", - SERVER_PRIVATE_KEY_FILE), + errmsg("unsafe permissions on private key file \"%s\"", + SERVER_PRIVATE_KEY_FILE), errdetail("File must be owned by the database user and must have no permissions for \"group\" or \"other\"."))); #endif @@ -861,8 +861,8 @@ aloop: case SSL_ERROR_WANT_WRITE: #ifdef WIN32 pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl), - (err==SSL_ERROR_WANT_READ) ? - FD_READ|FD_CLOSE|FD_ACCEPT : FD_WRITE|FD_CLOSE); + (err == SSL_ERROR_WANT_READ) ? 
+ FD_READ | FD_CLOSE | FD_ACCEPT : FD_WRITE | FD_CLOSE); #endif goto aloop; case SSL_ERROR_SYSCALL: @@ -873,7 +873,7 @@ aloop: else ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("could not accept SSL connection: EOF detected"))); + errmsg("could not accept SSL connection: EOF detected"))); break; case SSL_ERROR_SSL: ereport(COMMERROR, @@ -884,7 +884,7 @@ aloop: case SSL_ERROR_ZERO_RETURN: ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("could not accept SSL connection: EOF detected"))); + errmsg("could not accept SSL connection: EOF detected"))); break; default: ereport(COMMERROR, @@ -912,7 +912,7 @@ aloop: port->peer_dn, sizeof(port->peer_dn)); port->peer_dn[sizeof(port->peer_dn) - 1] = '\0'; X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer), - NID_commonName, port->peer_cn, sizeof(port->peer_cn)); + NID_commonName, port->peer_cn, sizeof(port->peer_cn)); port->peer_cn[sizeof(port->peer_cn) - 1] = '\0'; } ereport(DEBUG2, diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c index 4e91b1a36f0..b0a17aea53b 100644 --- a/src/backend/libpq/crypt.c +++ b/src/backend/libpq/crypt.c @@ -9,7 +9,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.65 2005/08/15 02:40:25 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.66 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -148,9 +148,9 @@ md5_crypt_verify(const Port *port, const char *role, char *client_pass) TimestampTz vuntil; vuntil = DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in, - CStringGetDatum(valuntil), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(-1))); + CStringGetDatum(valuntil), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1))); if (vuntil < GetCurrentTimestamp()) retval = STATUS_ERROR; diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index f565442ac68..734a4568d2a 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.147 2005/08/11 21:11:44 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.148 2005/10/15 02:49:17 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -80,9 +80,9 @@ static List **role_sorted = NULL; /* sorted role list, for bsearch() */ static int role_length; static void tokenize_file(const char *filename, FILE *file, - List **lines, List **line_nums); + List **lines, List **line_nums); static char *tokenize_inc_file(const char *outer_filename, - const char *inc_filename); + const char *inc_filename); /* * isblank() exists in the ISO C99 spec, but it's not very portable yet, @@ -136,8 +136,8 @@ next_token(FILE *fp, char *buf, int bufsz) } /* - * Build a token in buf of next characters up to EOF, EOL, unquoted - * comma, or unquoted whitespace. + * Build a token in buf of next characters up to EOF, EOL, unquoted comma, + * or unquoted whitespace. 
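The next_token() comment rewrapped just above describes the scanning rule for pg_hba.conf tokens: collect characters until EOF, end of line, an unquoted comma, or unquoted whitespace, with double quotes suppressing the comma and whitespace stops. A minimal stand-alone sketch of just that rule (names are illustrative, not the backend's function, and the real code's comment handling and overrun reporting are omitted):

    #include <ctype.h>
    #include <stdio.h>

    /* Sketch only: copy one token from fp into buf, honoring double quotes. */
    static int
    scan_token(FILE *fp, char *buf, int bufsz)
    {
        int     c;
        int     in_quote = 0;
        char   *p = buf;

        /* skip leading whitespace, but stop at end of line */
        while ((c = getc(fp)) != EOF && c != '\n' && isspace((unsigned char) c))
            ;

        while (c != EOF && c != '\n' &&
               (in_quote || (!isspace((unsigned char) c) && c != ',')))
        {
            if (c == '"')
                in_quote = !in_quote;       /* quotes toggle, are not stored */
            else if (p < buf + bufsz - 1)
                *p++ = (char) c;
            c = getc(fp);
        }

        /* push the terminator back so the caller can still detect end of line */
        if (c != EOF)
            ungetc(c, fp);

        *p = '\0';
        return p != buf;                    /* true if anything was collected */
    }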
*/ while (c != EOF && c != '\n' && (!pg_isblank(c) || in_quote == true)) @@ -158,8 +158,8 @@ next_token(FILE *fp, char *buf, int bufsz) *buf = '\0'; ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("authentication file token too long, skipping: \"%s\"", - start_buf))); + errmsg("authentication file token too long, skipping: \"%s\"", + start_buf))); /* Discard remainder of line */ while ((c = getc(fp)) != EOF && c != '\n') ; @@ -189,8 +189,8 @@ next_token(FILE *fp, char *buf, int bufsz) } /* - * Put back the char right after the token (critical in case it is - * EOL, since we need to detect end-of-line at next call). + * Put back the char right after the token (critical in case it is EOL, + * since we need to detect end-of-line at next call). */ if (c != EOF) ungetc(c, fp); @@ -370,8 +370,8 @@ tokenize_inc_file(const char *outer_filename, foreach(token, token_list) { - int oldlen = strlen(comma_str); - int needed; + int oldlen = strlen(comma_str); + int needed; needed = oldlen + strlen(lfirst(token)) + 1; if (oldlen > 0) @@ -460,7 +460,7 @@ role_bsearch_cmp(const void *role, const void *list) /* * Lookup a role name in the pg_auth file */ -List ** +List ** get_role_line(const char *role) { /* On some versions of Solaris, bsearch of zero items dumps core */ @@ -495,8 +495,8 @@ is_member(const char *user, const char *role) return true; /* - * skip over the role name, password, valuntil, examine all the - * membership entries + * skip over the role name, password, valuntil, examine all the membership + * entries */ if (list_length(*line) < 4) return false; @@ -761,9 +761,9 @@ parse_hba(List *line, int line_num, hbaPort *port, { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid IP address \"%s\" in file \"%s\" line %d: %s", - token, HbaFileName, line_num, - gai_strerror(ret)))); + errmsg("invalid IP address \"%s\" in file \"%s\" line %d: %s", + token, HbaFileName, line_num, + gai_strerror(ret)))); if (cidr_slash) *cidr_slash = '/'; if (gai_result) @@ -796,9 +796,9 @@ parse_hba(List *line, int line_num, hbaPort *port, { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid IP mask \"%s\" in file \"%s\" line %d: %s", - token, HbaFileName, line_num, - gai_strerror(ret)))); + errmsg("invalid IP mask \"%s\" in file \"%s\" line %d: %s", + token, HbaFileName, line_num, + gai_strerror(ret)))); if (gai_result) freeaddrinfo_all(hints.ai_family, gai_result); goto hba_other_error; @@ -820,9 +820,9 @@ parse_hba(List *line, int line_num, hbaPort *port, if (addr.ss_family != port->raddr.addr.ss_family) { /* - * Wrong address family. We allow only one case: if the file - * has IPv4 and the port is IPv6, promote the file address to - * IPv6 and try to match that way. + * Wrong address family. We allow only one case: if the file has + * IPv4 and the port is IPv6, promote the file address to IPv6 and + * try to match that way. 
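The parse_hba() hunk above ends with the one permitted address-family mismatch: an IPv4 entry in the file may be promoted to an IPv4-mapped IPv6 address and compared against an IPv6 peer. A sketch of that promotion (the helper name is illustrative); it simply builds ::ffff:a.b.c.d from the 32-bit IPv4 address:

    #include <string.h>
    #include <netinet/in.h>

    /* Sketch: produce the IPv4-mapped IPv6 form of an IPv4 address. */
    static void
    promote_v4_to_v6(const struct in_addr *v4, struct in6_addr *v6)
    {
        memset(v6, 0, sizeof(*v6));                 /* 80 zero bits ...        */
        v6->s6_addr[10] = 0xff;                     /* ... then 16 one bits ... */
        v6->s6_addr[11] = 0xff;
        memcpy(&v6->s6_addr[12], &v4->s_addr, 4);   /* ... then the IPv4 bits  */
    }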
*/ #ifdef HAVE_IPV6 if (addr.ss_family == AF_INET && @@ -869,14 +869,14 @@ hba_syntax: if (line_item) ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid entry in file \"%s\" at line %d, token \"%s\"", - HbaFileName, line_num, - (char *) lfirst(line_item)))); + errmsg("invalid entry in file \"%s\" at line %d, token \"%s\"", + HbaFileName, line_num, + (char *) lfirst(line_item)))); else ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("missing field in file \"%s\" at end of line %d", - HbaFileName, line_num))); + errmsg("missing field in file \"%s\" at end of line %d", + HbaFileName, line_num))); /* Come here if suitable message already logged */ hba_other_error: @@ -928,7 +928,7 @@ load_role(void) /* Discard any old data */ if (role_lines || role_line_nums) free_lines(&role_lines, &role_line_nums); - if (role_sorted) + if (role_sorted) pfree(role_sorted); role_sorted = NULL; role_length = 0; @@ -957,8 +957,8 @@ load_role(void) role_length = list_length(role_lines); if (role_length) { - int i = 0; - ListCell *line; + int i = 0; + ListCell *line; /* We assume the flat file was written already-sorted */ role_sorted = palloc(role_length * sizeof(List *)); @@ -1124,7 +1124,7 @@ check_ident_usermap(const char *usermap_name, { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("cannot use Ident authentication without usermap field"))); + errmsg("cannot use Ident authentication without usermap field"))); found_entry = false; } else if (strcmp(usermap_name, "sameuser\n") == 0 || @@ -1191,12 +1191,10 @@ static bool interpret_ident_response(const char *ident_response, char *ident_user) { - const char *cursor = ident_response; /* Cursor into - * *ident_response */ + const char *cursor = ident_response; /* Cursor into *ident_response */ /* - * Ident's response, in the telnet tradition, should end in crlf - * (\r\n). + * Ident's response, in the telnet tradition, should end in crlf (\r\n). */ if (strlen(ident_response) < 2) return false; @@ -1230,9 +1228,8 @@ interpret_ident_response(const char *ident_response, else { /* - * It's a USERID response. Good. "cursor" should be - * pointing to the colon that precedes the operating - * system type. + * It's a USERID response. Good. "cursor" should be pointing + * to the colon that precedes the operating system type. */ if (*cursor != ':') return false; @@ -1280,10 +1277,9 @@ ident_inet(const SockAddr remote_addr, const SockAddr local_addr, char *ident_user) { - int sock_fd, /* File descriptor for socket on which we - * talk to Ident */ - rc; /* Return code from a locally called - * function */ + int sock_fd, /* File descriptor for socket on which we talk + * to Ident */ + rc; /* Return code from a locally called function */ bool ident_return; char remote_addr_s[NI_MAXHOST]; char remote_port[NI_MAXSERV]; @@ -1297,8 +1293,8 @@ ident_inet(const SockAddr remote_addr, hints; /* - * Might look a little weird to first convert it to text and then back - * to sockaddr, but it's protocol independent. + * Might look a little weird to first convert it to text and then back to + * sockaddr, but it's protocol independent. 
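The ident_inet() comment rewrapped above mentions converting the peer address to text and back to a sockaddr to stay protocol independent. A hedged sketch of that round trip using the standard getnameinfo()/getaddrinfo() pair (function and buffer names are illustrative; the backend goes through its own getnameinfo_all/getaddrinfo_all wrappers):

    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netdb.h>

    /* Sketch: render sa as a numeric host string, then rebuild a sockaddr
     * of the same family for a different port (e.g. the ident port). */
    static struct addrinfo *
    rebuild_for_port(const struct sockaddr *sa, socklen_t salen, const char *port)
    {
        char             host[1025];        /* roughly NI_MAXHOST */
        struct addrinfo  hints;
        struct addrinfo *res = NULL;

        if (getnameinfo(sa, salen, host, sizeof(host), NULL, 0,
                        NI_NUMERICHOST) != 0)
            return NULL;

        memset(&hints, 0, sizeof(hints));
        hints.ai_flags = AI_NUMERICHOST;
        hints.ai_family = sa->sa_family;    /* keep the same protocol family */
        hints.ai_socktype = SOCK_STREAM;

        if (getaddrinfo(host, port, &hints, &res) != 0)
            return NULL;
        return res;                         /* caller must freeaddrinfo() */
    }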
*/ getnameinfo_all(&remote_addr.addr, remote_addr.salen, remote_addr_s, sizeof(remote_addr_s), @@ -1348,16 +1344,15 @@ ident_inet(const SockAddr remote_addr, { ereport(LOG, (errcode_for_socket_access(), - errmsg("could not create socket for Ident connection: %m"))); + errmsg("could not create socket for Ident connection: %m"))); ident_return = false; goto ident_inet_done; } /* - * Bind to the address which the client originally contacted, - * otherwise the ident server won't be able to match up the right - * connection. This is necessary if the PostgreSQL server is running - * on an IP alias. + * Bind to the address which the client originally contacted, otherwise + * the ident server won't be able to match up the right connection. This + * is necessary if the PostgreSQL server is running on an IP alias. */ rc = bind(sock_fd, la->ai_addr, la->ai_addrlen); if (rc != 0) @@ -1421,8 +1416,8 @@ ident_inet(const SockAddr remote_addr, ident_return = interpret_ident_response(ident_response, ident_user); if (!ident_return) ereport(LOG, - (errmsg("invalidly formatted response from Ident server: \"%s\"", - ident_response))); + (errmsg("invalidly formatted response from Ident server: \"%s\"", + ident_response))); ident_inet_done: if (sock_fd >= 0) @@ -1473,7 +1468,6 @@ ident_unix(int sock, char *ident_user) StrNCpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1); return true; - #elif defined(SO_PEERCRED) /* Linux style: use getsockopt(SO_PEERCRED) */ struct ucred peercred; @@ -1504,7 +1498,6 @@ ident_unix(int sock, char *ident_user) StrNCpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1); return true; - #elif defined(HAVE_STRUCT_CMSGCRED) || defined(HAVE_STRUCT_FCRED) || (defined(HAVE_STRUCT_SOCKCRED) && defined(LOCAL_CREDS)) struct msghdr msg; @@ -1543,8 +1536,8 @@ ident_unix(int sock, char *ident_user) /* * The one character which is received here is not meaningful; its - * purposes is only to make sure that recvmsg() blocks long enough for - * the other side to send its credentials. + * purposes is only to make sure that recvmsg() blocks long enough for the + * other side to send its credentials. */ iov.iov_base = &buf; iov.iov_len = 1; @@ -1574,7 +1567,6 @@ ident_unix(int sock, char *ident_user) StrNCpy(ident_user, pw->pw_name, IDENT_USERNAME_MAX + 1); return true; - #else ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c index 3c7fcd69127..f73d38795a6 100644 --- a/src/backend/libpq/md5.c +++ b/src/backend/libpq/md5.c @@ -14,7 +14,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.28 2005/02/23 22:46:17 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.29 2005/10/15 02:49:18 momjian Exp $ */ @@ -329,8 +329,8 @@ EncryptMD5(const char *passwd, const char *salt, size_t salt_len, bool ret; /* - * Place salt at the end because it may be known by users trying to - * crack the MD5 output. + * Place salt at the end because it may be known by users trying to crack + * the MD5 output. 
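The EncryptMD5() comment above explains why the salt is appended after the password. A small illustration of that layout; it uses OpenSSL's one-shot MD5() only for brevity (the backend carries its own MD5 code in md5.c), and error handling is omitted:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <openssl/md5.h>

    /* Sketch: hash password-then-salt and write the 32-char hex digest. */
    static void
    md5_password_salt(const char *passwd, const char *salt, char out_hex[33])
    {
        size_t          plen = strlen(passwd);
        size_t          slen = strlen(salt);
        unsigned char  *buf = malloc(plen + slen + 1);  /* +1: avoid zero-size alloc */
        unsigned char   digest[MD5_DIGEST_LENGTH];
        int             i;

        memcpy(buf, passwd, plen);          /* password first ...              */
        memcpy(buf + plen, salt, slen);     /* ... known salt (role name) last */
        MD5(buf, plen + slen, digest);
        free(buf);

        for (i = 0; i < MD5_DIGEST_LENGTH; i++)
            sprintf(out_hex + i * 2, "%02x", digest[i]);
    }

The stored password form this feeds is the string "md5" followed by the hex digest of the password concatenated with the role name, which is why the role name effectively serves as the salt.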
*/ strcpy(crypt_buf, passwd); memcpy(crypt_buf + passwd_len, salt, salt_len); diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index d0904bfc7df..ccb4bcf2b51 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -30,7 +30,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.180 2005/09/24 17:53:14 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.181 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -107,12 +107,10 @@ static char sock_path[MAXPGPATH]; #define PQ_BUFFER_SIZE 8192 static char PqSendBuffer[PQ_BUFFER_SIZE]; -static int PqSendPointer; /* Next index to store a byte in - * PqSendBuffer */ +static int PqSendPointer; /* Next index to store a byte in PqSendBuffer */ static char PqRecvBuffer[PQ_BUFFER_SIZE]; -static int PqRecvPointer; /* Next index to read a byte from - * PqRecvBuffer */ +static int PqRecvPointer; /* Next index to read a byte from PqRecvBuffer */ static int PqRecvLength; /* End of data available in PqRecvBuffer */ /* @@ -126,6 +124,7 @@ static bool DoingCopyOut; static void pq_close(int code, Datum arg); static int internal_putbytes(const char *s, size_t len); static int internal_flush(void); + #ifdef HAVE_UNIX_SOCKETS static int Lock_AF_UNIX(unsigned short portNumber, char *unixSocketName); static int Setup_AF_UNIX(void); @@ -178,11 +177,11 @@ pq_close(int code, Datum arg) secure_close(MyProcPort); /* - * Formerly we did an explicit close() here, but it seems better - * to leave the socket open until the process dies. This allows - * clients to perform a "synchronous close" if they care --- wait - * till the transport layer reports connection closure, and you - * can be sure the backend has exited. + * Formerly we did an explicit close() here, but it seems better to + * leave the socket open until the process dies. This allows clients + * to perform a "synchronous close" if they care --- wait till the + * transport layer reports connection closure, and you can be sure the + * backend has exited. * * We do set sock to -1 to prevent any further I/O, though. */ @@ -272,8 +271,8 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber, hostName, service, gai_strerror(ret)))); else ereport(LOG, - (errmsg("could not translate service \"%s\" to address: %s", - service, gai_strerror(ret)))); + (errmsg("could not translate service \"%s\" to address: %s", + service, gai_strerror(ret)))); if (addrs) freeaddrinfo_all(hint.ai_family, addrs); return STATUS_ERROR; @@ -284,8 +283,8 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber, if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family)) { /* - * Only set up a unix domain socket when they really asked for - * it. The service/port is different in that case. + * Only set up a unix domain socket when they really asked for it. + * The service/port is different in that case. */ continue; } @@ -368,9 +367,9 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber, /* * Note: This might fail on some OS's, like Linux older than - * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and - * map ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all - * ipv4 connections. + * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map + * ipv4 addresses to ipv6. 
It will show ::ffff:ipv4 for all ipv4 + * connections. */ err = bind(fd, addr->ai_addr, addr->ai_addrlen); if (err < 0) @@ -381,12 +380,12 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber, errmsg("could not bind %s socket: %m", familyDesc), (IS_AF_UNIX(addr->ai_family)) ? - errhint("Is another postmaster already running on port %d?" - " If not, remove socket file \"%s\" and retry.", - (int) portNumber, sock_path) : - errhint("Is another postmaster already running on port %d?" - " If not, wait a few seconds and retry.", - (int) portNumber))); + errhint("Is another postmaster already running on port %d?" + " If not, remove socket file \"%s\" and retry.", + (int) portNumber, sock_path) : + errhint("Is another postmaster already running on port %d?" + " If not, wait a few seconds and retry.", + (int) portNumber))); closesocket(fd); continue; } @@ -403,10 +402,9 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber, #endif /* - * Select appropriate accept-queue length limit. PG_SOMAXCONN is - * only intended to provide a clamp on the request on platforms - * where an overly large request provokes a kernel error (are - * there any?). + * Select appropriate accept-queue length limit. PG_SOMAXCONN is only + * intended to provide a clamp on the request on platforms where an + * overly large request provokes a kernel error (are there any?). */ maxconn = MaxBackends * 2; if (maxconn > PG_SOMAXCONN) @@ -472,8 +470,8 @@ Setup_AF_UNIX(void) /* * Fix socket ownership/permission if requested. Note we must do this - * before we listen() to avoid a window where unwanted connections - * could get accepted. + * before we listen() to avoid a window where unwanted connections could + * get accepted. */ Assert(Unix_socket_group); if (Unix_socket_group[0] != '\0') @@ -596,11 +594,11 @@ StreamConnection(int server_fd, Port *port) } /* - * Also apply the current keepalive parameters. If we fail to set - * a parameter, don't error out, because these aren't universally + * Also apply the current keepalive parameters. If we fail to set a + * parameter, don't error out, because these aren't universally * supported. (Note: you might think we need to reset the GUC - * variables to 0 in such a case, but it's not necessary because - * the show hooks for these variables report the truth anyway.) + * variables to 0 in such a case, but it's not necessary because the + * show hooks for these variables report the truth anyway.) */ (void) pq_setkeepalivesidle(tcp_keepalives_idle, port); (void) pq_setkeepalivesinterval(tcp_keepalives_interval, port); @@ -642,9 +640,9 @@ TouchSocketFile(void) if (sock_path[0] != '\0') { /* - * utime() is POSIX standard, utimes() is a common alternative. If - * we have neither, there's no way to affect the mod or access - * time of the socket :-( + * utime() is POSIX standard, utimes() is a common alternative. If we + * have neither, there's no way to affect the mod or access time of + * the socket :-( * * In either path, we ignore errors; there's no point in complaining. */ @@ -705,10 +703,9 @@ pq_recvbuf(void) continue; /* Ok if interrupted */ /* - * Careful: an ereport() that tries to write to the client - * would cause recursion to here, leading to stack overflow - * and core dump! This message must go *only* to the - * postmaster log. + * Careful: an ereport() that tries to write to the client would + * cause recursion to here, leading to stack overflow and core + * dump! This message must go *only* to the postmaster log. 
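The pq_recvbuf() comment above stresses that a failure while reading from the client must be reported to the server log only, never back over the same connection. A minimal sketch of that refill loop (names and the plain stderr logging are illustrative; the backend buffers into PqRecvBuffer and reports via ereport(COMMERROR)):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define RECV_BUF_SIZE 8192

    static char recv_buf[RECV_BUF_SIZE];
    static int  recv_len;                   /* bytes currently valid in recv_buf */

    /* Sketch: refill the receive buffer; returns -1 on EOF or error. */
    static int
    refill_recv_buf(int sock)
    {
        for (;;)
        {
            ssize_t r = read(sock, recv_buf, RECV_BUF_SIZE);

            if (r < 0)
            {
                if (errno == EINTR)
                    continue;               /* interrupted: just retry */
                /* log locally; writing to the client here could recurse */
                fprintf(stderr, "could not receive data from client: errno %d\n",
                        errno);
                return -1;
            }
            if (r == 0)
                return -1;                  /* EOF: client closed the connection */
            recv_len = (int) r;
            return 0;
        }
    }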
*/ ereport(COMMERROR, (errcode_for_socket_access(), @@ -718,8 +715,8 @@ pq_recvbuf(void) if (r == 0) { /* - * EOF detected. We used to write a log message here, but - * it's better to expect the ultimate caller to do that. + * EOF detected. We used to write a log message here, but it's + * better to expect the ultimate caller to do that. */ return EOF; } @@ -925,7 +922,7 @@ pq_getmessage(StringInfo s, int maxlen) if (len > 0) { /* - * Allocate space for message. If we run out of room (ridiculously + * Allocate space for message. If we run out of room (ridiculously * large message), we will elog(ERROR), but we want to discard the * message body so as not to lose communication sync. */ @@ -1044,14 +1041,13 @@ internal_flush(void) continue; /* Ok if we were interrupted */ /* - * Careful: an ereport() that tries to write to the client - * would cause recursion to here, leading to stack overflow - * and core dump! This message must go *only* to the - * postmaster log. + * Careful: an ereport() that tries to write to the client would + * cause recursion to here, leading to stack overflow and core + * dump! This message must go *only* to the postmaster log. * * If a client disconnects while we're in the midst of output, we - * might write quite a bit of data before we get to a safe - * query abort point. So, suppress duplicate log messages. + * might write quite a bit of data before we get to a safe query + * abort point. So, suppress duplicate log messages. */ if (errno != last_reported_send_errno) { @@ -1187,14 +1183,14 @@ pq_getkeepalivesidle(Port *port) if (port->default_keepalives_idle == 0) { - socklen_t size = sizeof(port->default_keepalives_idle); + socklen_t size = sizeof(port->default_keepalives_idle); if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPIDLE, (char *) &port->default_keepalives_idle, &size) < 0) { elog(LOG, "getsockopt(TCP_KEEPIDLE) failed: %m"); - port->default_keepalives_idle = -1; /* don't know */ + port->default_keepalives_idle = -1; /* don't know */ } } @@ -1219,7 +1215,7 @@ pq_setkeepalivesidle(int idle, Port *port) if (pq_getkeepalivesidle(port) < 0) { if (idle == 0) - return STATUS_OK; /* default is set but unknown */ + return STATUS_OK; /* default is set but unknown */ else return STATUS_ERROR; } @@ -1259,14 +1255,14 @@ pq_getkeepalivesinterval(Port *port) if (port->default_keepalives_interval == 0) { - socklen_t size = sizeof(port->default_keepalives_interval); + socklen_t size = sizeof(port->default_keepalives_interval); if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPINTVL, (char *) &port->default_keepalives_interval, &size) < 0) { elog(LOG, "getsockopt(TCP_KEEPINTVL) failed: %m"); - port->default_keepalives_interval = -1; /* don't know */ + port->default_keepalives_interval = -1; /* don't know */ } } @@ -1291,7 +1287,7 @@ pq_setkeepalivesinterval(int interval, Port *port) if (pq_getkeepalivesinterval(port) < 0) { if (interval == 0) - return STATUS_OK; /* default is set but unknown */ + return STATUS_OK; /* default is set but unknown */ else return STATUS_ERROR; } @@ -1331,14 +1327,14 @@ pq_getkeepalivescount(Port *port) if (port->default_keepalives_count == 0) { - socklen_t size = sizeof(port->default_keepalives_count); + socklen_t size = sizeof(port->default_keepalives_count); if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPCNT, (char *) &port->default_keepalives_count, &size) < 0) { elog(LOG, "getsockopt(TCP_KEEPCNT) failed: %m"); - port->default_keepalives_count = -1; /* don't know */ + port->default_keepalives_count = -1; /* don't know */ } } @@ -1363,7 
+1359,7 @@ pq_setkeepalivescount(int count, Port *port) if (pq_getkeepalivescount(port) < 0) { if (count == 0) - return STATUS_OK; /* default is set but unknown */ + return STATUS_OK; /* default is set but unknown */ else return STATUS_ERROR; } diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c index 284427b832a..46e75c5e049 100644 --- a/src/backend/libpq/pqformat.c +++ b/src/backend/libpq/pqformat.c @@ -24,7 +24,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.39 2005/09/24 17:53:14 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.40 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -93,9 +93,8 @@ pq_beginmessage(StringInfo buf, char msgtype) /* * We stash the message type into the buffer's cursor field, expecting - * that the pq_sendXXX routines won't touch it. We could - * alternatively make it the first byte of the buffer contents, but - * this seems easier. + * that the pq_sendXXX routines won't touch it. We could alternatively + * make it the first byte of the buffer contents, but this seems easier. */ buf->cursor = msgtype; } @@ -664,8 +663,8 @@ pq_getmsgstring(StringInfo msg) str = &msg->data[msg->cursor]; /* - * It's safe to use strlen() here because a StringInfo is guaranteed - * to have a trailing null byte. But check we found a null inside the + * It's safe to use strlen() here because a StringInfo is guaranteed to + * have a trailing null byte. But check we found a null inside the * message. */ slen = strlen(str); diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c index 6bc3535e83a..abf13e33fc9 100644 --- a/src/backend/libpq/pqsignal.c +++ b/src/backend/libpq/pqsignal.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.40 2005/02/14 23:02:35 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.41 2005/10/15 02:49:18 momjian Exp $ * * NOTES * This shouldn't be in libpq, but the monitor and some other @@ -50,7 +50,6 @@ sigset_t UnBlockSig, BlockSig, AuthBlockSig; - #else int UnBlockSig, BlockSig, @@ -83,9 +82,9 @@ pqinitmask(void) sigfillset(&AuthBlockSig); /* - * Unmark those signals that should never be blocked. Some of these - * signal names don't exist on all platforms. Most do, but might as - * well ifdef them all for consistency... + * Unmark those signals that should never be blocked. Some of these signal + * names don't exist on all platforms. Most do, but might as well ifdef + * them all for consistency... 
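The pqinitmask() comment above describes building the block mask by starting from "everything blocked" and then unmarking the signals that must never be blocked, ifdef-guarding names that do not exist on every platform. A sketch of that idiom (variable and function names are illustrative):

    #include <signal.h>

    static sigset_t block_sig;

    /* Sketch: block everything except fault/debug signals. */
    static void
    init_block_mask(void)
    {
        sigfillset(&block_sig);
    #ifdef SIGTRAP
        sigdelset(&block_sig, SIGTRAP);
    #endif
    #ifdef SIGSEGV
        sigdelset(&block_sig, SIGSEGV);
    #endif
    #ifdef SIGBUS
        sigdelset(&block_sig, SIGBUS);
    #endif
    #ifdef SIGILL
        sigdelset(&block_sig, SIGILL);
    #endif
    #ifdef SIGFPE
        sigdelset(&block_sig, SIGFPE);
    #endif
    }

Such a mask would then be installed with sigprocmask(SIG_SETMASK, &block_sig, NULL) around code that must not be interrupted.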
*/ #ifdef SIGTRAP sigdelset(&BlockSig, SIGTRAP); @@ -135,7 +134,7 @@ pqinitmask(void) UnBlockSig = 0; BlockSig = sigmask(SIGQUIT) | sigmask(SIGTERM) | sigmask(SIGALRM) | - /* common signals between two */ + /* common signals between two */ sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGUSR1) | sigmask(SIGUSR2) | sigmask(SIGCHLD) | diff --git a/src/backend/main/main.c b/src/backend/main/main.c index ed1895d839c..ea1a3bef254 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.95 2005/10/13 15:37:14 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.96 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -56,15 +56,15 @@ main(int argc, char *argv[]) char *pw_name_persist; /* - * Place platform-specific startup hacks here. This is the right - * place to put code that must be executed early in launch of either a - * postmaster, a standalone backend, or a standalone bootstrap run. - * Note that this code will NOT be executed when a backend or - * sub-bootstrap run is forked by the postmaster. + * Place platform-specific startup hacks here. This is the right place to + * put code that must be executed early in launch of either a postmaster, + * a standalone backend, or a standalone bootstrap run. Note that this + * code will NOT be executed when a backend or sub-bootstrap run is forked + * by the postmaster. * - * XXX The need for code here is proof that the platform in question is - * too brain-dead to provide a standard C execution environment - * without help. Avoid adding more here, if you can. + * XXX The need for code here is proof that the platform in question is too + * brain-dead to provide a standard C execution environment without help. + * Avoid adding more here, if you can. */ #if defined(__alpha) /* no __alpha__ ? */ @@ -78,12 +78,11 @@ main(int argc, char *argv[]) #endif /* - * On some platforms, unaligned memory accesses result in a kernel - * trap; the default kernel behavior is to emulate the memory - * access, but this results in a significant performance - * penalty. We ought to fix PG not to make such unaligned memory - * accesses, so this code disables the kernel emulation: unaligned - * accesses will result in SIGBUS instead. + * On some platforms, unaligned memory accesses result in a kernel trap; + * the default kernel behavior is to emulate the memory access, but this + * results in a significant performance penalty. We ought to fix PG not to + * make such unaligned memory accesses, so this code disables the kernel + * emulation: unaligned accesses will result in SIGBUS instead. */ #ifdef NOFIXADE @@ -125,31 +124,30 @@ main(int argc, char *argv[]) #endif /* - * Not-quite-so-platform-specific startup environment checks. Still - * best to minimize these. + * Not-quite-so-platform-specific startup environment checks. Still best + * to minimize these. */ /* - * Remember the physical location of the initially given argv[] array - * for possible use by ps display. On some platforms, the argv[] - * storage must be overwritten in order to set the process title for - * ps. In such cases save_ps_display_args makes and returns a new copy - * of the argv[] array. + * Remember the physical location of the initially given argv[] array for + * possible use by ps display. On some platforms, the argv[] storage must + * be overwritten in order to set the process title for ps. 
In such cases + * save_ps_display_args makes and returns a new copy of the argv[] array. * - * save_ps_display_args may also move the environment strings to make - * extra room. Therefore this should be done as early as possible - * during startup, to avoid entanglements with code that might save a - * getenv() result pointer. + * save_ps_display_args may also move the environment strings to make extra + * room. Therefore this should be done as early as possible during + * startup, to avoid entanglements with code that might save a getenv() + * result pointer. */ argv = save_ps_display_args(argc, argv); /* * Set up locale information from environment. Note that LC_CTYPE and * LC_COLLATE will be overridden later from pg_control if we are in an - * already-initialized database. We set them here so that they will - * be available to fill pg_control during initdb. LC_MESSAGES will - * get set later during GUC option processing, but we set it here to - * allow startup error messages to be localized. + * already-initialized database. We set them here so that they will be + * available to fill pg_control during initdb. LC_MESSAGES will get set + * later during GUC option processing, but we set it here to allow startup + * error messages to be localized. */ set_pglocale_pgservice(argv[0], "postgres"); @@ -157,11 +155,10 @@ main(int argc, char *argv[]) #ifdef WIN32 /* - * Windows uses codepages rather than the environment, so we work - * around that by querying the environment explicitly first for - * LC_COLLATE and LC_CTYPE. We have to do this because initdb passes - * those values in the environment. If there is nothing there we fall - * back on the codepage. + * Windows uses codepages rather than the environment, so we work around + * that by querying the environment explicitly first for LC_COLLATE and + * LC_CTYPE. We have to do this because initdb passes those values in the + * environment. If there is nothing there we fall back on the codepage. */ if ((env_locale = getenv("LC_COLLATE")) != NULL) @@ -183,17 +180,16 @@ main(int argc, char *argv[]) #endif /* - * We keep these set to "C" always, except transiently in pg_locale.c; - * see that file for explanations. + * We keep these set to "C" always, except transiently in pg_locale.c; see + * that file for explanations. */ setlocale(LC_MONETARY, "C"); setlocale(LC_NUMERIC, "C"); setlocale(LC_TIME, "C"); /* - * Skip permission checks if we're just trying to do --help or - * --version; otherwise root will get unhelpful failure messages from - * initdb. + * Skip permission checks if we're just trying to do --help or --version; + * otherwise root will get unhelpful failure messages from initdb. */ if (!(argc > 1 && (strcmp(argv[1], "--help") == 0 || @@ -215,19 +211,19 @@ main(int argc, char *argv[]) write_stderr("\"root\" execution of the PostgreSQL server is not permitted.\n" "The server must be started under an unprivileged user ID to prevent\n" "possible system security compromise. See the documentation for\n" - "more information on how to properly start the server.\n"); + "more information on how to properly start the server.\n"); exit(1); } #endif /* !__BEOS__ */ /* - * Also make sure that real and effective uids are the same. - * Executing Postgres as a setuid program from a root shell is a - * security hole, since on many platforms a nefarious subroutine - * could setuid back to root if real uid is root. 
(Since nobody - * actually uses Postgres as a setuid program, trying to actively - * fix this situation seems more trouble than it's worth; we'll - * just expend the effort to check for it.) + * Also make sure that real and effective uids are the same. Executing + * Postgres as a setuid program from a root shell is a security hole, + * since on many platforms a nefarious subroutine could setuid back to + * root if real uid is root. (Since nobody actually uses Postgres as + * a setuid program, trying to actively fix this situation seems more + * trouble than it's worth; we'll just expend the effort to check for + * it.) */ if (getuid() != geteuid()) { @@ -242,7 +238,7 @@ main(int argc, char *argv[]) "permitted.\n" "The server must be started under an unprivileged user ID to prevent\n" "possible system security compromises. See the documentation for\n" - "more information on how to properly start the server.\n"); + "more information on how to properly start the server.\n"); exit(1); } #endif /* !WIN32 */ @@ -250,9 +246,9 @@ main(int argc, char *argv[]) /* * Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain, - * SubPostmasterMain, or BootstrapMain depending on the program name - * (and possibly first argument) we were called with. The lack of - * consistency here is historical. + * SubPostmasterMain, or BootstrapMain depending on the program name (and + * possibly first argument) we were called with. The lack of consistency + * here is historical. */ if (strcmp(get_progname(argv[0]), "postmaster") == 0) { @@ -262,8 +258,8 @@ main(int argc, char *argv[]) /* * If the first argument begins with "-fork", then invoke - * SubPostmasterMain. This is used for forking postmaster child - * processes on systems where we can't simply fork. + * SubPostmasterMain. This is used for forking postmaster child processes + * on systems where we can't simply fork. */ #ifdef EXEC_BACKEND if (argc > 1 && strncmp(argv[1], "-fork", 5) == 0) @@ -271,11 +267,12 @@ main(int argc, char *argv[]) #endif #ifdef WIN32 + /* * Start our win32 signal implementation * - * SubPostmasterMain() will do this for itself, but the remaining - * modes need it here + * SubPostmasterMain() will do this for itself, but the remaining modes need + * it here */ pgwin32_signal_initialize(); #endif @@ -295,9 +292,8 @@ main(int argc, char *argv[]) exit(GucInfoMain()); /* - * Otherwise we're a standalone backend. Invoke PostgresMain, - * specifying current userid as the "authenticated" Postgres user - * name. + * Otherwise we're a standalone backend. Invoke PostgresMain, specifying + * current userid as the "authenticated" Postgres user name. */ #ifndef WIN32 pw = getpwuid(geteuid()); diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index d74ba6189ed..916833df0dc 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -14,7 +14,7 @@ * Copyright (c) 2003-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.9 2005/06/15 16:24:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.10 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -769,7 +769,7 @@ bms_first_member(Bitmapset *a) * * Note: we must ensure that any two bitmapsets that are bms_equal() will * hash to the same value; in practice this means that trailing all-zero - * words cannot affect the result. The circular-shift-and-XOR hash method + * words cannot affect the result. 
The circular-shift-and-XOR hash method * used here has this property, so long as we work from back to front. * * Note: you might wonder why we bother with the circular shift; at first @@ -779,7 +779,7 @@ bms_first_member(Bitmapset *a) * multiword bitmapsets is "a JOIN b JOIN c JOIN d ...", which gives rise * to rangetables in which base tables and JOIN nodes alternate; so * bitmapsets of base table RT indexes tend to use only odd-numbered or only - * even-numbered bits. A straight longitudinal XOR would preserve this + * even-numbered bits. A straight longitudinal XOR would preserve this * property, leading to a much smaller set of possible outputs than if * we include a shift. */ @@ -791,7 +791,7 @@ bms_hash_value(const Bitmapset *a) if (a == NULL || a->nwords <= 0) return 0; /* All empty sets hash to 0 */ - for (wordnum = a->nwords; --wordnum > 0; ) + for (wordnum = a->nwords; --wordnum > 0;) { result ^= a->words[wordnum]; if (result & ((bitmapword) 1 << (BITS_PER_BITMAPWORD - 1))) diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 9c21c2f977a..4a90b10b277 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -15,7 +15,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.315 2005/08/01 20:31:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.316 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -154,7 +154,7 @@ _copyAppend(Append *from) static BitmapAnd * _copyBitmapAnd(BitmapAnd *from) { - BitmapAnd *newnode = makeNode(BitmapAnd); + BitmapAnd *newnode = makeNode(BitmapAnd); /* * copy node superclass fields @@ -175,7 +175,7 @@ _copyBitmapAnd(BitmapAnd *from) static BitmapOr * _copyBitmapOr(BitmapOr *from) { - BitmapOr *newnode = makeNode(BitmapOr); + BitmapOr *newnode = makeNode(BitmapOr); /* * copy node superclass fields @@ -269,7 +269,7 @@ _copyIndexScan(IndexScan *from) static BitmapIndexScan * _copyBitmapIndexScan(BitmapIndexScan *from) { - BitmapIndexScan *newnode = makeNode(BitmapIndexScan); + BitmapIndexScan *newnode = makeNode(BitmapIndexScan); /* * copy node superclass fields @@ -294,7 +294,7 @@ _copyBitmapIndexScan(BitmapIndexScan *from) static BitmapHeapScan * _copyBitmapHeapScan(BitmapHeapScan *from) { - BitmapHeapScan *newnode = makeNode(BitmapHeapScan); + BitmapHeapScan *newnode = makeNode(BitmapHeapScan); /* * copy node superclass fields @@ -1262,8 +1262,7 @@ _copyRestrictInfo(RestrictInfo *from) COPY_SCALAR_FIELD(right_sortop); /* - * Do not copy pathkeys, since they'd not be canonical in a copied - * query + * Do not copy pathkeys, since they'd not be canonical in a copied query */ newnode->left_pathkey = NIL; newnode->right_pathkey = NIL; @@ -1791,7 +1790,7 @@ _copyFuncWithArgs(FuncWithArgs *from) static GrantRoleStmt * _copyGrantRoleStmt(GrantRoleStmt *from) { - GrantRoleStmt *newnode = makeNode(GrantRoleStmt); + GrantRoleStmt *newnode = makeNode(GrantRoleStmt); COPY_NODE_FIELD(granted_roles); COPY_NODE_FIELD(grantee_roles); @@ -2906,8 +2905,8 @@ copyObject(void *from) break; /* - * Lists of integers and OIDs don't need to be deep-copied, so - * we perform a shallow copy via list_copy() + * Lists of integers and OIDs don't need to be deep-copied, so we + * perform a shallow copy via list_copy() */ case T_IntList: case T_OidList: diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 
326eb9c62aa..9baa79dd935 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -18,7 +18,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.252 2005/08/01 20:31:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.253 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -135,8 +135,7 @@ _equalConst(Const *a, Const *b) /* * We treat all NULL constants of the same type as equal. Someday this - * might need to change? But datumIsEqual doesn't work on nulls, - * so... + * might need to change? But datumIsEqual doesn't work on nulls, so... */ if (a->constisnull) return true; @@ -202,8 +201,8 @@ _equalFuncExpr(FuncExpr *a, FuncExpr *b) COMPARE_SCALAR_FIELD(funcretset); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->funcformat != b->funcformat && a->funcformat != COERCE_DONTCARE && @@ -222,9 +221,9 @@ _equalOpExpr(OpExpr *a, OpExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -245,9 +244,9 @@ _equalDistinctExpr(DistinctExpr *a, DistinctExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -268,9 +267,9 @@ _equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -354,8 +353,8 @@ _equalRelabelType(RelabelType *a, RelabelType *b) COMPARE_SCALAR_FIELD(resulttypmod); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. 
*/ if (a->relabelformat != b->relabelformat && a->relabelformat != COERCE_DONTCARE && @@ -372,8 +371,8 @@ _equalConvertRowtypeExpr(ConvertRowtypeExpr *a, ConvertRowtypeExpr *b) COMPARE_SCALAR_FIELD(resulttype); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->convertformat != b->convertformat && a->convertformat != COERCE_DONTCARE && @@ -430,8 +429,8 @@ _equalRowExpr(RowExpr *a, RowExpr *b) COMPARE_SCALAR_FIELD(row_typeid); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->row_format != b->row_format && a->row_format != COERCE_DONTCARE && @@ -467,9 +466,9 @@ _equalNullIfExpr(NullIfExpr *a, NullIfExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -509,8 +508,8 @@ _equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b) COMPARE_SCALAR_FIELD(resulttypmod); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->coercionformat != b->coercionformat && a->coercionformat != COERCE_DONTCARE && @@ -606,8 +605,8 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b) COMPARE_BITMAPSET_FIELD(required_relids); /* - * We ignore all the remaining fields, since they may not be set yet, - * and should be derivable from the clause anyway. + * We ignore all the remaining fields, since they may not be set yet, and + * should be derivable from the clause anyway. */ return true; @@ -1717,15 +1716,15 @@ _equalList(List *a, List *b) ListCell *item_b; /* - * Try to reject by simple scalar checks before grovelling through all - * the list elements... + * Try to reject by simple scalar checks before grovelling through all the + * list elements... */ COMPARE_SCALAR_FIELD(type); COMPARE_SCALAR_FIELD(length); /* - * We place the switch outside the loop for the sake of efficiency; - * this may not be worth doing... + * We place the switch outside the loop for the sake of efficiency; this + * may not be worth doing... 
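Several hunks above rewrap the recurring COERCE_DONTCARE note in equalfuncs.c: two coercion-format fields compare as equal if they match, or if either side is the wildcard value. A stand-alone sketch of that comparison pattern (the enum values are illustrative, not the real CoercionForm members):

    /* Sketch of the "don't care" wildcard comparison used for format fields. */
    typedef enum
    {
        FORMAT_EXPLICIT,
        FORMAT_IMPLICIT,
        FORMAT_DONTCARE             /* wildcard: matches anything */
    } CoercionFormatSketch;

    static int
    format_fields_match(CoercionFormatSketch a, CoercionFormatSketch b)
    {
        return a == b || a == FORMAT_DONTCARE || b == FORMAT_DONTCARE;
    }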
*/ switch (a->type) { diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c index 80043834b63..c775770f70f 100644 --- a/src/backend/nodes/list.c +++ b/src/backend/nodes/list.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.65 2005/07/28 20:26:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.66 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -50,7 +50,6 @@ check_list_invariants(List *list) Assert(list->head->next == list->tail); Assert(list->tail->next == NULL); } - #else #define check_list_invariants(l) #endif /* USE_ASSERT_CHECKING */ @@ -532,9 +531,9 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev) Assert(prev != NULL ? lnext(prev) == cell : list_head(list) == cell); /* - * If we're about to delete the last node from the list, free the - * whole list instead and return NIL, which is the only valid - * representation of a zero-length list. + * If we're about to delete the last node from the list, free the whole + * list instead and return NIL, which is the only valid representation of + * a zero-length list. */ if (list->length == 1) { @@ -543,9 +542,8 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev) } /* - * Otherwise, adjust the necessary list links, deallocate the - * particular node we have just removed, and return the list we were - * given. + * Otherwise, adjust the necessary list links, deallocate the particular + * node we have just removed, and return the list we were given. */ list->length--; @@ -951,7 +949,7 @@ list_append_unique_oid(List *list, Oid datum) * via equal(). * * This is almost the same functionality as list_union(), but list1 is - * modified in-place rather than being copied. Note also that list2's cells + * modified in-place rather than being copied. Note also that list2's cells * are not inserted in list1, so the analogy to list_concat() isn't perfect. */ List * @@ -1110,8 +1108,8 @@ list_copy(List *oldlist) newlist->length = oldlist->length; /* - * Copy over the data in the first cell; new_list() has already - * allocated the head cell itself + * Copy over the data in the first cell; new_list() has already allocated + * the head cell itself */ newlist->head->data = oldlist->head->data; @@ -1163,8 +1161,8 @@ list_copy_tail(List *oldlist, int nskip) oldlist_cur = oldlist_cur->next; /* - * Copy over the data in the first remaining cell; new_list() has - * already allocated the head cell itself + * Copy over the data in the first remaining cell; new_list() has already + * allocated the head cell itself */ newlist->head->data = oldlist_cur->data; diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c index e1e6c3da836..28202af9ee5 100644 --- a/src/backend/nodes/makefuncs.c +++ b/src/backend/nodes/makefuncs.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.47 2005/04/06 16:34:05 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.48 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -73,11 +73,10 @@ makeVar(Index varno, var->varlevelsup = varlevelsup; /* - * Since few if any routines ever create Var nodes with - * varnoold/varoattno different from varno/varattno, we don't provide - * separate arguments for them, but just initialize them to the given - * varno/varattno. This reduces code clutter and chance of error for - * most callers. 
+ * Since few if any routines ever create Var nodes with varnoold/varoattno + * different from varno/varattno, we don't provide separate arguments for + * them, but just initialize them to the given varno/varattno. This + * reduces code clutter and chance of error for most callers. */ var->varnoold = varno; var->varoattno = varattno; @@ -102,8 +101,8 @@ makeTargetEntry(Expr *expr, tle->resname = resname; /* - * We always set these fields to 0. If the caller wants to change them - * he must do so explicitly. Few callers do that, so omitting these + * We always set these fields to 0. If the caller wants to change them he + * must do so explicitly. Few callers do that, so omitting these * arguments reduces the chance of error. */ tle->ressortgroupref = 0; diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index d6d12363883..19306b3e53d 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.260 2005/08/27 22:13:43 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.261 2005/10/15 02:49:18 momjian Exp $ * * NOTES * Every node type that can appear in stored rules' parsetrees *must* @@ -113,9 +113,9 @@ _outToken(StringInfo str, char *s) } /* - * Look for characters or patterns that are treated specially by - * read.c (either in pg_strtok() or in nodeRead()), and therefore need - * a protective backslash. + * Look for characters or patterns that are treated specially by read.c + * (either in pg_strtok() or in nodeRead()), and therefore need a + * protective backslash. */ /* These characters only need to be quoted at the start of the string */ if (*s == '<' || @@ -151,8 +151,8 @@ _outList(StringInfo str, List *node) { /* * For the sake of backward compatibility, we emit a slightly - * different whitespace format for lists of nodes vs. other types - * of lists. XXX: is this necessary? + * different whitespace format for lists of nodes vs. other types of + * lists. XXX: is this necessary? */ if (IsA(node, List)) { @@ -1444,9 +1444,9 @@ _outQuery(StringInfo str, Query *node) /* * Hack to work around missing outfuncs routines for a lot of the * utility-statement node types. (The only one we actually *need* for - * rules support is NotifyStmt.) Someday we ought to support 'em all, - * but for the meantime do this to avoid getting lots of warnings when - * running with debug_print_parse on. + * rules support is NotifyStmt.) Someday we ought to support 'em all, but + * for the meantime do this to avoid getting lots of warnings when running + * with debug_print_parse on. */ if (node->utilityStmt) { @@ -1616,8 +1616,8 @@ _outValue(StringInfo str, Value *value) case T_Float: /* - * We assume the value is a valid numeric literal and so does - * not need quoting. + * We assume the value is a valid numeric literal and so does not + * need quoting. */ appendStringInfoString(str, value->val.str); break; @@ -2099,9 +2099,8 @@ _outNode(StringInfo str, void *obj) default: /* - * This should be an ERROR, but it's too useful to be able - * to dump structures that _outNode only understands part - * of. + * This should be an ERROR, but it's too useful to be able to + * dump structures that _outNode only understands part of. 
*/ elog(WARNING, "could not dump unrecognized node type: %d", (int) nodeTag(obj)); diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c index 9d6511cf508..2f70355b328 100644 --- a/src/backend/nodes/print.c +++ b/src/backend/nodes/print.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.76 2005/05/01 18:56:18 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.77 2005/10/15 02:49:19 momjian Exp $ * * HISTORY * AUTHOR DATE MAJOR EVENT @@ -603,7 +603,7 @@ print_plan_recursive(Plan *p, Query *parsetree, int indentLevel, char *label) if (IsA(p, BitmapAnd)) { ListCell *l; - BitmapAnd *bitmapandplan = (BitmapAnd *) p; + BitmapAnd *bitmapandplan = (BitmapAnd *) p; foreach(l, bitmapandplan->bitmapplans) { @@ -616,7 +616,7 @@ print_plan_recursive(Plan *p, Query *parsetree, int indentLevel, char *label) if (IsA(p, BitmapOr)) { ListCell *l; - BitmapOr *bitmaporplan = (BitmapOr *) p; + BitmapOr *bitmaporplan = (BitmapOr *) p; foreach(l, bitmaporplan->bitmapplans) { diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c index df2165863d6..09175074d51 100644 --- a/src/backend/nodes/read.c +++ b/src/backend/nodes/read.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.46 2004/12/31 21:59:55 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.47 2005/10/15 02:49:19 momjian Exp $ * * HISTORY * AUTHOR DATE MAJOR EVENT @@ -41,10 +41,10 @@ stringToNode(char *str) void *retval; /* - * We save and restore the pre-existing state of pg_strtok. This makes - * the world safe for re-entrant invocation of stringToNode, without - * incurring a lot of notational overhead by having to pass the - * next-character pointer around through all the readfuncs.c code. + * We save and restore the pre-existing state of pg_strtok. This makes the + * world safe for re-entrant invocation of stringToNode, without incurring + * a lot of notational overhead by having to pass the next-character + * pointer around through all the readfuncs.c code. */ save_strtok = pg_strtok_ptr; @@ -211,13 +211,13 @@ nodeTokenType(char *token, int length) if (*numptr == '+' || *numptr == '-') numptr++, numlen--; if ((numlen > 0 && isdigit((unsigned char) *numptr)) || - (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1]))) + (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1]))) { /* * Yes. Figure out whether it is integral or float; this requires - * both a syntax check and a range check. strtol() can do both for - * us. We know the token will end at a character that strtol will - * stop at, so we do not need to modify the string. + * both a syntax check and a range check. strtol() can do both for us. + * We know the token will end at a character that strtol will stop at, + * so we do not need to modify the string. 
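The nodeTokenType() comment above points out that strtol() provides both a syntax check and a range check, and that it stops on its own at the first character that cannot be part of an integer, so the token never has to be copied or modified. A sketch of that classification step (names are illustrative; the real function also distinguishes several non-numeric token types):

    #include <errno.h>
    #include <stdlib.h>

    typedef enum { TOK_INTEGER, TOK_FLOAT } NumTokenKind;

    /* Sketch: decide whether a numeric-looking token is an integer or a float. */
    static NumTokenKind
    classify_numeric(const char *token, int length)
    {
        char   *endptr;

        errno = 0;
        (void) strtol(token, &endptr, 10);
        if (endptr != token + length || errno == ERANGE)
            return TOK_FLOAT;       /* stopped at '.', 'e', etc., or overflowed */
        return TOK_INTEGER;
    }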
*/ long val; char *endptr; @@ -386,8 +386,7 @@ nodeRead(char *token, int tok_len) case T_Integer: /* - * we know that the token terminates on a char atol will stop - * at + * we know that the token terminates on a char atol will stop at */ result = (Node *) makeInteger(atol(token)); break; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index ff49ee21f2e..46c99834461 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.181 2005/08/01 20:31:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.182 2005/10/15 02:49:19 momjian Exp $ * * NOTES * Path and Plan nodes do not have any readfuncs support, because we @@ -389,12 +389,12 @@ _readOpExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; @@ -417,12 +417,12 @@ _readDistinctExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; @@ -445,12 +445,12 @@ _readScalarArrayOpExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; @@ -686,12 +686,12 @@ _readNullIfExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. 
We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index a3b5c7d6d07..bcfc7d0920c 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -23,7 +23,7 @@ * Copyright (c) 2003-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.7 2005/09/02 19:02:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.8 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -39,7 +39,7 @@ /* * The maximum number of tuples per page is not large (typically 256 with * 8K pages, or 1024 with 32K pages). So there's not much point in making - * the per-page bitmaps variable size. We just legislate that the size + * the per-page bitmaps variable size. We just legislate that the size * is this: */ #define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage @@ -52,10 +52,10 @@ * for that page in the page table. * * We actually store both exact pages and lossy chunks in the same hash - * table, using identical data structures. (This is because dynahash.c's + * table, using identical data structures. (This is because dynahash.c's * memory management doesn't allow space to be transferred easily from one * hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the - * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we + * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we * also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer * remainder operations. 
So, define it like this: */ @@ -69,7 +69,7 @@ typedef uint32 bitmapword; /* must be an unsigned type */ #define BITNUM(x) ((x) % BITS_PER_BITMAPWORD) /* number of active words for an exact page: */ -#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1) +#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1) /* number of active words for a lossy chunk: */ #define WORDS_PER_CHUNK ((PAGES_PER_CHUNK - 1) / BITS_PER_BITMAPWORD + 1) @@ -85,7 +85,7 @@ typedef uint32 bitmapword; /* must be an unsigned type */ */ typedef struct PagetableEntry { - BlockNumber blockno; /* page number (hashtable key) */ + BlockNumber blockno; /* page number (hashtable key) */ bool ischunk; /* T = lossy storage, F = exact */ bitmapword words[Max(WORDS_PER_PAGE, WORDS_PER_CHUNK)]; } PagetableEntry; @@ -136,9 +136,9 @@ struct TIDBitmap /* Local function prototypes */ static void tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage); static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, - const TIDBitmap *b); + const TIDBitmap *b); static const PagetableEntry *tbm_find_pageentry(const TIDBitmap *tbm, - BlockNumber pageno); + BlockNumber pageno); static PagetableEntry *tbm_get_pageentry(TIDBitmap *tbm, BlockNumber pageno); static bool tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno); static void tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno); @@ -160,8 +160,8 @@ tbm_create(long maxbytes) long nbuckets; /* - * Create the TIDBitmap struct, with enough trailing space to serve - * the needs of the TBMIterateResult sub-struct. + * Create the TIDBitmap struct, with enough trailing space to serve the + * needs of the TBMIterateResult sub-struct. */ tbm = (TIDBitmap *) palloc(sizeof(TIDBitmap) + MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber)); @@ -173,17 +173,17 @@ tbm_create(long maxbytes) tbm->status = TBM_EMPTY; /* - * Estimate number of hashtable entries we can have within maxbytes. - * This estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT)) - * plus a pointer per hash entry, which is crude but good enough for - * our purpose. Also count an extra Pointer per entry for the arrays - * created during iteration readout. + * Estimate number of hashtable entries we can have within maxbytes. This + * estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT)) plus a + * pointer per hash entry, which is crude but good enough for our purpose. + * Also count an extra Pointer per entry for the arrays created during + * iteration readout. 
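To make the per-page bitmap arithmetic concrete: each page's bitmap is a fixed array of bitmapwords, and a tuple offset maps to a word via division and to a bit via modulo, exactly as the WORDNUM/BITNUM macros do. A small self-contained sketch, with an illustrative MAX_TUPLES_PER_PAGE rather than the real MaxHeapTuplesPerPage:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t bitmapword;        /* must be an unsigned type */

#define BITS_PER_BITMAPWORD 32
#define MAX_TUPLES_PER_PAGE 256     /* illustrative stand-in */

#define WORDNUM(x)      ((x) / BITS_PER_BITMAPWORD)
#define BITNUM(x)       ((x) % BITS_PER_BITMAPWORD)
#define WORDS_PER_PAGE  ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1)

int
main(void)
{
    bitmapword  words[WORDS_PER_PAGE] = {0};
    int         offsets[] = {1, 33, 255};
    int         i;

    /* set one bit per tuple offset, as an exact page entry would */
    for (i = 0; i < 3; i++)
        words[WORDNUM(offsets[i])] |= (bitmapword) 1 << BITNUM(offsets[i]);

    /* read the bits back */
    for (i = 0; i < 3; i++)
        printf("offset %d -> word %d, bit %d, set=%d\n",
               offsets[i], WORDNUM(offsets[i]), BITNUM(offsets[i]),
               (int) ((words[WORDNUM(offsets[i])] >> BITNUM(offsets[i])) & 1));
    return 0;
}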
*/ nbuckets = maxbytes / (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(PagetableEntry)) + sizeof(Pointer) + sizeof(Pointer)); - nbuckets = Min(nbuckets, INT_MAX-1); /* safety limit */ - nbuckets = Max(nbuckets, 16); /* sanity limit */ + nbuckets = Min(nbuckets, INT_MAX - 1); /* safety limit */ + nbuckets = Max(nbuckets, 16); /* sanity limit */ tbm->maxentries = (int) nbuckets; return tbm; @@ -319,7 +319,7 @@ static void tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage) { PagetableEntry *apage; - int wordnum; + int wordnum; if (bpage->ischunk) { @@ -330,7 +330,7 @@ tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage) if (w != 0) { - BlockNumber pg; + BlockNumber pg; pg = bpage->blockno + (wordnum * BITS_PER_BITMAPWORD); while (w != 0) @@ -428,12 +428,12 @@ static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) { const PagetableEntry *bpage; - int wordnum; + int wordnum; if (apage->ischunk) { /* Scan each bit in chunk, try to clear */ - bool candelete = true; + bool candelete = true; for (wordnum = 0; wordnum < WORDS_PER_PAGE; wordnum++) { @@ -442,8 +442,8 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) if (w != 0) { bitmapword neww = w; - BlockNumber pg; - int bitnum; + BlockNumber pg; + int bitnum; pg = apage->blockno + (wordnum * BITS_PER_BITMAPWORD); bitnum = 0; @@ -472,19 +472,19 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) else if (tbm_page_is_lossy(b, apage->blockno)) { /* - * When the page is lossy in b, we have to mark it lossy in a too. - * We know that no bits need be set in bitmap a, but we do not know - * which ones should be cleared, and we have no API for "at most - * these tuples need be checked". (Perhaps it's worth adding that?) + * When the page is lossy in b, we have to mark it lossy in a too. We + * know that no bits need be set in bitmap a, but we do not know which + * ones should be cleared, and we have no API for "at most these + * tuples need be checked". (Perhaps it's worth adding that?) */ tbm_mark_page_lossy(a, apage->blockno); /* - * Note: tbm_mark_page_lossy will have removed apage from a, and - * may have inserted a new lossy chunk instead. We can continue the - * same seq_search scan at the caller level, because it does not - * matter whether we visit such a new chunk or not: it will have - * only the bit for apage->blockno set, which is correct. + * Note: tbm_mark_page_lossy will have removed apage from a, and may + * have inserted a new lossy chunk instead. We can continue the same + * seq_search scan at the caller level, because it does not matter + * whether we visit such a new chunk or not: it will have only the bit + * for apage->blockno set, which is correct. * * We must return false here since apage was already deleted. */ @@ -492,7 +492,7 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) } else { - bool candelete = true; + bool candelete = true; bpage = tbm_find_pageentry(b, apage->blockno); if (bpage != NULL) @@ -535,17 +535,20 @@ tbm_begin_iterate(TIDBitmap *tbm) int nchunks; tbm->iterating = true; + /* * Reset iteration pointers. */ tbm->spageptr = 0; tbm->schunkptr = 0; tbm->schunkbit = 0; + /* * Nothing else to do if no entries, nor if we don't have a hashtable. */ if (tbm->nentries == 0 || tbm->status != TBM_HASH) return; + /* * Create and fill the sorted page lists if we didn't already. 
*/ @@ -591,6 +594,7 @@ tbm_iterate(TIDBitmap *tbm) TBMIterateResult *output = &(tbm->output); Assert(tbm->iterating); + /* * If lossy chunk pages remain, make sure we've advanced schunkptr/ * schunkbit to the next set bit. @@ -598,12 +602,12 @@ tbm_iterate(TIDBitmap *tbm) while (tbm->schunkptr < tbm->nchunks) { PagetableEntry *chunk = tbm->schunks[tbm->schunkptr]; - int schunkbit = tbm->schunkbit; + int schunkbit = tbm->schunkbit; while (schunkbit < PAGES_PER_CHUNK) { - int wordnum = WORDNUM(schunkbit); - int bitnum = BITNUM(schunkbit); + int wordnum = WORDNUM(schunkbit); + int bitnum = BITNUM(schunkbit); if ((chunk->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0) break; @@ -618,6 +622,7 @@ tbm_iterate(TIDBitmap *tbm) tbm->schunkptr++; tbm->schunkbit = 0; } + /* * If both chunk and per-page data remain, must output the numerically * earlier page. @@ -717,7 +722,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno) * * If new, the entry is marked as an exact (non-chunk) entry. * - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. */ static PagetableEntry * @@ -785,8 +790,8 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno) HASH_FIND, NULL); if (page != NULL && page->ischunk) { - int wordnum = WORDNUM(bitno); - int bitnum = BITNUM(bitno); + int wordnum = WORDNUM(bitno); + int bitnum = BITNUM(bitno); if ((page->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0) return true; @@ -797,7 +802,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno) /* * tbm_mark_page_lossy - mark the page number as lossily stored * - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. */ static void @@ -818,9 +823,8 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno) chunk_pageno = pageno - bitno; /* - * Remove any extant non-lossy entry for the page. If the page is - * its own chunk header, however, we skip this and handle the case - * below. + * Remove any extant non-lossy entry for the page. If the page is its own + * chunk header, however, we skip this and handle the case below. */ if (bitno != 0) { @@ -879,10 +883,9 @@ tbm_lossify(TIDBitmap *tbm) /* * XXX Really stupid implementation: this just lossifies pages in - * essentially random order. We should be paying some attention - * to the number of bits set in each page, instead. Also it might - * be a good idea to lossify more than the minimum number of pages - * during each call. + * essentially random order. We should be paying some attention to the + * number of bits set in each page, instead. Also it might be a good idea + * to lossify more than the minimum number of pages during each call. */ Assert(!tbm->iterating); Assert(tbm->status == TBM_HASH); @@ -892,9 +895,10 @@ tbm_lossify(TIDBitmap *tbm) { if (page->ischunk) continue; /* already a chunk header */ + /* - * If the page would become a chunk header, we won't save anything - * by converting it to lossy, so skip it. + * If the page would become a chunk header, we won't save anything by + * converting it to lossy, so skip it. */ if ((page->blockno % PAGES_PER_CHUNK) == 0) continue; @@ -906,9 +910,9 @@ tbm_lossify(TIDBitmap *tbm) return; /* we have done enough */ /* - * Note: tbm_mark_page_lossy may have inserted a lossy chunk into - * the hashtable. 
We can continue the same seq_search scan since - * we do not care whether we visit lossy chunks or not. + * Note: tbm_mark_page_lossy may have inserted a lossy chunk into the + * hashtable. We can continue the same seq_search scan since we do + * not care whether we visit lossy chunks or not. */ } } diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c index 05d7602fefe..9c7a3425858 100644 --- a/src/backend/optimizer/geqo/geqo_erx.c +++ b/src/backend/optimizer/geqo/geqo_erx.c @@ -3,7 +3,7 @@ * geqo_erx.c * edge recombination crossover [ER] * -* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.19 2003/11/29 22:39:49 pgsql Exp $ +* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.20 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -55,8 +55,8 @@ alloc_edge_table(int num_gene) Edge *edge_table; /* - * palloc one extra location so that nodes numbered 1..n can be - * indexed directly; 0 will not be used + * palloc one extra location so that nodes numbered 1..n can be indexed + * directly; 0 will not be used */ edge_table = (Edge *) palloc((num_gene + 1) * sizeof(Edge)); @@ -94,8 +94,7 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table) int i, index1, index2; - int edge_total; /* total number of unique edges in two - * genes */ + int edge_total; /* total number of unique edges in two genes */ /* at first clear the edge table's old data */ for (i = 1; i <= num_gene; i++) @@ -111,15 +110,15 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table) for (index1 = 0; index1 < num_gene; index1++) { /* - * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this - * operaton maps n back to 1 + * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton + * maps n back to 1 */ index2 = (index1 + 1) % num_gene; /* - * edges are bidirectional, i.e. 1->2 is same as 2->1 call - * gimme_edge twice per edge + * edges are bidirectional, i.e. 1->2 is same as 2->1 call gimme_edge + * twice per edge */ edge_total += gimme_edge(tour1[index1], tour1[index2], edge_table); @@ -320,10 +319,10 @@ gimme_gene(Edge edge, Edge *edge_table) */ /* - * The test for minimum_count can probably be removed at some - * point but comments should probably indicate exactly why it is - * guaranteed that the test will always succeed the first time - * around. If it can fail then the code is in error + * The test for minimum_count can probably be removed at some point + * but comments should probably indicate exactly why it is guaranteed + * that the test will always succeed the first time around. If it can + * fail then the code is in error */ @@ -379,8 +378,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene) /* - * how many edges remain? how many gene with four total (initial) - * edges remain? + * how many edges remain? how many gene with four total (initial) edges + * remain? 
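The "circular tour" and "bidirectional edge" remarks in gimme_edge_table amount to this: a tour (t0, t1, ..., tn-1) contributes the undirected edges t0-t1, t1-t2, ..., tn-1-t0, with the modulo wrapping the last city back to the first. A minimal sketch using toy types, not the backend's Edge table:

#include <stdio.h>

typedef int Gene;

/* Print the undirected edges of a circular tour: 1->2, 2->3, 3->1, ... */
static void
print_circular_edges(const Gene *tour, int num_gene)
{
    int         index1;

    for (index1 = 0; index1 < num_gene; index1++)
    {
        /* the modulo maps the last city back to the first one */
        int         index2 = (index1 + 1) % num_gene;

        /* each edge is bidirectional: tour[index1]-tour[index2] */
        printf("%d - %d\n", tour[index1], tour[index2]);
    }
}

int
main(void)
{
    Gene        tour[] = {3, 1, 4, 2};

    print_circular_edges(tour, 4);  /* 3-1, 1-4, 4-2, 2-3 */
    return 0;
}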
*/ for (i = 1; i <= num_gene; i++) @@ -395,8 +394,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene) } /* - * random decision of the gene with remaining edges and whose - * total_edges == 4 + * random decision of the gene with remaining edges and whose total_edges + * == 4 */ if (four_count != 0) @@ -444,15 +443,15 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene) } /* - * edge table seems to be empty; this happens sometimes on the last - * point due to the fact that the first point is removed from the - * table even though only one of its edges has been determined + * edge table seems to be empty; this happens sometimes on the last point + * due to the fact that the first point is removed from the table even + * though only one of its edges has been determined */ else - { /* occurs only at the last point in the - * tour; simply look for the point which - * is not yet used */ + { /* occurs only at the last point in the tour; + * simply look for the point which is not yet + * used */ for (i = 1; i <= num_gene; i++) if (edge_table[i].unused_edges >= 0) diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c index d1bb3059fc0..0a2dee08dc8 100644 --- a/src/backend/optimizer/geqo/geqo_eval.c +++ b/src/backend/optimizer/geqo/geqo_eval.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.76 2005/06/09 04:18:59 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.77 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -52,15 +52,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata) struct HTAB *savehash; /* - * Because gimme_tree considers both left- and right-sided trees, - * there is no difference between a tour (a,b,c,d,...) and a tour - * (b,a,c,d,...) --- the same join orders will be considered. To avoid - * redundant cost calculations, we simply reject tours where tour[0] > - * tour[1], assigning them an artificially bad fitness. + * Because gimme_tree considers both left- and right-sided trees, there is + * no difference between a tour (a,b,c,d,...) and a tour (b,a,c,d,...) --- + * the same join orders will be considered. To avoid redundant cost + * calculations, we simply reject tours where tour[0] > tour[1], assigning + * them an artificially bad fitness. * * init_tour() is aware of this rule and so we should never reject a tour - * during the initial filling of the pool. It seems difficult to - * persuade the recombination logic never to break the rule, however. + * during the initial filling of the pool. It seems difficult to persuade + * the recombination logic never to break the rule, however. */ if (num_gene >= 2 && tour[0] > tour[1]) return DBL_MAX; @@ -69,10 +69,10 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata) * Create a private memory context that will hold all temp storage * allocated inside gimme_tree(). * - * Since geqo_eval() will be called many times, we can't afford to let - * all that memory go unreclaimed until end of statement. Note we - * make the temp context a child of the planner's normal context, so - * that it will be freed even if we abort via ereport(ERROR). + * Since geqo_eval() will be called many times, we can't afford to let all + * that memory go unreclaimed until end of statement. 
Note we make the + * temp context a child of the planner's normal context, so that it will + * be freed even if we abort via ereport(ERROR). */ mycontext = AllocSetContextCreate(CurrentMemoryContext, "GEQO", @@ -84,15 +84,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata) /* * gimme_tree will add entries to root->join_rel_list, which may or may * not already contain some entries. The newly added entries will be - * recycled by the MemoryContextDelete below, so we must ensure that - * the list is restored to its former state before exiting. We can - * do this by truncating the list to its original length. NOTE this - * assumes that any added entries are appended at the end! + * recycled by the MemoryContextDelete below, so we must ensure that the + * list is restored to its former state before exiting. We can do this by + * truncating the list to its original length. NOTE this assumes that any + * added entries are appended at the end! * - * We also must take care not to mess up the outer join_rel_hash, - * if there is one. We can do this by just temporarily setting the - * link to NULL. (If we are dealing with enough join rels, which we - * very likely are, a new hash table will get built and used locally.) + * We also must take care not to mess up the outer join_rel_hash, if there is + * one. We can do this by just temporarily setting the link to NULL. (If + * we are dealing with enough join rels, which we very likely are, a new + * hash table will get built and used locally.) */ savelength = list_length(evaldata->root->join_rel_list); savehash = evaldata->root->join_rel_hash; @@ -170,23 +170,22 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata) * Push each relation onto the stack in the specified order. After * pushing each relation, see whether the top two stack entries are * joinable according to the desirable_join() heuristics. If so, join - * them into one stack entry, and try again to combine with the next - * stack entry down (if any). When the stack top is no longer - * joinable, continue to the next input relation. After we have - * pushed the last input relation, the heuristics are disabled and we - * force joining all the remaining stack entries. + * them into one stack entry, and try again to combine with the next stack + * entry down (if any). When the stack top is no longer joinable, + * continue to the next input relation. After we have pushed the last + * input relation, the heuristics are disabled and we force joining all + * the remaining stack entries. * * If desirable_join() always returns true, this produces a straight - * left-to-right join just like the old code. Otherwise we may - * produce a bushy plan or a left/right-sided plan that really - * corresponds to some tour other than the one given. To the extent - * that the heuristics are helpful, however, this will be a better - * plan than the raw tour. + * left-to-right join just like the old code. Otherwise we may produce a + * bushy plan or a left/right-sided plan that really corresponds to some + * tour other than the one given. To the extent that the heuristics are + * helpful, however, this will be a better plan than the raw tour. * - * Also, when a join attempt fails (because of IN-clause constraints), we - * may be able to recover and produce a workable plan, where the old - * code just had to give up. This case acts the same as a false - * result from desirable_join(). 
+ * Also, when a join attempt fails (because of IN-clause constraints), we may + * be able to recover and produce a workable plan, where the old code just + * had to give up. This case acts the same as a false result from + * desirable_join(). */ for (rel_count = 0; rel_count < num_gene; rel_count++) { @@ -199,8 +198,8 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata) stack_depth++; /* - * While it's feasible, pop the top two stack entries and replace - * with their join. + * While it's feasible, pop the top two stack entries and replace with + * their join. */ while (stack_depth >= 2) { @@ -208,20 +207,18 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata) RelOptInfo *inner_rel = stack[stack_depth - 1]; /* - * Don't pop if heuristics say not to join now. However, once - * we have exhausted the input, the heuristics can't prevent - * popping. + * Don't pop if heuristics say not to join now. However, once we + * have exhausted the input, the heuristics can't prevent popping. */ if (rel_count < num_gene - 1 && !desirable_join(evaldata->root, outer_rel, inner_rel)) break; /* - * Construct a RelOptInfo representing the join of these two - * input relations. These are always inner joins. Note that - * we expect the joinrel not to exist in root->join_rel_list - * yet, and so the paths constructed for it will only include - * the ones we want. + * Construct a RelOptInfo representing the join of these two input + * relations. These are always inner joins. Note that we expect + * the joinrel not to exist in root->join_rel_list yet, and so the + * paths constructed for it will only include the ones we want. */ joinrel = make_join_rel(evaldata->root, outer_rel, inner_rel, JOIN_INNER); @@ -266,9 +263,9 @@ desirable_join(PlannerInfo *root, return true; /* - * Join if the rels are members of the same IN sub-select. This is - * needed to improve the odds that we will find a valid solution in a - * case where an IN sub-select has a clauseless join. + * Join if the rels are members of the same IN sub-select. This is needed + * to improve the odds that we will find a valid solution in a case where + * an IN sub-select has a clauseless join. 
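The gimme_tree comment describes a simple stack discipline: push each input in tour order, merge the top two stack entries while a heuristic approves, and merge unconditionally once the input is exhausted. The following schematic sketch shows just that control flow with plain integers; desirable_merge() is a toy stand-in for desirable_join():

#include <stdio.h>

#define MAX_ITEMS 16

/* toy stand-in for desirable_join(): only merge items of equal parity */
static int
desirable_merge(int a, int b)
{
    return (a % 2) == (b % 2);
}

static int
merge_with_heuristic(const int *items, int n)
{
    int         stack[MAX_ITEMS];
    int         depth = 0;
    int         i;

    for (i = 0; i < n; i++)
    {
        stack[depth++] = items[i];

        /* while feasible, pop the top two entries and replace with their merge */
        while (depth >= 2)
        {
            /* heuristics may refuse, but not once the input is exhausted */
            if (i < n - 1 && !desirable_merge(stack[depth - 2], stack[depth - 1]))
                break;
            stack[depth - 2] += stack[depth - 1];   /* "join" = sum, here */
            depth--;
        }
    }
    return stack[0];            /* everything has been merged into one entry */
}

int
main(void)
{
    int         items[] = {1, 3, 2, 4, 5};

    printf("%d\n", merge_with_heuristic(items, 5));
    return 0;
}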
*/ foreach(l, root->in_info_list) { diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c index c027f4370c3..d7618c5d67d 100644 --- a/src/backend/optimizer/geqo/geqo_main.c +++ b/src/backend/optimizer/geqo/geqo_main.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.50 2005/06/08 23:02:04 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.51 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,10 +106,9 @@ geqo(PlannerInfo *root, int number_of_rels, List *initial_rels) random_init_pool(pool, &evaldata); /* sort the pool according to cheapest path as fitness */ - sort_pool(pool); /* we have to do it only one time, since - * all kids replace the worst individuals - * in future (-> geqo_pool.c:spread_chromo - * ) */ + sort_pool(pool); /* we have to do it only one time, since all + * kids replace the worst individuals in + * future (-> geqo_pool.c:spread_chromo ) */ #ifdef GEQO_DEBUG elog(DEBUG1, "GEQO selected %d pool entries, best %.2f, worst %.2f", diff --git a/src/backend/optimizer/geqo/geqo_misc.c b/src/backend/optimizer/geqo/geqo_misc.c index 5afdcd7b8f5..ff5bd07e6ad 100644 --- a/src/backend/optimizer/geqo/geqo_misc.c +++ b/src/backend/optimizer/geqo/geqo_misc.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.42 2004/12/31 21:59:58 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.43 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -41,10 +41,10 @@ avg_pool(Pool *pool) elog(ERROR, "pool_size is zero"); /* - * Since the pool may contain multiple occurrences of DBL_MAX, divide - * by pool->size before summing, not after, to avoid overflow. This - * loses a little in speed and accuracy, but this routine is only used - * for debug printouts, so we don't care that much. + * Since the pool may contain multiple occurrences of DBL_MAX, divide by + * pool->size before summing, not after, to avoid overflow. This loses a + * little in speed and accuracy, but this routine is only used for debug + * printouts, so we don't care that much. */ for (i = 0; i < pool->size; i++) cumulative += pool->data[i].worth / pool->size; diff --git a/src/backend/optimizer/geqo/geqo_pool.c b/src/backend/optimizer/geqo/geqo_pool.c index f6881c0f5ff..83927facae5 100644 --- a/src/backend/optimizer/geqo/geqo_pool.c +++ b/src/backend/optimizer/geqo/geqo_pool.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.26 2004/12/31 21:59:58 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.27 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -96,13 +96,12 @@ random_init_pool(Pool *pool, GeqoEvalData *evaldata) int bad = 0; /* - * We immediately discard any invalid individuals (those that - * geqo_eval returns DBL_MAX for), thereby not wasting pool space on - * them. 
+ * We immediately discard any invalid individuals (those that geqo_eval + * returns DBL_MAX for), thereby not wasting pool space on them. * - * If we fail to make any valid individuals after 10000 tries, give up; - * this probably means something is broken, and we shouldn't just let - * ourselves get stuck in an infinite loop. + * If we fail to make any valid individuals after 10000 tries, give up; this + * probably means something is broken, and we shouldn't just let ourselves + * get stuck in an infinite loop. */ i = 0; while (i < pool->size) @@ -223,8 +222,8 @@ spread_chromo(Chromosome *chromo, Pool *pool) /* - * these 2 cases move the search indices since a new location has - * not yet been found. + * these 2 cases move the search indices since a new location has not + * yet been found. */ else if (chromo->worth < pool->data[mid].worth) @@ -242,8 +241,7 @@ spread_chromo(Chromosome *chromo, Pool *pool) /* now we have index for chromo */ /* - * move every gene from index on down one position to make room for - * chromo + * move every gene from index on down one position to make room for chromo */ /* diff --git a/src/backend/optimizer/geqo/geqo_recombination.c b/src/backend/optimizer/geqo/geqo_recombination.c index d2ebee17653..c73e5b2a79e 100644 --- a/src/backend/optimizer/geqo/geqo_recombination.c +++ b/src/backend/optimizer/geqo/geqo_recombination.c @@ -3,7 +3,7 @@ * geqo_recombination.c * misc recombination procedures * -* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.14 2004/08/29 05:06:43 momjian Exp $ +* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.15 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,8 +62,8 @@ init_tour(Gene *tour, int num_gene) } /* - * Since geqo_eval() will reject tours where tour[0] > tour[1], we may - * as well switch the two to make it a valid tour. + * Since geqo_eval() will reject tours where tour[0] > tour[1], we may as + * well switch the two to make it a valid tour. */ if (num_gene >= 2 && tour[0] > tour[1]) { @@ -86,8 +86,8 @@ alloc_city_table(int num_gene) City *city_table; /* - * palloc one extra location so that nodes numbered 1..n can be - * indexed directly; 0 will not be used + * palloc one extra location so that nodes numbered 1..n can be indexed + * directly; 0 will not be used */ city_table = (City *) palloc((num_gene + 1) * sizeof(City)); diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c index 92b735cb282..32a3e83ae03 100644 --- a/src/backend/optimizer/geqo/geqo_selection.c +++ b/src/backend/optimizer/geqo/geqo_selection.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.19 2005/06/14 14:21:16 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.20 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -86,13 +86,14 @@ linear(int pool_size, double bias) /* bias is y-intercept of linear /* * If geqo_rand() returns exactly 1.0 then we will get exactly max from - * this equation, whereas we need 0 <= index < max. Also it seems possible - * that roundoff error might deliver values slightly outside the range; - * in particular avoid passing a value slightly less than 0 to sqrt(). 
- * If we get a bad value just try again. + * this equation, whereas we need 0 <= index < max. Also it seems + * possible that roundoff error might deliver values slightly outside the + * range; in particular avoid passing a value slightly less than 0 to + * sqrt(). If we get a bad value just try again. */ - do { - double sqrtval; + do + { + double sqrtval; sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * geqo_rand(); if (sqrtval > 0.0) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index aa14deacd0c..d8a42b82548 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.136 2005/08/22 17:34:58 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.137 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,7 +62,7 @@ static void compare_tlist_datatypes(List *tlist, List *colTypes, static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual, bool *differentTypes); static void subquery_push_qual(Query *subquery, - RangeTblEntry *rte, Index rti, Node *qual); + RangeTblEntry *rte, Index rti, Node *qual); static void recurse_push_qual(Node *setOp, Query *topquery, RangeTblEntry *rte, Index rti, Node *qual); @@ -105,7 +105,7 @@ make_one_rel(PlannerInfo *root) if (brel == NULL) continue; - Assert(brel->relid == rti); /* sanity check on array */ + Assert(brel->relid == rti); /* sanity check on array */ /* ignore RTEs that are "other rels" */ if (brel->reloptkind != RELOPT_BASEREL) @@ -134,9 +134,9 @@ set_base_rel_pathlists(PlannerInfo *root) Index rti; /* - * Note: because we call expand_inherited_rtentry inside the loop, - * it's quite possible for the base_rel_array to be enlarged while - * the loop runs. Hence don't try to optimize the loop. + * Note: because we call expand_inherited_rtentry inside the loop, it's + * quite possible for the base_rel_array to be enlarged while the loop + * runs. Hence don't try to optimize the loop. */ for (rti = 1; rti < root->base_rel_array_size; rti++) { @@ -255,8 +255,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, ListCell *il; /* - * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; - * can we do better? + * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; can + * we do better? */ if (list_member_int(root->parse->rowMarks, parentRTindex)) ereport(ERROR, @@ -270,8 +270,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, rel->width = 0; /* - * Generate access paths for each table in the tree (parent AND - * children), and pick the cheapest path for each table. + * Generate access paths for each table in the tree (parent AND children), + * and pick the cheapest path for each table. */ foreach(il, inheritlist) { @@ -286,18 +286,17 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, childOID = childrte->relid; /* - * Make a RelOptInfo for the child so we can do planning. - * Mark it as an "other rel" since it will not be part of the - * main join tree. + * Make a RelOptInfo for the child so we can do planning. Mark it as + * an "other rel" since it will not be part of the main join tree. */ childrel = build_other_rel(root, childRTindex); /* - * Copy the parent's targetlist and restriction quals to the - * child, with attribute-number adjustment as needed. 
We don't - * bother to copy the join quals, since we can't do any joining of - * the individual tables. Also, we just zap attr_needed rather - * than trying to adjust it; it won't be looked at in the child. + * Copy the parent's targetlist and restriction quals to the child, + * with attribute-number adjustment as needed. We don't bother to + * copy the join quals, since we can't do any joining of the + * individual tables. Also, we just zap attr_needed rather than + * trying to adjust it; it won't be looked at in the child. */ childrel->reltargetlist = (List *) adjust_inherited_attrs((Node *) rel->reltargetlist, @@ -320,13 +319,14 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, */ if (constraint_exclusion) { - List *constraint_pred; + List *constraint_pred; constraint_pred = get_relation_constraints(childOID, childrel); + /* - * We do not currently enforce that CHECK constraints contain - * only immutable functions, so it's necessary to check here. - * We daren't draw conclusions from plan-time evaluation of + * We do not currently enforce that CHECK constraints contain only + * immutable functions, so it's necessary to check here. We + * daren't draw conclusions from plan-time evaluation of * non-immutable functions. */ if (!contain_mutable_functions((Node *) constraint_pred)) @@ -351,9 +351,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, subpaths = lappend(subpaths, childrel->cheapest_total_path); /* - * Propagate size information from the child back to the parent. - * For simplicity, we use the largest widths from any child as the - * parent estimates. + * Propagate size information from the child back to the parent. For + * simplicity, we use the largest widths from any child as the parent + * estimates. */ rel->rows += childrel->rows; if (childrel->width > rel->width) @@ -377,9 +377,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, } /* - * Finally, build Append path and install it as the only access path - * for the parent rel. (Note: this is correct even if we have zero - * or one live subpath due to constraint exclusion.) + * Finally, build Append path and install it as the only access path for + * the parent rel. (Note: this is correct even if we have zero or one + * live subpath due to constraint exclusion.) */ add_path(rel, (Path *) create_append_path(rel, subpaths)); @@ -430,18 +430,18 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * If there are any restriction clauses that have been attached to the - * subquery relation, consider pushing them down to become WHERE or - * HAVING quals of the subquery itself. This transformation is useful - * because it may allow us to generate a better plan for the subquery - * than evaluating all the subquery output rows and then filtering them. + * subquery relation, consider pushing them down to become WHERE or HAVING + * quals of the subquery itself. This transformation is useful because it + * may allow us to generate a better plan for the subquery than evaluating + * all the subquery output rows and then filtering them. * - * There are several cases where we cannot push down clauses. - * Restrictions involving the subquery are checked by - * subquery_is_pushdown_safe(). Restrictions on individual clauses - * are checked by qual_is_pushdown_safe(). + * There are several cases where we cannot push down clauses. Restrictions + * involving the subquery are checked by subquery_is_pushdown_safe(). 
+ * Restrictions on individual clauses are checked by + * qual_is_pushdown_safe(). * - * Non-pushed-down clauses will get evaluated as qpquals of the - * SubqueryScan node. + * Non-pushed-down clauses will get evaluated as qpquals of the SubqueryScan + * node. * * XXX Are there any cases where we want to make a policy decision not to * push down a pushable qual, because it'd result in a worse plan? @@ -475,10 +475,10 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, pfree(differentTypes); /* - * We can safely pass the outer tuple_fraction down to the subquery - * if the outer level has no joining, aggregation, or sorting to do. - * Otherwise we'd better tell the subquery to plan for full retrieval. - * (XXX This could probably be made more intelligent ...) + * We can safely pass the outer tuple_fraction down to the subquery if the + * outer level has no joining, aggregation, or sorting to do. Otherwise + * we'd better tell the subquery to plan for full retrieval. (XXX This + * could probably be made more intelligent ...) */ if (parse->hasAggs || parse->groupClause || @@ -540,8 +540,8 @@ make_fromexpr_rel(PlannerInfo *root, FromExpr *from) /* * Count the number of child jointree nodes. This is the depth of the - * dynamic-programming algorithm we must employ to consider all ways - * of joining the child nodes. + * dynamic-programming algorithm we must employ to consider all ways of + * joining the child nodes. */ levels_needed = list_length(from->fromlist); @@ -603,11 +603,11 @@ make_one_rel_by_joins(PlannerInfo *root, int levels_needed, List *initial_rels) RelOptInfo *rel; /* - * We employ a simple "dynamic programming" algorithm: we first find - * all ways to build joins of two jointree items, then all ways to - * build joins of three items (from two-item joins and single items), - * then four-item joins, and so on until we have considered all ways - * to join all the items into one rel. + * We employ a simple "dynamic programming" algorithm: we first find all + * ways to build joins of two jointree items, then all ways to build joins + * of three items (from two-item joins and single items), then four-item + * joins, and so on until we have considered all ways to join all the + * items into one rel. * * joinitems[j] is a list of all the j-item rels. Initially we set * joinitems[1] to represent all the single-jointree-item relations. @@ -823,8 +823,8 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual, return false; /* - * Examine all Vars used in clause; since it's a restriction clause, - * all such Vars must refer to subselect output columns. + * Examine all Vars used in clause; since it's a restriction clause, all + * such Vars must refer to subselect output columns. */ vars = pull_var_clause(qual, false); foreach(vl, vars) @@ -835,9 +835,9 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual, Assert(var->varno == rti); /* - * We use a bitmapset to avoid testing the same attno more than - * once. (NB: this only works because subquery outputs can't have - * negative attnos.) + * We use a bitmapset to avoid testing the same attno more than once. + * (NB: this only works because subquery outputs can't have negative + * attnos.) */ if (bms_is_member(var->varattno, tested)) continue; @@ -893,11 +893,10 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual) else { /* - * We need to replace Vars in the qual (which must refer to - * outputs of the subquery) with copies of the subquery's - * targetlist expressions. 
Note that at this point, any uplevel - * Vars in the qual should have been replaced with Params, so they - * need no work. + * We need to replace Vars in the qual (which must refer to outputs of + * the subquery) with copies of the subquery's targetlist expressions. + * Note that at this point, any uplevel Vars in the qual should have + * been replaced with Params, so they need no work. * * This step also ensures that when we are pushing into a setop tree, * each component query gets its own copy of the qual. @@ -907,9 +906,9 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual) CMD_SELECT, 0); /* - * Now attach the qual to the proper place: normally WHERE, but - * if the subquery uses grouping or aggregation, put it in HAVING - * (since the qual really refers to the group-result rows). + * Now attach the qual to the proper place: normally WHERE, but if the + * subquery uses grouping or aggregation, put it in HAVING (since the + * qual really refers to the group-result rows). */ if (subquery->hasAggs || subquery->groupClause || subquery->havingQual) subquery->havingQual = make_and_qual(subquery->havingQual, qual); @@ -919,8 +918,8 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual) /* * We need not change the subquery's hasAggs or hasSublinks flags, - * since we can't be pushing down any aggregates that weren't - * there before, and we don't push down subselects at all. + * since we can't be pushing down any aggregates that weren't there + * before, and we don't push down subselects at all. */ } } diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c index aad977164a7..9a4990898e9 100644 --- a/src/backend/optimizer/path/clausesel.c +++ b/src/backend/optimizer/path/clausesel.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.74 2005/10/11 16:44:40 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.75 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -82,7 +82,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause, * hisel + losel + null_frac - 1.) * * If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation - * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation + * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation * yields an impossible (negative) result. * * A free side-effect is that we can recognize redundant inequalities such @@ -102,9 +102,9 @@ clauselist_selectivity(PlannerInfo *root, ListCell *l; /* - * Initial scan over clauses. Anything that doesn't look like a - * potential rangequery clause gets multiplied into s1 and forgotten. - * Anything that does gets inserted into an rqlist entry. + * Initial scan over clauses. Anything that doesn't look like a potential + * rangequery clause gets multiplied into s1 and forgotten. Anything that + * does gets inserted into an rqlist entry. */ foreach(l, clauses) { @@ -127,10 +127,10 @@ clauselist_selectivity(PlannerInfo *root, rinfo = NULL; /* - * See if it looks like a restriction clause with a pseudoconstant - * on one side. (Anything more complicated than that might not - * behave in the simple way we are expecting.) Most of the tests - * here can be done more efficiently with rinfo than without. + * See if it looks like a restriction clause with a pseudoconstant on + * one side. 
(Anything more complicated than that might not behave in + * the simple way we are expecting.) Most of the tests here can be + * done more efficiently with rinfo than without. */ if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2) { @@ -142,10 +142,10 @@ clauselist_selectivity(PlannerInfo *root, { ok = (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) && (is_pseudo_constant_clause_relids(lsecond(expr->args), - rinfo->right_relids) || + rinfo->right_relids) || (varonleft = false, - is_pseudo_constant_clause_relids(linitial(expr->args), - rinfo->left_relids))); + is_pseudo_constant_clause_relids(linitial(expr->args), + rinfo->left_relids))); } else { @@ -159,8 +159,8 @@ clauselist_selectivity(PlannerInfo *root, { /* * If it's not a "<" or ">" operator, just merge the - * selectivity in generically. But if it's the right - * oprrest, add the clause to rqlist for later processing. + * selectivity in generically. But if it's the right oprrest, + * add the clause to rqlist for later processing. */ switch (get_oprrest(expr->opno)) { @@ -199,8 +199,8 @@ clauselist_selectivity(PlannerInfo *root, /* * Exact equality to the default value probably means the - * selectivity function punted. This is not airtight but - * should be good enough. + * selectivity function punted. This is not airtight but should + * be good enough. */ if (rqlist->hibound == DEFAULT_INEQ_SEL || rqlist->lobound == DEFAULT_INEQ_SEL) @@ -289,8 +289,8 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause, for (rqelem = *rqlist; rqelem; rqelem = rqelem->next) { /* - * We use full equal() here because the "var" might be a function - * of one or more attributes of the same relation... + * We use full equal() here because the "var" might be a function of + * one or more attributes of the same relation... */ if (!equal(var, rqelem->var)) continue; @@ -423,17 +423,16 @@ clause_selectivity(PlannerInfo *root, rinfo = (RestrictInfo *) clause; /* - * If possible, cache the result of the selectivity calculation - * for the clause. We can cache if varRelid is zero or the clause - * contains only vars of that relid --- otherwise varRelid will - * affect the result, so mustn't cache. We also have to be - * careful about the jointype. It's OK to cache when jointype is - * JOIN_INNER or one of the outer join types (any given outer-join - * clause should always be examined with the same jointype, so - * result won't change). It's not OK to cache when jointype is one - * of the special types associated with IN processing, because the - * same clause may be examined with different jointypes and the - * result should vary. + * If possible, cache the result of the selectivity calculation for + * the clause. We can cache if varRelid is zero or the clause + * contains only vars of that relid --- otherwise varRelid will affect + * the result, so mustn't cache. We also have to be careful about the + * jointype. It's OK to cache when jointype is JOIN_INNER or one of + * the outer join types (any given outer-join clause should always be + * examined with the same jointype, so result won't change). It's not + * OK to cache when jointype is one of the special types associated + * with IN processing, because the same clause may be examined with + * different jointypes and the result should vary. 
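The range-pair arithmetic cited at the top of the clausesel.c hunk (hisel + losel + null_frac - 1, falling back to a default when an input looks like a punt or the result is impossible) can be shown in isolation. The two constants below are stand-ins for the backend's defaults, and a zero null fraction is assumed:

#include <stdio.h>

/* stand-ins for the backend's default selectivity constants */
#define DEFAULT_INEQ_SEL        0.3333333333333333
#define DEFAULT_RANGE_INEQ_SEL  0.005

/*
 * Combine the selectivities of "x < hi" (hisel) and "x > lo" (losel) into
 * a selectivity for "lo < x < hi", falling back to the default when either
 * input looks like a punt or the equation yields an impossible result.
 */
static double
range_selectivity(double losel, double hisel, double null_frac)
{
    double      s2;

    if (hisel == DEFAULT_INEQ_SEL || losel == DEFAULT_INEQ_SEL)
        return DEFAULT_RANGE_INEQ_SEL;

    s2 = hisel + losel + null_frac - 1.0;
    if (s2 <= 0.0)
        return DEFAULT_RANGE_INEQ_SEL;
    return s2;
}

int
main(void)
{
    /* e.g. 70% of rows are below hi and 40% are above lo: 10% lie between */
    printf("%g\n", range_selectivity(0.4, 0.7, 0.0));
    return 0;
}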
*/ if (varRelid == 0 || bms_is_subset_singleton(rinfo->clause_relids, varRelid)) @@ -477,8 +476,8 @@ clause_selectivity(PlannerInfo *root, Var *var = (Var *) clause; /* - * We probably shouldn't ever see an uplevel Var here, but if we - * do, return the default selectivity... + * We probably shouldn't ever see an uplevel Var here, but if we do, + * return the default selectivity... */ if (var->varlevelsup == 0 && (varRelid == 0 || varRelid == (int) var->varno)) @@ -488,23 +487,23 @@ clause_selectivity(PlannerInfo *root, if (rte->rtekind == RTE_SUBQUERY) { /* - * XXX not smart about subquery references... any way to - * do better? + * XXX not smart about subquery references... any way to do + * better? */ s1 = 0.5; } else { /* - * A Var at the top of a clause must be a bool Var. This - * is equivalent to the clause reln.attribute = 't', so we + * A Var at the top of a clause must be a bool Var. This is + * equivalent to the clause reln.attribute = 't', so we * compute the selectivity as if that is what we have. */ s1 = restriction_selectivity(root, BooleanEqualOperator, list_make2(var, - makeBoolConst(true, - false)), + makeBoolConst(true, + false)), varRelid); } } @@ -534,7 +533,7 @@ clause_selectivity(PlannerInfo *root, { /* inverse of the selectivity of the underlying clause */ s1 = 1.0 - clause_selectivity(root, - (Node *) get_notclausearg((Expr *) clause), + (Node *) get_notclausearg((Expr *) clause), varRelid, jointype); } @@ -576,17 +575,16 @@ clause_selectivity(PlannerInfo *root, { /* * If we are considering a nestloop join then all clauses are - * restriction clauses, since we are only interested in the - * one relation. + * restriction clauses, since we are only interested in the one + * relation. */ is_join_clause = false; } else { /* - * Otherwise, it's a join if there's more than one relation - * used. We can optimize this calculation if an rinfo was - * passed. + * Otherwise, it's a join if there's more than one relation used. + * We can optimize this calculation if an rinfo was passed. */ if (rinfo) is_join_clause = (bms_membership(rinfo->clause_relids) == @@ -613,8 +611,8 @@ clause_selectivity(PlannerInfo *root, else if (is_funcclause(clause)) { /* - * This is not an operator, so we guess at the selectivity. THIS - * IS A HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE + * This is not an operator, so we guess at the selectivity. THIS IS A + * HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE * SELECTIVITIES THEMSELVES. -- JMH 7/9/92 */ s1 = (Selectivity) 0.3333333; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index bb506678ce4..8a1df9e0a2d 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -49,7 +49,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.148 2005/10/05 17:19:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.149 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -121,8 +121,8 @@ clamp_row_est(double nrows) { /* * Force estimate to be at least one row, to make explain output look - * better and to avoid possible divide-by-zero when interpolating - * costs. Make it an integer, too. + * better and to avoid possible divide-by-zero when interpolating costs. + * Make it an integer, too. 
*/ if (nrows < 1.0) nrows = 1.0; @@ -155,12 +155,11 @@ cost_seqscan(Path *path, PlannerInfo *root, /* * disk costs * - * The cost of reading a page sequentially is 1.0, by definition. Note - * that the Unix kernel will typically do some amount of read-ahead - * optimization, so that this cost is less than the true cost of - * reading a page from disk. We ignore that issue here, but must take - * it into account when estimating the cost of non-sequential - * accesses! + * The cost of reading a page sequentially is 1.0, by definition. Note that + * the Unix kernel will typically do some amount of read-ahead + * optimization, so that this cost is less than the true cost of reading a + * page from disk. We ignore that issue here, but must take it into + * account when estimating the cost of non-sequential accesses! */ run_cost += baserel->pages; /* sequential fetches with cost 1.0 */ @@ -276,10 +275,10 @@ cost_index(IndexPath *path, PlannerInfo *root, startup_cost += disable_cost; /* - * Call index-access-method-specific code to estimate the processing - * cost for scanning the index, as well as the selectivity of the - * index (ie, the fraction of main-table tuples we will have to - * retrieve) and its correlation to the main-table tuple order. + * Call index-access-method-specific code to estimate the processing cost + * for scanning the index, as well as the selectivity of the index (ie, + * the fraction of main-table tuples we will have to retrieve) and its + * correlation to the main-table tuple order. */ OidFunctionCall7(index->amcostestimate, PointerGetDatum(root), @@ -292,8 +291,8 @@ cost_index(IndexPath *path, PlannerInfo *root, /* * Save amcostestimate's results for possible use in bitmap scan planning. - * We don't bother to save indexStartupCost or indexCorrelation, because - * a bitmap scan doesn't care about either. + * We don't bother to save indexStartupCost or indexCorrelation, because a + * bitmap scan doesn't care about either. */ path->indextotalcost = indexTotalCost; path->indexselectivity = indexSelectivity; @@ -366,19 +365,18 @@ cost_index(IndexPath *path, PlannerInfo *root, } /* - * min_IO_cost corresponds to the perfectly correlated case - * (csquared=1), max_IO_cost to the perfectly uncorrelated case - * (csquared=0). Note that we just charge random_page_cost per page - * in the uncorrelated case, rather than using - * cost_nonsequential_access, since we've already accounted for - * caching effects by using the Mackert model. + * min_IO_cost corresponds to the perfectly correlated case (csquared=1), + * max_IO_cost to the perfectly uncorrelated case (csquared=0). Note that + * we just charge random_page_cost per page in the uncorrelated case, + * rather than using cost_nonsequential_access, since we've already + * accounted for caching effects by using the Mackert model. */ min_IO_cost = ceil(indexSelectivity * T); max_IO_cost = pages_fetched * random_page_cost; /* - * Now interpolate based on estimated index order correlation to get - * total disk I/O cost for main table accesses. + * Now interpolate based on estimated index order correlation to get total + * disk I/O cost for main table accesses. */ csquared = indexCorrelation * indexCorrelation; @@ -390,9 +388,9 @@ cost_index(IndexPath *path, PlannerInfo *root, * Normally the indexquals will be removed from the list of restriction * clauses that we have to evaluate as qpquals, so we should subtract * their costs from baserestrictcost. 
But if we are doing a join then - * some of the indexquals are join clauses and shouldn't be - * subtracted. Rather than work out exactly how much to subtract, we - * don't subtract anything. + * some of the indexquals are join clauses and shouldn't be subtracted. + * Rather than work out exactly how much to subtract, we don't subtract + * anything. */ startup_cost += baserel->baserestrictcost.startup; cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple; @@ -467,9 +465,9 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, /* * For small numbers of pages we should charge random_page_cost apiece, * while if nearly all the table's pages are being read, it's more - * appropriate to charge 1.0 apiece. The effect is nonlinear, too. - * For lack of a better idea, interpolate like this to determine the - * cost per page. + * appropriate to charge 1.0 apiece. The effect is nonlinear, too. For + * lack of a better idea, interpolate like this to determine the cost per + * page. */ if (pages_fetched >= 2.0) cost_per_page = random_page_cost - @@ -482,10 +480,10 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, /* * Estimate CPU costs per tuple. * - * Often the indexquals don't need to be rechecked at each tuple ... - * but not always, especially not if there are enough tuples involved - * that the bitmaps become lossy. For the moment, just assume they - * will be rechecked always. + * Often the indexquals don't need to be rechecked at each tuple ... but not + * always, especially not if there are enough tuples involved that the + * bitmaps become lossy. For the moment, just assume they will be + * rechecked always. */ startup_cost += baserel->baserestrictcost.startup; cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple; @@ -527,7 +525,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec) * Estimate the cost of a BitmapAnd node * * Note that this considers only the costs of index scanning and bitmap - * creation, not the eventual heap access. In that sense the object isn't + * creation, not the eventual heap access. In that sense the object isn't * truly a Path, but it has enough path-like properties (costs in particular) * to warrant treating it as one. */ @@ -535,24 +533,24 @@ void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root) { Cost totalCost; - Selectivity selec; + Selectivity selec; ListCell *l; /* - * We estimate AND selectivity on the assumption that the inputs - * are independent. This is probably often wrong, but we don't - * have the info to do better. + * We estimate AND selectivity on the assumption that the inputs are + * independent. This is probably often wrong, but we don't have the info + * to do better. * * The runtime cost of the BitmapAnd itself is estimated at 100x - * cpu_operator_cost for each tbm_intersect needed. Probably too - * small, definitely too simplistic? + * cpu_operator_cost for each tbm_intersect needed. Probably too small, + * definitely too simplistic? 
*/ totalCost = 0.0; selec = 1.0; foreach(l, path->bitmapquals) { - Path *subpath = (Path *) lfirst(l); - Cost subCost; + Path *subpath = (Path *) lfirst(l); + Cost subCost; Selectivity subselec; cost_bitmap_tree_node(subpath, &subCost, &subselec); @@ -578,25 +576,25 @@ void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root) { Cost totalCost; - Selectivity selec; + Selectivity selec; ListCell *l; /* - * We estimate OR selectivity on the assumption that the inputs - * are non-overlapping, since that's often the case in "x IN (list)" - * type situations. Of course, we clamp to 1.0 at the end. + * We estimate OR selectivity on the assumption that the inputs are + * non-overlapping, since that's often the case in "x IN (list)" type + * situations. Of course, we clamp to 1.0 at the end. * * The runtime cost of the BitmapOr itself is estimated at 100x - * cpu_operator_cost for each tbm_union needed. Probably too - * small, definitely too simplistic? We are aware that the tbm_unions - * are optimized out when the inputs are BitmapIndexScans. + * cpu_operator_cost for each tbm_union needed. Probably too small, + * definitely too simplistic? We are aware that the tbm_unions are + * optimized out when the inputs are BitmapIndexScans. */ totalCost = 0.0; selec = 0.0; foreach(l, path->bitmapquals) { - Path *subpath = (Path *) lfirst(l); - Cost subCost; + Path *subpath = (Path *) lfirst(l); + Cost subCost; Selectivity subselec; cost_bitmap_tree_node(subpath, &subCost, &subselec); @@ -661,10 +659,9 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel) Assert(baserel->rtekind == RTE_SUBQUERY); /* - * Cost of path is cost of evaluating the subplan, plus cost of - * evaluating any restriction clauses that will be attached to the - * SubqueryScan node, plus cpu_tuple_cost to account for selection and - * projection overhead. + * Cost of path is cost of evaluating the subplan, plus cost of evaluating + * any restriction clauses that will be attached to the SubqueryScan node, + * plus cpu_tuple_cost to account for selection and projection overhead. */ path->startup_cost = baserel->subplan->startup_cost; path->total_cost = baserel->subplan->total_cost; @@ -694,8 +691,8 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel) /* * For now, estimate function's cost at one operator eval per function - * call. Someday we should revive the function cost estimate columns - * in pg_proc... + * call. Someday we should revive the function cost estimate columns in + * pg_proc... */ cpu_per_tuple = cpu_operator_cost; @@ -758,9 +755,8 @@ cost_sort(Path *path, PlannerInfo *root, startup_cost += disable_cost; /* - * We want to be sure the cost of a sort is never estimated as zero, - * even if passed-in tuple count is zero. Besides, mustn't do - * log(0)... + * We want to be sure the cost of a sort is never estimated as zero, even + * if passed-in tuple count is zero. Besides, mustn't do log(0)... */ if (tuples < 2.0) tuples = 2.0; @@ -790,8 +786,8 @@ cost_sort(Path *path, PlannerInfo *root, } /* - * Also charge a small amount (arbitrarily set equal to operator cost) - * per extracted tuple. + * Also charge a small amount (arbitrarily set equal to operator cost) per + * extracted tuple. */ run_cost += cpu_operator_cost * tuples; @@ -828,17 +824,16 @@ cost_material(Path *path, /* * Charge a very small amount per inserted tuple, to reflect bookkeeping - * costs. We use cpu_tuple_cost/10 for this. This is needed to break - * the tie that would otherwise exist between nestloop with A outer, + * costs. 
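A self-contained sketch of the BitmapAnd/BitmapOr estimates described in the comments above: AND-selectivity as the product of the inputs (independence assumption), OR-selectivity as the clamped sum (non-overlap assumption), and a flat 100 * cpu_operator_cost charge per tbm_intersect/tbm_union after the first input. The sub-path costs and selectivities are invented for the example; cpu_operator_cost uses the stock default.

#include <stdio.h>

#define CPU_OPERATOR_COST 0.0025

int
main(void)
{
    double subselec[] = {0.10, 0.05, 0.20};
    double subcost[] = {25.0, 12.0, 40.0};
    int    n = 3, i;
    double and_selec = 1.0, or_selec = 0.0;
    double and_cost = 0.0, or_cost = 0.0;

    for (i = 0; i < n; i++)
    {
        and_selec *= subselec[i];
        or_selec += subselec[i];
        and_cost += subcost[i];
        or_cost += subcost[i];
        if (i > 0)              /* one tbm_intersect/tbm_union per input after the first */
        {
            and_cost += 100.0 * CPU_OPERATOR_COST;
            or_cost += 100.0 * CPU_OPERATOR_COST;
        }
    }
    if (or_selec > 1.0)
        or_selec = 1.0;         /* clamp the OR estimate */

    printf("AND: selec=%.4f cost=%.2f\n", and_selec, and_cost);
    printf("OR:  selec=%.4f cost=%.2f\n", or_selec, or_cost);
    return 0;
}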
We use cpu_tuple_cost/10 for this. This is needed to break the + * tie that would otherwise exist between nestloop with A outer, * materialized B inner and nestloop with B outer, materialized A inner. * The extra cost ensures we'll prefer materializing the smaller rel. */ startup_cost += cpu_tuple_cost * 0.1 * tuples; /* - * Also charge a small amount per extracted tuple. We use - * cpu_tuple_cost so that it doesn't appear worthwhile to materialize - * a bare seqscan. + * Also charge a small amount per extracted tuple. We use cpu_tuple_cost + * so that it doesn't appear worthwhile to materialize a bare seqscan. */ run_cost += cpu_tuple_cost * tuples; @@ -865,23 +860,22 @@ cost_agg(Path *path, PlannerInfo *root, Cost total_cost; /* - * We charge one cpu_operator_cost per aggregate function per input - * tuple, and another one per output tuple (corresponding to transfn - * and finalfn calls respectively). If we are grouping, we charge an - * additional cpu_operator_cost per grouping column per input tuple - * for grouping comparisons. + * We charge one cpu_operator_cost per aggregate function per input tuple, + * and another one per output tuple (corresponding to transfn and finalfn + * calls respectively). If we are grouping, we charge an additional + * cpu_operator_cost per grouping column per input tuple for grouping + * comparisons. * * We will produce a single output tuple if not grouping, and a tuple per * group otherwise. We charge cpu_tuple_cost for each output tuple. * - * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the - * same total CPU cost, but AGG_SORTED has lower startup cost. If the - * input path is already sorted appropriately, AGG_SORTED should be - * preferred (since it has no risk of memory overflow). This will - * happen as long as the computed total costs are indeed exactly equal - * --- but if there's roundoff error we might do the wrong thing. So - * be sure that the computations below form the same intermediate - * values in the same order. + * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the same + * total CPU cost, but AGG_SORTED has lower startup cost. If the input + * path is already sorted appropriately, AGG_SORTED should be preferred + * (since it has no risk of memory overflow). This will happen as long as + * the computed total costs are indeed exactly equal --- but if there's + * roundoff error we might do the wrong thing. So be sure that the + * computations below form the same intermediate values in the same order. */ if (aggstrategy == AGG_PLAIN) { @@ -937,8 +931,8 @@ cost_group(Path *path, PlannerInfo *root, total_cost = input_total_cost; /* - * Charge one cpu_operator_cost per comparison per input tuple. We - * assume all columns get compared at most of the tuples. + * Charge one cpu_operator_cost per comparison per input tuple. We assume + * all columns get compared at most of the tuples. */ total_cost += cpu_operator_cost * input_tuples * numGroupCols; @@ -968,10 +962,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root) Selectivity joininfactor; /* - * If inner path is an indexscan, be sure to use its estimated output - * row count, which may be lower than the restriction-clause-only row - * count of its parent. (We don't include this case in the PATH_ROWS - * macro because it applies *only* to a nestloop's inner relation.) + * If inner path is an indexscan, be sure to use its estimated output row + * count, which may be lower than the restriction-clause-only row count of + * its parent. 
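A sketch of the aggregation CPU charges described above, using the stock default cost parameters: one cpu_operator_cost per aggregate per input tuple (transfn), one per output tuple (finalfn), one per grouping column per input tuple for grouping comparisons, and cpu_tuple_cost per output tuple. The row and column counts are invented; how the total splits between startup and run cost for AGG_SORTED versus AGG_HASHED is not shown.

#include <stdio.h>

#define CPU_TUPLE_COST    0.01
#define CPU_OPERATOR_COST 0.0025

int
main(void)
{
    double input_tuples = 100000.0;
    double output_groups = 500.0;
    int    numAggs = 2;
    int    numGroupCols = 1;
    double total_cpu;

    total_cpu = CPU_OPERATOR_COST * input_tuples * numAggs   /* transfn calls */
        + CPU_OPERATOR_COST * output_groups * numAggs        /* finalfn calls */
        + CPU_OPERATOR_COST * input_tuples * numGroupCols    /* grouping comparisons */
        + CPU_TUPLE_COST * output_groups;                    /* output tuples */

    printf("estimated aggregation CPU cost: %.2f\n", total_cpu);
    return 0;
}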
(We don't include this case in the PATH_ROWS macro because + * it applies *only* to a nestloop's inner relation.) */ if (IsA(inner_path, IndexPath)) inner_path_rows = ((IndexPath *) inner_path)->rows; @@ -982,11 +976,11 @@ cost_nestloop(NestPath *path, PlannerInfo *root) startup_cost += disable_cost; /* - * If we're doing JOIN_IN then we will stop scanning inner tuples for - * an outer tuple as soon as we have one match. Account for the - * effects of this by scaling down the cost estimates in proportion to - * the JOIN_IN selectivity. (This assumes that all the quals attached - * to the join are IN quals, which should be true.) + * If we're doing JOIN_IN then we will stop scanning inner tuples for an + * outer tuple as soon as we have one match. Account for the effects of + * this by scaling down the cost estimates in proportion to the JOIN_IN + * selectivity. (This assumes that all the quals attached to the join are + * IN quals, which should be true.) */ joininfactor = join_in_selectivity(path, root); @@ -996,9 +990,9 @@ cost_nestloop(NestPath *path, PlannerInfo *root) * NOTE: clearly, we must pay both outer and inner paths' startup_cost * before we can start returning tuples, so the join's startup cost is * their sum. What's not so clear is whether the inner path's - * startup_cost must be paid again on each rescan of the inner path. - * This is not true if the inner path is materialized or is a - * hashjoin, but probably is true otherwise. + * startup_cost must be paid again on each rescan of the inner path. This + * is not true if the inner path is materialized or is a hashjoin, but + * probably is true otherwise. */ startup_cost += outer_path->startup_cost + inner_path->startup_cost; run_cost += outer_path->total_cost - outer_path->startup_cost; @@ -1077,12 +1071,11 @@ cost_mergejoin(MergePath *path, PlannerInfo *root) /* * Compute cost and selectivity of the mergequals and qpquals (other - * restriction clauses) separately. We use approx_selectivity here - * for speed --- in most cases, any errors won't affect the result - * much. + * restriction clauses) separately. We use approx_selectivity here for + * speed --- in most cases, any errors won't affect the result much. * - * Note: it's probably bogus to use the normal selectivity calculation - * here when either the outer or inner path is a UniquePath. + * Note: it's probably bogus to use the normal selectivity calculation here + * when either the outer or inner path is a UniquePath. */ merge_selec = approx_selectivity(root, mergeclauses, path->jpath.jointype); @@ -1095,31 +1088,30 @@ cost_mergejoin(MergePath *path, PlannerInfo *root) mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows); /* - * When there are equal merge keys in the outer relation, the - * mergejoin must rescan any matching tuples in the inner relation. - * This means re-fetching inner tuples. Our cost model for this is - * that a re-fetch costs the same as an original fetch, which is - * probably an overestimate; but on the other hand we ignore the - * bookkeeping costs of mark/restore. Not clear if it's worth - * developing a more refined model. + * When there are equal merge keys in the outer relation, the mergejoin + * must rescan any matching tuples in the inner relation. This means + * re-fetching inner tuples. Our cost model for this is that a re-fetch + * costs the same as an original fetch, which is probably an overestimate; + * but on the other hand we ignore the bookkeeping costs of mark/restore. 
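A simplified, self-contained rendering of the nestloop charging scheme sketched in the comments above: both startup costs are paid once, the outer path's run cost is paid once, and the inner path is rescanned once per outer row, scaled by the JOIN_IN factor. Whether the inner startup cost must be paid again on each rescan depends on the inner plan type, which this sketch deliberately ignores; all numbers are illustrative.

#include <stdio.h>

typedef struct
{
    double startup_cost;
    double total_cost;
    double rows;
} PathCost;

int
main(void)
{
    PathCost outer = {0.0, 1000.0, 5000.0};  /* illustrative outer path */
    PathCost inner = {2.0, 8.0, 10.0};       /* e.g. a cheap inner indexscan */
    double   joininfactor = 1.0;             /* < 1.0 when doing JOIN_IN */
    double   startup_cost, run_cost;

    startup_cost = outer.startup_cost + inner.startup_cost;
    run_cost = outer.total_cost - outer.startup_cost;
    /* rescan the inner path once per outer row */
    run_cost += outer.rows * (inner.total_cost - inner.startup_cost) * joininfactor;

    printf("nestloop: startup=%.1f total=%.1f\n",
           startup_cost, startup_cost + run_cost);
    return 0;
}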
+ * Not clear if it's worth developing a more refined model. * - * The number of re-fetches can be estimated approximately as size of - * merge join output minus size of inner relation. Assume that the - * distinct key values are 1, 2, ..., and denote the number of values - * of each key in the outer relation as m1, m2, ...; in the inner - * relation, n1, n2, ... Then we have + * The number of re-fetches can be estimated approximately as size of merge + * join output minus size of inner relation. Assume that the distinct key + * values are 1, 2, ..., and denote the number of values of each key in + * the outer relation as m1, m2, ...; in the inner relation, n1, n2, ... + * Then we have * * size of join = m1 * n1 + m2 * n2 + ... * - * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 * - * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner + * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 * n1 + * + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner * relation * - * This equation works correctly for outer tuples having no inner match - * (nk = 0), but not for inner tuples having no outer match (mk = 0); - * we are effectively subtracting those from the number of rescanned - * tuples, when we should not. Can we do better without expensive - * selectivity computations? + * This equation works correctly for outer tuples having no inner match (nk = + * 0), but not for inner tuples having no outer match (mk = 0); we are + * effectively subtracting those from the number of rescanned tuples, when + * we should not. Can we do better without expensive selectivity + * computations? */ if (IsA(outer_path, UniquePath)) rescannedtuples = 0; @@ -1140,9 +1132,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root) * inputs that will actually need to be scanned. We use only the first * (most significant) merge clause for this purpose. * - * Since this calculation is somewhat expensive, and will be the same for - * all mergejoin paths associated with the merge clause, we cache the - * results in the RestrictInfo node. + * Since this calculation is somewhat expensive, and will be the same for all + * mergejoin paths associated with the merge clause, we cache the results + * in the RestrictInfo node. */ if (mergeclauses && path->jpath.jointype != JOIN_FULL) { @@ -1181,9 +1173,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root) /* * Readjust scan selectivities to account for above rounding. This is - * normally an insignificant effect, but when there are only a few - * rows in the inputs, failing to do this makes for a large percentage - * error. + * normally an insignificant effect, but when there are only a few rows in + * the inputs, failing to do this makes for a large percentage error. */ outerscansel = outer_rows / outer_path_rows; innerscansel = inner_rows / inner_path_rows; @@ -1231,20 +1222,20 @@ cost_mergejoin(MergePath *path, PlannerInfo *root) /* CPU costs */ /* - * If we're doing JOIN_IN then we will stop outputting inner tuples - * for an outer tuple as soon as we have one match. Account for the - * effects of this by scaling down the cost estimates in proportion to - * the expected output size. (This assumes that all the quals - * attached to the join are IN quals, which should be true.) + * If we're doing JOIN_IN then we will stop outputting inner tuples for an + * outer tuple as soon as we have one match. 
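The rescanned-tuple estimate spelled out in the comment above, as a tiny standalone example: rescanned tuples = (size of join) - (size of inner relation), clamped at zero, and zero outright when the outer side is known unique. The m/n key counts are invented purely to show the arithmetic.

#include <stdio.h>

int
main(void)
{
    /* two distinct key values; mk outer copies and nk inner copies of each */
    double m[] = {3.0, 1.0};
    double n[] = {2.0, 4.0};
    double join_size = 0.0, inner_size = 0.0, rescanned;
    int    k;

    for (k = 0; k < 2; k++)
    {
        join_size += m[k] * n[k];   /* m1*n1 + m2*n2 + ... */
        inner_size += n[k];         /* n1 + n2 + ... */
    }

    rescanned = join_size - inner_size;
    if (rescanned < 0)
        rescanned = 0;              /* outer tuples with no match can drive this negative */

    printf("join size=%.0f inner size=%.0f rescanned=%.0f\n",
           join_size, inner_size, rescanned);
    return 0;
}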
Account for the effects of + * this by scaling down the cost estimates in proportion to the expected + * output size. (This assumes that all the quals attached to the join are + * IN quals, which should be true.) */ joininfactor = join_in_selectivity(&path->jpath, root); /* - * The number of tuple comparisons needed is approximately number of - * outer rows plus number of inner rows plus number of rescanned - * tuples (can we refine this?). At each one, we need to evaluate the - * mergejoin quals. NOTE: JOIN_IN mode does not save any work here, - * so do NOT include joininfactor. + * The number of tuple comparisons needed is approximately number of outer + * rows plus number of inner rows plus number of rescanned tuples (can we + * refine this?). At each one, we need to evaluate the mergejoin quals. + * NOTE: JOIN_IN mode does not save any work here, so do NOT include + * joininfactor. */ startup_cost += merge_qual_cost.startup; run_cost += merge_qual_cost.per_tuple * @@ -1253,9 +1244,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root) /* * For each tuple that gets through the mergejoin proper, we charge * cpu_tuple_cost plus the cost of evaluating additional restriction - * clauses that are to be applied at the join. (This is pessimistic - * since not all of the quals may get evaluated at each tuple.) This - * work is skipped in JOIN_IN mode, so apply the factor. + * clauses that are to be applied at the join. (This is pessimistic since + * not all of the quals may get evaluated at each tuple.) This work is + * skipped in JOIN_IN mode, so apply the factor. */ startup_cost += qp_qual_cost.startup; cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple; @@ -1290,9 +1281,9 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) double outer_path_rows = PATH_ROWS(outer_path); double inner_path_rows = PATH_ROWS(inner_path); double outerbytes = relation_byte_size(outer_path_rows, - outer_path->parent->width); + outer_path->parent->width); double innerbytes = relation_byte_size(inner_path_rows, - inner_path->parent->width); + inner_path->parent->width); int num_hashclauses = list_length(hashclauses); int numbuckets; int numbatches; @@ -1306,12 +1297,11 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) /* * Compute cost and selectivity of the hashquals and qpquals (other - * restriction clauses) separately. We use approx_selectivity here - * for speed --- in most cases, any errors won't affect the result - * much. + * restriction clauses) separately. We use approx_selectivity here for + * speed --- in most cases, any errors won't affect the result much. * - * Note: it's probably bogus to use the normal selectivity calculation - * here when either the outer or inner path is a UniquePath. + * Note: it's probably bogus to use the normal selectivity calculation here + * when either the outer or inner path is a UniquePath. */ hash_selec = approx_selectivity(root, hashclauses, path->jpath.jointype); @@ -1329,13 +1319,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) startup_cost += inner_path->total_cost; /* - * Cost of computing hash function: must do it once per input tuple. - * We charge one cpu_operator_cost for each column's hash function. + * Cost of computing hash function: must do it once per input tuple. We + * charge one cpu_operator_cost for each column's hash function. * - * XXX when a hashclause is more complex than a single operator, we - * really should charge the extra eval costs of the left or right - * side, as appropriate, here. 
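A sketch of the mergejoin CPU charges described above: the merge quals are evaluated roughly (outer rows + inner rows + rescanned tuples) times and are not reduced by JOIN_IN, while the per-output-tuple charge (cpu_tuple_cost plus the remaining qual cost) is scaled by the JOIN_IN factor. All inputs are illustrative assumptions.

#include <stdio.h>

#define CPU_TUPLE_COST 0.01

int
main(void)
{
    double outer_rows = 1000.0;
    double inner_rows = 800.0;
    double rescannedtuples = 50.0;
    double mergejointuples = 1200.0;    /* rows passing the merge quals */
    double merge_qual_per_tuple = 0.005;
    double qp_qual_per_tuple = 0.0025;
    double joininfactor = 1.0;          /* < 1.0 when doing JOIN_IN */
    double run_cost;

    /* merge-qual evaluations: NOT reduced by JOIN_IN */
    run_cost = merge_qual_per_tuple *
        (outer_rows + inner_rows + rescannedtuples);

    /* per-output-tuple work IS skipped once a match is found under JOIN_IN */
    run_cost += (CPU_TUPLE_COST + qp_qual_per_tuple) *
        mergejointuples * joininfactor;

    printf("mergejoin CPU run cost: %.2f\n", run_cost);
    return 0;
}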
This seems more work than it's worth - * at the moment. + * XXX when a hashclause is more complex than a single operator, we really + * should charge the extra eval costs of the left or right side, as + * appropriate, here. This seems more work than it's worth at the moment. */ startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows; run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows; @@ -1345,17 +1334,17 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) inner_path->parent->width, &numbuckets, &numbatches); - virtualbuckets = (double) numbuckets * (double) numbatches; + virtualbuckets = (double) numbuckets *(double) numbatches; /* - * Determine bucketsize fraction for inner relation. We use the - * smallest bucketsize estimated for any individual hashclause; this - * is undoubtedly conservative. + * Determine bucketsize fraction for inner relation. We use the smallest + * bucketsize estimated for any individual hashclause; this is undoubtedly + * conservative. * - * BUT: if inner relation has been unique-ified, we can assume it's good - * for hashing. This is important both because it's the right answer, - * and because we avoid contaminating the cache with a value that's - * wrong for non-unique-ified paths. + * BUT: if inner relation has been unique-ified, we can assume it's good for + * hashing. This is important both because it's the right answer, and + * because we avoid contaminating the cache with a value that's wrong for + * non-unique-ified paths. */ if (IsA(inner_path, UniquePath)) innerbucketsize = 1.0 / virtualbuckets; @@ -1370,13 +1359,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) Assert(IsA(restrictinfo, RestrictInfo)); /* - * First we have to figure out which side of the hashjoin - * clause is the inner side. + * First we have to figure out which side of the hashjoin clause + * is the inner side. * * Since we tend to visit the same clauses over and over when - * planning a large query, we cache the bucketsize estimate in - * the RestrictInfo node to avoid repeated lookups of - * statistics. + * planning a large query, we cache the bucketsize estimate in the + * RestrictInfo node to avoid repeated lookups of statistics. */ if (bms_is_subset(restrictinfo->right_relids, inner_path->parent->relids)) @@ -1388,7 +1376,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) /* not cached yet */ thisbucketsize = estimate_hash_bucketsize(root, - get_rightop(restrictinfo->clause), + get_rightop(restrictinfo->clause), virtualbuckets); restrictinfo->right_bucketsize = thisbucketsize; } @@ -1404,7 +1392,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) /* not cached yet */ thisbucketsize = estimate_hash_bucketsize(root, - get_leftop(restrictinfo->clause), + get_leftop(restrictinfo->clause), virtualbuckets); restrictinfo->left_bucketsize = thisbucketsize; } @@ -1417,10 +1405,10 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) /* * If inner relation is too big then we will need to "batch" the join, - * which implies writing and reading most of the tuples to disk an - * extra time. Charge one cost unit per page of I/O (correct since it - * should be nice and sequential...). Writing the inner rel counts as - * startup cost, all the rest as run cost. + * which implies writing and reading most of the tuples to disk an extra + * time. Charge one cost unit per page of I/O (correct since it should be + * nice and sequential...). Writing the inner rel counts as startup cost, + * all the rest as run cost. 
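A sketch of the hash-cost bookkeeping described above: one cpu_operator_cost per hash column per input tuple (hashing the inner relation counts as startup, hashing outer tuples as run cost), virtualbuckets as buckets times batches, and a bucket-size fraction of 1/virtualbuckets when the inner relation has been unique-ified. The bucket and batch counts here are invented; the real ones come from the executor's hash-table sizing.

#include <stdio.h>

#define CPU_OPERATOR_COST 0.0025

int
main(void)
{
    double outer_rows = 100000.0;
    double inner_rows = 20000.0;
    int    num_hashclauses = 2;
    int    numbuckets = 4096;
    int    numbatches = 4;
    double virtualbuckets, innerbucketsize;
    double startup_cost = 0.0, run_cost = 0.0;

    /* building the hash table happens before any output: startup cost */
    startup_cost += CPU_OPERATOR_COST * num_hashclauses * inner_rows;
    /* hashing outer tuples happens as we go: run cost */
    run_cost += CPU_OPERATOR_COST * num_hashclauses * outer_rows;

    virtualbuckets = (double) numbuckets * (double) numbatches;
    innerbucketsize = 1.0 / virtualbuckets;     /* unique-ified inner relation */

    printf("startup=%.1f run=%.1f bucketsize=%.6f\n",
           startup_cost, run_cost, innerbucketsize);
    return 0;
}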
*/ if (numbatches > 1) { @@ -1436,21 +1424,21 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) /* CPU costs */ /* - * If we're doing JOIN_IN then we will stop comparing inner tuples to - * an outer tuple as soon as we have one match. Account for the - * effects of this by scaling down the cost estimates in proportion to - * the expected output size. (This assumes that all the quals - * attached to the join are IN quals, which should be true.) + * If we're doing JOIN_IN then we will stop comparing inner tuples to an + * outer tuple as soon as we have one match. Account for the effects of + * this by scaling down the cost estimates in proportion to the expected + * output size. (This assumes that all the quals attached to the join are + * IN quals, which should be true.) */ joininfactor = join_in_selectivity(&path->jpath, root); /* - * The number of tuple comparisons needed is the number of outer - * tuples times the typical number of tuples in a hash bucket, which - * is the inner relation size times its bucketsize fraction. At each - * one, we need to evaluate the hashjoin quals. (Note: charging the - * full qual eval cost at each tuple is pessimistic, since we don't - * evaluate the quals unless the hash values match exactly.) + * The number of tuple comparisons needed is the number of outer tuples + * times the typical number of tuples in a hash bucket, which is the inner + * relation size times its bucketsize fraction. At each one, we need to + * evaluate the hashjoin quals. (Note: charging the full qual eval cost + * at each tuple is pessimistic, since we don't evaluate the quals unless + * the hash values match exactly.) */ startup_cost += hash_qual_cost.startup; run_cost += hash_qual_cost.per_tuple * @@ -1460,8 +1448,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) /* * For each tuple that gets through the hashjoin proper, we charge * cpu_tuple_cost plus the cost of evaluating additional restriction - * clauses that are to be applied at the join. (This is pessimistic - * since not all of the quals may get evaluated at each tuple.) + * clauses that are to be applied at the join. (This is pessimistic since + * not all of the quals may get evaluated at each tuple.) */ startup_cost += qp_qual_cost.startup; cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple; @@ -1469,16 +1457,16 @@ cost_hashjoin(HashPath *path, PlannerInfo *root) /* * Bias against putting larger relation on inside. We don't want an - * absolute prohibition, though, since larger relation might have - * better bucketsize --- and we can't trust the size estimates - * unreservedly, anyway. Instead, inflate the run cost by the square - * root of the size ratio. (Why square root? No real good reason, - * but it seems reasonable...) + * absolute prohibition, though, since larger relation might have better + * bucketsize --- and we can't trust the size estimates unreservedly, + * anyway. Instead, inflate the run cost by the square root of the size + * ratio. (Why square root? No real good reason, but it seems + * reasonable...) * * Note: before 7.4 we implemented this by inflating startup cost; but if - * there's a disable_cost component in the input paths' startup cost, - * that unfairly penalizes the hash. Probably it'd be better to keep - * track of disable penalty separately from cost. + * there's a disable_cost component in the input paths' startup cost, that + * unfairly penalizes the hash. Probably it'd be better to keep track of + * disable penalty separately from cost. 
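A sketch of the hashjoin probe-cost estimate described above: each outer tuple is compared against the expected number of tuples in its hash bucket (inner rows times the bucket-size fraction), evaluating the hash quals at each comparison, with the whole term scaled down by the JOIN_IN factor. The batching I/O surcharge is left out, and the numbers are illustrative.

#include <stdio.h>

int
main(void)
{
    double outer_rows = 100000.0;
    double inner_rows = 20000.0;
    double innerbucketsize = 0.0005;    /* fraction of inner rows per bucket */
    double hash_qual_per_tuple = 0.005;
    double joininfactor = 1.0;          /* < 1.0 when doing JOIN_IN */
    double tuples_per_bucket, run_cost;

    tuples_per_bucket = inner_rows * innerbucketsize;
    if (tuples_per_bucket < 1.0)
        tuples_per_bucket = 1.0;        /* clamp, as a row estimate would be */

    run_cost = hash_qual_per_tuple *
        outer_rows * tuples_per_bucket * joininfactor;

    printf("expected bucket size: %.1f tuples, probe cost: %.1f\n",
           tuples_per_bucket, run_cost);
    return 0;
}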
*/ if (innerbytes > outerbytes && outerbytes > 0) run_cost *= sqrt(innerbytes / outerbytes); @@ -1545,13 +1533,13 @@ cost_qual_eval_walker(Node *node, QualCost *total) return false; /* - * Our basic strategy is to charge one cpu_operator_cost for each - * operator or function node in the given tree. Vars and Consts are - * charged zero, and so are boolean operators (AND, OR, NOT). - * Simplistic, but a lot better than no model at all. + * Our basic strategy is to charge one cpu_operator_cost for each operator + * or function node in the given tree. Vars and Consts are charged zero, + * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot + * better than no model at all. * - * Should we try to account for the possibility of short-circuit - * evaluation of AND/OR? + * Should we try to account for the possibility of short-circuit evaluation + * of AND/OR? */ if (IsA(node, FuncExpr) || IsA(node, OpExpr) || @@ -1572,12 +1560,12 @@ cost_qual_eval_walker(Node *node, QualCost *total) { /* * A subplan node in an expression typically indicates that the - * subplan will be executed on each evaluation, so charge - * accordingly. (Sub-selects that can be executed as InitPlans - * have already been removed from the expression.) + * subplan will be executed on each evaluation, so charge accordingly. + * (Sub-selects that can be executed as InitPlans have already been + * removed from the expression.) * - * An exception occurs when we have decided we can implement the - * subplan by hashing. + * An exception occurs when we have decided we can implement the subplan + * by hashing. * */ SubPlan *subplan = (SubPlan *) node; @@ -1586,32 +1574,31 @@ cost_qual_eval_walker(Node *node, QualCost *total) if (subplan->useHashTable) { /* - * If we are using a hash table for the subquery outputs, then - * the cost of evaluating the query is a one-time cost. We - * charge one cpu_operator_cost per tuple for the work of - * loading the hashtable, too. + * If we are using a hash table for the subquery outputs, then the + * cost of evaluating the query is a one-time cost. We charge one + * cpu_operator_cost per tuple for the work of loading the + * hashtable, too. */ total->startup += plan->total_cost + cpu_operator_cost * plan->plan_rows; /* - * The per-tuple costs include the cost of evaluating the - * lefthand expressions, plus the cost of probing the - * hashtable. Recursion into the exprs list will handle the - * lefthand expressions properly, and will count one - * cpu_operator_cost for each comparison operator. That is - * probably too low for the probing cost, but it's hard to - * make a better estimate, so live with it for now. + * The per-tuple costs include the cost of evaluating the lefthand + * expressions, plus the cost of probing the hashtable. Recursion + * into the exprs list will handle the lefthand expressions + * properly, and will count one cpu_operator_cost for each + * comparison operator. That is probably too low for the probing + * cost, but it's hard to make a better estimate, so live with it + * for now. */ } else { /* * Otherwise we will be rescanning the subplan output on each - * evaluation. We need to estimate how much of the output we - * will actually need to scan. NOTE: this logic should agree - * with the estimates used by make_subplan() in - * plan/subselect.c. + * evaluation. We need to estimate how much of the output we will + * actually need to scan. NOTE: this logic should agree with the + * estimates used by make_subplan() in plan/subselect.c. 
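A toy version of the expression-costing rule described above: walk an expression tree and charge one cpu_operator_cost per function or operator node, charging nothing for Vars, Consts, and the boolean connectives. The node representation is invented for the example; the real walker also handles subplans, as the surrounding comments discuss.

#include <stdio.h>
#include <stddef.h>

#define CPU_OPERATOR_COST 0.0025

typedef enum { N_VAR, N_CONST, N_OPEXPR, N_FUNCEXPR, N_BOOLEXPR } NodeTag;

typedef struct Node
{
    NodeTag      tag;
    struct Node *left;
    struct Node *right;
} Node;

static double
qual_cost(const Node *n)
{
    double cost;

    if (n == NULL)
        return 0.0;
    cost = qual_cost(n->left) + qual_cost(n->right);
    if (n->tag == N_OPEXPR || n->tag == N_FUNCEXPR)
        cost += CPU_OPERATOR_COST;  /* one charge per operator/function node */
    return cost;                    /* Vars, Consts, AND/OR/NOT: free */
}

int
main(void)
{
    /* (a > 1) AND upper(b) = 'X'  ==>  two operators plus one function */
    Node var_a = {N_VAR, NULL, NULL};
    Node const_1 = {N_CONST, NULL, NULL};
    Node gt = {N_OPEXPR, &var_a, &const_1};
    Node var_b = {N_VAR, NULL, NULL};
    Node upper_b = {N_FUNCEXPR, &var_b, NULL};
    Node const_x = {N_CONST, NULL, NULL};
    Node eq = {N_OPEXPR, &upper_b, &const_x};
    Node andexpr = {N_BOOLEXPR, &gt, &eq};

    printf("per-tuple qual cost: %.4f\n", qual_cost(&andexpr));
    return 0;
}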
*/ Cost plan_run_cost = plan->total_cost - plan->startup_cost; @@ -1636,10 +1623,10 @@ cost_qual_eval_walker(Node *node, QualCost *total) /* * Also account for subplan's startup cost. If the subplan is - * uncorrelated or undirect correlated, AND its topmost node - * is a Sort or Material node, assume that we'll only need to - * pay its startup cost once; otherwise assume we pay the - * startup cost every time. + * uncorrelated or undirect correlated, AND its topmost node is a + * Sort or Material node, assume that we'll only need to pay its + * startup cost once; otherwise assume we pay the startup cost + * every time. */ if (subplan->parParam == NIL && (IsA(plan, Sort) || @@ -1761,9 +1748,9 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, /* * Compute joinclause selectivity. Note that we are only considering - * clauses that become restriction clauses at this join level; we are - * not double-counting them because they were not considered in - * estimating the sizes of the component rels. + * clauses that become restriction clauses at this join level; we are not + * double-counting them because they were not considered in estimating the + * sizes of the component rels. */ selec = clauselist_selectivity(root, restrictlist, @@ -1773,13 +1760,13 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, /* * Basically, we multiply size of Cartesian product by selectivity. * - * If we are doing an outer join, take that into account: the output must - * be at least as large as the non-nullable input. (Is there any - * chance of being even smarter?) + * If we are doing an outer join, take that into account: the output must be + * at least as large as the non-nullable input. (Is there any chance of + * being even smarter?) * - * For JOIN_IN and variants, the Cartesian product is figured with - * respect to a unique-ified input, and then we can clamp to the size - * of the other input. + * For JOIN_IN and variants, the Cartesian product is figured with respect to + * a unique-ified input, and then we can clamp to the size of the other + * input. */ switch (jointype) { @@ -1848,12 +1835,11 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root) return 1.0; /* - * Return 1.0 if the inner side is already known unique. The case - * where the inner path is already a UniquePath probably cannot happen - * in current usage, but check it anyway for completeness. The - * interesting case is where we've determined the inner relation - * itself is unique, which we can check by looking at the rows - * estimate for its UniquePath. + * Return 1.0 if the inner side is already known unique. The case where + * the inner path is already a UniquePath probably cannot happen in + * current usage, but check it anyway for completeness. The interesting + * case is where we've determined the inner relation itself is unique, + * which we can check by looking at the rows estimate for its UniquePath. */ if (IsA(path->innerjoinpath, UniquePath)) return 1.0; @@ -1866,10 +1852,9 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root) /* * Compute same result set_joinrel_size_estimates would compute for - * JOIN_INNER. Note that we use the input rels' absolute size - * estimates, not PATH_ROWS() which might be less; if we used - * PATH_ROWS() we'd be double-counting the effects of any join clauses - * used in input scans. + * JOIN_INNER. 
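A sketch of the join-size arithmetic described in the comments above: the Cartesian product times the joinclause selectivity, then clamped per join type, so an outer join cannot produce fewer rows than its non-nullable side and an IN-join cannot produce more rows than the other (non-unique-ified) input. The row counts and selectivity are invented for illustration.

#include <stdio.h>

static double
clamp_rows(double nrows)
{
    return nrows < 1.0 ? 1.0 : nrows;
}

int
main(void)
{
    double outer_rows = 1000.0;
    double inner_rows = 200.0;
    double selec = 0.002;       /* joinclause selectivity */
    double nrows;

    /* plain inner join: Cartesian product times selectivity */
    nrows = clamp_rows(outer_rows * inner_rows * selec);
    printf("JOIN_INNER: %.0f rows\n", nrows);

    /* left outer join: at least as many rows as the non-nullable (outer) side */
    if (nrows < outer_rows)
        nrows = outer_rows;
    printf("JOIN_LEFT:  %.0f rows\n", nrows);

    /* IN-join: clamp to the size of the other input */
    nrows = clamp_rows(outer_rows * inner_rows * selec);
    if (nrows > outer_rows)
        nrows = outer_rows;
    printf("JOIN_IN:    %.0f rows\n", nrows);
    return 0;
}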
Note that we use the input rels' absolute size estimates, + * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be + * double-counting the effects of any join clauses used in input scans. */ selec = clauselist_selectivity(root, path->joinrestrictinfo, @@ -1908,8 +1893,8 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel) /* * Estimate number of rows the function itself will return. * - * XXX no idea how to do this yet; but we can at least check whether - * function returns set or not... + * XXX no idea how to do this yet; but we can at least check whether function + * returns set or not... */ if (expression_returns_set(rte->funcexpr)) rel->tuples = 1000; @@ -1957,8 +1942,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel) ndx = var->varattno - rel->min_attr; /* - * The width probably hasn't been cached yet, but may as well - * check + * The width probably hasn't been cached yet, but may as well check */ if (rel->attr_widths[ndx] > 0) { diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index f186b89db44..1790cc5266b 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.190 2005/09/24 22:54:36 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -48,9 +48,9 @@ static List *find_usable_indexes(PlannerInfo *root, RelOptInfo *rel, - List *clauses, List *outer_clauses, - bool istoplevel, bool isjoininner, - Relids outer_relids); + List *clauses, List *outer_clauses, + bool istoplevel, bool isjoininner, + Relids outer_relids); static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths); static int bitmap_path_comparator(const void *a, const void *b); static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths); @@ -62,25 +62,25 @@ static Oid indexable_operator(Expr *clause, Oid opclass, bool indexkey_on_left); static Relids indexable_outerrelids(RelOptInfo *rel); static bool matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel, - Relids outer_relids); + Relids outer_relids); static List *find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel, - Relids outer_relids, bool isouterjoin); + Relids outer_relids, bool isouterjoin); static ScanDirection match_variant_ordering(PlannerInfo *root, - IndexOptInfo *index, - List *restrictclauses); + IndexOptInfo *index, + List *restrictclauses); static List *identify_ignorable_ordering_cols(PlannerInfo *root, - IndexOptInfo *index, - List *restrictclauses); + IndexOptInfo *index, + List *restrictclauses); static bool match_index_to_query_keys(PlannerInfo *root, - IndexOptInfo *index, - ScanDirection indexscandir, - List *ignorables); + IndexOptInfo *index, + ScanDirection indexscandir, + List *ignorables); static bool match_boolean_index_clause(Node *clause, int indexcol, - IndexOptInfo *index); + IndexOptInfo *index); static bool match_special_index_operator(Expr *clause, Oid opclass, bool indexkey_on_left); static Expr *expand_boolean_index_clause(Node *clause, int indexcol, - IndexOptInfo *index); + IndexOptInfo *index); static List *expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass); static List *prefix_quals(Node *leftop, Oid opclass, Const *prefix, Pattern_Prefix_Status pstatus); @@ -153,8 +153,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo 
*rel) true, false, NULL); /* - * We can submit them all to add_path. (This generates access paths for - * plain IndexScan plans.) However, for the next step we will only want + * We can submit them all to add_path. (This generates access paths for + * plain IndexScan plans.) However, for the next step we will only want * the ones that have some selectivity; we must discard anything that was * generated solely for ordering purposes. */ @@ -180,8 +180,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel) bitindexpaths = list_concat(bitindexpaths, indexpaths); /* - * If we found anything usable, generate a BitmapHeapPath for the - * most promising combination of bitmap index paths. + * If we found anything usable, generate a BitmapHeapPath for the most + * promising combination of bitmap index paths. */ if (bitindexpaths != NIL) { @@ -254,19 +254,19 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel, bool index_is_ordered; /* - * Ignore partial indexes that do not match the query. If a partial - * index is marked predOK then we know it's OK; otherwise, if we - * are at top level we know it's not OK (since predOK is exactly - * whether its predicate could be proven from the toplevel clauses). - * Otherwise, we have to test whether the added clauses are - * sufficient to imply the predicate. If so, we could use - * the index in the current context. + * Ignore partial indexes that do not match the query. If a partial + * index is marked predOK then we know it's OK; otherwise, if we are + * at top level we know it's not OK (since predOK is exactly whether + * its predicate could be proven from the toplevel clauses). + * Otherwise, we have to test whether the added clauses are sufficient + * to imply the predicate. If so, we could use the index in the + * current context. * - * We set useful_predicate to true iff the predicate was proven - * using the current set of clauses. This is needed to prevent - * matching a predOK index to an arm of an OR, which would be - * a legal but pointlessly inefficient plan. (A better plan will - * be generated by just scanning the predOK index alone, no OR.) + * We set useful_predicate to true iff the predicate was proven using the + * current set of clauses. This is needed to prevent matching a + * predOK index to an arm of an OR, which would be a legal but + * pointlessly inefficient plan. (A better plan will be generated by + * just scanning the predOK index alone, no OR.) */ useful_predicate = false; if (index->indpred != NIL) @@ -282,7 +282,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel, else { if (istoplevel) - continue; /* no point in trying to prove it */ + continue; /* no point in trying to prove it */ /* Form all_clauses if not done already */ if (all_clauses == NIL) @@ -290,7 +290,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel, outer_clauses); if (!predicate_implied_by(index->indpred, all_clauses)) - continue; /* can't use it at all */ + continue; /* can't use it at all */ if (!predicate_implied_by(index->indpred, outer_clauses)) useful_predicate = true; @@ -309,17 +309,17 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel, &found_clause); /* - * Not all index AMs support scans with no restriction clauses. - * We can't generate a scan over an index with amoptionalkey = false + * Not all index AMs support scans with no restriction clauses. We + * can't generate a scan over an index with amoptionalkey = false * unless there's at least one restriction clause. 
*/ if (restrictclauses == NIL && !index->amoptionalkey) continue; /* - * 2. Compute pathkeys describing index's ordering, if any, then - * see how many of them are actually useful for this query. This - * is not relevant unless we are at top level. + * 2. Compute pathkeys describing index's ordering, if any, then see + * how many of them are actually useful for this query. This is not + * relevant unless we are at top level. */ index_is_ordered = OidIsValid(index->ordering[0]); if (istoplevel && index_is_ordered && !isjoininner) @@ -335,9 +335,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel, /* * 3. Generate an indexscan path if there are relevant restriction * clauses in the current clauses, OR the index ordering is - * potentially useful for later merging or final output ordering, - * OR the index has a predicate that was proven by the current - * clauses. + * potentially useful for later merging or final output ordering, OR + * the index has a predicate that was proven by the current clauses. */ if (found_clause || useful_pathkeys != NIL || useful_predicate) { @@ -352,16 +351,15 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel, } /* - * 4. If the index is ordered, and there is a requested query - * ordering that we failed to match, consider variant ways of - * achieving the ordering. Again, this is only interesting - * at top level. + * 4. If the index is ordered, and there is a requested query ordering + * that we failed to match, consider variant ways of achieving the + * ordering. Again, this is only interesting at top level. */ if (istoplevel && index_is_ordered && !isjoininner && root->query_pathkeys != NIL && pathkeys_useful_for_ordering(root, useful_pathkeys) == 0) { - ScanDirection scandir; + ScanDirection scandir; scandir = match_variant_ordering(root, index, restrictclauses); if (!ScanDirectionIsNoMovement(scandir)) @@ -409,9 +407,9 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel, foreach(l, clauses) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); - List *pathlist; - Path *bitmapqual; - ListCell *j; + List *pathlist; + Path *bitmapqual; + ListCell *j; Assert(IsA(rinfo, RestrictInfo)); /* Ignore RestrictInfos that aren't ORs */ @@ -419,19 +417,19 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel, continue; /* - * We must be able to match at least one index to each of the arms - * of the OR, else we can't use it. + * We must be able to match at least one index to each of the arms of + * the OR, else we can't use it. */ pathlist = NIL; foreach(j, ((BoolExpr *) rinfo->orclause)->args) { - Node *orarg = (Node *) lfirst(j); - List *indlist; + Node *orarg = (Node *) lfirst(j); + List *indlist; /* OR arguments should be ANDs or sub-RestrictInfos */ if (and_clause(orarg)) { - List *andargs = ((BoolExpr *) orarg)->args; + List *andargs = ((BoolExpr *) orarg)->args; indlist = find_usable_indexes(root, rel, andargs, @@ -458,25 +456,28 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel, isjoininner, outer_relids); } + /* - * If nothing matched this arm, we can't do anything - * with this OR clause. + * If nothing matched this arm, we can't do anything with this OR + * clause. */ if (indlist == NIL) { pathlist = NIL; break; } + /* - * OK, pick the most promising AND combination, - * and add it to pathlist. + * OK, pick the most promising AND combination, and add it to + * pathlist. 
*/ bitmapqual = choose_bitmap_and(root, rel, indlist); pathlist = lappend(pathlist, bitmapqual); } + /* - * If we have a match for every arm, then turn them - * into a BitmapOrPath, and add to result list. + * If we have a match for every arm, then turn them into a + * BitmapOrPath, and add to result list. */ if (pathlist != NIL) { @@ -494,7 +495,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel, * Given a nonempty list of bitmap paths, AND them into one path. * * This is a nontrivial decision since we can legally use any subset of the - * given path set. We want to choose a good tradeoff between selectivity + * given path set. We want to choose a good tradeoff between selectivity * and cost of computing the bitmap. * * The result is either a single one of the inputs, or a BitmapAndPath @@ -511,7 +512,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) int i; ListCell *l; - Assert(npaths > 0); /* else caller error */ + Assert(npaths > 0); /* else caller error */ if (npaths == 1) return (Path *) linitial(paths); /* easy case */ @@ -519,24 +520,23 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) * In theory we should consider every nonempty subset of the given paths. * In practice that seems like overkill, given the crude nature of the * estimates, not to mention the possible effects of higher-level AND and - * OR clauses. As a compromise, we sort the paths by selectivity. - * We always take the first, and sequentially add on paths that result - * in a lower estimated cost. + * OR clauses. As a compromise, we sort the paths by selectivity. We + * always take the first, and sequentially add on paths that result in a + * lower estimated cost. * - * We also make some effort to detect directly redundant input paths, - * as can happen if there are multiple possibly usable indexes. For - * this we look only at plain IndexPath inputs, not at sub-OR clauses. - * And we consider an index redundant if all its index conditions were - * already used by earlier indexes. (We could use predicate_implied_by - * to have a more intelligent, but much more expensive, check --- but in - * most cases simple pointer equality should suffice, since after all the - * index conditions are all coming from the same RestrictInfo lists.) + * We also make some effort to detect directly redundant input paths, as can + * happen if there are multiple possibly usable indexes. For this we look + * only at plain IndexPath inputs, not at sub-OR clauses. And we consider + * an index redundant if all its index conditions were already used by + * earlier indexes. (We could use predicate_implied_by to have a more + * intelligent, but much more expensive, check --- but in most cases + * simple pointer equality should suffice, since after all the index + * conditions are all coming from the same RestrictInfo lists.) * - * XXX is there any risk of throwing away a useful partial index here - * because we don't explicitly look at indpred? At least in simple - * cases, the partial index will sort before competing non-partial - * indexes and so it makes the right choice, but perhaps we need to - * work harder. + * XXX is there any risk of throwing away a useful partial index here because + * we don't explicitly look at indpred? At least in simple cases, the + * partial index will sort before competing non-partial indexes and so it + * makes the right choice, but perhaps we need to work harder. 
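A toy rendering of the greedy strategy described above for choosing which bitmap index paths to AND together: sort the candidates by estimated selectivity, always keep the first, and keep each later one only if adding it lowers the estimated combined cost. The redundancy test on shared index conditions is left out, the combined-cost function is a crude stand-in, and the candidate numbers are made up.

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    double cost;    /* cost of computing this input bitmap */
    double selec;   /* fraction of the heap it selects */
} BitmapInput;

static int
by_selectivity(const void *a, const void *b)
{
    const BitmapInput *pa = a;
    const BitmapInput *pb = b;

    if (pa->selec != pb->selec)
        return (pa->selec < pb->selec) ? -1 : 1;
    return 0;
}

/* crude stand-in for costing an AND of the chosen inputs over a 1000-page heap */
static double
and_cost(double input_cost, double combined_selec)
{
    return input_cost + combined_selec * 1000.0;
}

int
main(void)
{
    BitmapInput inputs[] = {{15.0, 0.40}, {10.0, 0.30}, {50.0, 0.35}};
    int         n = 3, i;
    double      cost, selec, trial;

    qsort(inputs, n, sizeof(BitmapInput), by_selectivity);

    /* always take the most selective input */
    cost = inputs[0].cost;
    selec = inputs[0].selec;

    for (i = 1; i < n; i++)
    {
        trial = and_cost(cost + inputs[i].cost, selec * inputs[i].selec);
        if (trial < and_cost(cost, selec))  /* keep it only if it pays off */
        {
            cost += inputs[i].cost;
            selec *= inputs[i].selec;
        }
    }
    printf("chosen AND: index cost=%.1f selectivity=%.4f total=%.1f\n",
           cost, selec, and_cost(cost, selec));
    return 0;
}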
* * Note: outputting the selected sub-paths in selectivity order is a good * thing even if we weren't using that as part of the selection method, @@ -559,13 +559,13 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) qualsofar = list_copy(((IndexPath *) patharray[0])->indexclauses); else qualsofar = NIL; - lastcell = list_head(paths); /* for quick deletions */ + lastcell = list_head(paths); /* for quick deletions */ for (i = 1; i < npaths; i++) { - Path *newpath = patharray[i]; - List *newqual = NIL; - Cost newcost; + Path *newpath = patharray[i]; + List *newqual = NIL; + Cost newcost; if (IsA(newpath, IndexPath)) { @@ -599,12 +599,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) static int bitmap_path_comparator(const void *a, const void *b) { - Path *pa = *(Path * const *) a; - Path *pb = *(Path * const *) b; + Path *pa = *(Path *const *) a; + Path *pb = *(Path *const *) b; Cost acost; Cost bcost; - Selectivity aselec; - Selectivity bselec; + Selectivity aselec; + Selectivity bselec; cost_bitmap_tree_node(pa, &acost, &aselec); cost_bitmap_tree_node(pb, &bcost, &bselec); @@ -660,7 +660,7 @@ bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths) * * We can use clauses from either the current clauses or outer_clauses lists, * but *found_clause is set TRUE only if we used at least one clause from - * the "current clauses" list. See find_usable_indexes() for motivation. + * the "current clauses" list. See find_usable_indexes() for motivation. * * outer_relids determines what Vars will be allowed on the other side * of a possible index qual; see match_clause_to_indexcol(). @@ -770,7 +770,7 @@ group_clauses_by_indexkey(IndexOptInfo *index, * to the caller-specified outer_relids relations (which had better not * include the relation whose index is being tested). outer_relids should * be NULL when checking simple restriction clauses, and the outer side - * of the join when building a join inner scan. Other than that, the + * of the join when building a join inner scan. Other than that, the * only thing we don't like is volatile functions. * * Note: in most cases we already know that the clause as a whole uses @@ -836,8 +836,8 @@ match_clause_to_indexcol(IndexOptInfo *index, return true; /* - * If we didn't find a member of the index's opclass, see whether - * it is a "special" indexable operator. + * If we didn't find a member of the index's opclass, see whether it + * is a "special" indexable operator. */ if (match_special_index_operator(clause, opclass, true)) return true; @@ -852,8 +852,8 @@ match_clause_to_indexcol(IndexOptInfo *index, return true; /* - * If we didn't find a member of the index's opclass, see whether - * it is a "special" indexable operator. + * If we didn't find a member of the index's opclass, see whether it + * is a "special" indexable operator. */ if (match_special_index_operator(clause, opclass, false)) return true; @@ -914,14 +914,14 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel) /* * Note: if Postgres tried to optimize queries by forming equivalence * classes over equi-joined attributes (i.e., if it recognized that a - * qualification such as "where a.b=c.d and a.b=5" could make use of - * an index on c.d), then we could use that equivalence class info - * here with joininfo lists to do more complete tests for the usability - * of a partial index. For now, the test only uses restriction - * clauses (those in baserestrictinfo). 
--Nels, Dec '92 + * qualification such as "where a.b=c.d and a.b=5" could make use of an + * index on c.d), then we could use that equivalence class info here with + * joininfo lists to do more complete tests for the usability of a partial + * index. For now, the test only uses restriction clauses (those in + * baserestrictinfo). --Nels, Dec '92 * - * XXX as of 7.1, equivalence class info *is* available. Consider - * improving this code as foreseen by Nels. + * XXX as of 7.1, equivalence class info *is* available. Consider improving + * this code as foreseen by Nels. */ foreach(ilist, rel->indexlist) @@ -943,7 +943,7 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel) /* * indexable_outerrelids * Finds all other relids that participate in any indexable join clause - * for the specified table. Returns a set of relids. + * for the specified table. Returns a set of relids. */ static Relids indexable_outerrelids(RelOptInfo *rel) @@ -958,7 +958,7 @@ indexable_outerrelids(RelOptInfo *rel) foreach(l, rel->joininfo) { RestrictInfo *joininfo = (RestrictInfo *) lfirst(l); - Relids other_rels; + Relids other_rels; other_rels = bms_difference(joininfo->required_relids, rel->relids); if (matches_any_index(joininfo, rel, other_rels)) @@ -986,7 +986,7 @@ matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel, Relids outer_relids) { foreach(l, ((BoolExpr *) rinfo->orclause)->args) { - Node *orarg = (Node *) lfirst(l); + Node *orarg = (Node *) lfirst(l); /* OR arguments should be ANDs or sub-RestrictInfos */ if (and_clause(orarg)) @@ -1092,17 +1092,17 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel, return NULL; /* - * Otherwise, we have to do path selection in the memory context of - * the given rel, so that any created path can be safely attached to - * the rel's cache of best inner paths. (This is not currently an - * issue for normal planning, but it is an issue for GEQO planning.) + * Otherwise, we have to do path selection in the memory context of the + * given rel, so that any created path can be safely attached to the rel's + * cache of best inner paths. (This is not currently an issue for normal + * planning, but it is an issue for GEQO planning.) */ oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); /* - * Intersect the given outer_relids with index_outer_relids to find - * the set of outer relids actually relevant for this rel. If there - * are none, again we can fail immediately. + * Intersect the given outer_relids with index_outer_relids to find the + * set of outer relids actually relevant for this rel. If there are none, + * again we can fail immediately. */ outer_relids = bms_intersect(rel->index_outer_relids, outer_relids); if (bms_is_empty(outer_relids)) @@ -1113,11 +1113,10 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel, } /* - * Look to see if we already computed the result for this set of - * relevant outerrels. (We include the isouterjoin status in the - * cache lookup key for safety. In practice I suspect this is not - * necessary because it should always be the same for a given - * innerrel.) + * Look to see if we already computed the result for this set of relevant + * outerrels. (We include the isouterjoin status in the cache lookup key + * for safety. In practice I suspect this is not necessary because it + * should always be the same for a given innerrel.) 
*/ foreach(l, rel->index_inner_paths) { @@ -1160,8 +1159,8 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel, bitindexpaths = list_concat(bitindexpaths, list_copy(indexpaths)); /* - * If we found anything usable, generate a BitmapHeapPath for the - * most promising combination of bitmap index paths. + * If we found anything usable, generate a BitmapHeapPath for the most + * promising combination of bitmap index paths. */ if (bitindexpaths != NIL) { @@ -1218,12 +1217,11 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel, ListCell *l; /* - * We can always use plain restriction clauses for the rel. We - * scan these first because we want them first in the clause - * list for the convenience of remove_redundant_join_clauses, - * which can never remove non-join clauses and hence won't be able - * to get rid of a non-join clause if it appears after a join - * clause it is redundant with. + * We can always use plain restriction clauses for the rel. We scan these + * first because we want them first in the clause list for the convenience + * of remove_redundant_join_clauses, which can never remove non-join + * clauses and hence won't be able to get rid of a non-join clause if it + * appears after a join clause it is redundant with. */ foreach(l, rel->baserestrictinfo) { @@ -1305,7 +1303,7 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel, * * If able to match the requested query pathkeys, returns either * ForwardScanDirection or BackwardScanDirection to indicate the proper index - * scan direction. If no match, returns NoMovementScanDirection. + * scan direction. If no match, returns NoMovementScanDirection. */ static ScanDirection match_variant_ordering(PlannerInfo *root, @@ -1318,8 +1316,8 @@ match_variant_ordering(PlannerInfo *root, * Forget the whole thing if not a btree index; our check for ignorable * columns assumes we are dealing with btree opclasses. (It'd be possible * to factor out just the try for backwards indexscan, but considering - * that we presently have no orderable indexes except btrees anyway, - * it's hardly worth contorting this code for that case.) + * that we presently have no orderable indexes except btrees anyway, it's + * hardly worth contorting this code for that case.) * * Note: if you remove this, you probably need to put in a check on * amoptionalkey to prevent possible clauseless scan on an index that @@ -1327,17 +1325,19 @@ match_variant_ordering(PlannerInfo *root, */ if (index->relam != BTREE_AM_OID) return NoMovementScanDirection; + /* - * Figure out which index columns can be optionally ignored because - * they have an equality constraint. This is the same set for either - * forward or backward scan, so we do it just once. + * Figure out which index columns can be optionally ignored because they + * have an equality constraint. This is the same set for either forward + * or backward scan, so we do it just once. */ ignorables = identify_ignorable_ordering_cols(root, index, restrictclauses); + /* - * Try to match to forward scan, then backward scan. However, we can - * skip the forward-scan case if there are no ignorable columns, - * because find_usable_indexes() would have found the match already. + * Try to match to forward scan, then backward scan. However, we can skip + * the forward-scan case if there are no ignorable columns, because + * find_usable_indexes() would have found the match already. 
*/ if (ignorables && match_index_to_query_keys(root, index, ForwardScanDirection, @@ -1365,24 +1365,24 @@ identify_ignorable_ordering_cols(PlannerInfo *root, List *restrictclauses) { List *result = NIL; - int indexcol = 0; /* note this is 0-based */ + int indexcol = 0; /* note this is 0-based */ ListCell *l; /* restrictclauses is either NIL or has a sublist per column */ foreach(l, restrictclauses) { - List *sublist = (List *) lfirst(l); - Oid opclass = index->classlist[indexcol]; - ListCell *l2; + List *sublist = (List *) lfirst(l); + Oid opclass = index->classlist[indexcol]; + ListCell *l2; foreach(l2, sublist) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(l2); OpExpr *clause = (OpExpr *) rinfo->clause; - Oid clause_op; - int op_strategy; - bool varonleft; - bool ispc; + Oid clause_op; + int op_strategy; + bool varonleft; + bool ispc; /* We know this clause passed match_clause_to_indexcol */ @@ -1393,11 +1393,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root, index)) { /* - * The clause means either col = TRUE or col = FALSE; - * we do not care which, it's an equality constraint - * either way. + * The clause means either col = TRUE or col = FALSE; we + * do not care which, it's an equality constraint either + * way. */ - result = lappend_int(result, indexcol+1); + result = lappend_int(result, indexcol + 1); break; } } @@ -1426,12 +1426,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root, op_strategy = get_op_opclass_strategy(clause_op, opclass); /* - * You might expect to see Assert(op_strategy != 0) here, - * but you won't: the clause might contain a special indexable - * operator rather than an ordinary opclass member. Currently - * none of the special operators are very likely to expand to - * an equality operator; we do not bother to check, but just - * assume no match. + * You might expect to see Assert(op_strategy != 0) here, but you + * won't: the clause might contain a special indexable operator + * rather than an ordinary opclass member. Currently none of the + * special operators are very likely to expand to an equality + * operator; we do not bother to check, but just assume no match. */ if (op_strategy != BTEqualStrategyNumber) continue; @@ -1445,7 +1444,7 @@ identify_ignorable_ordering_cols(PlannerInfo *root, rinfo->left_relids); if (ispc) { - result = lappend_int(result, indexcol+1); + result = lappend_int(result, indexcol + 1); break; } } @@ -1480,8 +1479,8 @@ match_index_to_query_keys(PlannerInfo *root, index_pathkeys = build_index_pathkeys(root, index, indexscandir); /* - * Can we match to the query's requested pathkeys? The inner loop - * skips over ignorable index columns while trying to match. + * Can we match to the query's requested pathkeys? The inner loop skips + * over ignorable index columns while trying to match. */ index_cell = list_head(index_pathkeys); index_col = 0; @@ -1492,13 +1491,14 @@ match_index_to_query_keys(PlannerInfo *root, for (;;) { - List *isubkey; + List *isubkey; if (index_cell == NULL) return false; isubkey = (List *) lfirst(index_cell); index_cell = lnext(index_cell); index_col++; /* index_col is now 1-based */ + /* * Since we are dealing with canonicalized pathkeys, pointer * comparison is sufficient to determine a match. @@ -1561,9 +1561,9 @@ match_index_to_operand(Node *operand, int indkey; /* - * Ignore any RelabelType node above the operand. This is needed to - * be able to apply indexscanning in binary-compatible-operator cases. 
- * Note: we can assume there is at most one RelabelType node; + * Ignore any RelabelType node above the operand. This is needed to be + * able to apply indexscanning in binary-compatible-operator cases. Note: + * we can assume there is at most one RelabelType node; * eval_const_expressions() will have simplified if more than one. */ if (operand && IsA(operand, RelabelType)) @@ -1583,9 +1583,9 @@ match_index_to_operand(Node *operand, else { /* - * Index expression; find the correct expression. (This search - * could be avoided, at the cost of complicating all the callers - * of this routine; doesn't seem worth it.) + * Index expression; find the correct expression. (This search could + * be avoided, at the cost of complicating all the callers of this + * routine; doesn't seem worth it.) */ ListCell *indexpr_item; int i; @@ -1645,7 +1645,7 @@ match_index_to_operand(Node *operand, * * Another thing that we do with this machinery is to provide special * smarts for "boolean" indexes (that is, indexes on boolean columns - * that support boolean equality). We can transform a plain reference + * that support boolean equality). We can transform a plain reference * to the indexkey into "indexkey = true", or "NOT indexkey" into * "indexkey = false", so as to make the expression indexable using the * regular index operators. (As of Postgres 8.1, we must do this here @@ -1696,14 +1696,15 @@ match_boolean_index_clause(Node *clause, indexcol, index)) return true; } + /* * Since we only consider clauses at top level of WHERE, we can convert - * indexkey IS TRUE and indexkey IS FALSE to index searches as well. - * The different meaning for NULL isn't important. + * indexkey IS TRUE and indexkey IS FALSE to index searches as well. The + * different meaning for NULL isn't important. */ else if (clause && IsA(clause, BooleanTest)) { - BooleanTest *btest = (BooleanTest *) clause; + BooleanTest *btest = (BooleanTest *) clause; if (btest->booltesttype == IS_TRUE || btest->booltesttype == IS_FALSE) @@ -1737,8 +1738,8 @@ match_special_index_operator(Expr *clause, Oid opclass, /* * Currently, all known special operators require the indexkey on the - * left, but this test could be pushed into the switch statement if - * some are added that do not... + * left, but this test could be pushed into the switch statement if some + * are added that do not... 
*/ if (!indexkey_on_left) return false; @@ -1760,12 +1761,12 @@ match_special_index_operator(Expr *clause, Oid opclass, case OID_NAME_LIKE_OP: /* the right-hand const is type text for all of these */ isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like, - &prefix, &rest) != Pattern_Prefix_None; + &prefix, &rest) != Pattern_Prefix_None; break; case OID_BYTEA_LIKE_OP: isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like, - &prefix, &rest) != Pattern_Prefix_None; + &prefix, &rest) != Pattern_Prefix_None; break; case OID_TEXT_ICLIKE_OP: @@ -1773,7 +1774,7 @@ match_special_index_operator(Expr *clause, Oid opclass, case OID_NAME_ICLIKE_OP: /* the right-hand const is type text for all of these */ isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC, - &prefix, &rest) != Pattern_Prefix_None; + &prefix, &rest) != Pattern_Prefix_None; break; case OID_TEXT_REGEXEQ_OP: @@ -1781,7 +1782,7 @@ match_special_index_operator(Expr *clause, Oid opclass, case OID_NAME_REGEXEQ_OP: /* the right-hand const is type text for all of these */ isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex, - &prefix, &rest) != Pattern_Prefix_None; + &prefix, &rest) != Pattern_Prefix_None; break; case OID_TEXT_ICREGEXEQ_OP: @@ -1789,7 +1790,7 @@ match_special_index_operator(Expr *clause, Oid opclass, case OID_NAME_ICREGEXEQ_OP: /* the right-hand const is type text for all of these */ isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC, - &prefix, &rest) != Pattern_Prefix_None; + &prefix, &rest) != Pattern_Prefix_None; break; case OID_INET_SUB_OP: @@ -1815,9 +1816,9 @@ match_special_index_operator(Expr *clause, Oid opclass, * want to apply. (A hash index, for example, will not support ">=".) * Currently, only btree supports the operators we need. * - * We insist on the opclass being the specific one we expect, else we'd - * do the wrong thing if someone were to make a reverse-sort opclass - * with the same operators. + * We insist on the opclass being the specific one we expect, else we'd do + * the wrong thing if someone were to make a reverse-sort opclass with the + * same operators. */ switch (expr_op) { @@ -1906,7 +1907,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups) /* First check for boolean cases */ if (IsBooleanOpclass(curClass)) { - Expr *boolqual; + Expr *boolqual; boolqual = expand_boolean_index_clause((Node *) rinfo->clause, indexcol, @@ -1960,7 +1961,7 @@ expand_boolean_index_clause(Node *clause, /* NOT clause? 
*/ if (not_clause(clause)) { - Node *arg = (Node *) get_notclausearg((Expr *) clause); + Node *arg = (Node *) get_notclausearg((Expr *) clause); /* It must have matched the indexkey */ Assert(match_index_to_operand(arg, indexcol, index)); @@ -1971,8 +1972,8 @@ expand_boolean_index_clause(Node *clause, } if (clause && IsA(clause, BooleanTest)) { - BooleanTest *btest = (BooleanTest *) clause; - Node *arg = (Node *) btest->arg; + BooleanTest *btest = (BooleanTest *) clause; + Node *arg = (Node *) btest->arg; /* It must have matched the indexkey */ Assert(match_index_to_operand(arg, indexcol, index)); @@ -2007,6 +2008,7 @@ static List * expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass) { Expr *clause = rinfo->clause; + /* we know these will succeed */ Node *leftop = get_leftop(clause); Node *rightop = get_rightop(clause); @@ -2020,10 +2022,9 @@ expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass) switch (expr_op) { /* - * LIKE and regex operators are not members of any index - * opclass, so if we find one in an indexqual list we can - * assume that it was accepted by - * match_special_index_operator(). + * LIKE and regex operators are not members of any index opclass, + * so if we find one in an indexqual list we can assume that it + * was accepted by match_special_index_operator(). */ case OID_TEXT_LIKE_OP: case OID_BPCHAR_LIKE_OP: @@ -2128,8 +2129,8 @@ prefix_quals(Node *leftop, Oid opclass, } /* - * If necessary, coerce the prefix constant to the right type. The - * given prefix constant is either text or bytea type. + * If necessary, coerce the prefix constant to the right type. The given + * prefix constant is either text or bytea type. */ if (prefix_const->consttype != datatype) { @@ -2139,11 +2140,11 @@ prefix_quals(Node *leftop, Oid opclass, { case TEXTOID: prefix = DatumGetCString(DirectFunctionCall1(textout, - prefix_const->constvalue)); + prefix_const->constvalue)); break; case BYTEAOID: prefix = DatumGetCString(DirectFunctionCall1(byteaout, - prefix_const->constvalue)); + prefix_const->constvalue)); break; default: elog(ERROR, "unexpected const type: %u", diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index b02f67ba1f6..ab3f902f02b 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.95 2005/06/05 22:32:55 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.96 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -65,9 +65,9 @@ add_paths_to_joinrel(PlannerInfo *root, /* * Find potential mergejoin clauses. We can skip this if we are not - * interested in doing a mergejoin. However, mergejoin is currently - * our only way of implementing full outer joins, so override - * mergejoin disable if it's a full join. + * interested in doing a mergejoin. However, mergejoin is currently our + * only way of implementing full outer joins, so override mergejoin + * disable if it's a full join. */ if (enable_mergejoin || jointype == JOIN_FULL) mergeclause_list = select_mergejoin_clauses(joinrel, @@ -95,23 +95,22 @@ add_paths_to_joinrel(PlannerInfo *root, /* * 3. Consider paths where the inner relation need not be explicitly - * sorted. This includes mergejoins only (nestloops were already - * built in match_unsorted_outer). + * sorted. 
This includes mergejoins only (nestloops were already built in + * match_unsorted_outer). * * Diked out as redundant 2/13/2000 -- tgl. There isn't any really - * significant difference between the inner and outer side of a - * mergejoin, so match_unsorted_inner creates no paths that aren't - * equivalent to those made by match_unsorted_outer when - * add_paths_to_joinrel() is invoked with the two rels given in the - * other order. + * significant difference between the inner and outer side of a mergejoin, + * so match_unsorted_inner creates no paths that aren't equivalent to + * those made by match_unsorted_outer when add_paths_to_joinrel() is + * invoked with the two rels given in the other order. */ match_unsorted_inner(root, joinrel, outerrel, innerrel, restrictlist, mergeclause_list, jointype); #endif /* - * 4. Consider paths where both outer and inner relations must be - * hashed before being joined. + * 4. Consider paths where both outer and inner relations must be hashed + * before being joined. */ if (enable_hashjoin) hash_inner_and_outer(root, joinrel, outerrel, innerrel, @@ -174,11 +173,11 @@ sort_inner_and_outer(PlannerInfo *root, /* * We only consider the cheapest-total-cost input paths, since we are * assuming here that a sort is required. We will consider - * cheapest-startup-cost input paths later, and only if they don't - * need a sort. + * cheapest-startup-cost input paths later, and only if they don't need a + * sort. * - * If unique-ification is requested, do it and then handle as a plain - * inner join. + * If unique-ification is requested, do it and then handle as a plain inner + * join. */ outer_path = outerrel->cheapest_total_path; inner_path = innerrel->cheapest_total_path; @@ -194,31 +193,29 @@ sort_inner_and_outer(PlannerInfo *root, } /* - * Each possible ordering of the available mergejoin clauses will - * generate a differently-sorted result path at essentially the same - * cost. We have no basis for choosing one over another at this level - * of joining, but some sort orders may be more useful than others for - * higher-level mergejoins, so it's worth considering multiple - * orderings. + * Each possible ordering of the available mergejoin clauses will generate + * a differently-sorted result path at essentially the same cost. We have + * no basis for choosing one over another at this level of joining, but + * some sort orders may be more useful than others for higher-level + * mergejoins, so it's worth considering multiple orderings. * * Actually, it's not quite true that every mergeclause ordering will * generate a different path order, because some of the clauses may be - * redundant. Therefore, what we do is convert the mergeclause list - * to a list of canonical pathkeys, and then consider different - * orderings of the pathkeys. + * redundant. Therefore, what we do is convert the mergeclause list to a + * list of canonical pathkeys, and then consider different orderings of + * the pathkeys. * * Generating a path for *every* permutation of the pathkeys doesn't seem * like a winning strategy; the cost in planning time is too high. For - * now, we generate one path for each pathkey, listing that pathkey - * first and the rest in random order. This should allow at least a - * one-clause mergejoin without re-sorting against any other possible - * mergejoin partner path. But if we've not guessed the right - * ordering of secondary keys, we may end up evaluating clauses as - * qpquals when they could have been done as mergeclauses. 
We need to - * figure out a better way. (Two possible approaches: look at all the - * relevant index relations to suggest plausible sort orders, or make - * just one output path and somehow mark it as having a sort-order - * that can be rearranged freely.) + * now, we generate one path for each pathkey, listing that pathkey first + * and the rest in random order. This should allow at least a one-clause + * mergejoin without re-sorting against any other possible mergejoin + * partner path. But if we've not guessed the right ordering of secondary + * keys, we may end up evaluating clauses as qpquals when they could have + * been done as mergeclauses. We need to figure out a better way. (Two + * possible approaches: look at all the relevant index relations to + * suggest plausible sort orders, or make just one output path and somehow + * mark it as having a sort-order that can be rearranged freely.) */ all_pathkeys = make_pathkeys_for_mergeclauses(root, mergeclause_list, @@ -243,26 +240,25 @@ sort_inner_and_outer(PlannerInfo *root, /* * Select mergeclause(s) that match this sort ordering. If we had - * redundant merge clauses then we will get a subset of the - * original clause list. There had better be some match, - * however... + * redundant merge clauses then we will get a subset of the original + * clause list. There had better be some match, however... */ cur_mergeclauses = find_mergeclauses_for_pathkeys(root, cur_pathkeys, - mergeclause_list); + mergeclause_list); Assert(cur_mergeclauses != NIL); /* Forget it if can't use all the clauses in right/full join */ if (useallclauses && - list_length(cur_mergeclauses) != list_length(mergeclause_list)) + list_length(cur_mergeclauses) != list_length(mergeclause_list)) continue; /* * Build sort pathkeys for both sides. * * Note: it's possible that the cheapest paths will already be sorted - * properly. create_mergejoin_path will detect that case and - * suppress an explicit sort step, so we needn't do so here. + * properly. create_mergejoin_path will detect that case and suppress + * an explicit sort step, so we needn't do so here. */ outerkeys = make_pathkeys_for_mergeclauses(root, cur_mergeclauses, @@ -343,10 +339,10 @@ match_unsorted_outer(PlannerInfo *root, /* * Nestloop only supports inner, left, and IN joins. Also, if we are - * doing a right or full join, we must use *all* the mergeclauses as - * join clauses, else we will not have a valid plan. (Although these - * two flags are currently inverses, keep them separate for clarity - * and possible future changes.) + * doing a right or full join, we must use *all* the mergeclauses as join + * clauses, else we will not have a valid plan. (Although these two flags + * are currently inverses, keep them separate for clarity and possible + * future changes.) */ switch (jointype) { @@ -385,10 +381,9 @@ match_unsorted_outer(PlannerInfo *root, else if (nestjoinOK) { /* - * If the cheapest inner path is a join or seqscan, we should - * consider materializing it. (This is a heuristic: we could - * consider it always, but for inner indexscans it's probably a - * waste of time.) + * If the cheapest inner path is a join or seqscan, we should consider + * materializing it. (This is a heuristic: we could consider it + * always, but for inner indexscans it's probably a waste of time.) 
*/ if (!(IsA(inner_cheapest_total, IndexPath) || IsA(inner_cheapest_total, BitmapHeapPath) || @@ -397,8 +392,8 @@ match_unsorted_outer(PlannerInfo *root, create_material_path(innerrel, inner_cheapest_total); /* - * Get the best innerjoin indexpath (if any) for this outer rel. - * It's the same for all outer paths. + * Get the best innerjoin indexpath (if any) for this outer rel. It's + * the same for all outer paths. */ bestinnerjoin = best_inner_indexscan(root, innerrel, outerrel->relids, jointype); @@ -417,8 +412,8 @@ match_unsorted_outer(PlannerInfo *root, int sortkeycnt; /* - * If we need to unique-ify the outer path, it's pointless to - * consider any but the cheapest outer. + * If we need to unique-ify the outer path, it's pointless to consider + * any but the cheapest outer. */ if (save_jointype == JOIN_UNIQUE_OUTER) { @@ -429,9 +424,9 @@ match_unsorted_outer(PlannerInfo *root, } /* - * The result will have this sort order (even if it is implemented - * as a nestloop, and even if some of the mergeclauses are - * implemented by qpquals rather than as true mergeclauses): + * The result will have this sort order (even if it is implemented as + * a nestloop, and even if some of the mergeclauses are implemented by + * qpquals rather than as true mergeclauses): */ merge_pathkeys = build_join_pathkeys(root, joinrel, jointype, outerpath->pathkeys); @@ -516,9 +511,9 @@ match_unsorted_outer(PlannerInfo *root, innerrel); /* - * Generate a mergejoin on the basis of sorting the cheapest - * inner. Since a sort will be needed, only cheapest total cost - * matters. (But create_mergejoin_path will do the right thing if + * Generate a mergejoin on the basis of sorting the cheapest inner. + * Since a sort will be needed, only cheapest total cost matters. + * (But create_mergejoin_path will do the right thing if * inner_cheapest_total is already correctly sorted.) */ add_path(joinrel, (Path *) @@ -538,10 +533,10 @@ match_unsorted_outer(PlannerInfo *root, continue; /* - * Look for presorted inner paths that satisfy the innersortkey - * list --- or any truncation thereof, if we are allowed to build - * a mergejoin using a subset of the merge clauses. Here, we - * consider both cheap startup cost and cheap total cost. Ignore + * Look for presorted inner paths that satisfy the innersortkey list + * --- or any truncation thereof, if we are allowed to build a + * mergejoin using a subset of the merge clauses. Here, we consider + * both cheap startup cost and cheap total cost. Ignore * inner_cheapest_total, since we already made a path with it. */ num_sortkeys = list_length(innersortkeys); @@ -559,8 +554,8 @@ match_unsorted_outer(PlannerInfo *root, /* * Look for an inner path ordered well enough for the first - * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is - * modified destructively, which is why we made a copy... + * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified + * destructively, which is why we made a copy... */ trialsortkeys = list_truncate(trialsortkeys, sortkeycnt); innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist, @@ -611,8 +606,8 @@ match_unsorted_outer(PlannerInfo *root, if (innerpath != cheapest_total_inner) { /* - * Avoid rebuilding clause list if we already made - * one; saves memory in big join trees... + * Avoid rebuilding clause list if we already made one; + * saves memory in big join trees... 
*/ if (newclauses == NIL) { @@ -620,8 +615,8 @@ match_unsorted_outer(PlannerInfo *root, { newclauses = find_mergeclauses_for_pathkeys(root, - trialsortkeys, - mergeclauses); + trialsortkeys, + mergeclauses); Assert(newclauses != NIL); } else @@ -697,8 +692,8 @@ hash_inner_and_outer(PlannerInfo *root, * We need to build only one hashpath for any given pair of outer and * inner relations; all of the hashable clauses will be used as keys. * - * Scan the join's restrictinfo list to find hashjoinable clauses that - * are usable with this pair of sub-relations. + * Scan the join's restrictinfo list to find hashjoinable clauses that are + * usable with this pair of sub-relations. */ hashclauses = NIL; foreach(l, restrictlist) @@ -725,7 +720,7 @@ hash_inner_and_outer(PlannerInfo *root, /* righthand side is inner */ } else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) && - bms_is_subset(restrictinfo->right_relids, outerrel->relids)) + bms_is_subset(restrictinfo->right_relids, outerrel->relids)) { /* lefthand side is inner */ } @@ -739,9 +734,9 @@ hash_inner_and_outer(PlannerInfo *root, if (hashclauses) { /* - * We consider both the cheapest-total-cost and - * cheapest-startup-cost outer paths. There's no need to consider - * any but the cheapest-total-cost inner path, however. + * We consider both the cheapest-total-cost and cheapest-startup-cost + * outer paths. There's no need to consider any but the + * cheapest-total-cost inner path, however. */ Path *cheapest_startup_outer = outerrel->cheapest_startup_path; Path *cheapest_total_outer = outerrel->cheapest_total_path; @@ -807,15 +802,15 @@ select_mergejoin_clauses(RelOptInfo *joinrel, RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l); /* - * If processing an outer join, only use its own join clauses in - * the merge. For inner joins we need not be so picky. + * If processing an outer join, only use its own join clauses in the + * merge. For inner joins we need not be so picky. * - * Furthermore, if it is a right/full join then *all* the explicit - * join clauses must be mergejoinable, else the executor will - * fail. If we are asked for a right join then just return NIL to - * indicate no mergejoin is possible (we can handle it as a left - * join instead). If we are asked for a full join then emit an - * error, because there is no fallback. + * Furthermore, if it is a right/full join then *all* the explicit join + * clauses must be mergejoinable, else the executor will fail. If we + * are asked for a right join then just return NIL to indicate no + * mergejoin is possible (we can handle it as a left join instead). If + * we are asked for a full join then emit an error, because there is + * no fallback. */ if (isouterjoin) { @@ -847,8 +842,8 @@ select_mergejoin_clauses(RelOptInfo *joinrel, /* * Check if clause is usable with these input rels. All the vars - * needed on each side of the clause must be available from one or - * the other of the input rels. + * needed on each side of the clause must be available from one or the + * other of the input rels. 
*/ if (bms_is_subset(restrictinfo->left_relids, outerrel->relids) && bms_is_subset(restrictinfo->right_relids, innerrel->relids)) @@ -856,7 +851,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel, /* righthand side is inner */ } else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) && - bms_is_subset(restrictinfo->right_relids, outerrel->relids)) + bms_is_subset(restrictinfo->right_relids, outerrel->relids)) { /* lefthand side is inner */ } diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index f4f2d779b0a..ecb63156860 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.75 2005/07/28 22:27:00 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.76 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -49,17 +49,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels) /* * First, consider left-sided and right-sided plans, in which rels of - * exactly level-1 member relations are joined against initial - * relations. We prefer to join using join clauses, but if we find a - * rel of level-1 members that has no join clauses, we will generate - * Cartesian-product joins against all initial rels not already - * contained in it. + * exactly level-1 member relations are joined against initial relations. + * We prefer to join using join clauses, but if we find a rel of level-1 + * members that has no join clauses, we will generate Cartesian-product + * joins against all initial rels not already contained in it. * - * In the first pass (level == 2), we try to join each initial rel to - * each initial rel that appears later in joinrels[1]. (The - * mirror-image joins are handled automatically by make_join_rel.) In - * later passes, we try to join rels of size level-1 from - * joinrels[level-1] to each initial rel in joinrels[1]. + * In the first pass (level == 2), we try to join each initial rel to each + * initial rel that appears later in joinrels[1]. (The mirror-image joins + * are handled automatically by make_join_rel.) In later passes, we try + * to join rels of size level-1 from joinrels[level-1] to each initial rel + * in joinrels[1]. */ foreach(r, joinrels[level - 1]) { @@ -76,23 +75,22 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels) if (old_rel->joininfo != NIL) { /* - * Note that if all available join clauses for this rel - * require more than one other rel, we will fail to make any - * joins against it here. In most cases that's OK; it'll be - * considered by "bushy plan" join code in a higher-level pass - * where we have those other rels collected into a join rel. + * Note that if all available join clauses for this rel require + * more than one other rel, we will fail to make any joins against + * it here. In most cases that's OK; it'll be considered by + * "bushy plan" join code in a higher-level pass where we have + * those other rels collected into a join rel. */ new_rels = make_rels_by_clause_joins(root, old_rel, other_rels); /* - * An exception occurs when there is a clauseless join inside - * an IN (sub-SELECT) construct. Here, the members of the - * subselect all have join clauses (against the stuff outside - * the IN), but they *must* be joined to each other before we - * can make use of those join clauses. So do the clauseless - * join bit. 
+ * An exception occurs when there is a clauseless join inside an + * IN (sub-SELECT) construct. Here, the members of the subselect + * all have join clauses (against the stuff outside the IN), but + * they *must* be joined to each other before we can make use of + * those join clauses. So do the clauseless join bit. * * See also the last-ditch case below. */ @@ -115,30 +113,29 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels) /* * At levels above 2 we will generate the same joined relation in * multiple ways --- for example (a join b) join c is the same - * RelOptInfo as (b join c) join a, though the second case will - * add a different set of Paths to it. To avoid making extra work - * for subsequent passes, do not enter the same RelOptInfo into - * our output list multiple times. + * RelOptInfo as (b join c) join a, though the second case will add a + * different set of Paths to it. To avoid making extra work for + * subsequent passes, do not enter the same RelOptInfo into our output + * list multiple times. */ result_rels = list_concat_unique_ptr(result_rels, new_rels); } /* - * Now, consider "bushy plans" in which relations of k initial rels - * are joined to relations of level-k initial rels, for 2 <= k <= - * level-2. + * Now, consider "bushy plans" in which relations of k initial rels are + * joined to relations of level-k initial rels, for 2 <= k <= level-2. * * We only consider bushy-plan joins for pairs of rels where there is a - * suitable join clause, in order to avoid unreasonable growth of - * planning time. + * suitable join clause, in order to avoid unreasonable growth of planning + * time. */ for (k = 2;; k++) { int other_level = level - k; /* - * Since make_join_rel(x, y) handles both x,y and y,x cases, we - * only need to go as far as the halfway point. + * Since make_join_rel(x, y) handles both x,y and y,x cases, we only + * need to go as far as the halfway point. */ if (k > other_level) break; @@ -165,8 +162,8 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels) { /* * OK, we can build a rel of the right level from this - * pair of rels. Do so if there is at least one - * usable join clause. + * pair of rels. Do so if there is at least one usable + * join clause. */ if (have_relevant_joinclause(old_rel, new_rel)) { @@ -185,16 +182,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels) } /* - * Last-ditch effort: if we failed to find any usable joins so far, - * force a set of cartesian-product joins to be generated. This - * handles the special case where all the available rels have join - * clauses but we cannot use any of the joins yet. An example is + * Last-ditch effort: if we failed to find any usable joins so far, force + * a set of cartesian-product joins to be generated. This handles the + * special case where all the available rels have join clauses but we + * cannot use any of the joins yet. An example is * * SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0; * * The join clause will be usable at level 3, but at level 2 we have no - * choice but to make cartesian joins. We consider only left-sided - * and right-sided cartesian joins in this case (no bushy). + * choice but to make cartesian joins. We consider only left-sided and + * right-sided cartesian joins in this case (no bushy). 
*/ if (result_rels == NIL) { @@ -318,8 +315,8 @@ make_rels_by_clauseless_joins(PlannerInfo *root, jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER); /* - * As long as given other_rels are distinct, don't need to - * test to see if jrel is already part of output list. + * As long as given other_rels are distinct, don't need to test to + * see if jrel is already part of output list. */ if (jrel) result = lcons(jrel, result); @@ -393,10 +390,10 @@ make_jointree_rel(PlannerInfo *root, Node *jtnode) elog(ERROR, "invalid join order"); /* - * Since we are only going to consider this one way to do it, - * we're done generating Paths for this joinrel and can now select - * the cheapest. In fact we *must* do so now, since next level up - * will need it! + * Since we are only going to consider this one way to do it, we're + * done generating Paths for this joinrel and can now select the + * cheapest. In fact we *must* do so now, since next level up will + * need it! */ set_cheapest(rel); @@ -439,10 +436,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, joinrelids = bms_union(rel1->relids, rel2->relids); /* - * If we are implementing IN clauses as joins, there are some joins - * that are illegal. Check to see if the proposed join is trouble. We - * can skip the work if looking at an outer join, however, because - * only top-level joins might be affected. + * If we are implementing IN clauses as joins, there are some joins that + * are illegal. Check to see if the proposed join is trouble. We can skip + * the work if looking at an outer join, however, because only top-level + * joins might be affected. */ if (jointype == JOIN_INNER) { @@ -454,8 +451,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, /* * This IN clause is not relevant unless its RHS overlaps the - * proposed join. (Check this first as a fast path for - * dismissing most irrelevant INs quickly.) + * proposed join. (Check this first as a fast path for dismissing + * most irrelevant INs quickly.) */ if (!bms_overlap(ininfo->righthand, joinrelids)) continue; @@ -468,10 +465,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, continue; /* - * Cannot join if proposed join contains rels not in the RHS - * *and* contains only part of the RHS. We must build the - * complete RHS (subselect's join) before it can be joined to - * rels outside the subselect. + * Cannot join if proposed join contains rels not in the RHS *and* + * contains only part of the RHS. We must build the complete RHS + * (subselect's join) before it can be joined to rels outside the + * subselect. */ if (!bms_is_subset(ininfo->righthand, joinrelids)) { @@ -480,13 +477,12 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, } /* - * At this point we are considering a join of the IN's RHS to - * some other rel(s). + * At this point we are considering a join of the IN's RHS to some + * other rel(s). * - * If we already joined IN's RHS to any other rels in either - * input path, then this join is not constrained (the - * necessary work was done at the lower level where that join - * occurred). + * If we already joined IN's RHS to any other rels in either input + * path, then this join is not constrained (the necessary work was + * done at the lower level where that join occurred). 
*/ if (bms_is_subset(ininfo->righthand, rel1->relids) && !bms_equal(ininfo->righthand, rel1->relids)) @@ -500,12 +496,11 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, * innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles * RHS/LHS. * - * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS; - * conversely JOIN_UNIQUE_INNER will work if innerrel is - * exactly RHS. + * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS; conversely + * JOIN_UNIQUE_INNER will work if innerrel is exactly RHS. * - * But none of these will work if we already found another IN - * that needs to trigger here. + * But none of these will work if we already found another IN that + * needs to trigger here. */ if (jointype != JOIN_INNER) { @@ -532,8 +527,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, } /* - * Find or build the join RelOptInfo, and compute the restrictlist - * that goes with this particular joining. + * Find or build the join RelOptInfo, and compute the restrictlist that + * goes with this particular joining. */ joinrel = build_join_rel(root, joinrelids, rel1, rel2, jointype, &restrictlist); diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c index eb1e1a6ffcd..be5a0c3434f 100644 --- a/src/backend/optimizer/path/orindxpath.c +++ b/src/backend/optimizer/path/orindxpath.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.74 2005/07/28 20:26:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.75 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -99,14 +99,14 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel) if (restriction_is_or_clause(rinfo)) { /* - * Use the generate_bitmap_or_paths() machinery to estimate - * the value of each OR clause. We can use regular - * restriction clauses along with the OR clause contents to - * generate indexquals. We pass outer_relids = NULL so that - * sub-clauses that are actually joins will be ignored. + * Use the generate_bitmap_or_paths() machinery to estimate the + * value of each OR clause. We can use regular restriction + * clauses along with the OR clause contents to generate + * indexquals. We pass outer_relids = NULL so that sub-clauses + * that are actually joins will be ignored. */ - List *orpaths; - ListCell *k; + List *orpaths; + ListCell *k; orpaths = generate_bitmap_or_paths(root, rel, list_make1(rinfo), @@ -116,7 +116,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel) /* Locate the cheapest OR path */ foreach(k, orpaths) { - BitmapOrPath *path = (BitmapOrPath *) lfirst(k); + BitmapOrPath *path = (BitmapOrPath *) lfirst(k); Assert(IsA(path, BitmapOrPath)); if (bestpath == NULL || @@ -134,8 +134,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel) return false; /* - * Convert the path's indexclauses structure to a RestrictInfo tree. - * We include any partial-index predicates so as to get a reasonable + * Convert the path's indexclauses structure to a RestrictInfo tree. We + * include any partial-index predicates so as to get a reasonable * representation of what the path is actually scanning. 
*/ newrinfos = make_restrictinfo_from_bitmapqual((Path *) bestpath, @@ -155,12 +155,12 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel) rel->baserestrictinfo = list_concat(rel->baserestrictinfo, newrinfos); /* - * Adjust the original OR clause's cached selectivity to compensate - * for the selectivity of the added (but redundant) lower-level qual. - * This should result in the join rel getting approximately the same - * rows estimate as it would have gotten without all these - * shenanigans. (XXX major hack alert ... this depends on the - * assumption that the selectivity will stay cached ...) + * Adjust the original OR clause's cached selectivity to compensate for + * the selectivity of the added (but redundant) lower-level qual. This + * should result in the join rel getting approximately the same rows + * estimate as it would have gotten without all these shenanigans. (XXX + * major hack alert ... this depends on the assumption that the + * selectivity will stay cached ...) */ or_selec = clause_selectivity(root, (Node *) or_rinfo, 0, JOIN_INNER); diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 09ad68ecd93..a2626929826 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -11,7 +11,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.72 2005/08/27 22:13:43 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.73 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -33,17 +33,17 @@ static PathKeyItem *makePathKeyItem(Node *key, Oid sortop, bool checkType); static void generate_outer_join_implications(PlannerInfo *root, - List *equi_key_set, - Relids *relids); + List *equi_key_set, + Relids *relids); static void sub_generate_join_implications(PlannerInfo *root, - List *equi_key_set, Relids *relids, - Node *item1, Oid sortop1, - Relids item1_relids); + List *equi_key_set, Relids *relids, + Node *item1, Oid sortop1, + Relids item1_relids); static void process_implied_const_eq(PlannerInfo *root, - List *equi_key_set, Relids *relids, - Node *item1, Oid sortop1, - Relids item1_relids, - bool delete_it); + List *equi_key_set, Relids *relids, + Node *item1, Oid sortop1, + Relids item1_relids, + bool delete_it); static List *make_canonical_pathkey(PlannerInfo *root, PathKeyItem *item); static Var *find_indexkey_var(PlannerInfo *root, RelOptInfo *rel, AttrNumber varattno); @@ -59,12 +59,11 @@ makePathKeyItem(Node *key, Oid sortop, bool checkType) PathKeyItem *item = makeNode(PathKeyItem); /* - * Some callers pass expressions that are not necessarily of the same - * type as the sort operator expects as input (for example when - * dealing with an index that uses binary-compatible operators). We - * must relabel these with the correct type so that the key - * expressions will be seen as equal() to expressions that have been - * correctly labeled. + * Some callers pass expressions that are not necessarily of the same type + * as the sort operator expects as input (for example when dealing with an + * index that uses binary-compatible operators). We must relabel these + * with the correct type so that the key expressions will be seen as + * equal() to expressions that have been correctly labeled. 
*/ if (checkType) { @@ -116,20 +115,19 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo) return; /* - * Our plan is to make a two-element set, then sweep through the - * existing equijoin sets looking for matches to item1 or item2. When - * we find one, we remove that set from equi_key_list and union it - * into our new set. When done, we add the new set to the front of - * equi_key_list. + * Our plan is to make a two-element set, then sweep through the existing + * equijoin sets looking for matches to item1 or item2. When we find one, + * we remove that set from equi_key_list and union it into our new set. + * When done, we add the new set to the front of equi_key_list. * * It may well be that the two items we're given are already known to be * equijoin-equivalent, in which case we don't need to change our data * structure. If we find both of them in the same equivalence set to * start with, we can quit immediately. * - * This is a standard UNION-FIND problem, for which there exist better - * data structures than simple lists. If this code ever proves to be - * a bottleneck then it could be sped up --- but for now, simple is + * This is a standard UNION-FIND problem, for which there exist better data + * structures than simple lists. If this code ever proves to be a + * bottleneck then it could be sped up --- but for now, simple is * beautiful. */ newset = NIL; @@ -148,8 +146,7 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo) if (item1here || item2here) { /* - * If find both in same equivalence set, no need to do any - * more + * If find both in same equivalence set, no need to do any more */ if (item1here && item2here) { @@ -228,18 +225,18 @@ generate_implied_equalities(PlannerInfo *root) int i1; /* - * A set containing only two items cannot imply any equalities - * beyond the one that created the set, so we can skip it --- - * unless outer joins appear in the query. + * A set containing only two items cannot imply any equalities beyond + * the one that created the set, so we can skip it --- unless outer + * joins appear in the query. */ if (nitems < 3 && !root->hasOuterJoins) continue; /* - * Collect info about relids mentioned in each item. For this - * routine we only really care whether there are any at all in - * each item, but process_implied_equality() needs the exact sets, - * so we may as well pull them here. + * Collect info about relids mentioned in each item. For this routine + * we only really care whether there are any at all in each item, but + * process_implied_equality() needs the exact sets, so we may as well + * pull them here. */ relids = (Relids *) palloc(nitems * sizeof(Relids)); have_consts = false; @@ -258,9 +255,9 @@ generate_implied_equalities(PlannerInfo *root) * Match each item in the set with all that appear after it (it's * sufficient to generate A=B, need not process B=A too). * - * A set containing only two items cannot imply any equalities - * beyond the one that created the set, so we can skip this - * processing in that case. + * A set containing only two items cannot imply any equalities beyond the + * one that created the set, so we can skip this processing in that + * case. */ if (nitems >= 3) { @@ -346,7 +343,7 @@ generate_implied_equalities(PlannerInfo *root) * the time it gets here, the restriction will look like * COALESCE(LEFTVAR, RIGHTVAR) = CONSTANT * and we will have a join clause LEFTVAR = RIGHTVAR that we can match the - * COALESCE expression to. 
In this situation we can push LEFTVAR = CONSTANT + * COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT * and RIGHTVAR = CONSTANT into the input relations, since any rows not * meeting these conditions cannot contribute to the join result. * @@ -397,8 +394,8 @@ generate_outer_join_implications(PlannerInfo *root, */ static void sub_generate_join_implications(PlannerInfo *root, - List *equi_key_set, Relids *relids, - Node *item1, Oid sortop1, Relids item1_relids) + List *equi_key_set, Relids *relids, + Node *item1, Oid sortop1, Relids item1_relids) { ListCell *l; @@ -410,34 +407,36 @@ sub_generate_join_implications(PlannerInfo *root, foreach(l, root->left_join_clauses) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); - Node *leftop = get_leftop(rinfo->clause); + Node *leftop = get_leftop(rinfo->clause); if (equal(leftop, item1) && rinfo->left_sortop == sortop1) { /* - * Match, so find constant member(s) of set and generate - * implied INNERVAR = CONSTANT + * Match, so find constant member(s) of set and generate implied + * INNERVAR = CONSTANT */ - Node *rightop = get_rightop(rinfo->clause); + Node *rightop = get_rightop(rinfo->clause); process_implied_const_eq(root, equi_key_set, relids, rightop, rinfo->right_sortop, rinfo->right_relids, false); + /* * We can remove explicit tests of this outer-join qual, too, - * since we now have tests forcing each of its sides - * to the same value. + * since we now have tests forcing each of its sides to the same + * value. */ process_implied_equality(root, leftop, rightop, rinfo->left_sortop, rinfo->right_sortop, rinfo->left_relids, rinfo->right_relids, true); + /* - * And recurse to see if we can deduce anything from - * INNERVAR = CONSTANT + * And recurse to see if we can deduce anything from INNERVAR = + * CONSTANT */ sub_generate_join_implications(root, equi_key_set, relids, rightop, @@ -450,34 +449,36 @@ sub_generate_join_implications(PlannerInfo *root, foreach(l, root->right_join_clauses) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); - Node *rightop = get_rightop(rinfo->clause); + Node *rightop = get_rightop(rinfo->clause); if (equal(rightop, item1) && rinfo->right_sortop == sortop1) { /* - * Match, so find constant member(s) of set and generate - * implied INNERVAR = CONSTANT + * Match, so find constant member(s) of set and generate implied + * INNERVAR = CONSTANT */ - Node *leftop = get_leftop(rinfo->clause); + Node *leftop = get_leftop(rinfo->clause); process_implied_const_eq(root, equi_key_set, relids, leftop, rinfo->left_sortop, rinfo->left_relids, false); + /* * We can remove explicit tests of this outer-join qual, too, - * since we now have tests forcing each of its sides - * to the same value. + * since we now have tests forcing each of its sides to the same + * value. 
*/ process_implied_equality(root, leftop, rightop, rinfo->left_sortop, rinfo->right_sortop, rinfo->left_relids, rinfo->right_relids, true); + /* - * And recurse to see if we can deduce anything from - * INNERVAR = CONSTANT + * And recurse to see if we can deduce anything from INNERVAR = + * CONSTANT */ sub_generate_join_implications(root, equi_key_set, relids, leftop, @@ -492,8 +493,8 @@ sub_generate_join_implications(PlannerInfo *root, if (IsA(item1, CoalesceExpr)) { CoalesceExpr *cexpr = (CoalesceExpr *) item1; - Node *cfirst; - Node *csecond; + Node *cfirst; + Node *csecond; if (list_length(cexpr->args) != 2) return; @@ -501,26 +502,26 @@ sub_generate_join_implications(PlannerInfo *root, csecond = (Node *) lsecond(cexpr->args); /* - * Examine each mergejoinable full-join clause, looking for a - * clause of the form "x = y" matching the COALESCE(x,y) expression + * Examine each mergejoinable full-join clause, looking for a clause + * of the form "x = y" matching the COALESCE(x,y) expression */ foreach(l, root->full_join_clauses) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); - Node *leftop = get_leftop(rinfo->clause); - Node *rightop = get_rightop(rinfo->clause); + Node *leftop = get_leftop(rinfo->clause); + Node *rightop = get_rightop(rinfo->clause); /* - * We can assume the COALESCE() inputs are in the same order - * as the join clause, since both were automatically generated - * in the cases we care about. + * We can assume the COALESCE() inputs are in the same order as + * the join clause, since both were automatically generated in the + * cases we care about. * - * XXX currently this may fail to match in cross-type cases - * because the COALESCE will contain typecast operations while - * the join clause may not (if there is a cross-type mergejoin - * operator available for the two column types). - * Is it OK to strip implicit coercions from the COALESCE - * arguments? What of the sortops in such cases? + * XXX currently this may fail to match in cross-type cases because + * the COALESCE will contain typecast operations while the join + * clause may not (if there is a cross-type mergejoin operator + * available for the two column types). Is it OK to strip implicit + * coercions from the COALESCE arguments? What of the sortops in + * such cases? */ if (equal(leftop, cfirst) && equal(rightop, csecond) && @@ -548,10 +549,11 @@ sub_generate_join_implications(PlannerInfo *root, sortop1, item1_relids, true); + /* * We can remove explicit tests of this outer-join qual, too, - * since we now have tests forcing each of its sides - * to the same value. + * since we now have tests forcing each of its sides to the + * same value. */ process_implied_equality(root, leftop, rightop, @@ -560,9 +562,10 @@ sub_generate_join_implications(PlannerInfo *root, rinfo->left_relids, rinfo->right_relids, true); + /* - * And recurse to see if we can deduce anything from - * LEFTVAR = CONSTANT + * And recurse to see if we can deduce anything from LEFTVAR = + * CONSTANT */ sub_generate_join_implications(root, equi_key_set, relids, leftop, @@ -700,19 +703,19 @@ canonicalize_pathkeys(PlannerInfo *root, List *pathkeys) List *cpathkey; /* - * It's sufficient to look at the first entry in the sublist; if - * there are more entries, they're already part of an equivalence - * set by definition. + * It's sufficient to look at the first entry in the sublist; if there + * are more entries, they're already part of an equivalence set by + * definition. 
*/ Assert(pathkey != NIL); item = (PathKeyItem *) linitial(pathkey); cpathkey = make_canonical_pathkey(root, item); /* - * Eliminate redundant ordering requests --- ORDER BY A,A is the - * same as ORDER BY A. We want to check this only after we have - * canonicalized the keys, so that equivalent-key knowledge is - * used when deciding if an item is redundant. + * Eliminate redundant ordering requests --- ORDER BY A,A is the same + * as ORDER BY A. We want to check this only after we have + * canonicalized the keys, so that equivalent-key knowledge is used + * when deciding if an item is redundant. */ new_pathkeys = list_append_unique_ptr(new_pathkeys, cpathkey); } @@ -769,8 +772,8 @@ compare_pathkeys(List *keys1, List *keys2) List *subkey2 = (List *) lfirst(key2); /* - * XXX would like to check that we've been given canonicalized - * input, but PlannerInfo not accessible here... + * XXX would like to check that we've been given canonicalized input, + * but PlannerInfo not accessible here... */ #ifdef NOT_USED Assert(list_member_ptr(root->equi_key_list, subkey1)); @@ -778,10 +781,10 @@ compare_pathkeys(List *keys1, List *keys2) #endif /* - * We will never have two subkeys where one is a subset of the - * other, because of the canonicalization process. Either they - * are equal or they ain't. Furthermore, we only need pointer - * comparison to detect equality. + * We will never have two subkeys where one is a subset of the other, + * because of the canonicalization process. Either they are equal or + * they ain't. Furthermore, we only need pointer comparison to detect + * equality. */ if (subkey1 != subkey2) return PATHKEYS_DIFFERENT; /* no need to keep looking */ @@ -789,9 +792,9 @@ compare_pathkeys(List *keys1, List *keys2) /* * If we reached the end of only one list, the other is longer and - * therefore not a subset. (We assume the additional sublist(s) of - * the other list are not NIL --- no pathkey list should ever have a - * NIL sublist.) + * therefore not a subset. (We assume the additional sublist(s) of the + * other list are not NIL --- no pathkey list should ever have a NIL + * sublist.) */ if (key1 == NULL && key2 == NULL) return PATHKEYS_EQUAL; @@ -840,8 +843,8 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys, Path *path = (Path *) lfirst(l); /* - * Since cost comparison is a lot cheaper than pathkey comparison, - * do that first. (XXX is that still true?) + * Since cost comparison is a lot cheaper than pathkey comparison, do + * that first. (XXX is that still true?) */ if (matched_path != NULL && compare_path_costs(matched_path, path, cost_criterion) <= 0) @@ -879,11 +882,11 @@ get_cheapest_fractional_path_for_pathkeys(List *paths, Path *path = (Path *) lfirst(l); /* - * Since cost comparison is a lot cheaper than pathkey comparison, - * do that first. + * Since cost comparison is a lot cheaper than pathkey comparison, do + * that first. */ if (matched_path != NULL && - compare_fractional_path_costs(matched_path, path, fraction) <= 0) + compare_fractional_path_costs(matched_path, path, fraction) <= 0) continue; if (pathkeys_contained_in(pathkeys, path->pathkeys)) @@ -954,8 +957,8 @@ build_index_pathkeys(PlannerInfo *root, cpathkey = make_canonical_pathkey(root, item); /* - * Eliminate redundant ordering info; could happen if query is - * such that index keys are equijoined... + * Eliminate redundant ordering info; could happen if query is such + * that index keys are equijoined... 
*/ retval = list_append_unique_ptr(retval, cpathkey); @@ -1003,7 +1006,7 @@ find_indexkey_var(PlannerInfo *root, RelOptInfo *rel, AttrNumber varattno) /* * convert_subquery_pathkeys * Build a pathkeys list that describes the ordering of a subquery's - * result, in the terms of the outer query. This is essentially a + * result, in the terms of the outer query. This is essentially a * task of conversion. * * 'rel': outer query's RelOptInfo for the subquery relation. @@ -1033,19 +1036,18 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel, /* * The sub_pathkey could contain multiple elements (representing - * knowledge that multiple items are effectively equal). Each - * element might match none, one, or more of the output columns - * that are visible to the outer query. This means we may have - * multiple possible representations of the sub_pathkey in the - * context of the outer query. Ideally we would generate them all - * and put them all into a pathkey list of the outer query, - * thereby propagating equality knowledge up to the outer query. - * Right now we cannot do so, because the outer query's canonical - * pathkey sets are already frozen when this is called. Instead - * we prefer the one that has the highest "score" (number of - * canonical pathkey peers, plus one if it matches the outer - * query_pathkeys). This is the most likely to be useful in the - * outer query. + * knowledge that multiple items are effectively equal). Each element + * might match none, one, or more of the output columns that are + * visible to the outer query. This means we may have multiple + * possible representations of the sub_pathkey in the context of the + * outer query. Ideally we would generate them all and put them all + * into a pathkey list of the outer query, thereby propagating + * equality knowledge up to the outer query. Right now we cannot do + * so, because the outer query's canonical pathkey sets are already + * frozen when this is called. Instead we prefer the one that has the + * highest "score" (number of canonical pathkey peers, plus one if it + * matches the outer query_pathkeys). This is the most likely to be + * useful in the outer query. */ foreach(j, sub_pathkey) { @@ -1144,13 +1146,13 @@ build_join_pathkeys(PlannerInfo *root, return NIL; /* - * This used to be quite a complex bit of code, but now that all - * pathkey sublists start out life canonicalized, we don't have to do - * a darn thing here! The inner-rel vars we used to need to add are - * *already* part of the outer pathkey! + * This used to be quite a complex bit of code, but now that all pathkey + * sublists start out life canonicalized, we don't have to do a darn thing + * here! The inner-rel vars we used to need to add are *already* part of + * the outer pathkey! * - * We do, however, need to truncate the pathkeys list, since it may - * contain pathkeys that were useful for forming this joinrel but are + * We do, however, need to truncate the pathkeys list, since it may contain + * pathkeys that were useful for forming this joinrel but are * uninteresting to higher levels. */ return truncate_useless_pathkeys(root, joinrel, outer_pathkeys); @@ -1289,22 +1291,20 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, /* * We can match a pathkey against either left or right side of any - * mergejoin clause. (We examine both sides since we aren't told - * if the given pathkeys are for inner or outer input path; no - * confusion is possible.) Furthermore, if there are multiple - * matching clauses, take them all. 
In plain inner-join scenarios - * we expect only one match, because redundant-mergeclause - * elimination will have removed any redundant mergeclauses from - * the input list. However, in outer-join scenarios there might be - * multiple matches. An example is + * mergejoin clause. (We examine both sides since we aren't told if + * the given pathkeys are for inner or outer input path; no confusion + * is possible.) Furthermore, if there are multiple matching clauses, + * take them all. In plain inner-join scenarios we expect only one + * match, because redundant-mergeclause elimination will have removed + * any redundant mergeclauses from the input list. However, in + * outer-join scenarios there might be multiple matches. An example is * - * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and - * a.v1 = b.v2; + * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and a.v1 = + * b.v2; * * Given the pathkeys ((a.v1), (a.v2)) it is okay to return all three - * clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and - * indeed we *must* do so or we will be unable to form a valid - * plan. + * clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and indeed + * we *must* do so or we will be unable to form a valid plan. */ foreach(j, restrictinfos) { @@ -1325,15 +1325,15 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, /* * If we didn't find a mergeclause, we're done --- any additional - * sort-key positions in the pathkeys are useless. (But we can - * still mergejoin if we found at least one mergeclause.) + * sort-key positions in the pathkeys are useless. (But we can still + * mergejoin if we found at least one mergeclause.) */ if (matched_restrictinfos == NIL) break; /* - * If we did find usable mergeclause(s) for this sort-key - * position, add them to result list. + * If we did find usable mergeclause(s) for this sort-key position, + * add them to result list. */ mergeclauses = list_concat(mergeclauses, matched_restrictinfos); } @@ -1390,14 +1390,13 @@ make_pathkeys_for_mergeclauses(PlannerInfo *root, } /* - * When we are given multiple merge clauses, it's possible that - * some clauses refer to the same vars as earlier clauses. There's - * no reason for us to specify sort keys like (A,B,A) when (A,B) - * will do --- and adding redundant sort keys makes add_path think - * that this sort order is different from ones that are really the - * same, so don't do it. Since we now have a canonicalized - * pathkey, a simple ptrMember test is sufficient to detect - * redundant keys. + * When we are given multiple merge clauses, it's possible that some + * clauses refer to the same vars as earlier clauses. There's no + * reason for us to specify sort keys like (A,B,A) when (A,B) will do + * --- and adding redundant sort keys makes add_path think that this + * sort order is different from ones that are really the same, so + * don't do it. Since we now have a canonicalized pathkey, a simple + * ptrMember test is sufficient to detect redundant keys. */ pathkeys = list_append_unique_ptr(pathkeys, pathkey); } @@ -1447,8 +1446,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys) cache_mergeclause_pathkeys(root, restrictinfo); /* - * We can compare canonical pathkey sublists by simple - * pointer equality; see compare_pathkeys. + * We can compare canonical pathkey sublists by simple pointer + * equality; see compare_pathkeys. 
*/ if (pathkey == restrictinfo->left_pathkey || pathkey == restrictinfo->right_pathkey) @@ -1460,8 +1459,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys) /* * If we didn't find a mergeclause, we're done --- any additional - * sort-key positions in the pathkeys are useless. (But we can - * still mergejoin if we found at least one mergeclause.) + * sort-key positions in the pathkeys are useless. (But we can still + * mergejoin if we found at least one mergeclause.) */ if (matched) useful++; diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c index 348524372e1..26058dc1b64 100644 --- a/src/backend/optimizer/path/tidpath.c +++ b/src/backend/optimizer/path/tidpath.c @@ -11,7 +11,7 @@ * WHERE ctid IN (tid1, tid2, ...) * * There is currently no special support for joins involving CTID; in - * particular nothing corresponding to best_inner_indexscan(). Since it's + * particular nothing corresponding to best_inner_indexscan(). Since it's * not very useful to store TIDs of one table in another table, there * doesn't seem to be enough use-case to justify adding a lot of code * for that. @@ -22,7 +22,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.24 2005/08/23 20:49:47 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.25 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -50,7 +50,7 @@ static List *TidQualFromRestrictinfo(int varno, List *restrictinfo); * * If it is, return the pseudoconstant subnode; if not, return NULL. * - * We check that the CTID Var belongs to relation "varno". That is probably + * We check that the CTID Var belongs to relation "varno". That is probably * redundant considering this is only applied to restriction clauses, but * let's be safe. 
*/ diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index b7af04e1b9f..f0dd6548711 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.200 2005/10/13 00:06:46 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.201 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -50,10 +50,10 @@ static IndexScan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path, List *tlist, List *scan_clauses, List **nonlossy_clauses); static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root, - BitmapHeapPath *best_path, - List *tlist, List *scan_clauses); + BitmapHeapPath *best_path, + List *tlist, List *scan_clauses); static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, - List **qual, List **indexqual); + List **qual, List **indexqual); static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path, List *tlist, List *scan_clauses); static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path, @@ -72,7 +72,7 @@ static void fix_indexqual_references(List *indexquals, IndexPath *index_path, List **indexstrategy, List **indexsubtype); static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, - Oid *opclass); + Oid *opclass); static List *get_switched_clauses(List *clauses, Relids outerrelids); static void copy_path_costsize(Plan *dest, Path *src); static void copy_plan_costsize(Plan *dest, Plan *src); @@ -82,15 +82,15 @@ static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid, List *indexstrategy, List *indexsubtype, ScanDirection indexscandir); static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid, - List *indexqual, - List *indexqualorig, - List *indexstrategy, - List *indexsubtype); + List *indexqual, + List *indexqualorig, + List *indexstrategy, + List *indexsubtype); static BitmapHeapScan *make_bitmap_heapscan(List *qptlist, - List *qpqual, - Plan *lefttree, - List *bitmapqualorig, - Index scanrelid); + List *qpqual, + Plan *lefttree, + List *bitmapqualorig, + Index scanrelid); static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid, List *tideval); static FunctionScan *make_functionscan(List *qptlist, List *qpqual, @@ -164,7 +164,7 @@ create_plan(PlannerInfo *root, Path *best_path) break; case T_Material: plan = (Plan *) create_material_plan(root, - (MaterialPath *) best_path); + (MaterialPath *) best_path); break; case T_Unique: plan = (Plan *) create_unique_plan(root, @@ -195,12 +195,12 @@ create_scan_plan(PlannerInfo *root, Path *best_path) Scan *plan; /* - * For table scans, rather than using the relation targetlist (which - * is only those Vars actually needed by the query), we prefer to - * generate a tlist containing all Vars in order. This will allow the - * executor to optimize away projection of the table tuples, if - * possible. (Note that planner.c may replace the tlist we generate - * here, forcing projection to occur.) + * For table scans, rather than using the relation targetlist (which is + * only those Vars actually needed by the query), we prefer to generate a + * tlist containing all Vars in order. This will allow the executor to + * optimize away projection of the table tuples, if possible. 
(Note that + * planner.c may replace the tlist we generate here, forcing projection to + * occur.) */ if (use_physical_tlist(rel)) { @@ -213,8 +213,8 @@ create_scan_plan(PlannerInfo *root, Path *best_path) tlist = build_relation_tlist(rel); /* - * Extract the relevant restriction clauses from the parent relation; - * the executor must apply all these restrictions during the scan. + * Extract the relevant restriction clauses from the parent relation; the + * executor must apply all these restrictions during the scan. */ scan_clauses = rel->baserestrictinfo; @@ -237,7 +237,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path) case T_BitmapHeapScan: plan = (Scan *) create_bitmap_scan_plan(root, - (BitmapHeapPath *) best_path, + (BitmapHeapPath *) best_path, tlist, scan_clauses); break; @@ -308,8 +308,8 @@ use_physical_tlist(RelOptInfo *rel) int i; /* - * OK for subquery and function scans; otherwise, can't do it for - * anything except real relations. + * OK for subquery and function scans; otherwise, can't do it for anything + * except real relations. */ if (rel->rtekind != RTE_RELATION) { @@ -328,9 +328,9 @@ use_physical_tlist(RelOptInfo *rel) return false; /* - * Can't do it if any system columns are requested, either. (This - * could possibly be fixed but would take some fragile assumptions in - * setrefs.c, I think.) + * Can't do it if any system columns are requested, either. (This could + * possibly be fixed but would take some fragile assumptions in setrefs.c, + * I think.) */ for (i = rel->min_attr; i <= 0; i++) { @@ -415,14 +415,14 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path) #ifdef NOT_USED /* - * * Expensive function pullups may have pulled local predicates * - * into this path node. Put them in the qpqual of the plan node. * - * JMH, 6/15/92 + * * Expensive function pullups may have pulled local predicates * into + * this path node. Put them in the qpqual of the plan node. * JMH, + * 6/15/92 */ if (get_loc_restrictinfo(best_path) != NIL) set_qpqual((Plan) plan, list_concat(get_qpqual((Plan) plan), - get_actual_clauses(get_loc_restrictinfo(best_path)))); + get_actual_clauses(get_loc_restrictinfo(best_path)))); #endif return plan; @@ -444,13 +444,13 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path) ListCell *subpaths; /* - * It is possible for the subplans list to contain only one entry, - * or even no entries. Handle these cases specially. + * It is possible for the subplans list to contain only one entry, or even + * no entries. Handle these cases specially. * - * XXX ideally, if there's just one entry, we'd not bother to generate - * an Append node but just return the single child. At the moment this - * does not work because the varno of the child scan plan won't match - * the parent-rel Vars it'll be asked to emit. + * XXX ideally, if there's just one entry, we'd not bother to generate an + * Append node but just return the single child. At the moment this does + * not work because the varno of the child scan plan won't match the + * parent-rel Vars it'll be asked to emit. */ if (best_path->subpaths == NIL) { @@ -618,8 +618,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path) if (newitems) { /* - * If the top plan node can't do projections, we need to add a - * Result node to help it along. + * If the top plan node can't do projections, we need to add a Result + * node to help it along. 
*/ if (!is_projection_capable_plan(subplan)) subplan = (Plan *) make_result(newtlist, NULL, subplan); @@ -628,8 +628,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path) } /* - * Build control information showing which subplan output columns are - * to be examined by the grouping step. Unfortunately we can't merge this + * Build control information showing which subplan output columns are to + * be examined by the grouping step. Unfortunately we can't merge this * with the previous loop, since we didn't then know which version of the * subplan tlist we'd end up using. */ @@ -656,9 +656,9 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path) numGroups = (long) Min(best_path->rows, (double) LONG_MAX); /* - * Since the Agg node is going to project anyway, we can give it - * the minimum output tlist, without any stuff we might have added - * to the subplan tlist. + * Since the Agg node is going to project anyway, we can give it the + * minimum output tlist, without any stuff we might have added to the + * subplan tlist. */ plan = (Plan *) make_agg(root, build_relation_tlist(best_path->path.parent), @@ -776,9 +776,9 @@ create_indexscan_plan(PlannerInfo *root, stripped_indexquals = get_actual_clauses(indexquals); /* - * The executor needs a copy with the indexkey on the left of each - * clause and with index attr numbers substituted for table ones. This - * pass also gets strategy info and looks for "lossy" operators. + * The executor needs a copy with the indexkey on the left of each clause + * and with index attr numbers substituted for table ones. This pass also + * gets strategy info and looks for "lossy" operators. */ fix_indexqual_references(indexquals, best_path, &fixed_indexquals, @@ -792,12 +792,11 @@ create_indexscan_plan(PlannerInfo *root, /* * If this is an innerjoin scan, the indexclauses will contain join - * clauses that are not present in scan_clauses (since the passed-in - * value is just the rel's baserestrictinfo list). We must add these - * clauses to scan_clauses to ensure they get checked. In most cases - * we will remove the join clauses again below, but if a join clause - * contains a special operator, we need to make sure it gets into the - * scan_clauses. + * clauses that are not present in scan_clauses (since the passed-in value + * is just the rel's baserestrictinfo list). We must add these clauses to + * scan_clauses to ensure they get checked. In most cases we will remove + * the join clauses again below, but if a join clause contains a special + * operator, we need to make sure it gets into the scan_clauses. * * Note: pointer comparison should be enough to determine RestrictInfo * matches. @@ -806,25 +805,25 @@ create_indexscan_plan(PlannerInfo *root, scan_clauses = list_union_ptr(scan_clauses, best_path->indexclauses); /* - * The qpqual list must contain all restrictions not automatically - * handled by the index. All the predicates in the indexquals will be - * checked (either by the index itself, or by nodeIndexscan.c), but if - * there are any "special" operators involved then they must be included - * in qpqual. Also, any lossy index operators must be rechecked in - * the qpqual. The upshot is that qpqual must contain scan_clauses - * minus whatever appears in nonlossy_indexquals. + * The qpqual list must contain all restrictions not automatically handled + * by the index. 
All the predicates in the indexquals will be checked + * (either by the index itself, or by nodeIndexscan.c), but if there are + * any "special" operators involved then they must be included in qpqual. + * Also, any lossy index operators must be rechecked in the qpqual. The + * upshot is that qpqual must contain scan_clauses minus whatever appears + * in nonlossy_indexquals. * - * In normal cases simple pointer equality checks will be enough to - * spot duplicate RestrictInfos, so we try that first. In some situations - * (particularly with OR'd index conditions) we may have scan_clauses - * that are not equal to, but are logically implied by, the index quals; - * so we also try a predicate_implied_by() check to see if we can discard - * quals that way. (predicate_implied_by assumes its first input contains - * only immutable functions, so we have to check that.) We can also - * discard quals that are implied by a partial index's predicate. + * In normal cases simple pointer equality checks will be enough to spot + * duplicate RestrictInfos, so we try that first. In some situations + * (particularly with OR'd index conditions) we may have scan_clauses that + * are not equal to, but are logically implied by, the index quals; so we + * also try a predicate_implied_by() check to see if we can discard quals + * that way. (predicate_implied_by assumes its first input contains only + * immutable functions, so we have to check that.) We can also discard + * quals that are implied by a partial index's predicate. * - * While at it, we strip off the RestrictInfos to produce a list of - * plain expressions. + * While at it, we strip off the RestrictInfos to produce a list of plain + * expressions. */ qpqual = NIL; foreach(l, scan_clauses) @@ -836,7 +835,7 @@ create_indexscan_plan(PlannerInfo *root, continue; if (!contain_mutable_functions((Node *) rinfo->clause)) { - List *clausel = list_make1(rinfo->clause); + List *clausel = list_make1(rinfo->clause); if (predicate_implied_by(clausel, nonlossy_indexquals)) continue; @@ -898,13 +897,12 @@ create_bitmap_scan_plan(PlannerInfo *root, scan_clauses = get_actual_clauses(scan_clauses); /* - * If this is a innerjoin scan, the indexclauses will contain join - * clauses that are not present in scan_clauses (since the passed-in - * value is just the rel's baserestrictinfo list). We must add these - * clauses to scan_clauses to ensure they get checked. In most cases - * we will remove the join clauses again below, but if a join clause - * contains a special operator, we need to make sure it gets into the - * scan_clauses. + * If this is a innerjoin scan, the indexclauses will contain join clauses + * that are not present in scan_clauses (since the passed-in value is just + * the rel's baserestrictinfo list). We must add these clauses to + * scan_clauses to ensure they get checked. In most cases we will remove + * the join clauses again below, but if a join clause contains a special + * operator, we need to make sure it gets into the scan_clauses. */ if (best_path->isjoininner) { @@ -912,12 +910,12 @@ create_bitmap_scan_plan(PlannerInfo *root, } /* - * The qpqual list must contain all restrictions not automatically - * handled by the index. All the predicates in the indexquals will be - * checked (either by the index itself, or by nodeBitmapHeapscan.c), - * but if there are any "special" or lossy operators involved then they - * must be added to qpqual. The upshot is that qpquals must contain - * scan_clauses minus whatever appears in indexquals. 
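The qpqual construction described in the comment above boils down to: keep only the scan clauses the index machinery will not already check. A toy sketch under heavy simplification (the real code compares RestrictInfo pointers, then falls back to predicate_implied_by() and the partial-index predicate; plain string equality stands in for all of that here):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *scan_clauses[] = {"x > 10", "y = 5"};
        const char *index_quals[]  = {"x > 10"};
        int nscan = 2, nidx = 1;

        for (int i = 0; i < nscan; i++)
        {
            int enforced = 0;
            for (int j = 0; j < nidx; j++)
                if (strcmp(scan_clauses[i], index_quals[j]) == 0)
                    enforced = 1;   /* already checked by the index itself */
            if (!enforced)
                printf("qpqual keeps: %s\n", scan_clauses[i]);  /* prints "y = 5" */
        }
        return 0;
    }

The toy drops only exact duplicates; the planner additionally discards clauses that are merely implied by the nonlossy index quals or by a partial index's predicate, which is why the implication test matters for OR'd index conditions.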
+ * The qpqual list must contain all restrictions not automatically handled + * by the index. All the predicates in the indexquals will be checked + * (either by the index itself, or by nodeBitmapHeapscan.c), but if there + * are any "special" or lossy operators involved then they must be added + * to qpqual. The upshot is that qpquals must contain scan_clauses minus + * whatever appears in indexquals. * * In normal cases simple equal() checks will be enough to spot duplicate * clauses, so we try that first. In some situations (particularly with @@ -930,25 +928,25 @@ create_bitmap_scan_plan(PlannerInfo *root, * * XXX For the moment, we only consider partial index predicates in the * simple single-index-scan case. Is it worth trying to be smart about - * more complex cases? Perhaps create_bitmap_subplan should be made to + * more complex cases? Perhaps create_bitmap_subplan should be made to * include predicate info in what it constructs. */ qpqual = NIL; foreach(l, scan_clauses) { - Node *clause = (Node *) lfirst(l); + Node *clause = (Node *) lfirst(l); if (list_member(indexquals, clause)) continue; if (!contain_mutable_functions(clause)) { - List *clausel = list_make1(clause); + List *clausel = list_make1(clause); if (predicate_implied_by(clausel, indexquals)) continue; if (IsA(best_path->bitmapqual, IndexPath)) { - IndexPath *ipath = (IndexPath *) best_path->bitmapqual; + IndexPath *ipath = (IndexPath *) best_path->bitmapqual; if (predicate_implied_by(clausel, ipath->indexinfo->indpred)) continue; @@ -1010,15 +1008,15 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * There may well be redundant quals among the subplans, since a * top-level WHERE qual might have gotten used to form several - * different index quals. We don't try exceedingly hard to - * eliminate redundancies, but we do eliminate obvious duplicates - * by using list_concat_unique. + * different index quals. We don't try exceedingly hard to eliminate + * redundancies, but we do eliminate obvious duplicates by using + * list_concat_unique. */ foreach(l, apath->bitmapquals) { - Plan *subplan; - List *subqual; - List *subindexqual; + Plan *subplan; + List *subqual; + List *subindexqual; subplan = create_bitmap_subplan(root, (Path *) lfirst(l), &subqual, &subindexqual); @@ -1048,7 +1046,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * Here, we only detect qual-free subplans. A qual-free subplan would * cause us to generate "... OR true ..." which we may as well reduce - * to just "true". We do not try to eliminate redundant subclauses + * to just "true". We do not try to eliminate redundant subclauses * because (a) it's not as likely as in the AND case, and (b) we might * well be working with hundreds or even thousands of OR conditions, * perhaps from a long IN list. The performance of list_append_unique @@ -1056,9 +1054,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, */ foreach(l, opath->bitmapquals) { - Plan *subplan; - List *subqual; - List *subindexqual; + Plan *subplan; + List *subqual; + List *subindexqual; subplan = create_bitmap_subplan(root, (Path *) lfirst(l), &subqual, &subindexqual); @@ -1080,6 +1078,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, plan->plan_rows = clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples); plan->plan_width = 0; /* meaningless */ + /* * If there were constant-TRUE subquals, the OR reduces to constant * TRUE. 
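The BitmapOr handling in the hunk above collapses the OR when a constant-TRUE subqual shows up and, as the comment goes on to note, avoids emitting one-element OR nodes. A minimal sketch of that reduction over a flat list of subquals (the Expr struct and reduce_or helper are invented for the example and are not PostgreSQL code):

    #include <stdio.h>

    typedef struct Expr { const char *text; int is_const_true; } Expr;

    /* reduce OR(subquals): constant TRUE absorbs everything, one element is returned as-is */
    static const Expr *reduce_or(const Expr **subquals, int n, const Expr *const_true)
    {
        for (int i = 0; i < n; i++)
            if (subquals[i]->is_const_true)
                return const_true;      /* "... OR true ..." is just true */
        if (n == 1)
            return subquals[0];         /* avoid a one-element OR node */
        return NULL;                    /* caller would build a real OR node here */
    }

    int main(void)
    {
        Expr t = {"true", 1}, a = {"a = 1", 0}, b = {"b = 2", 0};
        const Expr *quals1[] = {&a, &t, &b};
        const Expr *quals2[] = {&a};

        const Expr *r1 = reduce_or(quals1, 3, &t);
        const Expr *r2 = reduce_or(quals2, 1, &t);
        printf("OR(a, true, b) -> %s\n", r1 ? r1->text : "OR node");
        printf("OR(a)          -> %s\n", r2 ? r2->text : "OR node");
        return 0;
    }

As in the planner, no attempt is made to remove merely redundant subclauses; with hundreds of OR arms from a long IN list, that bookkeeping would cost more than it saves.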
Also, avoid generating one-element ORs, which could happen @@ -1100,9 +1099,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, } else if (IsA(bitmapqual, IndexPath)) { - IndexPath *ipath = (IndexPath *) bitmapqual; - IndexScan *iscan; - List *nonlossy_clauses; + IndexPath *ipath = (IndexPath *) bitmapqual; + IndexScan *iscan; + List *nonlossy_clauses; /* Use the regular indexscan plan build machinery... */ iscan = create_indexscan_plan(root, ipath, NIL, NIL, @@ -1245,18 +1244,18 @@ create_nestloop_plan(PlannerInfo *root, if (IsA(best_path->innerjoinpath, IndexPath)) { /* - * An index is being used to reduce the number of tuples scanned - * in the inner relation. If there are join clauses being used - * with the index, we may remove those join clauses from the list - * of clauses that have to be checked as qpquals at the join node. + * An index is being used to reduce the number of tuples scanned in + * the inner relation. If there are join clauses being used with the + * index, we may remove those join clauses from the list of clauses + * that have to be checked as qpquals at the join node. * * We can also remove any join clauses that are redundant with those - * being used in the index scan; prior redundancy checks will not - * have caught this case because the join clauses would never have - * been put in the same joininfo list. + * being used in the index scan; prior redundancy checks will not have + * caught this case because the join clauses would never have been put + * in the same joininfo list. * - * We can skip this if the index path is an ordinary indexpath and - * not a special innerjoin path. + * We can skip this if the index path is an ordinary indexpath and not a + * special innerjoin path. */ IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath; @@ -1266,7 +1265,7 @@ create_nestloop_plan(PlannerInfo *root, select_nonredundant_join_clauses(root, joinrestrictclauses, innerpath->indexclauses, - IS_OUTER_JOIN(best_path->jointype)); + IS_OUTER_JOIN(best_path->jointype)); } } else if (IsA(best_path->innerjoinpath, BitmapHeapPath)) @@ -1275,11 +1274,11 @@ create_nestloop_plan(PlannerInfo *root, * Same deal for bitmapped index scans. * * Note: both here and above, we ignore any implicit index restrictions - * associated with the use of partial indexes. This is OK because + * associated with the use of partial indexes. This is OK because * we're only trying to prove we can dispense with some join quals; * failing to prove that doesn't result in an incorrect plan. It is - * the right way to proceed because adding more quals to the stuff - * we got from the original query would just make it harder to detect + * the right way to proceed because adding more quals to the stuff we + * got from the original query would just make it harder to detect * duplication. */ BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath; @@ -1296,7 +1295,7 @@ create_nestloop_plan(PlannerInfo *root, select_nonredundant_join_clauses(root, joinrestrictclauses, bitmapclauses, - IS_OUTER_JOIN(best_path->jointype)); + IS_OUTER_JOIN(best_path->jointype)); } } @@ -1355,18 +1354,18 @@ create_mergejoin_plan(PlannerInfo *root, } /* - * Remove the mergeclauses from the list of join qual clauses, leaving - * the list of quals that must be checked as qpquals. + * Remove the mergeclauses from the list of join qual clauses, leaving the + * list of quals that must be checked as qpquals. 
*/ mergeclauses = get_actual_clauses(best_path->path_mergeclauses); joinclauses = list_difference(joinclauses, mergeclauses); /* - * Rearrange mergeclauses, if needed, so that the outer variable is - * always on the left. + * Rearrange mergeclauses, if needed, so that the outer variable is always + * on the left. */ mergeclauses = get_switched_clauses(best_path->path_mergeclauses, - best_path->jpath.outerjoinpath->parent->relids); + best_path->jpath.outerjoinpath->parent->relids); /* Sort clauses into best execution order */ /* NB: do NOT reorder the mergeclauses */ @@ -1375,8 +1374,8 @@ create_mergejoin_plan(PlannerInfo *root, /* * Create explicit sort nodes for the outer and inner join paths if - * necessary. The sort cost was already accounted for in the path. - * Make sure there are no excess columns in the inputs if sorting. + * necessary. The sort cost was already accounted for in the path. Make + * sure there are no excess columns in the inputs if sorting. */ if (best_path->outersortkeys) { @@ -1439,18 +1438,18 @@ create_hashjoin_plan(PlannerInfo *root, } /* - * Remove the hashclauses from the list of join qual clauses, leaving - * the list of quals that must be checked as qpquals. + * Remove the hashclauses from the list of join qual clauses, leaving the + * list of quals that must be checked as qpquals. */ hashclauses = get_actual_clauses(best_path->path_hashclauses); joinclauses = list_difference(joinclauses, hashclauses); /* - * Rearrange hashclauses, if needed, so that the outer variable is - * always on the left. + * Rearrange hashclauses, if needed, so that the outer variable is always + * on the left. */ hashclauses = get_switched_clauses(best_path->path_hashclauses, - best_path->jpath.outerjoinpath->parent->relids); + best_path->jpath.outerjoinpath->parent->relids); /* Sort clauses into best execution order */ joinclauses = order_qual_clauses(root, joinclauses); @@ -1551,23 +1550,22 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, /* * Make a copy that will become the fixed clause. * - * We used to try to do a shallow copy here, but that fails if there - * is a subplan in the arguments of the opclause. So just do a - * full copy. + * We used to try to do a shallow copy here, but that fails if there is a + * subplan in the arguments of the opclause. So just do a full copy. */ newclause = (OpExpr *) copyObject((Node *) clause); /* - * Check to see if the indexkey is on the right; if so, commute - * the clause. The indexkey should be the side that refers to - * (only) the base relation. + * Check to see if the indexkey is on the right; if so, commute the + * clause. The indexkey should be the side that refers to (only) the + * base relation. */ if (!bms_equal(rinfo->left_relids, index->rel->relids)) CommuteClause(newclause); /* - * Now, determine which index attribute this is, change the - * indexkey operand as needed, and get the index opclass. + * Now, determine which index attribute this is, change the indexkey + * operand as needed, and get the index opclass. */ linitial(newclause->args) = fix_indexqual_operand(linitial(newclause->args), @@ -1577,10 +1575,9 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, *fixed_indexquals = lappend(*fixed_indexquals, newclause); /* - * Look up the (possibly commuted) operator in the operator class - * to get its strategy numbers and the recheck indicator. This - * also double-checks that we found an operator matching the - * index. 
+ * Look up the (possibly commuted) operator in the operator class to + * get its strategy numbers and the recheck indicator. This also + * double-checks that we found an operator matching the index. */ get_op_opclass_properties(newclause->opno, opclass, &stratno, &stratsubtype, &recheck); @@ -1598,11 +1595,11 @@ static Node * fix_indexqual_operand(Node *node, IndexOptInfo *index, Oid *opclass) { /* - * We represent index keys by Var nodes having the varno of the base - * table but varattno equal to the index's attribute number (index - * column position). This is a bit hokey ... would be cleaner to use - * a special-purpose node type that could not be mistaken for a - * regular Var. But it will do for now. + * We represent index keys by Var nodes having the varno of the base table + * but varattno equal to the index's attribute number (index column + * position). This is a bit hokey ... would be cleaner to use a + * special-purpose node type that could not be mistaken for a regular Var. + * But it will do for now. */ Var *result; int pos; @@ -1692,8 +1689,8 @@ get_switched_clauses(List *clauses, Relids outerrelids) if (bms_is_subset(restrictinfo->right_relids, outerrelids)) { /* - * Duplicate just enough of the structure to allow commuting - * the clause without changing the original list. Could use + * Duplicate just enough of the structure to allow commuting the + * clause without changing the original list. Could use * copyObject, but a complete deep copy is overkill. */ OpExpr *temp = makeNode(OpExpr); @@ -1934,9 +1931,9 @@ make_subqueryscan(List *qptlist, Plan *plan = &node->scan.plan; /* - * Cost is figured here for the convenience of prepunion.c. Note this - * is only correct for the case where qpqual is empty; otherwise - * caller should overwrite cost with a better estimate. + * Cost is figured here for the convenience of prepunion.c. Note this is + * only correct for the case where qpqual is empty; otherwise caller + * should overwrite cost with a better estimate. */ copy_plan_costsize(plan, subplan); plan->total_cost += cpu_tuple_cost * subplan->plan_rows; @@ -1977,9 +1974,9 @@ make_append(List *appendplans, bool isTarget, List *tlist) ListCell *subnode; /* - * Compute cost as sum of subplan costs. We charge nothing extra for - * the Append itself, which perhaps is too optimistic, but since it - * doesn't do any selection or projection, it is a pretty cheap node. + * Compute cost as sum of subplan costs. We charge nothing extra for the + * Append itself, which perhaps is too optimistic, but since it doesn't do + * any selection or projection, it is a pretty cheap node. */ plan->startup_cost = 0; plan->total_cost = 0; @@ -2094,8 +2091,8 @@ make_hash(Plan *lefttree) copy_plan_costsize(plan, lefttree); /* - * For plausibility, make startup & total costs equal total cost of - * input plan; this only affects EXPLAIN display not decisions. + * For plausibility, make startup & total costs equal total cost of input + * plan; this only affects EXPLAIN display not decisions. 
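Both fix_indexqual_references and get_switched_clauses, reflowed above, commute a binary operator clause so that the interesting side (index key or outer variable) lands on the left. A toy sketch of clause commutation with a hard-coded commutator table instead of the pg_operator lookup the real code performs (OpClause and commute_clause are invented names):

    #include <stdio.h>
    #include <string.h>

    typedef struct { const char *op, *left, *right; } OpClause;

    /* swap the arguments and replace the operator by its commutator */
    static void commute_clause(OpClause *c)
    {
        static const char *pairs[][2] = {{"<", ">"}, {">", "<"},
                                         {"<=", ">="}, {">=", "<="}, {"=", "="}};
        const char *tmp = c->left;
        c->left = c->right;
        c->right = tmp;
        for (int i = 0; i < 5; i++)
            if (strcmp(c->op, pairs[i][0]) == 0)
            {
                c->op = pairs[i][1];
                break;
            }
    }

    int main(void)
    {
        OpClause c = {"<", "5", "idx_col"};   /* constant < indexkey: key is on the right */

        commute_clause(&c);
        printf("%s %s %s\n", c.left, c.op, c.right);   /* idx_col > 5 */
        return 0;
    }

The planner does the same swap on a copied node so the original clause list is untouched, which is why get_switched_clauses duplicates just enough structure rather than deep-copying.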
*/ plan->startup_cost = plan->total_cost; plan->targetlist = copyObject(lefttree->targetlist); @@ -2217,8 +2214,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys) Oid *sortOperators; /* - * We will need at most list_length(pathkeys) sort columns; possibly - * less + * We will need at most list_length(pathkeys) sort columns; possibly less */ numsortkeys = list_length(pathkeys); sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber)); @@ -2236,14 +2232,14 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys) /* * We can sort by any one of the sort key items listed in this * sublist. For now, we take the first one that corresponds to an - * available Var in the tlist. If there isn't any, use the first - * one that is an expression in the input's vars. + * available Var in the tlist. If there isn't any, use the first one + * that is an expression in the input's vars. * - * XXX if we have a choice, is there any way of figuring out which - * might be cheapest to execute? (For example, int4lt is likely - * much cheaper to execute than numericlt, but both might appear - * in the same pathkey sublist...) Not clear that we ever will - * have a choice in practice, so it may not matter. + * XXX if we have a choice, is there any way of figuring out which might + * be cheapest to execute? (For example, int4lt is likely much + * cheaper to execute than numericlt, but both might appear in the + * same pathkey sublist...) Not clear that we ever will have a choice + * in practice, so it may not matter. */ foreach(j, keysublist) { @@ -2296,13 +2292,13 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys) } /* - * The column might already be selected as a sort key, if the - * pathkeys contain duplicate entries. (This can happen in - * scenarios where multiple mergejoinable clauses mention the same - * var, for example.) So enter it only once in the sort arrays. + * The column might already be selected as a sort key, if the pathkeys + * contain duplicate entries. (This can happen in scenarios where + * multiple mergejoinable clauses mention the same var, for example.) + * So enter it only once in the sort arrays. */ numsortkeys = add_sort_column(tle->resno, pathkey->sortop, - numsortkeys, sortColIdx, sortOperators); + numsortkeys, sortColIdx, sortOperators); } Assert(numsortkeys > 0); @@ -2328,8 +2324,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree) Oid *sortOperators; /* - * We will need at most list_length(sortcls) sort columns; possibly - * less + * We will need at most list_length(sortcls) sort columns; possibly less */ numsortkeys = list_length(sortcls); sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber)); @@ -2348,7 +2343,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree) * redundantly. */ numsortkeys = add_sort_column(tle->resno, sortcl->sortop, - numsortkeys, sortColIdx, sortOperators); + numsortkeys, sortColIdx, sortOperators); } Assert(numsortkeys > 0); @@ -2384,8 +2379,7 @@ make_sort_from_groupcols(PlannerInfo *root, Oid *sortOperators; /* - * We will need at most list_length(groupcls) sort columns; possibly - * less + * We will need at most list_length(groupcls) sort columns; possibly less */ numsortkeys = list_length(groupcls); sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber)); @@ -2404,7 +2398,7 @@ make_sort_from_groupcols(PlannerInfo *root, * redundantly. 
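make_sort_from_pathkeys, above, enters each column only once in the sort-column arrays even when duplicate pathkey entries mention the same var. A small sketch that mirrors the shape of the add_sort_column helper used in these hunks, with the surrounding types reduced to bare typedefs for a standalone build:

    #include <stdio.h>

    typedef short AttrNumber;
    typedef unsigned int Oid;

    /* append (colIdx, sortOp) unless the column is already in the arrays */
    static int add_sort_column(AttrNumber colIdx, Oid sortOp,
                               int numCols, AttrNumber *sortColIdx, Oid *sortOperators)
    {
        for (int i = 0; i < numCols; i++)
            if (sortColIdx[i] == colIdx)
                return numCols;     /* already sorting by this column */
        sortColIdx[numCols] = colIdx;
        sortOperators[numCols] = sortOp;
        return numCols + 1;
    }

    int main(void)
    {
        AttrNumber cols[4];
        Oid ops[4];
        int n = 0;

        n = add_sort_column(1, 97, n, cols, ops);
        n = add_sort_column(2, 97, n, cols, ops);
        n = add_sort_column(1, 97, n, cols, ops);   /* duplicate pathkey entry */
        printf("%d sort columns\n", n);             /* prints 2 */
        return 0;
    }

Skipping the duplicate also keeps add_path from treating the (A,B,A) ordering as distinct from (A,B), which is the point the comment makes.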
*/ numsortkeys = add_sort_column(tle->resno, grpcl->sortop, - numsortkeys, sortColIdx, sortOperators); + numsortkeys, sortColIdx, sortOperators); grpno++; } @@ -2492,8 +2486,8 @@ make_agg(PlannerInfo *root, List *tlist, List *qual, plan->total_cost = agg_path.total_cost; /* - * We will produce a single output tuple if not grouping, and a tuple - * per group otherwise. + * We will produce a single output tuple if not grouping, and a tuple per + * group otherwise. */ if (aggstrategy == AGG_PLAIN) plan->plan_rows = 1; @@ -2501,13 +2495,13 @@ make_agg(PlannerInfo *root, List *tlist, List *qual, plan->plan_rows = numGroups; /* - * We also need to account for the cost of evaluation of the qual (ie, - * the HAVING clause) and the tlist. Note that cost_qual_eval doesn't - * charge anything for Aggref nodes; this is okay since they are - * really comparable to Vars. + * We also need to account for the cost of evaluation of the qual (ie, the + * HAVING clause) and the tlist. Note that cost_qual_eval doesn't charge + * anything for Aggref nodes; this is okay since they are really + * comparable to Vars. * - * See notes in grouping_planner about why this routine and make_group - * are the only ones in this file that worry about tlist eval cost. + * See notes in grouping_planner about why this routine and make_group are + * the only ones in this file that worry about tlist eval cost. */ if (qual) { @@ -2559,16 +2553,15 @@ make_group(PlannerInfo *root, plan->plan_rows = numGroups; /* - * We also need to account for the cost of evaluation of the qual (ie, - * the HAVING clause) and the tlist. + * We also need to account for the cost of evaluation of the qual (ie, the + * HAVING clause) and the tlist. * - * XXX this double-counts the cost of evaluation of any expressions used - * for grouping, since in reality those will have been evaluated at a - * lower plan level and will only be copied by the Group node. Worth - * fixing? + * XXX this double-counts the cost of evaluation of any expressions used for + * grouping, since in reality those will have been evaluated at a lower + * plan level and will only be copied by the Group node. Worth fixing? * - * See notes in grouping_planner about why this routine and make_agg are - * the only ones in this file that worry about tlist eval cost. + * See notes in grouping_planner about why this routine and make_agg are the + * only ones in this file that worry about tlist eval cost. */ if (qual) { @@ -2607,16 +2600,16 @@ make_unique(Plan *lefttree, List *distinctList) copy_plan_costsize(plan, lefttree); /* - * Charge one cpu_operator_cost per comparison per input tuple. We - * assume all columns get compared at most of the tuples. (XXX - * probably this is an overestimate.) + * Charge one cpu_operator_cost per comparison per input tuple. We assume + * all columns get compared at most of the tuples. (XXX probably this is + * an overestimate.) */ plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols; /* - * plan->plan_rows is left as a copy of the input subplan's plan_rows; - * ie, we assume the filter removes nothing. The caller must alter - * this if he has a better idea. + * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie, + * we assume the filter removes nothing. The caller must alter this if he + * has a better idea. 
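make_unique, in the hunk above, charges one cpu_operator_cost per column comparison per input tuple and leaves the output row estimate equal to the input's. A tiny numeric sketch of that adjustment (the 0.0025 default for cpu_operator_cost and the other figures are assumed here purely for illustration):

    #include <stdio.h>

    int main(void)
    {
        double cpu_operator_cost = 0.0025;   /* assumed default GUC value */
        double plan_rows  = 10000.0;         /* rows arriving from the sorted input */
        double total_cost = 250.0;           /* copied from the input plan */
        int    numCols    = 2;               /* number of columns compared */

        /* one comparison per column per input tuple (likely an overestimate) */
        total_cost += cpu_operator_cost * plan_rows * numCols;

        /* plan_rows is left alone: assume the duplicate filter removes nothing */
        printf("Unique: rows=%.0f total_cost=%.2f\n", plan_rows, total_cost);
        return 0;
    }

The caller is expected to overwrite plan_rows when it has a better estimate of how many duplicates will be removed.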
*/ plan->targetlist = copyObject(lefttree->targetlist); @@ -2625,8 +2618,7 @@ make_unique(Plan *lefttree, List *distinctList) plan->righttree = NULL; /* - * convert SortClause list into array of attr indexes, as wanted by - * exec + * convert SortClause list into array of attr indexes, as wanted by exec */ Assert(numCols > 0); uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); @@ -2664,8 +2656,8 @@ make_setop(SetOpCmd cmd, Plan *lefttree, copy_plan_costsize(plan, lefttree); /* - * Charge one cpu_operator_cost per comparison per input tuple. We - * assume all columns get compared at most of the tuples. + * Charge one cpu_operator_cost per comparison per input tuple. We assume + * all columns get compared at most of the tuples. */ plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols; @@ -2683,8 +2675,7 @@ make_setop(SetOpCmd cmd, Plan *lefttree, plan->righttree = NULL; /* - * convert SortClause list into array of attr indexes, as wanted by - * exec + * convert SortClause list into array of attr indexes, as wanted by exec */ Assert(numCols > 0); dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); @@ -2727,8 +2718,8 @@ make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount, * building a subquery then it's important to report correct info to the * outer planner. * - * When the offset or count couldn't be estimated, use 10% of the - * estimated number of rows emitted from the subplan. + * When the offset or count couldn't be estimated, use 10% of the estimated + * number of rows emitted from the subplan. */ if (offset_est != 0) { diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index 7e3d5bca55b..dd8fc4fa2d7 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.109 2005/09/28 21:17:02 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.110 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -221,7 +221,7 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode, result = bms_add_members(result, distribute_quals_to_rels(root, lfirst(l), - below_outer_join)); + below_outer_join)); } /* @@ -243,17 +243,17 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode, ListCell *qual; /* - * Order of operations here is subtle and critical. First we - * recurse to handle sub-JOINs. Their join quals will be placed - * without regard for whether this level is an outer join, which - * is correct. Then we place our own join quals, which are - * restricted by lower outer joins in any case, and are forced to - * this level if this is an outer join and they mention the outer - * side. Finally, if this is an outer join, we mark baserels - * contained within the inner side(s) with our own rel set; this - * will prevent quals above us in the join tree that use those - * rels from being pushed down below this level. (It's okay for - * upper quals to be pushed down to the outer side, however.) + * Order of operations here is subtle and critical. First we recurse + * to handle sub-JOINs. Their join quals will be placed without + * regard for whether this level is an outer join, which is correct. + * Then we place our own join quals, which are restricted by lower + * outer joins in any case, and are forced to this level if this is an + * outer join and they mention the outer side. 
Finally, if this is an + * outer join, we mark baserels contained within the inner side(s) + * with our own rel set; this will prevent quals above us in the join + * tree that use those rels from being pushed down below this level. + * (It's okay for upper quals to be pushed down to the outer side, + * however.) */ switch (j->jointype) { @@ -302,19 +302,19 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode, case JOIN_UNION: /* - * This is where we fail if upper levels of planner - * haven't rewritten UNION JOIN as an Append ... + * This is where we fail if upper levels of planner haven't + * rewritten UNION JOIN as an Append ... */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("UNION JOIN is not implemented"))); - nonnullable_rels = NULL; /* keep compiler quiet */ + nonnullable_rels = NULL; /* keep compiler quiet */ nullable_rels = NULL; break; default: elog(ERROR, "unrecognized join type: %d", (int) j->jointype); - nonnullable_rels = NULL; /* keep compiler quiet */ + nonnullable_rels = NULL; /* keep compiler quiet */ nullable_rels = NULL; break; } @@ -349,19 +349,19 @@ mark_baserels_for_outer_join(PlannerInfo *root, Relids rels, Relids outerrels) RelOptInfo *rel = find_base_rel(root, relno); /* - * Since we do this bottom-up, any outer-rels previously marked - * should be within the new outer join set. + * Since we do this bottom-up, any outer-rels previously marked should + * be within the new outer join set. */ Assert(bms_is_subset(rel->outerjoinset, outerrels)); /* * Presently the executor cannot support FOR UPDATE/SHARE marking of * rels appearing on the nullable side of an outer join. (It's - * somewhat unclear what that would mean, anyway: what should we - * mark when a result row is generated from no element of the - * nullable relation?) So, complain if target rel is FOR UPDATE/SHARE. - * It's sufficient to make this check once per rel, so do it only - * if rel wasn't already known nullable. + * somewhat unclear what that would mean, anyway: what should we mark + * when a result row is generated from no element of the nullable + * relation?) So, complain if target rel is FOR UPDATE/SHARE. It's + * sufficient to make this check once per rel, so do it only if rel + * wasn't already known nullable. */ if (rel->outerjoinset == NULL) { @@ -430,9 +430,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, /* * If the clause is variable-free, we force it to be evaluated at its * original syntactic level. Note that this should not happen for - * top-level clauses, because query_planner() special-cases them. But - * it will happen for variable-free JOIN/ON clauses. We don't have to - * be real smart about such a case, we just have to be correct. + * top-level clauses, because query_planner() special-cases them. But it + * will happen for variable-free JOIN/ON clauses. We don't have to be + * real smart about such a case, we just have to be correct. */ if (bms_is_empty(relids)) relids = qualscope; @@ -446,8 +446,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, /* * If the qual came from implied-equality deduction, we always * evaluate the qual at its natural semantic level. It is the - * responsibility of the deducer not to create any quals that - * should be delayed by outer-join rules. + * responsibility of the deducer not to create any quals that should + * be delayed by outer-join rules. 
*/ Assert(bms_equal(relids, qualscope)); /* Needn't feed it back for more deductions */ @@ -457,28 +457,28 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, else if (bms_overlap(relids, outerjoin_nonnullable)) { /* - * The qual is attached to an outer join and mentions (some of - * the) rels on the nonnullable side. Force the qual to be - * evaluated exactly at the level of joining corresponding to the - * outer join. We cannot let it get pushed down into the - * nonnullable side, since then we'd produce no output rows, - * rather than the intended single null-extended row, for any - * nonnullable-side rows failing the qual. + * The qual is attached to an outer join and mentions (some of the) + * rels on the nonnullable side. Force the qual to be evaluated + * exactly at the level of joining corresponding to the outer join. We + * cannot let it get pushed down into the nonnullable side, since then + * we'd produce no output rows, rather than the intended single + * null-extended row, for any nonnullable-side rows failing the qual. * - * Note: an outer-join qual that mentions only nullable-side rels can - * be pushed down into the nullable side without changing the join + * Note: an outer-join qual that mentions only nullable-side rels can be + * pushed down into the nullable side without changing the join * result, so we treat it the same as an ordinary inner-join qual, * except for not setting maybe_equijoin (see below). */ relids = qualscope; + /* - * We can't use such a clause to deduce equijoin (the left and - * right sides might be unequal above the join because one of - * them has gone to NULL) ... but we might be able to use it - * for more limited purposes. Note: for the current uses of - * deductions from an outer-join clause, it seems safe to make - * the deductions even when the clause is below a higher-level - * outer join; so we do not check below_outer_join here. + * We can't use such a clause to deduce equijoin (the left and right + * sides might be unequal above the join because one of them has gone + * to NULL) ... but we might be able to use it for more limited + * purposes. Note: for the current uses of deductions from an + * outer-join clause, it seems safe to make the deductions even when + * the clause is below a higher-level outer join; so we do not check + * below_outer_join here. */ maybe_equijoin = false; maybe_outer_join = true; @@ -486,15 +486,14 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, else { /* - * For a non-outer-join qual, we can evaluate the qual as soon as - * (1) we have all the rels it mentions, and (2) we are at or - * above any outer joins that can null any of these rels and are - * below the syntactic location of the given qual. To enforce the - * latter, scan the base rels listed in relids, and merge their - * outer-join sets into the clause's own reference list. At the - * time we are called, the outerjoinset of each baserel will show - * exactly those outer joins that are below the qual in the join - * tree. + * For a non-outer-join qual, we can evaluate the qual as soon as (1) + * we have all the rels it mentions, and (2) we are at or above any + * outer joins that can null any of these rels and are below the + * syntactic location of the given qual. To enforce the latter, scan + * the base rels listed in relids, and merge their outer-join sets + * into the clause's own reference list. 
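The non-outer-join branch above delays a qual until the join level is above every lower outer join that can null one of its rels: each referenced base rel's outerjoinset is merged into the clause's relid set. A bitmask sketch of that merge, with Relids reduced to a 32-bit mask (everything here is simplified; PostgreSQL uses Bitmapset and a per-RelOptInfo outerjoinset field):

    #include <stdio.h>

    typedef unsigned int Relids;    /* bit i set => base rel i is included */

    int main(void)
    {
        /* outerjoinset per base rel: rel 2 is nulled by an outer join over rels {1,2} */
        Relids outerjoinset[4] = {0, 0, (1u << 1) | (1u << 2), 0};

        Relids clause_relids = (1u << 2) | (1u << 3);   /* qual mentions rels 2 and 3 */
        Relids relids = clause_relids;

        /* merge each referenced rel's outer-join set into the evaluation level */
        for (int rel = 1; rel <= 3; rel++)
            if (clause_relids & (1u << rel))
                relids |= outerjoinset[rel];

        printf("qual must be evaluated at relid set 0x%x\n", relids);  /* 0xe: {1,2,3} */
        return 0;
    }

If the merged set grows beyond the clause's own relids, the qual is being delayed by an outer join and therefore cannot be fed to the equijoin machinery, which is the maybe_equijoin distinction the comment draws next.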
At the time we are called, + * the outerjoinset of each baserel will show exactly those outer + * joins that are below the qual in the join tree. */ Relids addrelids = NULL; Relids tmprelids; @@ -513,13 +512,13 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, if (bms_is_subset(addrelids, relids)) { /* - * Qual is not delayed by any lower outer-join restriction. - * If it is not itself below or within an outer join, we - * can consider it "valid everywhere", so consider feeding - * it to the equijoin machinery. (If it is within an outer - * join, we can't consider it "valid everywhere": once the - * contained variables have gone to NULL, we'd be asserting - * things like NULL = NULL, which is not true.) + * Qual is not delayed by any lower outer-join restriction. If it + * is not itself below or within an outer join, we can consider it + * "valid everywhere", so consider feeding it to the equijoin + * machinery. (If it is within an outer join, we can't consider + * it "valid everywhere": once the contained variables have gone + * to NULL, we'd be asserting things like NULL = NULL, which is + * not true.) */ if (!below_outer_join && outerjoin_nonnullable == NULL) maybe_equijoin = true; @@ -533,8 +532,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, Assert(bms_is_subset(relids, qualscope)); /* - * Because application of the qual will be delayed by outer - * join, we mustn't assume its vars are equal everywhere. + * Because application of the qual will be delayed by outer join, + * we mustn't assume its vars are equal everywhere. */ maybe_equijoin = false; } @@ -543,11 +542,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, } /* - * Mark the qual as "pushed down" if it can be applied at a level - * below its original syntactic level. This allows us to distinguish - * original JOIN/ON quals from higher-level quals pushed down to the - * same joinrel. A qual originating from WHERE is always considered - * "pushed down". + * Mark the qual as "pushed down" if it can be applied at a level below + * its original syntactic level. This allows us to distinguish original + * JOIN/ON quals from higher-level quals pushed down to the same joinrel. + * A qual originating from WHERE is always considered "pushed down". */ if (!is_pushed_down) is_pushed_down = !bms_equal(relids, qualscope); @@ -573,25 +571,24 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, rel = find_base_rel(root, bms_singleton_member(relids)); /* - * Check for a "mergejoinable" clause even though it's not a - * join clause. This is so that we can recognize that "a.x = - * a.y" makes x and y eligible to be considered equal, even - * when they belong to the same rel. Without this, we would - * not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q" - * allows us to consider z and q equal after their rels are - * joined. + * Check for a "mergejoinable" clause even though it's not a join + * clause. This is so that we can recognize that "a.x = a.y" + * makes x and y eligible to be considered equal, even when they + * belong to the same rel. Without this, we would not recognize + * that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to + * consider z and q equal after their rels are joined. */ check_mergejoinable(restrictinfo); /* - * If the clause was deduced from implied equality, check to - * see whether it is redundant with restriction clauses we - * already have for this rel. 
Note we cannot apply this check - * to user-written clauses, since we haven't found the - * canonical pathkey sets yet while processing user clauses. - * (NB: no comparable check is done in the join-clause case; - * redundancy will be detected when the join clause is moved - * into a join rel's restriction list.) + * If the clause was deduced from implied equality, check to see + * whether it is redundant with restriction clauses we already + * have for this rel. Note we cannot apply this check to + * user-written clauses, since we haven't found the canonical + * pathkey sets yet while processing user clauses. (NB: no + * comparable check is done in the join-clause case; redundancy + * will be detected when the join clause is moved into a join + * rel's restriction list.) */ if (!is_deduced || !qual_is_redundant(root, restrictinfo, @@ -605,17 +602,17 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, case BMS_MULTIPLE: /* - * 'clause' is a join clause, since there is more than one rel - * in the relid set. + * 'clause' is a join clause, since there is more than one rel in + * the relid set. */ /* * Check for hash or mergejoinable operators. * - * We don't bother setting the hashjoin info if we're not going - * to need it. We do want to know about mergejoinable ops in - * all cases, however, because we use mergejoinable ops for - * other purposes such as detecting redundant clauses. + * We don't bother setting the hashjoin info if we're not going to + * need it. We do want to know about mergejoinable ops in all + * cases, however, because we use mergejoinable ops for other + * purposes such as detecting redundant clauses. */ check_mergejoinable(restrictinfo); if (enable_hashjoin) @@ -628,9 +625,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, /* * Add vars used in the join clause to targetlists of their - * relations, so that they will be emitted by the plan nodes - * that scan those relations (else they won't be available at - * the join node!). + * relations, so that they will be emitted by the plan nodes that + * scan those relations (else they won't be available at the join + * node!). */ vars = pull_var_clause(clause, false); add_vars_to_targetlist(root, vars, relids); @@ -639,17 +636,16 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, default: /* - * 'clause' references no rels, and therefore we have no place - * to attach it. Shouldn't get here if callers are working - * properly. + * 'clause' references no rels, and therefore we have no place to + * attach it. Shouldn't get here if callers are working properly. */ elog(ERROR, "cannot cope with variable-free clause"); break; } /* - * If the clause has a mergejoinable operator, we may be able to - * deduce more things from it under the principle of transitivity. + * If the clause has a mergejoinable operator, we may be able to deduce + * more things from it under the principle of transitivity. * * If it is not an outer-join qualification nor bubbled up due to an outer * join, then the two sides represent equivalent PathKeyItems for path @@ -744,8 +740,8 @@ process_implied_equality(PlannerInfo *root, /* * If the exprs involve a single rel, we need to look at that rel's - * baserestrictinfo list. If multiple rels, we can scan the joininfo - * list of any of 'em. + * baserestrictinfo list. If multiple rels, we can scan the joininfo list + * of any of 'em. */ if (membership == BMS_SINGLETON) { @@ -767,8 +763,8 @@ process_implied_equality(PlannerInfo *root, } /* - * Scan to see if equality is already known. 
If so, we're done in the - * add case, and done after removing it in the delete case. + * Scan to see if equality is already known. If so, we're done in the add + * case, and done after removing it in the delete case. */ foreach(itm, restrictlist) { @@ -791,7 +787,7 @@ process_implied_equality(PlannerInfo *root, { /* delete it from local restrictinfo list */ rel1->baserestrictinfo = list_delete_ptr(rel1->baserestrictinfo, - restrictinfo); + restrictinfo); } else { @@ -808,8 +804,8 @@ process_implied_equality(PlannerInfo *root, return; /* - * This equality is new information, so construct a clause - * representing it to add to the query data structures. + * This equality is new information, so construct a clause representing it + * to add to the query data structures. */ ltype = exprType(item1); rtype = exprType(item2); @@ -818,14 +814,14 @@ process_implied_equality(PlannerInfo *root, if (!HeapTupleIsValid(eq_operator)) { /* - * Would it be safe to just not add the equality to the query if - * we have no suitable equality operator for the combination of + * Would it be safe to just not add the equality to the query if we + * have no suitable equality operator for the combination of * datatypes? NO, because sortkey selection may screw up anyway. */ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("could not identify an equality operator for types %s and %s", - format_type_be(ltype), format_type_be(rtype)))); + errmsg("could not identify an equality operator for types %s and %s", + format_type_be(ltype), format_type_be(rtype)))); } pgopform = (Form_pg_operator) GETSTRUCT(eq_operator); @@ -856,8 +852,8 @@ process_implied_equality(PlannerInfo *root, /* * Push the new clause into all the appropriate restrictinfo lists. * - * Note: we mark the qual "pushed down" to ensure that it can never be - * taken for an original JOIN/ON clause. + * Note: we mark the qual "pushed down" to ensure that it can never be taken + * for an original JOIN/ON clause. */ distribute_qual_to_rels(root, (Node *) clause, true, true, false, NULL, relids); @@ -911,9 +907,9 @@ qual_is_redundant(PlannerInfo *root, return false; /* - * Scan existing quals to find those referencing same pathkeys. - * Usually there will be few, if any, so build a list of just the - * interesting ones. + * Scan existing quals to find those referencing same pathkeys. Usually + * there will be few, if any, so build a list of just the interesting + * ones. */ oldquals = NIL; foreach(olditem, restrictlist) @@ -933,11 +929,10 @@ qual_is_redundant(PlannerInfo *root, /* * Now, we want to develop a list of exprs that are known equal to the - * left side of the new qual. We traverse the old-quals list - * repeatedly to transitively expand the exprs list. If at any point - * we find we can reach the right-side expr of the new qual, we are - * done. We give up when we can't expand the equalexprs list any - * more. + * left side of the new qual. We traverse the old-quals list repeatedly + * to transitively expand the exprs list. If at any point we find we can + * reach the right-side expr of the new qual, we are done. We give up + * when we can't expand the equalexprs list any more. 
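qual_is_redundant, whose comment is reflowed above, decides whether a newly deduced equality L = R adds anything: it repeatedly expands the set of expressions known equal to L using the existing quals, and declares the new qual redundant if R is ever reached. A compact sketch with expressions as strings (the data and helper names are invented; the planner also consumes each old qual at most once):

    #include <stdio.h>
    #include <string.h>

    typedef struct { const char *left, *right; } Equality;

    static int in_set(const char **set, int n, const char *s)
    {
        for (int i = 0; i < n; i++)
            if (strcmp(set[i], s) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        Equality old[] = {{"a.x", "a.y"}, {"a.y", "b.z"}};   /* quals already present */
        int nold = 2;

        const char *newleft = "a.x", *newright = "b.z";      /* candidate deduced qual */
        const char *equal[8];                                /* exprs known equal to newleft */
        int nequal = 0, grew;

        equal[nequal++] = newleft;
        do
        {
            grew = 0;
            for (int i = 0; i < nold; i++)
            {
                /* whenever one side is already in the set, the other side joins it */
                if (in_set(equal, nequal, old[i].left) &&
                    !in_set(equal, nequal, old[i].right))
                {
                    equal[nequal++] = old[i].right;
                    grew = 1;
                }
                else if (in_set(equal, nequal, old[i].right) &&
                         !in_set(equal, nequal, old[i].left))
                {
                    equal[nequal++] = old[i].left;
                    grew = 1;
                }
            }
        } while (grew && !in_set(equal, nequal, newright));

        printf("a.x = b.z is %s\n",
               in_set(equal, nequal, newright) ? "redundant" : "new information");
        return 0;
    }

With a.x = a.y and a.y = b.z already in place, the deduced a.x = b.z is reported redundant and need not be added to the restriction list.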
*/ equalexprs = list_make1(newleft); do diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c index f2002a5228d..7c2f0211f10 100644 --- a/src/backend/optimizer/plan/planagg.c +++ b/src/backend/optimizer/plan/planagg.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.9 2005/09/21 19:15:27 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.10 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -43,12 +43,12 @@ typedef struct static bool find_minmax_aggs_walker(Node *node, List **context); static bool build_minmax_path(PlannerInfo *root, RelOptInfo *rel, - MinMaxAggInfo *info); + MinMaxAggInfo *info); static ScanDirection match_agg_to_index_col(MinMaxAggInfo *info, - IndexOptInfo *index, int indexcol); + IndexOptInfo *index, int indexcol); static void make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, - List *constant_quals); -static Node *replace_aggs_with_params_mutator(Node *node, List **context); + List *constant_quals); +static Node *replace_aggs_with_params_mutator(Node *node, List **context); static Oid fetch_agg_sort_op(Oid aggfnoid); @@ -62,7 +62,7 @@ static Oid fetch_agg_sort_op(Oid aggfnoid); * generic scan-all-the-rows plan. * * We are passed the preprocessed tlist, and the best path - * devised for computing the input of a standard Agg node. If we are able + * devised for computing the input of a standard Agg node. If we are able * to optimize all the aggregates, and the result is estimated to be cheaper * than the generic aggregate method, then generate and return a Plan that * does it that way. Otherwise, return NULL. @@ -87,24 +87,24 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) if (!parse->hasAggs) return NULL; - Assert(!parse->setOperations); /* shouldn't get here if a setop */ - Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */ + Assert(!parse->setOperations); /* shouldn't get here if a setop */ + Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */ /* * Reject unoptimizable cases. * - * We don't handle GROUP BY, because our current implementations of - * grouping require looking at all the rows anyway, and so there's not - * much point in optimizing MIN/MAX. + * We don't handle GROUP BY, because our current implementations of grouping + * require looking at all the rows anyway, and so there's not much point + * in optimizing MIN/MAX. */ if (parse->groupClause) return NULL; /* - * We also restrict the query to reference exactly one table, since - * join conditions can't be handled reasonably. (We could perhaps - * handle a query containing cartesian-product joins, but it hardly - * seems worth the trouble.) + * We also restrict the query to reference exactly one table, since join + * conditions can't be handled reasonably. (We could perhaps handle a + * query containing cartesian-product joins, but it hardly seems worth the + * trouble.) */ Assert(parse->jointree != NULL && IsA(parse->jointree, FromExpr)); if (list_length(parse->jointree->fromlist) != 1) @@ -118,8 +118,8 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) rel = find_base_rel(root, rtr->rtindex); /* - * Also reject cases with subplans or volatile functions in WHERE. - * This may be overly paranoid, but it's not entirely clear if the + * Also reject cases with subplans or volatile functions in WHERE. 
This + * may be overly paranoid, but it's not entirely clear if the * transformation is safe then. */ if (contain_subplans(parse->jointree->quals) || @@ -127,17 +127,16 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) return NULL; /* - * Since this optimization is not applicable all that often, we want - * to fall out before doing very much work if possible. Therefore - * we do the work in several passes. The first pass scans the tlist - * and HAVING qual to find all the aggregates and verify that - * each of them is a MIN/MAX aggregate. If that succeeds, the second - * pass looks at each aggregate to see if it is optimizable; if so - * we make an IndexPath describing how we would scan it. (We do not - * try to optimize if only some aggs are optimizable, since that means - * we'll have to scan all the rows anyway.) If that succeeds, we have - * enough info to compare costs against the generic implementation. - * Only if that test passes do we build a Plan. + * Since this optimization is not applicable all that often, we want to + * fall out before doing very much work if possible. Therefore we do the + * work in several passes. The first pass scans the tlist and HAVING qual + * to find all the aggregates and verify that each of them is a MIN/MAX + * aggregate. If that succeeds, the second pass looks at each aggregate + * to see if it is optimizable; if so we make an IndexPath describing how + * we would scan it. (We do not try to optimize if only some aggs are + * optimizable, since that means we'll have to scan all the rows anyway.) + * If that succeeds, we have enough info to compare costs against the + * generic implementation. Only if that test passes do we build a Plan. */ /* Pass 1: find all the aggregates */ @@ -161,9 +160,9 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) /* * Make the cost comparison. * - * Note that we don't include evaluation cost of the tlist here; - * this is OK since it isn't included in best_path's cost either, - * and should be the same in either case. + * Note that we don't include evaluation cost of the tlist here; this is OK + * since it isn't included in best_path's cost either, and should be the + * same in either case. */ cost_agg(&agg_p, root, AGG_PLAIN, list_length(aggs_list), 0, 0, @@ -174,13 +173,13 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path) return NULL; /* too expensive */ /* - * OK, we are going to generate an optimized plan. The first thing we - * need to do is look for any non-variable WHERE clauses that query_planner - * might have removed from the basic plan. (Normal WHERE clauses will - * be properly incorporated into the sub-plans by create_plan.) If there - * are any, they will be in a gating Result node atop the best_path. - * They have to be incorporated into a gating Result in each sub-plan - * in order to produce the semantically correct result. + * OK, we are going to generate an optimized plan. The first thing we + * need to do is look for any non-variable WHERE clauses that + * query_planner might have removed from the basic plan. (Normal WHERE + * clauses will be properly incorporated into the sub-plans by + * create_plan.) If there are any, they will be in a gating Result node + * atop the best_path. They have to be incorporated into a gating Result + * in each sub-plan in order to produce the semantically correct result. 
*/ if (IsA(best_path, ResultPath)) { @@ -275,8 +274,8 @@ find_minmax_aggs_walker(Node *node, List **context) *context = lappend(*context, info); /* - * We need not recurse into the argument, since it can't contain - * any aggregates. + * We need not recurse into the argument, since it can't contain any + * aggregates. */ return false; } @@ -325,8 +324,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info) /* * Look for a match to one of the index columns. (In a stupidly - * designed index, there could be multiple matches, but we only - * care about the first one.) + * designed index, there could be multiple matches, but we only care + * about the first one.) */ for (indexcol = 0; indexcol < index->ncolumns; indexcol++) { @@ -340,12 +339,12 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info) /* * If the match is not at the first index column, we have to verify * that there are "x = something" restrictions on all the earlier - * index columns. Since we'll need the restrictclauses list anyway - * to build the path, it's convenient to extract that first and then - * look through it for the equality restrictions. + * index columns. Since we'll need the restrictclauses list anyway to + * build the path, it's convenient to extract that first and then look + * through it for the equality restrictions. */ restrictclauses = group_clauses_by_indexkey(index, - index->rel->baserestrictinfo, + index->rel->baserestrictinfo, NIL, NULL, &found_clause); @@ -354,8 +353,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info) continue; /* definitely haven't got enough */ for (prevcol = 0; prevcol < indexcol; prevcol++) { - List *rinfos = (List *) list_nth(restrictclauses, prevcol); - ListCell *ll; + List *rinfos = (List *) list_nth(restrictclauses, prevcol); + ListCell *ll; foreach(ll, rinfos) { @@ -453,9 +452,9 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) NullTest *ntest; /* - * Generate a suitably modified query. Much of the work here is - * probably unnecessary in the normal case, but we want to make it look - * good if someone tries to EXPLAIN the result. + * Generate a suitably modified query. Much of the work here is probably + * unnecessary in the normal case, but we want to make it look good if + * someone tries to EXPLAIN the result. */ memcpy(&subroot, root, sizeof(PlannerInfo)); subroot.parse = subparse = (Query *) copyObject(root->parse); @@ -489,18 +488,17 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) false, true); /* - * Generate the plan for the subquery. We already have a Path for - * the basic indexscan, but we have to convert it to a Plan and - * attach a LIMIT node above it. We might need a gating Result, too, - * to handle any non-variable qual clauses. + * Generate the plan for the subquery. We already have a Path for the + * basic indexscan, but we have to convert it to a Plan and attach a LIMIT + * node above it. We might need a gating Result, too, to handle any + * non-variable qual clauses. * - * Also we must add a "WHERE foo IS NOT NULL" restriction to the - * indexscan, to be sure we don't return a NULL, which'd be contrary - * to the standard behavior of MIN/MAX. XXX ideally this should be - * done earlier, so that the selectivity of the restriction could be - * included in our cost estimates. But that looks painful, and in - * most cases the fraction of NULLs isn't high enough to change the - * decision. 
+ * Also we must add a "WHERE foo IS NOT NULL" restriction to the indexscan, + * to be sure we don't return a NULL, which'd be contrary to the standard + * behavior of MIN/MAX. XXX ideally this should be done earlier, so that + * the selectivity of the restriction could be included in our cost + * estimates. But that looks painful, and in most cases the fraction of + * NULLs isn't high enough to change the decision. */ plan = create_plan(&subroot, (Path *) info->path); @@ -517,7 +515,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) copyObject(constant_quals), plan); - plan = (Plan *) make_limit(plan, + plan = (Plan *) make_limit(plan, subparse->limitOffset, subparse->limitCount, 0, 1); @@ -534,7 +532,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals) * Replace original aggregate calls with subplan output Params */ static Node * -replace_aggs_with_params_mutator(Node *node, List **context) +replace_aggs_with_params_mutator(Node *node, List **context) { if (node == NULL) return NULL; diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index 24d53be9e97..ecbf44400c9 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -14,7 +14,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.88 2005/09/28 21:17:02 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.89 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -57,7 +57,7 @@ * does not use grouping * * Note: the PlannerInfo node also includes a query_pathkeys field, which is - * both an input and an output of query_planner(). The input value signals + * both an input and an output of query_planner(). The input value signals * query_planner that the indicated sort order is wanted in the final output * plan. But this value has not yet been "canonicalized", since the needed * info does not get computed until we scan the qual clauses. We canonicalize @@ -99,7 +99,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, if (parse->jointree->fromlist == NIL) { *cheapest_path = (Path *) create_result_path(NULL, NULL, - (List *) parse->jointree->quals); + (List *) parse->jointree->quals); *sorted_path = NULL; return; } @@ -107,21 +107,21 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, /* * Pull out any non-variable WHERE clauses so these can be put in a * toplevel "Result" node, where they will gate execution of the whole - * plan (the Result will not invoke its descendant plan unless the - * quals are true). Note that any *really* non-variable quals will - * have been optimized away by eval_const_expressions(). What we're - * mostly interested in here is quals that depend only on outer-level - * vars, although if the qual reduces to "WHERE FALSE" this path will - * also be taken. + * plan (the Result will not invoke its descendant plan unless the quals + * are true). Note that any *really* non-variable quals will have been + * optimized away by eval_const_expressions(). What we're mostly + * interested in here is quals that depend only on outer-level vars, + * although if the qual reduces to "WHERE FALSE" this path will also be + * taken. */ parse->jointree->quals = (Node *) pull_constant_clauses((List *) parse->jointree->quals, &constant_quals); /* - * Init planner lists to empty. 
We create the base_rel_array with a - * size that will be sufficient if no pullups or inheritance additions - * happen ... otherwise it will be enlarged as needed. + * Init planner lists to empty. We create the base_rel_array with a size + * that will be sufficient if no pullups or inheritance additions happen + * ... otherwise it will be enlarged as needed. * * NOTE: in_info_list was set up by subquery_planner, do not touch here */ @@ -141,33 +141,32 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, add_base_rels_to_query(root, (Node *) parse->jointree); /* - * Examine the targetlist and qualifications, adding entries to - * baserel targetlists for all referenced Vars. Restrict and join - * clauses are added to appropriate lists belonging to the mentioned - * relations. We also build lists of equijoined keys for pathkey - * construction. + * Examine the targetlist and qualifications, adding entries to baserel + * targetlists for all referenced Vars. Restrict and join clauses are + * added to appropriate lists belonging to the mentioned relations. We + * also build lists of equijoined keys for pathkey construction. * - * Note: all subplan nodes will have "flat" (var-only) tlists. This - * implies that all expression evaluations are done at the root of the - * plan tree. Once upon a time there was code to try to push - * expensive function calls down to lower plan nodes, but that's dead - * code and has been for a long time... + * Note: all subplan nodes will have "flat" (var-only) tlists. This implies + * that all expression evaluations are done at the root of the plan tree. + * Once upon a time there was code to try to push expensive function calls + * down to lower plan nodes, but that's dead code and has been for a long + * time... */ build_base_rel_tlists(root, tlist); (void) distribute_quals_to_rels(root, (Node *) parse->jointree, false); /* - * Use the completed lists of equijoined keys to deduce any implied - * but unstated equalities (for example, A=B and B=C imply A=C). + * Use the completed lists of equijoined keys to deduce any implied but + * unstated equalities (for example, A=B and B=C imply A=C). */ generate_implied_equalities(root); /* - * We should now have all the pathkey equivalence sets built, so it's - * now possible to convert the requested query_pathkeys to canonical - * form. Also canonicalize the groupClause and sortClause pathkeys - * for use later. + * We should now have all the pathkey equivalence sets built, so it's now + * possible to convert the requested query_pathkeys to canonical form. + * Also canonicalize the groupClause and sortClause pathkeys for use + * later. */ root->query_pathkeys = canonicalize_pathkeys(root, root->query_pathkeys); root->group_pathkeys = canonicalize_pathkeys(root, root->group_pathkeys); @@ -182,13 +181,13 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, elog(ERROR, "failed to construct the join relation"); /* - * If there's grouping going on, estimate the number of result groups. - * We couldn't do this any earlier because it depends on relation size + * If there's grouping going on, estimate the number of result groups. We + * couldn't do this any earlier because it depends on relation size * estimates that were set up above. * - * Then convert tuple_fraction to fractional form if it is absolute, - * and adjust it based on the knowledge that grouping_planner will be - * doing grouping or aggregation work with our result. 
+ * Then convert tuple_fraction to fractional form if it is absolute, and + * adjust it based on the knowledge that grouping_planner will be doing + * grouping or aggregation work with our result. * * This introduces some undesirable coupling between this code and * grouping_planner, but the alternatives seem even uglier; we couldn't @@ -205,18 +204,18 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, final_rel->rows); /* - * In GROUP BY mode, an absolute LIMIT is relative to the number - * of groups not the number of tuples. If the caller gave us - * a fraction, keep it as-is. (In both cases, we are effectively - * assuming that all the groups are about the same size.) + * In GROUP BY mode, an absolute LIMIT is relative to the number of + * groups not the number of tuples. If the caller gave us a fraction, + * keep it as-is. (In both cases, we are effectively assuming that + * all the groups are about the same size.) */ if (tuple_fraction >= 1.0) tuple_fraction /= *num_groups; /* * If both GROUP BY and ORDER BY are specified, we will need two - * levels of sort --- and, therefore, certainly need to read all - * the tuples --- unless ORDER BY is a subset of GROUP BY. + * levels of sort --- and, therefore, certainly need to read all the + * tuples --- unless ORDER BY is a subset of GROUP BY. */ if (parse->groupClause && parse->sortClause && !pathkeys_contained_in(root->sort_pathkeys, root->group_pathkeys)) @@ -225,8 +224,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, else if (parse->hasAggs || root->hasHavingQual) { /* - * Ungrouped aggregate will certainly want to read all the tuples, - * and it will deliver a single result row (so leave *num_groups 1). + * Ungrouped aggregate will certainly want to read all the tuples, and + * it will deliver a single result row (so leave *num_groups 1). */ tuple_fraction = 0.0; } @@ -234,11 +233,11 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, { /* * Since there was no grouping or aggregation, it's reasonable to - * assume the UNIQUE filter has effects comparable to GROUP BY. - * Return the estimated number of output rows for use by caller. - * (If DISTINCT is used with grouping, we ignore its effects for - * rowcount estimation purposes; this amounts to assuming the grouped - * rows are distinct already.) + * assume the UNIQUE filter has effects comparable to GROUP BY. Return + * the estimated number of output rows for use by caller. (If DISTINCT + * is used with grouping, we ignore its effects for rowcount + * estimation purposes; this amounts to assuming the grouped rows are + * distinct already.) */ List *distinctExprs; @@ -257,26 +256,26 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, else { /* - * Plain non-grouped, non-aggregated query: an absolute tuple - * fraction can be divided by the number of tuples. + * Plain non-grouped, non-aggregated query: an absolute tuple fraction + * can be divided by the number of tuples. */ if (tuple_fraction >= 1.0) tuple_fraction /= final_rel->rows; } /* - * Pick out the cheapest-total path and the cheapest presorted path - * for the requested pathkeys (if there is one). We should take the - * tuple fraction into account when selecting the cheapest presorted - * path, but not when selecting the cheapest-total path, since if we - * have to sort then we'll have to fetch all the tuples. 
(But there's - * a special case: if query_pathkeys is NIL, meaning order doesn't - * matter, then the "cheapest presorted" path will be the cheapest - * overall for the tuple fraction.) + * Pick out the cheapest-total path and the cheapest presorted path for + * the requested pathkeys (if there is one). We should take the tuple + * fraction into account when selecting the cheapest presorted path, but + * not when selecting the cheapest-total path, since if we have to sort + * then we'll have to fetch all the tuples. (But there's a special case: + * if query_pathkeys is NIL, meaning order doesn't matter, then the + * "cheapest presorted" path will be the cheapest overall for the tuple + * fraction.) * - * The cheapest-total path is also the one to use if grouping_planner - * decides to use hashed aggregation, so we return it separately even - * if this routine thinks the presorted path is the winner. + * The cheapest-total path is also the one to use if grouping_planner decides + * to use hashed aggregation, so we return it separately even if this + * routine thinks the presorted path is the winner. */ cheapestpath = final_rel->cheapest_total_path; @@ -291,8 +290,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, /* * Forget about the presorted path if it would be cheaper to sort the - * cheapest-total path. Here we need consider only the behavior at - * the tuple fraction point. + * cheapest-total path. Here we need consider only the behavior at the + * tuple fraction point. */ if (sortedpath) { @@ -323,8 +322,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction, } /* - * If we have constant quals, add a toplevel Result step to process - * them. + * If we have constant quals, add a toplevel Result step to process them. */ if (constant_quals) { diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index ace53d692fb..762dfb4b641 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.193 2005/09/24 22:54:37 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.194 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -59,8 +59,8 @@ static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode); static Plan *inheritance_planner(PlannerInfo *root, List *inheritlist); static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction); static double preprocess_limit(PlannerInfo *root, - double tuple_fraction, - int *offset_est, int *count_est); + double tuple_fraction, + int *offset_est, int *count_est); static bool choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, Path *cheapest_path, Path *sorted_path, double dNumGroups, AggClauseCounts *agg_counts); @@ -95,14 +95,13 @@ planner(Query *parse, bool isCursor, int cursorOptions, * these global state variables must be saved and restored. * * Query level and the param list cannot be moved into the per-query - * PlannerInfo structure since their whole purpose is communication - * across multiple sub-queries. Also, boundParams is explicitly info - * from outside the query, and so is likewise better handled as a global - * variable. + * PlannerInfo structure since their whole purpose is communication across + * multiple sub-queries. 
Also, boundParams is explicitly info from outside + * the query, and so is likewise better handled as a global variable. * - * Note we do NOT save and restore PlannerPlanId: it exists to assign - * unique IDs to SubPlan nodes, and we want those IDs to be unique for - * the life of a backend. Also, PlannerInitPlan is saved/restored in + * Note we do NOT save and restore PlannerPlanId: it exists to assign unique + * IDs to SubPlan nodes, and we want those IDs to be unique for the life + * of a backend. Also, PlannerInitPlan is saved/restored in * subquery_planner, not here. */ save_PlannerQueryLevel = PlannerQueryLevel; @@ -118,10 +117,10 @@ planner(Query *parse, bool isCursor, int cursorOptions, if (isCursor) { /* - * We have no real idea how many tuples the user will ultimately - * FETCH from a cursor, but it seems a good bet that he doesn't - * want 'em all. Optimize for 10% retrieval (you gotta better - * number? Should this be a SETtable parameter?) + * We have no real idea how many tuples the user will ultimately FETCH + * from a cursor, but it seems a good bet that he doesn't want 'em + * all. Optimize for 10% retrieval (you gotta better number? Should + * this be a SETtable parameter?) */ tuple_fraction = 0.10; } @@ -207,10 +206,10 @@ subquery_planner(Query *parse, double tuple_fraction, root->parse = parse; /* - * Look for IN clauses at the top level of WHERE, and transform them - * into joins. Note that this step only handles IN clauses originally - * at top level of WHERE; if we pull up any subqueries in the next - * step, their INs are processed just before pulling them up. + * Look for IN clauses at the top level of WHERE, and transform them into + * joins. Note that this step only handles IN clauses originally at top + * level of WHERE; if we pull up any subqueries in the next step, their + * INs are processed just before pulling them up. */ root->in_info_list = NIL; if (parse->hasSubLinks) @@ -225,14 +224,14 @@ subquery_planner(Query *parse, double tuple_fraction, pull_up_subqueries(root, (Node *) parse->jointree, false); /* - * Detect whether any rangetable entries are RTE_JOIN kind; if not, we - * can avoid the expense of doing flatten_join_alias_vars(). Also - * check for outer joins --- if none, we can skip reduce_outer_joins() - * and some other processing. This must be done after we have done + * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can + * avoid the expense of doing flatten_join_alias_vars(). Also check for + * outer joins --- if none, we can skip reduce_outer_joins() and some + * other processing. This must be done after we have done * pull_up_subqueries, of course. * * Note: if reduce_outer_joins manages to eliminate all outer joins, - * root->hasOuterJoins is not reset currently. This is OK since its + * root->hasOuterJoins is not reset currently. This is OK since its * purpose is merely to suppress unnecessary processing in simple cases. */ root->hasJoinRTEs = false; @@ -255,8 +254,8 @@ subquery_planner(Query *parse, double tuple_fraction, /* * Set hasHavingQual to remember if HAVING clause is present. Needed - * because preprocess_expression will reduce a constant-true condition - * to an empty qual list ... but "HAVING TRUE" is not a semantic no-op. + * because preprocess_expression will reduce a constant-true condition to + * an empty qual list ... but "HAVING TRUE" is not a semantic no-op. 
*/ root->hasHavingQual = (parse->havingQual != NULL); @@ -292,29 +291,29 @@ subquery_planner(Query *parse, double tuple_fraction, } /* - * In some cases we may want to transfer a HAVING clause into WHERE. - * We cannot do so if the HAVING clause contains aggregates (obviously) - * or volatile functions (since a HAVING clause is supposed to be executed + * In some cases we may want to transfer a HAVING clause into WHERE. We + * cannot do so if the HAVING clause contains aggregates (obviously) or + * volatile functions (since a HAVING clause is supposed to be executed * only once per group). Also, it may be that the clause is so expensive * to execute that we're better off doing it only once per group, despite * the loss of selectivity. This is hard to estimate short of doing the * entire planning process twice, so we use a heuristic: clauses - * containing subplans are left in HAVING. Otherwise, we move or copy - * the HAVING clause into WHERE, in hopes of eliminating tuples before + * containing subplans are left in HAVING. Otherwise, we move or copy the + * HAVING clause into WHERE, in hopes of eliminating tuples before * aggregation instead of after. * - * If the query has explicit grouping then we can simply move such a - * clause into WHERE; any group that fails the clause will not be - * in the output because none of its tuples will reach the grouping - * or aggregation stage. Otherwise we must have a degenerate - * (variable-free) HAVING clause, which we put in WHERE so that - * query_planner() can use it in a gating Result node, but also keep - * in HAVING to ensure that we don't emit a bogus aggregated row. - * (This could be done better, but it seems not worth optimizing.) + * If the query has explicit grouping then we can simply move such a clause + * into WHERE; any group that fails the clause will not be in the output + * because none of its tuples will reach the grouping or aggregation + * stage. Otherwise we must have a degenerate (variable-free) HAVING + * clause, which we put in WHERE so that query_planner() can use it in a + * gating Result node, but also keep in HAVING to ensure that we don't + * emit a bogus aggregated row. (This could be done better, but it seems + * not worth optimizing.) * * Note that both havingQual and parse->jointree->quals are in - * implicitly-ANDed-list form at this point, even though they are - * declared as Node *. + * implicitly-ANDed-list form at this point, even though they are declared + * as Node *. */ newHaving = NIL; foreach(l, (List *) parse->havingQual) @@ -346,28 +345,27 @@ subquery_planner(Query *parse, double tuple_fraction, parse->havingQual = (Node *) newHaving; /* - * If we have any outer joins, try to reduce them to plain inner - * joins. This step is most easily done after we've done expression + * If we have any outer joins, try to reduce them to plain inner joins. + * This step is most easily done after we've done expression * preprocessing. */ if (root->hasOuterJoins) reduce_outer_joins(root); /* - * See if we can simplify the jointree; opportunities for this may - * come from having pulled up subqueries, or from flattening explicit - * JOIN syntax. We must do this after flattening JOIN alias - * variables, since eliminating explicit JOIN nodes from the jointree - * will cause get_relids_for_join() to fail. But it should happen - * after reduce_outer_joins, anyway. + * See if we can simplify the jointree; opportunities for this may come + * from having pulled up subqueries, or from flattening explicit JOIN + * syntax. 
We must do this after flattening JOIN alias variables, since + * eliminating explicit JOIN nodes from the jointree will cause + * get_relids_for_join() to fail. But it should happen after + * reduce_outer_joins, anyway. */ parse->jointree = (FromExpr *) simplify_jointree(root, (Node *) parse->jointree); /* - * Do the main planning. If we have an inherited target relation, - * that needs special processing, else go straight to - * grouping_planner. + * Do the main planning. If we have an inherited target relation, that + * needs special processing, else go straight to grouping_planner. */ if (parse->resultRelation && (lst = expand_inherited_rtentry(root, parse->resultRelation)) != NIL) @@ -377,8 +375,8 @@ subquery_planner(Query *parse, double tuple_fraction, /* * If any subplans were generated, or if we're inside a subplan, build - * initPlan list and extParam/allParam sets for plan nodes, and attach - * the initPlans to the top plan node. + * initPlan list and extParam/allParam sets for plan nodes, and attach the + * initPlans to the top plan node. */ if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1) SS_finalize_plan(plan, parse->rtable); @@ -405,9 +403,9 @@ static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind) { /* - * Fall out quickly if expression is empty. This occurs often enough - * to be worth checking. Note that null->null is the correct conversion - * for implicit-AND result format, too. + * Fall out quickly if expression is empty. This occurs often enough to + * be worth checking. Note that null->null is the correct conversion for + * implicit-AND result format, too. */ if (expr == NULL) return NULL; @@ -415,8 +413,7 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) /* * If the query has any join RTEs, replace join alias variables with * base-relation variables. We must do this before sublink processing, - * else sublinks expanded out from join aliases wouldn't get - * processed. + * else sublinks expanded out from join aliases wouldn't get processed. */ if (root->hasJoinRTEs) expr = flatten_join_alias_vars(root, expr); @@ -429,13 +426,13 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) * careful to maintain AND/OR flatness --- that is, do not generate a tree * with AND directly under AND, nor OR directly under OR. * - * Because this is a relatively expensive process, we skip it when the - * query is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". - * The expression will only be evaluated once anyway, so no point in + * Because this is a relatively expensive process, we skip it when the query + * is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The + * expression will only be evaluated once anyway, so no point in * pre-simplifying; we can't execute it any faster than the executor can, * and we will waste cycles copying the tree. Notice however that we - * still must do it for quals (to get AND/OR flatness); and if we are - * in a subquery we should not assume it will be done only once. + * still must do it for quals (to get AND/OR flatness); and if we are in a + * subquery we should not assume it will be done only once. */ if (root->parse->jointree->fromlist != NIL || kind == EXPRKIND_QUAL || @@ -460,8 +457,8 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL)); /* - * XXX do not insert anything here unless you have grokked the - * comments in SS_replace_correlation_vars ... 
+ * XXX do not insert anything here unless you have grokked the comments in + * SS_replace_correlation_vars ... */ /* Replace uplevel vars with Param nodes */ @@ -469,9 +466,9 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) expr = SS_replace_correlation_vars(expr); /* - * If it's a qual or havingQual, convert it to implicit-AND format. - * (We don't want to do this before eval_const_expressions, since the - * latter would be unable to simplify a top-level AND correctly. Also, + * If it's a qual or havingQual, convert it to implicit-AND format. (We + * don't want to do this before eval_const_expressions, since the latter + * would be unable to simplify a top-level AND correctly. Also, * SS_process_sublinks expects explicit-AND format.) */ if (kind == EXPRKIND_QUAL) @@ -557,9 +554,9 @@ inheritance_planner(PlannerInfo *root, List *inheritlist) Plan *subplan; /* - * Generate modified query with this rel as target. We have to - * be prepared to translate varnos in in_info_list as well as in - * the Query proper. + * Generate modified query with this rel as target. We have to be + * prepared to translate varnos in in_info_list as well as in the + * Query proper. */ memcpy(&subroot, root, sizeof(PlannerInfo)); subroot.parse = (Query *) @@ -580,26 +577,26 @@ inheritance_planner(PlannerInfo *root, List *inheritlist) * XXX my goodness this next bit is ugly. Really need to think about * ways to rein in planner's habit of scribbling on its input. * - * Planning of the subquery might have modified the rangetable, - * either by addition of RTEs due to expansion of inherited source - * tables, or by changes of the Query structures inside subquery - * RTEs. We have to ensure that this gets propagated back to the - * master copy. However, if we aren't done planning yet, we also - * need to ensure that subsequent calls to grouping_planner have - * virgin sub-Queries to work from. So, if we are at the last - * list entry, just copy the subquery rangetable back to the master - * copy; if we are not, then extend the master copy by adding - * whatever the subquery added. (We assume these added entries - * will go untouched by the future grouping_planner calls. We are - * also effectively assuming that sub-Queries will get planned - * identically each time, or at least that the impacts on their - * rangetables will be the same each time. Did I say this is ugly?) + * Planning of the subquery might have modified the rangetable, either by + * addition of RTEs due to expansion of inherited source tables, or by + * changes of the Query structures inside subquery RTEs. We have to + * ensure that this gets propagated back to the master copy. However, + * if we aren't done planning yet, we also need to ensure that + * subsequent calls to grouping_planner have virgin sub-Queries to + * work from. So, if we are at the last list entry, just copy the + * subquery rangetable back to the master copy; if we are not, then + * extend the master copy by adding whatever the subquery added. (We + * assume these added entries will go untouched by the future + * grouping_planner calls. We are also effectively assuming that + * sub-Queries will get planned identically each time, or at least + * that the impacts on their rangetables will be the same each time. + * Did I say this is ugly?) 
*/ if (lnext(l) == NULL) parse->rtable = subroot.parse->rtable; else { - int subrtlength = list_length(subroot.parse->rtable); + int subrtlength = list_length(subroot.parse->rtable); if (subrtlength > mainrtlength) { @@ -666,38 +663,37 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) List *set_sortclauses; /* - * If there's a top-level ORDER BY, assume we have to fetch all - * the tuples. This might seem too simplistic given all the - * hackery below to possibly avoid the sort ... but a nonzero - * tuple_fraction is only of use to plan_set_operations() when - * the setop is UNION ALL, and the result of UNION ALL is always - * unsorted. + * If there's a top-level ORDER BY, assume we have to fetch all the + * tuples. This might seem too simplistic given all the hackery below + * to possibly avoid the sort ... but a nonzero tuple_fraction is only + * of use to plan_set_operations() when the setop is UNION ALL, and + * the result of UNION ALL is always unsorted. */ if (parse->sortClause) tuple_fraction = 0.0; /* - * Construct the plan for set operations. The result will not - * need any work except perhaps a top-level sort and/or LIMIT. + * Construct the plan for set operations. The result will not need + * any work except perhaps a top-level sort and/or LIMIT. */ result_plan = plan_set_operations(root, tuple_fraction, &set_sortclauses); /* - * Calculate pathkeys representing the sort order (if any) of the - * set operation's result. We have to do this before overwriting - * the sort key information... + * Calculate pathkeys representing the sort order (if any) of the set + * operation's result. We have to do this before overwriting the sort + * key information... */ current_pathkeys = make_pathkeys_for_sortclauses(set_sortclauses, - result_plan->targetlist); + result_plan->targetlist); current_pathkeys = canonicalize_pathkeys(root, current_pathkeys); /* - * We should not need to call preprocess_targetlist, since we must - * be in a SELECT query node. Instead, use the targetlist - * returned by plan_set_operations (since this tells whether it - * returned any resjunk columns!), and transfer any sort key - * information from the original tlist. + * We should not need to call preprocess_targetlist, since we must be + * in a SELECT query node. Instead, use the targetlist returned by + * plan_set_operations (since this tells whether it returned any + * resjunk columns!), and transfer any sort key information from the + * original tlist. */ Assert(parse->commandType == CMD_SELECT); @@ -741,11 +737,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) tlist = preprocess_targetlist(root, tlist); /* - * Generate appropriate target list for subplan; may be different - * from tlist if grouping or aggregation is needed. + * Generate appropriate target list for subplan; may be different from + * tlist if grouping or aggregation is needed. */ sub_tlist = make_subplanTargetList(root, tlist, - &groupColIdx, &need_tlist_eval); + &groupColIdx, &need_tlist_eval); /* * Calculate pathkeys that represent grouping/ordering requirements. @@ -763,10 +759,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * Note: we do not attempt to detect duplicate aggregates here; a * somewhat-overestimated count is okay for our present purposes. * - * Note: think not that we can turn off hasAggs if we find no aggs. 
- * It is possible for constant-expression simplification to remove - * all explicit references to aggs, but we still have to follow - * the aggregate semantics (eg, producing only one output row). + * Note: think not that we can turn off hasAggs if we find no aggs. It is + * possible for constant-expression simplification to remove all + * explicit references to aggs, but we still have to follow the + * aggregate semantics (eg, producing only one output row). */ if (parse->hasAggs) { @@ -777,13 +773,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * Figure out whether we need a sorted result from query_planner. * - * If we have a GROUP BY clause, then we want a result sorted - * properly for grouping. Otherwise, if there is an ORDER BY - * clause, we want to sort by the ORDER BY clause. (Note: if we - * have both, and ORDER BY is a superset of GROUP BY, it would be - * tempting to request sort by ORDER BY --- but that might just - * leave us failing to exploit an available sort order at all. - * Needs more thought...) + * If we have a GROUP BY clause, then we want a result sorted properly + * for grouping. Otherwise, if there is an ORDER BY clause, we want + * to sort by the ORDER BY clause. (Note: if we have both, and ORDER + * BY is a superset of GROUP BY, it would be tempting to request sort + * by ORDER BY --- but that might just leave us failing to exploit an + * available sort order at all. Needs more thought...) */ if (parse->groupClause) root->query_pathkeys = root->group_pathkeys; @@ -793,10 +788,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) root->query_pathkeys = NIL; /* - * Generate the best unsorted and presorted paths for this Query - * (but note there may not be any presorted path). query_planner - * will also estimate the number of groups in the query, and - * canonicalize all the pathkeys. + * Generate the best unsorted and presorted paths for this Query (but + * note there may not be any presorted path). query_planner will also + * estimate the number of groups in the query, and canonicalize all + * the pathkeys. */ query_planner(root, sub_tlist, tuple_fraction, &cheapest_path, &sorted_path, &dNumGroups); @@ -820,8 +815,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * Select the best path. If we are doing hashed grouping, we will - * always read all the input tuples, so use the cheapest-total - * path. Otherwise, trust query_planner's decision about which to use. + * always read all the input tuples, so use the cheapest-total path. + * Otherwise, trust query_planner's decision about which to use. */ if (use_hashed_grouping || !sorted_path) best_path = cheapest_path; @@ -829,10 +824,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) best_path = sorted_path; /* - * Check to see if it's possible to optimize MIN/MAX aggregates. - * If so, we will forget all the work we did so far to choose a - * "regular" path ... but we had to do it anyway to be able to - * tell which way is cheaper. + * Check to see if it's possible to optimize MIN/MAX aggregates. If + * so, we will forget all the work we did so far to choose a "regular" + * path ... but we had to do it anyway to be able to tell which way is + * cheaper. */ result_plan = optimize_minmax_aggregates(root, tlist, @@ -840,8 +835,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) if (result_plan != NULL) { /* - * optimize_minmax_aggregates generated the full plan, with - * the right tlist, and it has no sort order. 
+ * optimize_minmax_aggregates generated the full plan, with the + * right tlist, and it has no sort order. */ current_pathkeys = NIL; } @@ -985,8 +980,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * GROUP BY without aggregation, so insert a group node (plus * the appropriate sort node, if necessary). * - * Add an explicit sort if we couldn't make the path come - * out the way the GROUP node needs it. + * Add an explicit sort if we couldn't make the path come out the + * way the GROUP node needs it. */ if (!pathkeys_contained_in(group_pathkeys, current_pathkeys)) { @@ -1014,11 +1009,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * This is a degenerate case in which we are supposed to emit * either 0 or 1 row depending on whether HAVING succeeds. * Furthermore, there cannot be any variables in either HAVING - * or the targetlist, so we actually do not need the FROM table - * at all! We can just throw away the plan-so-far and generate - * a Result node. This is a sufficiently unusual corner case - * that it's not worth contorting the structure of this routine - * to avoid having to generate the plan in the first place. + * or the targetlist, so we actually do not need the FROM + * table at all! We can just throw away the plan-so-far and + * generate a Result node. This is a sufficiently unusual + * corner case that it's not worth contorting the structure of + * this routine to avoid having to generate the plan in the + * first place. */ result_plan = (Plan *) make_result(tlist, parse->havingQual, @@ -1028,8 +1024,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) } /* end of if (setOperations) */ /* - * If we were not able to make the plan come out in the right order, - * add an explicit sort step. + * If we were not able to make the plan come out in the right order, add + * an explicit sort step. */ if (parse->sortClause) { @@ -1051,9 +1047,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) result_plan = (Plan *) make_unique(result_plan, parse->distinctClause); /* - * If there was grouping or aggregation, leave plan_rows as-is - * (ie, assume the result was already mostly unique). If not, - * use the number of distinct-groups calculated by query_planner. + * If there was grouping or aggregation, leave plan_rows as-is (ie, + * assume the result was already mostly unique). If not, use the + * number of distinct-groups calculated by query_planner. */ if (!parse->groupClause && !root->hasHavingQual && !parse->hasAggs) result_plan->plan_rows = dNumGroups; @@ -1072,8 +1068,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) } /* - * Return the actual output ordering in query_pathkeys for possible - * use by an outer query level. + * Return the actual output ordering in query_pathkeys for possible use by + * an outer query level. */ root->query_pathkeys = current_pathkeys; @@ -1084,7 +1080,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses * * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the - * results back in *count_est and *offset_est. These variables are set to + * results back in *count_est and *offset_est. These variables are set to * 0 if the corresponding clause is not present, and -1 if it's present * but we couldn't estimate the value for it. 
(The "0" convention is OK * for OFFSET but a little bit bogus for LIMIT: effectively we estimate @@ -1093,7 +1089,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * be passed to make_limit, which see if you change this code. * * The return value is the suitably adjusted tuple_fraction to use for - * planning the query. This adjustment is not overridable, since it reflects + * planning the query. This adjustment is not overridable, since it reflects * plan actions that grouping_planner() will certainly take, not assumptions * about context. */ @@ -1120,7 +1116,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, if (((Const *) est)->constisnull) { /* NULL indicates LIMIT ALL, ie, no limit */ - *count_est = 0; /* treat as not present */ + *count_est = 0; /* treat as not present */ } else { @@ -1143,7 +1139,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, if (((Const *) est)->constisnull) { /* Treat NULL as no offset; the executor will too */ - *offset_est = 0; /* treat as not present */ + *offset_est = 0; /* treat as not present */ } else { @@ -1217,11 +1213,11 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, else if (*offset_est != 0 && tuple_fraction > 0.0) { /* - * We have an OFFSET but no LIMIT. This acts entirely differently - * from the LIMIT case: here, we need to increase rather than - * decrease the caller's tuple_fraction, because the OFFSET acts - * to cause more tuples to be fetched instead of fewer. This only - * matters if we got a tuple_fraction > 0, however. + * We have an OFFSET but no LIMIT. This acts entirely differently + * from the LIMIT case: here, we need to increase rather than decrease + * the caller's tuple_fraction, because the OFFSET acts to cause more + * tuples to be fetched instead of fewer. This only matters if we got + * a tuple_fraction > 0, however. * * As above, use 10% if OFFSET is present but unestimatable. */ @@ -1232,9 +1228,9 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, /* * If we have absolute counts from both caller and OFFSET, add them - * together; likewise if they are both fractional. If one is - * fractional and the other absolute, we want to take the larger, - * and we heuristically assume that's the fractional one. + * together; likewise if they are both fractional. If one is + * fractional and the other absolute, we want to take the larger, and + * we heuristically assume that's the fractional one. */ if (tuple_fraction >= 1.0) { @@ -1260,7 +1256,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, /* both fractional, so add them together */ tuple_fraction += limit_fraction; if (tuple_fraction >= 1.0) - tuple_fraction = 0.0; /* assume fetch all */ + tuple_fraction = 0.0; /* assume fetch all */ } } } @@ -1303,9 +1299,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, * Don't do it if it doesn't look like the hashtable will fit into * work_mem. * - * Beware here of the possibility that cheapest_path->parent is NULL. - * This could happen if user does something silly like - * SELECT 'foo' GROUP BY 1; + * Beware here of the possibility that cheapest_path->parent is NULL. 
This + * could happen if user does something silly like SELECT 'foo' GROUP BY 1; */ if (cheapest_path->parent) { @@ -1314,8 +1309,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, } else { - cheapest_path_rows = 1; /* assume non-set result */ - cheapest_path_width = 100; /* arbitrary */ + cheapest_path_rows = 1; /* assume non-set result */ + cheapest_path_width = 100; /* arbitrary */ } /* Estimate per-hash-entry space at tuple width... */ @@ -1329,23 +1324,19 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction, return false; /* - * See if the estimated cost is no more than doing it the other way. - * While avoiding the need for sorted input is usually a win, the fact - * that the output won't be sorted may be a loss; so we need to do an - * actual cost comparison. + * See if the estimated cost is no more than doing it the other way. While + * avoiding the need for sorted input is usually a win, the fact that the + * output won't be sorted may be a loss; so we need to do an actual cost + * comparison. * - * We need to consider - * cheapest_path + hashagg [+ final sort] - * versus either - * cheapest_path [+ sort] + group or agg [+ final sort] - * or - * presorted_path + group or agg [+ final sort] - * where brackets indicate a step that may not be needed. We assume - * query_planner() will have returned a presorted path only if it's a - * winner compared to cheapest_path for this purpose. + * We need to consider cheapest_path + hashagg [+ final sort] versus either + * cheapest_path [+ sort] + group or agg [+ final sort] or presorted_path + * + group or agg [+ final sort] where brackets indicate a step that may + * not be needed. We assume query_planner() will have returned a presorted + * path only if it's a winner compared to cheapest_path for this purpose. * - * These path variables are dummies that just hold cost fields; we don't - * make actual Paths for these steps. + * These path variables are dummies that just hold cost fields; we don't make + * actual Paths for these steps. */ cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs, numGroupCols, dNumGroups, @@ -1502,8 +1493,8 @@ make_subplanTargetList(PlannerInfo *root, /* * Otherwise, start with a "flattened" tlist (having just the vars - * mentioned in the targetlist and HAVING qual --- but not upper- - * level Vars; they will be replaced by Params later on). + * mentioned in the targetlist and HAVING qual --- but not upper- level + * Vars; they will be replaced by Params later on). */ sub_tlist = flatten_tlist(tlist); extravars = pull_var_clause(parse->havingQual, false); @@ -1513,9 +1504,8 @@ make_subplanTargetList(PlannerInfo *root, /* * If grouping, create sub_tlist entries for all GROUP BY expressions - * (GROUP BY items that are simple Vars should be in the list - * already), and make an array showing where the group columns are in - * the sub_tlist. + * (GROUP BY items that are simple Vars should be in the list already), + * and make an array showing where the group columns are in the sub_tlist. 
*/ numCols = list_length(parse->groupClause); if (numCols > 0) @@ -1634,7 +1624,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist) Assert(orig_tlist_item != NULL); orig_tle = (TargetEntry *) lfirst(orig_tlist_item); orig_tlist_item = lnext(orig_tlist_item); - if (orig_tle->resjunk) /* should not happen */ + if (orig_tle->resjunk) /* should not happen */ elog(ERROR, "resjunk output columns are not implemented"); Assert(new_tle->resno == orig_tle->resno); new_tle->ressortgroupref = orig_tle->ressortgroupref; diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index fe01555a3c4..2ca616e118b 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.114 2005/09/05 18:59:38 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.115 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -38,7 +38,7 @@ typedef struct int num_vars; /* number of plain Var tlist entries */ bool has_non_vars; /* are there non-plain-Var entries? */ /* array of num_vars entries: */ - tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */ + tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */ } indexed_tlist; /* VARIABLE LENGTH STRUCT */ typedef struct @@ -64,28 +64,28 @@ static void fix_expr_references(Plan *plan, Node *node); static bool fix_expr_references_walker(Node *node, void *context); static void set_join_references(Join *join, List *rtable); static void set_inner_join_references(Plan *inner_plan, - List *rtable, - indexed_tlist *outer_itlist); + List *rtable, + indexed_tlist *outer_itlist); static void set_uppernode_references(Plan *plan, Index subvarno); static indexed_tlist *build_tlist_index(List *tlist); static Var *search_indexed_tlist_for_var(Var *var, - indexed_tlist *itlist, - Index newvarno); + indexed_tlist *itlist, + Index newvarno); static Var *search_indexed_tlist_for_non_var(Node *node, - indexed_tlist *itlist, - Index newvarno); + indexed_tlist *itlist, + Index newvarno); static List *join_references(List *clauses, - List *rtable, - indexed_tlist *outer_itlist, - indexed_tlist *inner_itlist, - Index acceptable_rel); + List *rtable, + indexed_tlist *outer_itlist, + indexed_tlist *inner_itlist, + Index acceptable_rel); static Node *join_references_mutator(Node *node, join_references_context *context); static Node *replace_vars_with_subplan_refs(Node *node, - indexed_tlist *subplan_itlist, - Index subvarno); + indexed_tlist *subplan_itlist, + Index subvarno); static Node *replace_vars_with_subplan_refs_mutator(Node *node, - replace_vars_with_subplan_refs_context *context); + replace_vars_with_subplan_refs_context *context); static bool fix_opfuncids_walker(Node *node, void *context); static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr); @@ -99,7 +99,7 @@ static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr); /* * set_plan_references * - * This is the final processing pass of the planner/optimizer. The plan + * This is the final processing pass of the planner/optimizer. The plan * tree is complete; we just have to adjust some representational details * for the convenience of the executor. 
We update Vars in upper plan nodes * to refer to the outputs of their subplans, and we compute regproc OIDs @@ -150,22 +150,22 @@ set_plan_references(Plan *plan, List *rtable) fix_expr_references(plan, (Node *) ((IndexScan *) plan)->indexqual); fix_expr_references(plan, - (Node *) ((IndexScan *) plan)->indexqualorig); + (Node *) ((IndexScan *) plan)->indexqualorig); break; case T_BitmapIndexScan: /* no need to fix targetlist and qual */ Assert(plan->targetlist == NIL); Assert(plan->qual == NIL); fix_expr_references(plan, - (Node *) ((BitmapIndexScan *) plan)->indexqual); + (Node *) ((BitmapIndexScan *) plan)->indexqual); fix_expr_references(plan, - (Node *) ((BitmapIndexScan *) plan)->indexqualorig); + (Node *) ((BitmapIndexScan *) plan)->indexqualorig); break; case T_BitmapHeapScan: fix_expr_references(plan, (Node *) plan->targetlist); fix_expr_references(plan, (Node *) plan->qual); fix_expr_references(plan, - (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig); + (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig); break; case T_TidScan: fix_expr_references(plan, (Node *) plan->targetlist); @@ -200,7 +200,7 @@ set_plan_references(Plan *plan, List *rtable) fix_expr_references(plan, (Node *) plan->qual); fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual); fix_expr_references(plan, - (Node *) ((MergeJoin *) plan)->mergeclauses); + (Node *) ((MergeJoin *) plan)->mergeclauses); break; case T_HashJoin: set_join_references((Join *) plan, rtable); @@ -208,7 +208,7 @@ set_plan_references(Plan *plan, List *rtable) fix_expr_references(plan, (Node *) plan->qual); fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual); fix_expr_references(plan, - (Node *) ((HashJoin *) plan)->hashclauses); + (Node *) ((HashJoin *) plan)->hashclauses); break; case T_Hash: case T_Material: @@ -218,24 +218,24 @@ set_plan_references(Plan *plan, List *rtable) /* * These plan types don't actually bother to evaluate their - * targetlists (because they just return their unmodified - * input tuples). The optimizer is lazy about creating really - * valid targetlists for them --- it tends to just put in a - * pointer to the child plan node's tlist. Hence, we leave - * the tlist alone. In particular, we do not want to process - * subplans in the tlist, since we will likely end up reprocessing - * subplans that also appear in lower levels of the plan tree! + * targetlists (because they just return their unmodified input + * tuples). The optimizer is lazy about creating really valid + * targetlists for them --- it tends to just put in a pointer to + * the child plan node's tlist. Hence, we leave the tlist alone. + * In particular, we do not want to process subplans in the tlist, + * since we will likely end up reprocessing subplans that also + * appear in lower levels of the plan tree! * - * Since these plan types don't check quals either, we should - * not find any qual expression attached to them. + * Since these plan types don't check quals either, we should not + * find any qual expression attached to them. */ Assert(plan->qual == NIL); break; case T_Limit: /* - * Like the plan types above, Limit doesn't evaluate its tlist - * or quals. It does have live expressions for limit/offset, + * Like the plan types above, Limit doesn't evaluate its tlist or + * quals. It does have live expressions for limit/offset, * however. 
*/ Assert(plan->qual == NIL); @@ -251,8 +251,8 @@ set_plan_references(Plan *plan, List *rtable) case T_Result: /* - * Result may or may not have a subplan; no need to fix up - * subplan references if it hasn't got one... + * Result may or may not have a subplan; no need to fix up subplan + * references if it hasn't got one... * * XXX why does Result use a different subvarno from Agg/Group? */ @@ -300,9 +300,9 @@ set_plan_references(Plan *plan, List *rtable) * NOTE: it is essential that we recurse into child plans AFTER we set * subplan references in this plan's tlist and quals. If we did the * reference-adjustments bottom-up, then we would fail to match this - * plan's var nodes against the already-modified nodes of the - * children. Fortunately, that consideration doesn't apply to SubPlan - * nodes; else we'd need two passes over the expression trees. + * plan's var nodes against the already-modified nodes of the children. + * Fortunately, that consideration doesn't apply to SubPlan nodes; else + * we'd need two passes over the expression trees. */ plan->lefttree = set_plan_references(plan->lefttree, rtable); plan->righttree = set_plan_references(plan->righttree, rtable); @@ -339,8 +339,8 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable) rte->subquery->rtable); /* - * We have to process any initplans too; set_plan_references can't do - * it for us because of the possibility of double-processing. + * We have to process any initplans too; set_plan_references can't do it + * for us because of the possibility of double-processing. */ foreach(l, plan->scan.plan.initPlan) { @@ -353,12 +353,12 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable) if (trivial_subqueryscan(plan)) { /* - * We can omit the SubqueryScan node and just pull up the subplan. - * We have to merge its rtable into the outer rtable, which means + * We can omit the SubqueryScan node and just pull up the subplan. We + * have to merge its rtable into the outer rtable, which means * adjusting varnos throughout the subtree. */ - int rtoffset = list_length(rtable); - List *sub_rtable; + int rtoffset = list_length(rtable); + List *sub_rtable; sub_rtable = copyObject(rte->subquery->rtable); range_table_walker(sub_rtable, @@ -382,11 +382,11 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable) else { /* - * Keep the SubqueryScan node. We have to do the processing that - * set_plan_references would otherwise have done on it. Notice - * we do not do set_uppernode_references() here, because a - * SubqueryScan will always have been created with correct - * references to its subplan's outputs to begin with. + * Keep the SubqueryScan node. We have to do the processing that + * set_plan_references would otherwise have done on it. Notice we do + * not do set_uppernode_references() here, because a SubqueryScan will + * always have been created with correct references to its subplan's + * outputs to begin with. */ result = (Plan *) plan; @@ -532,9 +532,9 @@ adjust_plan_varnos(Plan *plan, int rtoffset) case T_SetOp: /* - * Even though the targetlist won't be used by the executor, - * we fix it up for possible use by EXPLAIN (not to mention - * ease of debugging --- wrong varnos are very confusing). + * Even though the targetlist won't be used by the executor, we + * fix it up for possible use by EXPLAIN (not to mention ease of + * debugging --- wrong varnos are very confusing). 
*/ adjust_expr_varnos((Node *) plan->targetlist, rtoffset); Assert(plan->qual == NIL); @@ -542,8 +542,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset) case T_Limit: /* - * Like the plan types above, Limit doesn't evaluate its tlist - * or quals. It does have live expressions for limit/offset, + * Like the plan types above, Limit doesn't evaluate its tlist or + * quals. It does have live expressions for limit/offset, * however. */ adjust_expr_varnos((Node *) plan->targetlist, rtoffset); @@ -590,8 +590,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset) /* * Now recurse into child plans. * - * We don't need to (and in fact mustn't) recurse into subqueries, - * so no need to examine initPlan list. + * We don't need to (and in fact mustn't) recurse into subqueries, so no need + * to examine initPlan list. */ adjust_plan_varnos(plan->lefttree, rtoffset); adjust_plan_varnos(plan->righttree, rtoffset); @@ -603,7 +603,7 @@ adjust_plan_varnos(Plan *plan, int rtoffset) * * This is different from the rewriter's OffsetVarNodes in that it has to * work on an already-planned expression tree; in particular, we should not - * disturb INNER and OUTER references. On the other hand, we don't have to + * disturb INNER and OUTER references. On the other hand, we don't have to * recurse into subqueries nor deal with outer-level Vars, so it's pretty * simple. */ @@ -763,10 +763,10 @@ set_inner_join_references(Plan *inner_plan, if (IsA(inner_plan, IndexScan)) { /* - * An index is being used to reduce the number of tuples - * scanned in the inner relation. If there are join clauses - * being used with the index, we must update their outer-rel - * var nodes to refer to the outer side of the join. + * An index is being used to reduce the number of tuples scanned in + * the inner relation. If there are join clauses being used with the + * index, we must update their outer-rel var nodes to refer to the + * outer side of the join. */ IndexScan *innerscan = (IndexScan *) inner_plan; List *indexqualorig = innerscan->indexqualorig; @@ -789,9 +789,9 @@ set_inner_join_references(Plan *inner_plan, innerrel); /* - * We must fix the inner qpqual too, if it has join - * clauses (this could happen if special operators are - * involved: some indexquals may get rechecked as qpquals). + * We must fix the inner qpqual too, if it has join clauses (this + * could happen if special operators are involved: some indexquals + * may get rechecked as qpquals). */ if (NumRelids((Node *) inner_plan->qual) > 1) inner_plan->qual = join_references(inner_plan->qual, @@ -832,11 +832,11 @@ set_inner_join_references(Plan *inner_plan, else if (IsA(inner_plan, BitmapHeapScan)) { /* - * The inner side is a bitmap scan plan. Fix the top node, - * and recurse to get the lower nodes. + * The inner side is a bitmap scan plan. Fix the top node, and + * recurse to get the lower nodes. * - * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig - * if they are duplicated in qpqual, so must test these independently. + * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig if + * they are duplicated in qpqual, so must test these independently. */ BitmapHeapScan *innerscan = (BitmapHeapScan *) inner_plan; Index innerrel = innerscan->scan.scanrelid; @@ -851,9 +851,9 @@ set_inner_join_references(Plan *inner_plan, innerrel); /* - * We must fix the inner qpqual too, if it has join - * clauses (this could happen if special operators are - * involved: some indexquals may get rechecked as qpquals). 
+ * We must fix the inner qpqual too, if it has join clauses (this + * could happen if special operators are involved: some indexquals may + * get rechecked as qpquals). */ if (NumRelids((Node *) inner_plan->qual) > 1) inner_plan->qual = join_references(inner_plan->qual, @@ -870,8 +870,8 @@ set_inner_join_references(Plan *inner_plan, else if (IsA(inner_plan, BitmapAnd)) { /* All we need do here is recurse */ - BitmapAnd *innerscan = (BitmapAnd *) inner_plan; - ListCell *l; + BitmapAnd *innerscan = (BitmapAnd *) inner_plan; + ListCell *l; foreach(l, innerscan->bitmapplans) { @@ -883,8 +883,8 @@ set_inner_join_references(Plan *inner_plan, else if (IsA(inner_plan, BitmapOr)) { /* All we need do here is recurse */ - BitmapOr *innerscan = (BitmapOr *) inner_plan; - ListCell *l; + BitmapOr *innerscan = (BitmapOr *) inner_plan; + ListCell *l; foreach(l, innerscan->bitmapplans) { @@ -963,7 +963,7 @@ set_uppernode_references(Plan *plan, Index subvarno) * * In most cases, subplan tlists will be "flat" tlists with only Vars, * so we try to optimize that case by extracting information about Vars - * in advance. Matching a parent tlist to a child is still an O(N^2) + * in advance. Matching a parent tlist to a child is still an O(N^2) * operation, but at least with a much smaller constant factor than plain * tlist_member() searches. * @@ -994,7 +994,7 @@ build_tlist_index(List *tlist) if (tle->expr && IsA(tle->expr, Var)) { - Var *var = (Var *) tle->expr; + Var *var = (Var *) tle->expr; vinfo->varno = var->varno; vinfo->varattno = var->varattno; @@ -1068,7 +1068,7 @@ search_indexed_tlist_for_non_var(Node *node, exprType((Node *) tle->expr), exprTypmod((Node *) tle->expr), 0); - newvar->varnoold = 0; /* wasn't ever a plain Var */ + newvar->varnoold = 0; /* wasn't ever a plain Var */ newvar->varoattno = 0; return newvar; } @@ -1213,7 +1213,7 @@ replace_vars_with_subplan_refs(Node *node, static Node * replace_vars_with_subplan_refs_mutator(Node *node, - replace_vars_with_subplan_refs_context *context) + replace_vars_with_subplan_refs_context *context) { Var *newvar; diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index ec037db514c..b0dc9c5bf7f 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.99 2005/06/05 22:32:56 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.100 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -110,19 +110,18 @@ replace_outer_var(Var *var) abslevel = PlannerQueryLevel - var->varlevelsup; /* - * If there's already a PlannerParamList entry for this same Var, just - * use it. NOTE: in sufficiently complex querytrees, it is possible - * for the same varno/abslevel to refer to different RTEs in different - * parts of the parsetree, so that different fields might end up - * sharing the same Param number. As long as we check the vartype as - * well, I believe that this sort of aliasing will cause no trouble. - * The correct field should get stored into the Param slot at - * execution in each part of the tree. + * If there's already a PlannerParamList entry for this same Var, just use + * it. 
NOTE: in sufficiently complex querytrees, it is possible for the + * same varno/abslevel to refer to different RTEs in different parts of + * the parsetree, so that different fields might end up sharing the same + * Param number. As long as we check the vartype as well, I believe that + * this sort of aliasing will cause no trouble. The correct field should + * get stored into the Param slot at execution in each part of the tree. * - * We also need to demand a match on vartypmod. This does not matter for - * the Param itself, since those are not typmod-dependent, but it does - * matter when make_subplan() instantiates a modified copy of the Var - * for a subplan's args list. + * We also need to demand a match on vartypmod. This does not matter for the + * Param itself, since those are not typmod-dependent, but it does matter + * when make_subplan() instantiates a modified copy of the Var for a + * subplan's args list. */ i = 0; foreach(ppl, PlannerParamList) @@ -179,8 +178,8 @@ replace_outer_agg(Aggref *agg) abslevel = PlannerQueryLevel - agg->agglevelsup; /* - * It does not seem worthwhile to try to match duplicate outer aggs. - * Just make a new slot every time. + * It does not seem worthwhile to try to match duplicate outer aggs. Just + * make a new slot every time. */ agg = (Aggref *) copyObject(agg); IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0); @@ -253,33 +252,32 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) Node *result; /* - * Copy the source Query node. This is a quick and dirty kluge to - * resolve the fact that the parser can generate trees with multiple - * links to the same sub-Query node, but the planner wants to scribble - * on the Query. Try to clean this up when we do querytree redesign... + * Copy the source Query node. This is a quick and dirty kluge to resolve + * the fact that the parser can generate trees with multiple links to the + * same sub-Query node, but the planner wants to scribble on the Query. + * Try to clean this up when we do querytree redesign... */ subquery = (Query *) copyObject(subquery); /* - * For an EXISTS subplan, tell lower-level planner to expect that only - * the first tuple will be retrieved. For ALL and ANY subplans, we - * will be able to stop evaluating if the test condition fails, so - * very often not all the tuples will be retrieved; for lack of a - * better idea, specify 50% retrieval. For EXPR and MULTIEXPR - * subplans, use default behavior (we're only expecting one row out, - * anyway). + * For an EXISTS subplan, tell lower-level planner to expect that only the + * first tuple will be retrieved. For ALL and ANY subplans, we will be + * able to stop evaluating if the test condition fails, so very often not + * all the tuples will be retrieved; for lack of a better idea, specify + * 50% retrieval. For EXPR and MULTIEXPR subplans, use default behavior + * (we're only expecting one row out, anyway). * - * NOTE: if you change these numbers, also change cost_qual_eval_walker() - * in path/costsize.c. + * NOTE: if you change these numbers, also change cost_qual_eval_walker() in + * path/costsize.c. * * XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or - * materialize its result below. In that case it would've been better - * to specify full retrieval. At present, however, we can only detect + * materialize its result below. In that case it would've been better to + * specify full retrieval. 
At present, however, we can only detect * correlation or lack of it after we've made the subplan :-(. Perhaps - * detection of correlation should be done as a separate step. - * Meanwhile, we don't want to be too optimistic about the percentage - * of tuples retrieved, for fear of selecting a plan that's bad for - * the materialization case. + * detection of correlation should be done as a separate step. Meanwhile, + * we don't want to be too optimistic about the percentage of tuples + * retrieved, for fear of selecting a plan that's bad for the + * materialization case. */ if (slink->subLinkType == EXISTS_SUBLINK) tuple_fraction = 1.0; /* just like a LIMIT 1 */ @@ -294,8 +292,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) */ node->plan = plan = subquery_planner(subquery, tuple_fraction, NULL); - node->plan_id = PlannerPlanId++; /* Assign unique ID to this - * SubPlan */ + node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */ node->rtable = subquery->rtable; @@ -314,8 +311,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) node->args = NIL; /* - * Make parParam list of params that current query level will pass to - * this child plan. + * Make parParam list of params that current query level will pass to this + * child plan. */ tmpset = bms_copy(plan->extParam); while ((paramid = bms_first_member(tmpset)) >= 0) @@ -328,13 +325,12 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) bms_free(tmpset); /* - * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, - * or MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or - * ARRAY, we just produce a Param referring to the result of - * evaluating the initPlan. For MULTIEXPR, we must build an AND or - * OR-clause of the individual comparison operators, using the - * appropriate lefthand side expressions and Params for the initPlan's - * target items. + * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, or + * MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or ARRAY, + * we just produce a Param referring to the result of evaluating the + * initPlan. For MULTIEXPR, we must build an AND or OR-clause of the + * individual comparison operators, using the appropriate lefthand side + * expressions and Params for the initPlan's target items. */ if (node->parParam == NIL && slink->subLinkType == EXISTS_SUBLINK) { @@ -387,9 +383,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) PlannerInitPlan = lappend(PlannerInitPlan, node); /* - * The executable expressions are returned to become part of the - * outer plan's expression tree; they are not kept in the initplan - * node. + * The executable expressions are returned to become part of the outer + * plan's expression tree; they are not kept in the initplan node. */ if (list_length(exprs) > 1) result = (Node *) (node->useOr ? make_orclause(exprs) : @@ -403,22 +398,22 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) ListCell *l; /* - * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types - * to initPlans, even when they are uncorrelated or undirect - * correlated, because we need to scan the output of the subplan - * for each outer tuple. But if it's an IN (= ANY) test, we might - * be able to use a hashtable to avoid comparing all the tuples. 
+ * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types to + * initPlans, even when they are uncorrelated or undirect correlated, + * because we need to scan the output of the subplan for each outer + * tuple. But if it's an IN (= ANY) test, we might be able to use a + * hashtable to avoid comparing all the tuples. */ if (subplan_is_hashable(slink, node)) node->useHashTable = true; /* - * Otherwise, we have the option to tack a MATERIAL node onto the - * top of the subplan, to reduce the cost of reading it - * repeatedly. This is pointless for a direct-correlated subplan, - * since we'd have to recompute its results each time anyway. For - * uncorrelated/undirect correlated subplans, we add MATERIAL unless - * the subplan's top plan node would materialize its output anyway. + * Otherwise, we have the option to tack a MATERIAL node onto the top + * of the subplan, to reduce the cost of reading it repeatedly. This + * is pointless for a direct-correlated subplan, since we'd have to + * recompute its results each time anyway. For uncorrelated/undirect + * correlated subplans, we add MATERIAL unless the subplan's top plan + * node would materialize its output anyway. */ else if (node->parParam == NIL) { @@ -455,9 +450,9 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual) PlannerParamItem *pitem = list_nth(PlannerParamList, lfirst_int(l)); /* - * The Var or Aggref has already been adjusted to have the - * correct varlevelsup or agglevelsup. We probably don't even - * need to copy it again, but be safe. + * The Var or Aggref has already been adjusted to have the correct + * varlevelsup or agglevelsup. We probably don't even need to + * copy it again, but be safe. */ args = lappend(args, copyObject(pitem->item)); } @@ -545,8 +540,8 @@ convert_sublink_opers(List *lefthand, List *operOids, * * Note: we use make_op_expr in case runtime type conversion function * calls must be inserted for this operator! (But we are not - * expecting to have to resolve unknown Params, so it's okay to - * pass a null pstate.) + * expecting to have to resolve unknown Params, so it's okay to pass a + * null pstate.) */ result = lappend(result, make_op_expr(NULL, @@ -580,8 +575,8 @@ subplan_is_hashable(SubLink *slink, SubPlan *node) /* * The sublink type must be "= ANY" --- that is, an IN operator. (We * require the operator name to be unqualified, which may be overly - * paranoid, or may not be.) XXX since we also check that the - * operators are hashable, the test on operator name may be redundant? + * paranoid, or may not be.) XXX since we also check that the operators + * are hashable, the test on operator name may be redundant? */ if (slink->subLinkType != ANY_SUBLINK) return false; @@ -591,15 +586,15 @@ subplan_is_hashable(SubLink *slink, SubPlan *node) /* * The subplan must not have any direct correlation vars --- else we'd - * have to recompute its output each time, so that the hashtable - * wouldn't gain anything. + * have to recompute its output each time, so that the hashtable wouldn't + * gain anything. */ if (node->parParam != NIL) return false; /* - * The estimated size of the subquery result must fit in work_mem. - * (XXX what about hashtable overhead?) + * The estimated size of the subquery result must fit in work_mem. (XXX + * what about hashtable overhead?) 
*/ subquery_size = node->plan->plan_rows * (MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData))); @@ -607,18 +602,17 @@ subplan_is_hashable(SubLink *slink, SubPlan *node) return false; /* - * The combining operators must be hashable, strict, and - * self-commutative. The need for hashability is obvious, since we - * want to use hashing. Without strictness, behavior in the presence - * of nulls is too unpredictable. (We actually must assume even more - * than plain strictness, see nodeSubplan.c for details.) And - * commutativity ensures that the left and right datatypes are the - * same; this allows us to assume that the combining operators are - * equality for the righthand datatype, so that they can be used to - * compare righthand tuples as well as comparing lefthand to righthand - * tuples. (This last restriction could be relaxed by using two - * different sets of operators with the hash table, but there is no - * obvious usefulness to that at present.) + * The combining operators must be hashable, strict, and self-commutative. + * The need for hashability is obvious, since we want to use hashing. + * Without strictness, behavior in the presence of nulls is too + * unpredictable. (We actually must assume even more than plain + * strictness, see nodeSubplan.c for details.) And commutativity ensures + * that the left and right datatypes are the same; this allows us to + * assume that the combining operators are equality for the righthand + * datatype, so that they can be used to compare righthand tuples as well + * as comparing lefthand to righthand tuples. (This last restriction + * could be relaxed by using two different sets of operators with the hash + * table, but there is no obvious usefulness to that at present.) */ foreach(l, slink->operOids) { @@ -679,24 +673,24 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink) return NULL; /* - * The sub-select must not refer to any Vars of the parent query. - * (Vars of higher levels should be okay, though.) + * The sub-select must not refer to any Vars of the parent query. (Vars of + * higher levels should be okay, though.) */ if (contain_vars_of_level((Node *) subselect, 1)) return NULL; /* - * The left-hand expressions must contain some Vars of the current - * query, else it's not gonna be a join. + * The left-hand expressions must contain some Vars of the current query, + * else it's not gonna be a join. */ left_varnos = pull_varnos((Node *) sublink->lefthand); if (bms_is_empty(left_varnos)) return NULL; /* - * The left-hand expressions mustn't be volatile. (Perhaps we should - * test the combining operators, too? We'd only need to point the - * function directly at the sublink ...) + * The left-hand expressions mustn't be volatile. (Perhaps we should test + * the combining operators, too? We'd only need to point the function + * directly at the sublink ...) */ if (contain_volatile_functions((Node *) sublink->lefthand)) return NULL; @@ -704,10 +698,10 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink) /* * Okay, pull up the sub-select into top range table and jointree. * - * We rely here on the assumption that the outer query has no references - * to the inner (necessarily true, other than the Vars that we build - * below). Therefore this is a lot easier than what - * pull_up_subqueries has to go through. + * We rely here on the assumption that the outer query has no references to + * the inner (necessarily true, other than the Vars that we build below). 
+ * Therefore this is a lot easier than what pull_up_subqueries has to go + * through. */ rte = addRangeTableEntryForSubquery(NULL, subselect, @@ -729,8 +723,8 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink) /* * Build the result qual expressions. As a side effect, - * ininfo->sub_targetlist is filled with a list of Vars representing - * the subselect outputs. + * ininfo->sub_targetlist is filled with a list of Vars representing the + * subselect outputs. */ exprs = convert_sublink_opers(sublink->lefthand, sublink->operOids, @@ -811,8 +805,7 @@ process_sublinks_mutator(Node *node, bool *isTopQual) List *lefthand; /* - * First, recursively process the lefthand-side expressions, if - * any. + * First, recursively process the lefthand-side expressions, if any. */ locTopQual = false; lefthand = (List *) @@ -825,22 +818,22 @@ process_sublinks_mutator(Node *node, bool *isTopQual) } /* - * We should never see a SubPlan expression in the input (since this - * is the very routine that creates 'em to begin with). We shouldn't - * find ourselves invoked directly on a Query, either. + * We should never see a SubPlan expression in the input (since this is + * the very routine that creates 'em to begin with). We shouldn't find + * ourselves invoked directly on a Query, either. */ Assert(!is_subplan(node)); Assert(!IsA(node, Query)); /* * Because make_subplan() could return an AND or OR clause, we have to - * take steps to preserve AND/OR flatness of a qual. We assume the - * input has been AND/OR flattened and so we need no recursion here. + * take steps to preserve AND/OR flatness of a qual. We assume the input + * has been AND/OR flattened and so we need no recursion here. * * If we recurse down through anything other than an AND node, we are - * definitely not at top qual level anymore. (Due to the coding here, - * we will not get called on the List subnodes of an AND, so no check - * is needed for List.) + * definitely not at top qual level anymore. (Due to the coding here, we + * will not get called on the List subnodes of an AND, so no check is + * needed for List.) */ if (and_clause(node)) { @@ -909,8 +902,8 @@ SS_finalize_plan(Plan *plan, List *rtable) /* * First, scan the param list to discover the sets of params that are - * available from outer query levels and my own query level. We do - * this once to save time in the per-plan recursion steps. + * available from outer query levels and my own query level. We do this + * once to save time in the per-plan recursion steps. */ paramid = 0; foreach(l, PlannerParamList) @@ -942,13 +935,12 @@ SS_finalize_plan(Plan *plan, List *rtable) bms_free(valid_params); /* - * Finally, attach any initPlans to the topmost plan node, - * and add their extParams to the topmost node's, too. + * Finally, attach any initPlans to the topmost plan node, and add their + * extParams to the topmost node's, too. * - * We also add the total_cost of each initPlan to the startup cost of - * the top node. This is a conservative overestimate, since in - * fact each initPlan might be executed later than plan startup, - * or even not at all. + * We also add the total_cost of each initPlan to the startup cost of the top + * node. This is a conservative overestimate, since in fact each initPlan + * might be executed later than plan startup, or even not at all. 
*/ plan->initPlan = PlannerInitPlan; PlannerInitPlan = NIL; /* make sure they're not attached twice */ @@ -988,10 +980,10 @@ finalize_plan(Plan *plan, List *rtable, context.outer_params = outer_params; /* - * When we call finalize_primnode, context.paramids sets are - * automatically merged together. But when recursing to self, we have - * to do it the hard way. We want the paramids set to include params - * in subplans as well as at this level. + * When we call finalize_primnode, context.paramids sets are automatically + * merged together. But when recursing to self, we have to do it the hard + * way. We want the paramids set to include params in subplans as well as + * at this level. */ /* Find params in targetlist and qual */ @@ -1011,17 +1003,18 @@ finalize_plan(Plan *plan, List *rtable, &context); /* - * we need not look at indexqualorig, since it will have the - * same param references as indexqual. + * we need not look at indexqualorig, since it will have the same + * param references as indexqual. */ break; case T_BitmapIndexScan: finalize_primnode((Node *) ((BitmapIndexScan *) plan)->indexqual, &context); + /* - * we need not look at indexqualorig, since it will have the - * same param references as indexqual. + * we need not look at indexqualorig, since it will have the same + * param references as indexqual. */ break; @@ -1038,14 +1031,14 @@ finalize_plan(Plan *plan, List *rtable, case T_SubqueryScan: /* - * In a SubqueryScan, SS_finalize_plan has already been run on - * the subplan by the inner invocation of subquery_planner, so - * there's no need to do it again. Instead, just pull out the - * subplan's extParams list, which represents the params it - * needs from my level and higher levels. + * In a SubqueryScan, SS_finalize_plan has already been run on the + * subplan by the inner invocation of subquery_planner, so there's + * no need to do it again. Instead, just pull out the subplan's + * extParams list, which represents the params it needs from my + * level and higher levels. */ context.paramids = bms_add_members(context.paramids, - ((SubqueryScan *) plan)->subplan->extParam); + ((SubqueryScan *) plan)->subplan->extParam); break; case T_FunctionScan: @@ -1170,8 +1163,8 @@ finalize_plan(Plan *plan, List *rtable, plan->allParam = context.paramids; /* - * For speed at execution time, make sure extParam/allParam are - * actually NULL if they are empty sets. + * For speed at execution time, make sure extParam/allParam are actually + * NULL if they are empty sets. */ if (bms_is_empty(plan->extParam)) { @@ -1212,8 +1205,8 @@ finalize_primnode(Node *node, finalize_primnode_context *context) /* Add outer-level params needed by the subplan to paramids */ context->paramids = bms_join(context->paramids, - bms_intersect(subplan->plan->extParam, - context->outer_params)); + bms_intersect(subplan->plan->extParam, + context->outer_params)); /* fall through to recurse into subplan args */ } return expression_tree_walker(node, finalize_primnode, @@ -1241,7 +1234,7 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan, int paramid; /* - * Set up for a new level of subquery. This is just to keep + * Set up for a new level of subquery. This is just to keep * SS_finalize_plan from becoming confused. 
*/ PlannerQueryLevel++; @@ -1262,16 +1255,15 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan, node = makeNode(SubPlan); node->subLinkType = EXPR_SUBLINK; node->plan = plan; - node->plan_id = PlannerPlanId++; /* Assign unique ID to this - * SubPlan */ + node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */ node->rtable = root->parse->rtable; PlannerInitPlan = lappend(PlannerInitPlan, node); /* - * Make parParam list of params that current query level will pass to - * this child plan. (In current usage there probably aren't any.) + * Make parParam list of params that current query level will pass to this + * child plan. (In current usage there probably aren't any.) */ tmpset = bms_copy(plan->extParam); while ((paramid = bms_first_member(tmpset)) >= 0) diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index 9624a4ad135..ece6133c144 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -16,7 +16,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.30 2005/08/01 20:31:09 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.31 2005/10/15 02:49:20 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -143,8 +143,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join) Query *subquery = rte->subquery; /* - * Is this a subquery RTE, and if so, is the subquery simple - * enough to pull up? (If not, do nothing at this node.) + * Is this a subquery RTE, and if so, is the subquery simple enough to + * pull up? (If not, do nothing at this node.) * * If we are inside an outer join, only pull up subqueries whose * targetlists are nullable --- otherwise substituting their tlist @@ -153,8 +153,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join) * * XXX This could be improved by generating pseudo-variables for such * expressions; we'd have to figure out how to get the pseudo- - * variables evaluated at the right place in the modified plan - * tree. Fix it someday. + * variables evaluated at the right place in the modified plan tree. + * Fix it someday. */ if (rte->rtekind == RTE_SUBQUERY && is_simple_subquery(subquery) && @@ -166,53 +166,53 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join) ListCell *rt; /* - * Need a modifiable copy of the subquery to hack on. Even if - * we didn't sometimes choose not to pull up below, we must do - * this to avoid problems if the same subquery is referenced - * from multiple jointree items (which can't happen normally, - * but might after rule rewriting). + * Need a modifiable copy of the subquery to hack on. Even if we + * didn't sometimes choose not to pull up below, we must do this + * to avoid problems if the same subquery is referenced from + * multiple jointree items (which can't happen normally, but might + * after rule rewriting). */ subquery = copyObject(subquery); /* * Create a PlannerInfo data structure for this subquery. * - * NOTE: the next few steps should match the first processing - * in subquery_planner(). Can we refactor to avoid code - * duplication, or would that just make things uglier? + * NOTE: the next few steps should match the first processing in + * subquery_planner(). Can we refactor to avoid code duplication, + * or would that just make things uglier? 
*/ subroot = makeNode(PlannerInfo); subroot->parse = subquery; /* - * Pull up any IN clauses within the subquery's WHERE, so that - * we don't leave unoptimized INs behind. + * Pull up any IN clauses within the subquery's WHERE, so that we + * don't leave unoptimized INs behind. */ subroot->in_info_list = NIL; if (subquery->hasSubLinks) subquery->jointree->quals = pull_up_IN_clauses(subroot, - subquery->jointree->quals); + subquery->jointree->quals); /* * Recursively pull up the subquery's subqueries, so that this * routine's processing is complete for its jointree and * rangetable. * - * Note: 'false' is correct here even if we are within an outer - * join in the upper query; the lower query starts with a - * clean slate for outer-join semantics. + * Note: 'false' is correct here even if we are within an outer join + * in the upper query; the lower query starts with a clean slate + * for outer-join semantics. */ subquery->jointree = (FromExpr *) pull_up_subqueries(subroot, (Node *) subquery->jointree, false); /* - * Now we must recheck whether the subquery is still simple - * enough to pull up. If not, abandon processing it. + * Now we must recheck whether the subquery is still simple enough + * to pull up. If not, abandon processing it. * - * We don't really need to recheck all the conditions involved, - * but it's easier just to keep this "if" looking the same as - * the one above. + * We don't really need to recheck all the conditions involved, but + * it's easier just to keep this "if" looking the same as the one + * above. */ if (is_simple_subquery(subquery) && (!below_outer_join || has_nullable_targetlist(subquery))) @@ -224,10 +224,10 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join) /* * Give up, return unmodified RangeTblRef. * - * Note: The work we just did will be redone when the - * subquery gets planned on its own. Perhaps we could - * avoid that by storing the modified subquery back into - * the rangetable, but I'm not gonna risk it now. + * Note: The work we |
