| field | value | date |
|---|---|---|
| author | Bruce Momjian | 2015-05-24 01:35:49 +0000 |
| committer | Bruce Momjian | 2015-05-24 01:35:49 +0000 |
| commit | 807b9e0dff663c5da875af7907a5106c0ff90673 | |
| tree | 89a0cfbd3c9801dcb04aae4ccf2fee935092f958 /src/bin/pg_upgrade | |
| parent | 225892552bd3052982d2b97b749e5945ea71facc | |
pgindent run for 9.5
Diffstat (limited to 'src/bin/pg_upgrade')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/bin/pg_upgrade/check.c | 29 |
| -rw-r--r-- | src/bin/pg_upgrade/dump.c | 8 |
| -rw-r--r-- | src/bin/pg_upgrade/info.c | 160 |
| -rw-r--r-- | src/bin/pg_upgrade/option.c | 12 |
| -rw-r--r-- | src/bin/pg_upgrade/pg_upgrade.c | 6 |
| -rw-r--r-- | src/bin/pg_upgrade/pg_upgrade.h | 6 |
| -rw-r--r-- | src/bin/pg_upgrade/relfilenode.c | 8 |
| -rw-r--r-- | src/bin/pg_upgrade/server.c | 11 |
| -rw-r--r-- | src/bin/pg_upgrade/version.c | 4 |

9 files changed, 126 insertions, 118 deletions
```diff
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 99c66be7fb4..5a91871c359 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -317,16 +317,16 @@ equivalent_locale(int category, const char *loca, const char *locb)
     int         lenb;
 
     /*
-     * If the names are equal, the locales are equivalent. Checking this
-     * first avoids calling setlocale() in the common case that the names
-     * are equal. That's a good thing, if setlocale() is buggy, for example.
+     * If the names are equal, the locales are equivalent. Checking this first
+     * avoids calling setlocale() in the common case that the names are equal.
+     * That's a good thing, if setlocale() is buggy, for example.
      */
     if (pg_strcasecmp(loca, locb) == 0)
         return true;
 
     /*
-     * Not identical. Canonicalize both names, remove the encoding parts,
-     * and try again.
+     * Not identical. Canonicalize both names, remove the encoding parts, and
+     * try again.
      */
     canona = get_canonical_locale_name(category, loca);
     chara = strrchr(canona, '.');
@@ -512,7 +512,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
         {
             /* reproduce warning from CREATE TABLESPACE that is in the log */
             pg_log(PG_WARNING,
-            "\nWARNING: user-defined tablespace locations should not be inside the data directory, e.g. %s\n", old_tablespace_dir);
+                   "\nWARNING: user-defined tablespace locations should not be inside the data directory, e.g. %s\n", old_tablespace_dir);
 
             /* Unlink file in case it is left over from a previous run. */
             unlink(*deletion_script_file_name);
@@ -611,8 +611,8 @@ check_is_install_user(ClusterInfo *cluster)
 
     /*
      * We only allow the install user in the new cluster (see comment below)
-     * and we preserve pg_authid.oid, so this must be the install user in
-     * the old cluster too.
+     * and we preserve pg_authid.oid, so this must be the install user in the
+     * old cluster too.
      */
     if (PQntuples(res) != 1 ||
         atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
@@ -681,10 +681,13 @@ check_proper_datallowconn(ClusterInfo *cluster)
         }
         else
         {
-            /* avoid datallowconn == false databases from being skipped on restore */
+            /*
+             * avoid datallowconn == false databases from being skipped on
+             * restore
+             */
             if (strcmp(datallowconn, "f") == 0)
                 pg_fatal("All non-template0 databases must allow connections, "
-                     "i.e. their pg_database.datallowconn must be true\n");
+                         "i.e. their pg_database.datallowconn must be true\n");
         }
     }
 
@@ -873,7 +876,7 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
                             "           'pg_catalog.regconfig'::pg_catalog.regtype, "
                             "           'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
                             "       c.relnamespace = n.oid AND "
-                          "       n.nspname NOT IN ('pg_catalog', 'information_schema')");
+                            "       n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
     ntups = PQntuples(res);
     i_nspname = PQfnumber(res, "nspname");
@@ -964,7 +967,7 @@ check_for_jsonb_9_4_usage(ClusterInfo *cluster)
                             "       c.relnamespace = n.oid AND "
     /* exclude possible orphaned temp tables */
                             "       n.nspname !~ '^pg_temp_' AND "
-                          "       n.nspname NOT IN ('pg_catalog', 'information_schema')");
+                            "       n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
     ntups = PQntuples(res);
     i_nspname = PQfnumber(res, "nspname");
@@ -999,7 +1002,7 @@ check_for_jsonb_9_4_usage(ClusterInfo *cluster)
     {
         pg_log(PG_REPORT, "fatal\n");
         pg_fatal("Your installation contains one of the JSONB data types in user tables.\n"
-          "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n"
+                 "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n"
                  "be upgraded. You can remove the problem tables and restart the upgrade. A list\n"
                  "of the problem columns is in the file:\n"
                  "    %s\n\n", output_path);
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 2c20e847ac0..6d6f84d7252 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -111,7 +111,7 @@ optionally_create_toast_tables(void)
                                 "FROM pg_catalog.pg_class c, "
                                 "       pg_catalog.pg_namespace n "
                                 "WHERE  c.relnamespace = n.oid AND "
-                              "        n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
+                                "       n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
                                 "c.relkind IN ('r', 'm') AND "
                                 "c.reltoastrelid = 0");
 
@@ -122,12 +122,12 @@ optionally_create_toast_tables(void)
     {
         /* enable auto-oid-numbered TOAST creation if needed */
         PQclear(executeQueryOrDie(conn, "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%d'::pg_catalog.oid);",
-                                OPTIONALLY_CREATE_TOAST_OID));
+                                  OPTIONALLY_CREATE_TOAST_OID));
 
         /* dummy command that also triggers check for required TOAST table */
         PQclear(executeQueryOrDie(conn, "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);",
-                            quote_identifier(PQgetvalue(res, rowno, i_nspname)),
-                            quote_identifier(PQgetvalue(res, rowno, i_relname))));
+                                  quote_identifier(PQgetvalue(res, rowno, i_nspname)),
+                                  quote_identifier(PQgetvalue(res, rowno, i_relname))));
     }
 
     PQclear(res);
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index c0a56012090..e158c9ff8b0 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -38,16 +38,16 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
                  int *nmaps, const char *old_pgdata, const char *new_pgdata)
 {
     FileNameMap *maps;
-    int         old_relnum, new_relnum;
+    int         old_relnum,
+                new_relnum;
     int         num_maps = 0;
 
     maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
                                      old_db->rel_arr.nrels);
 
     /*
-     * The old database shouldn't have more relations than the new one.
-     * We force the new cluster to have a TOAST table if the old table
-     * had one.
+     * The old database shouldn't have more relations than the new one. We
+     * force the new cluster to have a TOAST table if the old table had one.
      */
     if (old_db->rel_arr.nrels > new_db->rel_arr.nrels)
         pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n",
@@ -62,15 +62,15 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 
         /*
          * It is possible that the new cluster has a TOAST table for a table
-         * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed the
-         * NUMERIC length computation. Therefore, if we have a TOAST table
-         * in the new cluster that doesn't match, skip over it and continue
-         * processing. It is possible this TOAST table used an OID that was
-         * reserved in the old cluster, but we have no way of testing that,
-         * and we would have already gotten an error at the new cluster schema
-         * creation stage. Fortunately, since we only restore the OID counter
-         * after schema restore, and restore in OID order via pg_dump, a
-         * conflict would only happen if the new TOAST table had a very low
+         * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed
+         * the NUMERIC length computation. Therefore, if we have a TOAST
+         * table in the new cluster that doesn't match, skip over it and
+         * continue processing. It is possible this TOAST table used an OID
+         * that was reserved in the old cluster, but we have no way of testing
+         * that, and we would have already gotten an error at the new cluster
+         * schema creation stage. Fortunately, since we only restore the OID
+         * counter after schema restore, and restore in OID order via pg_dump,
+         * a conflict would only happen if the new TOAST table had a very low
          * OID. However, TOAST tables created long after initial table
          * creation can have any OID, particularly after OID wraparound.
          */
@@ -330,75 +330,77 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
      */
 
     snprintf(query, sizeof(query),
-    /* get regular heap */
-            "WITH regular_heap (reloid) AS ( "
-            "  SELECT c.oid "
-            "  FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
-            "         ON c.relnamespace = n.oid "
-            "    LEFT OUTER JOIN pg_catalog.pg_index i "
-            "         ON c.oid = i.indexrelid "
-            "  WHERE relkind IN ('r', 'm', 'i', 'S') AND "
-    /*
-     * pg_dump only dumps valid indexes; testing indisready is necessary in
-     * 9.2, and harmless in earlier/later versions.
-     */
-            "    i.indisvalid IS DISTINCT FROM false AND "
-            "    i.indisready IS DISTINCT FROM false AND "
-    /* exclude possible orphaned temp tables */
-            "    ((n.nspname !~ '^pg_temp_' AND "
-            "      n.nspname !~ '^pg_toast_temp_' AND "
-    /* skip pg_toast because toast index have relkind == 'i', not 't' */
-            "      n.nspname NOT IN ('pg_catalog', 'information_schema', "
-            "                        'binary_upgrade', 'pg_toast') AND "
-            "      c.oid >= %u) OR "
-            "     (n.nspname = 'pg_catalog' AND "
-            "      relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "
-    /*
-     * We have to gather the TOAST tables in later steps because we
-     * can't schema-qualify TOAST tables.
-     */
-    /* get TOAST heap */
-            "  toast_heap (reloid) AS ( "
-            "  SELECT reltoastrelid "
-            "  FROM regular_heap JOIN pg_catalog.pg_class c "
-            "      ON regular_heap.reloid = c.oid "
-            "      AND c.reltoastrelid != %u), "
-    /* get indexes on regular and TOAST heap */
-            "  all_index (reloid) AS ( "
-            "  SELECT indexrelid "
-            "  FROM pg_index "
-            "  WHERE indisvalid "
-            "    AND indrelid IN (SELECT reltoastrelid "
-            "        FROM (SELECT reloid FROM regular_heap "
-            "              UNION ALL "
-            "              SELECT reloid FROM toast_heap) all_heap "
-            "            JOIN pg_catalog.pg_class c "
-            "            ON all_heap.reloid = c.oid "
-            "            AND c.reltoastrelid != %u)) "
-    /* get all rels */
-            "SELECT c.oid, n.nspname, c.relname, "
-            "  c.relfilenode, c.reltablespace, %s "
-            "FROM (SELECT reloid FROM regular_heap "
-            "      UNION ALL "
-            "      SELECT reloid FROM toast_heap "
-            "      UNION ALL "
-            "      SELECT reloid FROM all_index) all_rels "
-            "  JOIN pg_catalog.pg_class c "
-            "      ON all_rels.reloid = c.oid "
-            "  JOIN pg_catalog.pg_namespace n "
-            "     ON c.relnamespace = n.oid "
-            "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
-            "     ON c.reltablespace = t.oid "
+    /* get regular heap */
+             "WITH regular_heap (reloid) AS ( "
+             "  SELECT c.oid "
+             "  FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
+             "         ON c.relnamespace = n.oid "
+             "    LEFT OUTER JOIN pg_catalog.pg_index i "
+             "         ON c.oid = i.indexrelid "
+             "  WHERE relkind IN ('r', 'm', 'i', 'S') AND "
+
+    /*
+     * pg_dump only dumps valid indexes; testing indisready is necessary in
+     * 9.2, and harmless in earlier/later versions.
+     */
+             "    i.indisvalid IS DISTINCT FROM false AND "
+             "    i.indisready IS DISTINCT FROM false AND "
+    /* exclude possible orphaned temp tables */
+             "    ((n.nspname !~ '^pg_temp_' AND "
+             "      n.nspname !~ '^pg_toast_temp_' AND "
+    /* skip pg_toast because toast index have relkind == 'i', not 't' */
+             "      n.nspname NOT IN ('pg_catalog', 'information_schema', "
+             "                        'binary_upgrade', 'pg_toast') AND "
+             "      c.oid >= %u) OR "
+             "     (n.nspname = 'pg_catalog' AND "
+             "      relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "
+
+    /*
+     * We have to gather the TOAST tables in later steps because we can't
+     * schema-qualify TOAST tables.
+     */
+    /* get TOAST heap */
+             "  toast_heap (reloid) AS ( "
+             "  SELECT reltoastrelid "
+             "  FROM regular_heap JOIN pg_catalog.pg_class c "
+             "      ON regular_heap.reloid = c.oid "
+             "      AND c.reltoastrelid != %u), "
+    /* get indexes on regular and TOAST heap */
+             "  all_index (reloid) AS ( "
+             "  SELECT indexrelid "
+             "  FROM pg_index "
+             "  WHERE indisvalid "
+             "    AND indrelid IN (SELECT reltoastrelid "
+             "        FROM (SELECT reloid FROM regular_heap "
+             "              UNION ALL "
+             "              SELECT reloid FROM toast_heap) all_heap "
+             "            JOIN pg_catalog.pg_class c "
+             "            ON all_heap.reloid = c.oid "
+             "            AND c.reltoastrelid != %u)) "
+    /* get all rels */
+             "SELECT c.oid, n.nspname, c.relname, "
+             "  c.relfilenode, c.reltablespace, %s "
+             "FROM (SELECT reloid FROM regular_heap "
+             "      UNION ALL "
+             "      SELECT reloid FROM toast_heap "
+             "      UNION ALL "
+             "      SELECT reloid FROM all_index) all_rels "
+             "  JOIN pg_catalog.pg_class c "
+             "      ON all_rels.reloid = c.oid "
+             "  JOIN pg_catalog.pg_namespace n "
+             "     ON c.relnamespace = n.oid "
+             "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
+             "     ON c.reltablespace = t.oid "
 /* we preserve pg_class.oid so we sort by it to match old/new */
-            "ORDER BY 1;",
-            FirstNormalObjectId,
+             "ORDER BY 1;",
+             FirstNormalObjectId,
 /* does pg_largeobject_metadata need to be migrated? */
-            (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
-            "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
-            InvalidOid, InvalidOid,
+             (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
+             "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
+             InvalidOid, InvalidOid,
 /* 9.2 removed the spclocation column */
-            (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
-            "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+             (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+             "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
 
     res = executeQueryOrDie(conn, "%s", query);
 
diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c
index b8510561350..90f1401549b 100644
--- a/src/bin/pg_upgrade/option.c
+++ b/src/bin/pg_upgrade/option.c
@@ -142,7 +142,7 @@ parseCommandLine(int argc, char *argv[])
                     old_cluster.pgopts = pg_strdup(optarg);
                 else
                 {
-                    char *old_pgopts = old_cluster.pgopts;
+                    char       *old_pgopts = old_cluster.pgopts;
 
                     old_cluster.pgopts = psprintf("%s %s", old_pgopts, optarg);
                     free(old_pgopts);
@@ -155,7 +155,7 @@ parseCommandLine(int argc, char *argv[])
                     new_cluster.pgopts = pg_strdup(optarg);
                 else
                 {
-                    char *new_pgopts = new_cluster.pgopts;
+                    char       *new_pgopts = new_cluster.pgopts;
 
                     new_cluster.pgopts = psprintf("%s %s", new_pgopts, optarg);
                     free(new_pgopts);
@@ -249,13 +249,15 @@ parseCommandLine(int argc, char *argv[])
                              "PGDATANEW", "-D", "new cluster data resides");
 
 #ifdef WIN32
+
     /*
      * On Windows, initdb --sync-only will fail with a "Permission denied"
-     * error on file pg_upgrade_utility.log if pg_upgrade is run inside
-     * the new cluster directory, so we do a check here.
+     * error on file pg_upgrade_utility.log if pg_upgrade is run inside the
+     * new cluster directory, so we do a check here.
      */
     {
-        char        cwd[MAXPGPATH], new_cluster_pgdata[MAXPGPATH];
+        char        cwd[MAXPGPATH],
+                    new_cluster_pgdata[MAXPGPATH];
 
         strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH);
         canonicalize_path(new_cluster_pgdata);
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 4e6a9f91be6..8cdfaf35eff 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -333,8 +333,8 @@ create_new_objects(void)
     check_ok();
 
     /*
-     * We don't have minmxids for databases or relations in pre-9.3
-     * clusters, so set those after we have restores the schemas.
+     * We don't have minmxids for databases or relations in pre-9.3 clusters,
+     * so set those after we have restores the schemas.
      */
     if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
         set_frozenxids(true);
@@ -473,7 +473,7 @@ copy_clog_xlog_xid(void)
     /* now reset the wal archives in the new cluster */
     prep_status("Resetting WAL archives");
     exec_prog(UTILITY_LOG_FILE, NULL, true,
-    /* use timeline 1 to match controldata and no WAL history file */
+              /* use timeline 1 to match controldata and no WAL history file */
               "\"%s/pg_resetxlog\" -l 00000001%s \"%s\"", new_cluster.bindir,
               old_cluster.controldata.nextxlogfile + 8,
               new_cluster.pgdata);
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index aecf0df30c2..13aa891d59d 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -329,7 +329,7 @@ extern OSInfo os_info;
 
 /* check.c */
 void        output_check_banner(bool live_check);
-void check_and_dump_old_cluster(bool live_check);
+void        check_and_dump_old_cluster(bool live_check);
 void        check_new_cluster(void);
 void        report_clusters_compatible(void);
 void        issue_warnings(void);
@@ -358,7 +358,7 @@ void optionally_create_toast_tables(void);
 
 #define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1"
 
-bool exec_prog(const char *log_file, const char *opt_log_file,
+bool        exec_prog(const char *log_file, const char *opt_log_file,
           bool throw_error, const char *fmt,...) pg_attribute_printf(4, 5);
 void        verify_directories(void);
 bool        pid_lock_file_exists(const char *datadir);
@@ -471,7 +471,7 @@ void pg_putenv(const char *var, const char *val);
 
 void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
                                      bool check_mode);
-void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
+void        old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
 
 /* parallel.c */
 void parallel_exec_prog(const char *log_file, const char *opt_log_file,
diff --git a/src/bin/pg_upgrade/relfilenode.c b/src/bin/pg_upgrade/relfilenode.c
index 7b3215af566..c22df429492 100644
--- a/src/bin/pg_upgrade/relfilenode.c
+++ b/src/bin/pg_upgrade/relfilenode.c
@@ -35,10 +35,10 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
                 user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
 
     /*
-     * Transferring files by tablespace is tricky because a single database can
-     * use multiple tablespaces. For non-parallel mode, we just pass a NULL
-     * tablespace path, which matches all tablespaces. In parallel mode, we
-     * pass the default tablespace and all user-created tablespaces and let
+     * Transferring files by tablespace is tricky because a single database
+     * can use multiple tablespaces. For non-parallel mode, we just pass a
+     * NULL tablespace path, which matches all tablespaces. In parallel mode,
+     * we pass the default tablespace and all user-created tablespaces and let
      * those operations happen in parallel.
      */
     if (user_opts.jobs <= 1)
diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c
index 8d8e7d70734..8c6b6da5153 100644
--- a/src/bin/pg_upgrade/server.c
+++ b/src/bin/pg_upgrade/server.c
@@ -204,11 +204,12 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
     /*
      * Since PG 9.1, we have used -b to disable autovacuum. For earlier
      * releases, setting autovacuum=off disables cleanup vacuum and analyze,
-     * but freeze vacuums can still happen, so we set autovacuum_freeze_max_age
-     * to its maximum. (autovacuum_multixact_freeze_max_age was introduced
-     * after 9.1, so there is no need to set that.) We assume all datfrozenxid
-     * and relfrozenxid values are less than a gap of 2000000000 from the current
-     * xid counter, so autovacuum will not touch them.
+     * but freeze vacuums can still happen, so we set
+     * autovacuum_freeze_max_age to its maximum.
+     * (autovacuum_multixact_freeze_max_age was introduced after 9.1, so there
+     * is no need to set that.) We assume all datfrozenxid and relfrozenxid
+     * values are less than a gap of 2000000000 from the current xid counter,
+     * so autovacuum will not touch them.
      *
      * Turn off durability requirements to improve object creation speed, and
      * we only modify the new cluster, so only use it there. If there is a
diff --git a/src/bin/pg_upgrade/version.c b/src/bin/pg_upgrade/version.c
index e3e7387c92d..9954daea17e 100644
--- a/src/bin/pg_upgrade/version.c
+++ b/src/bin/pg_upgrade/version.c
@@ -167,9 +167,9 @@ old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
     {
         pg_log(PG_REPORT, "fatal\n");
         pg_fatal("Your installation contains the \"line\" data type in user tables. This\n"
-          "data type changed its internal and input/output format between your old\n"
+                 "data type changed its internal and input/output format between your old\n"
                  "and new clusters so this cluster cannot currently be upgraded. You can\n"
-        "remove the problem tables and restart the upgrade. A list of the problem\n"
+                 "remove the problem tables and restart the upgrade. A list of the problem\n"
                  "columns is in the file:\n"
                  "    %s\n\n", output_path);
     }
```
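The changes above are purely mechanical: pgindent, PostgreSQL's source-formatting tool, re-wraps block comments to the standard line width, splits multi-variable declarations onto one line per variable, and re-aligns declarations and call continuation lines. Below is a minimal C sketch of those two patterns, borrowing the variable names from `gen_db_file_maps()` in info.c above; the function itself is a hypothetical stand-in for illustration, not pg_upgrade code.

```c
/* Hypothetical stand-in function; only the formatting patterns are real. */
void
pgindent_style_example(void)
{
    /* After pgindent: one declared variable per line, aligned to the name column */
    int         old_relnum,
                new_relnum;

    /*
     * After pgindent: comment text re-flowed so each line fits within the
     * standard width, as in the info.c and check.c hunks above.
     */
    old_relnum = 0;
    new_relnum = old_relnum + 1;
    (void) new_relnum;          /* silence unused-variable warnings */
}
```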
