summaryrefslogtreecommitdiff
path: root/src/backend/libpq/hba.c
diff options
context:
space:
mode:
author: Tom Lane <2020-09-06 17:57:10 +0000>
committer: Tom Lane <2020-09-06 18:13:19 +0000>
commit: 8e3c58e6e459b285d37edb6129e412ed25cd90c1 (patch)
tree: 853632a3efab6405a36dca91f2e69d5fe399536b /src/backend/libpq/hba.c
parent: 68b603e1a934dcd82e259b7776565ec1776e7a29 (diff)
Refactor pg_get_line() to expose an alternative StringInfo-based API.
Letting the caller provide a StringInfo to read into is helpful when the caller needs to merge lines or otherwise modify the data after it's been read. Notably, now the code added by commit 8f8154a50 can use pg_get_line_append() instead of having its own copy of that logic. A follow-on commit will also make use of this. Also, since StringInfo buffers are a minimum of 1KB long, blindly using pg_get_line() in a loop can eat a lot more memory than one would expect. I discovered for instance that commit e0f05cd5b caused initdb to consume circa 10MB to read postgres.bki, even though that's under 1MB worth of data. A less memory-hungry alternative is to re-use the same StringInfo for all lines and pg_strdup the results. Discussion: https://postgr.es/m/[email protected]
Diffstat (limited to 'src/backend/libpq/hba.c')
-rw-r--r--  src/backend/libpq/hba.c | 40
1 file changed, 14 insertions(+), 26 deletions(-)
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 5991a21cf2d..9f106653f3f 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -502,33 +502,8 @@ tokenize_file(const char *filename, FILE *file, List **tok_lines, int elevel)
/* Collect the next input line, handling backslash continuations */
resetStringInfo(&buf);
- while (!feof(file) && !ferror(file))
+ while (pg_get_line_append(file, &buf))
{
- /* Make sure there's a reasonable amount of room in the buffer */
- enlargeStringInfo(&buf, 128);
-
- /* Read some data, appending it to what we already have */
- if (fgets(buf.data + buf.len, buf.maxlen - buf.len, file) == NULL)
- {
- int save_errno = errno;
-
- if (!ferror(file))
- break; /* normal EOF */
- /* I/O error! */
- ereport(elevel,
- (errcode_for_file_access(),
- errmsg("could not read file \"%s\": %m", filename)));
- err_msg = psprintf("could not read file \"%s\": %s",
- filename, strerror(save_errno));
- resetStringInfo(&buf);
- break;
- }
- buf.len += strlen(buf.data + buf.len);
-
- /* If we haven't got a whole line, loop to read more */
- if (!(buf.len > 0 && buf.data[buf.len - 1] == '\n'))
- continue;
-
/* Strip trailing newline, including \r in case we're on Windows */
buf.len = pg_strip_crlf(buf.data);
@@ -551,6 +526,19 @@ tokenize_file(const char *filename, FILE *file, List **tok_lines, int elevel)
break;
}
+ if (ferror(file))
+ {
+ /* I/O error! */
+ int save_errno = errno;
+
+ ereport(elevel,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m", filename)));
+ err_msg = psprintf("could not read file \"%s\": %s",
+ filename, strerror(save_errno));
+ break;
+ }
+
/* Parse fields */
lineptr = buf.data;
while (*lineptr && err_msg == NULL)