/Users/deen/code/yugabyte-db/src/postgres/src/bin/pg_dump/pg_backup_archiver.c
Line | Count | Source (jump to first uncovered line) |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * pg_backup_archiver.c |
4 | | * |
5 | | * Private implementation of the archiver routines. |
6 | | * |
7 | | * See the headers to pg_restore for more details. |
8 | | * |
9 | | * Copyright (c) 2000, Philip Warner |
10 | | * Rights are granted to use this software in any way so long |
11 | | * as this notice is not removed. |
12 | | * |
13 | | * The author is not responsible for loss or damages that may |
14 | | * result from its use. |
15 | | * |
16 | | * |
17 | | * IDENTIFICATION |
18 | | * src/bin/pg_dump/pg_backup_archiver.c |
19 | | * |
20 | | *------------------------------------------------------------------------- |
21 | | */ |
22 | | #include "postgres_fe.h" |
23 | | |
24 | | #include <ctype.h> |
25 | | #include <fcntl.h> |
26 | | #include <unistd.h> |
27 | | #include <sys/stat.h> |
28 | | #include <sys/wait.h> |
29 | | #ifdef WIN32 |
30 | | #include <io.h> |
31 | | #endif |
32 | | |
33 | | #include "parallel.h" |
34 | | #include "pg_backup_archiver.h" |
35 | | #include "pg_backup_db.h" |
36 | | #include "pg_backup_utils.h" |
37 | | #include "dumputils.h" |
38 | | #include "fe_utils/string_utils.h" |
39 | | |
40 | | #include "libpq/libpq-fs.h" |
41 | | |
42 | 0 | #define TEXT_DUMP_HEADER "--\n-- YSQL database dump\n--\n\n" |
43 | 0 | #define TEXT_DUMPALL_HEADER "--\n-- YSQL database cluster dump\n--\n\n" |
44 | | |
45 | | /* state needed to save/restore an archive's output target */ |
46 | | typedef struct _outputContext |
47 | | { |
48 | | void *OF; |
49 | | int gzOut; |
50 | | } OutputContext; |
51 | | |
52 | | /* translator: this is a module name */ |
53 | | static const char *modulename = gettext_noop("archiver"); |
54 | | |
55 | | |
56 | | static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt, |
57 | | const int compression, bool dosync, ArchiveMode mode, |
58 | | SetupWorkerPtrType setupWorkerPtr); |
59 | | static void _getObjectDescription(PQExpBuffer buf, TocEntry *te, |
60 | | ArchiveHandle *AH); |
61 | | static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData); |
62 | | static char *replace_line_endings(const char *str); |
63 | | static void _doSetFixedOutputState(ArchiveHandle *AH); |
64 | | static void _doSetSessionAuth(ArchiveHandle *AH, const char *user); |
65 | | static void _doSetWithOids(ArchiveHandle *AH, const bool withOids); |
66 | | static void _reconnectToDB(ArchiveHandle *AH, const char *dbname); |
67 | | static void _becomeUser(ArchiveHandle *AH, const char *user); |
68 | | static void _becomeOwner(ArchiveHandle *AH, TocEntry *te); |
69 | | static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName); |
70 | | static void _selectTablespace(ArchiveHandle *AH, const char *tablespace); |
71 | | static void processEncodingEntry(ArchiveHandle *AH, TocEntry *te); |
72 | | static void processStdStringsEntry(ArchiveHandle *AH, TocEntry *te); |
73 | | static void processSearchPathEntry(ArchiveHandle *AH, TocEntry *te); |
74 | | static teReqs _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH); |
75 | | static RestorePass _tocEntryRestorePass(TocEntry *te); |
76 | | static bool _tocEntryIsACL(TocEntry *te); |
77 | | static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te); |
78 | | static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te); |
79 | | static void buildTocEntryArrays(ArchiveHandle *AH); |
80 | | static void _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te); |
81 | | static int _discoverArchiveFormat(ArchiveHandle *AH); |
82 | | |
83 | | static int RestoringToDB(ArchiveHandle *AH); |
84 | | static void dump_lo_buf(ArchiveHandle *AH); |
85 | | static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim); |
86 | | static void SetOutput(ArchiveHandle *AH, const char *filename, int compression); |
87 | | static OutputContext SaveOutput(ArchiveHandle *AH); |
88 | | static void RestoreOutput(ArchiveHandle *AH, OutputContext savedContext); |
89 | | |
90 | | static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel); |
91 | | static void restore_toc_entries_prefork(ArchiveHandle *AH, |
92 | | TocEntry *pending_list); |
93 | | static void restore_toc_entries_parallel(ArchiveHandle *AH, |
94 | | ParallelState *pstate, |
95 | | TocEntry *pending_list); |
96 | | static void restore_toc_entries_postfork(ArchiveHandle *AH, |
97 | | TocEntry *pending_list); |
98 | | static void par_list_header_init(TocEntry *l); |
99 | | static void par_list_append(TocEntry *l, TocEntry *te); |
100 | | static void par_list_remove(TocEntry *te); |
101 | | static void move_to_ready_list(TocEntry *pending_list, TocEntry *ready_list, |
102 | | RestorePass pass); |
103 | | static TocEntry *get_next_work_item(ArchiveHandle *AH, |
104 | | TocEntry *ready_list, |
105 | | ParallelState *pstate); |
106 | | static void mark_dump_job_done(ArchiveHandle *AH, |
107 | | TocEntry *te, |
108 | | int status, |
109 | | void *callback_data); |
110 | | static void mark_restore_job_done(ArchiveHandle *AH, |
111 | | TocEntry *te, |
112 | | int status, |
113 | | void *callback_data); |
114 | | static void fix_dependencies(ArchiveHandle *AH); |
115 | | static bool has_lock_conflicts(TocEntry *te1, TocEntry *te2); |
116 | | static void repoint_table_dependencies(ArchiveHandle *AH); |
117 | | static void identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te); |
118 | | static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te, |
119 | | TocEntry *ready_list); |
120 | | static void mark_create_done(ArchiveHandle *AH, TocEntry *te); |
121 | | static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te); |
122 | | |
123 | | static void StrictNamesCheck(RestoreOptions *ropt); |
124 | | |
125 | | |
126 | | /* |
127 | | * Allocate a new DumpOptions block containing all default values. |
128 | | */ |
129 | | DumpOptions * |
130 | | NewDumpOptions(void) |
131 | 0 | { |
132 | 0 | DumpOptions *opts = (DumpOptions *) pg_malloc(sizeof(DumpOptions)); |
133 | |
|
134 | 0 | InitDumpOptions(opts); |
135 | 0 | return opts; |
136 | 0 | } |
137 | | |
138 | | /* |
139 | | * Initialize a DumpOptions struct to all default values |
140 | | */ |
141 | | void |
142 | | InitDumpOptions(DumpOptions *opts) |
143 | 0 | { |
144 | 0 | memset(opts, 0, sizeof(DumpOptions)); |
145 | | /* set any fields that shouldn't default to zeroes */ |
146 | 0 | opts->include_everything = true; |
147 | 0 | opts->cparams.promptPassword = TRI_DEFAULT; |
148 | 0 | opts->dumpSections = DUMP_UNSECTIONED; |
149 | 0 | } |
150 | | |
151 | | /* |
152 | | * Create a freshly allocated DumpOptions with options equivalent to those |
153 | | * found in the given RestoreOptions. |
154 | | */ |
155 | | DumpOptions * |
156 | | dumpOptionsFromRestoreOptions(RestoreOptions *ropt) |
157 | 0 | { |
158 | 0 | DumpOptions *dopt = NewDumpOptions(); |
159 | | |
160 | | /* this is the inverse of what's at the end of pg_dump.c's main() */ |
161 | 0 | dopt->cparams.dbname = ropt->cparams.dbname ? pg_strdup(ropt->cparams.dbname) : NULL; |
162 | 0 | dopt->cparams.pgport = ropt->cparams.pgport ? pg_strdup(ropt->cparams.pgport) : NULL; |
163 | 0 | dopt->cparams.pghost = ropt->cparams.pghost ? pg_strdup(ropt->cparams.pghost) : NULL; |
164 | 0 | dopt->cparams.username = ropt->cparams.username ? pg_strdup(ropt->cparams.username) : NULL; |
165 | 0 | dopt->cparams.promptPassword = ropt->cparams.promptPassword; |
166 | 0 | dopt->outputClean = ropt->dropSchema; |
167 | 0 | dopt->dataOnly = ropt->dataOnly; |
168 | 0 | dopt->schemaOnly = ropt->schemaOnly; |
169 | 0 | dopt->if_exists = ropt->if_exists; |
170 | 0 | dopt->column_inserts = ropt->column_inserts; |
171 | 0 | dopt->dumpSections = ropt->dumpSections; |
172 | 0 | dopt->aclsSkip = ropt->aclsSkip; |
173 | 0 | dopt->outputSuperuser = ropt->superuser; |
174 | 0 | dopt->outputCreateDB = ropt->createDB; |
175 | 0 | dopt->outputNoOwner = ropt->noOwner; |
176 | 0 | dopt->outputNoTablespaces = ropt->noTablespace; |
177 | 0 | dopt->disable_triggers = ropt->disable_triggers; |
178 | 0 | dopt->use_setsessauth = ropt->use_setsessauth; |
179 | 0 | dopt->disable_dollar_quoting = ropt->disable_dollar_quoting; |
180 | 0 | dopt->dump_inserts = ropt->dump_inserts; |
181 | 0 | dopt->no_comments = ropt->no_comments; |
182 | 0 | dopt->no_publications = ropt->no_publications; |
183 | 0 | dopt->no_security_labels = ropt->no_security_labels; |
184 | 0 | dopt->no_subscriptions = ropt->no_subscriptions; |
185 | 0 | dopt->lockWaitTimeout = ropt->lockWaitTimeout; |
186 | 0 | dopt->include_everything = ropt->include_everything; |
187 | 0 | dopt->enable_row_security = ropt->enable_row_security; |
188 | 0 | dopt->sequence_data = ropt->sequence_data; |
189 | |
|
190 | 0 | return dopt; |
191 | 0 | } |
192 | | |
193 | | |
194 | | /* |
195 | | * Wrapper functions. |
196 | | * |
197 | | * The objective it to make writing new formats and dumpers as simple |
198 | | * as possible, if necessary at the expense of extra function calls etc. |
199 | | * |
200 | | */ |
201 | | |
202 | | /* |
203 | | * The dump worker setup needs lots of knowledge of the internals of pg_dump, |
204 | | * so It's defined in pg_dump.c and passed into OpenArchive. The restore worker |
205 | | * setup doesn't need to know anything much, so it's defined here. |
206 | | */ |
207 | | static void |
208 | | setupRestoreWorker(Archive *AHX) |
209 | 0 | { |
210 | 0 | ArchiveHandle *AH = (ArchiveHandle *) AHX; |
211 | |
|
212 | 0 | AH->ReopenPtr(AH); |
213 | 0 | } |
214 | | |
215 | | |
216 | | /* Create a new archive */ |
217 | | /* Public */ |
218 | | Archive * |
219 | | CreateArchive(const char *FileSpec, const ArchiveFormat fmt, |
220 | | const int compression, bool dosync, ArchiveMode mode, |
221 | | SetupWorkerPtrType setupDumpWorker) |
222 | | |
223 | 0 | { |
224 | 0 | ArchiveHandle *AH = _allocAH(FileSpec, fmt, compression, dosync, |
225 | 0 | mode, setupDumpWorker); |
226 | |
|
227 | 0 | return (Archive *) AH; |
228 | 0 | } |
229 | | |
230 | | /* Open an existing archive */ |
231 | | /* Public */ |
232 | | Archive * |
233 | | OpenArchive(const char *FileSpec, const ArchiveFormat fmt) |
234 | 0 | { |
235 | 0 | ArchiveHandle *AH = _allocAH(FileSpec, fmt, 0, true, archModeRead, setupRestoreWorker); |
236 | |
|
237 | 0 | return (Archive *) AH; |
238 | 0 | } |
239 | | |
240 | | /* Public */ |
241 | | void |
242 | | CloseArchive(Archive *AHX) |
243 | 0 | { |
244 | 0 | int res = 0; |
245 | 0 | ArchiveHandle *AH = (ArchiveHandle *) AHX; |
246 | |
|
247 | 0 | AH->ClosePtr(AH); |
248 | | |
249 | | /* Close the output */ |
250 | 0 | if (AH->gzOut) |
251 | 0 | res = GZCLOSE(AH->OF); |
252 | 0 | else if (AH->OF != stdout) |
253 | 0 | res = fclose(AH->OF); |
254 | |
|
255 | 0 | if (res != 0) |
256 | 0 | exit_horribly(modulename, "could not close output file: %s\n", |
257 | 0 | strerror(errno)); |
258 | 0 | } |
259 | | |
260 | | /* Public */ |
261 | | void |
262 | | SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt) |
263 | 0 | { |
264 | | /* Caller can omit dump options, in which case we synthesize them */ |
265 | 0 | if (dopt == NULL && ropt != NULL) |
266 | 0 | dopt = dumpOptionsFromRestoreOptions(ropt); |
267 | | |
268 | | /* Save options for later access */ |
269 | 0 | AH->dopt = dopt; |
270 | 0 | AH->ropt = ropt; |
271 | 0 | } |
272 | | |
273 | | /* Public */ |
274 | | void |
275 | | ProcessArchiveRestoreOptions(Archive *AHX) |
276 | 0 | { |
277 | 0 | ArchiveHandle *AH = (ArchiveHandle *) AHX; |
278 | 0 | RestoreOptions *ropt = AH->public.ropt; |
279 | 0 | TocEntry *te; |
280 | 0 | teSection curSection; |
281 | | |
282 | | /* Decide which TOC entries will be dumped/restored, and mark them */ |
283 | 0 | curSection = SECTION_PRE_DATA; |
284 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
285 | 0 | { |
286 | | /* |
287 | | * When writing an archive, we also take this opportunity to check |
288 | | * that we have generated the entries in a sane order that respects |
289 | | * the section divisions. When reading, don't complain, since buggy |
290 | | * old versions of pg_dump might generate out-of-order archives. |
291 | | */ |
292 | 0 | if (AH->mode != archModeRead) |
293 | 0 | { |
294 | 0 | switch (te->section) |
295 | 0 | { |
296 | 0 | case SECTION_NONE: |
297 | | /* ok to be anywhere */ |
298 | 0 | break; |
299 | 0 | case SECTION_PRE_DATA: |
300 | 0 | if (curSection != SECTION_PRE_DATA) |
301 | 0 | write_msg(modulename, |
302 | 0 | "WARNING: archive items not in correct section order\n"); |
303 | 0 | break; |
304 | 0 | case SECTION_DATA: |
305 | 0 | if (curSection == SECTION_POST_DATA) |
306 | 0 | write_msg(modulename, |
307 | 0 | "WARNING: archive items not in correct section order\n"); |
308 | 0 | break; |
309 | 0 | case SECTION_POST_DATA: |
310 | | /* ok no matter which section we were in */ |
311 | 0 | break; |
312 | 0 | default: |
313 | 0 | exit_horribly(modulename, "unexpected section code %d\n", |
314 | 0 | (int) te->section); |
315 | 0 | break; |
316 | 0 | } |
317 | 0 | } |
318 | | |
319 | 0 | if (te->section != SECTION_NONE) |
320 | 0 | curSection = te->section; |
321 | |
|
322 | 0 | te->reqs = _tocEntryRequired(te, curSection, AH); |
323 | 0 | } |
324 | | |
325 | | /* Enforce strict names checking */ |
326 | 0 | if (ropt->strict_names) |
327 | 0 | StrictNamesCheck(ropt); |
328 | 0 | } |
329 | | |
/*
 * Restore the contents of an archive.
 *
 * Public entry point.  Drives the whole restore: option sanity checks,
 * optional DB connection, the DROP pass (with IF EXISTS injection when
 * requested), then either the parallel-restore machinery or a three-phase
 * serial pass (main items, ACLs, matview refreshes), and finally cleanup.
 * Output goes either to a SQL script or directly to a database connection,
 * depending on ropt->useDB.
 */
void
RestoreArchive(Archive *AHX)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	RestoreOptions *ropt = AH->public.ropt;
	bool		parallel_mode;
	TocEntry   *te;
	OutputContext sav;

	AH->stage = STAGE_INITIALIZING;

	/*
	 * Check for nonsensical option combinations.
	 *
	 * -C is not compatible with -1, because we can't create a database inside
	 * a transaction block.
	 */
	if (ropt->createDB && ropt->single_txn)
		exit_horribly(modulename, "-C and -1 are incompatible options\n");

	/*
	 * If we're going to do parallel restore, there are some restrictions.
	 * Parallel restore requires a direct DB connection and more than one
	 * worker.
	 */
	parallel_mode = (AH->public.numWorkers > 1 && ropt->useDB);
	if (parallel_mode)
	{
		/* We haven't got round to making this work for all archive formats */
		if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
			exit_horribly(modulename, "parallel restore is not supported with this archive file format\n");

		/* Doesn't work if the archive represents dependencies as OIDs */
		if (AH->version < K_VERS_1_8)
			exit_horribly(modulename, "parallel restore is not supported with archives made by pre-8.0 pg_dump\n");

		/*
		 * It's also not gonna work if we can't reopen the input file, so
		 * let's try that immediately.
		 */
		AH->ReopenPtr(AH);
	}

	/*
	 * Make sure we won't need (de)compression we haven't got.  Only data
	 * entries that actually had a dumper carry compressed payloads.
	 */
#ifndef HAVE_LIBZ
	if (AH->compression != 0 && AH->PrintTocDataPtr != NULL)
	{
		for (te = AH->toc->next; te != AH->toc; te = te->next)
		{
			if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
				exit_horribly(modulename, "cannot restore from compressed archive (compression not supported in this installation)\n");
		}
	}
#endif

	/*
	 * Prepare index arrays, so we can assume we have them throughout restore.
	 * It's possible we already did this, though.
	 */
	if (AH->tocsByDumpId == NULL)
		buildTocEntryArrays(AH);

	/*
	 * If we're using a DB connection, then connect it.
	 */
	if (ropt->useDB)
	{
		ahlog(AH, 1, "connecting to database for restore\n");
		if (AH->version < K_VERS_1_3)
			exit_horribly(modulename, "direct database connections are not supported in pre-1.3 archives\n");

		/*
		 * We don't want to guess at whether the dump will successfully
		 * restore; allow the attempt regardless of the version of the restore
		 * target.
		 */
		AHX->minRemoteVersion = 0;
		AHX->maxRemoteVersion = 9999999;

		ConnectDatabase(AHX, &ropt->cparams, false);

		/*
		 * If we're talking to the DB directly, don't send comments since they
		 * obscure SQL when displaying errors
		 */
		AH->noTocComments = 1;
	}

	/*
	 * Work out if we have an implied data-only restore. This can happen if
	 * the dump was data only or if the user has used a toc list to exclude
	 * all of the schema data. All we do is look for schema entries - if none
	 * are found then we set the dataOnly flag.
	 *
	 * We could scan for wanted TABLE entries, but that is not the same as
	 * dataOnly. At this stage, it seems unnecessary (6-Mar-2001).
	 */
	if (!ropt->dataOnly)
	{
		int			impliedDataOnly = 1;

		for (te = AH->toc->next; te != AH->toc; te = te->next)
		{
			if ((te->reqs & REQ_SCHEMA) != 0)
			{					/* It's schema, and it's wanted */
				impliedDataOnly = 0;
				break;
			}
		}
		if (impliedDataOnly)
		{
			ropt->dataOnly = impliedDataOnly;
			ahlog(AH, 1, "implied data-only restore\n");
		}
	}

	/*
	 * Setup the output file if necessary.  Save the prior output state so we
	 * can restore it at the end.
	 */
	sav = SaveOutput(AH);
	if (ropt->filename || ropt->compression)
		SetOutput(AH, ropt->filename, ropt->compression);

	ahprintf(AH, "--\n-- YSQL database dump\n--\n\n");

	/* Record source and tool versions in the script header, if known. */
	if (AH->archiveRemoteVersion)
		ahprintf(AH, "-- Dumped from database version %s\n",
				 AH->archiveRemoteVersion);
	if (AH->archiveDumpVersion)
		ahprintf(AH, "-- Dumped by ysql_dump version %s\n",
				 AH->archiveDumpVersion);

	ahprintf(AH, "\n");

	if (AH->public.verbose)
		dumpTimestamp(AH, "Started on", AH->createDate);

	/* Open the single wrapping transaction if -1 was given. */
	if (ropt->single_txn)
	{
		if (AH->connection)
			StartTransaction(AHX);
		else
			ahprintf(AH, "BEGIN;\n\n");
	}

	/*
	 * Establish important parameter values right away.
	 */
	_doSetFixedOutputState(AH);

	AH->stage = STAGE_PROCESSING;

	/*
	 * Drop the items at the start, in reverse order
	 */
	if (ropt->dropSchema)
	{
		for (te = AH->toc->prev; te != AH->toc; te = te->prev)
		{
			AH->currentTE = te;

			/*
			 * In createDB mode, issue a DROP *only* for the database as a
			 * whole.  Issuing drops against anything else would be wrong,
			 * because at this point we're connected to the wrong database.
			 * (The DATABASE PROPERTIES entry, if any, should be treated like
			 * the DATABASE entry.)
			 */
			if (ropt->createDB)
			{
				if (strcmp(te->desc, "DATABASE") != 0 &&
					strcmp(te->desc, "DATABASE PROPERTIES") != 0)
					continue;
			}

			/* Otherwise, drop anything that's selected and has a dropStmt */
			if (((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0) && te->dropStmt)
			{
				ahlog(AH, 1, "dropping %s %s\n", te->desc, te->tag);
				/* Select owner and schema as necessary */
				_becomeOwner(AH, te);
				_selectOutputSchema(AH, te->namespace);

				/*
				 * Now emit the DROP command, if the object has one.  Note we
				 * don't necessarily emit it verbatim; at this point we add an
				 * appropriate IF EXISTS clause, if the user requested it.
				 */
				if (*te->dropStmt != '\0')
				{
					if (!ropt->if_exists)
					{
						/* No --if-exists? Then just use the original */
						ahprintf(AH, "%s", te->dropStmt);
					}
					else
					{
						/*
						 * Inject an appropriate spelling of "if exists".  For
						 * large objects, we have a separate routine that
						 * knows how to do it, without depending on
						 * te->dropStmt; use that.  For other objects we need
						 * to parse the command.
						 */
						if (strncmp(te->desc, "BLOB", 4) == 0)
						{
							DropBlobIfExists(AH, te->catalogId.oid);
						}
						else
						{
							/*
							 * dropStmt may be advanced past its start below,
							 * so keep dropStmtOrig for the eventual pg_free.
							 */
							char	   *dropStmt = pg_strdup(te->dropStmt);
							char	   *dropStmtOrig = dropStmt;
							PQExpBuffer ftStmt = createPQExpBuffer();

							/*
							 * Need to inject IF EXISTS clause after ALTER
							 * TABLE part in ALTER TABLE .. DROP statement
							 */
							if (strncmp(dropStmt, "ALTER TABLE", 11) == 0)
							{
								appendPQExpBuffer(ftStmt,
												  "ALTER TABLE IF EXISTS");
								dropStmt = dropStmt + 11;
							}

							/*
							 * ALTER TABLE..ALTER COLUMN..DROP DEFAULT does
							 * not support the IF EXISTS clause, and therefore
							 * we simply emit the original command for DEFAULT
							 * objects (modulo the adjustment made above).
							 *
							 * Likewise, don't mess with DATABASE PROPERTIES.
							 *
							 * If we used CREATE OR REPLACE VIEW as a means of
							 * quasi-dropping an ON SELECT rule, that should
							 * be emitted unchanged as well.
							 *
							 * For other object types, we need to extract the
							 * first part of the DROP which includes the
							 * object type.  Most of the time this matches
							 * te->desc, so search for that; however for the
							 * different kinds of CONSTRAINTs, we know to
							 * search for hardcoded "DROP CONSTRAINT" instead.
							 */
							if (strcmp(te->desc, "DEFAULT") == 0 ||
								strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
								strncmp(dropStmt, "CREATE OR REPLACE VIEW", 22) == 0)
								appendPQExpBufferStr(ftStmt, dropStmt);
							else
							{
								char		buffer[40];
								char	   *mark;

								if (strcmp(te->desc, "CONSTRAINT") == 0 ||
									strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
									strcmp(te->desc, "FK CONSTRAINT") == 0)
									strcpy(buffer, "DROP CONSTRAINT");
								else
									snprintf(buffer, sizeof(buffer), "DROP %s",
											 te->desc);

								mark = strstr(dropStmt, buffer);

								if (mark)
								{
									/* Splice IF EXISTS in after "DROP <type>" */
									*mark = '\0';
									appendPQExpBuffer(ftStmt, "%s%s IF EXISTS%s",
													  dropStmt, buffer,
													  mark + strlen(buffer));
								}
								else
								{
									/* complain and emit unmodified command */
									write_msg(modulename,
											  "WARNING: could not find where to insert IF EXISTS in statement \"%s\"\n",
											  dropStmtOrig);
									appendPQExpBufferStr(ftStmt, dropStmt);
								}
							}

							ahprintf(AH, "%s", ftStmt->data);

							destroyPQExpBuffer(ftStmt);
							pg_free(dropStmtOrig);
						}
					}
				}
			}
		}

		/*
		 * _selectOutputSchema may have set currSchema to reflect the effect
		 * of a "SET search_path" command it emitted.  However, by now we may
		 * have dropped that schema; or it might not have existed in the first
		 * place.  In either case the effective value of search_path will not
		 * be what we think.  Forcibly reset currSchema so that we will
		 * re-establish the search_path setting when needed (after creating
		 * the schema).
		 *
		 * If we treated users as pg_dump'able objects then we'd need to reset
		 * currUser here too.
		 */
		if (AH->currSchema)
			free(AH->currSchema);
		AH->currSchema = NULL;
	}

	if (parallel_mode)
	{
		/*
		 * In parallel mode, turn control over to the parallel-restore logic.
		 */
		ParallelState *pstate;
		TocEntry	pending_list;

		par_list_header_init(&pending_list);

		/* This runs PRE_DATA items and then disconnects from the database */
		restore_toc_entries_prefork(AH, &pending_list);
		Assert(AH->connection == NULL);

		/* ParallelBackupStart() will actually fork the processes */
		pstate = ParallelBackupStart(AH);
		restore_toc_entries_parallel(AH, pstate, &pending_list);
		ParallelBackupEnd(AH, pstate);

		/* reconnect the master and see if we missed something */
		restore_toc_entries_postfork(AH, &pending_list);
		Assert(AH->connection != NULL);
	}
	else
	{
		/*
		 * In serial mode, process everything in three phases: normal items,
		 * then ACLs, then matview refresh items.  We might be able to skip
		 * one or both extra phases in some cases, eg data-only restores.
		 */
		bool		haveACL = false;
		bool		haveRefresh = false;

		/* Phase 1: main-pass items; note which later passes are needed. */
		for (te = AH->toc->next; te != AH->toc; te = te->next)
		{
			if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
				continue;		/* ignore if not to be dumped at all */

			switch (_tocEntryRestorePass(te))
			{
				case RESTORE_PASS_MAIN:
					(void) restore_toc_entry(AH, te, false);
					break;
				case RESTORE_PASS_ACL:
					haveACL = true;
					break;
				case RESTORE_PASS_REFRESH:
					haveRefresh = true;
					break;
			}
		}

		/* Phase 2: ACL entries, if any were seen. */
		if (haveACL)
		{
			for (te = AH->toc->next; te != AH->toc; te = te->next)
			{
				if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0 &&
					_tocEntryRestorePass(te) == RESTORE_PASS_ACL)
					(void) restore_toc_entry(AH, te, false);
			}
		}

		/* Phase 3: matview refresh entries, if any were seen. */
		if (haveRefresh)
		{
			for (te = AH->toc->next; te != AH->toc; te = te->next)
			{
				if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0 &&
					_tocEntryRestorePass(te) == RESTORE_PASS_REFRESH)
					(void) restore_toc_entry(AH, te, false);
			}
		}
	}

	/* Close the single wrapping transaction, if we opened one. */
	if (ropt->single_txn)
	{
		if (AH->connection)
			CommitTransaction(AHX);
		else
			ahprintf(AH, "COMMIT;\n\n");
	}

	if (AH->public.verbose)
		dumpTimestamp(AH, "Completed on", time(NULL));

	ahprintf(AH, "--\n-- YSQL database dump complete\n--\n\n");

	/*
	 * Clean up & we're done.
	 */
	AH->stage = STAGE_FINALIZING;

	if (ropt->filename || ropt->compression)
		RestoreOutput(AH, sav);

	if (ropt->useDB)
		DisconnectDatabase(&AH->public);
}
735 | | |
736 | | /* |
737 | | * Restore a single TOC item. Used in both parallel and non-parallel restore; |
738 | | * is_parallel is true if we are in a worker child process. |
739 | | * |
740 | | * Returns 0 normally, but WORKER_CREATE_DONE or WORKER_INHIBIT_DATA if |
741 | | * the parallel parent has to make the corresponding status update. |
742 | | */ |
743 | | static int |
744 | | restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel) |
745 | 0 | { |
746 | 0 | RestoreOptions *ropt = AH->public.ropt; |
747 | 0 | int status = WORKER_OK; |
748 | 0 | teReqs reqs; |
749 | 0 | bool defnDumped; |
750 | |
|
751 | 0 | AH->currentTE = te; |
752 | | |
753 | | /* Dump any relevant dump warnings to stderr */ |
754 | 0 | if (!ropt->suppressDumpWarnings && strcmp(te->desc, "WARNING") == 0) |
755 | 0 | { |
756 | 0 | if (!ropt->dataOnly && te->defn != NULL && strlen(te->defn) != 0) |
757 | 0 | write_msg(modulename, "warning from original dump file: %s\n", te->defn); |
758 | 0 | else if (te->copyStmt != NULL && strlen(te->copyStmt) != 0) |
759 | 0 | write_msg(modulename, "warning from original dump file: %s\n", te->copyStmt); |
760 | 0 | } |
761 | | |
762 | | /* Work out what, if anything, we want from this entry */ |
763 | 0 | reqs = te->reqs; |
764 | |
|
765 | 0 | defnDumped = false; |
766 | | |
767 | | /* |
768 | | * If it has a schema component that we want, then process that |
769 | | */ |
770 | 0 | if ((reqs & REQ_SCHEMA) != 0) |
771 | 0 | { |
772 | | /* Show namespace in log message if available */ |
773 | 0 | if (te->namespace) |
774 | 0 | ahlog(AH, 1, "creating %s \"%s.%s\"\n", |
775 | 0 | te->desc, te->namespace, te->tag); |
776 | 0 | else |
777 | 0 | ahlog(AH, 1, "creating %s \"%s\"\n", te->desc, te->tag); |
778 | |
|
779 | 0 | _printTocEntry(AH, te, false); |
780 | 0 | defnDumped = true; |
781 | |
|
782 | 0 | if (strcmp(te->desc, "TABLE") == 0) |
783 | 0 | { |
784 | 0 | if (AH->lastErrorTE == te) |
785 | 0 | { |
786 | | /* |
787 | | * We failed to create the table. If |
788 | | * --no-data-for-failed-tables was given, mark the |
789 | | * corresponding TABLE DATA to be ignored. |
790 | | * |
791 | | * In the parallel case this must be done in the parent, so we |
792 | | * just set the return value. |
793 | | */ |
794 | 0 | if (ropt->noDataForFailedTables) |
795 | 0 | { |
796 | 0 | if (is_parallel) |
797 | 0 | status = WORKER_INHIBIT_DATA; |
798 | 0 | else |
799 | 0 | inhibit_data_for_failed_table(AH, te); |
800 | 0 | } |
801 | 0 | } |
802 | 0 | else |
803 | 0 | { |
804 | | /* |
805 | | * We created the table successfully. Mark the corresponding |
806 | | * TABLE DATA for possible truncation. |
807 | | * |
808 | | * In the parallel case this must be done in the parent, so we |
809 | | * just set the return value. |
810 | | */ |
811 | 0 | if (is_parallel) |
812 | 0 | status = WORKER_CREATE_DONE; |
813 | 0 | else |
814 | 0 | mark_create_done(AH, te); |
815 | 0 | } |
816 | 0 | } |
817 | | |
818 | | /* |
819 | | * If we created a DB, connect to it. Also, if we changed DB |
820 | | * properties, reconnect to ensure that relevant GUC settings are |
821 | | * applied to our session. |
822 | | */ |
823 | 0 | if (strcmp(te->desc, "DATABASE") == 0 || |
824 | 0 | strcmp(te->desc, "DATABASE PROPERTIES") == 0) |
825 | 0 | { |
826 | 0 | ahlog(AH, 1, "connecting to new database \"%s\"\n", te->tag); |
827 | 0 | _reconnectToDB(AH, te->tag); |
828 | 0 | } |
829 | 0 | } |
830 | | |
831 | | /* |
832 | | * If it has a data component that we want, then process that |
833 | | */ |
834 | 0 | if ((reqs & REQ_DATA) != 0) |
835 | 0 | { |
836 | | /* |
837 | | * hadDumper will be set if there is genuine data component for this |
838 | | * node. Otherwise, we need to check the defn field for statements |
839 | | * that need to be executed in data-only restores. |
840 | | */ |
841 | 0 | if (te->hadDumper) |
842 | 0 | { |
843 | | /* |
844 | | * If we can output the data, then restore it. |
845 | | */ |
846 | 0 | if (AH->PrintTocDataPtr != NULL) |
847 | 0 | { |
848 | 0 | _printTocEntry(AH, te, true); |
849 | |
|
850 | 0 | if (strcmp(te->desc, "BLOBS") == 0 || |
851 | 0 | strcmp(te->desc, "BLOB COMMENTS") == 0) |
852 | 0 | { |
853 | 0 | ahlog(AH, 1, "processing %s\n", te->desc); |
854 | |
|
855 | 0 | _selectOutputSchema(AH, "pg_catalog"); |
856 | | |
857 | | /* Send BLOB COMMENTS data to ExecuteSimpleCommands() */ |
858 | 0 | if (strcmp(te->desc, "BLOB COMMENTS") == 0) |
859 | 0 | AH->outputKind = OUTPUT_OTHERDATA; |
860 | |
|
861 | 0 | AH->PrintTocDataPtr(AH, te); |
862 | |
|
863 | 0 | AH->outputKind = OUTPUT_SQLCMDS; |
864 | 0 | } |
865 | 0 | else |
866 | 0 | { |
867 | 0 | _disableTriggersIfNecessary(AH, te); |
868 | | |
869 | | /* Select owner and schema as necessary */ |
870 | 0 | _becomeOwner(AH, te); |
871 | 0 | _selectOutputSchema(AH, te->namespace); |
872 | |
|
873 | 0 | ahlog(AH, 1, "processing data for table \"%s.%s\"\n", |
874 | 0 | te->namespace, te->tag); |
875 | | |
876 | | /* |
877 | | * In parallel restore, if we created the table earlier in |
878 | | * the run then we wrap the COPY in a transaction and |
879 | | * precede it with a TRUNCATE. If archiving is not on |
880 | | * this prevents WAL-logging the COPY. This obtains a |
881 | | * speedup similar to that from using single_txn mode in |
882 | | * non-parallel restores. |
883 | | */ |
884 | 0 | if (is_parallel && te->created) |
885 | 0 | { |
886 | | /* |
887 | | * Parallel restore is always talking directly to a |
888 | | * server, so no need to see if we should issue BEGIN. |
889 | | */ |
890 | 0 | StartTransaction(&AH->public); |
891 | | |
892 | | /* |
893 | | * If the server version is >= 8.4, make sure we issue |
894 | | * TRUNCATE with ONLY so that child tables are not |
895 | | * wiped. |
896 | | */ |
897 | 0 | ahprintf(AH, "TRUNCATE TABLE %s%s;\n\n", |
898 | 0 | (PQserverVersion(AH->connection) >= 80400 ? |
899 | 0 | "ONLY " : ""), |
900 | 0 | fmtQualifiedId(te->namespace, te->tag)); |
901 | 0 | } |
902 | | |
903 | | /* |
904 | | * If we have a copy statement, use it. |
905 | | */ |
906 | 0 | if (te->copyStmt && strlen(te->copyStmt) > 0) |
907 | 0 | { |
908 | 0 | ahprintf(AH, "%s", te->copyStmt); |
909 | 0 | AH->outputKind = OUTPUT_COPYDATA; |
910 | 0 | } |
911 | 0 | else |
912 | 0 | AH->outputKind = OUTPUT_OTHERDATA; |
913 | |
|
914 | 0 | AH->PrintTocDataPtr(AH, te); |
915 | | |
916 | | /* |
917 | | * Terminate COPY if needed. |
918 | | */ |
919 | 0 | if (AH->outputKind == OUTPUT_COPYDATA && |
920 | 0 | RestoringToDB(AH)) |
921 | 0 | EndDBCopyMode(&AH->public, te->tag); |
922 | 0 | AH->outputKind = OUTPUT_SQLCMDS; |
923 | | |
924 | | /* close out the transaction started above */ |
925 | 0 | if (is_parallel && te->created) |
926 | 0 | CommitTransaction(&AH->public); |
927 | |
|
928 | 0 | _enableTriggersIfNecessary(AH, te); |
929 | 0 | } |
930 | 0 | } |
931 | 0 | } |
932 | 0 | else if (!defnDumped) |
933 | 0 | { |
934 | | /* If we haven't already dumped the defn part, do so now */ |
935 | 0 | ahlog(AH, 1, "executing %s %s\n", te->desc, te->tag); |
936 | 0 | _printTocEntry(AH, te, false); |
937 | 0 | } |
938 | 0 | } |
939 | |
|
940 | 0 | if (AH->public.n_errors > 0 && status == WORKER_OK) |
941 | 0 | status = WORKER_IGNORED_ERRORS; |
942 | |
|
943 | 0 | return status; |
944 | 0 | } |
945 | | |
946 | | /* |
947 | | * Allocate a new RestoreOptions block. |
948 | | * This is mainly so we can initialize it, but also for future expansion, |
949 | | */ |
950 | | RestoreOptions * |
951 | | NewRestoreOptions(void) |
952 | 0 | { |
953 | 0 | RestoreOptions *opts; |
954 | |
|
955 | 0 | opts = (RestoreOptions *) pg_malloc0(sizeof(RestoreOptions)); |
956 | | |
957 | | /* set any fields that shouldn't default to zeroes */ |
958 | 0 | opts->format = archUnknown; |
959 | 0 | opts->cparams.promptPassword = TRI_DEFAULT; |
960 | 0 | opts->dumpSections = DUMP_UNSECTIONED; |
961 | |
|
962 | 0 | return opts; |
963 | 0 | } |
964 | | |
965 | | static void |
966 | | _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te) |
967 | 0 | { |
968 | 0 | RestoreOptions *ropt = AH->public.ropt; |
969 | | |
970 | | /* This hack is only needed in a data-only restore */ |
971 | 0 | if (!ropt->dataOnly || !ropt->disable_triggers) |
972 | 0 | return; |
973 | | |
974 | 0 | ahlog(AH, 1, "disabling triggers for %s\n", te->tag); |
975 | | |
976 | | /* |
977 | | * Become superuser if possible, since they are the only ones who can |
978 | | * disable constraint triggers. If -S was not given, assume the initial |
979 | | * user identity is a superuser. (XXX would it be better to become the |
980 | | * table owner?) |
981 | | */ |
982 | 0 | _becomeUser(AH, ropt->superuser); |
983 | | |
984 | | /* |
985 | | * Disable them. |
986 | | */ |
987 | 0 | ahprintf(AH, "ALTER TABLE %s DISABLE TRIGGER ALL;\n\n", |
988 | 0 | fmtQualifiedId(te->namespace, te->tag)); |
989 | 0 | } |
990 | | |
991 | | static void |
992 | | _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te) |
993 | 0 | { |
994 | 0 | RestoreOptions *ropt = AH->public.ropt; |
995 | | |
996 | | /* This hack is only needed in a data-only restore */ |
997 | 0 | if (!ropt->dataOnly || !ropt->disable_triggers) |
998 | 0 | return; |
999 | | |
1000 | 0 | ahlog(AH, 1, "enabling triggers for %s\n", te->tag); |
1001 | | |
1002 | | /* |
1003 | | * Become superuser if possible, since they are the only ones who can |
1004 | | * disable constraint triggers. If -S was not given, assume the initial |
1005 | | * user identity is a superuser. (XXX would it be better to become the |
1006 | | * table owner?) |
1007 | | */ |
1008 | 0 | _becomeUser(AH, ropt->superuser); |
1009 | | |
1010 | | /* |
1011 | | * Enable them. |
1012 | | */ |
1013 | 0 | ahprintf(AH, "ALTER TABLE %s ENABLE TRIGGER ALL;\n\n", |
1014 | 0 | fmtQualifiedId(te->namespace, te->tag)); |
1015 | 0 | } |
1016 | | |
1017 | | /* |
1018 | | * This is a routine that is part of the dumper interface, hence the 'Archive*' parameter. |
1019 | | */ |
1020 | | |
1021 | | /* Public */ |
1022 | | void |
1023 | | WriteData(Archive *AHX, const void *data, size_t dLen) |
1024 | 0 | { |
1025 | 0 | ArchiveHandle *AH = (ArchiveHandle *) AHX; |
1026 | |
|
1027 | 0 | if (!AH->currToc) |
1028 | 0 | exit_horribly(modulename, "internal error -- WriteData cannot be called outside the context of a DataDumper routine\n"); |
1029 | | |
1030 | 0 | AH->WriteDataPtr(AH, data, dLen); |
1031 | |
|
1032 | 0 | return; |
1033 | 0 | } |
1034 | | |
/*
 * Create a new TOC entry. The TOC was designed as a TOC, but is now the
 * repository for all metadata. But the name has stuck.
 *
 * The new entry is appended to the tail of the circular, doubly-linked TOC
 * list headed by AH->toc, and then handed to the format-specific
 * ArchiveEntryPtr hook (if the format defines one) so the output format can
 * record any per-entry state of its own.
 *
 * String arguments are copied with pg_strdup; NULL is preserved for the
 * optional namespace, tablespace and copyStmt arguments.  The deps array is
 * copied as well, so the caller retains ownership of its own buffer.
 */

/* Public */
void
ArchiveEntry(Archive *AHX,
			 CatalogId catalogId, DumpId dumpId,
			 const char *tag,
			 const char *namespace,
			 const char *tablespace,
			 const char *owner, bool withOids,
			 const char *desc, teSection section,
			 const char *defn,
			 const char *dropStmt, const char *copyStmt,
			 const DumpId *deps, int nDeps,
			 DataDumperPtr dumpFn, void *dumpArg)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	TocEntry   *newToc;

	newToc = (TocEntry *) pg_malloc0(sizeof(TocEntry));

	/* Track the highest dump ID seen, for later index-array sizing */
	AH->tocCount++;
	if (dumpId > AH->maxDumpId)
		AH->maxDumpId = dumpId;

	/* Link the new entry in just before the list header (i.e. at the tail) */
	newToc->prev = AH->toc->prev;
	newToc->next = AH->toc;
	AH->toc->prev->next = newToc;
	AH->toc->prev = newToc;

	newToc->catalogId = catalogId;
	newToc->dumpId = dumpId;
	newToc->section = section;

	/* Copy all string fields; namespace/tablespace/copyStmt may be NULL */
	newToc->tag = pg_strdup(tag);
	newToc->namespace = namespace ? pg_strdup(namespace) : NULL;
	newToc->tablespace = tablespace ? pg_strdup(tablespace) : NULL;
	newToc->owner = pg_strdup(owner);
	newToc->withOids = withOids;
	newToc->desc = pg_strdup(desc);
	newToc->defn = pg_strdup(defn);
	newToc->dropStmt = pg_strdup(dropStmt);
	newToc->copyStmt = copyStmt ? pg_strdup(copyStmt) : NULL;

	/* Take a private copy of the dependency list, if any */
	if (nDeps > 0)
	{
		newToc->dependencies = (DumpId *) pg_malloc(nDeps * sizeof(DumpId));
		memcpy(newToc->dependencies, deps, nDeps * sizeof(DumpId));
		newToc->nDeps = nDeps;
	}
	else
	{
		newToc->dependencies = NULL;
		newToc->nDeps = 0;
	}

	newToc->dataDumper = dumpFn;
	newToc->dataDumperArg = dumpArg;
	/* hadDumper records whether this entry carries genuine data */
	newToc->hadDumper = dumpFn ? true : false;

	newToc->formatData = NULL;

	/* Let the output format initialize its per-entry state, if it wants to */
	if (AH->ArchiveEntryPtr != NULL)
		AH->ArchiveEntryPtr(AH, newToc);
}
1103 | | |
/*
 * Print a human-readable summary of the archive's TOC (pg_restore -l).
 *
 * Output goes to ropt->filename if set, else the current output target.
 * The listing format is consumed by "pg_restore -L", which parses only the
 * leading dumpId of each entry line.
 */

/* Public */
void
PrintTOCSummary(Archive *AHX)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	RestoreOptions *ropt = AH->public.ropt;
	TocEntry   *te;
	teSection	curSection;
	OutputContext sav;
	const char *fmtName;
	char		stamp_str[64];

	/* Redirect output if a target file was requested; restore it at the end */
	sav = SaveOutput(AH);
	if (ropt->filename)
		SetOutput(AH, ropt->filename, 0 /* no compression */ );

	/* Fall back to a placeholder if the timestamp can't be formatted */
	if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT,
				 localtime(&AH->createDate)) == 0)
		strcpy(stamp_str, "[unknown]");

	ahprintf(AH, ";\n; Archive created at %s\n", stamp_str);
	ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %d\n",
			 replace_line_endings(AH->archdbname),
			 AH->tocCount, AH->compression);

	switch (AH->format)
	{
		case archCustom:
			fmtName = "CUSTOM";
			break;
		case archDirectory:
			fmtName = "DIRECTORY";
			break;
		case archTar:
			fmtName = "TAR";
			break;
		default:
			fmtName = "UNKNOWN";
	}

	ahprintf(AH, "; Dump Version: %d.%d-%d\n",
			 ARCHIVE_MAJOR(AH->version), ARCHIVE_MINOR(AH->version), ARCHIVE_REV(AH->version));
	ahprintf(AH, "; Format: %s\n", fmtName);
	ahprintf(AH, "; Integer: %d bytes\n", (int) AH->intSize);
	ahprintf(AH, "; Offset: %d bytes\n", (int) AH->offSize);
	if (AH->archiveRemoteVersion)
		ahprintf(AH, "; Dumped from database version: %s\n",
				 AH->archiveRemoteVersion);
	if (AH->archiveDumpVersion)
		ahprintf(AH, "; Dumped by ysql_dump version: %s\n",
				 AH->archiveDumpVersion);

	ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");

	/*
	 * Walk the circular TOC list, tracking the current section so that
	 * _tocEntryRequired() can be applied with the right context.
	 */
	curSection = SECTION_PRE_DATA;
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if (te->section != SECTION_NONE)
			curSection = te->section;
		if (ropt->verbose ||
			(_tocEntryRequired(te, curSection, AH) & (REQ_SCHEMA | REQ_DATA)) != 0)
		{
			char	   *sanitized_name;
			char	   *sanitized_schema;
			char	   *sanitized_owner;

			/*
			 * As in _printTocEntry(), sanitize strings that might contain
			 * newlines, to ensure that each logical output line is in fact
			 * one physical output line.  This prevents confusion when the
			 * file is read by "pg_restore -L".  Note that we currently don't
			 * bother to quote names, meaning that the name fields aren't
			 * automatically parseable.  "pg_restore -L" doesn't care because
			 * it only examines the dumpId field, but someday we might want to
			 * try harder.
			 */
			sanitized_name = replace_line_endings(te->tag);
			if (te->namespace)
				sanitized_schema = replace_line_endings(te->namespace);
			else
				sanitized_schema = pg_strdup("-");
			sanitized_owner = replace_line_endings(te->owner);

			ahprintf(AH, "%d; %u %u %s %s %s %s\n", te->dumpId,
					 te->catalogId.tableoid, te->catalogId.oid,
					 te->desc, sanitized_schema, sanitized_name,
					 sanitized_owner);

			/* replace_line_endings() and pg_strdup() both allocate */
			free(sanitized_name);
			free(sanitized_schema);
			free(sanitized_owner);
		}
		if (ropt->verbose && te->nDeps > 0)
		{
			int			i;

			ahprintf(AH, ";\tdepends on:");
			for (i = 0; i < te->nDeps; i++)
				ahprintf(AH, " %d", te->dependencies[i]);
			ahprintf(AH, "\n");
		}
	}

	/* Enforce strict names checking */
	if (ropt->strict_names)
		StrictNamesCheck(ropt);

	if (ropt->filename)
		RestoreOutput(AH, sav);
}
1214 | | |
1215 | | /*********** |
1216 | | * BLOB Archival |
1217 | | ***********/ |
1218 | | |
1219 | | /* Called by a dumper to signal start of a BLOB */ |
1220 | | int |
1221 | | StartBlob(Archive *AHX, Oid oid) |
1222 | 0 | { |
1223 | 0 | ArchiveHandle *AH = (ArchiveHandle *) AHX; |
1224 | |
|
1225 | 0 | if (!AH->StartBlobPtr) |
1226 | 0 | exit_horribly(modulename, "large-object output not supported in chosen format\n"); |
1227 | | |
1228 | 0 | AH->StartBlobPtr(AH, AH->currToc, oid); |
1229 | |
|
1230 | 0 | return 1; |
1231 | 0 | } |
1232 | | |
1233 | | /* Called by a dumper to signal end of a BLOB */ |
1234 | | int |
1235 | | EndBlob(Archive *AHX, Oid oid) |
1236 | 0 | { |
1237 | 0 | ArchiveHandle *AH = (ArchiveHandle *) AHX; |
1238 | |
|
1239 | 0 | if (AH->EndBlobPtr) |
1240 | 0 | AH->EndBlobPtr(AH, AH->currToc, oid); |
1241 | |
|
1242 | 0 | return 1; |
1243 | 0 | } |
1244 | | |
1245 | | /********** |
1246 | | * BLOB Restoration |
1247 | | **********/ |
1248 | | |
1249 | | /* |
1250 | | * Called by a format handler before any blobs are restored |
1251 | | */ |
1252 | | void |
1253 | | StartRestoreBlobs(ArchiveHandle *AH) |
1254 | 0 | { |
1255 | 0 | RestoreOptions *ropt = AH->public.ropt; |
1256 | |
|
1257 | 0 | if (!ropt->single_txn) |
1258 | 0 | { |
1259 | 0 | if (AH->connection) |
1260 | 0 | StartTransaction(&AH->public); |
1261 | 0 | else |
1262 | 0 | ahprintf(AH, "BEGIN;\n\n"); |
1263 | 0 | } |
1264 | |
|
1265 | 0 | AH->blobCount = 0; |
1266 | 0 | } |
1267 | | |
1268 | | /* |
1269 | | * Called by a format handler after all blobs are restored |
1270 | | */ |
1271 | | void |
1272 | | EndRestoreBlobs(ArchiveHandle *AH) |
1273 | 0 | { |
1274 | 0 | RestoreOptions *ropt = AH->public.ropt; |
1275 | |
|
1276 | 0 | if (!ropt->single_txn) |
1277 | 0 | { |
1278 | 0 | if (AH->connection) |
1279 | 0 | CommitTransaction(&AH->public); |
1280 | 0 | else |
1281 | 0 | ahprintf(AH, "COMMIT;\n\n"); |
1282 | 0 | } |
1283 | |
|
1284 | 0 | ahlog(AH, 1, ngettext("restored %d large object\n", |
1285 | 0 | "restored %d large objects\n", |
1286 | 0 | AH->blobCount), |
1287 | 0 | AH->blobCount); |
1288 | 0 | } |
1289 | | |
1290 | | |
/*
 * Called by a format handler to initiate restoration of a blob
 *
 * For pre-1.12 archives the blob metadata entries don't exist, so the
 * drop/create of the large object must be performed here; newer archives
 * handle that through regular TOC entries.  When restoring directly to a
 * server we use the libpq large-object API; otherwise equivalent SQL
 * (lo_create/lo_open) is emitted into the script.
 */
void
StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
{
	bool		old_blob_style = (AH->version < K_VERS_1_12);
	Oid			loOid;

	AH->blobCount++;

	/* Initialize the LO Buffer */
	AH->lo_buf_used = 0;

	ahlog(AH, 1, "restoring large object with OID %u\n", oid);

	/* With an old archive we must do drop and create logic here */
	if (old_blob_style && drop)
		DropBlobIfExists(AH, oid);

	if (AH->connection)
	{
		if (old_blob_style)
		{
			/* Must re-create the blob under its original OID */
			loOid = lo_create(AH->connection, oid);
			if (loOid == 0 || loOid != oid)
				exit_horribly(modulename, "could not create large object %u: %s",
							  oid, PQerrorMessage(AH->connection));
		}
		AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
		if (AH->loFd == -1)
			exit_horribly(modulename, "could not open large object %u: %s",
						  oid, PQerrorMessage(AH->connection));
	}
	else
	{
		/* Script output: lo_open(0-arg descriptor) is closed in EndRestoreBlob */
		if (old_blob_style)
			ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
					 oid, INV_WRITE);
		else
			ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
					 oid, INV_WRITE);
	}

	/* Tell ahwrite() to buffer data into the LO buffer from now on */
	AH->writingBlob = 1;
}
1337 | | |
1338 | | void |
1339 | | EndRestoreBlob(ArchiveHandle *AH, Oid oid) |
1340 | 0 | { |
1341 | 0 | if (AH->lo_buf_used > 0) |
1342 | 0 | { |
1343 | | /* Write remaining bytes from the LO buffer */ |
1344 | 0 | dump_lo_buf(AH); |
1345 | 0 | } |
1346 | |
|
1347 | 0 | AH->writingBlob = 0; |
1348 | |
|
1349 | 0 | if (AH->connection) |
1350 | 0 | { |
1351 | 0 | lo_close(AH->connection, AH->loFd); |
1352 | 0 | AH->loFd = -1; |
1353 | 0 | } |
1354 | 0 | else |
1355 | 0 | { |
1356 | 0 | ahprintf(AH, "SELECT pg_catalog.lo_close(0);\n\n"); |
1357 | 0 | } |
1358 | 0 | } |
1359 | | |
1360 | | /*********** |
1361 | | * Sorting and Reordering |
1362 | | ***********/ |
1363 | | |
/*
 * Read a TOC-listing file (pg_restore -L) and reorder/select the in-memory
 * TOC to match it.
 *
 * Each wanted entry is identified by the leading numeric dump ID on its
 * line; everything after a ';' is a comment.  Selected entries are moved to
 * the end of the TOC list in file order; unselected entries collect at the
 * front and are marked not-wanted via ropt->idWanted.
 */
void
SortTocFromFile(Archive *AHX)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	RestoreOptions *ropt = AH->public.ropt;
	FILE	   *fh;
	char		buf[100];
	bool		incomplete_line;

	/* Allocate space for the 'wanted' array, and init it */
	ropt->idWanted = (bool *) pg_malloc(sizeof(bool) * AH->maxDumpId);
	memset(ropt->idWanted, 0, sizeof(bool) * AH->maxDumpId);

	/* Setup the file */
	fh = fopen(ropt->tocFile, PG_BINARY_R);
	if (!fh)
		exit_horribly(modulename, "could not open TOC file \"%s\": %s\n",
					  ropt->tocFile, strerror(errno));

	incomplete_line = false;
	while (fgets(buf, sizeof(buf), fh) != NULL)
	{
		bool		prev_incomplete_line = incomplete_line;
		int			buflen;
		char	   *cmnt;
		char	   *endptr;
		DumpId		id;
		TocEntry   *te;

		/*
		 * Some lines in the file might be longer than sizeof(buf).  This is
		 * no problem, since we only care about the leading numeric ID which
		 * can be at most a few characters; but we have to skip continuation
		 * bufferloads when processing a long line.
		 */
		buflen = strlen(buf);
		if (buflen > 0 && buf[buflen - 1] == '\n')
			incomplete_line = false;
		else
			incomplete_line = true;
		if (prev_incomplete_line)
			continue;

		/* Truncate line at comment, if any */
		cmnt = strchr(buf, ';');
		if (cmnt != NULL)
			cmnt[0] = '\0';

		/* Ignore if all blank */
		if (strspn(buf, " \t\r\n") == strlen(buf))
			continue;

		/* Get an ID, check it's valid and not already seen */
		id = strtol(buf, &endptr, 10);
		if (endptr == buf || id <= 0 || id > AH->maxDumpId ||
			ropt->idWanted[id - 1])
		{
			write_msg(modulename, "WARNING: line ignored: %s\n", buf);
			continue;
		}

		/* Find TOC entry */
		te = getTocEntryByDumpId(AH, id);
		if (!te)
			exit_horribly(modulename, "could not find entry for ID %d\n",
						  id);

		/* Mark it wanted */
		ropt->idWanted[id - 1] = true;

		/*
		 * Move each item to the end of the list as it is selected, so that
		 * they are placed in the desired order.  Any unwanted items will end
		 * up at the front of the list, which may seem unintuitive but it's
		 * what we need.  In an ordinary serial restore that makes no
		 * difference, but in a parallel restore we need to mark unrestored
		 * items' dependencies as satisfied before we start examining
		 * restorable items.  Otherwise they could have surprising
		 * side-effects on the order in which restorable items actually get
		 * restored.
		 */
		_moveBefore(AH, AH->toc, te);
	}

	if (fclose(fh) != 0)
		exit_horribly(modulename, "could not close TOC file: %s\n",
					  strerror(errno));
}
1452 | | |
1453 | | /********************** |
1454 | | * 'Convenience functions that look like standard IO functions |
1455 | | * for writing data when in dump mode. |
1456 | | **********************/ |
1457 | | |
1458 | | /* Public */ |
1459 | | void |
1460 | | archputs(const char *s, Archive *AH) |
1461 | 0 | { |
1462 | 0 | WriteData(AH, s, strlen(s)); |
1463 | 0 | return; |
1464 | 0 | } |
1465 | | |
/*
 * Public: printf-style formatting into archive data (via WriteData).
 *
 * Formats into a heap buffer, growing and retrying as pvsnprintf directs,
 * then hands the finished text to WriteData.  Returns the number of bytes
 * written.
 */

/* Public */
int
archprintf(Archive *AH, const char *fmt,...)
{
	char	   *p;
	size_t		len = 128;		/* initial assumption about buffer size */
	size_t		cnt;

	for (;;)
	{
		va_list		args;

		/* Allocate work buffer. */
		p = (char *) pg_malloc(len);

		/* Try to format the data. */
		/* va_start/va_end inside the loop: each retry needs a fresh va_list */
		va_start(args, fmt);
		cnt = pvsnprintf(p, len, fmt, args);
		va_end(args);

		if (cnt < len)
			break;				/* success */

		/* Release buffer and loop around to try again with larger len. */
		free(p);
		len = cnt;
	}

	WriteData(AH, p, cnt);
	free(p);
	return (int) cnt;
}
1498 | | |
1499 | | |
1500 | | /******************************* |
1501 | | * Stuff below here should be 'private' to the archiver routines |
1502 | | *******************************/ |
1503 | | |
/*
 * Redirect the archive's SQL-script output.
 *
 * Target resolution, in priority order: an explicit filename argument, the
 * already-open archive file (AH->FH), the archive file spec (AH->fSpec), or
 * stdout.  When compression is requested (and zlib is available) the target
 * is opened through gzopen/gzdopen instead of stdio.  File descriptors are
 * dup()'d so closing AH->OF later doesn't disturb the original handle.
 */
static void
SetOutput(ArchiveHandle *AH, const char *filename, int compression)
{
	int			fn;

	if (filename)
		fn = -1;
	else if (AH->FH)
		fn = fileno(AH->FH);
	else if (AH->fSpec)
	{
		fn = -1;
		filename = AH->fSpec;
	}
	else
		fn = fileno(stdout);

	/* If compression explicitly requested, use gzopen */
#ifdef HAVE_LIBZ
	if (compression != 0)
	{
		char		fmode[14];

		/* Don't use PG_BINARY_x since this is zlib */
		sprintf(fmode, "wb%d", compression);
		if (fn >= 0)
			AH->OF = gzdopen(dup(fn), fmode);
		else
			AH->OF = gzopen(filename, fmode);
		AH->gzOut = 1;
	}
	else
#endif
	{							/* Use fopen */
		if (AH->mode == archModeAppend)
		{
			if (fn >= 0)
				AH->OF = fdopen(dup(fn), PG_BINARY_A);
			else
				AH->OF = fopen(filename, PG_BINARY_A);
		}
		else
		{
			if (fn >= 0)
				AH->OF = fdopen(dup(fn), PG_BINARY_W);
			else
				AH->OF = fopen(filename, PG_BINARY_W);
		}
		AH->gzOut = 0;
	}

	/* Any open failure is fatal; report with or without a filename */
	if (!AH->OF)
	{
		if (filename)
			exit_horribly(modulename, "could not open output file \"%s\": %s\n",
						  filename, strerror(errno));
		else
			exit_horribly(modulename, "could not open output file: %s\n",
						  strerror(errno));
	}
}
1565 | | |
1566 | | static OutputContext |
1567 | | SaveOutput(ArchiveHandle *AH) |
1568 | 0 | { |
1569 | 0 | OutputContext sav; |
1570 | |
|
1571 | 0 | sav.OF = AH->OF; |
1572 | 0 | sav.gzOut = AH->gzOut; |
1573 | |
|
1574 | 0 | return sav; |
1575 | 0 | } |
1576 | | |
/*
 * Close the current output target and switch back to the one captured by
 * SaveOutput().  Uses GZCLOSE when the current target was opened through
 * zlib.  A close failure is fatal, since it can mean lost output.
 */
static void
RestoreOutput(ArchiveHandle *AH, OutputContext savedContext)
{
	int			res;

	if (AH->gzOut)
		res = GZCLOSE(AH->OF);
	else
		res = fclose(AH->OF);

	if (res != 0)
		exit_horribly(modulename, "could not close output file: %s\n",
					  strerror(errno));

	AH->gzOut = savedContext.gzOut;
	AH->OF = savedContext.OF;
}
1594 | | |
1595 | | |
1596 | | |
/*
 * Print formatted text to the output file (usually stdout).
 *
 * Formats into a heap buffer, growing and retrying as pvsnprintf directs,
 * then hands the finished text to ahwrite() for routing to the current
 * output target.  Returns the number of bytes written.
 */
int
ahprintf(ArchiveHandle *AH, const char *fmt,...)
{
	char	   *p;
	size_t		len = 128;		/* initial assumption about buffer size */
	size_t		cnt;

	for (;;)
	{
		va_list		args;

		/* Allocate work buffer. */
		p = (char *) pg_malloc(len);

		/* Try to format the data. */
		/* va_start/va_end inside the loop: each retry needs a fresh va_list */
		va_start(args, fmt);
		cnt = pvsnprintf(p, len, fmt, args);
		va_end(args);

		if (cnt < len)
			break;				/* success */

		/* Release buffer and loop around to try again with larger len. */
		free(p);
		len = cnt;
	}

	ahwrite(p, 1, cnt, AH);
	free(p);
	return (int) cnt;
}
1631 | | |
/*
 * Emit a progress/debug message.
 *
 * The message is printed when either the archive's debug level is at least
 * 'level', or verbose mode is on and the message is level 1 (i.e. verbose
 * shows basic progress but not deeper debug output).
 */
void
ahlog(ArchiveHandle *AH, int level, const char *fmt,...)
{
	va_list		ap;

	if (AH->debugLevel < level && (!AH->public.verbose || level > 1))
		return;

	va_start(ap, fmt);
	vwrite_msg(NULL, fmt, ap);
	va_end(ap);
}
1644 | | |
1645 | | /* |
1646 | | * Single place for logic which says 'We are restoring to a direct DB connection'. |
1647 | | */ |
1648 | | static int |
1649 | | RestoringToDB(ArchiveHandle *AH) |
1650 | 0 | { |
1651 | 0 | RestoreOptions *ropt = AH->public.ropt; |
1652 | |
|
1653 | 0 | return (ropt && ropt->useDB && AH->connection); |
1654 | 0 | } |
1655 | | |
1656 | | /* |
1657 | | * Dump the current contents of the LO data buffer while writing a BLOB |
1658 | | */ |
1659 | | static void |
1660 | | dump_lo_buf(ArchiveHandle *AH) |
1661 | 0 | { |
1662 | 0 | if (AH->connection) |
1663 | 0 | { |
1664 | 0 | size_t res; |
1665 | |
|
1666 | 0 | res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used); |
1667 | 0 | ahlog(AH, 5, ngettext("wrote %lu byte of large object data (result = %lu)\n", |
1668 | 0 | "wrote %lu bytes of large object data (result = %lu)\n", |
1669 | 0 | AH->lo_buf_used), |
1670 | 0 | (unsigned long) AH->lo_buf_used, (unsigned long) res); |
1671 | 0 | if (res != AH->lo_buf_used) |
1672 | 0 | exit_horribly(modulename, |
1673 | 0 | "could not write to large object (result: %lu, expected: %lu)\n", |
1674 | 0 | (unsigned long) res, (unsigned long) AH->lo_buf_used); |
1675 | 0 | } |
1676 | 0 | else |
1677 | 0 | { |
1678 | 0 | PQExpBuffer buf = createPQExpBuffer(); |
1679 | |
|
1680 | 0 | appendByteaLiteralAHX(buf, |
1681 | 0 | (const unsigned char *) AH->lo_buf, |
1682 | 0 | AH->lo_buf_used, |
1683 | 0 | AH); |
1684 | | |
1685 | | /* Hack: turn off writingBlob so ahwrite doesn't recurse to here */ |
1686 | 0 | AH->writingBlob = 0; |
1687 | 0 | ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data); |
1688 | 0 | AH->writingBlob = 1; |
1689 | |
|
1690 | 0 | destroyPQExpBuffer(buf); |
1691 | 0 | } |
1692 | 0 | AH->lo_buf_used = 0; |
1693 | 0 | } |
1694 | | |
1695 | | |
/*
 * Write buffer to the output file (usually stdout).  This is used for
 * outputting 'restore' scripts etc.  It is even possible for an archive
 * format to create a custom output routine to 'fake' a restore if it
 * wants to generate a script (see TAR output).
 *
 * Routing, in priority order: blob-buffering (while writingBlob is set),
 * zlib output, a format-specific CustomOutPtr, a direct DB connection, or
 * plain stdio.  Any short write is fatal (WRITE_ERROR_EXIT).
 */
void
ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
{
	int			bytes_written = 0;

	if (AH->writingBlob)
	{
		size_t		remaining = size * nmemb;

		/* Fill and flush the LO buffer as many times as needed */
		while (AH->lo_buf_used + remaining > AH->lo_buf_size)
		{
			size_t		avail = AH->lo_buf_size - AH->lo_buf_used;

			memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, avail);
			ptr = (const void *) ((const char *) ptr + avail);
			remaining -= avail;
			AH->lo_buf_used += avail;
			dump_lo_buf(AH);
		}

		/* Whatever is left fits in the buffer; keep it for later */
		memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
		AH->lo_buf_used += remaining;

		bytes_written = size * nmemb;
	}
	else if (AH->gzOut)
		bytes_written = GZWRITE(ptr, size, nmemb, AH->OF);
	else if (AH->CustomOutPtr)
		bytes_written = AH->CustomOutPtr(AH, ptr, size * nmemb);

	else
	{
		/*
		 * If we're doing a restore, and it's direct to DB, and we're
		 * connected then send it to the DB.
		 */
		if (RestoringToDB(AH))
			bytes_written = ExecuteSqlCommandBuf(&AH->public, (const char *) ptr, size * nmemb);
		else
			bytes_written = fwrite(ptr, size, nmemb, AH->OF) * size;
	}

	if (bytes_written != size * nmemb)
		WRITE_ERROR_EXIT;

	return;
}
1749 | | |
/*
 * Report an error; then either exit (when exit_on_error is set) or count
 * the error and continue.
 *
 * To keep the output readable, the restore-stage banner and the TOC-entry
 * identification line are each printed only once per stage/entry, tracked
 * via lastErrorStage/lastErrorTE.
 */
void
warn_or_exit_horribly(ArchiveHandle *AH,
					  const char *modulename, const char *fmt,...)
{
	va_list		ap;

	switch (AH->stage)
	{

		case STAGE_NONE:
			/* Do nothing special */
			break;

		case STAGE_INITIALIZING:
			if (AH->stage != AH->lastErrorStage)
				write_msg(modulename, "Error while INITIALIZING:\n");
			break;

		case STAGE_PROCESSING:
			if (AH->stage != AH->lastErrorStage)
				write_msg(modulename, "Error while PROCESSING TOC:\n");
			break;

		case STAGE_FINALIZING:
			if (AH->stage != AH->lastErrorStage)
				write_msg(modulename, "Error while FINALIZING:\n");
			break;
	}
	/* Identify the offending TOC entry, but only once per entry */
	if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
	{
		write_msg(modulename, "Error from TOC entry %d; %u %u %s %s %s\n",
				  AH->currentTE->dumpId,
				  AH->currentTE->catalogId.tableoid,
				  AH->currentTE->catalogId.oid,
				  AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
				  AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
				  AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
	}
	AH->lastErrorStage = AH->stage;
	AH->lastErrorTE = AH->currentTE;

	va_start(ap, fmt);
	vwrite_msg(modulename, fmt, ap);
	va_end(ap);

	if (AH->public.exit_on_error)
		exit_nicely(1);
	else
		AH->public.n_errors++;
}
1801 | | |
1802 | | #ifdef NOT_USED |
1803 | | |
1804 | | static void |
1805 | | _moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te) |
1806 | | { |
1807 | | /* Unlink te from list */ |
1808 | | te->prev->next = te->next; |
1809 | | te->next->prev = te->prev; |
1810 | | |
1811 | | /* and insert it after "pos" */ |
1812 | | te->prev = pos; |
1813 | | te->next = pos->next; |
1814 | | pos->next->prev = te; |
1815 | | pos->next = te; |
1816 | | } |
1817 | | #endif |
1818 | | |
1819 | | static void |
1820 | | _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te) |
1821 | 0 | { |
1822 | | /* Unlink te from list */ |
1823 | 0 | te->prev->next = te->next; |
1824 | 0 | te->next->prev = te->prev; |
1825 | | |
1826 | | /* and insert it before "pos" */ |
1827 | 0 | te->prev = pos->prev; |
1828 | 0 | te->next = pos; |
1829 | 0 | pos->prev->next = te; |
1830 | 0 | pos->prev = te; |
1831 | 0 | } |
1832 | | |
1833 | | /* |
1834 | | * Build index arrays for the TOC list |
1835 | | * |
1836 | | * This should be invoked only after we have created or read in all the TOC |
1837 | | * items. |
1838 | | * |
1839 | | * The arrays are indexed by dump ID (so entry zero is unused). Note that the |
1840 | | * array entries run only up to maxDumpId. We might see dependency dump IDs |
1841 | | * beyond that (if the dump was partial); so always check the array bound |
1842 | | * before trying to touch an array entry. |
1843 | | */ |
1844 | | static void |
1845 | | buildTocEntryArrays(ArchiveHandle *AH) |
1846 | 0 | { |
1847 | 0 | DumpId maxDumpId = AH->maxDumpId; |
1848 | 0 | TocEntry *te; |
1849 | |
|
1850 | 0 | AH->tocsByDumpId = (TocEntry **) pg_malloc0((maxDumpId + 1) * sizeof(TocEntry *)); |
1851 | 0 | AH->tableDataId = (DumpId *) pg_malloc0((maxDumpId + 1) * sizeof(DumpId)); |
1852 | |
|
1853 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
1854 | 0 | { |
1855 | | /* this check is purely paranoia, maxDumpId should be correct */ |
1856 | 0 | if (te->dumpId <= 0 || te->dumpId > maxDumpId) |
1857 | 0 | exit_horribly(modulename, "bad dumpId\n"); |
1858 | | |
1859 | | /* tocsByDumpId indexes all TOCs by their dump ID */ |
1860 | 0 | AH->tocsByDumpId[te->dumpId] = te; |
1861 | | |
1862 | | /* |
1863 | | * tableDataId provides the TABLE DATA item's dump ID for each TABLE |
1864 | | * TOC entry that has a DATA item. We compute this by reversing the |
1865 | | * TABLE DATA item's dependency, knowing that a TABLE DATA item has |
1866 | | * just one dependency and it is the TABLE item. |
1867 | | */ |
1868 | 0 | if (strcmp(te->desc, "TABLE DATA") == 0 && te->nDeps > 0) |
1869 | 0 | { |
1870 | 0 | DumpId tableId = te->dependencies[0]; |
1871 | | |
1872 | | /* |
1873 | | * The TABLE item might not have been in the archive, if this was |
1874 | | * a data-only dump; but its dump ID should be less than its data |
1875 | | * item's dump ID, so there should be a place for it in the array. |
1876 | | */ |
1877 | 0 | if (tableId <= 0 || tableId > maxDumpId) |
1878 | 0 | exit_horribly(modulename, "bad table dumpId for TABLE DATA item\n"); |
1879 | | |
1880 | 0 | AH->tableDataId[tableId] = te->dumpId; |
1881 | 0 | } |
1882 | 0 | } |
1883 | 0 | } |
1884 | | |
1885 | | TocEntry * |
1886 | | getTocEntryByDumpId(ArchiveHandle *AH, DumpId id) |
1887 | 0 | { |
1888 | | /* build index arrays if we didn't already */ |
1889 | 0 | if (AH->tocsByDumpId == NULL) |
1890 | 0 | buildTocEntryArrays(AH); |
1891 | |
|
1892 | 0 | if (id > 0 && id <= AH->maxDumpId) |
1893 | 0 | return AH->tocsByDumpId[id]; |
1894 | | |
1895 | 0 | return NULL; |
1896 | 0 | } |
1897 | | |
1898 | | teReqs |
1899 | | TocIDRequired(ArchiveHandle *AH, DumpId id) |
1900 | 0 | { |
1901 | 0 | TocEntry *te = getTocEntryByDumpId(AH, id); |
1902 | |
|
1903 | 0 | if (!te) |
1904 | 0 | return 0; |
1905 | | |
1906 | 0 | return te->reqs; |
1907 | 0 | } |
1908 | | |
1909 | | size_t |
1910 | | WriteOffset(ArchiveHandle *AH, pgoff_t o, int wasSet) |
1911 | 0 | { |
1912 | 0 | int off; |
1913 | | |
1914 | | /* Save the flag */ |
1915 | 0 | AH->WriteBytePtr(AH, wasSet); |
1916 | | |
1917 | | /* Write out pgoff_t smallest byte first, prevents endian mismatch */ |
1918 | 0 | for (off = 0; off < sizeof(pgoff_t); off++) |
1919 | 0 | { |
1920 | 0 | AH->WriteBytePtr(AH, o & 0xFF); |
1921 | 0 | o >>= 8; |
1922 | 0 | } |
1923 | 0 | return sizeof(pgoff_t) + 1; |
1924 | 0 | } |
1925 | | |
/*
 * Read back an offset written by WriteOffset().
 *
 * Returns the offset-state flag (K_OFFSET_POS_SET, K_OFFSET_POS_NOT_SET or
 * K_OFFSET_NO_DATA) and stores the offset value itself into *o.
 */
int
ReadOffset(ArchiveHandle *AH, pgoff_t * o)
{
	int			i;
	int			off;
	int			offsetFlg;

	/* Initialize to zero */
	*o = 0;

	/* Check for old version */
	if (AH->version < K_VERS_1_7)
	{
		/* Prior versions wrote offsets using WriteInt */
		i = ReadInt(AH);
		/* -1 means not set */
		if (i < 0)
			return K_OFFSET_POS_NOT_SET;
		else if (i == 0)
			return K_OFFSET_NO_DATA;

		/* Cast to pgoff_t because it was written as an int. */
		*o = (pgoff_t) i;
		return K_OFFSET_POS_SET;
	}

	/*
	 * Read the flag indicating the state of the data pointer. Check if valid
	 * and die if not.
	 *
	 * This used to be handled by a negative or zero pointer, now we use an
	 * extra byte specifically for the state.
	 */
	offsetFlg = AH->ReadBytePtr(AH) & 0xFF;

	switch (offsetFlg)
	{
		case K_OFFSET_POS_NOT_SET:
		case K_OFFSET_NO_DATA:
		case K_OFFSET_POS_SET:

			break;

		default:
			exit_horribly(modulename, "unexpected data offset flag %d\n", offsetFlg);
	}

	/*
	 * Read the bytes; they are stored least-significant first, matching
	 * WriteOffset().  The archive may have been written with a wider
	 * offSize than our pgoff_t; any excess high bytes must then be zero.
	 */
	for (off = 0; off < AH->offSize; off++)
	{
		if (off < sizeof(pgoff_t))
			*o |= ((pgoff_t) (AH->ReadBytePtr(AH))) << (off * 8);
		else
		{
			if (AH->ReadBytePtr(AH) != 0)
				exit_horribly(modulename, "file offset in dump file is too large\n");
		}
	}

	return offsetFlg;
}
1989 | | |
1990 | | size_t |
1991 | | WriteInt(ArchiveHandle *AH, int i) |
1992 | 0 | { |
1993 | 0 | int b; |
1994 | | |
1995 | | /* |
1996 | | * This is a bit yucky, but I don't want to make the binary format very |
1997 | | * dependent on representation, and not knowing much about it, I write out |
1998 | | * a sign byte. If you change this, don't forget to change the file |
1999 | | * version #, and modify readInt to read the new format AS WELL AS the old |
2000 | | * formats. |
2001 | | */ |
2002 | | |
2003 | | /* SIGN byte */ |
2004 | 0 | if (i < 0) |
2005 | 0 | { |
2006 | 0 | AH->WriteBytePtr(AH, 1); |
2007 | 0 | i = -i; |
2008 | 0 | } |
2009 | 0 | else |
2010 | 0 | AH->WriteBytePtr(AH, 0); |
2011 | |
|
2012 | 0 | for (b = 0; b < AH->intSize; b++) |
2013 | 0 | { |
2014 | 0 | AH->WriteBytePtr(AH, i & 0xFF); |
2015 | 0 | i >>= 8; |
2016 | 0 | } |
2017 | |
|
2018 | 0 | return AH->intSize + 1; |
2019 | 0 | } |
2020 | | |
2021 | | int |
2022 | | ReadInt(ArchiveHandle *AH) |
2023 | 0 | { |
2024 | 0 | int res = 0; |
2025 | 0 | int bv, |
2026 | 0 | b; |
2027 | 0 | int sign = 0; /* Default positive */ |
2028 | 0 | int bitShift = 0; |
2029 | |
|
2030 | 0 | if (AH->version > K_VERS_1_0) |
2031 | | /* Read a sign byte */ |
2032 | 0 | sign = AH->ReadBytePtr(AH); |
2033 | |
|
2034 | 0 | for (b = 0; b < AH->intSize; b++) |
2035 | 0 | { |
2036 | 0 | bv = AH->ReadBytePtr(AH) & 0xFF; |
2037 | 0 | if (bv != 0) |
2038 | 0 | res = res + (bv << bitShift); |
2039 | 0 | bitShift += 8; |
2040 | 0 | } |
2041 | |
|
2042 | 0 | if (sign) |
2043 | 0 | res = -res; |
2044 | |
|
2045 | 0 | return res; |
2046 | 0 | } |
2047 | | |
2048 | | size_t |
2049 | | WriteStr(ArchiveHandle *AH, const char *c) |
2050 | 0 | { |
2051 | 0 | size_t res; |
2052 | |
|
2053 | 0 | if (c) |
2054 | 0 | { |
2055 | 0 | int len = strlen(c); |
2056 | |
|
2057 | 0 | res = WriteInt(AH, len); |
2058 | 0 | AH->WriteBufPtr(AH, c, len); |
2059 | 0 | res += len; |
2060 | 0 | } |
2061 | 0 | else |
2062 | 0 | res = WriteInt(AH, -1); |
2063 | |
|
2064 | 0 | return res; |
2065 | 0 | } |
2066 | | |
2067 | | char * |
2068 | | ReadStr(ArchiveHandle *AH) |
2069 | 0 | { |
2070 | 0 | char *buf; |
2071 | 0 | int l; |
2072 | |
|
2073 | 0 | l = ReadInt(AH); |
2074 | 0 | if (l < 0) |
2075 | 0 | buf = NULL; |
2076 | 0 | else |
2077 | 0 | { |
2078 | 0 | buf = (char *) pg_malloc(l + 1); |
2079 | 0 | AH->ReadBufPtr(AH, (void *) buf, l); |
2080 | |
|
2081 | 0 | buf[l] = '\0'; |
2082 | 0 | } |
2083 | |
|
2084 | 0 | return buf; |
2085 | 0 | } |
2086 | | |
/*
 * _discoverArchiveFormat
 *
 * Sniff the input named by AH->fSpec (or stdin when fSpec is unset) to
 * determine which archive format it uses: directory, custom ("PGDMP"
 * signature), or tar.  A plain-text SQL dump is detected and rejected with
 * a hint to use psql.  The bytes consumed while sniffing are kept in
 * AH->lookahead in case the input cannot be rewound (e.g. a pipe).
 * Returns the detected format, which is also stored into AH->format.
 */
static int
_discoverArchiveFormat(ArchiveHandle *AH)
{
	FILE	   *fh;
	char		sig[6];			/* More than enough */
	size_t		cnt;
	int			wantClose = 0;

#if 0
	write_msg(modulename, "attempting to ascertain archive format\n");
#endif

	if (AH->lookahead)
		free(AH->lookahead);

	AH->lookaheadSize = 512;
	AH->lookahead = pg_malloc0(512);
	AH->lookaheadLen = 0;
	AH->lookaheadPos = 0;

	if (AH->fSpec)
	{
		struct stat st;

		wantClose = 1;

		/*
		 * Check if the specified archive is a directory. If so, check if
		 * there's a "toc.dat" (or "toc.dat.gz") file in it.
		 */
		if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode))
		{
			char		buf[MAXPGPATH];

			if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
				exit_horribly(modulename, "directory name too long: \"%s\"\n",
							  AH->fSpec);
			if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
			{
				AH->format = archDirectory;
				return AH->format;
			}

#ifdef HAVE_LIBZ
			if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
				exit_horribly(modulename, "directory name too long: \"%s\"\n",
							  AH->fSpec);
			if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
			{
				AH->format = archDirectory;
				return AH->format;
			}
#endif
			exit_horribly(modulename, "directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)\n",
						  AH->fSpec);
			fh = NULL;			/* keep compiler quiet */
		}
		else
		{
			fh = fopen(AH->fSpec, PG_BINARY_R);
			if (!fh)
				exit_horribly(modulename, "could not open input file \"%s\": %s\n",
							  AH->fSpec, strerror(errno));
		}
	}
	else
	{
		fh = stdin;
		if (!fh)
			exit_horribly(modulename, "could not open input file: %s\n",
						  strerror(errno));
	}

	/* Read the 5-byte signature; anything shorter cannot be an archive */
	if ((cnt = fread(sig, 1, 5, fh)) != 5)
	{
		if (ferror(fh))
			exit_horribly(modulename, "could not read input file: %s\n", strerror(errno));
		else
			exit_horribly(modulename, "input file is too short (read %lu, expected 5)\n",
						  (unsigned long) cnt);
	}

	/* Save it, just in case we need it later */
	memcpy(&AH->lookahead[0], sig, 5);
	AH->lookaheadLen = 5;

	if (strncmp(sig, "PGDMP", 5) == 0)
	{
		int			byteread;
		char		vmaj,
					vmin,
					vrev;

		/*
		 * Finish reading (most of) a custom-format header.
		 *
		 * NB: this code must agree with ReadHead().
		 */
		if ((byteread = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);

		vmaj = byteread;

		if ((byteread = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);

		vmin = byteread;

		/* Save these too... */
		AH->lookahead[AH->lookaheadLen++] = vmaj;
		AH->lookahead[AH->lookaheadLen++] = vmin;

		/* Check header version; varies from V1.0 */
		if (vmaj > 1 || (vmaj == 1 && vmin > 0))	/* Version > 1.0 */
		{
			if ((byteread = fgetc(fh)) == EOF)
				READ_ERROR_EXIT(fh);

			vrev = byteread;
			AH->lookahead[AH->lookaheadLen++] = vrev;
		}
		else
			vrev = 0;

		AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);

		if ((AH->intSize = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);
		AH->lookahead[AH->lookaheadLen++] = AH->intSize;

		/* offSize only exists in headers from V1.7 on */
		if (AH->version >= K_VERS_1_7)
		{
			if ((AH->offSize = fgetc(fh)) == EOF)
				READ_ERROR_EXIT(fh);
			AH->lookahead[AH->lookaheadLen++] = AH->offSize;
		}
		else
			AH->offSize = AH->intSize;

		if ((byteread = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);

		AH->format = byteread;
		AH->lookahead[AH->lookaheadLen++] = AH->format;
	}
	else
	{
		/*
		 * *Maybe* we have a tar archive format file or a text dump ... So,
		 * read first 512 byte header...
		 */
		cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
		/* read failure is checked below */
		AH->lookaheadLen += cnt;

		if (AH->lookaheadLen >= strlen(TEXT_DUMPALL_HEADER) &&
			(strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
			 strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
		{
			/*
			 * looks like it's probably a text format dump. so suggest they
			 * try psql
			 */
			exit_horribly(modulename, "input file appears to be a text format dump. Please use psql.\n");
		}

		if (AH->lookaheadLen != 512)
		{
			if (feof(fh))
				exit_horribly(modulename, "input file does not appear to be a valid archive (too short?)\n");
			else
				READ_ERROR_EXIT(fh);
		}

		if (!isValidTarHeader(AH->lookahead))
			exit_horribly(modulename, "input file does not appear to be a valid archive\n");

		AH->format = archTar;
	}

	/* If we can't seek, then mark the header as read */
	if (fseeko(fh, 0, SEEK_SET) != 0)
	{
		/*
		 * NOTE: Formats that use the lookahead buffer can unset this in their
		 * Init routine.
		 */
		AH->readHeader = 1;
	}
	else
		AH->lookaheadLen = 0;	/* Don't bother since we've reset the file */

	/* Close the file */
	if (wantClose)
		if (fclose(fh) != 0)
			exit_horribly(modulename, "could not close input file: %s\n",
						  strerror(errno));

	return AH->format;
}
2287 | | |
2288 | | |
2289 | | /* |
2290 | | * Allocate an archive handle |
2291 | | */ |
2292 | | static ArchiveHandle * |
2293 | | _allocAH(const char *FileSpec, const ArchiveFormat fmt, |
2294 | | const int compression, bool dosync, ArchiveMode mode, |
2295 | | SetupWorkerPtrType setupWorkerPtr) |
2296 | 0 | { |
2297 | 0 | ArchiveHandle *AH; |
2298 | |
|
2299 | | #if 0 |
2300 | | write_msg(modulename, "allocating AH for %s, format %d\n", FileSpec, fmt); |
2301 | | #endif |
2302 | |
|
2303 | 0 | AH = (ArchiveHandle *) pg_malloc0(sizeof(ArchiveHandle)); |
2304 | | |
2305 | | /* AH->debugLevel = 100; */ |
2306 | |
|
2307 | 0 | AH->version = K_VERS_SELF; |
2308 | | |
2309 | | /* initialize for backwards compatible string processing */ |
2310 | 0 | AH->public.encoding = 0; /* PG_SQL_ASCII */ |
2311 | 0 | AH->public.std_strings = false; |
2312 | | |
2313 | | /* sql error handling */ |
2314 | 0 | AH->public.exit_on_error = true; |
2315 | 0 | AH->public.n_errors = 0; |
2316 | |
|
2317 | 0 | AH->archiveDumpVersion = PG_VERSION; |
2318 | |
|
2319 | 0 | AH->createDate = time(NULL); |
2320 | |
|
2321 | 0 | AH->intSize = sizeof(int); |
2322 | 0 | AH->offSize = sizeof(pgoff_t); |
2323 | 0 | if (FileSpec) |
2324 | 0 | { |
2325 | 0 | AH->fSpec = pg_strdup(FileSpec); |
2326 | | |
2327 | | /* |
2328 | | * Not used; maybe later.... |
2329 | | * |
2330 | | * AH->workDir = pg_strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ; |
2331 | | * i--) if (AH->workDir[i-1] == '/') |
2332 | | */ |
2333 | 0 | } |
2334 | 0 | else |
2335 | 0 | AH->fSpec = NULL; |
2336 | |
|
2337 | 0 | AH->currUser = NULL; /* unknown */ |
2338 | 0 | AH->currSchema = NULL; /* ditto */ |
2339 | 0 | AH->currTablespace = NULL; /* ditto */ |
2340 | 0 | AH->currWithOids = -1; /* force SET */ |
2341 | |
|
2342 | 0 | AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry)); |
2343 | |
|
2344 | 0 | AH->toc->next = AH->toc; |
2345 | 0 | AH->toc->prev = AH->toc; |
2346 | |
|
2347 | 0 | AH->mode = mode; |
2348 | 0 | AH->compression = compression; |
2349 | 0 | AH->dosync = dosync; |
2350 | |
|
2351 | 0 | memset(&(AH->sqlparse), 0, sizeof(AH->sqlparse)); |
2352 | | |
2353 | | /* Open stdout with no compression for AH output handle */ |
2354 | 0 | AH->gzOut = 0; |
2355 | 0 | AH->OF = stdout; |
2356 | | |
2357 | | /* |
2358 | | * On Windows, we need to use binary mode to read/write non-text files, |
2359 | | * which include all archive formats as well as compressed plain text. |
2360 | | * Force stdin/stdout into binary mode if that is what we are using. |
2361 | | */ |
2362 | | #ifdef WIN32 |
2363 | | if ((fmt != archNull || compression != 0) && |
2364 | | (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0)) |
2365 | | { |
2366 | | if (mode == archModeWrite) |
2367 | | _setmode(fileno(stdout), O_BINARY); |
2368 | | else |
2369 | | _setmode(fileno(stdin), O_BINARY); |
2370 | | } |
2371 | | #endif |
2372 | |
|
2373 | 0 | AH->SetupWorkerPtr = setupWorkerPtr; |
2374 | |
|
2375 | 0 | if (fmt == archUnknown) |
2376 | 0 | AH->format = _discoverArchiveFormat(AH); |
2377 | 0 | else |
2378 | 0 | AH->format = fmt; |
2379 | |
|
2380 | 0 | switch (AH->format) |
2381 | 0 | { |
2382 | 0 | case archCustom: |
2383 | 0 | InitArchiveFmt_Custom(AH); |
2384 | 0 | break; |
2385 | | |
2386 | 0 | case archNull: |
2387 | 0 | InitArchiveFmt_Null(AH); |
2388 | 0 | break; |
2389 | | |
2390 | 0 | case archDirectory: |
2391 | 0 | InitArchiveFmt_Directory(AH); |
2392 | 0 | break; |
2393 | | |
2394 | 0 | case archTar: |
2395 | 0 | InitArchiveFmt_Tar(AH); |
2396 | 0 | break; |
2397 | | |
2398 | 0 | default: |
2399 | 0 | exit_horribly(modulename, "unrecognized file format \"%d\"\n", fmt); |
2400 | 0 | } |
2401 | | |
2402 | 0 | return AH; |
2403 | 0 | } |
2404 | | |
2405 | | /* |
2406 | | * Write out all data (tables & blobs) |
2407 | | */ |
2408 | | void |
2409 | | WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate) |
2410 | 0 | { |
2411 | 0 | TocEntry *te; |
2412 | |
|
2413 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
2414 | 0 | { |
2415 | 0 | if (!te->dataDumper) |
2416 | 0 | continue; |
2417 | | |
2418 | 0 | if ((te->reqs & REQ_DATA) == 0) |
2419 | 0 | continue; |
2420 | | |
2421 | 0 | if (pstate && pstate->numWorkers > 1) |
2422 | 0 | { |
2423 | | /* |
2424 | | * If we are in a parallel backup, then we are always the master |
2425 | | * process. Dispatch each data-transfer job to a worker. |
2426 | | */ |
2427 | 0 | DispatchJobForTocEntry(AH, pstate, te, ACT_DUMP, |
2428 | 0 | mark_dump_job_done, NULL); |
2429 | 0 | } |
2430 | 0 | else |
2431 | 0 | WriteDataChunksForTocEntry(AH, te); |
2432 | 0 | } |
2433 | | |
2434 | | /* |
2435 | | * If parallel, wait for workers to finish. |
2436 | | */ |
2437 | 0 | if (pstate && pstate->numWorkers > 1) |
2438 | 0 | WaitForWorkers(AH, pstate, WFW_ALL_IDLE); |
2439 | 0 | } |
2440 | | |
2441 | | |
2442 | | /* |
2443 | | * Callback function that's invoked in the master process after a step has |
2444 | | * been parallel dumped. |
2445 | | * |
2446 | | * We don't need to do anything except check for worker failure. |
2447 | | */ |
2448 | | static void |
2449 | | mark_dump_job_done(ArchiveHandle *AH, |
2450 | | TocEntry *te, |
2451 | | int status, |
2452 | | void *callback_data) |
2453 | 0 | { |
2454 | 0 | ahlog(AH, 1, "finished item %d %s %s\n", |
2455 | 0 | te->dumpId, te->desc, te->tag); |
2456 | |
|
2457 | 0 | if (status != 0) |
2458 | 0 | exit_horribly(modulename, "worker process failed: exit code %d\n", |
2459 | 0 | status); |
2460 | 0 | } |
2461 | | |
2462 | | |
2463 | | void |
2464 | | WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te) |
2465 | 0 | { |
2466 | 0 | StartDataPtrType startPtr; |
2467 | 0 | EndDataPtrType endPtr; |
2468 | |
|
2469 | 0 | AH->currToc = te; |
2470 | |
|
2471 | 0 | if (strcmp(te->desc, "BLOBS") == 0) |
2472 | 0 | { |
2473 | 0 | startPtr = AH->StartBlobsPtr; |
2474 | 0 | endPtr = AH->EndBlobsPtr; |
2475 | 0 | } |
2476 | 0 | else |
2477 | 0 | { |
2478 | 0 | startPtr = AH->StartDataPtr; |
2479 | 0 | endPtr = AH->EndDataPtr; |
2480 | 0 | } |
2481 | |
|
2482 | 0 | if (startPtr != NULL) |
2483 | 0 | (*startPtr) (AH, te); |
2484 | | |
2485 | | /* |
2486 | | * The user-provided DataDumper routine needs to call AH->WriteData |
2487 | | */ |
2488 | 0 | te->dataDumper((Archive *) AH, te->dataDumperArg); |
2489 | |
|
2490 | 0 | if (endPtr != NULL) |
2491 | 0 | (*endPtr) (AH, te); |
2492 | |
|
2493 | 0 | AH->currToc = NULL; |
2494 | 0 | } |
2495 | | |
/*
 * Serialize the TOC into the archive: first a count of the entries that
 * will actually be dumped, then one record per such entry.  The field
 * order here must stay in sync with ReadToc().
 */
void
WriteToc(ArchiveHandle *AH)
{
	TocEntry   *te;
	char		workbuf[32];
	int			tocCount;
	int			i;

	/* count entries that will actually be dumped */
	tocCount = 0;
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) != 0)
			tocCount++;
	}

	/* printf("%d TOC Entries to save\n", tocCount); */

	WriteInt(AH, tocCount);

	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		/* skip entries not selected for dumping, matching the count above */
		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) == 0)
			continue;

		WriteInt(AH, te->dumpId);
		WriteInt(AH, te->dataDumper ? 1 : 0);

		/* OID is recorded as a string for historical reasons */
		sprintf(workbuf, "%u", te->catalogId.tableoid);
		WriteStr(AH, workbuf);
		sprintf(workbuf, "%u", te->catalogId.oid);
		WriteStr(AH, workbuf);

		WriteStr(AH, te->tag);
		WriteStr(AH, te->desc);
		WriteInt(AH, te->section);
		WriteStr(AH, te->defn);
		WriteStr(AH, te->dropStmt);
		WriteStr(AH, te->copyStmt);
		WriteStr(AH, te->namespace);
		WriteStr(AH, te->tablespace);
		WriteStr(AH, te->owner);
		WriteStr(AH, te->withOids ? "true" : "false");

		/* Dump list of dependencies */
		for (i = 0; i < te->nDeps; i++)
		{
			sprintf(workbuf, "%d", te->dependencies[i]);
			WriteStr(AH, workbuf);
		}
		WriteStr(AH, NULL);		/* Terminate List */

		/* give the format module a chance to append per-entry extras */
		if (AH->WriteExtraTocPtr)
			AH->WriteExtraTocPtr(AH, te);
	}
}
2553 | | |
2554 | | void |
2555 | | ReadToc(ArchiveHandle *AH) |
2556 | 0 | { |
2557 | 0 | int i; |
2558 | 0 | char *tmp; |
2559 | 0 | DumpId *deps; |
2560 | 0 | int depIdx; |
2561 | 0 | int depSize; |
2562 | 0 | TocEntry *te; |
2563 | |
|
2564 | 0 | AH->tocCount = ReadInt(AH); |
2565 | 0 | AH->maxDumpId = 0; |
2566 | |
|
2567 | 0 | for (i = 0; i < AH->tocCount; i++) |
2568 | 0 | { |
2569 | 0 | te = (TocEntry *) pg_malloc0(sizeof(TocEntry)); |
2570 | 0 | te->dumpId = ReadInt(AH); |
2571 | |
|
2572 | 0 | if (te->dumpId > AH->maxDumpId) |
2573 | 0 | AH->maxDumpId = te->dumpId; |
2574 | | |
2575 | | /* Sanity check */ |
2576 | 0 | if (te->dumpId <= 0) |
2577 | 0 | exit_horribly(modulename, |
2578 | 0 | "entry ID %d out of range -- perhaps a corrupt TOC\n", |
2579 | 0 | te->dumpId); |
2580 | | |
2581 | 0 | te->hadDumper = ReadInt(AH); |
2582 | |
|
2583 | 0 | if (AH->version >= K_VERS_1_8) |
2584 | 0 | { |
2585 | 0 | tmp = ReadStr(AH); |
2586 | 0 | sscanf(tmp, "%u", &te->catalogId.tableoid); |
2587 | 0 | free(tmp); |
2588 | 0 | } |
2589 | 0 | else |
2590 | 0 | te->catalogId.tableoid = InvalidOid; |
2591 | 0 | tmp = ReadStr(AH); |
2592 | 0 | sscanf(tmp, "%u", &te->catalogId.oid); |
2593 | 0 | free(tmp); |
2594 | |
|
2595 | 0 | te->tag = ReadStr(AH); |
2596 | 0 | te->desc = ReadStr(AH); |
2597 | |
|
2598 | 0 | if (AH->version >= K_VERS_1_11) |
2599 | 0 | { |
2600 | 0 | te->section = ReadInt(AH); |
2601 | 0 | } |
2602 | 0 | else |
2603 | 0 | { |
2604 | | /* |
2605 | | * Rules for pre-8.4 archives wherein pg_dump hasn't classified |
2606 | | * the entries into sections. This list need not cover entry |
2607 | | * types added later than 8.4. |
2608 | | */ |
2609 | 0 | if (strcmp(te->desc, "COMMENT") == 0 || |
2610 | 0 | strcmp(te->desc, "ACL") == 0 || |
2611 | 0 | strcmp(te->desc, "ACL LANGUAGE") == 0) |
2612 | 0 | te->section = SECTION_NONE; |
2613 | 0 | else if (strcmp(te->desc, "TABLE DATA") == 0 || |
2614 | 0 | strcmp(te->desc, "BLOBS") == 0 || |
2615 | 0 | strcmp(te->desc, "BLOB COMMENTS") == 0) |
2616 | 0 | te->section = SECTION_DATA; |
2617 | 0 | else if (strcmp(te->desc, "CONSTRAINT") == 0 || |
2618 | 0 | strcmp(te->desc, "CHECK CONSTRAINT") == 0 || |
2619 | 0 | strcmp(te->desc, "FK CONSTRAINT") == 0 || |
2620 | 0 | strcmp(te->desc, "INDEX") == 0 || |
2621 | 0 | strcmp(te->desc, "RULE") == 0 || |
2622 | 0 | strcmp(te->desc, "TRIGGER") == 0) |
2623 | 0 | te->section = SECTION_POST_DATA; |
2624 | 0 | else |
2625 | 0 | te->section = SECTION_PRE_DATA; |
2626 | 0 | } |
2627 | |
|
2628 | 0 | te->defn = ReadStr(AH); |
2629 | 0 | te->dropStmt = ReadStr(AH); |
2630 | |
|
2631 | 0 | if (AH->version >= K_VERS_1_3) |
2632 | 0 | te->copyStmt = ReadStr(AH); |
2633 | |
|
2634 | 0 | if (AH->version >= K_VERS_1_6) |
2635 | 0 | te->namespace = ReadStr(AH); |
2636 | |
|
2637 | 0 | if (AH->version >= K_VERS_1_10) |
2638 | 0 | te->tablespace = ReadStr(AH); |
2639 | |
|
2640 | 0 | te->owner = ReadStr(AH); |
2641 | 0 | if (AH->version >= K_VERS_1_9) |
2642 | 0 | { |
2643 | 0 | if (strcmp(ReadStr(AH), "true") == 0) |
2644 | 0 | te->withOids = true; |
2645 | 0 | else |
2646 | 0 | te->withOids = false; |
2647 | 0 | } |
2648 | 0 | else |
2649 | 0 | te->withOids = true; |
2650 | | |
2651 | | /* Read TOC entry dependencies */ |
2652 | 0 | if (AH->version >= K_VERS_1_5) |
2653 | 0 | { |
2654 | 0 | depSize = 100; |
2655 | 0 | deps = (DumpId *) pg_malloc(sizeof(DumpId) * depSize); |
2656 | 0 | depIdx = 0; |
2657 | 0 | for (;;) |
2658 | 0 | { |
2659 | 0 | tmp = ReadStr(AH); |
2660 | 0 | if (!tmp) |
2661 | 0 | break; /* end of list */ |
2662 | 0 | if (depIdx >= depSize) |
2663 | 0 | { |
2664 | 0 | depSize *= 2; |
2665 | 0 | deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depSize); |
2666 | 0 | } |
2667 | 0 | sscanf(tmp, "%d", &deps[depIdx]); |
2668 | 0 | free(tmp); |
2669 | 0 | depIdx++; |
2670 | 0 | } |
2671 | |
|
2672 | 0 | if (depIdx > 0) /* We have a non-null entry */ |
2673 | 0 | { |
2674 | 0 | deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depIdx); |
2675 | 0 | te->dependencies = deps; |
2676 | 0 | te->nDeps = depIdx; |
2677 | 0 | } |
2678 | 0 | else |
2679 | 0 | { |
2680 | 0 | free(deps); |
2681 | 0 | te->dependencies = NULL; |
2682 | 0 | te->nDeps = 0; |
2683 | 0 | } |
2684 | 0 | } |
2685 | 0 | else |
2686 | 0 | { |
2687 | 0 | te->dependencies = NULL; |
2688 | 0 | te->nDeps = 0; |
2689 | 0 | } |
2690 | |
|
2691 | 0 | if (AH->ReadExtraTocPtr) |
2692 | 0 | AH->ReadExtraTocPtr(AH, te); |
2693 | |
|
2694 | 0 | ahlog(AH, 3, "read TOC entry %d (ID %d) for %s %s\n", |
2695 | 0 | i, te->dumpId, te->desc, te->tag); |
2696 | | |
2697 | | /* link completed entry into TOC circular list */ |
2698 | 0 | te->prev = AH->toc->prev; |
2699 | 0 | AH->toc->prev->next = te; |
2700 | 0 | AH->toc->prev = te; |
2701 | 0 | te->next = AH->toc; |
2702 | | |
2703 | | /* special processing immediately upon read for some items */ |
2704 | 0 | if (strcmp(te->desc, "ENCODING") == 0) |
2705 | 0 | processEncodingEntry(AH, te); |
2706 | 0 | else if (strcmp(te->desc, "STDSTRINGS") == 0) |
2707 | 0 | processStdStringsEntry(AH, te); |
2708 | 0 | else if (strcmp(te->desc, "SEARCHPATH") == 0) |
2709 | 0 | processSearchPathEntry(AH, te); |
2710 | 0 | } |
2711 | 0 | } |
2712 | | |
2713 | | static void |
2714 | | processEncodingEntry(ArchiveHandle *AH, TocEntry *te) |
2715 | 0 | { |
2716 | | /* te->defn should have the form SET client_encoding = 'foo'; */ |
2717 | 0 | char *defn = pg_strdup(te->defn); |
2718 | 0 | char *ptr1; |
2719 | 0 | char *ptr2 = NULL; |
2720 | 0 | int encoding; |
2721 | |
|
2722 | 0 | ptr1 = strchr(defn, '\''); |
2723 | 0 | if (ptr1) |
2724 | 0 | ptr2 = strchr(++ptr1, '\''); |
2725 | 0 | if (ptr2) |
2726 | 0 | { |
2727 | 0 | *ptr2 = '\0'; |
2728 | 0 | encoding = pg_char_to_encoding(ptr1); |
2729 | 0 | if (encoding < 0) |
2730 | 0 | exit_horribly(modulename, "unrecognized encoding \"%s\"\n", |
2731 | 0 | ptr1); |
2732 | 0 | AH->public.encoding = encoding; |
2733 | 0 | } |
2734 | 0 | else |
2735 | 0 | exit_horribly(modulename, "invalid ENCODING item: %s\n", |
2736 | 0 | te->defn); |
2737 | | |
2738 | 0 | free(defn); |
2739 | 0 | } |
2740 | | |
2741 | | static void |
2742 | | processStdStringsEntry(ArchiveHandle *AH, TocEntry *te) |
2743 | 0 | { |
2744 | | /* te->defn should have the form SET standard_conforming_strings = 'x'; */ |
2745 | 0 | char *ptr1; |
2746 | |
|
2747 | 0 | ptr1 = strchr(te->defn, '\''); |
2748 | 0 | if (ptr1 && strncmp(ptr1, "'on'", 4) == 0) |
2749 | 0 | AH->public.std_strings = true; |
2750 | 0 | else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0) |
2751 | 0 | AH->public.std_strings = false; |
2752 | 0 | else |
2753 | 0 | exit_horribly(modulename, "invalid STDSTRINGS item: %s\n", |
2754 | 0 | te->defn); |
2755 | 0 | } |
2756 | | |
/*
 * Handle a SEARCHPATH TOC entry: stash the verbatim "SET search_path"
 * command for later use.
 */
static void
processSearchPathEntry(ArchiveHandle *AH, TocEntry *te)
{
	/*
	 * te->defn should contain a command to set search_path. We just copy it
	 * verbatim for use later.
	 */
	AH->public.searchpath = pg_strdup(te->defn);
}
2766 | | |
2767 | | static void |
2768 | | StrictNamesCheck(RestoreOptions *ropt) |
2769 | 0 | { |
2770 | 0 | const char *missing_name; |
2771 | |
|
2772 | 0 | Assert(ropt->strict_names); |
2773 | |
|
2774 | 0 | if (ropt->schemaNames.head != NULL) |
2775 | 0 | { |
2776 | 0 | missing_name = simple_string_list_not_touched(&ropt->schemaNames); |
2777 | 0 | if (missing_name != NULL) |
2778 | 0 | exit_horribly(modulename, "schema \"%s\" not found\n", missing_name); |
2779 | 0 | } |
2780 | | |
2781 | 0 | if (ropt->tableNames.head != NULL) |
2782 | 0 | { |
2783 | 0 | missing_name = simple_string_list_not_touched(&ropt->tableNames); |
2784 | 0 | if (missing_name != NULL) |
2785 | 0 | exit_horribly(modulename, "table \"%s\" not found\n", missing_name); |
2786 | 0 | } |
2787 | | |
2788 | 0 | if (ropt->indexNames.head != NULL) |
2789 | 0 | { |
2790 | 0 | missing_name = simple_string_list_not_touched(&ropt->indexNames); |
2791 | 0 | if (missing_name != NULL) |
2792 | 0 | exit_horribly(modulename, "index \"%s\" not found\n", missing_name); |
2793 | 0 | } |
2794 | | |
2795 | 0 | if (ropt->functionNames.head != NULL) |
2796 | 0 | { |
2797 | 0 | missing_name = simple_string_list_not_touched(&ropt->functionNames); |
2798 | 0 | if (missing_name != NULL) |
2799 | 0 | exit_horribly(modulename, "function \"%s\" not found\n", missing_name); |
2800 | 0 | } |
2801 | | |
2802 | 0 | if (ropt->triggerNames.head != NULL) |
2803 | 0 | { |
2804 | 0 | missing_name = simple_string_list_not_touched(&ropt->triggerNames); |
2805 | 0 | if (missing_name != NULL) |
2806 | 0 | exit_horribly(modulename, "trigger \"%s\" not found\n", missing_name); |
2807 | 0 | } |
2808 | 0 | } |
2809 | | |
/*
 * Determine whether we want to restore this TOC entry.
 *
 * Returns 0 if entry should be skipped, or some combination of the
 * REQ_SCHEMA and REQ_DATA bits if we want to restore schema and/or data
 * portions of this TOC entry, or REQ_SPECIAL if it's a special entry.
 *
 * The checks are ordered: special entries first, then blanket exclusions
 * (--no-acl, --no-comments, etc.), then section filtering, then the
 * selective dump/restore switches, and finally schema-vs-data masking.
 * The ordering matters; an entry rejected by an earlier rule never reaches
 * the later ones.
 */
static teReqs
_tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
{
	teReqs		res = REQ_SCHEMA | REQ_DATA;
	RestoreOptions *ropt = AH->public.ropt;

	/* These items are treated specially */
	if (strcmp(te->desc, "ENCODING") == 0 ||
		strcmp(te->desc, "STDSTRINGS") == 0 ||
		strcmp(te->desc, "SEARCHPATH") == 0)
		return REQ_SPECIAL;

	/*
	 * DATABASE and DATABASE PROPERTIES also have a special rule: they are
	 * restored in createDB mode, and not restored otherwise, independently of
	 * all else.
	 */
	if (strcmp(te->desc, "DATABASE") == 0 ||
		strcmp(te->desc, "DATABASE PROPERTIES") == 0)
	{
		if (ropt->createDB)
			return REQ_SCHEMA;
		else
			return 0;
	}

	/*
	 * Process exclusions that affect certain classes of TOC entries.
	 */

	/* If it's an ACL, maybe ignore it */
	if (ropt->aclsSkip && _tocEntryIsACL(te))
		return 0;

	/* If it's a comment, maybe ignore it */
	if (ropt->no_comments && strcmp(te->desc, "COMMENT") == 0)
		return 0;

	/*
	 * If it's a publication or a table part of a publication, maybe ignore
	 * it.
	 */
	if (ropt->no_publications &&
		(strcmp(te->desc, "PUBLICATION") == 0 ||
		 strcmp(te->desc, "PUBLICATION TABLE") == 0))
		return 0;

	/* If it's a security label, maybe ignore it */
	if (ropt->no_security_labels && strcmp(te->desc, "SECURITY LABEL") == 0)
		return 0;

	/* If it's a subscription, maybe ignore it */
	if (ropt->no_subscriptions && strcmp(te->desc, "SUBSCRIPTION") == 0)
		return 0;

	/* Ignore it if section is not to be dumped/restored */
	switch (curSection)
	{
		case SECTION_PRE_DATA:
			if (!(ropt->dumpSections & DUMP_PRE_DATA))
				return 0;
			break;
		case SECTION_DATA:
			if (!(ropt->dumpSections & DUMP_DATA))
				return 0;
			break;
		case SECTION_POST_DATA:
			if (!(ropt->dumpSections & DUMP_POST_DATA))
				return 0;
			break;
		default:
			/* shouldn't get here, really, but ignore it */
			return 0;
	}

	/* Ignore it if rejected by idWanted[] (cf. SortTocFromFile) */
	if (ropt->idWanted && !ropt->idWanted[te->dumpId - 1])
		return 0;

	/*
	 * Check options for selective dump/restore.
	 */
	if (strcmp(te->desc, "ACL") == 0 ||
		strcmp(te->desc, "COMMENT") == 0 ||
		strcmp(te->desc, "SECURITY LABEL") == 0)
	{
		/* Database properties react to createDB, not selectivity options. */
		if (strncmp(te->tag, "DATABASE ", 9) == 0)
		{
			if (!ropt->createDB)
				return 0;
		}
		else if (ropt->schemaNames.head != NULL ||
				 ropt->schemaExcludeNames.head != NULL ||
				 ropt->selTypes)
		{
			/*
			 * In a selective dump/restore, we want to restore these dependent
			 * TOC entry types only if their parent object is being restored.
			 * Without selectivity options, we let through everything in the
			 * archive.  Note there may be such entries with no parent, eg
			 * non-default ACLs for built-in objects.
			 *
			 * This code depends on the parent having been marked already,
			 * which should be the case; if it isn't, perhaps due to
			 * SortTocFromFile rearrangement, skipping the dependent entry
			 * seems prudent anyway.
			 *
			 * Ideally we'd handle, eg, table CHECK constraints this way too.
			 * But it's hard to tell which of their dependencies is the one to
			 * consult.
			 */
			if (te->nDeps != 1 ||
				TocIDRequired(AH, te->dependencies[0]) == 0)
				return 0;
		}
	}
	else
	{
		/* Apply selective-restore rules for standalone TOC entries. */
		if (ropt->schemaNames.head != NULL)
		{
			/* If no namespace is specified, it means all. */
			if (!te->namespace)
				return 0;
			if (!simple_string_list_member(&ropt->schemaNames, te->namespace))
				return 0;
		}

		if (ropt->schemaExcludeNames.head != NULL &&
			te->namespace &&
			simple_string_list_member(&ropt->schemaExcludeNames, te->namespace))
			return 0;

		if (ropt->selTypes)
		{
			/*
			 * -t/-i/-P/-T style selection: the entry must belong to one of
			 * the selected object classes, and if a name list was given for
			 * that class, its tag must be in the list.
			 */
			if (strcmp(te->desc, "TABLE") == 0 ||
				strcmp(te->desc, "TABLE DATA") == 0 ||
				strcmp(te->desc, "VIEW") == 0 ||
				strcmp(te->desc, "FOREIGN TABLE") == 0 ||
				strcmp(te->desc, "MATERIALIZED VIEW") == 0 ||
				strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0 ||
				strcmp(te->desc, "SEQUENCE") == 0 ||
				strcmp(te->desc, "SEQUENCE SET") == 0)
			{
				if (!ropt->selTable)
					return 0;
				if (ropt->tableNames.head != NULL &&
					!simple_string_list_member(&ropt->tableNames, te->tag))
					return 0;
			}
			else if (strcmp(te->desc, "INDEX") == 0)
			{
				if (!ropt->selIndex)
					return 0;
				if (ropt->indexNames.head != NULL &&
					!simple_string_list_member(&ropt->indexNames, te->tag))
					return 0;
			}
			else if (strcmp(te->desc, "FUNCTION") == 0 ||
					 strcmp(te->desc, "AGGREGATE") == 0 ||
					 strcmp(te->desc, "PROCEDURE") == 0)
			{
				if (!ropt->selFunction)
					return 0;
				if (ropt->functionNames.head != NULL &&
					!simple_string_list_member(&ropt->functionNames, te->tag))
					return 0;
			}
			else if (strcmp(te->desc, "TRIGGER") == 0)
			{
				if (!ropt->selTrigger)
					return 0;
				if (ropt->triggerNames.head != NULL &&
					!simple_string_list_member(&ropt->triggerNames, te->tag))
					return 0;
			}
			else
				return 0;
		}
	}

	/*
	 * Determine whether the TOC entry contains schema and/or data components,
	 * and mask off inapplicable REQ bits.  If it had a dataDumper, assume
	 * it's both schema and data.  Otherwise it's probably schema-only, but
	 * there are exceptions.
	 */
	if (!te->hadDumper)
	{
		/*
		 * Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
		 * it is considered a data entry.  We don't need to check for the
		 * BLOBS entry or old-style BLOB COMMENTS, because they will have
		 * hadDumper = true ... but we do need to check new-style BLOB ACLs,
		 * comments, etc.
		 */
		if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
			strcmp(te->desc, "BLOB") == 0 ||
			(strcmp(te->desc, "ACL") == 0 &&
			 strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
			(strcmp(te->desc, "COMMENT") == 0 &&
			 strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
			(strcmp(te->desc, "SECURITY LABEL") == 0 &&
			 strncmp(te->tag, "LARGE OBJECT ", 13) == 0))
			res = res & REQ_DATA;	/* data-only: drop the schema bit */
		else
			res = res & ~REQ_DATA;	/* schema-only: drop the data bit */
	}

	/* If there's no definition command, there's no schema component */
	if (!te->defn || !te->defn[0])
		res = res & ~REQ_SCHEMA;

	/*
	 * Special case: <Init> type with <Max OID> tag; this is obsolete and we
	 * always ignore it.
	 */
	if ((strcmp(te->desc, "<Init>") == 0) && (strcmp(te->tag, "Max OID") == 0))
		return 0;

	/* Mask it if we only want schema */
	if (ropt->schemaOnly)
	{
		/*
		 * The sequence_data option overrides schemaOnly for SEQUENCE SET.
		 *
		 * In binary-upgrade mode, even with schemaOnly set, we do not mask
		 * out large objects.  (Only large object definitions, comments and
		 * other metadata should be generated in binary-upgrade mode, not the
		 * actual data, but that need not concern us here.)
		 */
		if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0) &&
			!(ropt->binary_upgrade &&
			  (strcmp(te->desc, "BLOB") == 0 ||
			   (strcmp(te->desc, "ACL") == 0 &&
				strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
			   (strcmp(te->desc, "COMMENT") == 0 &&
				strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
			   (strcmp(te->desc, "SECURITY LABEL") == 0 &&
				strncmp(te->tag, "LARGE OBJECT ", 13) == 0))))
			res = res & REQ_SCHEMA;
	}

	/* Mask it if we only want data */
	if (ropt->dataOnly)
		res = res & REQ_DATA;

	return res;
}
3067 | | |
3068 | | /* |
3069 | | * Identify which pass we should restore this TOC entry in. |
3070 | | * |
3071 | | * See notes with the RestorePass typedef in pg_backup_archiver.h. |
3072 | | */ |
3073 | | static RestorePass |
3074 | | _tocEntryRestorePass(TocEntry *te) |
3075 | 0 | { |
3076 | | /* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */ |
3077 | 0 | if (strcmp(te->desc, "ACL") == 0 || |
3078 | 0 | strcmp(te->desc, "ACL LANGUAGE") == 0 || |
3079 | 0 | strcmp(te->desc, "DEFAULT ACL") == 0) |
3080 | 0 | return RESTORE_PASS_ACL; |
3081 | 0 | if (strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0) |
3082 | 0 | return RESTORE_PASS_REFRESH; |
3083 | 0 | return RESTORE_PASS_MAIN; |
3084 | 0 | } |
3085 | | |
3086 | | /* |
3087 | | * Identify TOC entries that are ACLs. |
3088 | | * |
3089 | | * Note: it seems worth duplicating some code here to avoid a hard-wired |
3090 | | * assumption that these are exactly the same entries that we restore during |
3091 | | * the RESTORE_PASS_ACL phase. |
3092 | | */ |
3093 | | static bool |
3094 | | _tocEntryIsACL(TocEntry *te) |
3095 | 0 | { |
3096 | | /* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */ |
3097 | 0 | if (strcmp(te->desc, "ACL") == 0 || |
3098 | 0 | strcmp(te->desc, "ACL LANGUAGE") == 0 || |
3099 | 0 | strcmp(te->desc, "DEFAULT ACL") == 0) |
3100 | 0 | return true; |
3101 | 0 | return false; |
3102 | 0 | } |
3103 | | |
/*
 * Issue SET commands for parameters that we want to have set the same way
 * at all times during execution of a restore script.
 *
 * Output goes through ahprintf(), so depending on the archive state these
 * commands are either written into the script or executed directly.  The
 * emission order is deliberate and should not be rearranged casually.
 */
static void
_doSetFixedOutputState(ArchiveHandle *AH)
{
	RestoreOptions *ropt = AH->public.ropt;

	/* YB: enable binary-restore mode (YugabyteDB-specific GUC) */
	if (AH->public.dopt->include_yb_metadata)
		ahprintf(AH, "SET yb_binary_restore = true;\n");

	/*
	 * Disable timeouts to allow for slow commands, idle parallel workers, etc
	 */
	ahprintf(AH, "SET statement_timeout = 0;\n");
	ahprintf(AH, "SET lock_timeout = 0;\n");
	ahprintf(AH, "SET idle_in_transaction_session_timeout = 0;\n");

	/* Select the correct character set encoding */
	ahprintf(AH, "SET client_encoding = '%s';\n",
			 pg_encoding_to_char(AH->public.encoding));

	/* Select the correct string literal syntax */
	ahprintf(AH, "SET standard_conforming_strings = %s;\n",
			 AH->public.std_strings ? "on" : "off");

	/* Select the role to be used during restore */
	if (ropt && ropt->use_role)
		ahprintf(AH, "SET ROLE %s;\n", fmtId(ropt->use_role));

	/* Select the dump-time search_path */
	if (AH->public.searchpath)
		ahprintf(AH, "%s", AH->public.searchpath);

	/* Make sure function checking is disabled */
	ahprintf(AH, "SET check_function_bodies = false;\n");

	/* Avoid annoying notices etc */
	ahprintf(AH, "SET client_min_messages = warning;\n");
	if (!AH->public.std_strings)
		ahprintf(AH, "SET escape_string_warning = off;\n");

	/* Adjust row-security state */
	if (ropt && ropt->enable_row_security)
		ahprintf(AH, "SET row_security = on;\n");
	else
		ahprintf(AH, "SET row_security = off;\n");

	/* YB: allow for creating tables/indexes with (table_oid = x) */
	if (AH->public.dopt->include_yb_metadata)
		ahprintf(AH, "SET yb_enable_create_with_table_oid = true;\n");

	ahprintf(AH, "\n");
}
3158 | | |
3159 | | /* |
3160 | | * Issue a SET SESSION AUTHORIZATION command. Caller is responsible |
3161 | | * for updating state if appropriate. If user is NULL or an empty string, |
3162 | | * the specification DEFAULT will be used. |
3163 | | */ |
3164 | | static void |
3165 | | _doSetSessionAuth(ArchiveHandle *AH, const char *user) |
3166 | 0 | { |
3167 | 0 | PQExpBuffer cmd = createPQExpBuffer(); |
3168 | |
|
3169 | 0 | appendPQExpBufferStr(cmd, "SET SESSION AUTHORIZATION "); |
3170 | | |
3171 | | /* |
3172 | | * SQL requires a string literal here. Might as well be correct. |
3173 | | */ |
3174 | 0 | if (user && *user) |
3175 | 0 | appendStringLiteralAHX(cmd, user, AH); |
3176 | 0 | else |
3177 | 0 | appendPQExpBufferStr(cmd, "DEFAULT"); |
3178 | 0 | appendPQExpBufferChar(cmd, ';'); |
3179 | |
|
3180 | 0 | if (RestoringToDB(AH)) |
3181 | 0 | { |
3182 | 0 | PGresult *res; |
3183 | |
|
3184 | 0 | res = PQexec(AH->connection, cmd->data); |
3185 | |
|
3186 | 0 | if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) |
3187 | | /* NOT warn_or_exit_horribly... use -O instead to skip this. */ |
3188 | 0 | exit_horribly(modulename, "could not set session user to \"%s\": %s", |
3189 | 0 | user, PQerrorMessage(AH->connection)); |
3190 | | |
3191 | 0 | PQclear(res); |
3192 | 0 | } |
3193 | 0 | else |
3194 | 0 | ahprintf(AH, "%s\n\n", cmd->data); |
3195 | |
|
3196 | 0 | destroyPQExpBuffer(cmd); |
3197 | 0 | } |
3198 | | |
3199 | | |
3200 | | /* |
3201 | | * Issue a SET default_with_oids command. Caller is responsible |
3202 | | * for updating state if appropriate. |
3203 | | */ |
3204 | | static void |
3205 | | _doSetWithOids(ArchiveHandle *AH, const bool withOids) |
3206 | 0 | { |
3207 | 0 | PQExpBuffer cmd = createPQExpBuffer(); |
3208 | |
|
3209 | 0 | appendPQExpBuffer(cmd, "SET default_with_oids = %s;", withOids ? |
3210 | 0 | "true" : "false"); |
3211 | |
|
3212 | 0 | if (RestoringToDB(AH)) |
3213 | 0 | { |
3214 | 0 | PGresult *res; |
3215 | |
|
3216 | 0 | res = PQexec(AH->connection, cmd->data); |
3217 | |
|
3218 | 0 | if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) |
3219 | 0 | warn_or_exit_horribly(AH, modulename, |
3220 | 0 | "could not set default_with_oids: %s", |
3221 | 0 | PQerrorMessage(AH->connection)); |
3222 | |
|
3223 | 0 | PQclear(res); |
3224 | 0 | } |
3225 | 0 | else |
3226 | 0 | ahprintf(AH, "%s\n\n", cmd->data); |
3227 | |
|
3228 | 0 | destroyPQExpBuffer(cmd); |
3229 | 0 | } |
3230 | | |
3231 | | |
3232 | | /* |
3233 | | * Issue the commands to connect to the specified database. |
3234 | | * |
3235 | | * If we're currently restoring right into a database, this will |
3236 | | * actually establish a connection. Otherwise it puts a \connect into |
3237 | | * the script output. |
3238 | | */ |
3239 | | static void |
3240 | | _reconnectToDB(ArchiveHandle *AH, const char *dbname) |
3241 | 0 | { |
3242 | 0 | if (RestoringToDB(AH)) |
3243 | 0 | ReconnectToServer(AH, dbname); |
3244 | 0 | else |
3245 | 0 | { |
3246 | 0 | PQExpBufferData connectbuf; |
3247 | |
|
3248 | 0 | initPQExpBuffer(&connectbuf); |
3249 | 0 | appendPsqlMetaConnect(&connectbuf, dbname); |
3250 | 0 | ahprintf(AH, "%s\n", connectbuf.data); |
3251 | 0 | termPQExpBuffer(&connectbuf); |
3252 | 0 | } |
3253 | | |
3254 | | /* |
3255 | | * NOTE: currUser keeps track of what the imaginary session user in our |
3256 | | * script is. It's now effectively reset to the original userID. |
3257 | | */ |
3258 | 0 | if (AH->currUser) |
3259 | 0 | free(AH->currUser); |
3260 | 0 | AH->currUser = NULL; |
3261 | | |
3262 | | /* don't assume we still know the output schema, tablespace, etc either */ |
3263 | 0 | if (AH->currSchema) |
3264 | 0 | free(AH->currSchema); |
3265 | 0 | AH->currSchema = NULL; |
3266 | 0 | if (AH->currTablespace) |
3267 | 0 | free(AH->currTablespace); |
3268 | 0 | AH->currTablespace = NULL; |
3269 | 0 | AH->currWithOids = -1; |
3270 | | |
3271 | | /* re-establish fixed state */ |
3272 | 0 | _doSetFixedOutputState(AH); |
3273 | 0 | } |
3274 | | |
3275 | | /* |
3276 | | * Become the specified user, and update state to avoid redundant commands |
3277 | | * |
3278 | | * NULL or empty argument is taken to mean restoring the session default |
3279 | | */ |
3280 | | static void |
3281 | | _becomeUser(ArchiveHandle *AH, const char *user) |
3282 | 0 | { |
3283 | 0 | if (!user) |
3284 | 0 | user = ""; /* avoid null pointers */ |
3285 | |
|
3286 | 0 | if (AH->currUser && strcmp(AH->currUser, user) == 0) |
3287 | 0 | return; /* no need to do anything */ |
3288 | | |
3289 | 0 | _doSetSessionAuth(AH, user); |
3290 | | |
3291 | | /* |
3292 | | * NOTE: currUser keeps track of what the imaginary session user in our |
3293 | | * script is |
3294 | | */ |
3295 | 0 | if (AH->currUser) |
3296 | 0 | free(AH->currUser); |
3297 | 0 | AH->currUser = pg_strdup(user); |
3298 | 0 | } |
3299 | | |
3300 | | /* |
3301 | | * Become the owner of the given TOC entry object. If |
3302 | | * changes in ownership are not allowed, this doesn't do anything. |
3303 | | */ |
3304 | | static void |
3305 | | _becomeOwner(ArchiveHandle *AH, TocEntry *te) |
3306 | 0 | { |
3307 | 0 | RestoreOptions *ropt = AH->public.ropt; |
3308 | |
|
3309 | 0 | if (ropt && (ropt->noOwner || !ropt->use_setsessauth)) |
3310 | 0 | return; |
3311 | | |
3312 | 0 | _becomeUser(AH, te->owner); |
3313 | 0 | } |
3314 | | |
3315 | | |
3316 | | /* |
3317 | | * Set the proper default_with_oids value for the table. |
3318 | | */ |
3319 | | static void |
3320 | | _setWithOids(ArchiveHandle *AH, TocEntry *te) |
3321 | 0 | { |
3322 | 0 | if (AH->currWithOids != te->withOids) |
3323 | 0 | { |
3324 | 0 | _doSetWithOids(AH, te->withOids); |
3325 | 0 | AH->currWithOids = te->withOids; |
3326 | 0 | } |
3327 | 0 | } |
3328 | | |
3329 | | |
3330 | | /* |
3331 | | * Issue the commands to select the specified schema as the current schema |
3332 | | * in the target database. |
3333 | | */ |
3334 | | static void |
3335 | | _selectOutputSchema(ArchiveHandle *AH, const char *schemaName) |
3336 | 0 | { |
3337 | 0 | PQExpBuffer qry; |
3338 | | |
3339 | | /* |
3340 | | * If there was a SEARCHPATH TOC entry, we're supposed to just stay with |
3341 | | * that search_path rather than switching to entry-specific paths. |
3342 | | * Otherwise, it's an old archive that will not restore correctly unless |
3343 | | * we set the search_path as it's expecting. |
3344 | | */ |
3345 | 0 | if (AH->public.searchpath) |
3346 | 0 | return; |
3347 | | |
3348 | 0 | if (!schemaName || *schemaName == '\0' || |
3349 | 0 | (AH->currSchema && strcmp(AH->currSchema, schemaName) == 0)) |
3350 | 0 | return; /* no need to do anything */ |
3351 | | |
3352 | 0 | qry = createPQExpBuffer(); |
3353 | |
|
3354 | 0 | appendPQExpBuffer(qry, "SET search_path = %s", |
3355 | 0 | fmtId(schemaName)); |
3356 | 0 | if (strcmp(schemaName, "pg_catalog") != 0) |
3357 | 0 | appendPQExpBufferStr(qry, ", pg_catalog"); |
3358 | |
|
3359 | 0 | if (RestoringToDB(AH)) |
3360 | 0 | { |
3361 | 0 | PGresult *res; |
3362 | |
|
3363 | 0 | res = PQexec(AH->connection, qry->data); |
3364 | |
|
3365 | 0 | if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) |
3366 | 0 | warn_or_exit_horribly(AH, modulename, |
3367 | 0 | "could not set search_path to \"%s\": %s", |
3368 | 0 | schemaName, PQerrorMessage(AH->connection)); |
3369 | |
|
3370 | 0 | PQclear(res); |
3371 | 0 | } |
3372 | 0 | else |
3373 | 0 | ahprintf(AH, "%s;\n\n", qry->data); |
3374 | |
|
3375 | 0 | if (AH->currSchema) |
3376 | 0 | free(AH->currSchema); |
3377 | 0 | AH->currSchema = pg_strdup(schemaName); |
3378 | |
|
3379 | 0 | destroyPQExpBuffer(qry); |
3380 | 0 | } |
3381 | | |
3382 | | /* |
3383 | | * Issue the commands to select the specified tablespace as the current one |
3384 | | * in the target database. |
3385 | | */ |
3386 | | static void |
3387 | | _selectTablespace(ArchiveHandle *AH, const char *tablespace) |
3388 | 0 | { |
3389 | 0 | RestoreOptions *ropt = AH->public.ropt; |
3390 | 0 | PQExpBuffer qry; |
3391 | 0 | const char *want, |
3392 | 0 | *have; |
3393 | | |
3394 | | /* do nothing in --no-tablespaces mode */ |
3395 | 0 | if (ropt->noTablespace) |
3396 | 0 | return; |
3397 | | |
3398 | 0 | have = AH->currTablespace; |
3399 | 0 | want = tablespace; |
3400 | | |
3401 | | /* no need to do anything for non-tablespace object */ |
3402 | 0 | if (!want) |
3403 | 0 | return; |
3404 | | |
3405 | 0 | if (have && strcmp(want, have) == 0) |
3406 | 0 | return; /* no need to do anything */ |
3407 | | |
3408 | 0 | qry = createPQExpBuffer(); |
3409 | |
|
3410 | 0 | if (strcmp(want, "") == 0) |
3411 | 0 | { |
3412 | | /* We want the tablespace to be the database's default */ |
3413 | 0 | appendPQExpBufferStr(qry, "SET default_tablespace = ''"); |
3414 | 0 | } |
3415 | 0 | else |
3416 | 0 | { |
3417 | | /* We want an explicit tablespace */ |
3418 | 0 | appendPQExpBuffer(qry, "SET default_tablespace = %s", fmtId(want)); |
3419 | 0 | } |
3420 | |
|
3421 | 0 | if (RestoringToDB(AH)) |
3422 | 0 | { |
3423 | 0 | PGresult *res; |
3424 | |
|
3425 | 0 | res = PQexec(AH->connection, qry->data); |
3426 | |
|
3427 | 0 | if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) |
3428 | 0 | warn_or_exit_horribly(AH, modulename, |
3429 | 0 | "could not set default_tablespace to %s: %s", |
3430 | 0 | fmtId(want), PQerrorMessage(AH->connection)); |
3431 | |
|
3432 | 0 | PQclear(res); |
3433 | 0 | } |
3434 | 0 | else |
3435 | 0 | ahprintf(AH, "%s;\n\n", qry->data); |
3436 | |
|
3437 | 0 | if (AH->currTablespace) |
3438 | 0 | free(AH->currTablespace); |
3439 | 0 | AH->currTablespace = pg_strdup(want); |
3440 | |
|
3441 | 0 | destroyPQExpBuffer(qry); |
3442 | 0 | } |
3443 | | |
/*
 * Extract an object description for a TOC entry, and append it to buf.
 *
 * This is used for ALTER ... OWNER TO.
 *
 * The output is the object's type keyword followed by its (possibly
 * schema-qualified) identifier, formatted so it can be spliced directly
 * after "ALTER ".  Object types not handled here only get a warning.
 */
static void
_getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH)
{
	const char *type = te->desc;

	/* Use ALTER TABLE for views and sequences */
	if (strcmp(type, "VIEW") == 0 || strcmp(type, "SEQUENCE") == 0 ||
		strcmp(type, "MATERIALIZED VIEW") == 0)
		type = "TABLE";

	/* objects that don't require special decoration */
	if (strcmp(type, "COLLATION") == 0 ||
		strcmp(type, "CONVERSION") == 0 ||
		strcmp(type, "DOMAIN") == 0 ||
		strcmp(type, "TABLE") == 0 ||
		strcmp(type, "TYPE") == 0 ||
		strcmp(type, "FOREIGN TABLE") == 0 ||
		strcmp(type, "TEXT SEARCH DICTIONARY") == 0 ||
		strcmp(type, "TEXT SEARCH CONFIGURATION") == 0 ||
		strcmp(type, "STATISTICS") == 0 ||
		/* non-schema-specified objects */
		strcmp(type, "DATABASE") == 0 ||
		strcmp(type, "PROCEDURAL LANGUAGE") == 0 ||
		strcmp(type, "SCHEMA") == 0 ||
		strcmp(type, "EVENT TRIGGER") == 0 ||
		strcmp(type, "FOREIGN DATA WRAPPER") == 0 ||
		strcmp(type, "SERVER") == 0 ||
		strcmp(type, "PUBLICATION") == 0 ||
		strcmp(type, "TABLEGROUP") == 0 ||	/* YB-specific object type */
		strcmp(type, "SUBSCRIPTION") == 0 ||
		strcmp(type, "USER MAPPING") == 0)
	{
		/* Emit "TYPE [schema.]name", quoting identifiers as needed */
		appendPQExpBuffer(buf, "%s ", type);
		if (te->namespace && *te->namespace)
			appendPQExpBuffer(buf, "%s.", fmtId(te->namespace));
		appendPQExpBufferStr(buf, fmtId(te->tag));
		return;
	}

	/* BLOBs just have a name, but it's numeric so must not use fmtId */
	if (strcmp(type, "BLOB") == 0)
	{
		appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
		return;
	}

	/*
	 * These object types require additional decoration.  Fortunately, the
	 * information needed is exactly what's in the DROP command.
	 */
	if (strcmp(type, "AGGREGATE") == 0 ||
		strcmp(type, "FUNCTION") == 0 ||
		strcmp(type, "OPERATOR") == 0 ||
		strcmp(type, "OPERATOR CLASS") == 0 ||
		strcmp(type, "OPERATOR FAMILY") == 0 ||
		strcmp(type, "PROCEDURE") == 0)
	{
		/* Chop "DROP " off the front and make a modifiable copy */
		char	   *first = pg_strdup(te->dropStmt + 5);
		char	   *last;

		/* point to last character in string */
		last = first + strlen(first) - 1;

		/* Strip off any ';' or '\n' at the end */
		while (last >= first && (*last == '\n' || *last == ';'))
			last--;
		*(last + 1) = '\0';

		appendPQExpBufferStr(buf, first);

		free(first);
		return;
	}

	/* Unrecognized type: warn and emit nothing into buf */
	write_msg(modulename, "WARNING: don't know how to set owner for object type \"%s\"\n",
			  type);
}
3527 | | |
3528 | | /* |
3529 | | * Emit the SQL commands to create the object represented by a TOC entry |
3530 | | * |
3531 | | * This now also includes issuing an ALTER OWNER command to restore the |
3532 | | * object's ownership, if wanted. But note that the object's permissions |
3533 | | * will remain at default, until the matching ACL TOC entry is restored. |
3534 | | */ |
3535 | | static void |
3536 | | _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData) |
3537 | 0 | { |
3538 | 0 | RestoreOptions *ropt = AH->public.ropt; |
3539 | | |
3540 | | /* Select owner, schema, and tablespace as necessary */ |
3541 | 0 | _becomeOwner(AH, te); |
3542 | 0 | _selectOutputSchema(AH, te->namespace); |
3543 | 0 | _selectTablespace(AH, te->tablespace); |
3544 | | |
3545 | | /* Set up OID mode too */ |
3546 | 0 | if (strcmp(te->desc, "TABLE") == 0) |
3547 | 0 | _setWithOids(AH, te); |
3548 | | |
3549 | | /* Emit header comment for item */ |
3550 | 0 | if (!AH->noTocComments) |
3551 | 0 | { |
3552 | 0 | const char *pfx; |
3553 | 0 | char *sanitized_name; |
3554 | 0 | char *sanitized_schema; |
3555 | 0 | char *sanitized_owner; |
3556 | |
|
3557 | 0 | if (isData) |
3558 | 0 | pfx = "Data for "; |
3559 | 0 | else |
3560 | 0 | pfx = ""; |
3561 | |
|
3562 | 0 | ahprintf(AH, "--\n"); |
3563 | 0 | if (AH->public.verbose) |
3564 | 0 | { |
3565 | 0 | ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n", |
3566 | 0 | te->dumpId, te->catalogId.tableoid, te->catalogId.oid); |
3567 | 0 | if (te->nDeps > 0) |
3568 | 0 | { |
3569 | 0 | int i; |
3570 | |
|
3571 | 0 | ahprintf(AH, "-- Dependencies:"); |
3572 | 0 | for (i = 0; i < te->nDeps; i++) |
3573 | 0 | ahprintf(AH, " %d", te->dependencies[i]); |
3574 | 0 | ahprintf(AH, "\n"); |
3575 | 0 | } |
3576 | 0 | } |
3577 | | |
3578 | | /* |
3579 | | * Zap any line endings embedded in user-supplied fields, to prevent |
3580 | | * corruption of the dump (which could, in the worst case, present an |
3581 | | * SQL injection vulnerability if someone were to incautiously load a |
3582 | | * dump containing objects with maliciously crafted names). |
3583 | | */ |
3584 | 0 | sanitized_name = replace_line_endings(te->tag); |
3585 | 0 | if (te->namespace) |
3586 | 0 | sanitized_schema = replace_line_endings(te->namespace); |
3587 | 0 | else |
3588 | 0 | sanitized_schema = pg_strdup("-"); |
3589 | 0 | if (!ropt->noOwner) |
3590 | 0 | sanitized_owner = replace_line_endings(te->owner); |
3591 | 0 | else |
3592 | 0 | sanitized_owner = pg_strdup("-"); |
3593 | |
|
3594 | 0 | ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s", |
3595 | 0 | pfx, sanitized_name, te->desc, sanitized_schema, |
3596 | 0 | sanitized_owner); |
3597 | |
|
3598 | 0 | free(sanitized_name); |
3599 | 0 | free(sanitized_schema); |
3600 | 0 | free(sanitized_owner); |
3601 | |
|
3602 | 0 | if (te->tablespace && strlen(te->tablespace) > 0 && !ropt->noTablespace) |
3603 | 0 | { |
3604 | 0 | char *sanitized_tablespace; |
3605 | |
|
3606 | 0 | sanitized_tablespace = replace_line_endings(te->tablespace); |
3607 | 0 | ahprintf(AH, "; Tablespace: %s", sanitized_tablespace); |
3608 | 0 | free(sanitized_tablespace); |
3609 | 0 | } |
3610 | 0 | ahprintf(AH, "\n"); |
3611 | |
|
3612 | 0 | if (AH->PrintExtraTocPtr != NULL) |
3613 | 0 | AH->PrintExtraTocPtr(AH, te); |
3614 | 0 | ahprintf(AH, "--\n\n"); |
3615 | 0 | } |
3616 | | |
3617 | | /* |
3618 | | * Actually print the definition. |
3619 | | * |
3620 | | * Really crude hack for suppressing AUTHORIZATION clause that old pg_dump |
3621 | | * versions put into CREATE SCHEMA. We have to do this when --no-owner |
3622 | | * mode is selected. This is ugly, but I see no other good way ... |
3623 | | */ |
3624 | 0 | if (ropt->noOwner && strcmp(te->desc, "SCHEMA") == 0) |
3625 | 0 | { |
3626 | 0 | ahprintf(AH, "CREATE SCHEMA %s;\n\n\n", fmtId(te->tag)); |
3627 | 0 | } |
3628 | 0 | else |
3629 | 0 | { |
3630 | 0 | if (strlen(te->defn) > 0) |
3631 | 0 | ahprintf(AH, "%s\n\n", te->defn); |
3632 | 0 | } |
3633 | | |
3634 | | /* |
3635 | | * If we aren't using SET SESSION AUTH to determine ownership, we must |
3636 | | * instead issue an ALTER OWNER command. We assume that anything without |
3637 | | * a DROP command is not a separately ownable object. All the categories |
3638 | | * with DROP commands must appear in one list or the other. |
3639 | | */ |
3640 | 0 | if (!ropt->noOwner && !ropt->use_setsessauth && |
3641 | 0 | strlen(te->owner) > 0 && strlen(te->dropStmt) > 0) |
3642 | 0 | { |
3643 | 0 | if (strcmp(te->desc, "AGGREGATE") == 0 || |
3644 | 0 | strcmp(te->desc, "BLOB") == 0 || |
3645 | 0 | strcmp(te->desc, "COLLATION") == 0 || |
3646 | 0 | strcmp(te->desc, "CONVERSION") == 0 || |
3647 | 0 | strcmp(te->desc, "DATABASE") == 0 || |
3648 | 0 | strcmp(te->desc, "DOMAIN") == 0 || |
3649 | 0 | strcmp(te->desc, "FUNCTION") == 0 || |
3650 | 0 | strcmp(te->desc, "OPERATOR") == 0 || |
3651 | 0 | strcmp(te->desc, "OPERATOR CLASS") == 0 || |
3652 | 0 | strcmp(te->desc, "OPERATOR FAMILY") == 0 || |
3653 | 0 | strcmp(te->desc, "PROCEDURE") == 0 || |
3654 | 0 | strcmp(te->desc, "PROCEDURAL LANGUAGE") == 0 || |
3655 | 0 | strcmp(te->desc, "SCHEMA") == 0 || |
3656 | 0 | strcmp(te->desc, "EVENT TRIGGER") == 0 || |
3657 | 0 | strcmp(te->desc, "TABLE") == 0 || |
3658 | 0 | strcmp(te->desc, "TABLEGROUP") == 0 || |
3659 | 0 | strcmp(te->desc, "TYPE") == 0 || |
3660 | 0 | strcmp(te->desc, "VIEW") == 0 || |
3661 | 0 | strcmp(te->desc, "MATERIALIZED VIEW") == 0 || |
3662 | 0 | strcmp(te->desc, "SEQUENCE") == 0 || |
3663 | 0 | strcmp(te->desc, "FOREIGN TABLE") == 0 || |
3664 | 0 | strcmp(te->desc, "TEXT SEARCH DICTIONARY") == 0 || |
3665 | 0 | strcmp(te->desc, "TEXT SEARCH CONFIGURATION") == 0 || |
3666 | 0 | strcmp(te->desc, "FOREIGN DATA WRAPPER") == 0 || |
3667 | 0 | strcmp(te->desc, "SERVER") == 0 || |
3668 | 0 | strcmp(te->desc, "STATISTICS") == 0 || |
3669 | 0 | strcmp(te->desc, "PUBLICATION") == 0 || |
3670 | 0 | strcmp(te->desc, "SUBSCRIPTION") == 0) |
3671 | 0 | { |
3672 | 0 | PQExpBuffer temp = createPQExpBuffer(); |
3673 | |
|
3674 | 0 | appendPQExpBufferStr(temp, "ALTER "); |
3675 | 0 | _getObjectDescription(temp, te, AH); |
3676 | 0 | appendPQExpBuffer(temp, " OWNER TO %s;", fmtId(te->owner)); |
3677 | 0 | ahprintf(AH, "%s\n\n", temp->data); |
3678 | 0 | destroyPQExpBuffer(temp); |
3679 | 0 | } |
3680 | 0 | else if (strcmp(te->desc, "CAST") == 0 || |
3681 | 0 | strcmp(te->desc, "CHECK CONSTRAINT") == 0 || |
3682 | 0 | strcmp(te->desc, "CONSTRAINT") == 0 || |
3683 | 0 | strcmp(te->desc, "DATABASE PROPERTIES") == 0 || |
3684 | 0 | strcmp(te->desc, "DEFAULT") == 0 || |
3685 | 0 | strcmp(te->desc, "FK CONSTRAINT") == 0 || |
3686 | 0 | strcmp(te->desc, "INDEX") == 0 || |
3687 | 0 | strcmp(te->desc, "RULE") == 0 || |
3688 | 0 | strcmp(te->desc, "TRIGGER") == 0 || |
3689 | 0 | strcmp(te->desc, "ROW SECURITY") == 0 || |
3690 | 0 | strcmp(te->desc, "POLICY") == 0 || |
3691 | 0 | strcmp(te->desc, "USER MAPPING") == 0) |
3692 | 0 | { |
3693 | | /* these object types don't have separate owners */ |
3694 | 0 | } |
3695 | 0 | else |
3696 | 0 | { |
3697 | 0 | write_msg(modulename, "WARNING: don't know how to set owner for object type \"%s\"\n", |
3698 | 0 | te->desc); |
3699 | 0 | } |
3700 | 0 | } |
3701 | | |
3702 | | /* |
3703 | | * If it's an ACL entry, it might contain SET SESSION AUTHORIZATION |
3704 | | * commands, so we can no longer assume we know the current auth setting. |
3705 | | */ |
3706 | 0 | if (_tocEntryIsACL(te)) |
3707 | 0 | { |
3708 | 0 | if (AH->currUser) |
3709 | 0 | free(AH->currUser); |
3710 | 0 | AH->currUser = NULL; |
3711 | 0 | } |
3712 | 0 | } |
3713 | | |
/*
 * Sanitize a string for inclusion in an SQL comment or TOC listing by
 * turning every CR and LF into a plain space, so a maliciously crafted
 * object name cannot inject extra lines into the output.
 * The caller owns (and must free) the freshly malloc'd result.
 */
static char *
replace_line_endings(const char *str)
{
	char	   *copy = pg_strdup(str);
	char	   *p = copy;

	while (*p != '\0')
	{
		if (*p == '\n' || *p == '\r')
			*p = ' ';
		p++;
	}

	return copy;
}
3735 | | |
/*
 * Write the file header for a custom-format archive.
 *
 * The sequence and encoding of the fields written here must stay in
 * exact agreement with ReadHead() (and _discoverArchiveFormat());
 * change them only in lockstep.
 */
void
WriteHead(ArchiveHandle *AH)
{
	struct tm	crtm;

	AH->WriteBufPtr(AH, "PGDMP", 5);	/* Magic code */
	AH->WriteBytePtr(AH, ARCHIVE_MAJOR(AH->version));
	AH->WriteBytePtr(AH, ARCHIVE_MINOR(AH->version));
	AH->WriteBytePtr(AH, ARCHIVE_REV(AH->version));
	AH->WriteBytePtr(AH, AH->intSize);	/* integer width on dumping machine */
	AH->WriteBytePtr(AH, AH->offSize);	/* file-offset width on dumping machine */
	AH->WriteBytePtr(AH, AH->format);	/* archive format code */
	WriteInt(AH, AH->compression);
	/* creation time is stored as broken-down local-time fields */
	crtm = *localtime(&AH->createDate);
	WriteInt(AH, crtm.tm_sec);
	WriteInt(AH, crtm.tm_min);
	WriteInt(AH, crtm.tm_hour);
	WriteInt(AH, crtm.tm_mday);
	WriteInt(AH, crtm.tm_mon);
	WriteInt(AH, crtm.tm_year);
	WriteInt(AH, crtm.tm_isdst);
	WriteStr(AH, PQdb(AH->connection));		/* name of the dumped database */
	WriteStr(AH, AH->public.remoteVersionStr);	/* server version string */
	WriteStr(AH, PG_VERSION);	/* version this pg_dump was built against */
}
3764 | | |
/*
 * Read and validate the file header of a custom-format archive,
 * filling in AH->version, intSize, offSize, compression, createDate,
 * archdbname, and (for newer archives) the remote/dump version strings.
 * Exits via exit_horribly() on any structural problem.
 */
void
ReadHead(ArchiveHandle *AH)
{
	char		tmpMag[7];
	int			fmt;

	/*
	 * If we haven't already read the header, do so.
	 *
	 * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
	 * way to unify the cases?
	 */
	if (!AH->readHeader)
	{
		char		vmaj,
					vmin,
					vrev;

		AH->ReadBufPtr(AH, tmpMag, 5);

		if (strncmp(tmpMag, "PGDMP", 5) != 0)
			exit_horribly(modulename, "did not find magic string in file header\n");

		vmaj = AH->ReadBytePtr(AH);
		vmin = AH->ReadBytePtr(AH);

		/* the revision byte was only added after format version 1.0 */
		if (vmaj > 1 || (vmaj == 1 && vmin > 0))	/* Version > 1.0 */
			vrev = AH->ReadBytePtr(AH);
		else
			vrev = 0;

		AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);

		if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
			exit_horribly(modulename, "unsupported version (%d.%d) in file header\n",
						  vmaj, vmin);

		AH->intSize = AH->ReadBytePtr(AH);
		if (AH->intSize > 32)
			exit_horribly(modulename, "sanity check on integer size (%lu) failed\n",
						  (unsigned long) AH->intSize);

		if (AH->intSize > sizeof(int))
			write_msg(modulename, "WARNING: archive was made on a machine with larger integers, some operations might fail\n");

		/* pre-1.7 archives had no separate offset size; assume intSize */
		if (AH->version >= K_VERS_1_7)
			AH->offSize = AH->ReadBytePtr(AH);
		else
			AH->offSize = AH->intSize;

		fmt = AH->ReadBytePtr(AH);

		if (AH->format != fmt)
			exit_horribly(modulename, "expected format (%d) differs from format found in file (%d)\n",
						  AH->format, fmt);
	}

	/* compression level: one byte before 1.4, a full int from 1.4 on */
	if (AH->version >= K_VERS_1_2)
	{
		if (AH->version < K_VERS_1_4)
			AH->compression = AH->ReadBytePtr(AH);
		else
			AH->compression = ReadInt(AH);
	}
	else
		AH->compression = Z_DEFAULT_COMPRESSION;

#ifndef HAVE_LIBZ
	if (AH->compression != 0)
		write_msg(modulename, "WARNING: archive is compressed, but this installation does not support compression -- no data will be available\n");
#endif

	if (AH->version >= K_VERS_1_4)
	{
		struct tm	crtm;

		crtm.tm_sec = ReadInt(AH);
		crtm.tm_min = ReadInt(AH);
		crtm.tm_hour = ReadInt(AH);
		crtm.tm_mday = ReadInt(AH);
		crtm.tm_mon = ReadInt(AH);
		crtm.tm_year = ReadInt(AH);
		crtm.tm_isdst = ReadInt(AH);

		/*
		 * Newer versions of glibc have mktime() report failure if tm_isdst is
		 * inconsistent with the prevailing timezone, e.g. tm_isdst = 1 when
		 * TZ=UTC.  This is problematic when restoring an archive under a
		 * different timezone setting.  If we get a failure, try again with
		 * tm_isdst set to -1 ("don't know").
		 *
		 * XXX with or without this hack, we reconstruct createDate
		 * incorrectly when the prevailing timezone is different from
		 * pg_dump's.  Next time we bump the archive version, we should flush
		 * this representation and store a plain seconds-since-the-Epoch
		 * timestamp instead.
		 */
		AH->createDate = mktime(&crtm);
		if (AH->createDate == (time_t) -1)
		{
			crtm.tm_isdst = -1;
			AH->createDate = mktime(&crtm);
			if (AH->createDate == (time_t) -1)
				write_msg(modulename,
						  "WARNING: invalid creation date in header\n");
		}
	}

	if (AH->version >= K_VERS_1_4)
	{
		AH->archdbname = ReadStr(AH);
	}

	if (AH->version >= K_VERS_1_10)
	{
		AH->archiveRemoteVersion = ReadStr(AH);
		AH->archiveDumpVersion = ReadStr(AH);
	}
}
3884 | | |
3885 | | |
3886 | | /* |
3887 | | * checkSeek |
3888 | | * check to see if ftell/fseek can be performed. |
3889 | | */ |
3890 | | bool |
3891 | | checkSeek(FILE *fp) |
3892 | 0 | { |
3893 | 0 | pgoff_t tpos; |
3894 | | |
3895 | | /* |
3896 | | * If pgoff_t is wider than long, we must have "real" fseeko and not an |
3897 | | * emulation using fseek. Otherwise report no seek capability. |
3898 | | */ |
3899 | | #ifndef HAVE_FSEEKO |
3900 | | if (sizeof(pgoff_t) > sizeof(long)) |
3901 | | return false; |
3902 | | #endif |
3903 | | |
3904 | | /* Check that ftello works on this file */ |
3905 | 0 | tpos = ftello(fp); |
3906 | 0 | if (tpos < 0) |
3907 | 0 | return false; |
3908 | | |
3909 | | /* |
3910 | | * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test |
3911 | | * this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a |
3912 | | * successful no-op even on files that are otherwise unseekable. |
3913 | | */ |
3914 | 0 | if (fseeko(fp, tpos, SEEK_SET) != 0) |
3915 | 0 | return false; |
3916 | | |
3917 | 0 | return true; |
3918 | 0 | } |
3919 | | |
3920 | | |
3921 | | /* |
3922 | | * dumpTimestamp |
3923 | | */ |
3924 | | static void |
3925 | | dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim) |
3926 | 0 | { |
3927 | 0 | char buf[64]; |
3928 | |
|
3929 | 0 | if (strftime(buf, sizeof(buf), PGDUMP_STRFTIME_FMT, localtime(&tim)) != 0) |
3930 | 0 | ahprintf(AH, "-- %s %s\n\n", msg, buf); |
3931 | 0 | } |
3932 | | |
/*
 * Main engine for parallel restore.
 *
 * Parallel restore is done in three phases.  In this first phase,
 * we'll process all SECTION_PRE_DATA TOC entries that are allowed to be
 * processed in the RESTORE_PASS_MAIN pass.  (In practice, that's all
 * PRE_DATA items other than ACLs.)  Entries we can't process now are
 * added to the pending_list for later phases to deal with.
 *
 * On exit, the parent's database connection has been closed and its
 * cached session state (user/schema/tablespace) reset, ready for the
 * parallel workers to take over.
 */
static void
restore_toc_entries_prefork(ArchiveHandle *AH, TocEntry *pending_list)
{
	bool		skipped_some;
	TocEntry   *next_work_item;

	ahlog(AH, 2, "entering restore_toc_entries_prefork\n");

	/* Adjust dependency information */
	fix_dependencies(AH);

	/*
	 * Do all the early stuff in a single connection in the parent. There's no
	 * great point in running it in parallel, in fact it will actually run
	 * faster in a single connection because we avoid all the connection and
	 * setup overhead.  Also, pre-9.2 pg_dump versions were not very good
	 * about showing all the dependencies of SECTION_PRE_DATA items, so we do
	 * not risk trying to process them out-of-order.
	 *
	 * Stuff that we can't do immediately gets added to the pending_list.
	 * Note: we don't yet filter out entries that aren't going to be restored.
	 * They might participate in dependency chains connecting entries that
	 * should be restored, so we treat them as live until we actually process
	 * them.
	 *
	 * Note: as of 9.2, it should be guaranteed that all PRE_DATA items appear
	 * before DATA items, and all DATA items before POST_DATA items.  That is
	 * not certain to be true in older archives, though, and in any case use
	 * of a list file would destroy that ordering (cf. SortTocFromFile).  So
	 * this loop cannot assume that it holds.
	 */
	AH->restorePass = RESTORE_PASS_MAIN;
	skipped_some = false;
	for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
	{
		bool		do_now = true;

		if (next_work_item->section != SECTION_PRE_DATA)
		{
			/* DATA and POST_DATA items are just ignored for now */
			if (next_work_item->section == SECTION_DATA ||
				next_work_item->section == SECTION_POST_DATA)
			{
				do_now = false;
				skipped_some = true;
			}
			else
			{
				/*
				 * SECTION_NONE items, such as comments, can be processed now
				 * if we are still in the PRE_DATA part of the archive.  Once
				 * we've skipped any items, we have to consider whether the
				 * comment's dependencies are satisfied, so skip it for now.
				 */
				if (skipped_some)
					do_now = false;
			}
		}

		/*
		 * Also skip items that need to be forced into later passes.  We need
		 * not set skipped_some in this case, since by assumption no main-pass
		 * items could depend on these.
		 */
		if (_tocEntryRestorePass(next_work_item) != RESTORE_PASS_MAIN)
			do_now = false;

		if (do_now)
		{
			/* OK, restore the item and update its dependencies */
			ahlog(AH, 1, "processing item %d %s %s\n",
				  next_work_item->dumpId,
				  next_work_item->desc, next_work_item->tag);

			(void) restore_toc_entry(AH, next_work_item, false);

			/* Reduce dependencies, but don't move anything to ready_list */
			reduce_dependencies(AH, next_work_item, NULL);
		}
		else
		{
			/* Nope, so add it to pending_list */
			par_list_append(pending_list, next_work_item);
		}
	}

	/*
	 * Now close parent connection in prep for parallel steps.  We do this
	 * mainly to ensure that we don't exceed the specified number of parallel
	 * connections.
	 */
	DisconnectDatabase(&AH->public);

	/* blow away any transient state from the old connection */
	if (AH->currUser)
		free(AH->currUser);
	AH->currUser = NULL;
	if (AH->currSchema)
		free(AH->currSchema);
	AH->currSchema = NULL;
	if (AH->currTablespace)
		free(AH->currTablespace);
	AH->currTablespace = NULL;
	AH->currWithOids = -1;
}
4047 | | |
/*
 * Main engine for parallel restore.
 *
 * Parallel restore is done in three phases.  In this second phase,
 * we process entries by dispatching them to parallel worker children
 * (processes on Unix, threads on Windows), each of which connects
 * separately to the database.  Inter-entry dependencies are respected,
 * and so is the RestorePass multi-pass structure.  When we can no longer
 * make any entries ready to process, we exit.  Normally, there will be
 * nothing left to do; but if there is, the third phase will mop up.
 */
static void
restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
							 TocEntry *pending_list)
{
	TocEntry	ready_list;
	TocEntry   *next_work_item;

	ahlog(AH, 2, "entering restore_toc_entries_parallel\n");

	/*
	 * The pending_list contains all items that we need to restore.  Move all
	 * items that are available to process immediately into the ready_list.
	 * After this setup, the pending list is everything that needs to be done
	 * but is blocked by one or more dependencies, while the ready list
	 * contains items that have no remaining dependencies and are OK to
	 * process in the current restore pass.
	 */
	par_list_header_init(&ready_list);
	AH->restorePass = RESTORE_PASS_MAIN;
	move_to_ready_list(pending_list, &ready_list, AH->restorePass);

	/*
	 * main parent loop
	 *
	 * Keep going until there is no worker still running AND there is no work
	 * left to be done.  Note invariant: at top of loop, there should always
	 * be at least one worker available to dispatch a job to.
	 */
	ahlog(AH, 1, "entering main parallel loop\n");

	for (;;)
	{
		/* Look for an item ready to be dispatched to a worker */
		next_work_item = get_next_work_item(AH, &ready_list, pstate);
		if (next_work_item != NULL)
		{
			/* If not to be restored, don't waste time launching a worker */
			if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
			{
				ahlog(AH, 1, "skipping item %d %s %s\n",
					  next_work_item->dumpId,
					  next_work_item->desc, next_work_item->tag);
				/* Drop it from ready_list, and update its dependencies */
				par_list_remove(next_work_item);
				reduce_dependencies(AH, next_work_item, &ready_list);
				/* Loop around to see if anything else can be dispatched */
				continue;
			}

			ahlog(AH, 1, "launching item %d %s %s\n",
				  next_work_item->dumpId,
				  next_work_item->desc, next_work_item->tag);

			/* Remove it from ready_list, and dispatch to some worker */
			par_list_remove(next_work_item);

			/* mark_restore_job_done will update deps when the worker ends */
			DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE,
								   mark_restore_job_done, &ready_list);
		}
		else if (IsEveryWorkerIdle(pstate))
		{
			/*
			 * Nothing is ready and no worker is running, so we're done with
			 * the current pass or maybe with the whole process.
			 */
			if (AH->restorePass == RESTORE_PASS_LAST)
				break;			/* No more parallel processing is possible */

			/* Advance to next restore pass */
			AH->restorePass++;
			/* That probably allows some stuff to be made ready */
			move_to_ready_list(pending_list, &ready_list, AH->restorePass);
			/* Loop around to see if anything's now ready */
			continue;
		}
		else
		{
			/*
			 * We have nothing ready, but at least one child is working, so
			 * wait for some subjob to finish.
			 */
		}

		/*
		 * Before dispatching another job, check to see if anything has
		 * finished.  We should check every time through the loop so as to
		 * reduce dependencies as soon as possible.  If we were unable to
		 * dispatch any job this time through, wait until some worker finishes
		 * (and, hopefully, unblocks some pending item).  If we did dispatch
		 * something, continue as soon as there's at least one idle worker.
		 * Note that in either case, there's guaranteed to be at least one
		 * idle worker when we return to the top of the loop.  This ensures we
		 * won't block inside DispatchJobForTocEntry, which would be
		 * undesirable: we'd rather postpone dispatching until we see what's
		 * been unblocked by finished jobs.
		 */
		WaitForWorkers(AH, pstate,
					   next_work_item ? WFW_ONE_IDLE : WFW_GOT_STATUS);
	}

	/* There should now be nothing in ready_list. */
	Assert(ready_list.par_next == &ready_list);

	ahlog(AH, 1, "finished main parallel loop\n");
}
4164 | | |
/*
 * Main engine for parallel restore.
 *
 * Parallel restore is done in three phases.  In this third phase,
 * we mop up any remaining TOC entries by processing them serially.
 * This phase normally should have nothing to do, but if we've somehow
 * gotten stuck due to circular dependencies or some such, this provides
 * at least some chance of completing the restore successfully.
 */
static void
restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
{
	RestoreOptions *ropt = AH->public.ropt;
	TocEntry   *te;

	ahlog(AH, 2, "entering restore_toc_entries_postfork\n");

	/*
	 * Now reconnect the single parent connection.
	 */
	ConnectDatabase((Archive *) AH, &ropt->cparams, true);

	/* re-establish fixed state (prefork closed the old connection) */
	_doSetFixedOutputState(AH);

	/*
	 * Make sure there is no work left due to, say, circular dependencies, or
	 * some other pathological condition.  If so, do it in the single parent
	 * connection.  We don't sweat about RestorePass ordering; it's likely we
	 * already violated that.
	 */
	for (te = pending_list->par_next; te != pending_list; te = te->par_next)
	{
		ahlog(AH, 1, "processing missed item %d %s %s\n",
			  te->dumpId, te->desc, te->tag);
		(void) restore_toc_entry(AH, te, false);
	}
}
4203 | | |
4204 | | /* |
4205 | | * Check if te1 has an exclusive lock requirement for an item that te2 also |
4206 | | * requires, whether or not te2's requirement is for an exclusive lock. |
4207 | | */ |
4208 | | static bool |
4209 | | has_lock_conflicts(TocEntry *te1, TocEntry *te2) |
4210 | 0 | { |
4211 | 0 | int j, |
4212 | 0 | k; |
4213 | |
|
4214 | 0 | for (j = 0; j < te1->nLockDeps; j++) |
4215 | 0 | { |
4216 | 0 | for (k = 0; k < te2->nDeps; k++) |
4217 | 0 | { |
4218 | 0 | if (te1->lockDeps[j] == te2->dependencies[k]) |
4219 | 0 | return true; |
4220 | 0 | } |
4221 | 0 | } |
4222 | 0 | return false; |
4223 | 0 | } |
4224 | | |
4225 | | |
4226 | | /* |
4227 | | * Initialize the header of a parallel-processing list. |
4228 | | * |
4229 | | * These are circular lists with a dummy TocEntry as header, just like the |
4230 | | * main TOC list; but we use separate list links so that an entry can be in |
4231 | | * the main TOC list as well as in a parallel-processing list. |
4232 | | */ |
4233 | | static void |
4234 | | par_list_header_init(TocEntry *l) |
4235 | 0 | { |
4236 | 0 | l->par_prev = l->par_next = l; |
4237 | 0 | } |
4238 | | |
4239 | | /* Append te to the end of the parallel-processing list headed by l */ |
4240 | | static void |
4241 | | par_list_append(TocEntry *l, TocEntry *te) |
4242 | 0 | { |
4243 | 0 | te->par_prev = l->par_prev; |
4244 | 0 | l->par_prev->par_next = te; |
4245 | 0 | l->par_prev = te; |
4246 | 0 | te->par_next = l; |
4247 | 0 | } |
4248 | | |
4249 | | /* Remove te from whatever parallel-processing list it's in */ |
4250 | | static void |
4251 | | par_list_remove(TocEntry *te) |
4252 | 0 | { |
4253 | 0 | te->par_prev->par_next = te->par_next; |
4254 | 0 | te->par_next->par_prev = te->par_prev; |
4255 | 0 | te->par_prev = NULL; |
4256 | 0 | te->par_next = NULL; |
4257 | 0 | } |
4258 | | |
4259 | | |
4260 | | /* |
4261 | | * Move all immediately-ready items from pending_list to ready_list. |
4262 | | * |
4263 | | * Items are considered ready if they have no remaining dependencies and |
4264 | | * they belong in the current restore pass. (See also reduce_dependencies, |
4265 | | * which applies the same logic one-at-a-time.) |
4266 | | */ |
4267 | | static void |
4268 | | move_to_ready_list(TocEntry *pending_list, TocEntry *ready_list, |
4269 | | RestorePass pass) |
4270 | 0 | { |
4271 | 0 | TocEntry *te; |
4272 | 0 | TocEntry *next_te; |
4273 | |
|
4274 | 0 | for (te = pending_list->par_next; te != pending_list; te = next_te) |
4275 | 0 | { |
4276 | | /* must save list link before possibly moving te to other list */ |
4277 | 0 | next_te = te->par_next; |
4278 | |
|
4279 | 0 | if (te->depCount == 0 && |
4280 | 0 | _tocEntryRestorePass(te) == pass) |
4281 | 0 | { |
4282 | | /* Remove it from pending_list ... */ |
4283 | 0 | par_list_remove(te); |
4284 | | /* ... and add to ready_list */ |
4285 | 0 | par_list_append(ready_list, te); |
4286 | 0 | } |
4287 | 0 | } |
4288 | 0 | } |
4289 | | |
/*
 * Find the next work item (if any) that is capable of being run now.
 *
 * To qualify, the item must have no remaining dependencies
 * and no requirements for locks that are incompatible with
 * items currently running.  Items in the ready_list are known to have
 * no remaining dependencies, but we have to check for lock conflicts.
 *
 * Note that the returned item has *not* been removed from ready_list.
 * The caller must do that after successfully dispatching the item.
 *
 * pref_non_data is for an alternative selection algorithm that gives
 * preference to non-data items if there is already a data load running.
 * It is currently disabled.
 */
static TocEntry *
get_next_work_item(ArchiveHandle *AH, TocEntry *ready_list,
				   ParallelState *pstate)
{
	bool		pref_non_data = false;	/* or get from AH->ropt */
	TocEntry   *data_te = NULL;
	TocEntry   *te;
	int			i,
				k;

	/*
	 * Bogus heuristics for pref_non_data: only prefer non-data items while
	 * at least a quarter of the workers are busy with data loads.
	 */
	if (pref_non_data)
	{
		int			count = 0;

		for (k = 0; k < pstate->numWorkers; k++)
		{
			TocEntry   *running_te = pstate->te[k];

			if (running_te != NULL &&
				running_te->section == SECTION_DATA)
				count++;
		}
		if (pstate->numWorkers == 0 || count * 4 < pstate->numWorkers)
			pref_non_data = false;
	}

	/*
	 * Search the ready_list until we find a suitable item.
	 */
	for (te = ready_list->par_next; te != ready_list; te = te->par_next)
	{
		bool		conflicts = false;

		/*
		 * Check to see if the item would need exclusive lock on something
		 * that a currently running item also needs lock on, or vice versa. If
		 * so, we don't want to schedule them together.
		 */
		for (i = 0; i < pstate->numWorkers; i++)
		{
			TocEntry   *running_te = pstate->te[i];

			if (running_te == NULL)
				continue;
			if (has_lock_conflicts(te, running_te) ||
				has_lock_conflicts(running_te, te))
			{
				conflicts = true;
				break;
			}
		}

		if (conflicts)
			continue;

		/* remember the first data item in case nothing else qualifies */
		if (pref_non_data && te->section == SECTION_DATA)
		{
			if (data_te == NULL)
				data_te = te;
			continue;
		}

		/* passed all tests, so this item can run */
		return te;
	}

	/* fall back to a deferred data item if that's all we found */
	if (data_te != NULL)
		return data_te;

	ahlog(AH, 2, "no item ready\n");
	return NULL;
}
4380 | | |
4381 | | |
4382 | | /* |
4383 | | * Restore a single TOC item in parallel with others |
4384 | | * |
4385 | | * this is run in the worker, i.e. in a thread (Windows) or a separate process |
4386 | | * (everything else). A worker process executes several such work items during |
4387 | | * a parallel backup or restore. Once we terminate here and report back that |
4388 | | * our work is finished, the master process will assign us a new work item. |
4389 | | */ |
4390 | | int |
4391 | | parallel_restore(ArchiveHandle *AH, TocEntry *te) |
4392 | 0 | { |
4393 | 0 | int status; |
4394 | |
|
4395 | 0 | Assert(AH->connection != NULL); |
4396 | | |
4397 | | /* Count only errors associated with this TOC entry */ |
4398 | 0 | AH->public.n_errors = 0; |
4399 | | |
4400 | | /* Restore the TOC item */ |
4401 | 0 | status = restore_toc_entry(AH, te, true); |
4402 | |
|
4403 | 0 | return status; |
4404 | 0 | } |
4405 | | |
4406 | | |
4407 | | /* |
4408 | | * Callback function that's invoked in the master process after a step has |
4409 | | * been parallel restored. |
4410 | | * |
4411 | | * Update status and reduce the dependency count of any dependent items. |
4412 | | */ |
4413 | | static void |
4414 | | mark_restore_job_done(ArchiveHandle *AH, |
4415 | | TocEntry *te, |
4416 | | int status, |
4417 | | void *callback_data) |
4418 | 0 | { |
4419 | 0 | TocEntry *ready_list = (TocEntry *) callback_data; |
4420 | |
|
4421 | 0 | ahlog(AH, 1, "finished item %d %s %s\n", |
4422 | 0 | te->dumpId, te->desc, te->tag); |
4423 | |
|
4424 | 0 | if (status == WORKER_CREATE_DONE) |
4425 | 0 | mark_create_done(AH, te); |
4426 | 0 | else if (status == WORKER_INHIBIT_DATA) |
4427 | 0 | { |
4428 | 0 | inhibit_data_for_failed_table(AH, te); |
4429 | 0 | AH->public.n_errors++; |
4430 | 0 | } |
4431 | 0 | else if (status == WORKER_IGNORED_ERRORS) |
4432 | 0 | AH->public.n_errors++; |
4433 | 0 | else if (status != 0) |
4434 | 0 | exit_horribly(modulename, "worker process failed: exit code %d\n", |
4435 | 0 | status); |
4436 | | |
4437 | 0 | reduce_dependencies(AH, te, ready_list); |
4438 | 0 | } |
4439 | | |
4440 | | |
4441 | | /* |
4442 | | * Process the dependency information into a form useful for parallel restore. |
4443 | | * |
4444 | | * This function takes care of fixing up some missing or badly designed |
4445 | | * dependencies, and then prepares subsidiary data structures that will be |
4446 | | * used in the main parallel-restore logic, including: |
4447 | | * 1. We build the revDeps[] arrays of incoming dependency dumpIds. |
4448 | | * 2. We set up depCount fields that are the number of as-yet-unprocessed |
4449 | | * dependencies for each TOC entry. |
4450 | | * |
4451 | | * We also identify locking dependencies so that we can avoid trying to |
4452 | | * schedule conflicting items at the same time. |
4453 | | */ |
4454 | | static void |
4455 | | fix_dependencies(ArchiveHandle *AH) |
4456 | 0 | { |
4457 | 0 | TocEntry *te; |
4458 | 0 | int i; |
4459 | | |
4460 | | /* |
4461 | | * Initialize the depCount/revDeps/nRevDeps fields, and make sure the TOC |
4462 | | * items are marked as not being in any parallel-processing list. |
4463 | | */ |
4464 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
4465 | 0 | { |
4466 | 0 | te->depCount = te->nDeps; |
4467 | 0 | te->revDeps = NULL; |
4468 | 0 | te->nRevDeps = 0; |
4469 | 0 | te->par_prev = NULL; |
4470 | 0 | te->par_next = NULL; |
4471 | 0 | } |
4472 | | |
4473 | | /* |
4474 | | * POST_DATA items that are shown as depending on a table need to be |
4475 | | * re-pointed to depend on that table's data, instead. This ensures they |
4476 | | * won't get scheduled until the data has been loaded. |
4477 | | */ |
4478 | 0 | repoint_table_dependencies(AH); |
4479 | | |
4480 | | /* |
4481 | | * Pre-8.4 versions of pg_dump neglected to set up a dependency from BLOB |
4482 | | * COMMENTS to BLOBS. Cope. (We assume there's only one BLOBS and only |
4483 | | * one BLOB COMMENTS in such files.) |
4484 | | */ |
4485 | 0 | if (AH->version < K_VERS_1_11) |
4486 | 0 | { |
4487 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
4488 | 0 | { |
4489 | 0 | if (strcmp(te->desc, "BLOB COMMENTS") == 0 && te->nDeps == 0) |
4490 | 0 | { |
4491 | 0 | TocEntry *te2; |
4492 | |
|
4493 | 0 | for (te2 = AH->toc->next; te2 != AH->toc; te2 = te2->next) |
4494 | 0 | { |
4495 | 0 | if (strcmp(te2->desc, "BLOBS") == 0) |
4496 | 0 | { |
4497 | 0 | te->dependencies = (DumpId *) pg_malloc(sizeof(DumpId)); |
4498 | 0 | te->dependencies[0] = te2->dumpId; |
4499 | 0 | te->nDeps++; |
4500 | 0 | te->depCount++; |
4501 | 0 | break; |
4502 | 0 | } |
4503 | 0 | } |
4504 | 0 | break; |
4505 | 0 | } |
4506 | 0 | } |
4507 | 0 | } |
4508 | | |
4509 | | /* |
4510 | | * At this point we start to build the revDeps reverse-dependency arrays, |
4511 | | * so all changes of dependencies must be complete. |
4512 | | */ |
4513 | | |
4514 | | /* |
4515 | | * Count the incoming dependencies for each item. Also, it is possible |
4516 | | * that the dependencies list items that are not in the archive at all |
4517 | | * (that should not happen in 9.2 and later, but is highly likely in older |
4518 | | * archives). Subtract such items from the depCounts. |
4519 | | */ |
4520 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
4521 | 0 | { |
4522 | 0 | for (i = 0; i < te->nDeps; i++) |
4523 | 0 | { |
4524 | 0 | DumpId depid = te->dependencies[i]; |
4525 | |
|
4526 | 0 | if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL) |
4527 | 0 | AH->tocsByDumpId[depid]->nRevDeps++; |
4528 | 0 | else |
4529 | 0 | te->depCount--; |
4530 | 0 | } |
4531 | 0 | } |
4532 | | |
4533 | | /* |
4534 | | * Allocate space for revDeps[] arrays, and reset nRevDeps so we can use |
4535 | | * it as a counter below. |
4536 | | */ |
4537 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
4538 | 0 | { |
4539 | 0 | if (te->nRevDeps > 0) |
4540 | 0 | te->revDeps = (DumpId *) pg_malloc(te->nRevDeps * sizeof(DumpId)); |
4541 | 0 | te->nRevDeps = 0; |
4542 | 0 | } |
4543 | | |
4544 | | /* |
4545 | | * Build the revDeps[] arrays of incoming-dependency dumpIds. This had |
4546 | | * better agree with the loops above. |
4547 | | */ |
4548 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
4549 | 0 | { |
4550 | 0 | for (i = 0; i < te->nDeps; i++) |
4551 | 0 | { |
4552 | 0 | DumpId depid = te->dependencies[i]; |
4553 | |
|
4554 | 0 | if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL) |
4555 | 0 | { |
4556 | 0 | TocEntry *otherte = AH->tocsByDumpId[depid]; |
4557 | |
|
4558 | 0 | otherte->revDeps[otherte->nRevDeps++] = te->dumpId; |
4559 | 0 | } |
4560 | 0 | } |
4561 | 0 | } |
4562 | | |
4563 | | /* |
4564 | | * Lastly, work out the locking dependencies. |
4565 | | */ |
4566 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
4567 | 0 | { |
4568 | 0 | te->lockDeps = NULL; |
4569 | 0 | te->nLockDeps = 0; |
4570 | 0 | identify_locking_dependencies(AH, te); |
4571 | 0 | } |
4572 | 0 | } |
4573 | | |
4574 | | /* |
4575 | | * Change dependencies on table items to depend on table data items instead, |
4576 | | * but only in POST_DATA items. |
4577 | | */ |
4578 | | static void |
4579 | | repoint_table_dependencies(ArchiveHandle *AH) |
4580 | 0 | { |
4581 | 0 | TocEntry *te; |
4582 | 0 | int i; |
4583 | 0 | DumpId olddep; |
4584 | |
|
4585 | 0 | for (te = AH->toc->next; te != AH->toc; te = te->next) |
4586 | 0 | { |
4587 | 0 | if (te->section != SECTION_POST_DATA) |
4588 | 0 | continue; |
4589 | 0 | for (i = 0; i < te->nDeps; i++) |
4590 | 0 | { |
4591 | 0 | olddep = te->dependencies[i]; |
4592 | 0 | if (olddep <= AH->maxDumpId && |
4593 | 0 | AH->tableDataId[olddep] != 0) |
4594 | 0 | { |
4595 | 0 | te->dependencies[i] = AH->tableDataId[olddep]; |
4596 | 0 | ahlog(AH, 2, "transferring dependency %d -> %d to %d\n", |
4597 | 0 | te->dumpId, olddep, AH->tableDataId[olddep]); |
4598 | 0 | } |
4599 | 0 | } |
4600 | 0 | } |
4601 | 0 | } |
4602 | | |
4603 | | /* |
4604 | | * Identify which objects we'll need exclusive lock on in order to restore |
4605 | | * the given TOC entry (*other* than the one identified by the TOC entry |
4606 | | * itself). Record their dump IDs in the entry's lockDeps[] array. |
4607 | | */ |
4608 | | static void |
4609 | | identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te) |
4610 | 0 | { |
4611 | 0 | DumpId *lockids; |
4612 | 0 | int nlockids; |
4613 | 0 | int i; |
4614 | | |
4615 | | /* |
4616 | | * We only care about this for POST_DATA items. PRE_DATA items are not |
4617 | | * run in parallel, and DATA items are all independent by assumption. |
4618 | | */ |
4619 | 0 | if (te->section != SECTION_POST_DATA) |
4620 | 0 | return; |
4621 | | |
4622 | | /* Quick exit if no dependencies at all */ |
4623 | 0 | if (te->nDeps == 0) |
4624 | 0 | return; |
4625 | | |
4626 | | /* |
4627 | | * Most POST_DATA items are ALTER TABLEs or some moral equivalent of that, |
4628 | | * and hence require exclusive lock. However, we know that CREATE INDEX |
4629 | | * does not. (Maybe someday index-creating CONSTRAINTs will fall in that |
4630 | | * category too ... but today is not that day.) |
4631 | | */ |
4632 | 0 | if (strcmp(te->desc, "INDEX") == 0) |
4633 | 0 | return; |
4634 | | |
4635 | | /* |
4636 | | * We assume the entry requires exclusive lock on each TABLE or TABLE DATA |
4637 | | * item listed among its dependencies. Originally all of these would have |
4638 | | * been TABLE items, but repoint_table_dependencies would have repointed |
4639 | | * them to the TABLE DATA items if those are present (which they might not |
4640 | | * be, eg in a schema-only dump). Note that all of the entries we are |
4641 | | * processing here are POST_DATA; otherwise there might be a significant |
4642 | | * difference between a dependency on a table and a dependency on its |
4643 | | * data, so that closer analysis would be needed here. |
4644 | | */ |
4645 | 0 | lockids = (DumpId *) pg_malloc(te->nDeps * sizeof(DumpId)); |
4646 | 0 | nlockids = 0; |
4647 | 0 | for (i = 0; i < te->nDeps; i++) |
4648 | 0 | { |
4649 | 0 | DumpId depid = te->dependencies[i]; |
4650 | |
|
4651 | 0 | if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL && |
4652 | 0 | ((strcmp(AH->tocsByDumpId[depid]->desc, "TABLE DATA") == 0) || |
4653 | 0 | strcmp(AH->tocsByDumpId[depid]->desc, "TABLE") == 0)) |
4654 | 0 | lockids[nlockids++] = depid; |
4655 | 0 | } |
4656 | |
|
4657 | 0 | if (nlockids == 0) |
4658 | 0 | { |
4659 | 0 | free(lockids); |
4660 | 0 | return; |
4661 | 0 | } |
4662 | | |
4663 | 0 | te->lockDeps = pg_realloc(lockids, nlockids * sizeof(DumpId)); |
4664 | 0 | te->nLockDeps = nlockids; |
4665 | 0 | } |
4666 | | |
4667 | | /* |
4668 | | * Remove the specified TOC entry from the depCounts of items that depend on |
4669 | | * it, thereby possibly making them ready-to-run. Any pending item that |
4670 | | * becomes ready should be moved to the ready_list, if that's provided. |
4671 | | */ |
4672 | | static void |
4673 | | reduce_dependencies(ArchiveHandle *AH, TocEntry *te, TocEntry *ready_list) |
4674 | 0 | { |
4675 | 0 | int i; |
4676 | |
|
4677 | 0 | ahlog(AH, 2, "reducing dependencies for %d\n", te->dumpId); |
4678 | |
|
4679 | 0 | for (i = 0; i < te->nRevDeps; i++) |
4680 | 0 | { |
4681 | 0 | TocEntry *otherte = AH->tocsByDumpId[te->revDeps[i]]; |
4682 | |
|
4683 | 0 | Assert(otherte->depCount > 0); |
4684 | 0 | otherte->depCount--; |
4685 | | |
4686 | | /* |
4687 | | * It's ready if it has no remaining dependencies, and it belongs in |
4688 | | * the current restore pass, and it is currently a member of the |
4689 | | * pending list (that check is needed to prevent double restore in |
4690 | | * some cases where a list-file forces out-of-order restoring). |
4691 | | * However, if ready_list == NULL then caller doesn't want any list |
4692 | | * memberships changed. |
4693 | | */ |
4694 | 0 | if (otherte->depCount == 0 && |
4695 | 0 | _tocEntryRestorePass(otherte) == AH->restorePass && |
4696 | 0 | otherte->par_prev != NULL && |
4697 | 0 | ready_list != NULL) |
4698 | 0 | { |
4699 | | /* Remove it from pending list ... */ |
4700 | 0 | par_list_remove(otherte); |
4701 | | /* ... and add to ready_list */ |
4702 | 0 | par_list_append(ready_list, otherte); |
4703 | 0 | } |
4704 | 0 | } |
4705 | 0 | } |
4706 | | |
4707 | | /* |
4708 | | * Set the created flag on the DATA member corresponding to the given |
4709 | | * TABLE member |
4710 | | */ |
4711 | | static void |
4712 | | mark_create_done(ArchiveHandle *AH, TocEntry *te) |
4713 | 0 | { |
4714 | 0 | if (AH->tableDataId[te->dumpId] != 0) |
4715 | 0 | { |
4716 | 0 | TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]]; |
4717 | |
|
4718 | 0 | ted->created = true; |
4719 | 0 | } |
4720 | 0 | } |
4721 | | |
4722 | | /* |
4723 | | * Mark the DATA member corresponding to the given TABLE member |
4724 | | * as not wanted |
4725 | | */ |
4726 | | static void |
4727 | | inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te) |
4728 | 0 | { |
4729 | 0 | ahlog(AH, 1, "table \"%s\" could not be created, will not restore its data\n", |
4730 | 0 | te->tag); |
4731 | |
|
4732 | 0 | if (AH->tableDataId[te->dumpId] != 0) |
4733 | 0 | { |
4734 | 0 | TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]]; |
4735 | |
|
4736 | 0 | ted->reqs = 0; |
4737 | 0 | } |
4738 | 0 | } |
4739 | | |
4740 | | /* |
4741 | | * Clone and de-clone routines used in parallel restoration. |
4742 | | * |
4743 | | * Enough of the structure is cloned to ensure that there is no |
4744 | | * conflict between different threads each with their own clone. |
4745 | | */ |
4746 | | ArchiveHandle * |
4747 | | CloneArchive(ArchiveHandle *AH) |
4748 | 0 | { |
4749 | 0 | ArchiveHandle *clone; |
4750 | | |
4751 | | /* Make a "flat" copy */ |
4752 | 0 | clone = (ArchiveHandle *) pg_malloc(sizeof(ArchiveHandle)); |
4753 | 0 | memcpy(clone, AH, sizeof(ArchiveHandle)); |
4754 | | |
4755 | | /* Handle format-independent fields */ |
4756 | 0 | memset(&(clone->sqlparse), 0, sizeof(clone->sqlparse)); |
4757 | | |
4758 | | /* The clone will have its own connection, so disregard connection state */ |
4759 | 0 | clone->connection = NULL; |
4760 | 0 | clone->connCancel = NULL; |
4761 | 0 | clone->currUser = NULL; |
4762 | 0 | clone->currSchema = NULL; |
4763 | 0 | clone->currTablespace = NULL; |
4764 | 0 | clone->currWithOids = -1; |
4765 | | |
4766 | | /* savedPassword must be local in case we change it while connecting */ |
4767 | 0 | if (clone->savedPassword) |
4768 | 0 | clone->savedPassword = pg_strdup(clone->savedPassword); |
4769 | | |
4770 | | /* clone has its own error count, too */ |
4771 | 0 | clone->public.n_errors = 0; |
4772 | | |
4773 | | /* |
4774 | | * Connect our new clone object to the database, using the same connection |
4775 | | * parameters used for the original connection. |
4776 | | */ |
4777 | 0 | ConnectDatabase((Archive *) clone, &clone->public.ropt->cparams, true); |
4778 | | |
4779 | | /* re-establish fixed state */ |
4780 | 0 | if (AH->mode == archModeRead) |
4781 | 0 | _doSetFixedOutputState(clone); |
4782 | | /* in write case, setupDumpWorker will fix up connection state */ |
4783 | | |
4784 | | /* Let the format-specific code have a chance too */ |
4785 | 0 | clone->ClonePtr(clone); |
4786 | |
|
4787 | 0 | Assert(clone->connection != NULL); |
4788 | 0 | return clone; |
4789 | 0 | } |
4790 | | |
4791 | | /* |
4792 | | * Release clone-local storage. |
4793 | | * |
4794 | | * Note: we assume any clone-local connection was already closed. |
4795 | | */ |
4796 | | void |
4797 | | DeCloneArchive(ArchiveHandle *AH) |
4798 | 0 | { |
4799 | | /* Should not have an open database connection */ |
4800 | 0 | Assert(AH->connection == NULL); |
4801 | | |
4802 | | /* Clear format-specific state */ |
4803 | 0 | AH->DeClonePtr(AH); |
4804 | | |
4805 | | /* Clear state allocated by CloneArchive */ |
4806 | 0 | if (AH->sqlparse.curCmd) |
4807 | 0 | destroyPQExpBuffer(AH->sqlparse.curCmd); |
4808 | | |
4809 | | /* Clear any connection-local state */ |
4810 | 0 | if (AH->currUser) |
4811 | 0 | free(AH->currUser); |
4812 | 0 | if (AH->currSchema) |
4813 | 0 | free(AH->currSchema); |
4814 | 0 | if (AH->currTablespace) |
4815 | 0 | free(AH->currTablespace); |
4816 | 0 | if (AH->savedPassword) |
4817 | 0 | free(AH->savedPassword); |
4818 | |
|
4819 | 0 | free(AH); |
4820 | 0 | } |