#include "catalog/pg_control.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
+#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgwriter.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "utils/builtins.h"
+#include "utils/flatfiles.h"
#include "utils/guc.h"
#include "utils/ps_status.h"
#include "pg_trace.h"
static TimestampTz recoveryStopTime;
static bool recoveryStopAfter;
-/* is the database in consistent state yet? */
-static bool reachedSafeStartPoint = false;
-
/*
* During normal operation, the only timeline we care about is ThisTimeLineID.
* During recovery, however, things are more complicated. To simplify life
static bool InRedo = false;
+/*
+ * Flag set by interrupt handlers for later service in the redo loop.
+ */
+static volatile sig_atomic_t shutdown_requested = false;
+
static void XLogArchiveNotify(const char *xlog);
static void XLogArchiveNotifySeg(uint32 log, uint32 seg);
* those it's a good bet we should have gotten it too. Aborting on other
* signals such as SIGTERM seems a good idea as well.
*
+ * However, if we were requested to terminate, we don't really care what
+ * happened to the restore command, so we just exit cleanly. In fact,
+ * the restore command most likely received the SIGTERM too, and we don't
+ * want to complain about that.
+ *
* Per the Single Unix Spec, shells report exit status > 128 when a called
* command died on a signal. Also, 126 and 127 are used to report
* problems such as an unfindable command; treat those as fatal errors
* too.
*/
+ if (shutdown_requested && InRedo)
+ {
+ /* XXX: We should update minSafeStartPoint to the exact value here */
+ proc_exit(0);
+ }
+
signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
ereport(signaled ? FATAL : DEBUG2,
CheckPoint checkPoint;
bool wasShutdown;
bool reachedStopPoint = false;
+ bool reachedSafeStartPoint = false;
bool performedRecovery = false;
bool haveBackupLabel = false;
XLogRecPtr RecPtr,
*/
minSafeStartPoint = ControlFile->minSafeStartPoint;
+ /* Let postmaster know we've started redo now */
+ if (InArchiveRecovery && IsUnderPostmaster)
+ SendPostmasterSignal(PMSIGNAL_RECOVERY_STARTED);
+
/*
* main redo apply loop
*/
}
#endif
+ /*
+ * Process any requests or signals received recently.
+ */
+ if (shutdown_requested)
+ {
+ /*
+ * We were requested to exit without finishing recovery.
+ *
+ * XXX: We should update minSafeStartPoint to the exact
+ * value here.
+ */
+ proc_exit(0);
+ }
+
/*
* Have we reached our safe starting point? If so, we can
* signal postmaster to enter consistent recovery mode.
(errmsg("consistent recovery state reached at %X/%X",
EndRecPtr.xlogid, EndRecPtr.xrecoff)));
if (IsUnderPostmaster)
- SendPostmasterSignal(PMSIGNAL_RECOVERY_START);
+ SendPostmasterSignal(PMSIGNAL_RECOVERY_CONSISTENT);
}
}
performedRecovery = true;
}
+ /* start the archive_timeout timer running */
+ XLogCtl->Write.lastSegSwitchTime = (pg_time_t) time(NULL);
+
/* initialize shared-memory copy of latest checkpoint XID/epoch */
XLogCtl->ckptXidEpoch = ControlFile->checkPointCopy.nextXidEpoch;
XLogCtl->ckptXid = ControlFile->checkPointCopy.nextXid;
/*
* If we had to replay any WAL records, request a checkpoint. This isn't
* strictly necessary: if we crash now, the recovery will simply restart
- * from the same point where it started this time around (or from the
- * last restartpoint). The control file is left in DB_IN_*_RECOVERY
- * state; the first checkpoint will change that to DB_IN_PRODUCTION.
+ * from the same point as this time (or from the last restartpoint). The
+ * control file is left in DB_IN_*_RECOVERY state; the first checkpoint
+ * will change that to DB_IN_PRODUCTION.
*/
if (performedRecovery)
{
*/
XLogCtl->SharedRecoveryProcessingMode = false;
}
-
- /* start the archive_timeout timer running */
- XLogCtl->Write.lastSegSwitchTime = (pg_time_t) time(NULL);
-
}
/*
- * Is the system is still in recovery?
+ * Is the system still in recovery?
*
* As a side-effect, we initialize the local TimeLineID and RedoRecPtr
* variables the first time we see that recovery is finished.
ereport(LOG,
(errmsg("shutting down")));
- CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE);
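+	/*
+	 * While in recovery we cannot write a normal shutdown checkpoint (that
+	 * would generate new WAL), so record a shutdown restartpoint instead.
+	 */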
+ if (IsRecoveryProcessingMode())
+ CreateRestartPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE);
+ else
+ CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE);
ShutdownCLOG();
ShutdownSUBTRANS();
ShutdownMultiXact();
TransactionId *inCommitXids;
int nInCommit;
+ /* shouldn't happen */
+ if (IsRecoveryProcessingMode())
+ elog(ERROR, "can't create a checkpoint during recovery");
+
/*
* Acquire CheckpointLock to ensure only one checkpoint happens at a time.
* (This is just pro forma, since in the present system structure there is
XLByteToSeg(ControlFile->checkPointCopy.redo, _logId, _logSeg);
/*
- * Update the control file. In 8.4, this routine becomes the primary
- * point for recording changes of state in the control file at the
- * end of recovery. Postmaster state already shows us being in
- * normal running mode, but it is only after this point that we
- * are completely free of reperforming a recovery if we crash. Note
- * that this is executed by bgwriter after the death of Startup process.
+	 * Update the control file. This also sets state to DB_IN_PRODUCTION
+ * if this is the first checkpoint after recovery.
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
if (shutdown)
}
/*
- * Store checkpoint record in shared memory, so that it can be used as a
- * restartpoint. This function is called each time a checkpoint record is
- * read from XLOG.
+ * This is used during WAL recovery to establish a point from which recovery
+ * can roll forward without replaying the entire recovery log. This function
+ * is called each time a checkpoint record is read from XLOG. The checkpoint
+ * record is stored in shared memory, so that it can be used as a
+ * restartpoint later on.
*/
static void
RecoveryRestartPoint(const CheckPoint *checkPoint)
XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
memcpy(&XLogCtl->lastCheckPoint, checkPoint, sizeof(CheckPoint));
SpinLockRelease(&xlogctl->info_lck);
-
- /*
- * XXX: Should we try to perform restartpoints if we're not in consistent
- * recovery? The bgwriter isn't doing it for us in that case.
- */
}
/*
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
+ /*
+ * Acquire CheckpointLock to ensure only one restartpoint happens at a
+ * time. (This is just pro forma, since in the present system structure
+ * there is only one process that is allowed to issue checkpoints or
+ * restart points at any given time.)
+ */
+ LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
+
/* Get the a local copy of the last checkpoint record. */
SpinLockAcquire(&xlogctl->info_lck);
lastCheckPointRecPtr = xlogctl->lastCheckPointRecPtr;
ereport(DEBUG2,
(errmsg("skipping restartpoint, already performed at %X/%X",
lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff)));
+ LWLockRelease(CheckpointLock);
return;
}
- /*
- * Acquire CheckpointLock to ensure only one restartpoint happens at a time.
- * We rely on this lock to ensure that the startup process doesn't exit
- * Recovery while we are half way through a restartpoint. XXX ?
+ /*
+ * Check that we're still in recovery mode. It's ok if we exit recovery
+	 * mode after this check; the restart point is valid anyway.
*/
- LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
-
- /* Check that we're still in recovery mode. */
if (!IsRecoveryProcessingMode())
{
ereport(DEBUG2,
}
}
+/* ------------------------------------------------------
+ * Startup Process main entry point and signal handlers
+ * ------------------------------------------------------
+ */
+
+/*
+ * startupproc_quickdie() occurs when signalled SIGQUIT by the postmaster.
+ *
+ * Some backend has bought the farm,
+ * so we need to stop what we're doing and exit.
+ */
+static void
+startupproc_quickdie(SIGNAL_ARGS)
+{
+ PG_SETMASK(&BlockSig);
+
+ /*
+ * DO NOT proc_exit() -- we're here because shared memory may be
+ * corrupted, so we don't want to try to clean up our transaction. Just
+ * nail the windows shut and get out of town.
+ *
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
+ * backend. This is necessary precisely because we don't clean up our
+ * shared memory state.
+ */
+ exit(2);
+}
+
+
+/* SIGTERM: set flag to abort redo and exit */
+static void
+StartupProcShutdownHandler(SIGNAL_ARGS)
+{
+ shutdown_requested = true;
+}
+
+/* Main entry point for startup process */
+void
+StartupProcessMain(void)
+{
+ /*
+ * If possible, make this process a group leader, so that the postmaster
+ * can signal any child processes too.
+ */
+#ifdef HAVE_SETSID
+ if (setsid() < 0)
+ elog(FATAL, "setsid() failed: %m");
+#endif
+
+ /*
+ * Properly accept or ignore signals the postmaster might send us
+ */
+ pqsignal(SIGHUP, SIG_IGN); /* ignore config file updates */
+ pqsignal(SIGINT, SIG_IGN); /* ignore query cancel */
+ pqsignal(SIGTERM, StartupProcShutdownHandler); /* request shutdown */
+ pqsignal(SIGQUIT, startupproc_quickdie); /* hard crash time */
+ pqsignal(SIGALRM, SIG_IGN);
+ pqsignal(SIGPIPE, SIG_IGN);
+ pqsignal(SIGUSR1, SIG_IGN);
+ pqsignal(SIGUSR2, SIG_IGN);
+
+ /*
+ * Reset some signals that are accepted by postmaster but not here
+ */
+ pqsignal(SIGCHLD, SIG_DFL);
+ pqsignal(SIGTTIN, SIG_DFL);
+ pqsignal(SIGTTOU, SIG_DFL);
+ pqsignal(SIGCONT, SIG_DFL);
+ pqsignal(SIGWINCH, SIG_DFL);
+
+ /*
+ * Unblock signals (they were blocked when the postmaster forked us)
+ */
+ PG_SETMASK(&UnBlockSig);
+
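+	/* Do the actual WAL replay, if any is needed */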
+ StartupXLOG();
+
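+	/*
+	 * Recovery may have changed pg_database and the auth catalogs, so
+	 * rebuild the flat files before normal operation begins.
+	 */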
+ BuildFlatFiles(false);
+
+ /* Let postmaster know that startup is finished */
+ SendPostmasterSignal(PMSIGNAL_RECOVERY_COMPLETED);
+
+ /* exit normally */
+ proc_exit(0);
+}
static bool FatalError = false; /* T if recovering from backend crash */
+/* State of WAL redo */
+#define NoRecovery 0
+#define RecoveryStarted 1
+#define RecoveryConsistent 2
+#define RecoveryCompleted 3
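+/* The codes are ordered; comparisons like ">= RecoveryConsistent" rely on it */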
+
+static int RecoveryStatus = NoRecovery;
+
/*
* We use a simple state machine to control startup, shutdown, and
- * recovery.
+ * crash recovery (which is rather like shutdown followed by startup).
*
- * Recovery is split into two phases: crash recovery and consistent (archive)
- * recovery. The startup process begins with crash recovery, replaying WAL
- * until a self-consistent database state is reached. At that point, it
- * signals postmaster, and we switch to consistent recovery phase. The
- * background writer is launched, while the startup process continues
- * applying WAL. We could start accepting connections to perform read-only
- * queries at this point, if we had the infrastructure to do that. When the
- * startup process exits, we switch to PM_RUN state. The startup process can
- * also skip the consistent recovery altogether, as it will during normal
- * startup when there's no recovery to be done, for example.
+ * After doing all the postmaster initialization work, we enter PM_STARTUP
+ * state and the startup process is launched. The startup process begins by
+ * reading the control file and performing other preliminary initialization
+ * steps. When it's ready to start WAL redo, it signals the postmaster, and
+ * we switch to
+ * PM_RECOVERY phase. The background writer is launched, while the startup
+ * process continues applying WAL.
+ *
+ * After reaching a consistent point in WAL redo, startup process signals
+ * us again, and we switch to PM_RECOVERY_CONSISTENT phase. There's currently
+ * no difference between PM_RECOVERY and PM_RECOVERY_CONSISTENT, but we
+ * could start accepting connections to perform read-only queries at this
+ * point, if we had the infrastructure to do that.
+ *
+ * When the WAL redo is finished, the startup process signals us the third
+ * time, and we switch to PM_RUN state. The startup process can also skip the
+ * recovery and consistent recovery phases altogether, as it will during
+ * normal startup when there's no recovery to be done, for example.
*
* Normal child backends can only be launched when we are in PM_RUN state.
* (We also allow it in PM_WAIT_BACKUP state, but only for superusers.)
{
PM_INIT, /* postmaster starting */
PM_STARTUP, /* waiting for startup subprocess */
- PM_RECOVERY, /* consistent recovery mode */
+ PM_RECOVERY, /* in recovery mode */
+ PM_RECOVERY_CONSISTENT, /* consistent recovery mode */
PM_RUN, /* normal "database is alive" state */
PM_WAIT_BACKUP, /* waiting for online backup mode to end */
PM_WAIT_BACKENDS, /* waiting for live backends to exit */
static void reaper(SIGNAL_ARGS);
static void sigusr1_handler(SIGNAL_ARGS);
static void dummy_handler(SIGNAL_ARGS);
+static void CheckRecoverySignals(void);
static void CleanupBackend(int pid, int exitstatus);
static void HandleChildCrash(int pid, int exitstatus, const char *procname);
static void LogChildExit(int lev, const char *procname,
* state that prevents it, start one. It doesn't matter if this
* fails, we'll just try again later.
*/
- if (BgWriterPID == 0 && (pmState == PM_RUN || pmState == PM_RECOVERY))
+ if (BgWriterPID == 0 &&
+ (pmState == PM_RUN || pmState == PM_RECOVERY ||
+ pmState == PM_RECOVERY_CONSISTENT))
BgWriterPID = StartBackgroundWriter();
/*
ereport(LOG,
(errmsg("received smart shutdown request")));
- if (pmState == PM_RUN)
+			if (pmState == PM_RUN || pmState == PM_RECOVERY ||
+				pmState == PM_RECOVERY_CONSISTENT)
{
/* autovacuum workers are told to shut down immediately */
SignalAutovacWorkers(SIGTERM);
if (StartupPID != 0)
signal_child(StartupPID, SIGTERM);
- if (pmState == PM_RUN || pmState == PM_WAIT_BACKUP)
+ if (pmState == PM_RECOVERY)
+ {
+ /* only bgwriter is active in this state */
+ pmState = PM_WAIT_BACKENDS;
+ }
+ if (pmState == PM_RUN ||
+ pmState == PM_WAIT_BACKUP ||
+ pmState == PM_RECOVERY_CONSISTENT)
{
ereport(LOG,
(errmsg("aborting any active transactions")));
if (pid == StartupPID)
{
StartupPID = 0;
- Assert(pmState != PM_RUN);
+
+ CheckRecoverySignals();
/*
- * FATAL exit of startup during PM_STARTUP is treated as
- * catastrophic. There is no other processes running yet.
+	 * Unexpected exit of startup process (including FATAL exit)
+	 * during PM_STARTUP is treated as catastrophic. There are no
+	 * other processes running yet.
*/
- if (!EXIT_STATUS_0(exitstatus) && pmState == PM_STARTUP)
+ if (pmState == PM_STARTUP)
{
LogChildExit(LOG, _("startup process"),
pid, exitstatus);
ExitPostmaster(1);
}
/*
- * Any other unexpected exit of the startup process (including
- * FATAL exit) is treated as a crash.
+ * Any unexpected exit (including FATAL exit) of the startup
+ * process is treated as a crash.
*/
if (!EXIT_STATUS_0(exitstatus))
{
_("startup process"));
continue;
}
-
/*
- * Startup succeeded - we are done with system startup or
- * recovery.
+	 * Startup process exited normally, but didn't finish recovery.
+	 * This can happen if someone other than the postmaster kills the
+	 * startup process with SIGTERM. Treat it like a crash.
*/
- if (pmState == PM_STARTUP)
- FatalError = false;
-
- /*
- * Go to shutdown mode if a shutdown request was pending.
- */
- if (Shutdown > NoShutdown)
+ if (pmState == PM_RECOVERY || pmState == PM_RECOVERY_CONSISTENT)
{
- pmState = PM_WAIT_BACKENDS;
- /* PostmasterStateMachine logic does the rest */
+ HandleChildCrash(pid, exitstatus,
+ _("startup process"));
continue;
}
-
- /*
- * Otherwise, commence normal operations.
- */
- pmState = PM_RUN;
-
- /*
- * Load the flat authorization file into postmaster's cache. The
- * startup process has recomputed this from the database contents,
- * so we wait till it finishes before loading it.
- */
- load_role();
-
- /*
- * Crank up the background writer, if we didn't do that already
- * when we entered consistent recovery phase. It doesn't matter
- * if this fails, we'll just try again later.
- */
- if (BgWriterPID == 0)
- BgWriterPID = StartBackgroundWriter();
-
- /*
- * Likewise, start other special children as needed. In a restart
- * situation, some of them may be alive already.
- */
- if (WalWriterPID == 0)
- WalWriterPID = StartWalWriter();
- if (AutoVacuumingActive() && AutoVacPID == 0)
- AutoVacPID = StartAutoVacLauncher();
- if (XLogArchivingActive() && PgArchPID == 0)
- PgArchPID = pgarch_start();
- if (PgStatPID == 0)
- PgStatPID = pgstat_start();
-
- /* at this point we are really open for business */
- ereport(LOG,
- (errmsg("database system is ready to accept connections")));
-
- continue;
}
/*
FatalError = true;
/* We now transit into a state of waiting for children to die */
- if (pmState == PM_RUN ||
- pmState == PM_RECOVERY ||
+ if (pmState == PM_RECOVERY ||
+ pmState == PM_RECOVERY_CONSISTENT ||
+ pmState == PM_RUN ||
pmState == PM_WAIT_BACKUP ||
pmState == PM_SHUTDOWN)
pmState = PM_WAIT_BACKENDS;
static void
PostmasterStateMachine(void)
{
+ /* Startup states */
+
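+	/*
+	 * RecoveryStatus reports how far the startup process has gotten; it is
+	 * advanced by CheckRecoverySignals() as the signals arrive.
+	 */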
+ if (pmState == PM_STARTUP && RecoveryStatus > NoRecovery)
+ {
+ /* Recovery has started */
+
+ /*
+ * Go to shutdown mode if a shutdown request was pending.
+ */
+ if (Shutdown > NoShutdown)
+ {
+ pmState = PM_WAIT_BACKENDS;
+ /* PostmasterStateMachine logic does the rest */
+ }
+ else
+ {
+ /*
+ * Crank up the background writer. It doesn't matter if this
+ * fails, we'll just try again later.
+ */
+ Assert(BgWriterPID == 0);
+ BgWriterPID = StartBackgroundWriter();
+
+ pmState = PM_RECOVERY;
+ }
+ }
+ if (pmState == PM_RECOVERY && RecoveryStatus >= RecoveryConsistent)
+ {
+ /*
+ * Go to shutdown mode if a shutdown request was pending.
+ */
+ if (Shutdown > NoShutdown)
+ {
+ pmState = PM_WAIT_BACKENDS;
+ /* PostmasterStateMachine logic does the rest */
+ }
+ else
+ {
+ /*
+			 * Startup process has reached a consistent recovery state. We
+			 * consider that good enough to reset FatalError.
+ */
+ pmState = PM_RECOVERY_CONSISTENT;
+ FatalError = false;
+
+ /*
+ * Load the flat authorization file into postmaster's cache. The
+ * startup process won't have recomputed this from the database yet,
+			 * so it may change following recovery.
+ */
+ load_role();
+
+ /*
+ * Likewise, start other special children as needed.
+ */
+ Assert(PgStatPID == 0);
+ PgStatPID = pgstat_start();
+
+ /* XXX at this point we could accept read-only connections */
+ ereport(DEBUG1,
+ (errmsg("database system is in consistent recovery mode")));
+ }
+ }
+	if ((pmState == PM_RECOVERY || pmState == PM_RECOVERY_CONSISTENT ||
+		 pmState == PM_STARTUP) &&
+		RecoveryStatus == RecoveryCompleted)
+ {
+ /*
+ * Startup succeeded - we are done with system startup or
+ * recovery.
+ */
+ FatalError = false;
+
+ /*
+ * Go to shutdown mode if a shutdown request was pending.
+ */
+ if (Shutdown > NoShutdown)
+ {
+ pmState = PM_WAIT_BACKENDS;
+ /* PostmasterStateMachine logic does the rest */
+ }
+ else
+ {
+ /*
+ * Otherwise, commence normal operations.
+ */
+ pmState = PM_RUN;
+
+ /*
+ * Load the flat authorization file into postmaster's cache. The
+ * startup process has recomputed this from the database contents,
+ * so we wait till it finishes before loading it.
+ */
+ load_role();
+
+ /*
+ * Crank up the background writer, if we didn't do that already
+ * when we entered consistent recovery phase. It doesn't matter
+ * if this fails, we'll just try again later.
+ */
+ if (BgWriterPID == 0)
+ BgWriterPID = StartBackgroundWriter();
+
+ /*
+ * Likewise, start other special children as needed. In a restart
+ * situation, some of them may be alive already.
+ */
+ if (WalWriterPID == 0)
+ WalWriterPID = StartWalWriter();
+ if (AutoVacuumingActive() && AutoVacPID == 0)
+ AutoVacPID = StartAutoVacLauncher();
+ if (XLogArchivingActive() && PgArchPID == 0)
+ PgArchPID = pgarch_start();
+ if (PgStatPID == 0)
+ PgStatPID = pgstat_start();
+
+ /* at this point we are really open for business */
+ ereport(LOG,
+ (errmsg("database system is ready to accept connections")));
+ }
+ }
+
+ /* Shutdown states */
+
if (pmState == PM_WAIT_BACKUP)
{
/*
shmem_exit(1);
reset_shared(PostPortNumber);
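+	/* forget any recovery progress reported by the previous startup process */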
+ RecoveryStatus = NoRecovery;
+
StartupPID = StartupDataBase();
Assert(StartupPID != 0);
pmState = PM_STARTUP;
proc_exit(status);
}
-/*
- * sigusr1_handler - handle signal conditions from child processes
- */
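+/*
+ * CheckRecoverySignals - check which signals the startup process has sent
+ * us, update RecoveryStatus accordingly, and run the postmaster state
+ * machine if anything changed.
+ */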
static void
-sigusr1_handler(SIGNAL_ARGS)
+CheckRecoverySignals(void)
{
- int save_errno = errno;
-
- PG_SETMASK(&BlockSig);
+ bool changed = false;
- if (CheckPostmasterSignal(PMSIGNAL_RECOVERY_START))
+ if (CheckPostmasterSignal(PMSIGNAL_RECOVERY_STARTED))
{
Assert(pmState == PM_STARTUP);
- /*
- * Go to shutdown mode if a shutdown request was pending.
- */
- if (Shutdown > NoShutdown)
- {
- pmState = PM_WAIT_BACKENDS;
- /* PostmasterStateMachine logic does the rest */
- }
- else
- {
- /*
- * Startup process has entered recovery. We consider that good
- * enough to reset FatalError.
- */
- pmState = PM_RECOVERY;
- FatalError = false;
+ RecoveryStatus = RecoveryStarted;
+ changed = true;
+ }
+ if (CheckPostmasterSignal(PMSIGNAL_RECOVERY_CONSISTENT))
+ {
+ RecoveryStatus = RecoveryConsistent;
+ changed = true;
+ }
+ if (CheckPostmasterSignal(PMSIGNAL_RECOVERY_COMPLETED))
+ {
+ RecoveryStatus = RecoveryCompleted;
+ changed = true;
+ }
- /*
- * Load the flat authorization file into postmaster's cache. The
- * startup process won't have recomputed this from the database yet,
- * so we it may change following recovery.
- */
- load_role();
+ if (changed)
+ PostmasterStateMachine();
+}
- /*
- * Crank up the background writer. It doesn't matter if this
- * fails, we'll just try again later.
- */
- Assert(BgWriterPID == 0);
- BgWriterPID = StartBackgroundWriter();
+/*
+ * sigusr1_handler - handle signal conditions from child processes
+ */
+static void
+sigusr1_handler(SIGNAL_ARGS)
+{
+ int save_errno = errno;
- /*
- * Likewise, start other special children as needed.
- */
- Assert(PgStatPID == 0);
- PgStatPID = pgstat_start();
+ PG_SETMASK(&BlockSig);
- /* XXX at this point we could accept read-only connections */
- ereport(DEBUG1,
- (errmsg("database system is in consistent recovery mode")));
- }
- }
+ CheckRecoverySignals();
if (CheckPostmasterSignal(PMSIGNAL_PASSWORD_CHANGE))
{