author      Tom Lane    2001-01-14 05:08:17 +0000
committer   Tom Lane    2001-01-14 05:08:17 +0000
commit      36839c192706f5abd75bdcb02b6a7cace14ce108 (patch)
tree        3022631b1208e1227684db86c12cbba7da15f611 /src/backend/storage/ipc
parent      027f144e390afa6f189270e8c2a2a56c0a88f646 (diff)
Restructure backend SIGINT/SIGTERM handling so that 'die' interrupts
are treated more like 'cancel' interrupts: the signal handler sets a flag that is examined at well-defined spots, rather than trying to cope with an interrupt that might happen anywhere. See pghackers discussion of 1/12/01.
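
The heart of the new scheme lives outside this directory (in miscadmin.h and
tcop/postgres.c), but a condensed sketch helps in reading the hunks below.
The flag names match the ones used in this diff; die(), ProcessInterrupts()
and the elog() messages are paraphrased and should be read as illustrative
rather than verbatim:

    /* Simplified sketch of flag-based cancel/die handling (not verbatim). */
    volatile bool InterruptPending = false;     /* set by signal handlers */
    volatile bool ProcDiePending = false;
    volatile bool QueryCancelPending = false;
    volatile bool ImmediateInterruptOK = false; /* true only across "safe" waits */
    volatile int  CritSectionCount = 0;         /* > 0 inside critical sections */

    static void ProcessInterrupts(void);

    /* SIGTERM handler: record the request, do nothing dangerous here. */
    static void
    die(int signo)
    {
        InterruptPending = true;
        ProcDiePending = true;
        /* Service it at once only if the mainline has said that is safe. */
        if (ImmediateInterruptOK && CritSectionCount == 0)
            ProcessInterrupts();
    }

    /* Examined at well-defined spots in straight-line backend code. */
    #define CHECK_FOR_INTERRUPTS() \
        do { \
            if (InterruptPending) \
                ProcessInterrupts(); \
        } while (0)

    static void
    ProcessInterrupts(void)
    {
        if (CritSectionCount > 0)   /* never abort inside a critical section */
            return;
        InterruptPending = false;
        if (ProcDiePending)
            elog(FATAL, "terminating connection at administrator request");
        if (QueryCancelPending)
        {
            QueryCancelPending = false;
            elog(ERROR, "query cancel requested");
        }
    }
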
Diffstat (limited to 'src/backend/storage/ipc')
-rw-r--r--  src/backend/storage/ipc/ipc.c    44
-rw-r--r--  src/backend/storage/ipc/spin.c   37
2 files changed, 63 insertions(+), 18 deletions(-)
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index d592a179867..9d796299dc6 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.59 2001/01/07 04:30:41 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.60 2001/01/14 05:08:15 tgl Exp $
*
* NOTES
*
@@ -131,8 +131,12 @@ proc_exit(int code)
* to close up shop already. Note that the signal handlers will not
* set these flags again, now that proc_exit_inprogress is set.
*/
- QueryCancel = false;
+ InterruptPending = false;
ProcDiePending = false;
+ QueryCancelPending = false;
+ /* And let's just make *sure* we're not interrupted ... */
+ ImmediateInterruptOK = false;
+ CritSectionCount = 1;
if (DebugLvl > 1)
elog(DEBUG, "proc_exit(%d)", code);
@@ -367,7 +371,7 @@ CallbackSemaphoreKill(int status, Datum semId)
/* IpcSemaphoreLock(semId, sem) - locks a semaphore */
/****************************************************************************/
void
-IpcSemaphoreLock(IpcSemaphoreId semId, int sem)
+IpcSemaphoreLock(IpcSemaphoreId semId, int sem, bool interruptOK)
{
int errStatus;
struct sembuf sops;
@@ -380,11 +384,43 @@ IpcSemaphoreLock(IpcSemaphoreId semId, int sem)
* Note: if errStatus is -1 and errno == EINTR then it means we
* returned from the operation prematurely because we were
* sent a signal. So we try and lock the semaphore again.
- * ----------------
+ *
+ * Each time around the loop, we check for a cancel/die interrupt.
+ * We assume that if such an interrupt comes in while we are waiting,
+ * it will cause the semop() call to exit with errno == EINTR, so that
+ * we will be able to service the interrupt (if not in a critical
+ * section already).
+ *
+ * Once we acquire the lock, we do NOT check for an interrupt before
+ * returning. The caller needs to be able to record ownership of
+ * the lock before any interrupt can be accepted.
+ *
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
+ * and entering the semop() call. If a cancel/die interrupt occurs in
+ * that window, we would fail to notice it until after we acquire the
+ * lock (or get another interrupt to escape the semop()). We can avoid
+ * this problem by temporarily setting ImmediateInterruptOK = true
+ * before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this
+ * interval will execute directly. However, there is a huge pitfall:
+ * there is another window of a few instructions after the semop()
+ * before we are able to reset ImmediateInterruptOK. If an interrupt
+ * occurs then, we'll lose control, which means that the lock has been
+ * acquired but our caller did not get a chance to record the fact.
+ * Therefore, we only set ImmediateInterruptOK if the caller tells us
+ * it's OK to do so, ie, the caller does not need to record acquiring
+ * the lock. (This is currently true for lockmanager locks, since the
+ * process that granted us the lock did all the necessary state updates.
+ * It's not true for SysV semaphores used to emulate spinlocks --- but
+ * our performance on such platforms is so horrible anyway that I'm
+ * not going to worry too much about it.)
+ * ----------------
*/
do
{
+ ImmediateInterruptOK = interruptOK;
+ CHECK_FOR_INTERRUPTS();
errStatus = semop(semId, &sops, 1);
+ ImmediateInterruptOK = false;
} while (errStatus == -1 && errno == EINTR);
if (errStatus == -1)
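
To make the long comment above easier to follow, here is the same wait loop
again with the two race windows it discusses marked explicitly. This is an
annotated restatement of the hunk, not additional code in the tree:

    do
    {
        ImmediateInterruptOK = interruptOK;
        CHECK_FOR_INTERRUPTS();             /* service anything already pending */
        /*
         * Window A: an interrupt arriving here is executed immediately
         * (when interruptOK), so it cannot be lost before we block.
         */
        errStatus = semop(semId, &sops, 1);
        /*
         * Window B: we may already hold the semaphore, but the caller has not
         * yet recorded that fact.  An immediate interrupt here would orphan
         * the lock, which is why callers that must do their own bookkeeping
         * pass interruptOK = false.
         */
        ImmediateInterruptOK = false;
    } while (errStatus == -1 && errno == EINTR);
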
diff --git a/src/backend/storage/ipc/spin.c b/src/backend/storage/ipc/spin.c
index ed71d79ad9f..b27c1810020 100644
--- a/src/backend/storage/ipc/spin.c
+++ b/src/backend/storage/ipc/spin.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/Attic/spin.c,v 1.28 2001/01/12 21:53:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/Attic/spin.c,v 1.29 2001/01/14 05:08:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,9 +25,11 @@
#include <sys/sem.h>
#endif
+#include "miscadmin.h"
#include "storage/proc.h"
#include "storage/s_lock.h"
+
/* Probably should move these to an appropriate header file */
extern SPINLOCK ShmemLock;
extern SPINLOCK ShmemIndexLock;
@@ -145,19 +147,20 @@ SpinAcquire(SPINLOCK lockid)
PRINT_SLDEBUG("SpinAcquire", lockid, slckP);
/*
- * Lock out die() until we exit the critical section protected by the
- * spinlock. This ensures that die() will not interrupt manipulations
- * of data structures in shared memory. We don't want die() to
- * interrupt this routine between S_LOCK and PROC_INCR_SLOCK, either,
- * so must do it before acquiring the lock, not after.
- */
- START_CRIT_SECTION();
- /*
* Acquire the lock, then record that we have done so (for recovery
- * in case of elog(ERROR) during the critical section).
+ * in case of elog(ERROR) during the critical section). Note we assume
+ * here that S_LOCK will not accept cancel/die interrupts once it has
+ * acquired the lock. However, interrupts should be accepted while
+ * waiting, if CritSectionCount is zero.
*/
S_LOCK(&(slckP->shlock));
PROC_INCR_SLOCK(lockid);
+ /*
+ * Lock out cancel/die interrupts until we exit the critical section
+ * protected by the spinlock. This ensures that interrupts will not
+ * interfere with manipulations of data structures in shared memory.
+ */
+ START_CRIT_SECTION();
PRINT_SLDEBUG("SpinAcquire/done", lockid, slckP);
}
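
A hypothetical caller shows what the new ordering buys. ShmemLock is declared
above in this file; shmemStruct and its counter field are made-up names used
only for illustration:

    SpinAcquire(ShmemLock);     /* may honor cancel/die while waiting */
    /*
     * From here to SpinRelease we are inside a critical section, so a
     * cancel/die request merely sets a flag and cannot interrupt the update.
     */
    shmemStruct->counter++;     /* hypothetical shared-memory manipulation */
    SpinRelease(ShmemLock);     /* ends the critical section; a pending interrupt
                                 * is honored no later than the next
                                 * CHECK_FOR_INTERRUPTS() */
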
@@ -317,10 +320,16 @@ SpinFreeAllSemaphores(void)
void
SpinAcquire(SPINLOCK lock)
{
- /* See the TAS() version of this routine for commentary */
- START_CRIT_SECTION();
- IpcSemaphoreLock(SpinLockIds[0], lock);
+ /*
+ * See the TAS() version of this routine for primary commentary.
+ *
+ * NOTE we must pass interruptOK = false to IpcSemaphoreLock, to ensure
+ * that a cancel/die interrupt cannot prevent us from recording ownership
+ * of a lock we have just acquired.
+ */
+ IpcSemaphoreLock(SpinLockIds[0], lock, false);
PROC_INCR_SLOCK(lock);
+ START_CRIT_SECTION();
}
/*
@@ -338,8 +347,8 @@ SpinRelease(SPINLOCK lock)
semval = IpcSemaphoreGetValue(SpinLockIds[0], lock);
Assert(semval < 1);
- Assert(!MyProc || MyProc->sLocks[lockid] > 0);
#endif
+ Assert(!MyProc || MyProc->sLocks[lockid] > 0);
PROC_DECR_SLOCK(lock);
IpcSemaphoreUnlock(SpinLockIds[0], lock);
END_CRIT_SECTION();
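
For contrast with the interruptOK = false passed in SpinAcquire above, a
waiter whose shared-state bookkeeping is done by the process that wakes it
can pass true and so remain promptly killable while blocked. A hedged sketch,
with waitSemId/waitSemNum as placeholder names rather than identifiers from
this tree:

    /*
     * Lock-manager style sleep: the granting backend records us as the lock
     * holder before unblocking our semaphore, so a die() interrupt taken
     * while we are still inside semop() cannot orphan the lock.
     */
    IpcSemaphoreLock(waitSemId, waitSemNum, true);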