author     Bruce Momjian  2012-06-10 19:20:04 +0000
committer  Bruce Momjian  2012-06-10 19:20:04 +0000
commit     927d61eeff78363ea3938c818d07e511ebaf75cf
tree       2f0bcecf53327f76272a8ce690fa62505520fab9
parent     60801944fa105252b48ea5688d47dfc05c695042
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
Diffstat (limited to 'src/include/storage')
 src/include/storage/barrier.h   | 19
 src/include/storage/latch.h     | 10
 src/include/storage/lock.h      |  4
 src/include/storage/lwlock.h    |  6
 src/include/storage/predicate.h |  2
 src/include/storage/proc.h      |  9
 src/include/storage/procarray.h |  2
 src/include/storage/sinval.h    |  4
 src/include/storage/smgr.h      |  2
9 files changed, 24 insertions(+), 34 deletions(-)
diff --git a/src/include/storage/barrier.h b/src/include/storage/barrier.h
index 57f03ecf2aa..50378709917 100644
--- a/src/include/storage/barrier.h
+++ b/src/include/storage/barrier.h
@@ -15,7 +15,7 @@
#include "storage/s_lock.h"
-extern slock_t dummy_spinlock;
+extern slock_t dummy_spinlock;
/*
* A compiler barrier need not (and preferably should not) emit any actual
@@ -30,10 +30,10 @@ extern slock_t dummy_spinlock;
* loads and stores are totally ordered (which is not the case on most
* architectures) this requires issuing some sort of memory fencing
* instruction.
- *
+ *
* A read barrier must act as a compiler barrier, and in addition must
* guarantee that any loads issued prior to the barrier are completed before
- * any loads issued after the barrier. Similarly, a write barrier acts
+ * any loads issued after the barrier. Similarly, a write barrier acts
* as a compiler barrier, and also orders stores. Read and write barriers
* are thus weaker than a full memory barrier, but stronger than a compiler
* barrier. In practice, on machines with strong memory ordering, read and
@@ -48,7 +48,6 @@ extern slock_t dummy_spinlock;
/*
* Fall through to the spinlock-based implementation.
*/
-
#elif defined(__INTEL_COMPILER)
/*
@@ -56,7 +55,6 @@ extern slock_t dummy_spinlock;
*/
#define pg_memory_barrier() _mm_mfence()
#define pg_compiler_barrier() __memory_barrier()
-
#elif defined(__GNUC__)
/* This works on any architecture, since it's only talking to GCC itself. */
@@ -75,7 +73,6 @@ extern slock_t dummy_spinlock;
__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
#define pg_read_barrier() pg_compiler_barrier()
#define pg_write_barrier() pg_compiler_barrier()
-
#elif defined(__x86_64__) /* 64 bit x86 */
/*
@@ -90,7 +87,6 @@ extern slock_t dummy_spinlock;
__asm__ __volatile__ ("lock; addl $0,0(%%rsp)" : : : "memory")
#define pg_read_barrier() pg_compiler_barrier()
#define pg_write_barrier() pg_compiler_barrier()
-
#elif defined(__ia64__) || defined(__ia64)
/*
@@ -98,7 +94,6 @@ extern slock_t dummy_spinlock;
* fence.
*/
#define pg_memory_barrier() __asm__ __volatile__ ("mf" : : : "memory")
-
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
/*
@@ -109,8 +104,7 @@ extern slock_t dummy_spinlock;
#define pg_memory_barrier() __asm__ __volatile__ ("sync" : : : "memory")
#define pg_read_barrier() __asm__ __volatile__ ("lwsync" : : : "memory")
#define pg_write_barrier() __asm__ __volatile__ ("lwsync" : : : "memory")
-
-#elif defined(__alpha) || defined(__alpha__) /* Alpha */
+#elif defined(__alpha) || defined(__alpha__) /* Alpha */
/*
* Unlike all other known architectures, Alpha allows dependent reads to be
@@ -120,7 +114,6 @@ extern slock_t dummy_spinlock;
#define pg_memory_barrier() __asm__ __volatile__ ("mb" : : : "memory")
#define pg_read_barrier() __asm__ __volatile__ ("rmb" : : : "memory")
#define pg_write_barrier() __asm__ __volatile__ ("wmb" : : : "memory")
-
#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
/*
@@ -129,14 +122,11 @@ extern slock_t dummy_spinlock;
* own definitions where possible, and use this only as a fallback.
*/
#define pg_memory_barrier() __sync_synchronize()
-
#endif
-
#elif defined(__ia64__) || defined(__ia64)
#define pg_compiler_barrier() _Asm_sched_fence()
#define pg_memory_barrier() _Asm_mf()
-
#elif defined(WIN32_ONLY_COMPILER)
/* Should work on both MSVC and Borland. */
@@ -144,7 +134,6 @@ extern slock_t dummy_spinlock;
#pragma intrinsic(_ReadWriteBarrier)
#define pg_compiler_barrier() _ReadWriteBarrier()
#define pg_memory_barrier() MemoryBarrier()
-
#endif
/*
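The ordering rules spelled out in the comment near the top of this header are easiest to see in a producer/consumer pair. A minimal sketch, assuming hypothetical shared variables payload and flag; only the pg_write_barrier()/pg_read_barrier() macros come from this header:

/* Hypothetical shared state published from one process to another. */
static volatile int payload;
static volatile int flag;

static void
producer(void)
{
	payload = 42;			/* write the data first */
	pg_write_barrier();		/* order payload store before flag store */
	flag = 1;				/* then publish it */
}

static int
consumer(void)
{
	if (flag)
	{
		pg_read_barrier();	/* order flag load before payload load */
		return payload;		/* cannot observe a stale value */
	}
	return -1;				/* nothing published yet */
}

On strongly ordered machines such as x86 these read and write barriers compile down to the plain compiler barrier defined above; on PowerPC or Alpha they emit real fence instructions (lwsync, rmb/wmb).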
diff --git a/src/include/storage/latch.h b/src/include/storage/latch.h
index 6a7df38d1a2..71fb4868a00 100644
--- a/src/include/storage/latch.h
+++ b/src/include/storage/latch.h
@@ -68,7 +68,7 @@
* than an ad-hoc shared latch for signaling auxiliary processes. This is
* because generic signal handlers will call SetLatch on the process latch
* only, so using any latch other than the process latch effectively precludes
- * ever registering a generic handler. Since signals have the potential to
+ * ever registering a generic handler. Since signals have the potential to
* invalidate the latch timeout on some platforms, resulting in a
* denial-of-service, it is important to verify that all signal handlers
* within all WaitLatch-calling processes call SetLatch.
@@ -102,10 +102,10 @@ typedef struct
} Latch;
/* Bitmasks for events that may wake-up WaitLatch() clients */
-#define WL_LATCH_SET (1 << 0)
-#define WL_SOCKET_READABLE (1 << 1)
+#define WL_LATCH_SET (1 << 0)
+#define WL_SOCKET_READABLE (1 << 1)
#define WL_SOCKET_WRITEABLE (1 << 2)
-#define WL_TIMEOUT (1 << 3)
+#define WL_TIMEOUT (1 << 3)
#define WL_POSTMASTER_DEATH (1 << 4)
/*
@@ -115,7 +115,7 @@ extern void InitLatch(volatile Latch *latch);
extern void InitSharedLatch(volatile Latch *latch);
extern void OwnLatch(volatile Latch *latch);
extern void DisownLatch(volatile Latch *latch);
-extern int WaitLatch(volatile Latch *latch, int wakeEvents, long timeout);
+extern int WaitLatch(volatile Latch *latch, int wakeEvents, long timeout);
extern int WaitLatchOrSocket(volatile Latch *latch, int wakeEvents,
pgsocket sock, long timeout);
extern void SetLatch(volatile Latch *latch);
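The declarations above imply the standard wait-loop shape: a signal handler that calls SetLatch, and a main loop that waits, resets the latch, then re-checks shared state. A minimal sketch, assuming the backend's process latch MyProc->procLatch and a hypothetical got_work flag:

/* Hypothetical flag set by the signal handler. */
static volatile sig_atomic_t got_work = false;

/* Per the comment above, every handler must call SetLatch. */
static void
work_sighandler(SIGNAL_ARGS)
{
	int		save_errno = errno;

	got_work = true;
	SetLatch(&MyProc->procLatch);
	errno = save_errno;
}

/* Main loop: wake on the latch or after a 10-second timeout. */
for (;;)
{
	int		rc = WaitLatch(&MyProc->procLatch,
						   WL_LATCH_SET | WL_TIMEOUT, 10000L);

	ResetLatch(&MyProc->procLatch);		/* reset before re-checking state */

	if (got_work)
	{
		got_work = false;
		/* ... handle the work ... */
	}
	if (rc & WL_TIMEOUT)
	{
		/* ... periodic housekeeping ... */
	}
}

Resetting the latch before examining the flag is what closes the race: a signal arriving after the check sets the latch again, so the next WaitLatch returns immediately instead of sleeping through the event.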
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 17b894285ba..d629ac2ad2e 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -428,7 +428,7 @@ typedef struct LockInstanceData
LOCKMASK holdMask; /* locks held by this PGPROC */
LOCKMODE waitLockMode; /* lock awaited by this PGPROC, if any */
BackendId backend; /* backend ID of this PGPROC */
- LocalTransactionId lxid; /* local transaction ID of this PGPROC */
+ LocalTransactionId lxid; /* local transaction ID of this PGPROC */
int pid; /* pid of this PGPROC */
bool fastpath; /* taken via fastpath? */
} LockInstanceData;
@@ -436,7 +436,7 @@ typedef struct LockInstanceData
typedef struct LockData
{
int nelements; /* The length of the array */
- LockInstanceData *locks;
+ LockInstanceData *locks;
} LockData;
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 6b59efcbb15..82d8ec4edc2 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -95,9 +95,9 @@ typedef enum LWLockMode
{
LW_EXCLUSIVE,
LW_SHARED,
- LW_WAIT_UNTIL_FREE /* A special mode used in PGPROC->lwlockMode, when
- * waiting for lock to become free. Not to be used
- * as LWLockAcquire argument */
+ LW_WAIT_UNTIL_FREE /* A special mode used in PGPROC->lwlockMode,
+ * when waiting for lock to become free. Not
+ * to be used as LWLockAcquire argument */
} LWLockMode;
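To make the comment's warning concrete: callers pass only LW_EXCLUSIVE or LW_SHARED to LWLockAcquire; LW_WAIT_UNTIL_FREE is internal state kept in PGPROC->lwlockMode. A minimal sketch, with a hypothetical lock id and shared counter:

/* Writer: exclusive access to the hypothetical shared counter. */
LWLockAcquire(SomeSharedLock, LW_EXCLUSIVE);
shared_counter++;
LWLockRelease(SomeSharedLock);

/* Readers: many backends may hold the lock in shared mode at once. */
LWLockAcquire(SomeSharedLock, LW_SHARED);
value = shared_counter;
LWLockRelease(SomeSharedLock);

/* Never: LWLockAcquire(SomeSharedLock, LW_WAIT_UNTIL_FREE); */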
diff --git a/src/include/storage/predicate.h b/src/include/storage/predicate.h
index 6ea70ea8459..7ec79e077d2 100644
--- a/src/include/storage/predicate.h
+++ b/src/include/storage/predicate.h
@@ -44,7 +44,7 @@ extern bool PageIsPredicateLocked(Relation relation, BlockNumber blkno);
/* predicate lock maintenance */
extern Snapshot GetSerializableTransactionSnapshot(Snapshot snapshot);
extern void SetSerializableTransactionSnapshot(Snapshot snapshot,
- TransactionId sourcexid);
+ TransactionId sourcexid);
extern void RegisterPredicateLockingXid(TransactionId xid);
extern void PredicateLockRelation(Relation relation, Snapshot snapshot);
extern void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot);
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 618a02f42be..71413aaf591 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -131,14 +131,15 @@ struct PGPROC
struct XidCache subxids; /* cache for subtransaction XIDs */
- /* Per-backend LWLock. Protects fields below. */
+ /* Per-backend LWLock. Protects fields below. */
LWLockId backendLock; /* protects the fields below */
/* Lock manager data, recording fast-path locks taken by this backend. */
uint64 fpLockBits; /* lock modes held for each fast-path slot */
- Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
+ Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
bool fpVXIDLock; /* are we holding a fast-path VXID lock? */
- LocalTransactionId fpLocalTransactionId; /* lxid for fast-path VXID lock */
+ LocalTransactionId fpLocalTransactionId; /* lxid for fast-path VXID
+ * lock */
};
/* NOTE: "typedef struct PGPROC PGPROC" appears in storage/lock.h. */
@@ -149,7 +150,7 @@ extern PGDLLIMPORT struct PGXACT *MyPgXact;
/*
* Prior to PostgreSQL 9.2, the fields below were stored as part of the
- * PGPROC. However, benchmarking revealed that packing these particular
+ * PGPROC. However, benchmarking revealed that packing these particular
* members into a separate array as tightly as possible sped up GetSnapshotData
* considerably on systems with many CPU cores, by reducing the number of
* cache lines needing to be fetched. Thus, think very carefully before adding
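The comment's argument in miniature: GetSnapshotData scans every backend, and what matters is how many cache lines that scan touches. A schematic sketch, with illustrative names rather than the real fields:

/* Dense array of small hot structs: several PGXACT entries fit in
 * each 64-byte cache line, so the scan streams through memory. */
for (index = 0; index < numProcs; index++)
	xmin = Min(xmin, allPgXact[index].xmin);

/* By contrast, keeping the same fields inside the much larger PGPROC
 * would pull in one mostly-cold cache line per backend examined. */

The hot fields stay in the packed array; everything else remains in PGPROC, where its size does not hurt the snapshot scan.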
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index 0b0aa35ee1c..5b4cab926f5 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -43,7 +43,7 @@ extern int GetMaxSnapshotSubxidCount(void);
extern Snapshot GetSnapshotData(Snapshot snapshot);
extern bool ProcArrayInstallImportedXmin(TransactionId xmin,
- TransactionId sourcexid);
+ TransactionId sourcexid);
extern RunningTransactions GetRunningTransactionData(void);
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index 7fdfdbe7c47..bcf2c8111dd 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -33,8 +33,8 @@
* updates and deletions in system catalogs (see CacheInvalidateHeapTuple).
* An update can generate two inval events, one for the old tuple and one for
* the new, but this is reduced to one event if the tuple's hash key doesn't
- * change. Note that the inval events themselves don't actually say whether
- * the tuple is being inserted or deleted. Also, since we transmit only a
+ * change. Note that the inval events themselves don't actually say whether
+ * the tuple is being inserted or deleted. Also, since we transmit only a
* hash key, there is a small risk of unnecessary invalidations due to chance
* matches of hash keys.
*
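The reduced-to-one-event rule above, in schematic form; CatalogCacheComputeTupleHash and register_inval_event are hypothetical stand-ins for the real hash computation and event queueing:

/* On UPDATE of a catalog tuple: */
oldhash = CatalogCacheComputeTupleHash(oldtup);		/* hypothetical */
newhash = CatalogCacheComputeTupleHash(newtup);
if (oldhash == newhash)
	register_inval_event(newhash);		/* one event covers both */
else
{
	register_inval_event(oldhash);		/* invalidate under old key */
	register_inval_event(newhash);		/* and under new key */
}

A chance collision between unrelated keys merely causes an extra, harmless invalidation, which is the small risk the comment mentions.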
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index f1e1b8cdefd..f8fc2b2d6e8 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -60,7 +60,7 @@ typedef struct SMgrRelationData
* submodules. Do not touch them from elsewhere.
*/
int smgr_which; /* storage manager selector */
- bool smgr_transient; /* T if files are to be closed at EOXact */
+ bool smgr_transient; /* T if files are to be closed at EOXact */
/* for md.c; NULL for forks that are not open */
struct _MdfdVec *md_fd[MAX_FORKNUM + 1];