| author | Robert Haas | 2016-04-08 06:04:46 +0000 |
|---|---|---|
| committer | Robert Haas | 2016-04-08 06:04:46 +0000 |
| commit | 719c84c1be51f3d3fe6049b77ddbaa0c4b58a9a9 | |
| tree | c2f4b1501655e50339e3365f267fcce00fd06bbb /src/backend/storage | |
| parent | 8643b91ecf8f47a1307df4a00d66b2fceada0d6f | |
Extend relations multiple blocks at a time to improve scalability.
Contention on the relation extension lock can become quite fierce when
multiple processes are inserting data into the same relation at the same
time at a high rate. Experimentation shows that extending the relation
multiple blocks at a time improves scalability.
Dilip Kumar, reviewed by Petr Jelinek, Amit Kapila, and me.
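
The primitives added below are meant to be consumed by the heap-insertion path, whose changes fall outside this storage-only diffstat. A minimal caller-side sketch, assuming a valid `Relation` — the helper name is made up, and the `Min(512, waiters * 20)` scaling is an illustrative heuristic, not quoted from this diff:

```c
#include "postgres.h"
#include "storage/lmgr.h"
#include "utils/rel.h"

/*
 * Hypothetical helper (not part of this commit's diff): try the extension
 * lock without blocking; if we had to wait, size a bulk extension by the
 * contention we just observed.
 */
static int
extension_lock_and_bulk_size(Relation relation)
{
	int		extraBlocks = 0;

	if (!ConditionalLockRelationForExtension(relation, ExclusiveLock))
	{
		/* Contended: block until we get the lock, then extend in bulk. */
		LockRelationForExtension(relation, ExclusiveLock);

		/* Scale the extension to the waiter count (illustrative numbers). */
		extraBlocks = Min(512, RelationExtensionLockWaiterCount(relation) * 20);
	}

	return extraBlocks;
}
```

The waiters can then find the pre-extended pages through the free space map instead of queuing on the extension lock again, which is why `UpdateFreeSpaceMap` below must publish the new blocks all the way up the FSM tree.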
Diffstat (limited to 'src/backend/storage')
| mode | path | insertions |
|---|---|---|
| -rw-r--r-- | src/backend/storage/freespace/freespace.c | 81 |
| -rw-r--r-- | src/backend/storage/lmgr/lmgr.c | 35 |
| -rw-r--r-- | src/backend/storage/lmgr/lock.c | 37 |
3 files changed, 153 insertions, 0 deletions
```diff
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 813990ea707..2ffa8ff24d0 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -109,6 +109,8 @@ static int fsm_set_and_search(Relation rel, FSMAddress addr, uint16 slot,
 				   uint8 newValue, uint8 minValue);
 static BlockNumber fsm_search(Relation rel, uint8 min_cat);
 static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
+static BlockNumber fsm_get_lastblckno(Relation rel, FSMAddress addr);
+static void fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat);
 
 /******** Public API ********/
 
@@ -189,6 +191,46 @@ RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
 }
 
 /*
+ * Update the upper levels of the free space map all the way up to the root
+ * to make sure we don't lose track of new blocks we just inserted. This is
+ * intended to be used after adding many new blocks to the relation; we judge
+ * it not worth updating the upper levels of the tree every time data for
+ * a single page changes, but for a bulk-extend it's worth it.
+ */
+void
+UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum,
+				   BlockNumber endBlkNum, Size freespace)
+{
+	int			new_cat = fsm_space_avail_to_cat(freespace);
+	FSMAddress	addr;
+	uint16		slot;
+	BlockNumber blockNum;
+	BlockNumber lastBlkOnPage;
+
+	blockNum = startBlkNum;
+
+	while (blockNum <= endBlkNum)
+	{
+		/*
+		 * Find FSM address for this block; update tree all the way to the
+		 * root.
+		 */
+		addr = fsm_get_location(blockNum, &slot);
+		fsm_update_recursive(rel, addr, new_cat);
+
+		/*
+		 * Get the last block number on this FSM page. If that's greater
+		 * than or equal to our endBlkNum, we're done. Otherwise, advance
+		 * to the first block on the next page.
+		 */
+		lastBlkOnPage = fsm_get_lastblckno(rel, addr);
+		if (lastBlkOnPage >= endBlkNum)
+			break;
+		blockNum = lastBlkOnPage + 1;
+	}
+}
+
+/*
  * XLogRecordPageWithFreeSpace - like RecordPageWithFreeSpace, for use in
  *		WAL replay
  */
@@ -788,3 +830,42 @@ fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof_p)
 
 	return max_avail;
 }
+
+/*
+ * This function will return the last block number stored on given
+ * FSM page address.
+ */
+static BlockNumber
+fsm_get_lastblckno(Relation rel, FSMAddress addr)
+{
+	int			slot;
+
+	/*
+	 * Get the last slot number on the given address and convert that to
+	 * block number
+	 */
+	slot = SlotsPerFSMPage - 1;
+	return fsm_get_heap_blk(addr, slot);
+}
+
+/*
+ * Recursively update the FSM tree from given address to
+ * all the way up to root.
+ */
+static void
+fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat)
+{
+	uint16		parentslot;
+	FSMAddress	parent;
+
+	if (addr.level == FSM_ROOT_LEVEL)
+		return;
+
+	/*
+	 * Get the parent page and our slot in the parent page, and
+	 * update the information in that.
+	 */
+	parent = fsm_get_parent(addr, &parentslot);
+	fsm_set_and_search(rel, parent, parentslot, new_cat, 0);
+	fsm_update_recursive(rel, parent, new_cat);
+}
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 0632fc009e5..7b08555b071 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -341,6 +341,41 @@ LockRelationForExtension(Relation relation, LOCKMODE lockmode)
 }
 
 /*
+ * ConditionalLockRelationForExtension
+ *
+ * As above, but only lock if we can get the lock without blocking.
+ * Returns TRUE iff the lock was acquired.
+ */
+bool
+ConditionalLockRelationForExtension(Relation relation, LOCKMODE lockmode)
+{
+	LOCKTAG		tag;
+
+	SET_LOCKTAG_RELATION_EXTEND(tag,
+								relation->rd_lockInfo.lockRelId.dbId,
+								relation->rd_lockInfo.lockRelId.relId);
+
+	return (LockAcquire(&tag, lockmode, false, true) != LOCKACQUIRE_NOT_AVAIL);
+}
+
+/*
+ * RelationExtensionLockWaiterCount
+ *
+ * Count the number of processes waiting for the given relation extension lock.
+ */
+int
+RelationExtensionLockWaiterCount(Relation relation)
+{
+	LOCKTAG		tag;
+
+	SET_LOCKTAG_RELATION_EXTEND(tag,
+								relation->rd_lockInfo.lockRelId.dbId,
+								relation->rd_lockInfo.lockRelId.relId);
+
+	return LockWaiterCount(&tag);
+}
+
+/*
  * UnlockRelationForExtension
  */
 void
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index b30b7b1009b..41f69306459 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -4380,3 +4380,40 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 		LockRelease(&tag, ShareLock, false);
 	return true;
 }
+
+/*
+ * LockWaiterCount
+ *
+ * Find the number of lock requester on this locktag
+ */
+int
+LockWaiterCount(const LOCKTAG *locktag)
+{
+	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
+	LOCK	   *lock;
+	bool		found;
+	uint32		hashcode;
+	LWLock	   *partitionLock;
+	int			waiters = 0;
+
+	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
+		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
+
+	hashcode = LockTagHashCode(locktag);
+	partitionLock = LockHashPartitionLock(hashcode);
+	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
+
+	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
+												(const void *) locktag,
+												hashcode,
+												HASH_FIND,
+												&found);
+	if (found)
+	{
+		Assert(lock != NULL);
+		waiters = lock->nRequested;
+	}
+	LWLockRelease(partitionLock);
+
+	return waiters;
+}
```
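
To see why `UpdateFreeSpaceMap` loops once per FSM page rather than once per heap block, here is a small self-contained model of the bottom-level FSM addressing. The slot count is an illustrative constant (the real `SlotsPerFSMPage` is derived from `BLCKSZ`), and the helper names are made up; the loop shape mirrors the function above. Each iteration covers every heap block that shares one FSM page, so a bulk extend of N blocks costs roughly N / SlotsPerFSMPage recursive tree updates:

```c
#include <stdio.h>

/* Toy model: each bottom-level FSM page tracks this many heap blocks. */
#define SLOTS_PER_FSM_PAGE 4

typedef unsigned BlockNumber;

/* Which bottom-level FSM page covers this heap block? */
static unsigned
fsm_page_of(BlockNumber heapBlk)
{
	return heapBlk / SLOTS_PER_FSM_PAGE;
}

/* Last heap block covered by the given FSM page: its final slot. */
static BlockNumber
last_blk_on_page(unsigned fsmPage)
{
	return fsmPage * SLOTS_PER_FSM_PAGE + (SLOTS_PER_FSM_PAGE - 1);
}

int
main(void)
{
	BlockNumber blk = 0, end = 9;	/* pretend a bulk extend added blocks 0..9 */

	/* Same skip-ahead loop shape as UpdateFreeSpaceMap above. */
	while (blk <= end)
	{
		unsigned	page = fsm_page_of(blk);
		BlockNumber last = last_blk_on_page(page);

		printf("blocks %u..%u -> one recursive update of FSM page %u\n",
			   blk, last < end ? last : end, page);
		if (last >= end)
			break;
		blk = last + 1;
	}
	return 0;
}
```

With ten new blocks and four slots per page, the model prints three updates (pages 0, 1, 2) instead of ten, which is the saving the skip-ahead on `fsm_get_lastblckno` buys.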
