 #define FastPathStrongMode(mode)       ((mode) > ShareUpdateExclusiveLock)
 #define FastPathRelevantMode(mode)     ((mode) != ShareUpdateExclusiveLock)
 
-static bool FastPathGrantLock(Oid relid, LOCKMODE lockmode);
-static bool FastPathUnGrantLock(Oid relid, LOCKMODE lockmode);
-static bool FastPathTransferLocks(LockMethod lockMethodTable,
+static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
+static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
+static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
                                          const LOCKTAG *locktag, uint32 hashcode);
-static PROCLOCK *FastPathGetLockEntry(LOCALLOCK *locallock);
+static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
 
 /*
  * To make the fast-path lock mechanism work, we must have some way of
 {
        slock_t mutex;
        uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
-} FastPathStrongLockData;
+} FastPathStrongRelationLockData;
 
-FastPathStrongLockData *FastPathStrongLocks;
+FastPathStrongRelationLockData *FastPathStrongRelationLocks;
 
 #ifndef LOCK_DEBUG
 static bool Dummy_trace = false;
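
For illustration only (not part of the patch): a minimal standalone model of the renamed strong-lock bookkeeping. A pthread mutex stands in for slock_t, the partition count of 1024 mirrors FAST_PATH_STRONG_LOCK_HASH_PARTITIONS in lock.c of this vintage, and strong_partition is a hypothetical analog of FastPathStrongLockHashPartition.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS 1024

typedef struct
{
	pthread_mutex_t mutex;		/* stand-in for slock_t */
	uint32_t	count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
} FastPathStrongRelationLockData;

static FastPathStrongRelationLockData strong = {
	PTHREAD_MUTEX_INITIALIZER, {0}
};

/* Hypothetical analog of FastPathStrongLockHashPartition. */
static uint32_t
strong_partition(uint32_t hashcode)
{
	return hashcode % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS;
}

int
main(void)
{
	uint32_t	part = strong_partition(0xDEADBEEFu);

	/* A strong locker advertises itself by bumping its partition's count. */
	pthread_mutex_lock(&strong.mutex);
	strong.count[part]++;
	pthread_mutex_unlock(&strong.mutex);

	/* Any nonzero count forces lockers of relations hashing to this
	 * partition off the fast path and into the main lock table. */
	printf("partition %u count = %u\n",
		   (unsigned) part, (unsigned) strong.count[part]);
	return 0;
}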
        /*
         * Allocate fast-path structures.
         */
-       FastPathStrongLocks = ShmemInitStruct("Fast Path Strong Lock Data",
-               sizeof(FastPathStrongLockData), &found);
+       FastPathStrongRelationLocks =
+               ShmemInitStruct("Fast Path Strong Relation Lock Data",
+                               sizeof(FastPathStrongRelationLockData), &found);
        if (!found)
-               SpinLockInit(&FastPathStrongLocks->mutex);
+               SpinLockInit(&FastPathStrongRelationLocks->mutex);
 
        /*
         * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
                         * yet to begin to transfer fast-path locks.
                         */
                        LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
-                       if (FastPathStrongLocks->count[fasthashcode] != 0)
+                       if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
                                acquired = false;
                        else
-                               acquired = FastPathGrantLock(locktag->locktag_field2, lockmode);
+                               acquired = FastPathGrantRelationLock(locktag->locktag_field2,
+                                                                                                        lockmode);
                        LWLockRelease(MyProc->backendLock);
                        if (acquired)
                        {
                         * instruction here, on architectures where that is supported.
                         */
                        Assert(locallock->holdsStrongLockCount == FALSE);
-                       SpinLockAcquire(&FastPathStrongLocks->mutex);
-                       FastPathStrongLocks->count[fasthashcode]++;
+                       SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+                       FastPathStrongRelationLocks->count[fasthashcode]++;
                        locallock->holdsStrongLockCount = TRUE;
-                       SpinLockRelease(&FastPathStrongLocks->mutex);
-                       if (!FastPathTransferLocks(lockMethodTable, locktag, hashcode))
+                       SpinLockRelease(&FastPathStrongRelationLocks->mutex);
+                       if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
+                                                                                          hashcode))
                        {
                                if (reportMemoryError)
                                        ereport(ERROR,
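
The ordering in the hunks above is the heart of the protocol: a strong locker bumps its partition's counter before transferring anyone's fast-path locks, while a weak locker re-checks the counter only after taking its own backendLock. Below is a standalone sketch of that handshake, with pthread mutexes standing in for the spinlock and the per-backend LWLock; every name here is a hypothetical stand-in.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PARTITIONS 1024
#define FP_SLOTS   16

static pthread_mutex_t strong_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t strong_count[PARTITIONS];

/* One simulated backend: its backendLock and fast-path slots. */
static pthread_mutex_t backend_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fp_relid[FP_SLOTS];
static int	fp_used;

/* Weak side: grant via the fast path only if no strong lock is pending.
 * As in the patch, the counter is read under the backend lock, not the
 * strong-lock mutex; the bump-before-transfer ordering makes that safe. */
static bool
try_fastpath_weak(uint32_t relid, uint32_t fasthashcode)
{
	bool		acquired = false;

	pthread_mutex_lock(&backend_lock);
	if (strong_count[fasthashcode] == 0 && fp_used < FP_SLOTS)
	{
		fp_relid[fp_used++] = relid;
		acquired = true;
	}
	pthread_mutex_unlock(&backend_lock);
	return acquired;
}

/* Strong side: advertise intent first, then migrate existing fast-path
 * entries (modeled here as simply emptying the backend's slots). */
static void
acquire_strong(uint32_t fasthashcode)
{
	pthread_mutex_lock(&strong_mutex);
	strong_count[fasthashcode]++;
	pthread_mutex_unlock(&strong_mutex);

	/* After the bump, no new fast-path grants can slip in, so one pass
	 * over each backend's slots (under its backend lock) sees them all. */
	pthread_mutex_lock(&backend_lock);
	fp_used = 0;
	pthread_mutex_unlock(&backend_lock);
}

int
main(void)
{
	printf("weak grant before strong: %d\n", try_fastpath_weak(1234, 7));
	acquire_strong(7);
	printf("weak grant after strong:  %d\n", try_fastpath_weak(1234, 7));
	return 0;
}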
                uint32  fasthashcode;
                fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
 
-               SpinLockAcquire(&FastPathStrongLocks->mutex);
-               Assert(FastPathStrongLocks->count[fasthashcode] > 0);
-               FastPathStrongLocks->count[fasthashcode]--;
+               SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+               Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
+               FastPathStrongRelationLocks->count[fasthashcode]--;
                locallock->holdsStrongLockCount = FALSE;
-               SpinLockRelease(&FastPathStrongLocks->mutex);
+               SpinLockRelease(&FastPathStrongRelationLocks->mutex);
        }
        if (!hash_search(LockMethodLocalHash,
                                         (void *) &(locallock->tag),
                 * it here.  Another backend may have moved it to the main table.
                 */
                LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
-               released = FastPathUnGrantLock(locktag->locktag_field2, lockmode);
+               released = FastPathUnGrantRelationLock(locktag->locktag_field2,
+                                                                                          lockmode);
                LWLockRelease(MyProc->backendLock);
                if (released)
                {
 
                        /* Attempt fast-path release. */
                        relid = locallock->tag.lock.locktag_field2;
-                       if (FastPathUnGrantLock(relid, lockmode))
+                       if (FastPathUnGrantRelationLock(relid, lockmode))
                        {
                                RemoveLocalLock(locallock);
                                continue;
 }
 
 /*
- * FastPathGrantLock
+ * FastPathGrantRelationLock
  *             Grant lock using per-backend fast-path array, if there is space.
  */
 static bool
-FastPathGrantLock(Oid relid, LOCKMODE lockmode)
+FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
 {
        uint32          f;
        uint32          unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
 }
 
 /*
- * FastPathUnGrantLock
+ * FastPathUnGrantRelationLock
  *             Release fast-path lock, if present.  Update backend-private local
  *             use count, while we're at it.
  */
 static bool
-FastPathUnGrantLock(Oid relid, LOCKMODE lockmode)
+FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
 {
        uint32          f;
        bool            result = false;
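
A standalone sketch of what the two renamed functions do with the per-backend array, assuming 16 slots (the value of FP_LOCK_SLOTS_PER_BACKEND) and one bit per fast-path-eligible lock mode; plain arrays replace MyProc's relid and lock-bit fields, and all names are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FP_SLOTS 16

static uint32_t fp_relid[FP_SLOTS];
static uint8_t	fp_lockbits[FP_SLOTS];	/* 0 means the slot is unused */
static int	fp_local_use_count;

/* Model of FastPathGrantRelationLock: reuse an existing slot for relid if
 * there is one, otherwise claim any unused slot; fail if the array is full. */
static bool
grant_relation_lock(uint32_t relid, uint8_t modebit)
{
	int			unused_slot = FP_SLOTS;

	for (int f = 0; f < FP_SLOTS; f++)
	{
		if (fp_lockbits[f] == 0)
			unused_slot = f;
		else if (fp_relid[f] == relid)
		{
			fp_lockbits[f] |= modebit;
			return true;
		}
	}
	if (unused_slot < FP_SLOTS)
	{
		fp_relid[unused_slot] = relid;
		fp_lockbits[unused_slot] = modebit;
		fp_local_use_count++;
		return true;
	}
	return false;				/* no space; caller falls back to the
								 * shared lock table */
}

/* Model of FastPathUnGrantRelationLock: clear the mode bit if we hold it;
 * report false if the lock is not in the array (a strong locker may have
 * moved it to the shared table). */
static bool
ungrant_relation_lock(uint32_t relid, uint8_t modebit)
{
	for (int f = 0; f < FP_SLOTS; f++)
	{
		if (fp_lockbits[f] != 0 && fp_relid[f] == relid &&
			(fp_lockbits[f] & modebit) != 0)
		{
			fp_lockbits[f] &= ~modebit;
			if (fp_lockbits[f] == 0)
				fp_local_use_count--;
			return true;
		}
	}
	return false;
}

int
main(void)
{
	printf("grant: %d\n", grant_relation_lock(1234, 1 << 0));
	printf("ungrant: %d\n", ungrant_relation_lock(1234, 1 << 0));
	printf("ungrant again: %d\n", ungrant_relation_lock(1234, 1 << 0));
	return 0;
}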
 }
 
 /*
- * FastPathTransferLocks
+ * FastPathTransferRelationLocks
  *             Transfer locks matching the given lock tag from per-backend fast-path
  *             arrays to the shared hash table.
  */
 static bool
-FastPathTransferLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
+FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
                                          uint32 hashcode)
 {
        LWLockId                partitionLock = LockHashPartitionLock(hashcode);
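
A standalone model of the transfer loop, under the assumption that the strong-lock counter has already been bumped, so no new fast-path entries for this relation can appear mid-scan; pthread mutexes replace the per-backend LWLocks, a bare counter stands in for the shared lock table, and all names are hypothetical.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBACKENDS 4
#define FP_SLOTS  16

/* One simulated per-backend fast-path array, as in PGPROC. */
typedef struct
{
	pthread_mutex_t lock;		/* stand-in for proc->backendLock */
	uint32_t	relid[FP_SLOTS];
	uint8_t		lockbits[FP_SLOTS];	/* 0 means the slot is unused */
} Backend;

static Backend backends[NBACKENDS];

/* Stand-in for the shared lock table: just count transferred holds. */
static int	shared_holds;

/* Model of FastPathTransferRelationLocks: walk every backend and pull its
 * fast-path holds on relid into the shared table. */
static bool
transfer_relation_locks(uint32_t relid)
{
	for (int b = 0; b < NBACKENDS; b++)
	{
		Backend    *proc = &backends[b];

		pthread_mutex_lock(&proc->lock);
		for (int f = 0; f < FP_SLOTS; f++)
		{
			if (proc->lockbits[f] == 0 || proc->relid[f] != relid)
				continue;
			/* In lock.c this is a shared-table insertion; failure due
			 * to shared-memory exhaustion would return false. */
			shared_holds++;
			proc->lockbits[f] = 0;	/* clear the fast-path slot */
		}
		pthread_mutex_unlock(&proc->lock);
	}
	return true;
}

int
main(void)
{
	for (int b = 0; b < NBACKENDS; b++)
		pthread_mutex_init(&backends[b].lock, NULL);

	/* Give two backends a fast-path lock on relation 1234. */
	backends[0].relid[0] = 1234;
	backends[0].lockbits[0] = 1;
	backends[2].relid[5] = 1234;
	backends[2].lockbits[5] = 1;

	transfer_relation_locks(1234);
	printf("holds now in shared table: %d\n", shared_holds);	/* 2 */
	return 0;
}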
  *      transferring it to the primary lock table if necessary.
  */
 static PROCLOCK *
-FastPathGetLockEntry(LOCALLOCK *locallock)
+FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 {
        LockMethod              lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
        LOCKTAG            *locktag = &locallock->tag.lock;
                && FastPathTag(&lock->tag) && FastPathStrongMode(lockmode))
        {
                uint32  fasthashcode = FastPathStrongLockHashPartition(hashcode);
-               SpinLockAcquire(&FastPathStrongLocks->mutex);
-               FastPathStrongLocks->count[fasthashcode]--;
-               SpinLockRelease(&FastPathStrongLocks->mutex);
+               SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+               FastPathStrongRelationLocks->count[fasthashcode]--;
+               SpinLockRelease(&FastPathStrongRelationLocks->mutex);
        }
 }
 
                /*
                 * If the local lock was taken via the fast-path, we need to move it
                 * to the primary lock table, or just get a pointer to the existing
-                * primary lock table if by chance it's already been transferred.
+                * primary lock table entry if by chance it's already been transferred.
                 */
                if (locallock->proclock == NULL)
                {
-                       locallock->proclock = FastPathGetLockEntry(locallock);
+                       locallock->proclock = FastPathGetRelationLockEntry(locallock);
                        locallock->lock = locallock->proclock->tag.myLock;
                }
 
 
                for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
                {
-                       LockInstanceData   *instance = &data->locks[el];
+                       LockInstanceData   *instance;
                        uint32          lockbits = FAST_PATH_GET_BITS(proc, f);
 
                        /* Skip unallocated slots. */
                                        repalloc(data->locks, sizeof(LockInstanceData) * els);
                        }
 
+                       /* Compute instance only now: the repalloc just above
+                        * may have moved data->locks. */
+                       instance = &data->locks[el];
                        SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
                                                                 proc->fpRelId[f]);
                        instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
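
Beyond the renames, this hunk fixes a latent bug: instance was previously computed from data->locks before the repalloc() that can grow (and move) that array, leaving it dangling. A minimal demonstration of the hazard, with malloc/realloc in place of palloc/repalloc and hypothetical names:

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	int			holdMask;
} LockInstanceData;

int
main(void)
{
	int			el = 9, els = 10;
	LockInstanceData *locks = malloc(sizeof(LockInstanceData) * els);

	/* WRONG order: take the pointer, then grow the array.  realloc()
	 * may move the block, leaving instance dangling. */
	LockInstanceData *instance = &locks[el];

	els += 10;
	locks = realloc(locks, sizeof(LockInstanceData) * els);
	/* instance may now point into freed memory: undefined behavior
	 * if dereferenced. */

	/* RIGHT order, as in the patched code: grow first, then point. */
	instance = &locks[el];
	instance->holdMask = 0;
	printf("holdMask = %d\n", instance->holdMask);

	free(locks);
	return 0;
}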
        if (FastPathTag(&lock->tag) && FastPathStrongMode(lockmode))
        {
                uint32  fasthashcode = FastPathStrongLockHashPartition(hashcode);
-               SpinLockAcquire(&FastPathStrongLocks->mutex);
-               FastPathStrongLocks->count[fasthashcode]++;
-               SpinLockRelease(&FastPathStrongLocks->mutex);
+               SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+               FastPathStrongRelationLocks->count[fasthashcode]++;
+               SpinLockRelease(&FastPathStrongRelationLocks->mutex);
        }
 
        LWLockRelease(partitionLock);