#include "logger.h"
#include "commit.h"

+
+/*
+ * This distributed deadlock detection (DDD) scheme is based on the
+ * following observations:
+ *
+ * If a transaction (say T1) in an apply worker (or in the receiver
+ * itself) gets stuck on a lock held by a transaction in a local
+ * backend (say T2), a deadlock is certain: once T2 is prepared and
+ * replicated, it will fail to obtain the lock that T1 already holds.
+ * The same reasoning applies when an apply worker (or receiver) waits
+ * for an apply worker (or receiver) belonging to another origin --
+ * there is no need to wait for distributed deadlock detection, we may
+ * just abort instantly.
+ * The only remaining case of distributed deadlock is an apply worker
+ * (or receiver) waiting for another apply worker from the same origin.
+ * That situation, however, is impossible: a single origin node cannot
+ * have two conflicting prepared transactions simultaneously.
+ *
+ * So we can build a distributed deadlock avoidance mechanism by
+ * disallowing such edges.  Now we may ask the inverse question: which
+ * wait graphs containing such edges do NOT actually represent a
+ * distributed deadlock?  That can happen when the holding transaction
+ * is purely local because it holds locks only in SHARE mode; the only
+ * lock levels conflicting with that mode are EXCLUSIVE and ACCESS
+ * EXCLUSIVE.  In all other cases the proposed avoidance scheme should
+ * yield no false positives.
+ *
+ * To cope with false positives in the EXCLUSIVE and ACCESS EXCLUSIVE
+ * modes, we may throw the error not in WaitOnLock(), where we first
+ * see the forbidden edge, but later, during the first call to the
+ * local deadlock detector.  This way we still have `deadlock_timeout`
+ * seconds to grab the lock, and the database user can also increase
+ * that timeout on a per-transaction basis if there are long-living
+ * read-only transactions.  (A sketch of this deferred check follows
+ * the function below.)
+ *
+ * As a further optimization, we can check whether the lock we request
+ * is EXCLUSIVE or higher, so as not to delay the rollback until the
+ * `deadlock_timeout` event (see the second sketch below).
+ */
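+/*
+ * Return true if the current process is an applier: receivers and pool
+ * (apply) workers are exactly the processes whose lock waits constitute
+ * the forbidden edges described above.
+ */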
+bool
+MtmDetectGlobalDeadLock(PGPROC *proc)
+{
+	return MtmIsReceiver || MtmIsPoolWorker;
+}
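
/*
 * A minimal sketch (an editorial illustration, not part of this commit)
 * of the deferred check described in the comment above.  DeadLockCheck(),
 * DeadLockState and the DS_* values are stock PostgreSQL (declared in
 * storage/lock.h); the wrapper below and the assumption that it can be
 * substituted for the stock check after deadlock_timeout fires are
 * hypothetical.
 */
#include "storage/lock.h"
#include "storage/proc.h"

static DeadLockState
MtmDeadLockCheck(PGPROC *proc)
{
	/* Run the stock local detector first: it still handles ordinary
	 * single-node deadlocks and wait-queue rearrangement. */
	DeadLockState	state = DeadLockCheck(proc);

	/*
	 * No local cycle was found, yet the waiter is a receiver or pool
	 * worker.  Per the reasoning above, such an edge is (or will become)
	 * a distributed deadlock, so report a hard deadlock immediately
	 * instead of involving any distributed detector.  Because this runs
	 * only after deadlock_timeout expires, the SHARE-holder false
	 * positives described above get deadlock_timeout seconds to resolve
	 * on their own.
	 */
	if (state == DS_NO_DEADLOCK && MtmDetectGlobalDeadLock(proc))
		state = DS_HARD_DEADLOCK;

	return state;
}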
+
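/*
 * A minimal sketch (again not part of this commit) of the "further
 * optimization" above, under one reading of it: a lock request weaker
 * than EXCLUSIVE cannot be blocked by a purely read-only SHARE-level
 * holder, so a forbidden edge seen at wait time must be a genuine
 * distributed deadlock and can be reported without sitting out
 * deadlock_timeout.  The function and its placement in the lock-wait
 * path are assumptions; LOCKMODE, ExclusiveLock, MyProc and ereport()
 * are stock PostgreSQL (postgres.h, storage/lock.h, storage/proc.h).
 */
static void
MtmReportForbiddenEdgeEarly(LOCKMODE reqmode)
{
	if (reqmode < ExclusiveLock && MtmDetectGlobalDeadLock(MyProc))
		ereport(ERROR,
				(errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
				 errmsg("distributed deadlock detected")));
}
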
+#if 0
+
#define LOCK_BY_INDEX(i) ((LWLockId)&ddd_shared->locks[(i)])
#define EQUAL_GTID(x, y) ((x).node == (y).node && (x).xid == (y).xid)

@@ -613,3 +656,5 @@ mtm_check_deadlock(PG_FUNCTION_ARGS)

	PG_RETURN_BOOL(MtmDetectGlobalDeadLockForXid(xid));
}
+
+#endif