compatibility with not-clustering-aware applications and the
lowest performance.
</para>
+
+     <para>
+      If this parameter is set to <varname>dml_adaptive</varname>, <productname>Pgpool-II</>
+      keeps track of each TABLE referenced in the WRITE statements within
+      the explicit transaction and will not load balance the subsequent
+      READ queries if the TABLE they are reading from was previously modified
+      inside the same transaction.
+      Dependent functions, triggers, and views on the tables can be configured
+      using <xref linkend="guc-dml-adaptive-object-relationship-list">.
+     </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="guc-dml-adaptive-object-relationship-list" xreflabel="dml_adaptive_object_relationship_list">
+ <term><varname>dml_adaptive_object_relationship_list</varname> (<type>string</type>)
+ <indexterm>
+ <primary><varname>dml_adaptive_object_relationship_list</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+
+    <para>
+     To prevent load balancing of READ statements on dependent objects, you may specify the object name
+     followed by a colon(<literal>:</>) and then a comma(<literal>,</>) separated list of dependent object names:
+     <replaceable>"[object]:[dependent-object]"</replaceable>.
+     In an explicit transaction block, after a WRITE statement has been issued, this will prevent
+     load balancing of any READ statements containing references to the dependent object(s).
+ <example id="example-dml-adaptive-object-relationship-list-1">
+ <title>Configuring dml adaptive object relationship</title>
+    <para>
+     If you have a trigger installed on <literal>table_1</> that does an INSERT into <literal>table_2</> for each
+     INSERT on <literal>table_1</>, then you would want to make sure that a
+     read on <literal>table_2</> is not load balanced within the same transaction
+     after an INSERT into <literal>table_1</>.
+     For this configuration you can set
+ <programlisting>
+ dml_adaptive_object_relationship_list = 'table_1:table_2'
+ </programlisting>
+ </para>
+ </example>
+
+     This parameter is only valid for
+     <xref linkend="guc-disable-load-balance-on-write">=<emphasis>'dml_adaptive'</emphasis>.
+
+ <note>
+     <para>
+      To configure a dependency on a function,
+      the function must also be present in the <xref linkend="guc-black-function-list">.
+     </para>
+ </note>
+
+ </para>
+
</listitem>
</varlistentry>
+
<varlistentry id="guc-statement-level-load-balance" xreflabel="statement_level_load_balance">
<term><varname>statement_level_load_balance</varname> (<type>boolean</type>)
<indexterm>
static bool MakeDBRedirectListRegex(char *newval, int elevel);
static bool MakeAppRedirectListRegex(char *newval, int elevel);
+static bool MakeDMLAdaptiveObjectRelationList(char *newval, int elevel);
+static char* getParsedToken(char *token, DBObjectTypes *object_type);
+
static bool check_redirect_node_spec(char *node_spec);
static char **get_list_from_string(const char *str, const char *delimi, int *n);
static char **get_list_from_string_regex_delim(const char *str, const char *delimi, int *n);
{"transaction", DLBOW_TRANSACTION, false},
{"trans_transaction", DLBOW_TRANS_TRANSACTION, false},
{"always", DLBOW_ALWAYS, false},
+ {"dml_adaptive", DLBOW_DML_ADAPTIVE, false},
{NULL, 0, false}
};
MakeAppRedirectListRegex, NULL
},
+ {
+ {"dml_adaptive_object_relationship_list", CFGCXT_RELOAD, STREAMING_REPLICATION_CONFIG,
+ "list of relationships between objects.",
+ CONFIG_VAR_TYPE_STRING, false, 0
+ },
+ &g_pool_config.dml_adaptive_object_relationship_list,
+ NULL,
+ NULL, NULL,
+ MakeDMLAdaptiveObjectRelationList, NULL
+ },
+
{
{"listen_addresses", CFGCXT_INIT, CONNECTION_CONFIG,
"hostname or IP address on which pgpool will listen on.",
return true;
}
+/*
+ * Parse the dml_adaptive_object_relationship_list config value, a comma
+ * separated list of "[object]:[dependent-object]" pairs, into the
+ * NULL-terminated pool_config->parsed_dml_adaptive_object_relationship_list
+ * array.  Always succeeds; an empty value clears the parsed list.
+ * elevel is accepted for signature parity with the other Make* hooks but
+ * is currently unused.
+ */
+static bool
+MakeDMLAdaptiveObjectRelationList(char *newval, int elevel)
+{
+	int			i;
+	int			elements_count = 0;
+	char	  **rawList = get_list_from_string(newval, ",", &elements_count);
+
+	/* An empty or unset value simply disables the relationship lookup */
+	if (rawList == NULL || elements_count == 0)
+	{
+		pool_config->parsed_dml_adaptive_object_relationship_list = NULL;
+		return true;
+	}
+
+	/* One extra slot serves as the NULL-name terminator of the array */
+	pool_config->parsed_dml_adaptive_object_relationship_list = palloc(sizeof(DBObjectRelation) * (elements_count + 1));
+
+	for (i = 0; i < elements_count; i++)
+	{
+		char *kvstr = pstrdup(rawList[i]);
+		char *left_token = strtok(kvstr, ":");
+		char *right_token = strtok(NULL, ":");
+		DBObjectTypes object_type;
+
+		/*
+		 * Log the untouched pair string (rawList[i]); kvstr has already
+		 * been truncated at the first ':' by strtok at this point, so
+		 * logging it would show only the left half of the pair.
+		 */
+		ereport(DEBUG5,
+			(errmsg("dml_adaptive_init"),
+				errdetail("%s -- left_token[%s] right_token[%s]", rawList[i], left_token, right_token)));
+
+		/* getParsedToken returns a palloc'd copy (or NULL), so kvstr can be freed below */
+		pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.name =
+			getParsedToken(left_token, &object_type);
+		pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.object_type = object_type;
+
+		pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.name =
+			getParsedToken(right_token,&object_type);
+		pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.object_type = object_type;
+		pfree(kvstr);
+	}
+
+	/*
+	 * NOTE(review): rawList and its elements are never released here;
+	 * confirm get_list_from_string's allocator (palloc vs malloc) and free
+	 * accordingly, otherwise each reload of this parameter leaks the list.
+	 */
+
+	/* Terminator entry: a NULL left name marks the end of the array */
+	pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.name = NULL;
+	pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.object_type = OBJECT_TYPE_UNKNOWN;
+	pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.name = NULL;
+	pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.object_type = OBJECT_TYPE_UNKNOWN;
+	return true;
+}
+
+/*
+ * Identify the object type for a dml adaptive object token.
+ *
+ * The check is deliberately primitive: a token ending in "()" is taken
+ * to be a function name (with the parentheses stripped), anything else
+ * a relation name.  No whitespace trimming is performed.
+ *
+ * Returns a palloc'd copy of the (possibly trimmed) token, or NULL when
+ * token is NULL; *object_type is set accordingly.
+ */
+static char*
+getParsedToken(char *token, DBObjectTypes *object_type)
+{
+	int len;
+	*object_type = OBJECT_TYPE_UNKNOWN;
+
+	if (!token)
+		return NULL;
+
+	len = strlen(token);
+
+	/*
+	 * "*()" = a name of at least one character followed by parentheses.
+	 * NOTE(review): len > 3 means a one-character function name such as
+	 * "f()" is treated as a relation -- confirm whether ">=" was intended.
+	 */
+	if (len > strlen("*()"))
+	{
+		int namelen = len - 2;
+		/* check if token ends with () */
+		if (strcmp(token + namelen,"()") == 0)
+		{
+			/*
+			 * Strip the parentheses from the end of the token name;
+			 * namelen is already the length without the "()" suffix.
+			 */
+			char *new_token = palloc(namelen + 1);
+			strncpy(new_token, token, namelen);
+			new_token[namelen] = '\0';
+			*object_type = OBJECT_TYPE_FUNCTION;
+			return new_token;
+		}
+	}
+	*object_type = OBJECT_TYPE_RELATION;
+	return pstrdup(token);
+}
+
static bool
MakeAppRedirectListRegex(char *newval, int elevel)
{
static void set_virtual_master_node(POOL_QUERY_CONTEXT *query_context);
static void set_load_balance_info(POOL_QUERY_CONTEXT *query_context);
+static bool is_in_list(char *name, List *list);
+static bool is_select_object_in_temp_black_list(Node *node, void *context);
+static bool add_object_into_temp_black_list(Node *node, void *context);
+static void dml_adaptive(Node *node, char *query);
+static char* get_associated_object_from_dml_adaptive_relations
+ (char *left_token, DBObjectTypes object_type);
+
/*
* Create and initialize per query session context
*/
dest = send_to_where(node, query);
+ dml_adaptive(node, query);
+
ereport(DEBUG1,
(errmsg("decide where to send the query"),
errdetail("destination = %d for query= \"%s\"", dest, query)));
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
+ else if (is_select_object_in_temp_black_list(node, query))
+ {
+ pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
+ }
else
{
if (pool_config->statement_level_load_balance)
pool_set_node_to_be_sent(query_context,
query_context->load_balance_node_id);
}
+
+/*
+ * Return true if "name" exactly matches (case-sensitively, via strcmp)
+ * one of the C strings stored in "list".
+ *
+ * name may be NULL and list may be NIL; both are treated as "not found".
+ */
+static bool
+is_in_list(char *name, List *list)
+{
+	if (name == NULL || list == NIL)
+		return false;
+
+	ListCell *cell;
+	foreach (cell, list)
+	{
+		char *cell_name = (char *)lfirst(cell);
+		if (strcmp(name, cell_name) == 0)
+		{
+			ereport(DEBUG1,
+				(errmsg("[%s] is in list", name)));
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Return true if any relation (RangeVar) referenced by the given raw
+ * parse tree appears in the current transaction's temp black list,
+ * meaning the query must not be load balanced.
+ *
+ * Walks the tree recursively via raw_expression_tree_walker; "context"
+ * carries the query text and is used only for debug logging.
+ * Only meaningful in DLBOW_DML_ADAPTIVE mode.
+ */
+static bool
+is_select_object_in_temp_black_list(Node *node, void *context)
+{
+	if (node == NULL || pool_config->disable_load_balance_on_write != DLBOW_DML_ADAPTIVE)
+		return false;
+
+	if (IsA(node, RangeVar))
+	{
+		RangeVar *rgv = (RangeVar *) node;
+		POOL_SESSION_CONTEXT *session_context = pool_get_session_context(false);
+
+		/*
+		 * DLBOW_DML_ADAPTIVE mode is already guaranteed by the guard at
+		 * function entry, so only the transaction state needs checking here.
+		 */
+		if (session_context->is_in_transaction)
+		{
+			ereport(DEBUG1,
+				(errmsg("is_select_object_in_temp_black_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname)));
+
+			return is_in_list(rgv->relname, session_context->transaction_temp_black_list);
+		}
+	}
+
+	return raw_expression_tree_walker(node, is_select_object_in_temp_black_list, context);
+}
+
+/*
+ * Look up the dependent (right-hand) object name registered for the given
+ * left-hand object in the parsed relationship list.
+ *
+ * Matching is case-insensitive on the name and exact on the object type.
+ * Returns NULL when no relationship list is configured or no entry matches.
+ */
+static char*
+get_associated_object_from_dml_adaptive_relations
+						(char *left_token, DBObjectTypes object_type)
+{
+	DBObjectRelation *rel = pool_config->parsed_dml_adaptive_object_relationship_list;
+
+	if (rel == NULL)
+		return NULL;
+
+	/* The array is terminated by an entry whose left-token name is NULL */
+	for (; rel->left_token.name != NULL; rel++)
+	{
+		if (rel->left_token.object_type != object_type)
+			continue;
+
+		if (strcasecmp(rel->left_token.name, left_token) == 0)
+			return rel->right_token.name;
+	}
+
+	return NULL;
+}
+
+/*
+ * Check the object relationship list.
+ *
+ * If "name" (a relation name, or a function name when is_func_name is
+ * true) has a dependent object registered in the parsed relationship
+ * list, append that dependent object's name to the per-transaction temp
+ * black list so subsequent reads of it are not load balanced.
+ *
+ * Only acts in DLBOW_DML_ADAPTIVE mode, inside an explicit transaction,
+ * and when a relationship list has been configured.
+ */
+void
+check_object_relationship_list(char *name, bool is_func_name)
+{
+	if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && pool_config->parsed_dml_adaptive_object_relationship_list)
+	{
+		POOL_SESSION_CONTEXT *session_context = pool_get_session_context(false);
+
+		if (session_context->is_in_transaction)
+		{
+			char *right_token =
+				get_associated_object_from_dml_adaptive_relations
+				(name, is_func_name? OBJECT_TYPE_FUNCTION : OBJECT_TYPE_RELATION);
+
+			if (right_token)
+			{
+				/* Allocate in the session context so the entry outlives the query */
+				MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context);
+				session_context->transaction_temp_black_list =
+					lappend(session_context->transaction_temp_black_list, pstrdup(right_token));
+				MemoryContextSwitchTo(old_context);
+			}
+		}
+
+	}
+}
+
+/*
+ * Tree-walker callback: for every relation (RangeVar) referenced by a
+ * WRITE statement's parse tree, add its relname to the per-transaction
+ * temp black list (if not already there), then also register any
+ * configured dependent objects via check_object_relationship_list().
+ *
+ * "context" carries the query text and is used only for debug logging.
+ * Always returns the walker's recursion result, never aborts early.
+ */
+static bool
+add_object_into_temp_black_list(Node *node, void *context)
+{
+	if (node == NULL)
+		return false;
+
+	if (IsA(node, RangeVar))
+	{
+		RangeVar *rgv = (RangeVar *) node;
+
+		ereport(DEBUG5,
+			(errmsg("add_object_into_temp_black_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname)));
+
+		/* Allocate list cells in the session context so they survive the query */
+		POOL_SESSION_CONTEXT *session_context = pool_get_session_context(false);
+		MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context);
+
+		if (!is_in_list(rgv->relname, session_context->transaction_temp_black_list))
+		{
+			ereport(DEBUG1,
+				(errmsg("add \"%s\" into transaction_temp_black_list", rgv->relname)));
+
+			session_context->transaction_temp_black_list = lappend(session_context->transaction_temp_black_list, pstrdup(rgv->relname));
+		}
+
+		MemoryContextSwitchTo(old_context);
+
+		/* Pull in dependent objects (triggers, views, ...) configured for this table */
+		check_object_relationship_list(rgv->relname, false);
+	}
+
+	return raw_expression_tree_walker(node, add_object_into_temp_black_list, context);
+}
+
+/*
+ * dml adaptive: maintain the per-transaction temp black list.
+ *
+ * Called for every query routed through send_to_where().  In
+ * DLBOW_DML_ADAPTIVE mode:
+ *  - BEGIN resets the transaction flag and clears the black list;
+ *  - COMMIT/ROLLBACK clears both again, ending tracking;
+ *  - any non-SELECT statement has its referenced relations (and their
+ *    configured dependents) added to the black list.
+ */
+static void
+dml_adaptive(Node *node, char *query)
+{
+	if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE)
+	{
+		/* Set/Unset transaction status flags */
+		if (IsA(node, TransactionStmt))
+		{
+			POOL_SESSION_CONTEXT *session_context = pool_get_session_context(false);
+			/* list operations below must allocate in the session context */
+			MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context);
+
+			if (is_start_transaction_query(node))
+			{
+				session_context->is_in_transaction = true;
+
+				/* Drop any leftovers from a previous transaction */
+				if (session_context->transaction_temp_black_list != NIL)
+					list_free_deep(session_context->transaction_temp_black_list);
+
+				session_context->transaction_temp_black_list = NIL;
+			}
+			else if(is_commit_or_rollback_query(node))
+			{
+				session_context->is_in_transaction = false;
+
+				/* Transaction is over; tracked tables are no longer tainted */
+				if (session_context->transaction_temp_black_list != NIL)
+					list_free_deep(session_context->transaction_temp_black_list);
+
+				session_context->transaction_temp_black_list = NIL;
+			}
+
+			MemoryContextSwitchTo(old_context);
+			return;
+		}
+
+		/* If non-selectStmt, find the relname and add it to the transaction temp black list. */
+		if (!is_select_query(node, query))
+			add_object_into_temp_black_list(node, query);
+
+	}
+}
static void init_sent_message_list(void);
static POOL_PENDING_MESSAGE * copy_pending_message(POOL_PENDING_MESSAGE * messag);
static void dump_sent_message(char *caller, POOL_SENT_MESSAGE * m);
+static void dml_adaptive_init(void);
+static void dml_adaptive_destroy(void);
#ifdef PENDING_MESSAGE_DEBUG
static int Elevel = LOG;
/* Transaction read only */
session_context->transaction_read_only = false;
+
+ dml_adaptive_init();
}
/*
if (session_context->query_context)
pool_query_context_destroy(session_context->query_context);
MemoryContextDelete(session_context->memory_context);
+
+ dml_adaptive_destroy();
}
/* XXX For now, just zap memory */
memset(&session_context_d, 0, sizeof(session_context_d));
caller, m, m->kind, m->name, m->state)));
}
+/*
+ * Reset the dml adaptive tracking state at session-context creation.
+ * No-op unless disable_load_balance_on_write = 'dml_adaptive'.
+ */
+static void
+dml_adaptive_init(void)
+{
+	if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE)
+	{
+		session_context->is_in_transaction = false;
+		session_context->transaction_temp_black_list = NIL;
+	}
+}
+
+/*
+ * Tear down the dml adaptive tracking state at session-context destruction.
+ *
+ * The black-list cells are allocated inside session_context->memory_context
+ * (see add_object_into_temp_black_list / check_object_relationship_list),
+ * and this function is invoked after that context has already been deleted
+ * by pool_session_context_destroy.  Calling list_free_deep() here would
+ * therefore walk and free dangling pointers (use-after-free / double free);
+ * the storage is gone, so just reset the head pointer.
+ */
+static void
+dml_adaptive_destroy(void)
+{
+	if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && session_context)
+	{
+		session_context->transaction_temp_black_list = NIL;
+	}
+}
+
/*
* Create a sent message.
* kind: one of 'P':Parse, 'B':Bind or 'Q':Query(PREPARE)
pool_set_writing_transaction(void)
{
/*
- * If disable_transaction_on_write is 'off', then never turn on writing
+ * If disable_transaction_on_write is 'off' or 'dml_adaptive', then never turn on writing
* transaction flag.
*/
- if (pool_config->disable_load_balance_on_write != DLBOW_OFF)
+ if (pool_config->disable_load_balance_on_write != DLBOW_OFF && pool_config->disable_load_balance_on_write != DLBOW_DML_ADAPTIVE)
{
pool_get_session_context(false)->writing_transaction = true;
ereport(DEBUG5,
table->tablename, table->state)));
}
#endif
+
}
extern void pool_unset_cache_exceeded(void);
extern bool pool_is_transaction_read_only(Node *node);
extern void pool_force_query_node_to_backend(POOL_QUERY_CONTEXT * query_context, int backend_id);
+extern void check_object_relationship_list(char *name, bool is_func_name);
#endif /* POOL_QUERY_CONTEXT_H */
*/
List *temp_tables;
+ bool is_in_transaction;
+
+ /*
+ * Current transaction temp black list
+ */
+ List *transaction_temp_black_list;
+
#ifdef NOT_USED
/* Preferred "master" node id. Only used for SimpleForwardToFrontend. */
int preferred_master_node_id;
DLBOW_OFF = 1,
DLBOW_TRANSACTION,
DLBOW_TRANS_TRANSACTION,
- DLBOW_ALWAYS
+ DLBOW_ALWAYS,
+ DLBOW_DML_ADAPTIVE
} DLBOW_OPTION;
typedef enum RELQTARGET_OPTION
* connecting to backend */
} HealthCheckParams;
+/*
+ * Object classes tracked by the dml adaptive relationship list.
+ * Currently only functions and relations are required.
+ */
+typedef enum
+{
+	OBJECT_TYPE_FUNCTION,
+	OBJECT_TYPE_RELATION,
+	OBJECT_TYPE_UNKNOWN
+} DBObjectTypes;
+
+/* A single named database object together with its class. */
+typedef struct
+{
+	char *name;
+	DBObjectTypes object_type;
+} DBObject;
+
+/*
+ * One parsed "[object]:[dependent-object]" pair from
+ * dml_adaptive_object_relationship_list.
+ */
+typedef struct
+{
+	DBObject left_token;
+	DBObject right_token;
+} DBObjectRelation;
+
+
/*
* configuration parameters
*/
* black_query_pattern_list */
int num_wd_monitoring_interfaces_list; /* number of items in
* wd_monitoring_interfaces_list */
-
/* ssl configuration */
bool ssl; /* if non 0, activate ssl support
* (frontend+backend) */
* will not be load balanced
* until the session ends. */
+ char *dml_adaptive_object_relationship_list; /* objects relationship list*/
+ DBObjectRelation *parsed_dml_adaptive_object_relationship_list;
+
bool statement_level_load_balance; /* if on, select load balancing node per statement */
/*
pool_where_to_send(query_context, query_context->original_query,
query_context->parse_tree);
+ if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && strlen(name) != 0)
+ pool_setall_node_to_be_sent(query_context);
+
if (REPLICATION)
{
char *rewrite_query;
return POOL_END;
}
+ if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE &&
+ TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) == 'T')
+ {
+ pool_where_to_send(query_context, query_context->original_query,
+ query_context->parse_tree);
+ }
+
/*
* Start a transaction if necessary in replication mode
*/
disable_load_balance_on_write = 'transaction'
# Load balance behavior when write query is issued
# in an explicit transaction.
+ #
+ # Valid values:
+ #
+ # 'transaction' (default):
+ # if a write query is issued, subsequent
+ # read queries will not be load balanced
+ # until the transaction ends.
+ #
+ # 'trans_transaction':
+ # if a write query is issued, subsequent
+ # read queries in an explicit transaction
+ # will not be load balanced until the session ends.
+ #
+ # 'dml_adaptive':
+ # Queries on the tables that have already been
+ # modified within the current explicit transaction will
+ # not be load balanced until the end of the transaction.
+ #
+ # 'always':
+ # if a write query is issued, read queries will
+ # not be load balanced until the session ends.
+ #
# Note that any query not in an explicit transaction
# is not affected by the parameter.
- # 'transaction' (the default): if a write query is issued,
- # subsequent read queries will not be load balanced
- # until the transaction ends.
- # 'trans_transaction': if a write query is issued,
- # subsequent read queries in an explicit transaction
- # will not be load balanced until the session ends.
- # 'always': if a write query is issued, read queries will
- # not be load balanced until the session ends.
+
+dml_adaptive_object_relationship_list= ''
+ # comma separated list of object pairs
+ # [object]:[dependent-object], to disable load balancing
+ # of dependent objects within the explicit transaction
+ # after WRITE statement is issued on (depending-on) object.
+ #
+ # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view'
+ # Note: function name in this list must also be present in
+ # the black_function_list
+ # only valid for disable_load_balance_on_write = 'dml_adaptive'.
statement_level_load_balance = off
# Enables statement level load balancing
disable_load_balance_on_write = 'transaction'
# Load balance behavior when write query is issued
# in an explicit transaction.
+ #
+ # Valid values:
+ #
+ # 'transaction' (default):
+ # if a write query is issued, subsequent
+ # read queries will not be load balanced
+ # until the transaction ends.
+ #
+ # 'trans_transaction':
+ # if a write query is issued, subsequent
+ # read queries in an explicit transaction
+ # will not be load balanced until the session ends.
+ #
+ # 'dml_adaptive':
+ # Queries on the tables that have already been
+ # modified within the current explicit transaction will
+ # not be load balanced until the end of the transaction.
+ #
+ # 'always':
+ # if a write query is issued, read queries will
+ # not be load balanced until the session ends.
+ #
# Note that any query not in an explicit transaction
# is not affected by the parameter.
- # 'transaction' (the default): if a write query is issued,
- # subsequent read queries will not be load balanced
- # until the transaction ends.
- # 'trans_transaction': if a write query is issued,
- # subsequent read queries in an explicit transaction
- # will not be load balanced until the session ends.
- # 'always': if a write query is issued, read queries will
- # not be load balanced until the session ends.
+
+dml_adaptive_object_relationship_list= ''
+ # comma separated list of object pairs
+ # [object]:[dependent-object], to disable load balancing
+ # of dependent objects within the explicit transaction
+ # after WRITE statement is issued on (depending-on) object.
+ #
+ # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view'
+ # Note: function name in this list must also be present in
+ # the black_function_list
+ # only valid for disable_load_balance_on_write = 'dml_adaptive'.
statement_level_load_balance = off
# Enables statement level load balancing
disable_load_balance_on_write = 'transaction'
# Load balance behavior when write query is issued
# in an explicit transaction.
+ #
+ # Valid values:
+ #
+ # 'transaction' (default):
+ # if a write query is issued, subsequent
+ # read queries will not be load balanced
+ # until the transaction ends.
+ #
+ # 'trans_transaction':
+ # if a write query is issued, subsequent
+ # read queries in an explicit transaction
+ # will not be load balanced until the session ends.
+ #
+ # 'dml_adaptive':
+ # Queries on the tables that have already been
+ # modified within the current explicit transaction will
+ # not be load balanced until the end of the transaction.
+ #
+ # 'always':
+ # if a write query is issued, read queries will
+ # not be load balanced until the session ends.
+ #
# Note that any query not in an explicit transaction
# is not affected by the parameter.
- # 'transaction' (the default): if a write query is issued,
- # subsequent read queries will not be load balanced
- # until the transaction ends.
- # 'trans_transaction': if a write query is issued,
- # subsequent read queries in an explicit transaction
- # will not be load balanced until the session ends.
- # 'always': if a write query is issued, read queries will
- # not be load balanced until the session ends.
+
+dml_adaptive_object_relationship_list= ''
+ # comma separated list of object pairs
+ # [object]:[dependent-object], to disable load balancing
+ # of dependent objects within the explicit transaction
+ # after WRITE statement is issued on (depending-on) object.
+ #
+ # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view'
+ # Note: function name in this list must also be present in
+ # the black_function_list
+ # only valid for disable_load_balance_on_write = 'dml_adaptive'.
statement_level_load_balance = off
# Enables statement level load balancing
disable_load_balance_on_write = 'transaction'
# Load balance behavior when write query is issued
# in an explicit transaction.
+ #
+ # Valid values:
+ #
+ # 'transaction' (default):
+ # if a write query is issued, subsequent
+ # read queries will not be load balanced
+ # until the transaction ends.
+ #
+ # 'trans_transaction':
+ # if a write query is issued, subsequent
+ # read queries in an explicit transaction
+ # will not be load balanced until the session ends.
+ #
+ # 'dml_adaptive':
+ # Queries on the tables that have already been
+ # modified within the current explicit transaction will
+ # not be load balanced until the end of the transaction.
+ #
+ # 'always':
+ # if a write query is issued, read queries will
+ # not be load balanced until the session ends.
+ #
# Note that any query not in an explicit transaction
# is not affected by the parameter.
- # 'transaction' (the default): if a write query is issued,
- # subsequent read queries will not be load balanced
- # until the transaction ends.
- # 'trans_transaction': if a write query is issued,
- # subsequent read queries in an explicit transaction
- # will not be load balanced until the session ends.
- # 'always': if a write query is issued, read queries will
- # not be load balanced until the session ends.
+
+dml_adaptive_object_relationship_list= ''
+ # comma separated list of object pairs
+ # [object]:[dependent-object], to disable load balancing
+ # of dependent objects within the explicit transaction
+ # after WRITE statement is issued on (depending-on) object.
+ #
+ # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view'
+ # Note: function name in this list must also be present in
+ # the black_function_list
+ # only valid for disable_load_balance_on_write = 'dml_adaptive'.
statement_level_load_balance = off
# Enables statement level load balancing
disable_load_balance_on_write = 'transaction'
# Load balance behavior when write query is issued
# in an explicit transaction.
+ #
+ # Valid values:
+ #
+ # 'transaction' (default):
+ # if a write query is issued, subsequent
+ # read queries will not be load balanced
+ # until the transaction ends.
+ #
+ # 'trans_transaction':
+ # if a write query is issued, subsequent
+ # read queries in an explicit transaction
+ # will not be load balanced until the session ends.
+ #
+ # 'dml_adaptive':
+ # Queries on the tables that have already been
+ # modified within the current explicit transaction will
+ # not be load balanced until the end of the transaction.
+ #
+ # 'always':
+ # if a write query is issued, read queries will
+ # not be load balanced until the session ends.
+ #
# Note that any query not in an explicit transaction
# is not affected by the parameter.
- # 'transaction' (the default): if a write query is issued,
- # subsequent read queries will not be load balanced
- # until the transaction ends.
- # 'trans_transaction': if a write query is issued,
- # subsequent read queries in an explicit transaction
- # will not be load balanced until the session ends.
- # 'always': if a write query is issued, read queries will
- # not be load balanced until the session ends.
+
+dml_adaptive_object_relationship_list= ''
+ # comma separated list of object pairs
+ # [object]:[dependent-object], to disable load balancing
+ # of dependent objects within the explicit transaction
+ # after WRITE statement is issued on (depending-on) object.
+ #
+ # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view'
+ # Note: function name in this list must also be present in
+ # the black_function_list
+ # only valid for disable_load_balance_on_write = 'dml_adaptive'.
statement_level_load_balance = off
# Enables statement level load balancing
--- /dev/null
+FE=> Query (query="DROP TABLE IF EXISTS tb_dml_insert")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M table "tb_dml_insert" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TABLE tb_dml_insert(a INT)")
+<= BE CommandComplete(CREATE TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="INSERT INTO tb_dml_insert VALUES(1)")
+<= BE CommandComplete(INSERT 0 1)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP TABLE IF EXISTS tb_dml_update")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M table "tb_dml_update" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TABLE tb_dml_update(a INT)")
+<= BE CommandComplete(CREATE TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="INSERT INTO tb_dml_update VALUES(1)")
+<= BE CommandComplete(INSERT 0 1)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP TABLE IF EXISTS tb_dml_delete")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M table "tb_dml_delete" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TABLE tb_dml_delete(a INT)")
+<= BE CommandComplete(CREATE TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="INSERT INTO tb_dml_delete VALUES(1)")
+<= BE CommandComplete(INSERT 0 1)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP TABLE IF EXISTS tb_t1")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M table "tb_t1" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TABLE tb_t1(a INT)")
+<= BE CommandComplete(CREATE TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP TABLE IF EXISTS tb_t2")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M table "tb_t2" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TABLE tb_t2(b INT)")
+<= BE CommandComplete(CREATE TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE OR REPLACE FUNCTION insert_tb_t2_func() RETURNS TRIGGER AS $example_table$ BEGIN INSERT INTO tb_t2 VALUES (1); RETURN NEW; END; $example_table$ LANGUAGE plpgsql")
+<= BE CommandComplete(CREATE FUNCTION)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TRIGGER insert_tb_t2_trigger AFTER INSERT ON tb_t1 FOR EACH ROW EXECUTE PROCEDURE insert_tb_t2_func()")
+<= BE CommandComplete(CREATE TRIGGER)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP TABLE IF EXISTS tb_f")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M table "tb_f" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TABLE tb_f(f INT)")
+<= BE CommandComplete(CREATE TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP FUNCTION IF EXISTS insert_tb_f_func")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M function insert_tb_f_func() does not exist, skipping F dropcmds.c L 491 R does_not_exist_skipping )
+<= BE CommandComplete(DROP FUNCTION)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE FUNCTION insert_tb_f_func(integer) RETURNS void AS $$ INSERT INTO tb_f VALUES ($1); $$ LANGUAGE SQL")
+<= BE CommandComplete(CREATE FUNCTION)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP TABLE IF EXISTS tb_v CASCADE")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M table "tb_v" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE TABLE tb_v(v INT)")
+<= BE CommandComplete(CREATE TABLE)
+<= BE ReadyForQuery(I)
+FE=> Query (query="DROP VIEW IF EXISTS tb_v_view")
+<= BE NoticeResponse(S NOTICE V NOTICE C 00000 M view "tb_v_view" does not exist, skipping F tablecmds.c L 1183 R DropErrorMsgNonExistent )
+<= BE CommandComplete(DROP VIEW)
+<= BE ReadyForQuery(I)
+FE=> Query (query="CREATE VIEW tb_v_view AS SELECT * FROM tb_v")
+<= BE CommandComplete(CREATE VIEW)
+<= BE ReadyForQuery(I)
+FE=> Parse(stmt="", query="BEGIN")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="S1", query="SELECT * FROM tb_dml_insert")
+FE=> Parse(stmt="S_SELECT_1", query="SELECT 1")
+FE=> Parse(stmt="", query="INSERT INTO tb_dml_insert VALUES(1)")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S1", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S_SELECT_1", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="", query="COMMIT")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Sync
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(BEGIN)
+<= BE ParseComplete
+<= BE ParseComplete
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(INSERT 0 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE DataRow
+<= BE CommandComplete(SELECT 2)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(COMMIT)
+<= BE ReadyForQuery(I)
+FE=> Parse(stmt="", query="BEGIN")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="S2", query="SELECT * FROM tb_dml_update")
+FE=> Parse(stmt="", query="UPDATE tb_dml_update SET a = 2")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S2", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S_SELECT_1", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="", query="COMMIT")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Sync
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(BEGIN)
+<= BE ParseComplete
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(UPDATE 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(COMMIT)
+<= BE ReadyForQuery(I)
+FE=> Parse(stmt="", query="BEGIN")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="S3", query="SELECT * FROM tb_dml_delete")
+FE=> Parse(stmt="", query="DELETE FROM tb_dml_delete")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S3", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S_SELECT_1", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="", query="COMMIT")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Sync
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(BEGIN)
+<= BE ParseComplete
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(DELETE 1)
+<= BE BindComplete
+<= BE CommandComplete(SELECT 0)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(COMMIT)
+<= BE ReadyForQuery(I)
+FE=> Parse(stmt="", query="BEGIN")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="S4", query="SELECT * FROM tb_t2")
+FE=> Parse(stmt="", query="INSERT INTO tb_t1 VALUES(1)")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S4", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S_SELECT_1", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="", query="COMMIT")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Sync
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(BEGIN)
+<= BE ParseComplete
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(INSERT 0 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(COMMIT)
+<= BE ReadyForQuery(I)
+FE=> Parse(stmt="", query="BEGIN")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="S5", query="SELECT * FROM tb_f")
+FE=> Parse(stmt="", query="SELECT insert_tb_f_func(6)")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S5", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S_SELECT_1", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="", query="COMMIT")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Sync
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(BEGIN)
+<= BE ParseComplete
+<= BE ParseComplete
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(COMMIT)
+<= BE ReadyForQuery(I)
+FE=> Parse(stmt="", query="BEGIN")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="S6", query="SELECT * FROM tb_v_view")
+FE=> Parse(stmt="", query="INSERT INTO tb_v VALUES(8)")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S6", portal="")
+FE=> Execute(portal="")
+FE=> Bind(stmt="S_SELECT_1", portal="")
+FE=> Execute(portal="")
+FE=> Parse(stmt="", query="COMMIT")
+FE=> Bind(stmt="", portal="")
+FE=> Execute(portal="")
+FE=> Sync
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(BEGIN)
+<= BE ParseComplete
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(INSERT 0 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE BindComplete
+<= BE DataRow
+<= BE CommandComplete(SELECT 1)
+<= BE ParseComplete
+<= BE BindComplete
+<= BE CommandComplete(COMMIT)
+<= BE ReadyForQuery(I)
+FE=> Terminate
--- /dev/null
+#!/usr/bin/env bash
+#----------------------------------------------------------------------------------------
+# test script for dml adaptive.
+set -e
+
+#---------------------------------------------------------------------------------------
+
+HOST_IP="127.0.0.1"
+WHOAMI=`whoami`
+BASE_PORT=${BASE_PORT:-"11000"}
+CLUSTERS_NUM=${CLUSTERS_NUM:-"2"}
+
+BASE_DIR=$(cd `dirname $0`; pwd)
+TEST_DIR=$BASE_DIR/tmp_testdir
+LOG_FILE=$BASE_DIR/test.log
+
+PG_INSTALL_PATH=${PG_INSTALL_PATH:-"/usr/local/pgsql"}
+PG_USER=${PG_USER:-"postgres"}
+PG_REPLICATION_USER=${PG_REPLICATION_USER:-"repl"}
+PG_VERSION=""
+
+#---------------------------------------------------------------------------------------
+
+function check_version()
+{
+    # Detect the PostgreSQL major version from "initdb -V" and store it in
+    # the global PG_VERSION as an integer (e.g. 12.3 -> 123, 10.0 -> 100).
+    # Exits with an error when the server is older than 10.0.
+    echo "check PostgreSQL version ..."
+
+    # e.g. "initdb (PostgreSQL) 12.3" -> "12 3"
+    vstr=`$PG_INSTALL_PATH/bin/initdb -V|awk '{print $3}'|sed 's/\./ /g'`
+
+    set +e
+    # alpha/beta/rc versions carry letters (e.g. "13beta1"); strip the suffix
+    echo $vstr|grep -E "[a-z]" > /dev/null
+    if [ $? = 0 ];then
+        vstr=`echo $vstr|sed 's/\([0-9]*\).*/\1/'`
+        major1=`echo $vstr|awk '{print $1}'`
+        major2=`echo $vstr|awk '{print $2}'`
+        # quote so the test is well-formed when no minor version is present
+        if [ -z "$major2" ];then
+            major2=0
+        fi
+    else
+        vstr=`echo $vstr|sed 's/\./ /g'`
+        major1=`echo $vstr|awk '{print $1}'`
+        major2=`echo $vstr|awk '{print $2}'`
+    fi
+    set -e
+
+    major1=`expr $major1 \* 10`
+    PG_VERSION=`expr $major1 + $major2`
+    echo PostgreSQL major version: $PG_VERSION
+    if [ $PG_VERSION -lt 100 ];then
+        echo "in order to make the script run normally, please make sure PostgreSQL major version is 10.0 or later"
+        exit 1
+    fi
+
+    echo "check done."
+}
+
+#-------------------------------------------
+# create PostgreSQL cluster
+#-------------------------------------------
+function initdb_primary_cluster()
+{
+    # Create the primary database cluster under $TEST_DIR/data-primary.
+    echo "initdb_primary_cluster ..."
+    echo -n "creating database cluster $TEST_DIR/data-primary..."
+
+    local initdb_opts="--no-locale -E UTF_8"
+    $PG_INSTALL_PATH/bin/initdb -D $TEST_DIR/data-primary $initdb_opts -U $PG_USER
+
+    echo "done"
+}
+
+function set_primary_postgresql_conf()
+{
+    # Append archiving and streaming-replication settings to the primary's
+    # postgresql.conf and create the WAL archive directory.
+    echo "set_primary_postgresql_conf ..."
+
+    PG_CONF=$TEST_DIR/data-primary/postgresql.conf
+    PG_HBA_CONF_0=$TEST_DIR/data-primary/pg_hba.conf
+    PORT=`expr $BASE_PORT + 0`
+
+    echo "listen_addresses = '*'" >> $PG_CONF
+    echo "port = $PORT" >> $PG_CONF
+
+    echo "archive_mode = on" >> $PG_CONF
+    echo "archive_command = 'cp %p $TEST_DIR/archivedir/%f </dev/null'" >> $PG_CONF
+    mkdir $TEST_DIR/archivedir
+
+    echo "max_wal_senders = 10" >> $PG_CONF
+    echo "max_replication_slots = 10" >> $PG_CONF
+
+    echo "wal_level = replica" >> $PG_CONF
+    # wal_keep_segments was removed in PostgreSQL 13 (replaced by
+    # wal_keep_size); writing the old name would prevent server startup.
+    # PG_VERSION is set by check_version() before this function runs.
+    if [ $PG_VERSION -lt 130 ];then
+        echo "wal_keep_segments = 512" >> $PG_CONF
+    else
+        # 512 segments * 16MB per segment = 8192MB
+        echo "wal_keep_size = 8192MB" >> $PG_CONF
+    fi
+
+    echo "done"
+}
+
+function create_role()
+{
+    # Create the streaming-replication role on the primary.
+    echo "create_role ..."
+
+    local port=`expr $BASE_PORT + 0`
+    $PG_INSTALL_PATH/bin/psql -h $HOST_IP -p $port -U $PG_USER -d postgres \
+        -c "CREATE ROLE $PG_REPLICATION_USER REPLICATION LOGIN"
+
+    echo "done"
+}
+
+function create_standby()
+{
+    # Build the standby cluster from the running primary with pg_basebackup
+    # and tag its replication connection as "standby01" so the primary's
+    # synchronous_standby_names can match it.
+    echo "create_standby ..."
+
+    CLUSTER_DIR=$TEST_DIR/data-standby
+    PG_CONF=$CLUSTER_DIR/postgresql.conf
+    PORT_PRIMARY=`expr $BASE_PORT + 0`
+    PORT_STANDBY=`expr $BASE_PORT + 1`
+
+    # -R writes the replication settings for us (into recovery.conf before
+    # PG 12, into postgresql.auto.conf from PG 12 on)
+    $PG_INSTALL_PATH/bin/pg_basebackup -h $HOST_IP -p $PORT_PRIMARY -U $PG_REPLICATION_USER -Fp -Xs -Pv -R -D $CLUSTER_DIR
+
+    echo "port = $PORT_STANDBY" >> $PG_CONF
+
+    # inject application_name into primary_conninfo, in whichever file the
+    # server version uses
+    if [ $PG_VERSION -lt 120 ];then
+        # PG_VERSION < 12.0
+        sed -i "s/primary_conninfo = '/primary_conninfo = 'application_name=standby01 /g" $CLUSTER_DIR/recovery.conf
+    else
+        # PG_VERSION >= 12.0
+        sed -i "s/primary_conninfo = '/primary_conninfo = 'application_name=standby01 /g" $CLUSTER_DIR/postgresql.auto.conf
+    fi
+
+    echo "done"
+}
+
+function set_sync_primary_postgresql_conf()
+{
+    # Enable synchronous replication to standby01 on the primary.
+    echo "set_sync_primary_postgresql_conf ..."
+
+    local conf=$TEST_DIR/data-primary/postgresql.conf
+
+    echo "synchronous_commit = on" >> $conf
+    echo "synchronous_standby_names = 'standby01'" >> $conf
+
+    echo "done"
+}
+
+function start_primary()
+{
+    # Start the primary server only (the standby does not exist yet).
+    echo "start_primary ..."
+
+    $PG_INSTALL_PATH/bin/pg_ctl -D $TEST_DIR/data-primary start
+
+    echo "done"
+}
+
+function start_pg_all()
+{
+    # Restart the primary (to pick up the synchronous-replication settings
+    # added after its first start) and then start the standby.
+    echo "start_pg_all ..."
+
+    CLUSTER_DIR_PRIMARY=$TEST_DIR/data-primary
+    CLUSTER_DIR_STANDBY=$TEST_DIR/data-standby
+
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_PRIMARY restart
+    sleep 1
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_STANDBY start
+    sleep 1
+
+    echo "done"
+}
+
+function stop_pg_all()
+{
+    # Stop both servers: standby first, then the primary.
+    echo "stop_pg_all ..."
+
+    CLUSTER_DIR_PRIMARY=$TEST_DIR/data-primary
+    CLUSTER_DIR_STANDBY=$TEST_DIR/data-standby
+
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_STANDBY stop
+    sleep 1
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_PRIMARY stop
+    sleep 1
+
+    echo "done"
+}
+
+function create_streaming_replication()
+{
+    # Set up a two-node synchronous streaming-replication pair
+    # (primary + standby01). The call order matters: the primary must be
+    # running before the role is created and the standby is based off it.
+    echo "create_streaming_replication ..."
+
+    initdb_primary_cluster
+    set_primary_postgresql_conf
+    start_primary
+    create_role
+    create_standby
+    set_sync_primary_postgresql_conf
+    start_pg_all
+
+    echo "done"
+}
+
+function set_pool_conf()
+{
+# Generate pgpool.conf: start from the streaming-replication sample file,
+# append the dml_adaptive test settings, then substitute placeholders.
+echo "set_pool_conf ..."
+
+PORT_PRIMARY=`expr $BASE_PORT + 0`
+PORT_STANDBY=`expr $BASE_PORT + 1`
+PORT_POOL=`expr $BASE_PORT + 2`
+PORT_PCP=`expr $BASE_PORT + 3`
+TEST_DIR=$TEST_DIR
+
+rm -fr $TEST_DIR/pgpool.conf
+cp $POOL_INSTALL_PATH/etc/pgpool.conf.sample-stream $TEST_DIR/pgpool.conf
+
+# The heredoc delimiter is quoted ('EOF') so the __TOKEN__ placeholders
+# reach the file literally; the sed pass below replaces them.
+# backend_weight0=0/backend_weight1=1 force all load-balanced reads to the
+# standby, so any read observed on the primary is a dml_adaptive redirect.
+cat >> $TEST_DIR/pgpool.conf <<'EOF'
+port = __PORT_POOL__
+pcp_port = __PORT_PCP__
+
+backend_hostname0 = '127.0.0.1'
+backend_port0 = __PORT_PRIMARY__
+backend_weight0 = 0
+
+backend_hostname1 = '127.0.0.1'
+backend_port1 = __PORT_STANDBY__
+backend_weight1 = 1
+
+log_per_node_statement = on
+
+pid_file_name = '/__TEST_DIR__/pgpool.pid'
+black_function_list = 'currval,lastval,nextval,setval,insert_tb_f_func'
+disable_load_balance_on_write = 'dml_adaptive'
+dml_adaptive_object_relationship_list= 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:tb_v_view'
+
+sr_check_period = 0
+sr_check_user = '__PG_USER__'
+health_check_user = '__PG_USER__'
+EOF
+
+/bin/sed -i \
+    -e "/__PORT_PRIMARY__/s@__PORT_PRIMARY__@$PORT_PRIMARY@" \
+    -e "/__PORT_STANDBY__/s@__PORT_STANDBY__@$PORT_STANDBY@" \
+    -e "/__PORT_POOL__/s@__PORT_POOL__@$PORT_POOL@" \
+    -e "/__PORT_PCP__/s@__PORT_PCP__@$PORT_PCP@" \
+    -e "/__TEST_DIR__/s@__TEST_DIR__@$TEST_DIR@" \
+    -e "/__PG_USER__/s@__PG_USER__@$PG_USER@" \
+    $TEST_DIR/pgpool.conf
+
+    echo "done"
+}
+
+function start_pool()
+{
+    # Launch pgpool in the background, capturing its log output.
+    echo "start_pool ..."
+
+    # NOTE(review): the socket glob assumes the default BASE_PORT=11000
+    # range — confirm behavior if BASE_PORT is overridden.
+    rm -rf /tmp/.s.PGSQL.110*
+    $POOL_INSTALL_PATH/bin/pgpool -D -n -f $TEST_DIR/pgpool.conf 2>&1 | cat > $TEST_DIR/pgpool.log &
+
+    echo "start_pool done"
+}
+
+function stop_pool()
+{
+    # Ask pgpool to stop, appending its output to the log.
+    echo "stop_pool ..."
+
+    # NOTE(review): the stop command is backgrounded, so the socket cleanup
+    # below may race with the actual shutdown — confirm this is intended.
+    $POOL_INSTALL_PATH/bin/pgpool -D -n -f $TEST_DIR/pgpool.conf stop 2>&1 | cat >> $TEST_DIR/pgpool.log &
+    rm -rf /tmp/.s.PGSQL.110*
+
+    echo "done"
+}
+
+function test_dml_extended()
+{
+# Run every pgproto scenario in test_data_files/ through pgpool and capture
+# the protocol-level output under results/. Lines starting with "##" in a
+# scenario file are extra pgpool.conf settings for that scenario.
+echo "test_dml_extended ..."
+
+    PORT_PRIMARY=`expr $BASE_PORT + 0`
+    PORT_POOL=`expr $BASE_PORT + 2`
+
+    export LD_LIBRARY_PATH=$PG_INSTALL_PATH/lib:$LD_LIBRARY_PATH
+
+    export PGPORT=$PORT_POOL
+    export PGHOST=$HOST_IP
+    export PGUSER=$PG_USER
+    export PGDATABASE=postgres
+
+    # Set up test data files
+    specified_tests=disable-load-balance-dml.data
+
+    if [ $# -gt 0 ];then
+        test_data_files=`(cd $BASE_DIR/test_data_files;ls |grep $specified_tests)`
+    else
+        test_data_files=`(cd $BASE_DIR/test_data_files;ls)`
+    fi
+
+    # initialize the timeout counter before the first increment
+    timeoutcnt=0
+
+    for i in $test_data_files
+    do
+        echo -n "testing $i ... "
+
+        # check if modification to pgpool.conf specified; "|| true" keeps
+        # "set -e" from aborting when the file has no "##" lines.
+        d=/tmp/diff$$
+        grep '^##' $BASE_DIR/test_data_files/$i > $d || true
+        if [ -s $d ]
+        then
+            sed -e 's/^##//' $d >> $TEST_DIR/pgpool.conf
+        fi
+        rm -f $d
+
+        start_pool
+        sleep 5
+
+        # wait until pgpool accepts connections; run psql inside the "if"
+        # condition so a failed probe does not trip "set -e".
+        while :
+        do
+            if $PG_INSTALL_PATH/bin/psql -d $PGDATABASE -h $PGHOST -p $PGPORT -U $PG_USER -c "select 1"
+            then
+                break
+            fi
+            sleep 1
+        done
+
+        $PG_INSTALL_PATH/bin/psql -h $HOST_IP -p $PORT_POOL -U $PG_USER -d postgres -c "show pool_nodes;"
+
+        # capture pgproto's status explicitly: under "set -e" a plain
+        # failing command would abort before the status check was reached.
+        timeout=30
+        rc=0
+        timeout $timeout $POOL_INSTALL_PATH/bin/pgproto -f $BASE_DIR/test_data_files/$i > $BASE_DIR/results/$i 2>&1 || rc=$?
+
+        # GNU timeout exits with 124 when the command timed out
+        if [ $rc = 124 ]
+        then
+            echo "pgproto timeout."
+            timeoutcnt=`expr $timeoutcnt + 1`
+        else
+            echo "pgproto done."
+        fi
+    done
+
+echo "test_dml_extended done"
+}
+
+function check_test_data_result()
+{
+    # Compare the captured pgproto output with the expected output for the
+    # current scenario ($i), masking the volatile server source-line
+    # numbers ("L nnn") before comparing. On mismatch, append a context
+    # diff to $diffs and fail.
+    echo "check test data result ..."
+
+    sed -e 's/L [0-9]*/L xxx/g' $BASE_DIR/expected/$i > expected_tmp
+    sed -e 's/L [0-9]*/L xxx/g' $BASE_DIR/results/$i > results_tmp
+
+    if ! cmp expected_tmp results_tmp
+    then
+        echo "failed: please check the file \"$diffs\""
+        echo "=== $i ===" >> $diffs
+        diff -c expected_tmp results_tmp >> $diffs
+        rm expected_tmp results_tmp
+        exit 1
+    fi
+
+    rm expected_tmp results_tmp
+}
+
+function check_test_log_result()
+{
+    # Verify from the pgpool log that dml_adaptive routed statements as
+    # expected: the Parse of each SELECT goes to the standby (node 1),
+    # while the Bind/Execute issued after a conflicting write in the same
+    # transaction is redirected to the primary (node 0). An unrelated
+    # "SELECT 1" must stay on the standby throughout.
+    echo "check test log result ..."
+
+    # check if dml adaptive worked: the load balance node must be the standby
+    fgrep "standby" $LOG_FILE |grep "true">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: load_balance_node is not standby."
+        exit 1
+    fi
+
+    # ----- direct DML: INSERT --------------------------------------------
+
+    fgrep "Parse: SELECT * FROM tb_dml_insert" $TEST_DIR/pgpool.log |grep "DB node id: 1">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Parse: SELECT * FROM tb_dml_insert\" is not sent to standby node."
+        exit 1
+    fi
+
+    fgrep "Bind: SELECT * FROM tb_dml_insert" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Bind: SELECT * FROM tb_dml_insert\" is not sent to primary node."
+        exit 1
+    fi
+
+    # ----- direct DML: UPDATE --------------------------------------------
+
+    fgrep "Parse: SELECT * FROM tb_dml_update" $TEST_DIR/pgpool.log |grep "DB node id: 1">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Parse: SELECT * FROM tb_dml_update\" is not sent to standby node."
+        exit 1
+    fi
+
+    fgrep "Bind: SELECT * FROM tb_dml_update" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Bind: SELECT * FROM tb_dml_update\" is not sent to primary node."
+        exit 1
+    fi
+
+    # ----- direct DML: DELETE --------------------------------------------
+
+    fgrep "Parse: SELECT * FROM tb_dml_delete" $TEST_DIR/pgpool.log |grep "DB node id: 1">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Parse: SELECT * FROM tb_dml_delete\" is not sent to standby node."
+        exit 1
+    fi
+
+    fgrep "Bind: SELECT * FROM tb_dml_delete" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Bind: SELECT * FROM tb_dml_delete\" is not sent to primary node."
+        exit 1
+    fi
+
+    # ----- trigger dependency: write to tb_t1 taints reads of tb_t2 ------
+
+    fgrep "Parse: SELECT * FROM tb_t2" $TEST_DIR/pgpool.log |grep "DB node id: 1">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Parse: SELECT * FROM tb_t2\" is not sent to standby node."
+        exit 1
+    fi
+
+    fgrep "Bind: SELECT * FROM tb_t2" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Bind: SELECT * FROM tb_t2\" is not sent to primary node."
+        exit 1
+    fi
+
+    # ----- function dependency: insert_tb_f_func() taints reads of tb_f --
+
+    fgrep "Parse: SELECT * FROM tb_f" $TEST_DIR/pgpool.log |grep "DB node id: 1">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Parse: SELECT * FROM tb_f\" is not sent to standby node."
+        exit 1
+    fi
+
+    fgrep "Bind: SELECT * FROM tb_f" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Bind: SELECT * FROM tb_f\" is not sent to primary node."
+        exit 1
+    fi
+
+    # ----- view dependency: write to tb_v taints reads of tb_v_view ------
+
+    fgrep "Parse: SELECT * FROM tb_v_view" $TEST_DIR/pgpool.log |grep "DB node id: 1">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Parse: SELECT * FROM tb_v_view\" is not sent to standby node."
+        exit 1
+    fi
+
+    fgrep "Bind: SELECT * FROM tb_v_view" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+    if [ $? != 0 ];then
+        echo "failed: \"Bind: SELECT * FROM tb_v_view\" is not sent to primary node."
+        exit 1
+    fi
+
+    # ----- unrelated SELECT must never reach the primary ------------------
+
+    fgrep "Bind: SELECT 1" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+    if [ $? = 0 ];then
+        # the statement was found on the primary, which must not happen
+        echo "failed: \"Bind: SELECT 1\" should not be sent to primary node."
+        exit 1
+    fi
+
+    echo "success: dml extended query test pass."
+}
+
+function install_temp_pgpool
+{
+    # Build and install pgpool-II from the source tree into a temporary
+    # prefix inside the test directory; sets the global POOL_INSTALL_PATH.
+    echo "creating pgpool-II temporary installation ..."
+
+    POOL_INSTALL_PATH=$TEST_DIR/pgpool_temp_installed
+
+    test -d $POOL_INSTALL_PATH || mkdir $POOL_INSTALL_PATH
+
+    # run the build inside the condition: with "set -e" in effect a plain
+    # failing command would abort before a later "$?" check was reached.
+    if ! make install -C $BASE_DIR/../../../ -e prefix=${POOL_INSTALL_PATH} > ${POOL_INSTALL_PATH}/install.log 2>&1
+    then
+        echo "pgpool make install failed"
+        exit 1
+    fi
+
+    echo "done"
+}
+
+function run_test()
+{
+    # Orchestrate one full test run: build pgpool, set up replication,
+    # configure and run the extended-query scenarios, then tear down.
+    echo "run_test ..."
+
+    install_temp_pgpool
+    create_streaming_replication
+
+    set_pool_conf
+
+    test_dml_extended
+
+    stop_pool
+    stop_pg_all
+
+    echo "run_test done"
+}
+
+function print_usage
+{
+    # Print usage information; everything goes to stderr (the original
+    # sent the first line to stdout by mistake).
+    {
+        printf "Usage:\n"
+        printf "  %s [Options]...\n" $(basename $0)
+        printf "\nOptions:\n"
+        printf "  -p DIRECTORY   Postgres installed directory\n"
+        printf "  -h             print this help and then exit\n\n"
+    } >&2
+}
+
+function init_environment()
+{
+
+    # Remove the artifacts of the previous run ...
+    rm -rf $BASE_DIR/results
+    diffs=$BASE_DIR/diffs
+    rm -f $diffs
+    rm -rf $TEST_DIR
+    rm -f $LOG_FILE
+
+    # ... then create fresh results/ and test directories.
+    test ! -d $BASE_DIR/results && mkdir $BASE_DIR/results
+    mkdir $TEST_DIR
+}
+
+function main
+{
+    # Entry point: verify prerequisites, run the test suite (all output is
+    # redirected to $LOG_FILE), then evaluate the captured results/logs.
+    check_version
+
+    echo "running test ..."
+
+    init_environment
+    run_test > $LOG_FILE 2>&1
+
+    echo "test done."
+
+    # the result checks report and exit on their own; disable errexit so
+    # their internal status tests behave as written
+    set +e
+    check_test_data_result && check_test_log_result
+}
+
+# ------------------------------------------- main --------------------------------------------
+
+# Parse command line options (-p: PostgreSQL install dir, -h: help),
+# then run the whole test.
+while getopts "p:h" OPTION
+do
+case $OPTION in
+    p) PG_INSTALL_PATH="$OPTARG";;
+    h) print_usage
+        exit 2;;
+esac
+done
+
+main
\ No newline at end of file
--- /dev/null
+# Test for disable_load_balance_on_write feature (dml_adaptive).
+#
+
+# Force load balance node to 1.
+##backend_weight0 = 0
+##backend_weight1 = 1
+##disable_load_balance_on_write = 'dml_adaptive'
+
+# Create test data
+
+# for test dml -------------------------------------------------
+
+# Create test table tb_dml_insert
+'Q' "DROP TABLE IF EXISTS tb_dml_insert"
+'Y'
+'Q' "CREATE TABLE tb_dml_insert(a INT)"
+'Y'
+'Q' "INSERT INTO tb_dml_insert VALUES(1)"
+'Y'
+
+# Create test table tb_dml_update
+'Q' "DROP TABLE IF EXISTS tb_dml_update"
+'Y'
+'Q' "CREATE TABLE tb_dml_update(a INT)"
+'Y'
+'Q' "INSERT INTO tb_dml_update VALUES(1)"
+'Y'
+
+# Create test table tb_dml_delete
+'Q' "DROP TABLE IF EXISTS tb_dml_delete"
+'Y'
+'Q' "CREATE TABLE tb_dml_delete(a INT)"
+'Y'
+'Q' "INSERT INTO tb_dml_delete VALUES(1)"
+'Y'
+
+# for test trigger -------------------------------------------------
+
+# Create test table tb_t1
+'Q' "DROP TABLE IF EXISTS tb_t1"
+'Y'
+'Q' "CREATE TABLE tb_t1(a INT)"
+'Y'
+
+# Create test table tb_t2
+'Q' "DROP TABLE IF EXISTS tb_t2"
+'Y'
+'Q' "CREATE TABLE tb_t2(b INT)"
+'Y'
+
+# Create test FUNCTION insert_tb_t2_func
+'Q' "CREATE OR REPLACE FUNCTION insert_tb_t2_func() RETURNS TRIGGER AS $example_table$ BEGIN INSERT INTO tb_t2 VALUES (1); RETURN NEW; END; $example_table$ LANGUAGE plpgsql"
+'Y'
+
+# Create test TRIGGER insert_tb_t2_trigger
+'Q' "CREATE TRIGGER insert_tb_t2_trigger AFTER INSERT ON tb_t1 FOR EACH ROW EXECUTE PROCEDURE insert_tb_t2_func()"
+'Y'
+
+# for test function -------------------------------------------------
+
+# Create test table tb_f
+'Q' "DROP TABLE IF EXISTS tb_f"
+'Y'
+'Q' "CREATE TABLE tb_f(f INT)"
+'Y'
+
+# Create test FUNCTION insert_tb_f_func
+'Q' "DROP FUNCTION IF EXISTS insert_tb_f_func"
+'Y'
+'Q' "CREATE FUNCTION insert_tb_f_func(integer) RETURNS void AS $$ INSERT INTO tb_f VALUES ($1); $$ LANGUAGE SQL"
+'Y'
+
+# for test view -------------------------------------------------
+
+# Create test table tb_v
+'Q' "DROP TABLE IF EXISTS tb_v CASCADE"
+'Y'
+'Q' "CREATE TABLE tb_v(v INT)"
+'Y'
+
+# Create test view tb_v_view
+'Q' "DROP VIEW IF EXISTS tb_v_view"
+'Y'
+'Q' "CREATE VIEW tb_v_view AS SELECT * FROM tb_v"
+'Y'
+
+# start test -------------------------------------------------
+
+# Start a transaction
+'P' "" "BEGIN" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Parse SELECT. This should be sent to standby node.
+'P' "S1" "SELECT * FROM tb_dml_insert" 0
+'P' "S_SELECT_1" "SELECT 1" 0
+
+# Issue INSERT
+'P' "" "INSERT INTO tb_dml_insert VALUES(1)" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to Primary node.
+'B' "" "S1" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to standby node.
+'B' "" "S_SELECT_1" 0 0 0
+'E' "" 0
+
+# Issue COMMIT
+'P' "" "COMMIT" 0
+'B' "" "" 0 0 0
+'E' "" 0
+'S'
+'Y'
+
+# Start a transaction
+'P' "" "BEGIN" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Parse SELECT. This should be sent to standby node.
+'P' "S2" "SELECT * FROM tb_dml_update" 0
+
+# Issue UPDATE
+'P' "" "UPDATE tb_dml_update SET a = 2" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to Primary node.
+'B' "" "S2" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to standby node.
+'B' "" "S_SELECT_1" 0 0 0
+'E' "" 0
+
+# Issue COMMIT
+'P' "" "COMMIT" 0
+'B' "" "" 0 0 0
+'E' "" 0
+'S'
+'Y'
+
+# Start a transaction
+'P' "" "BEGIN" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Parse SELECT. This should be sent to standby node.
+'P' "S3" "SELECT * FROM tb_dml_delete" 0
+
+# Issue DELETE
+'P' "" "DELETE FROM tb_dml_delete" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to Primary node.
+'B' "" "S3" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to standby node.
+'B' "" "S_SELECT_1" 0 0 0
+'E' "" 0
+
+# Issue COMMIT
+'P' "" "COMMIT" 0
+'B' "" "" 0 0 0
+'E' "" 0
+'S'
+'Y'
+
+# Start a transaction
+'P' "" "BEGIN" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Parse SELECT. This should be sent to standby node.
+'P' "S4" "SELECT * FROM tb_t2" 0
+
+# Issue INSERT
+'P' "" "INSERT INTO tb_t1 VALUES(1)" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to Primary node.
+'B' "" "S4" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to standby node.
+'B' "" "S_SELECT_1" 0 0 0
+'E' "" 0
+
+# Issue COMMIT
+'P' "" "COMMIT" 0
+'B' "" "" 0 0 0
+'E' "" 0
+'S'
+'Y'
+
+# Start a transaction
+'P' "" "BEGIN" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Parse SELECT. This should be sent to standby node.
+'P' "S5" "SELECT * FROM tb_f" 0
+
+# Issue SELECT FUNCTION
+'P' "" "SELECT insert_tb_f_func(6)" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to Primary node.
+'B' "" "S5" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to standby node.
+'B' "" "S_SELECT_1" 0 0 0
+'E' "" 0
+
+# Issue COMMIT
+'P' "" "COMMIT" 0
+'B' "" "" 0 0 0
+'E' "" 0
+'S'
+'Y'
+
+# Start a transaction
+'P' "" "BEGIN" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Parse SELECT. This should be sent to standby node.
+'P' "S6" "SELECT * FROM tb_v_view" 0
+
+# Issue INSERT
+'P' "" "INSERT INTO tb_v VALUES(8)" 0
+'B' "" "" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to Primary node.
+'B' "" "S6" 0 0 0
+'E' "" 0
+
+# Issue Bind/Execute SELECT. This should be sent to standby node.
+'B' "" "S_SELECT_1" 0 0 0
+'E' "" 0
+
+# Issue COMMIT
+'P' "" "COMMIT" 0
+'B' "" "" 0 0 0
+'E' "" 0
+'S'
+'Y'
+
+'X'
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env bash
+#----------------------------------------------------------------------------------------
+# test script for dml adaptive.
+set -e
+
+#---------------------------------------------------------------------------------------
+
+HOST_IP="127.0.0.1"
+WHOAMI=`whoami`
+BASE_PORT=${BASE_PORT:-"11000"}
+CLUSTERS_NUM=${CLUSTERS_NUM:-"2"}
+
+BASE_DIR=$(cd `dirname $0`; pwd)
+TEST_DIR=$BASE_DIR/testdir
+LOG_FILE=$BASE_DIR/test.log
+
+PG_INSTALL_PATH=${PG_INSTALL_PATH:-"/usr/local/pgsql"}
+PG_USER=${PG_USER:-"postgres"}
+PG_REPLICATION_USER=${PG_REPLICATION_USER:-"repl"}
+PG_VERSION=""
+
+#---------------------------------------------------------------------------------------
+
+function check_version()
+{
+    # Detect the PostgreSQL major version from "initdb -V" and store it in
+    # the global PG_VERSION as an integer (e.g. 12.3 -> 123, 10.0 -> 100).
+    # Exits with an error when the server is older than 10.0.
+    echo "check PostgreSQL version ..."
+
+    # get PostgreSQL major version, e.g. "initdb (PostgreSQL) 12.3" -> "12 3"
+    vstr=`$PG_INSTALL_PATH/bin/initdb -V|awk '{print $3}'|sed 's/\./ /g'`
+
+    set +e
+    # check if alpha or beta (version strings with letters, e.g. "13beta1")
+    echo $vstr|egrep "[a-z]" > /dev/null
+    if [ $? = 0 ];then
+        vstr=`echo $vstr|sed 's/\([0-9]*\).*/\1/'`
+        major1=`echo $vstr|awk '{print $1}'`
+        major2=`echo $vstr|awk '{print $2}'`
+        if [ -z $major2 ];then
+            major2=0
+        fi
+    else
+        vstr=`echo $vstr|sed 's/\./ /g'`
+        major1=`echo $vstr|awk '{print $1}'`
+        major2=`echo $vstr|awk '{print $2}'`
+    fi
+    set -e
+
+    major1=`expr $major1 \* 10`
+    PG_VERSION=`expr $major1 + $major2`
+    echo PostgreSQL major version: $PG_VERSION
+    if [ $PG_VERSION -lt 100 ];then
+        echo "in order to make the script run normally, please make sure PostgreSQL major version greater than 10.0"
+        exit 1
+    fi
+
+    echo "done"
+}
+
+#-------------------------------------------
+# create PostgreSQL cluster
+#-------------------------------------------
+function initdb_primary_cluster()
+{
+    # Create the primary database cluster under $TEST_DIR/data-primary.
+    echo "initdb_primary_cluster ..."
+
+    echo -n "creating database cluster $TEST_DIR/data-primary..."
+
+    INITDB_ARG="--no-locale -E UTF_8"
+    $PG_INSTALL_PATH/bin/initdb -D $TEST_DIR/data-primary $INITDB_ARG -U $PG_USER
+
+    echo "done"
+}
+
+function set_primary_postgresql_conf()
+{
+    # Append archiving and streaming-replication settings to the primary's
+    # postgresql.conf and create the WAL archive directory.
+    echo "set_primary_postgresql_conf ..."
+
+    PG_CONF=$TEST_DIR/data-primary/postgresql.conf
+    PG_HBA_CONF_0=$TEST_DIR/data-primary/pg_hba.conf
+    PORT=`expr $BASE_PORT + 0`
+
+    echo "listen_addresses = '*'" >> $PG_CONF
+    echo "port = $PORT" >> $PG_CONF
+
+    echo "archive_mode = on" >> $PG_CONF
+    echo "archive_command = 'cp %p $TEST_DIR/archivedir/%f </dev/null'" >> $PG_CONF
+    mkdir $TEST_DIR/archivedir
+
+    echo "max_wal_senders = 10" >> $PG_CONF
+    echo "max_replication_slots = 10" >> $PG_CONF
+
+    echo "wal_level = replica" >> $PG_CONF
+    # wal_keep_segments was removed in PostgreSQL 13 (replaced by
+    # wal_keep_size); writing the old name would prevent server startup.
+    # PG_VERSION is set by check_version() before this function runs.
+    if [ $PG_VERSION -lt 130 ];then
+        echo "wal_keep_segments = 512" >> $PG_CONF
+    else
+        # 512 segments * 16MB per segment = 8192MB
+        echo "wal_keep_size = 8192MB" >> $PG_CONF
+    fi
+
+    echo "done"
+}
+
+function create_role()
+{
+    # Create the streaming-replication role on the primary.
+    echo "create_role ..."
+
+    PORT=`expr $BASE_PORT + 0`
+    $PG_INSTALL_PATH/bin/psql -h $HOST_IP -p $PORT -U $PG_USER -d postgres -c "CREATE ROLE $PG_REPLICATION_USER REPLICATION LOGIN"
+
+    echo "done"
+}
+
+function create_standby()
+{
+    # Build the standby cluster from the running primary with pg_basebackup
+    # and tag its replication connection as "standby01" so the primary's
+    # synchronous_standby_names can match it.
+    echo "create_standby ..."
+
+    CLUSTER_DIR=$TEST_DIR/data-standby
+    PG_CONF=$CLUSTER_DIR/postgresql.conf
+    PORT_PRIMARY=`expr $BASE_PORT + 0`
+    PORT_STANDBY=`expr $BASE_PORT + 1`
+
+    # -R writes the replication settings for us (into recovery.conf before
+    # PG 12, into postgresql.auto.conf from PG 12 on)
+    $PG_INSTALL_PATH/bin/pg_basebackup -h $HOST_IP -p $PORT_PRIMARY -U $PG_REPLICATION_USER -Fp -Xs -Pv -R -D $CLUSTER_DIR
+
+    echo "port = $PORT_STANDBY" >> $PG_CONF
+
+    # inject application_name into primary_conninfo, in whichever file the
+    # server version uses
+    if [ $PG_VERSION -lt 120 ];then
+        # PG_VERSION < 12.0
+        sed -i "s/primary_conninfo = '/primary_conninfo = 'application_name=standby01 /g" $CLUSTER_DIR/recovery.conf
+    else
+        # PG_VERSION >= 12.0
+        sed -i "s/primary_conninfo = '/primary_conninfo = 'application_name=standby01 /g" $CLUSTER_DIR/postgresql.auto.conf
+    fi
+
+    echo "done"
+}
+
+function set_sync_primary_postgresql_conf()
+{
+    # Enable synchronous replication to standby01 on the primary.
+    echo "set_sync_primary_postgresql_conf ..."
+
+    CLUSTER_DIR=$TEST_DIR/data-primary
+    PG_CONF=$CLUSTER_DIR/postgresql.conf
+
+    echo "synchronous_commit = on" >> $PG_CONF
+    echo "synchronous_standby_names = 'standby01'" >> $PG_CONF
+
+    echo "done"
+}
+
+function start_primary()
+{
+    # Start the primary server only (the standby does not exist yet).
+    echo "start_primary ..."
+
+    CLUSTER_DIR=$TEST_DIR/data-primary
+
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR start
+
+    echo "done"
+}
+
+function start_pg_all()
+{
+    # Restart the primary (to pick up the synchronous-replication settings
+    # added after its first start) and then start the standby.
+    echo "start_pg_all ..."
+
+    CLUSTER_DIR_PRIMARY=$TEST_DIR/data-primary
+    CLUSTER_DIR_STANDBY=$TEST_DIR/data-standby
+
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_PRIMARY restart
+    sleep 1
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_STANDBY start
+    sleep 1
+
+    echo "done"
+}
+
+function stop_pg_all()
+{
+    # Stop both servers: standby first, then the primary.
+    echo "stop_pg_all ..."
+
+    CLUSTER_DIR_PRIMARY=$TEST_DIR/data-primary
+    CLUSTER_DIR_STANDBY=$TEST_DIR/data-standby
+
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_STANDBY stop
+    sleep 1
+    $PG_INSTALL_PATH/bin/pg_ctl -D $CLUSTER_DIR_PRIMARY stop
+    sleep 1
+
+    echo "done"
+}
+
+function create_streaming_replication()
+{
+    # Set up a two-node synchronous streaming-replication pair
+    # (primary + standby01). The call order matters: the primary must be
+    # running before the role is created and the standby is based off it.
+    echo "create_streaming_replication ..."
+
+    initdb_primary_cluster
+    set_primary_postgresql_conf
+    start_primary
+    create_role
+    create_standby
+    set_sync_primary_postgresql_conf
+    start_pg_all
+
+    echo "done"
+}
+
+function set_pool_conf()
+{
+# Generate pgpool.conf: start from the streaming-replication sample file,
+# append the dml_adaptive test settings, then substitute placeholders.
+echo "set_pool_conf ..."
+
+PORT_PRIMARY=`expr $BASE_PORT + 0`
+PORT_STANDBY=`expr $BASE_PORT + 1`
+PORT_POOL=`expr $BASE_PORT + 2`
+PORT_PCP=`expr $BASE_PORT + 3`
+TEST_DIR=$TEST_DIR
+
+rm -fr $TEST_DIR/pgpool.conf
+cp $POOL_INSTALL_PATH/etc/pgpool.conf.sample-stream $TEST_DIR/pgpool.conf
+
+# The heredoc delimiter is quoted ('EOF') so the __TOKEN__ placeholders
+# reach the file literally; the sed pass below replaces them.
+# backend_weight0=0/backend_weight1=1 force all load-balanced reads to the
+# standby, so any read observed on the primary is a dml_adaptive redirect.
+cat >> $TEST_DIR/pgpool.conf <<'EOF'
+port = __PORT_POOL__
+pcp_port = __PORT_PCP__
+
+backend_hostname0 = '127.0.0.1'
+backend_port0 = __PORT_PRIMARY__
+backend_weight0 = 0
+
+backend_hostname1 = '127.0.0.1'
+backend_port1 = __PORT_STANDBY__
+backend_weight1 = 1
+
+log_per_node_statement = on
+
+pid_file_name = '/__TEST_DIR__/pgpool.pid'
+black_function_list = 'currval,lastval,nextval,setval,insert_tb_f_func'
+disable_load_balance_on_write = 'dml_adaptive'
+dml_adaptive_object_relationship_list= 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:tb_v_view'
+
+sr_check_period = 0
+sr_check_user = '__PG_USER__'
+health_check_user = '__PG_USER__'
+EOF
+
+/bin/sed -i \
+    -e "/__PORT_PRIMARY__/s@__PORT_PRIMARY__@$PORT_PRIMARY@" \
+    -e "/__PORT_STANDBY__/s@__PORT_STANDBY__@$PORT_STANDBY@" \
+    -e "/__PORT_POOL__/s@__PORT_POOL__@$PORT_POOL@" \
+    -e "/__PORT_PCP__/s@__PORT_PCP__@$PORT_PCP@" \
+    -e "/__TEST_DIR__/s@__TEST_DIR__@$TEST_DIR@" \
+    -e "/__PG_USER__/s@__PG_USER__@$PG_USER@" \
+    $TEST_DIR/pgpool.conf
+
+    echo "done"
+}
+
+function start_pool()
+{
+    # Launch pgpool in the background, capturing its log output.
+    echo "start_pool ..."
+
+    # NOTE(review): the socket glob assumes the default BASE_PORT=11000
+    # range — confirm behavior if BASE_PORT is overridden.
+    rm -rf /tmp/.s.PGSQL.110*
+    $POOL_INSTALL_PATH/bin/pgpool -D -n -f $TEST_DIR/pgpool.conf 2>&1 | cat > $TEST_DIR/pgpool.log &
+
+    echo "done"
+}
+
+function stop_pool()
+{
+    # Ask pgpool to stop, appending its output to the log.
+    echo "stop_pool ..."
+
+    # NOTE(review): the stop command is backgrounded, so the socket cleanup
+    # below may race with the actual shutdown — confirm this is intended.
+    $POOL_INSTALL_PATH/bin/pgpool -D -n -f $TEST_DIR/pgpool.conf stop 2>&1 | cat >> $TEST_DIR/pgpool.log &
+    rm -rf /tmp/.s.PGSQL.110*
+
+    echo "done"
+}
+
+function test_dml_adaptive()
+{
+# Simple-query variant of the dml_adaptive test: create the test objects
+# directly on the primary, run the read-after-write workload through
+# pgpool, then drop everything on the primary again.
+echo "test_dml_adaptive ..."
+
+PORT_PRIMARY=`expr $BASE_PORT + 0`
+PORT_POOL=`expr $BASE_PORT + 2`
+
+# Create test data
+# NOTE(review): ">>" appends, so rerunning against an existing testdir
+# would duplicate the SQL — confirm testdir is always recreated first.
+cat >> $TEST_DIR/create.sql <<'EOF'
+-- for test dml
+create table tb_dml_insert (a int);
+create table tb_dml_update (a int);
+create table tb_dml_delete (a int);
+
+insert into tb_dml_insert values (1);
+insert into tb_dml_update values (1);
+insert into tb_dml_delete values (1);
+
+-- for test trigger
+create table tb_t1(a int);
+create table tb_t2(b int);
+
+CREATE OR REPLACE FUNCTION insert_tb_t2_func() RETURNS TRIGGER AS $example_table$
+    BEGIN
+        INSERT INTO tb_t2 VALUES (1);
+        RETURN NEW;
+    END;
+$example_table$ LANGUAGE plpgsql;
+
+CREATE TRIGGER insert_tb_t2_trigger AFTER INSERT ON tb_t1
+FOR EACH ROW EXECUTE PROCEDURE insert_tb_t2_func();
+
+-- for test function
+create table tb_f (f int);
+
+CREATE FUNCTION insert_tb_f_func(integer) RETURNS void AS $$
+    INSERT INTO tb_f VALUES ($1);
+$$ LANGUAGE SQL;
+
+-- for test view
+create table tb_v(v int);
+CREATE VIEW tb_v_view AS SELECT * FROM tb_v;
+EOF
+
+# Test: each transaction writes and then reads a dependent object; the
+# read must be redirected to the primary by dml_adaptive.
+cat >> $TEST_DIR/test.sql <<'EOF'
+show pool_nodes;
+
+\x
+pgpool show disable_load_balance_on_write;
+pgpool show dml_adaptive_object_relationship_list;
+\x
+
+-- test DML
+begin ;
+insert into tb_dml_insert values (1);
+select * from tb_dml_insert ;
+commit ;
+
+begin ;
+update tb_dml_update SET a = 2;
+select * from tb_dml_update ;
+commit ;
+
+begin ;
+delete from tb_dml_delete;
+select * from tb_dml_delete;
+commit ;
+
+-- test trigger
+begin ;
+insert into tb_t1 values (1);
+select * from tb_t2 ;
+commit ;
+
+-- test function
+begin ;
+select insert_tb_f_func(6);
+select * from tb_f ;
+commit ;
+
+-- test view
+begin ;
+insert into tb_v values (8);
+select * from tb_v_view ;
+commit ;
+EOF
+
+# Drop test data
+cat >> $TEST_DIR/drop.sql <<'EOF'
+drop view tb_v_view ;
+drop table tb_v;
+
+drop function insert_tb_f_func;
+drop table tb_f;
+
+drop trigger insert_tb_t2_trigger ON tb_t1 ;
+drop function insert_tb_t2_func;
+drop table tb_t1;
+drop table tb_t2;
+
+drop table tb_dml_insert;
+drop table tb_dml_update;
+drop table tb_dml_delete;
+EOF
+
+# setup on the primary, workload through pgpool, cleanup on the primary
+$PG_INSTALL_PATH/bin/psql -h $HOST_IP -p $PORT_PRIMARY -U $PG_USER -d postgres -f $TEST_DIR/create.sql
+$PG_INSTALL_PATH/bin/psql -h $HOST_IP -p $PORT_POOL -U $PG_USER -d postgres -f $TEST_DIR/test.sql
+$PG_INSTALL_PATH/bin/psql -h $HOST_IP -p $PORT_PRIMARY -U $PG_USER -d postgres -f $TEST_DIR/drop.sql
+
+echo "done"
+}
+
+# Validate the run by scanning the psql output ($LOG_FILE) and the pgpool
+# debug log for evidence that dml_adaptive routed each post-write read to
+# the primary.  Exits 1 on the first missing expectation.
+#
+# Fixes vs. the original: the six copy-pasted per-query grep blocks are
+# folded into one loop, and the error messages read "is not sent" instead
+# of the typo "is no sent".
+function check_test_result()
+{
+	echo "check test result ..."
+
+	# check "pgpool show disable_load_balance_on_write;"
+	fgrep "disable_load_balance_on_write | " $LOG_FILE |grep "dml_adaptive">/dev/null 2>&1
+	if [ $? != 0 ];then
+		# expected result not found
+		echo failed: pgpool show disable_load_balance_on_write.
+		exit 1
+	fi
+
+	# check "pgpool show dml_adaptive_object_relationship_list;"
+	fgrep "dml_adaptive_object_relationship_list |" $LOG_FILE |grep "tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:tb_v_view">/dev/null 2>&1
+	if [ $? != 0 ];then
+		# expected result not found
+		echo failed: pgpool show dml_adaptive_object_relationship_list.
+		exit 1
+	fi
+
+	# The standby must be the load-balance node, otherwise reads would go to
+	# the primary regardless of dml_adaptive and the checks below prove nothing.
+	fgrep "standby" $LOG_FILE |grep "true">/dev/null 2>&1
+	if [ $? != 0 ];then
+		# expected result not found
+		echo failed: load_balance_node is not standby.
+		exit 1
+	fi
+
+	# Each read issued after a write inside the same transaction must have
+	# been routed to the primary (DB node id: 0).  The strings below must
+	# match the statements in test.sql byte for byte.
+	local q
+	for q in "select * from tb_dml_insert ;" \
+		 "select * from tb_dml_update ;" \
+		 "select * from tb_dml_delete;" \
+		 "select * from tb_t2 ;" \
+		 "select * from tb_f ;" \
+		 "select * from tb_v_view ;"
+	do
+		fgrep "$q" $TEST_DIR/pgpool.log |grep "DB node id: 0">/dev/null 2>&1
+		if [ $? != 0 ];then
+			# expected result not found
+			echo failed: "\"$q\"" is not sent to primary node.
+			exit 1
+		fi
+	done
+
+	echo "success: dml simple query test pass."
+}
+
+# Build and install pgpool-II into a scratch prefix beneath $TEST_DIR so
+# the regression run never touches a system-wide installation.  Sets the
+# global POOL_INSTALL_PATH used by start_pool/stop_pool.
+function install_temp_pgpool
+{
+	echo "creating pgpool-II temporary installation ..."
+
+	POOL_INSTALL_PATH=$TEST_DIR/pgpool_temp_installed
+
+	mkdir -p $POOL_INSTALL_PATH
+
+	# Build from the source tree three levels up; keep the (noisy) build
+	# output in install.log for post-mortem inspection.
+	if ! make install -C $BASE_DIR/../../../ -e prefix=${POOL_INSTALL_PATH} > ${POOL_INSTALL_PATH}/install.log 2>&1
+	then
+		echo "pgpool make install failed"
+		exit 1
+	fi
+
+	echo "done"
+}
+
+# Drive the full scenario: build pgpool into a scratch prefix, stand up
+# streaming replication, configure and start pgpool, run the dml_adaptive
+# queries, then tear everything down.
+function run_test()
+{
+	echo "run_test ..."
+
+	# Start from an empty working directory every run.
+	rm -fr $TEST_DIR && mkdir $TEST_DIR && cd $TEST_DIR
+
+	install_temp_pgpool
+	create_streaming_replication
+	set_pool_conf
+	start_pool
+	test_dml_adaptive
+
+	# Teardown.
+	stop_pool
+	stop_pg_all
+
+	echo "run_test done"
+}
+
+# Print command-line usage.  Every line is sent to stderr; the original
+# wrote the first "Usage:" line to stdout and the rest to stderr, splitting
+# the message across two streams.
+function print_usage
+{
+	printf "Usage:\n" >&2
+	printf "  %s [Options]...\n" $(basename $0) >&2
+	printf "\nOptions:\n" >&2
+	printf "  -p DIRECTORY Postgres installed directory\n" >&2
+	printf "  -h print this help and then exit\n\n" >&2
+}
+
+# Entry point: verify prerequisites, run the whole test with its output
+# captured in $LOG_FILE, then scan that log for the expected results.
+function main
+{
+	check_version
+
+	echo "running test ..."
+	run_test > $LOG_FILE 2>&1
+	echo "test done"
+
+	# NOTE(review): 'set +e' implies errexit may be enabled earlier in this
+	# script (outside this chunk); kept so check_test_result can examine $?
+	# itself instead of aborting on the first failing grep.
+	set +e
+	check_test_result
+}
+
+# ------------------------------------------- main --------------------------------------------
+
+# Option parsing:
+#   -p DIR  PostgreSQL installation to test against
+#   -h      print usage and exit
+while getopts "p:h" OPTION
+do
+	case $OPTION in
+		p) PG_INSTALL_PATH="$OPTARG";;
+		h) print_usage
+		   exit 2;;
+		*) ;;
+	esac
+done
+
+main
\ No newline at end of file
StrNCpy(status[i].desc, "Load balance behavior when write query is received", POOLCONFIG_MAXDESCLEN);
i++;
+ StrNCpy(status[i].name, "dml_adaptive_object_relationship_list", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->dml_adaptive_object_relationship_list);
+ StrNCpy(status[i].desc, "list of relationships between objects", POOLCONFIG_MAXDESCLEN);
+ i++;
+
StrNCpy(status[i].name, "statement_level_load_balance", POOLCONFIG_MAXNAMELEN);
snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->statement_level_load_balance);
StrNCpy(status[i].desc, "statement level load balancing", POOLCONFIG_MAXDESCLEN);
ereport(DEBUG1,
(errmsg("function call walker, function name: \"%s\"", fname)));
+ check_object_relationship_list(fname, true);
+
if (ctx->pg_terminate_backend_pid == 0 && strcmp("pg_terminate_backend", fname) == 0)
{
if (list_length(fcall->args) == 1)