From 9e366c14e75aedfa3357e71800d6e3ac9847a39a Mon Sep 17 00:00:00 2001
From: Tatsuo Ishii
Date: Thu, 17 Sep 2020 13:41:44 +0900
Subject: [PATCH] Apply language cleanup mega patch.

black/white_function_list -> write_function_list, read_only_function_list
black_query_pattern_list -> primary_routing_query_pattern_list
black/white_memqcache_table_list -> cache_unsafe/cache_safe_memqcache_table_list
Watchdog: replace 'master' with 'leader' for watchdog nodes.
The ALWAYS_MASTER flag is renamed to ALWAYS_PRIMARY.
Replace the relcache_query_target option 'master' with 'primary'.
Replace "Master Node" with "Main Node" (node id 0, or the youngest live node).
Replace follow_master with follow_primary in parameters, commands and sample scripts.
Replace the remaining occurrences of master with primary/main/leader.

Patch contributed by: Umar Hayat
Reviewed by me.
---
doc.ja/src/sgml/advanced.sgml | 16 +- doc.ja/src/sgml/connection-pooling.sgml | 2 +- doc.ja/src/sgml/connection-settings.sgml | 22 +- doc.ja/src/sgml/example-AWS.sgml | 22 +- doc.ja/src/sgml/example-Aurora.sgml | 10 +- doc.ja/src/sgml/example-cluster.sgml | 20 +- doc.ja/src/sgml/example-watchdog.sgml | 10 +- doc.ja/src/sgml/failover.sgml | 102 ++-- doc.ja/src/sgml/healthcheck.sgml | 4 +- doc.ja/src/sgml/intro.sgml | 2 +- doc.ja/src/sgml/loadbalance.sgml | 126 ++--- doc.ja/src/sgml/memcache.sgml | 38 +- doc.ja/src/sgml/misc-config.sgml | 8 +- doc.ja/src/sgml/online-recovery.sgml | 30 +- doc.ja/src/sgml/ref/pcp_promote_node.sgml | 18 +- doc.ja/src/sgml/ref/pcp_watchdog_info.sgml | 12 +- doc.ja/src/sgml/ref/pgpool_setup.sgml | 2 +- doc.ja/src/sgml/ref/pgpool_show.sgml | 8 +- .../ref/show_pool_health_check_stats.sgml | 2 +- doc.ja/src/sgml/ref/watchdog_setup.sgml | 12 +- doc.ja/src/sgml/release-3.4.sgml | 4 +- doc.ja/src/sgml/release-3.5.sgml | 4 +- doc.ja/src/sgml/release-3.6.sgml | 14 +- doc.ja/src/sgml/release-3.7.sgml | 8 +- doc.ja/src/sgml/release-4.0.sgml | 16 +- doc.ja/src/sgml/release-4.1.sgml | 10 +- doc.ja/src/sgml/release-4.2.sgml | 22 +- doc.ja/src/sgml/restrictions.sgml | 12 +- doc.ja/src/sgml/runtime.sgml | 12 +- doc.ja/src/sgml/start.sgml | 32 +- doc.ja/src/sgml/stream-check.sgml | 4 +- doc.ja/src/sgml/watchdog.sgml | 28 +- doc/src/sgml/advanced.sgml | 16 +- doc/src/sgml/connection-pooling.sgml | 2 +- doc/src/sgml/connection-settings.sgml | 26 +- doc/src/sgml/example-AWS.sgml | 14 +- doc/src/sgml/example-Aurora.sgml | 4 +- doc/src/sgml/example-cluster.sgml | 22 +- doc/src/sgml/example-watchdog.sgml | 10 +- doc/src/sgml/failover.sgml | 60 +-- doc/src/sgml/healthcheck.sgml | 2 +- doc/src/sgml/installation.sgml | 10 +- doc/src/sgml/intro.sgml | 2 +- doc/src/sgml/loadbalance.sgml | 96 ++-- doc/src/sgml/memcache.sgml | 28 +- doc/src/sgml/misc-config.sgml | 16 +- doc/src/sgml/online-recovery.sgml | 18 +- doc/src/sgml/ref/pcp_promote_node.sgml | 14 +- doc/src/sgml/ref/pcp_watchdog_info.sgml | 12 +- doc/src/sgml/ref/pgpool_setup.sgml | 2 +- doc/src/sgml/ref/pgpool_show.sgml | 6 +- .../ref/show_pool_health_check_stats.sgml | 2 +- doc/src/sgml/ref/watchdog_setup.sgml | 12 +- doc/src/sgml/release-3.4.sgml | 2 +- doc/src/sgml/release-3.5.sgml | 2 +- doc/src/sgml/release-3.6.sgml | 4 +- doc/src/sgml/release-3.7.sgml | 4 +- doc/src/sgml/release-4.0.sgml | 8 +- doc/src/sgml/release-4.1.sgml | 6 +- doc/src/sgml/release-4.2.sgml | 16 +- doc/src/sgml/restrictions.sgml | 12 +- doc/src/sgml/runtime.sgml | 12 +- doc/src/sgml/start.sgml | 32 +- doc/src/sgml/watchdog.sgml | 38 +- src/Makefile.am | 4 +- src/Makefile.in | 4 +- src/auth/pool_auth.c | 52 +- src/config/pool_config.c | 32 +- src/config/pool_config.l |
32 +- src/config/pool_config_variables.c | 92 ++-- src/context/pool_query_context.c | 178 +++---- src/context/pool_session_context.c | 32 +- src/include/context/pool_query_context.h | 2 +- src/include/context/pool_session_context.h | 14 +- src/include/pcp/libpcp_ext.h | 4 +- src/include/pcp/pcp.h | 4 +- src/include/pool.h | 30 +- src/include/pool_config.h | 60 +-- src/include/pool_config_variables.h | 2 +- src/include/protocol/pool_proto_modules.h | 4 +- src/include/query_cache/pool_memqcache.h | 4 +- src/include/watchdog/watchdog.h | 2 +- src/include/watchdog/wd_internal_commands.h | 2 +- src/include/watchdog/wd_ipc_defines.h | 6 +- src/include/watchdog/wd_json_data.h | 2 +- src/libs/pcp/pcp.c | 12 +- src/main/pgpool_main.c | 148 +++--- src/main/pool_internal_comms.c | 2 +- src/pcp_con/pcp_worker.c | 6 +- src/pcp_con/recovery.c | 14 +- src/pgpool.spec | 2 +- src/protocol/CommandComplete.c | 20 +- src/protocol/child.c | 32 +- src/protocol/pool_connection_pool.c | 76 +-- src/protocol/pool_pg_utils.c | 10 +- src/protocol/pool_process_query.c | 210 ++++---- src/protocol/pool_proto2.c | 84 ++-- src/protocol/pool_proto_modules.c | 240 ++++----- src/query_cache/pool_memqcache.c | 30 +- src/rewrite/pool_lobj.c | 12 +- src/rewrite/pool_timestamp.c | 6 +- src/sample/pgpool.conf.sample-logical | 60 +-- src/sample/pgpool.conf.sample-raw | 62 +-- src/sample/pgpool.conf.sample-replication | 60 +-- src/sample/pgpool.conf.sample-slony | 62 +-- src/sample/pgpool.conf.sample-snapshot | 60 +-- src/sample/pgpool.conf.sample-stream | 62 +-- src/sample/pgpool_recovery | 2 +- src/sample/scripts/failover.sh.sample | 38 +- src/sample/scripts/follow_primary.sh.sample | 171 +++++++ src/sample/scripts/recovery_2nd_stage.sample | 2 +- .../extended-query-test/test.sh | 2 +- .../simple-query-test/test.sh | 2 +- ...le-load-balance-always-black-function.data | 4 +- ...e-load-balance-default-black-function.data | 4 +- ...sable-load-balance-off-black-function.data | 4 +- ...le-load-balance-simple-black-function.data | 4 +- ...ble-load-balance-trans-black-function.data | 4 +- .../disable-load-balance-white-function.data | 2 +- src/test/pgpool_setup.in | 94 ++-- src/test/regression/clean.sh | 12 +- src/test/regression/libs.sh | 4 +- .../regression/tests/001.load_balance/test.sh | 56 +-- .../tests/003.failover/create_expected.sql | 4 +- .../003.failover/create_expected_node0.sql | 4 +- .../regression/tests/004.watchdog/.gitignore | 2 +- .../regression/tests/004.watchdog/leader.conf | 18 + .../regression/tests/004.watchdog/test.sh | 42 +- .../tests/005.jdbc/CreateTempTableTest.java | 2 +- .../regression/tests/006.memqcache/test.sh | 20 +- .../010.rewrite_timestamp/timestamp/main.c | 4 +- .../011.watchdog_quorum_failover/.gitignore | 2 +- .../011.watchdog_quorum_failover/leader.conf | 21 + .../011.watchdog_quorum_failover/test.sh | 56 +-- .../.gitignore | 2 +- .../leader.conf | 25 + .../test.sh | 48 +- .../.gitignore | 2 +- .../leader.conf | 25 + .../test.sh | 46 +- .../.gitignore | 2 +- .../leader.conf | 25 + .../014.watchdog_test_quorum_bypass/test.sh | 28 +- .../.gitignore | 2 +- .../leader.conf | 25 + .../test.sh | 52 +- .../tests/016.node_0_is_not_primary/test.sh | 8 +- src/test/regression/tests/051.bug60/bug.sql | 4 +- src/test/regression/tests/067.bug231/test.sh | 2 +- .../tests/071.execute_and_deallocate/test.sh | 2 +- src/tools/pcp/pcp_frontend_client.c | 16 +- src/utils/pool_process_reporting.c | 82 +-- src/utils/pool_relcache.c | 4 +- src/utils/pool_select_walker.c | 58 +-- src/utils/pool_stream.c | 6 +- 
src/utils/ps_status.c | 4 +- src/watchdog/watchdog.c | 476 +++++++++--------- src/watchdog/wd_internal_commands.c | 18 +- src/watchdog/wd_json_data.c | 8 +- 159 files changed, 2417 insertions(+), 2107 deletions(-) create mode 100755 src/sample/scripts/follow_primary.sh.sample create mode 100644 src/test/regression/tests/004.watchdog/leader.conf create mode 100644 src/test/regression/tests/011.watchdog_quorum_failover/leader.conf create mode 100644 src/test/regression/tests/012.watchdog_failover_when_quorum_exists/leader.conf create mode 100644 src/test/regression/tests/013.watchdog_failover_require_consensus/leader.conf create mode 100644 src/test/regression/tests/014.watchdog_test_quorum_bypass/leader.conf create mode 100644 src/test/regression/tests/015.watchdog_master_and_backend_fail/leader.conf diff --git a/doc.ja/src/sgml/advanced.sgml b/doc.ja/src/sgml/advanced.sgml index 86c36dfb8..f61edbf89 100644 --- a/doc.ja/src/sgml/advanced.sgml +++ b/doc.ja/src/sgml/advanced.sgml @@ -50,8 +50,8 @@ また、watchdogは、接続したすべてのPgpool-IIノードを調停し、フェイルバック、フェイルオーバ、フォローマスターコマンドがただひとつのPgpool-IIで実行されるようにします。 @@ -177,7 +177,7 @@ @@ -891,10 +891,10 @@ The heart of a watchdog process is a state machine that starts from its initial state (WD_LOADING) and transit towards either standby (WD_STANDBY) or - master/coordinator (WD_COORDINATOR) state. - Both standby and master/coordinator states are stable states of the + leader/coordinator (WD_COORDINATOR) state. + Both standby and leader/coordinator states are stable states of the watchdog state machine and the node stays in standby or - master/coordinator state until some problem in local + leader/coordinator state until some problem in local Pgpool-II node is detected or a remote Pgpool-II disconnects from the cluster. --> @@ -947,7 +947,7 @@ 参加しているすべてのPgpool-IIノードと通信し、マスター/コーディネーターノードの選択を調停し、クラスタのクォーラムを確実にする diff --git a/doc.ja/src/sgml/connection-pooling.sgml b/doc.ja/src/sgml/connection-pooling.sgml index 2893d242a..4a17eb194 100644 --- a/doc.ja/src/sgml/connection-pooling.sgml +++ b/doc.ja/src/sgml/connection-pooling.sgml @@ -1435,7 +1435,7 @@ - follow master child + follow primary child follow_child diff --git a/doc.ja/src/sgml/connection-settings.sgml b/doc.ja/src/sgml/connection-settings.sgml index 136e2d560..ffcc7fe9c 100644 --- a/doc.ja/src/sgml/connection-settings.sgml +++ b/doc.ja/src/sgml/connection-settings.sgml @@ -778,10 +778,10 @@ backend_clustering_mode = 'slony' - もし同票で、2つ以上のグループで同じノード数だった場合は、マスターノード(最も若いDBノード番号のバックエンドノード)を含むグループが優先されます。 + もし同票で、2つ以上のグループで同じノード数だった場合は、メインノード(最も若いDBノード番号のバックエンドノード)を含むグループが優先されます。 @@ -883,9 +883,9 @@ backend_clustering_mode = 'slony' - 結果(R:レプリケーション、M:マスターのみに送信、L:負荷分散) + 結果(R:レプリケーション、M:メインのみに送信、L:負荷分散) R M @@ -1355,10 +1355,10 @@ S2/N2: COMMIT; end of the parameter name (e.g.backend_hostname0). This number is referred to as "DB node ID", and it starts from 0. The backend which was given the DB node ID of 0 will be - called "master node". When multiple backends are defined, the - service can be continued even if the master node is down (not + called "main node". When multiple backends are defined, the + service can be continued even if the main node is down (not true in some modes). In this case, the youngest DB node ID - alive will be the new master node. + alive will be the new main node. 
--> パラメータ名の末尾に数字を付加することで複数のバックエンドを指定することができます(たとえばbackend_hostname0)。 この数字のことを「DBノードID」と呼び、0から開始します。 @@ -1476,7 +1476,7 @@ S2/N2: COMMIT; Pgpool-II V2.2.6, V2.3 or later allows alllow updating the values by reloading a configuration file. This is useful if you want to prevent any query sent to - slaves to perform some administrative work in master/slave mode. + standbys to perform some administrative work in native replication mode. --> 新しいbackend_weightはパラメータ行を追加して、設定ファイル再読み込みすることで追加できます。 Pgpool-II V2.2.6、V2.3以降では、設定ファイルの再読込みで値を変更できます。 @@ -1617,7 +1617,7 @@ S2/N2: COMMIT; - ALWAYS_MASTER + ALWAYS_PRIMARY ストリーミングレプリケーションでのみ有効です。 ストリーミングレプリケーションモードについてはをご覧ください。 このフラグがバックエンドのどれかにセットされていると、Pgpool-IIはバックエンドにアクセスしてプライマリノードを探すのではなく、このフラグがセットされているノードをプライマリノードと見なします。 diff --git a/doc.ja/src/sgml/example-AWS.sgml b/doc.ja/src/sgml/example-AWS.sgml index 214b189b8..6bc76f456 100644 --- a/doc.ja/src/sgml/example-AWS.sgml +++ b/doc.ja/src/sgml/example-AWS.sgml @@ -16,7 +16,7 @@ You can use watchdog with Pgpool-II in any mode: replication mode, - master/slave mode and raw mode. + native replication mode and raw mode. --> @@ -104,9 +104,9 @@ which we will not set in this example instead we will use and to switch the - Elastic IP address to the master/Active Pgpool-II node. + Elastic IP address to the leader/Active Pgpool-II node. --> - この例の設定はとほとんど同じになりますが、を設定せず、代わりにとを使ってmaster/Active Pgpool-IIノードのElastic IPアドレスを切り替えるのが異なります。 + この例の設定はとほとんど同じになりますが、を設定せず、代わりにとを使ってleader/Active Pgpool-IIノードのElastic IPアドレスを切り替えるのが異なります。 @@ -187,10 +187,10 @@ - このスクリプトは、watchdogがactive/masterノードになったときに、Elastic IPをアサインするためにwatchdogが実行します。 + このスクリプトは、watchdogがactive/leaderノードになったときに、Elastic IPをアサインするためにwatchdogが実行します。 aws-escalation.sh: @@ -222,9 +222,9 @@ - このスクリプトは、watchdogがactive/masterノードを退任するときに、Elastic IPのアサインを解除するためにwatchdogが実行します。 + このスクリプトは、watchdogがactive/leaderノードを退任するときに、Elastic IPのアサインを解除するためにwatchdogが実行します。 aws-de-escalation.sh: @@ -288,11 +288,11 @@ それぞれのサーバ上でPgpool-IIを"-n"スイッチ付きで起動し、pgpool.logにログメッセージをリダイレクトします。 - master/active Pgpool-IIノードは、Elastic IPのアサインメッセージを表示します。 + leader/active Pgpool-IIノードは、Elastic IPのアサインメッセージを表示します。 LOG: I am the cluster leader node. Starting escalation process LOG: escalation process started with PID:23543 @@ -357,8 +357,8 @@ LOG: watchdog node state changed from [JOINING] to [INITIALIZING] LOG: I am the only alive node in the watchdog cluster HINT: skipping stand for coordinator state - LOG: watchdog node state changed from [INITIALIZING] to [MASTER] - LOG: I am announcing my self as master/coordinator watchdog node + LOG: watchdog node state changed from [INITIALIZING] to [LEADER] + LOG: I am announcing my self as leader/coordinator watchdog node LOG: I am the cluster leader node DETAIL: our declare coordinator message is accepted by all nodes LOG: I am the cluster leader node. Starting escalation process diff --git a/doc.ja/src/sgml/example-Aurora.sgml b/doc.ja/src/sgml/example-Aurora.sgml index 11142c72e..cab014cac 100644 --- a/doc.ja/src/sgml/example-Aurora.sgml +++ b/doc.ja/src/sgml/example-Aurora.sgml @@ -15,14 +15,14 @@ streaming replication cluster with some exceptions. First, fail over and online recovery are managed by Aurora. So you don't need to - set , , + set , , and recovery related parameters. In this section we explain how to set up Pgpool-II for Aurora. 
--> Amazon Aurora for PostgreSQL Compatibility (Aurora) は、PostgreSQL用のマネージドサービスです。 ユーザから見ると、Auroraは、いくつか例外があるものの、ストリーミングレプリケーションのクラスタのように見えます。フェイルオーバやオンラインリカバリはAuroraによって管理されます。 - ですから、、、 + ですから、、、 それにオンラインリカバリ関連のパラメータは設定の必要がありません。 この章では、Aurora用のPgpool-II設定を説明します。 @@ -97,11 +97,11 @@ - 0ののALWAYS_MASTERをオンにします。 + 0ののALWAYS_PRIMARYをオンにします。 diff --git a/doc.ja/src/sgml/example-cluster.sgml b/doc.ja/src/sgml/example-cluster.sgml index 40a08312a..b3e811782 100644 --- a/doc.ja/src/sgml/example-cluster.sgml +++ b/doc.ja/src/sgml/example-cluster.sgml @@ -456,14 +456,14 @@ フェイルオーバの設定 PostgreSQLバックエンドノードがダウンした時に実行するスクリプトをに設定します。 - また、PostgreSQLサーバが3台の場合、プライマリノードのフェイルオーバ後に新しいプライマリからスレーブをリカバリするためにも設定する必要があります。はプライマリノードのフェイルオーバ後に実行されます。PostgreSQLサーバが2台の場合、の設定は不要です。 + また、PostgreSQLサーバが3台の場合、プライマリノードのフェイルオーバ後に新しいプライマリからスレーブをリカバリするためにも設定する必要があります。はプライマリノードのフェイルオーバ後に実行されます。PostgreSQLサーバが2台の場合、の設定は不要です。 それぞれの実行スクリプトの引数は、それぞれ実行時にPgpool-IIによってバックエンドの具体的な情報に置き換えられます。各引数の意味はをご参照ください。 failover_command = '/etc/pgpool-II/failover.sh %d %h %p %D %m %H %M %P %r %R %N %S' - follow_master_command = '/etc/pgpool-II/follow_master.sh %d %h %p %D %m %H %M %P %r %R' + follow_primary_command = '/etc/pgpool-II/follow_primary.sh %d %h %p %D %m %H %M %P %r %R' @@ -472,12 +472,12 @@ - サンプルスクリプトfailover.sh及びfollow_master.shは + サンプルスクリプトfailover.sh及びfollow_primary.shは /etc/pgpool-II/配下にインストールされていますので、これらのファイルをコピーして作成します。 # cp /etc/pgpool-II/failover.sh{.sample,} - # cp /etc/pgpool-II/follow_master.sh{.sample,} + # cp /etc/pgpool-II/follow_primary.sh{.sample,} 基本的にはPGHOMEを環境に合わせて変更すれば、動作します。 @@ -488,7 +488,7 @@ PGHOME=/usr/pgsql-11 ... - [server1]# vi /etc/pgpool-II/follow_master.sh + [server1]# vi /etc/pgpool-II/follow_primary.sh ... PGHOME=/usr/pgsql-11 ... @@ -904,7 +904,7 @@ arping_path = '/usr/sbin' .pcppassの設定 - 前述のfollow_master_commandのスクリプトでパスワード入力なしでPCPコマンドを実行できるように、すべてのサーバでPgpool-IIの起動ユーザのホームディレクトリに.pcppassを作成します。 + 前述のfollow_primary_commandのスクリプトでパスワード入力なしでPCPコマンドを実行できるように、すべてのサーバでPgpool-IIの起動ユーザのホームディレクトリに.pcppassを作成します。 [全サーバ]# echo 'localhost:9898:pgpool:pgpool' > ~/.pcppass @@ -985,14 +985,14 @@ arping_path = '/usr/sbin' watchdogアクティブ/スタンバイの切り替え - pcp_watchdog_infoでPgpool-IIのwatchdogの情報を確認します。最初に起動したPgpool-IIが「MASTER」になります。 + pcp_watchdog_infoでPgpool-IIのwatchdogの情報を確認します。最初に起動したPgpool-IIが「LEADER」になります。 # pcp_watchdog_info -h 192.168.137.150 -p 9898 -U pgpool Password: 3 YES server1:9999 Linux server1 server1 - server1:9999 Linux server1 server1 9999 9000 4 MASTER #最初に起動されたサーバがMASTERになる + server1:9999 Linux server1 server1 9999 9000 4 LEADER #最初に起動されたサーバがLEADERになる server2:9999 Linux server2 server2 9999 9000 7 STANDBY #スタンバイとして稼働 server3:9999 Linux server3 server3 9999 9000 7 STANDBY #スタンバイとして稼働 @@ -1006,7 +1006,7 @@ arping_path = '/usr/sbin' Password: 3 YES server2:9999 Linux server2 server2 - server2:9999 Linux server2 server2 9999 9000 4 MASTER #server2がアクティブに昇格 + server2:9999 Linux server2 server2 9999 9000 4 LEADER #server2がアクティブに昇格 server1:9999 Linux server1 server1 9999 9000 10 SHUTDOWN #server1が停止された server3:9999 Linux server3 server3 9999 9000 7 STANDBY #スタンバイとして稼働 @@ -1020,7 +1020,7 @@ arping_path = '/usr/sbin' Password: 3 YES server2:9999 Linux server2 server2 - server2:9999 Linux server2 server2 9999 9000 4 MASTER + server2:9999 Linux server2 server2 9999 9000 4 LEADER server1:9999 Linux server1 server1 9999 9000 7 STANDBY server3:9999 Linux server3 server3 9999 9000 7 STANDBY diff --git a/doc.ja/src/sgml/example-watchdog.sgml b/doc.ja/src/sgml/example-watchdog.sgml index 2fd3ed1d1..e286bc19e 100644 --- 
a/doc.ja/src/sgml/example-watchdog.sgml +++ b/doc.ja/src/sgml/example-watchdog.sgml @@ -15,7 +15,7 @@ that 1 node for backend exists. You can use watchdog with Pgpool-II in any mode: replication mode, - master/slave mode and raw mode. + native replication mode and raw mode. --> ここではwatchdogの機能を簡単に試す方法を説明します。 Linux マシン2台にそれぞれ Pgpool-II がインストールされているものとします。 @@ -209,14 +209,14 @@ --> ログから、仮想IP アドレスを使用し、またwatchdogプロセス起動したことが確認できます。 - LOG: I am announcing my self as master/coordinator watchdog node + LOG: I am announcing my self as leader/coordinator watchdog node LOG: I am the cluster leader node DETAIL: our declare coordinator message is accepted by all nodes LOG: I am the cluster leader node. Starting escalation process LOG: escalation process started with PID:59449 LOG: watchdog process is initialized LOG: watchdog: escalation started - LOG: I am the master watchdog node + LOG: I am the leader watchdog node DETAIL: using the local backend node status @@ -340,8 +340,8 @@ LOG: watchdog node state changed from [JOINING] to [INITIALIZING] LOG: I am the only alive node in the watchdog cluster HINT: skipping stand for coordinator state - LOG: watchdog node state changed from [INITIALIZING] to [MASTER] - LOG: I am announcing my self as master/coordinator watchdog node + LOG: watchdog node state changed from [INITIALIZING] to [LEADER] + LOG: I am announcing my self as leader/coordinator watchdog node LOG: I am the cluster leader node DETAIL: our declare coordinator message is accepted by all nodes diff --git a/doc.ja/src/sgml/failover.sgml b/doc.ja/src/sgml/failover.sgml index 3801df287..f839d2b30 100644 --- a/doc.ja/src/sgml/failover.sgml +++ b/doc.ja/src/sgml/failover.sgml @@ -137,23 +137,23 @@ %M - 古いマスターノードのID + 古いメインノードのID %m - 新しいマスターノードのID + 新しいメインノードのID %H - 新しいマスターノードのホスト名 + 新しいメインノードのホスト名 %P @@ -165,16 +165,16 @@ %r - 新しいマスターノードのポート番号 + 新しいメインノードのポート番号 %R - 新しいマスターノードのデータベースクラスタパス + 新しいメインノードのデータベースクラスタパス %N @@ -205,12 +205,12 @@ - マスターノードは、生きているデータベースノードの中から一番若い(小さい)ノードIDを持つノードを参照します。 + メインノードは、生きているデータベースノードの中から一番若い(小さい)ノードIDを持つノードを参照します。 ストリーミングレプリケーションモードでは、このノードはプライマリノードとは異なる場合があります。 - 内では、 %mはPgpool-IIによって選出された新しいマスターノードとなります。 - 新しいマスターノードには生きているノードで一番若い(小さい)ノードが割り当てられます。 + 内では、 %mはPgpool-IIによって選出された新しいメインノードとなります。 + 新しいメインノードには生きているノードで一番若い(小さい)ノードが割り当てられます。 例えば、ノード0、1、2という3つのノードを持ち、ノード1がプライマリノード、全てのノードは正常である(ダウンしているノードがない)と仮定します。 ノード1がダウンした場合、failover_command は %m = 0 で呼び出されます。 また、全てのスタンバイノードがダウンした状態でプライマリノードのフェイルオーバが起きた場合、failover_commandは %m = -1、および%H,%R,%r = "" で呼び出されます。 @@ -355,23 +355,23 @@ %M - 古いマスターノードのID + 古いメインノードのID %m - 新しいマスターノードのID + 新しいメインノードのID %H - 新しいマスターノードのホスト名 + 新しいメインノードのホスト名 %P @@ -383,16 +383,16 @@ %r - 新しいマスターノードのポート番号 + 新しいメインノードのポート番号 %R - 新しいマスターノードのデータベースクラスタパス + 新しいメインノードのデータベースクラスタパス %N @@ -446,10 +446,10 @@ - - follow_master_command (string) + + follow_primary_command (string) - follow_master_command 設定パラメータ + follow_primary_command 設定パラメータ @@ -466,14 +466,14 @@ プライマリノードのフェイルオーバー後に実行するユーザコマンドを指定します。 スタンバイがフェイルオーバーした際にはこのコマンドは実行されません。 このコマンドは、コマンドによってノードを昇格する要求があった場合にも起動されます。 - この機能は、マスタースレーブモードでストリーミングレプリケーション構成の場合のみ有効です。 + この機能は、メインスレーブモードでストリーミングレプリケーション構成の場合のみ有効です。 - フォローマスターコマンドオプション + フォロープライマリコマンドオプション @@ -553,7 +553,7 @@ %M 古いマスターノードのID @@ -621,22 +621,22 @@ - follow_master_commandが空文字列でない場合、ストリーミングレプリケーションによるマスタースレーブでプライマリノードのフェイルオーバーが完了した後に、Pgpool-IIは新しいプライマリ以外のすべてのノードを切り離し、クライアントから再び接続を受け付ける準備のため再度新しい子プロセスを起動します。 - その後、Pgpool-IIは切り離されたそれぞれのノードに対してfollow_master_commandに設定したコマンドを実行します。 + 
follow_primary_commandが空文字列でない場合、ストリーミングレプリケーションモードでプライマリノードのフェイルオーバーが完了した後に、Pgpool-IIは新しいプライマリ以外のすべてのノードを切り離し、クライアントから再び接続を受け付ける準備のため再度新しい子プロセスを起動します。 + その後、Pgpool-IIは切り離されたそれぞれのノードに対してfollow_primary_commandに設定したコマンドを実行します。 - - 通常は、follow_master_commandコマンドはコマンドを呼んで新しいプライマリからスレーブをリカバリするために使用します。 - follow_master_command中では、pg_ctlを使ってターゲットのPostgreSQLノードが動いているかどうかを確認することをお勧めします。 + 通常は、follow_primary_commandコマンドはコマンドを呼んで新しいプライマリからスタンバイをリカバリするために使用します。 + follow_primary_command中では、pg_ctlを使ってターゲットのPostgreSQLノードが動いているかどうかを確認することをお勧めします。 たとえば、そのノードはハードウェア障害で停止しているかも知れませんし、管理者が保守のために停止しているのかも知れません。 ノードが停止している場合は、そのノードをスキップしてください。 ノードが動いている場合は、まずそのノードを停止してからリカバリしてください。 - follow_master_commandの完全な例は、にあります。 + follow_primary_commandの完全な例は、にあります。 - このパラメータはストリーミングレプリケーションによるマスタースレーブモードの場合のみ有効です。 + このパラメータはストリーミングレプリケーションモードの場合のみ有効です。 - この設定は動作にむらのあるネットワーク環境において、マスターノードが正常であるにも関わらず、たまにヘルスチェックが失敗することが予想される場合に有用です。 + この設定は動作にむらのあるネットワーク環境において、ノードが正常であるにも関わらず、たまにヘルスチェックが失敗することが予想される場合に有用です。 diff --git a/doc.ja/src/sgml/intro.sgml b/doc.ja/src/sgml/intro.sgml index 8bbe566a9..be715d96a 100644 --- a/doc.ja/src/sgml/intro.sgml +++ b/doc.ja/src/sgml/intro.sgml @@ -147,7 +147,7 @@ もし負荷分散対象のクエリをPgpool-IIに負荷分散してほしくない場合には、SELECT文の前の任意のコメント(/*NO LOAD BALANCE*/がよく使われます)を付与することができます。 - これにより、そのクエリの負荷分散は無効となり、Pgpool-IIはこれをマスターノード(マスタースレーブモードではプライマリノード)に送信します。 + これにより、そのクエリの負荷分散は無効となり、Pgpool-IIはこれをメインノード(プライマリノード)に送信します。 @@ -73,9 +73,9 @@ - レプリケーションモードまたはマスタースレーブモードである + ローモードでない @@ -109,23 +109,23 @@ transaction has not issued a write query yet (until a write query is issued, load balance is possible. Here "write query" means non SELECT DML or DDL. Before Pgpool-II 4.1, - SELECTs having write functions as specified in black or - white function list is not regarded as a write query.) + SELECTs having write functions as specified in write or + read_only function list is not regarded as a write query.) --> トランザクション内で更新を伴うクエリが実行されていない (更新を伴うクエリが実行されるまでは負荷分散されます。 ここで「更新を伴うクエリ」とは、SELECT以外のDDLやDMLを指します。 - Pgpool-II4.1以前のバージョンでは、black/white function list + Pgpool-II4.1以前のバージョンでは、write/read_only function list で指定される更新関数を含むSELECTは更新を伴うクエリとは見なされません。) - もしblack/white function listが空の場合は、関数を持つSELECTは、更新を伴うクエリとは見なされません。 + もしwrite/read_only function listが空の場合は、関数を持つSELECTは、更新を伴うクエリとは見なされません。 @@ -154,22 +154,22 @@ SELECT または COPY TO STDOUT, EXPLAIN, EXPLAIN ANALYZE SELECT... から始まる。 - (またはで指定された書き込み関数を含むSELECTを除く) + (またはで指定された書き込み関数を含むSELECTを除く) = trueの場合は最初の空白文字は無視されます。 - マスタースレーブモードの場合、更に以下の条件が満たされなければなりません。 + ネィティブレプリケーションモードの場合、更に以下の条件が満たされなければなりません。 @@ -302,7 +302,7 @@ 更新を伴うクエリが存在する場合、続いて実行される参照クエリは負荷分散しません。 @@ -648,13 +648,13 @@ - - white_function_list (string) + + read_only_function_list (string) - white_function_list 設定パラメータ + read_only_function_list 設定パラメータ @@ -664,7 +664,7 @@ DO NOT update the database. SELECTs including functions not specified in this list are not load balanced. These are replicated among all the DB nodes in Replication mode, - sent to the primary node only in Maste Slave mode. + sent to the primary node only in Native Replication mode. 
--> データベースに対して更新を行なわない関数名をコンマ区切りで指定します。 このリストに指定されていない関数呼び出しを含むSELECTは負荷分散されません。 @@ -679,7 +679,7 @@ 正規表現には自動的に^と$が付与されます。 - + @@ -688,11 +688,11 @@ - もし読み出しのみを行う関数が"get_"あるいは"select_"で始まるようにしてあるのであれば、を以下のように設定可能です。 + もし読み出しのみを行う関数が"get_"あるいは"select_"で始まるようにしてあるのであれば、を以下のように設定可能です。 - white_function_list = 'get_.*,select_.*' + read_only_function_list = 'get_.*,select_.*' @@ -702,9 +702,9 @@ 問い合わせがスキーマ修飾なしあるいはスキーマ修飾ありの両方の関数を参照するなら、その両者をリストに登録しなければなりません。 #たとえば、ある問い合わせが"f1()"を、他の問い合わせが"public.f1()"を関数f1を参照するなら、 - #black_function_listは以下のように設定されるでしょう。 + #write_function_listは以下のように設定されるでしょう。 - black_function_list = "f1,public.f1" + write_function_list = "f1,public.f1" @@ -713,7 +713,7 @@ - このパラメータとが空文字なら、関数の揮発性属性が検査されます。 + このパラメータとが空文字なら、関数の揮発性属性が検査されます。 これが揮発性(volatile)なら、この関数は書き込みを行うと見なされます。 これは便利ですし、推奨できる設定です。 しかしこれにより初回システムカタログへのアクセスが発生します(次回以降はキャッシュされた結果が使用されるので余計な問い合わせは送信されません。) @@ -730,13 +730,13 @@ - - black_function_list (string) + + write_function_list (string) - black_function_list 設定パラメータ + write_function_list 設定パラメータ @@ -747,7 +747,7 @@ SELECTs including functions specified in this list are not load balanced. These are replicated among all the DB nodes in Replication mode, - sent to the primary node only in Maste Slave mode. + sent to the primary node only in Native Replication mode. --> データベースに対して更新を行う関数名をコンマ区切りで指定します。 このリストに指定された関数呼び出しを含むSELECTは負荷分散されません。 @@ -762,7 +762,7 @@ 正規表現には自動的に^と$が付与されます。 - + @@ -771,11 +771,11 @@ - もし更新を行う関数が"set_"、"update_"、"delete_"、あるいは"insert_"で始まるようにしてあるのであれば、を以下のように設定可能です。 + もし更新を行う関数が"set_"、"update_"、"delete_"、あるいは"insert_"で始まるようにしてあるのであれば、を以下のように設定可能です。 - black_function_list = 'nextval,setval,set_.*,update_.*,delete_.*,insert_.*' + write_function_list = 'nextval,setval,set_.*,update_.*,delete_.*,insert_.*' @@ -785,9 +785,9 @@ 問い合わせがスキーマ修飾なしあるいはスキーマ修飾ありの両方の関数を参照するなら、その両者をリストに登録しなければなりません。 #たとえば、ある問い合わせが"f1()"を、他の問い合わせが"public.f1()"を関数f1を参照するなら、 - #white_function_listは以下のように設定されるでしょう。 + #read_only_function_listは以下のように設定されるでしょう。 - white_function_list = "f1,public.f1" + read_only_function_list = "f1,public.f1" @@ -797,14 +797,14 @@ - とは互いに排他的で、2つのリストの内、どちらか一方のみ設定することができます。 + とは互いに排他的で、2つのリストの内、どちらか一方のみ設定することができます。 - + @@ -813,14 +813,14 @@ Pgpool-II V3.0より前のバージョンでは、固定でnextval()とsetval()がデータベースに書き込みを行なう関数であると認識されていました。 - 以下のようにとを設定することで、それと同じように動作させることができます。 + 以下のようにとを設定することで、それと同じように動作させることができます。 - white_function_list = '' - black_function_list = 'nextval,setval,lastval,currval' + read_only_function_list = '' + write_function_list = 'nextval,setval,lastval,currval' @@ -844,7 +844,7 @@ - このパラメータとが空文字なら、関数の揮発性属性が検査されます。 + このパラメータとが空文字なら、関数の揮発性属性が検査されます。 これが揮発性(volatile)なら、この関数は書き込みを行うと見なされます。 これは便利ですし、推奨できる設定です。 しかしこれにより初回システムカタログへのアクセスが発生します(次回以降はキャッシュされた結果が使用されるので余計な問い合わせは送信されません。) @@ -861,13 +861,13 @@ - - black_query_pattern_list (string) + + primary_routing_query_pattern_list (string) - black_query_pattern_list 設定パラメータ + primary_routing_query_pattern_list 設定パラメータ @@ -877,9 +877,9 @@ should be sent to primary node. SQL that matched patterns specified in this list are not load balanced. - Only Maste Slave mode is supported. + Only Native Replication mode is supported. 
--> - 特定の SQL をプライマリノードに送信するように を設定します。 + 特定の SQL をプライマリノードに送信するように を設定します。 SQL パターンをセミコロン区切りで指定します。 マスタースレーブモードのみで動作します。 @@ -901,7 +901,7 @@ また、指定される SQL パターンでは大文字と小文字を区別しません。 - + @@ -909,9 +909,9 @@ - もし以下の SQL をプライマリノードに送信したい場合、を以下のように設定可能です。 + もし以下の SQL をプライマリノードに送信したい場合、を以下のように設定可能です。 @@ -937,7 +937,7 @@ - black_query_pattern_list = 'SELECT \* FROM table_name1\;;SELECT col1, col2 FROM table_name2 WHERE col1 LIKE \'%a%\'\;;.*table_name3.*' + primary_routing_query_pattern_list = 'SELECT \* FROM table_name1\;;SELECT col1, col2 FROM table_name2 WHERE col1 LIKE \'%a%\'\;;.*table_name3.*' @@ -945,12 +945,12 @@ - との両方にマッチした場合、 - の設定が無視され、プライマリノードのみに送信されます。 + との両方にマッチした場合、 + の設定が無視され、プライマリノードのみに送信されます。 @@ -1461,9 +1461,9 @@ -関数の依存性を設定するには、関数はで設定済みでなければなりません。 +関数の依存性を設定するには、関数はで設定済みでなければなりません。 diff --git a/doc.ja/src/sgml/memcache.sgml b/doc.ja/src/sgml/memcache.sgml index 674cda771..521b73da5 100644 --- a/doc.ja/src/sgml/memcache.sgml +++ b/doc.ja/src/sgml/memcache.sgml @@ -33,7 +33,7 @@ However, VIEWs and SELECTs accessing unlogged tables can be cached by specifying in - the . + the . --> 基本的に以下のSELECTはキャッシュされません。 @@ -45,7 +45,7 @@ システムカタログを使用しているSELECT TABLESAMPLEを使っているSELECT - ただし、VIEWと unloggedテーブルは、white_memqcache_table_list に記載することでキャッシュされます。 + ただし、VIEWと unloggedテーブルは、cache_safe_memqcache_table_list に記載することでキャッシュされます。 @@ -353,13 +353,13 @@ - - white_memqcache_table_list (string) + + cache_safe_memqcache_table_list (string) - white_memqcache_table_list 設定パラメータ + cache_safe_memqcache_table_list 設定パラメータ @@ -371,11 +371,11 @@ Pgpool-II. This parameter only applies to VIEWs and SELECTs accessing unlogged tables. Regular tables can be cached unless specified - by . + by . --> SELECT結果がPgpool-IIにキャッシュされるべきテーブル名のリストをカンマ区切りで指定します。 このパラメータは、VIEWとunloggedテーブルにのみ適用されます。 - 通常のテーブルは、に記載されていない限りキャッシュされます。 + 通常のテーブルは、に記載されていない限りキャッシュされます。 @@ -397,10 +397,10 @@ #For example: #If the queries sometime use "table1" and other times "public.table1" - #to refer the table1 then the white_memqcache_table_list + #to refer the table1 then the cache_safe_memqcache_table_list #would be configured as follows. - white_memqcache_table_list = "table1,public.table1" + cache_safe_memqcache_table_list = "table1,public.table1" @@ -409,10 +409,10 @@ - black_memqcache_table_listはより優先されます。 + cache_unsafe_memqcache_table_listはより優先されます。 @@ -424,13 +424,13 @@ - - black_memqcache_table_list (string) + + cache_unsafe_memqcache_table_list (string) - black_memqcache_table_list 設定パラメータ + cache_unsafe_memqcache_table_list 設定パラメータ @@ -462,10 +462,10 @@ #For example: #If the queries sometime use "table1" and other times "public.table1" - #to refer the table1 then the black_memqcache_table_list + #to refer the table1 then the cache_unsafe_memqcache_table_list #would be configured as follows. 
- black_memqcache_table_list = "table1,public.table1" + cache_unsafe_memqcache_table_list = "table1,public.table1" diff --git a/doc.ja/src/sgml/misc-config.sgml b/doc.ja/src/sgml/misc-config.sgml index f50fcdb7d..cc0355001 100644 --- a/doc.ja/src/sgml/misc-config.sgml +++ b/doc.ja/src/sgml/misc-config.sgml @@ -147,9 +147,9 @@ リレーションキャッシュを作成するためのクエリを送る先のノードを指定します。 - masterにすると、クエリはマスタ(プライマリ)に送られます。 + primaryにすると、クエリはメイン(プライマリ)に送られます。 これがデフォルトで、最新のデータを入手できるため、ほとんどのユーザに推奨する設定です。 - もしマスタ(プライマリ)の負荷を下げたい場合は、このパラメータをload_balance_nodeに設定できます。 + もしメイン(プライマリ)の負荷を下げたい場合は、このパラメータをload_balance_nodeに設定できます。 これにより、クエリは負荷分散ノードに送られます。 これは特に、大陸AにPgpool-IIとプライマリサーバがあり、一方大陸BにPgpool-IIとスタンバイサーバがあるような構成で有効です。 Bのユーザは地理的にスタンバイサーバが近いため、スタンバイからデータを読みたいと思うでしょう。 @@ -225,8 +225,8 @@ リカバリされるバックエンドノードのデータベースクラスタへのパス @@ -183,9 +183,9 @@ - スクリプトのパスおよびファイル名は固定されており、マスタ(プライマリ)ノード上で$PGDATA/pgpool_remote_startが実行されます。 + スクリプトのパスおよびファイル名は固定されており、メイン(プライマリ)ノード上で$PGDATA/pgpool_remote_startが実行されます。 @@ -338,7 +338,7 @@ - マスター(プライマリ)ノードのデータベースクラスタへのパス + メイン(プライマリ)ノードのデータベースクラスタへのパス @@ -385,9 +385,9 @@ - マスター(プライマリ)ノードのポート番号(Pgpool-II 3.4以降) + メイン(プライマリ)ノードのポート番号(Pgpool-II 3.4以降) @@ -465,7 +465,7 @@ - マスター(プライマリ)ノードのデータベースクラスタへのパス + メイン(プライマリ)ノードのデータベースクラスタへのパス @@ -515,9 +515,9 @@ - マスター(プライマリ)ノードのポート番号(Pgpool-II 3.4以降) + メイン(プライマリ)ノードのポート番号(Pgpool-II 3.4以降) diff --git a/doc.ja/src/sgml/ref/pcp_promote_node.sgml b/doc.ja/src/sgml/ref/pcp_promote_node.sgml index 519a4efee..460e0991e 100644 --- a/doc.ja/src/sgml/ref/pcp_promote_node.sgml +++ b/doc.ja/src/sgml/ref/pcp_promote_node.sgml @@ -18,9 +18,9 @@ Pgpool-II documentation pcp_promote_node - Pgpool-II のノードをマスターに昇格させる + Pgpool-II のノードをプライマリに昇格させる @@ -39,16 +39,16 @@ Pgpool-II documentation 説明 pcp_promote_node - は Pgpool-II のノードをマスターに昇格させます。 - これは、マスタースレーブモードで ストリーミングレプリケーション構成の場合のみ使用できます。 + は Pgpool-II のノードをプライマリに昇格させます。 + これは、ストリーミングレプリケーション構成の場合のみ使用できます。 このコマンドは実際に PostgreSQL のスタンバイサーバを昇格するわけではないことに注意してください。 単に Pgpool-II の内部ステータスを変更するだけです。 ですから、ユーザはこのコマンドを使う際には自分で PostgreSQL のスタンバイを昇格させるようにしてください。 pcp_promote_nodeは、起動された後以下を行います。 - が設定されている場合はそれも起動されるので、十分な注意が必要です。 - 通常はを無効にしてからこのコマンドを実行することをお勧めします。 + が設定されている場合はそれも起動されるので、十分な注意が必要です。 + 通常はを無効にしてからこのコマンドを実行することをお勧めします。 @@ -68,7 +68,7 @@ Pgpool-II documentation - が設定されている場合は、2でダウンさせたPostgreSQLノードに対してを実行します。 + が設定されている場合は、2でダウンさせたPostgreSQLノードに対してを実行します。 @@ -90,9 +90,9 @@ Pgpool-II documentation - マスターに昇格させるバックエンドノードのインデックスを指定します。 + プライマリに昇格させるバックエンドノードのインデックスを指定します。 diff --git a/doc.ja/src/sgml/ref/pcp_watchdog_info.sgml b/doc.ja/src/sgml/ref/pcp_watchdog_info.sgml index d3051a38c..dd24be472 100644 --- a/doc.ja/src/sgml/ref/pcp_watchdog_info.sgml +++ b/doc.ja/src/sgml/ref/pcp_watchdog_info.sgml @@ -134,7 +134,7 @@ Pgpool-II documentation 3 NO Linux_host1.localdomain_9991 host1 Linux_host1.localdomain_9991 host1 9991 9001 7 STANDBY - Linux_host2.localdomain_9992 host2 9992 9002 4 MASTER + Linux_host2.localdomain_9992 host2 9992 9002 4 LEADER Linux_host3.localdomain_9993 host3 9993 9003 7 STANDBY @@ -151,8 +151,8 @@ Pgpool-II documentation 1. 
クラスタ内の全 watchdog ノード数 @@ -196,8 +196,8 @@ Pgpool-II documentation Quorum state : QUORUM EXIST Alive Remote Nodes : 2 VIP up on local node : NO - Master Node Name : Linux_host2.localdomain_9992 - Master Host Name : localhost + Leader Node Name : Linux_host2.localdomain_9992 + Leader Host Name : localhost Watchdog Node Information Node Name : Linux_host1.localdomain_9991 @@ -216,7 +216,7 @@ Pgpool-II documentation Watchdog port : 9002 Node priority : 1 Status : 4 - Status Name : MASTER + Status Name : LEADER Node Name : Linux_host3.localdomain_9993 Host Name : host3 diff --git a/doc.ja/src/sgml/ref/pgpool_setup.sgml b/doc.ja/src/sgml/ref/pgpool_setup.sgml index 47c1f02b8..74b80b6a0 100644 --- a/doc.ja/src/sgml/ref/pgpool_setup.sgml +++ b/doc.ja/src/sgml/ref/pgpool_setup.sgml @@ -417,7 +417,7 @@ temporarily start pgpool-II to create standby nodes recovery node 1...pcp_recovery_node -- Command Successful done. -creating follow master script +creating follow primary script node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay | replication_state | replication_sync_state | last_status_change ---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------+-------------------+------------------------+--------------------- 0 | /tmp | 11002 | up | 0.500000 | primary | 0 | true | 0 | | | 2020-08-18 13:50:19 diff --git a/doc.ja/src/sgml/ref/pgpool_show.sgml b/doc.ja/src/sgml/ref/pgpool_show.sgml index 945da074f..7a8407806 100644 --- a/doc.ja/src/sgml/ref/pgpool_show.sgml +++ b/doc.ja/src/sgml/ref/pgpool_show.sgml @@ -181,12 +181,12 @@ PostgreSQL documentation - パラメータ の現在の設定値を表示: + パラメータ の現在の設定値を表示: - PGPOOL SHOW black_function_list; - black_function_list + PGPOOL SHOW write_function_list; + write_function_list --------------------- nextval,setval (1 row) diff --git a/doc.ja/src/sgml/ref/show_pool_health_check_stats.sgml b/doc.ja/src/sgml/ref/show_pool_health_check_stats.sgml index a3512e9cc..eaa38e39a 100644 --- a/doc.ja/src/sgml/ref/show_pool_health_check_stats.sgml +++ b/doc.ja/src/sgml/ref/show_pool_health_check_stats.sgml @@ -86,7 +86,7 @@ role このノードの役割。ストリーミングレプリケーションモードではprimaryあるいはstandbyです。 - 他のモードでは、masterあるいはslaveです。 + 他のモードでは、mainあるいはreplicaです。 diff --git a/doc.ja/src/sgml/ref/watchdog_setup.sgml b/doc.ja/src/sgml/ref/watchdog_setup.sgml index 68e2f7851..7da4bce09 100644 --- a/doc.ja/src/sgml/ref/watchdog_setup.sgml +++ b/doc.ja/src/sgml/ref/watchdog_setup.sgml @@ -344,7 +344,7 @@ Pgpool-II documentation recovery node 1...pcp_recovery_node -- Command Successful done. - creating follow master script + creating follow primary script Pager usage is off. node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay ---------+----------+-------+--------+-----------+---------+------------+-------------------+------------------- @@ -389,7 +389,7 @@ Pgpool-II documentation recovery node 1...pcp_recovery_node -- Command Successful done. - creating follow master script + creating follow primary script Pager usage is off. node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay ---------+----------+-------+--------+-----------+---------+------------+-------------------+------------------- @@ -434,7 +434,7 @@ Pgpool-II documentation recovery node 1...pcp_recovery_node -- Command Successful done. - creating follow master script + creating follow primary script Pager usage is off. 
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay ---------+----------+-------+--------+-----------+---------+------------+-------------------+------------------- @@ -484,8 +484,8 @@ Pgpool-II documentation Quorum state : QUORUM EXIST Alive Remote Nodes : 2 VIP up on local node : NO - Master Node Name : Linux_tishii-CF-SX3HE4BP_50004 - Master Host Name : localhost + Leader Node Name : Linux_tishii-CF-SX3HE4BP_50004 + Leader Host Name : localhost Watchdog Node Information Node Name : Linux_tishii-CF-SX3HE4BP_50000 @@ -504,7 +504,7 @@ Pgpool-II documentation Watchdog port : 50006 Node priority : 1 Status : 4 - Status Name : MASTER + Status Name : LEADER Node Name : Linux_tishii-CF-SX3HE4BP_50008 Host Name : localhost diff --git a/doc.ja/src/sgml/release-3.4.sgml b/doc.ja/src/sgml/release-3.4.sgml index 5a08e3f38..5fa7c8356 100644 --- a/doc.ja/src/sgml/release-3.4.sgml +++ b/doc.ja/src/sgml/release-3.4.sgml @@ -390,9 +390,9 @@ --> - のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) + のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) diff --git a/doc.ja/src/sgml/release-3.5.sgml b/doc.ja/src/sgml/release-3.5.sgml index 354b45035..b7e112d23 100644 --- a/doc.ja/src/sgml/release-3.5.sgml +++ b/doc.ja/src/sgml/release-3.5.sgml @@ -930,9 +930,9 @@ --> - のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) + のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) diff --git a/doc.ja/src/sgml/release-3.6.sgml b/doc.ja/src/sgml/release-3.6.sgml index f0ac99116..341cb0a8f 100644 --- a/doc.ja/src/sgml/release-3.6.sgml +++ b/doc.ja/src/sgml/release-3.6.sgml @@ -1252,9 +1252,9 @@ --> - のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) + のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) @@ -2137,9 +2137,9 @@ --> - Doc: ドキュメントの"new master"を"new primary"に変更しました。(Bo Peng) + Doc: ドキュメントの"new master"を"new primary"に変更しました。(Bo Peng) @@ -5210,14 +5210,14 @@ - black_function_list/white_function_list + black_function_list/read_only_function_list に任意のアイテムの数が使用できるようになりました。(Muhammad Usama) diff --git a/doc.ja/src/sgml/release-3.7.sgml b/doc.ja/src/sgml/release-3.7.sgml index e0cedd667..10e552825 100644 --- a/doc.ja/src/sgml/release-3.7.sgml +++ b/doc.ja/src/sgml/release-3.7.sgml @@ -1484,9 +1484,9 @@ --> - のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) + のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) @@ -2407,9 +2407,9 @@ --> - Doc: ドキュメントの"new master"を"new primary"に変更しました。(Bo Peng) + Doc: ドキュメントの"new master"を"new primary"に変更しました。(Bo Peng) diff --git a/doc.ja/src/sgml/release-4.0.sgml b/doc.ja/src/sgml/release-4.0.sgml index b2e266486..8f6796ef3 100644 --- a/doc.ja/src/sgml/release-4.0.sgml +++ b/doc.ja/src/sgml/release-4.0.sgml @@ -1661,9 +1661,9 @@ --> - のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) + のデフォル値が間違っていた不具合を修正しました。(Tatsuo Ishii) @@ -1934,15 +1934,15 @@ --> - Doc:に関する説明を追加しました。(Tatsuo Ishii) + Doc:に関する説明を追加しました。(Tatsuo Ishii) - の実行条件などの説明を追加しました。 + の実行条件などの説明を追加しました。 @@ -2986,10 +2986,10 @@ --> - 負荷分散すべきではないSQLパターンのリストを指定できる を追加しました。(Bo Peng) + 負荷分散すべきではないSQLパターンのリストを指定できる を追加しました。(Bo Peng) Pgpool-II4.0以前では、 = transactionに設定することで明確なトランザクション内で更新を伴うクエリが発行されたとき、後続のクエリはレプリケーション遅延を避けるためにトランザクションが終了するまでプライマリのみに送られるべきです。 - しかしながら、で指定された、もしくはで指定されていない 更新を伴う関数は、white queryとは見なされず、後続の参照クエリはまだ負荷分散します。 + しかしながら、で指定された、もしくはで指定されていない 更新を伴う関数は、white queryとは見なされず、後続の参照クエリはまだ負荷分散します。 このcommitは、black function listで指定された、もしくはwhite function listで指定されていない関数を持つSELECT後の負荷分散を無効化します。 @@ -1990,10 +1990,10 @@ - とについて説明を強化しました。(tatsuo Ishii) + とについて説明を強化しました。(tatsuo Ishii) diff --git a/doc.ja/src/sgml/release-4.2.sgml b/doc.ja/src/sgml/release-4.2.sgml index 
f8ac05a23..7a898c69f 100644 --- a/doc.ja/src/sgml/release-4.2.sgml +++ b/doc.ja/src/sgml/release-4.2.sgml @@ -78,7 +78,7 @@ - システムカタログの情報を参照することにより、との記述を省略できるようになりました。 + システムカタログの情報を参照することにより、との記述を省略できるようになりました。 @@ -221,11 +221,11 @@ Author: Bo Peng - との値は空となりました。 + との値は空となりました。 @@ -513,13 +513,13 @@ Author: Bo Peng If a function included in SELECT/WITH has volatile property by checking system catalog, regard it a writing function. This feature is available only when the and the are both empty. The default of + linkend="guc-write-function-list"> and the are both empty. The default of the black_function_list and the white_function_list are now empty string so that this feature is enabled by default. --> システムカタログをチェックした結果、SELECT/WITHに含まれる関数が揮発性の性質を持っているなら、書き込みを行う関数であると見なします。 - この機能はとが空である場合にのみ有効になります。 + この機能はとが空である場合にのみ有効になります。 この機能がデフォルトで有効になるように、black_function_listとwhite_function_listのデフォルトは空となりました。 @@ -548,12 +548,12 @@ Author: Bo Peng - また、とでスキーマ修飾付きの関数名が認識されなかったのも修正されました。 + また、とでスキーマ修飾付きの関数名が認識されなかったのも修正されました。 なお、スキーマ修飾付きの関数名を登録する際は、スキーマ修飾なし関数名も登録するようにしてください。 diff --git a/doc.ja/src/sgml/restrictions.sgml b/doc.ja/src/sgml/restrictions.sgml index 042e1fd54..316d03a2f 100644 --- a/doc.ja/src/sgml/restrictions.sgml +++ b/doc.ja/src/sgml/restrictions.sgml @@ -60,7 +60,7 @@ 一時テーブル ちなみに、psqlの\dコマンドのように、システムカタログを問い合わせる中で 文字列としてのテーブル名を使っている場合: @@ -275,7 +275,7 @@ using CURRENT_TIMESTAMP, CURRENT_DATE, now() as their DEFAULT values will also be replicated correctly. This is done by replacing those - functions by constants fetched from master at query execution + functions by constants fetched from primary at query execution time. There are a few limitations however: --> CURRENT_TIMESTAMP、CURRENT_DATE、now()を含むINSERT/UPDATEは正しくレプリケーションされます。 diff --git a/doc.ja/src/sgml/runtime.sgml b/doc.ja/src/sgml/runtime.sgml index 71d2bddcb..098c57bc9 100644 --- a/doc.ja/src/sgml/runtime.sgml +++ b/doc.ja/src/sgml/runtime.sgml @@ -256,9 +256,9 @@ - マスタースレーブモード + slonyモード @@ -337,7 +337,7 @@ Pgpool-IIはPostgreSQL 9.0から利用可能になったPostgreSQL組み込みのストリーミングレプリケーション機能と一緒に動作することができます。 ストリーミングレプリケーション向けにPgpool-IIを設定するには、に'streaming-replication'を設定します。 diff --git a/doc.ja/src/sgml/watchdog.sgml b/doc.ja/src/sgml/watchdog.sgml index abb63968c..94000d5e4 100644 --- a/doc.ja/src/sgml/watchdog.sgml +++ b/doc.ja/src/sgml/watchdog.sgml @@ -610,7 +610,7 @@ マスターwatchdogに昇格した時に、ここで指定したコマンドがwatchdogによって実行されます。 @@ -642,10 +642,10 @@ Pgpool-IIのマスターwatchdogが責務を辞退し降格するときに、ここで指定したコマンドが実行されます。 @@ -750,10 +750,10 @@ 隔離とフェイルオーバ操作には多くの類似点がありますが、どちらも非常に基本的な方法で異なります。 隔離操作はを実行せず、障害の発生したノードを隔離します。 @@ -1183,15 +1183,15 @@ このパラメータによってローカルのwatchdogノードがマスターに選ばれる優先度を上げることができます。 古いマスターノードが故障した状況でクラスタがマスターノードの選択を行う際に、wd_priorityが高いノードがマスターwatchdogノードに選ばれます。 diff --git a/doc/src/sgml/advanced.sgml b/doc/src/sgml/advanced.sgml index ebd8c5f1d..cedf5b74d 100644 --- a/doc/src/sgml/advanced.sgml +++ b/doc/src/sgml/advanced.sgml @@ -39,8 +39,8 @@ At the startup, if the watchdog is enabled, Pgpool-II node - sync the status of all configured backend nodes from the master watchdog node. - And if the node goes on to become a master node itself it initializes the backend + sync the status of all configured backend nodes from the leader watchdog node. + And if the node goes on to become a leader node itself it initializes the backend status locally. When a backend node status changes by failover etc.., watchdog notifies the information to other Pgpool-II nodes and synchronizes them. 
When online recovery occurs, watchdog restricts @@ -50,7 +50,7 @@ Watchdog also coordinates with all connected Pgpool-II nodes to ensure - that failback, failover and follow_master commands must be executed only on one pgpool-II node. + that failback, failover and follow_primary commands must be executed only on one pgpool-II node. @@ -122,7 +122,7 @@ At startup watchdog verifies the Pgpool-II configuration of the local node for the consistency with the configurations - on the master watchdog node and warns the user of any differences. + on the leader watchdog node and warns the user of any differences. This eliminates the likelihood of undesired behavior that can happen because of different configuration on different Pgpool-II nodes. @@ -596,10 +596,10 @@ The heart of a watchdog process is a state machine that starts from its initial state (WD_LOADING) and transit towards either standby (WD_STANDBY) or - master/coordinator (WD_COORDINATOR) state. - Both standby and master/coordinator states are stable states of the + leader/coordinator (WD_COORDINATOR) state. + Both standby and leader/coordinator states are stable states of the watchdog state machine and the node stays in standby or - master/coordinator state until some problem in local + leader/coordinator state until some problem in local Pgpool-II node is detected or a remote Pgpool-II disconnects from the cluster. @@ -634,7 +634,7 @@ Communicates with all the participating Pgpool-II nodes to coordinate the selection of - master/coordinator node and to ensure the quorum in the cluster. + leader/coordinator node and to ensure the quorum in the cluster. diff --git a/doc/src/sgml/connection-pooling.sgml b/doc/src/sgml/connection-pooling.sgml index ccc99cf69..e68613c93 100644 --- a/doc/src/sgml/connection-pooling.sgml +++ b/doc/src/sgml/connection-pooling.sgml @@ -949,7 +949,7 @@ - follow master child + follow primary child follow_child diff --git a/doc/src/sgml/connection-settings.sgml b/doc/src/sgml/connection-settings.sgml index b0630b740..e5607a322 100644 --- a/doc/src/sgml/connection-settings.sgml +++ b/doc/src/sgml/connection-settings.sgml @@ -455,8 +455,8 @@ backend_clustering_mode = 'logical_replication' backend_clustering_mode = 'slony' - In this mode you can have up to 127 slave servers. Also it is - possible not to have slave server at all. + In this mode you can have up to 127 standby servers. Also it is + possible not to have standby server at all. @@ -471,8 +471,8 @@ backend_clustering_mode = 'slony' backend_clustering_mode = 'native_replication' - In this mode you can have up to 127 slave replication servers. - Also it is possible not to have slave server at all. + In this mode you can have up to 127 standby replication servers. + Also it is possible not to have standby server at all. @@ -552,7 +552,7 @@ backend_clustering_mode = 'native_replication' In case of a tie, when two or more groups have the same - number of nodes, then the group containing the master + number of nodes, then the group containing the primary node (backend node having the youngest node id) gets the precedence. @@ -626,7 +626,7 @@ backend_clustering_mode = 'native_replication' - results(R:replication, M: send only to master, L: load balance) + results(R:replication, M: send only to main, L: load balance) R M @@ -1004,10 +1004,10 @@ S2/N2: COMMIT; end of the parameter name (e.g.backend_hostname0). This number is referred to as "DB node ID", and it starts from 0. The backend which was given the DB node ID of 0 will be - called "master node". 
When multiple backends are defined, the - service can be continued even if the master node is down (not + called "main node". When multiple backends are defined, the + service can be continued even if the main node is down (not true in some modes). In this case, the youngest DB node ID - alive will be the new master node. + alive will be the new main node. @@ -1085,8 +1085,8 @@ S2/N2: COMMIT; Pgpool-II V2.2.6, V2.3 or later allows updating the values by reloading a configuration file. This is useful if you want to prevent - any query sent to slaves to perform some administrative - work in master/slave mode. + any query sent to standbys to perform some administrative + work in native replication mode. @@ -1174,7 +1174,7 @@ S2/N2: COMMIT; - ALWAYS_MASTER + ALWAYS_PRIMARY This is only useful in streaming replication mode. See about streaming replication mode. If this flag is set to @@ -1185,7 +1185,7 @@ S2/N2: COMMIT; flag is set as the primary node. This is useful for systems including Amazon Aurora for PostgreSQL Compatibility which has - fixed master server name. See + fixed primary server name. See for an example settings. diff --git a/doc/src/sgml/example-AWS.sgml b/doc/src/sgml/example-AWS.sgml index daa63c69f..fd7328c8f 100644 --- a/doc/src/sgml/example-AWS.sgml +++ b/doc/src/sgml/example-AWS.sgml @@ -12,7 +12,7 @@ You can use watchdog with Pgpool-II in any mode: replication mode, - master/slave mode and raw mode. + native replication mode and raw mode. @@ -67,7 +67,7 @@ which we will not set in this example instead we will use and to switch the - Elastic IP address to the master/Active Pgpool-II node. + Elastic IP address to the leader/Active Pgpool-II node. @@ -128,7 +128,7 @@ This script will be executed by the watchdog - to assign the Elastic IP on the instance when the watchdog becomes the active/master node. + to assign the Elastic IP on the instance when the watchdog becomes the active/leader node. Change the INSTANCE_ID and ELASTIC_IP values as per your AWS setup values. @@ -158,7 +158,7 @@ This script will be executed by watchdog - to remove the Elastic IP from the instance when the watchdog resign from the active/master node. + to remove the Elastic IP from the instance when the watchdog resign from the active/leader node. aws-de-escalation.sh: @@ -215,7 +215,7 @@ Start Pgpool-II on each server with "-n" switch and redirect log messages to the pgpool.log file. - The log message of master/active Pgpool-II node + The log message of leader/active Pgpool-II node will show the message of Elastic IP assignment. LOG: I am the cluster leader node. Starting escalation process @@ -268,8 +268,8 @@ LOG: watchdog node state changed from [JOINING] to [INITIALIZING] LOG: I am the only alive node in the watchdog cluster HINT: skipping stand for coordinator state - LOG: watchdog node state changed from [INITIALIZING] to [MASTER] - LOG: I am announcing my self as master/coordinator watchdog node + LOG: watchdog node state changed from [INITIALIZING] to [LEADER] + LOG: I am announcing my self as leader/coordinator watchdog node LOG: I am the cluster leader node DETAIL: our declare coordinator message is accepted by all nodes LOG: I am the cluster leader node. Starting escalation process diff --git a/doc/src/sgml/example-Aurora.sgml b/doc/src/sgml/example-Aurora.sgml index 004e99553..d89f83e3b 100644 --- a/doc/src/sgml/example-Aurora.sgml +++ b/doc/src/sgml/example-Aurora.sgml @@ -9,7 +9,7 @@ streaming replication cluster with some exceptions. 
First, fail over and online recovery are managed by Aurora. So you don't need to - set , , + set , , and recovery related parameters. In this section we explain how to set up Pgpool-II for Aurora. @@ -60,7 +60,7 @@ - Set ALWAYS_MASTER flag to + Set ALWAYS_PRIMARY flag to the for 0. diff --git a/doc/src/sgml/example-cluster.sgml b/doc/src/sgml/example-cluster.sgml index e61586490..772e50712 100644 --- a/doc/src/sgml/example-cluster.sgml +++ b/doc/src/sgml/example-cluster.sgml @@ -482,8 +482,8 @@ Specify failover.sh script to be executed after failover in failover_command parameter. - If we use 3 PostgreSQL servers, we need to specify follow_master_command to run after failover on the primary node failover. - In case of two PostgreSQL servers, follow_master_command setting is not necessary. + If we use 3 PostgreSQL servers, we need to specify follow_primary_command to run after failover on the primary node failover. + In case of two PostgreSQL servers, follow_primary_command setting is not necessary. Pgpool-II replaces the following special characters with the backend specific @@ -492,7 +492,7 @@ failover_command = '/etc/pgpool-II/failover.sh %d %h %p %D %m %H %M %P %r %R %N %S' - follow_master_command = '/etc/pgpool-II/follow_master.sh %d %h %p %D %m %H %M %P %r %R' + follow_primary_command = '/etc/pgpool-II/follow_primary.sh %d %h %p %D %m %H %M %P %r %R' @@ -502,12 +502,12 @@ Sample scripts failover.sh - and follow_master.sh + and follow_primary.sh are installed in /etc/pgpool-II/. Create failover scripts using these sample files. # cp /etc/pgpool-II/failover.sh{.sample,} - # cp /etc/pgpool-II/follow_master.sh{.sample,} + # cp /etc/pgpool-II/follow_primary.sh{.sample,} Basically, it should work if you change PGHOME according to PostgreSQL installation directory. @@ -518,7 +518,7 @@ PGHOME=/usr/pgsql-11 ... - [server1]# vi /etc/pgpool-II/follow_master.sh + [server1]# vi /etc/pgpool-II/follow_primary.sh ... PGHOME=/usr/pgsql-11 ... @@ -961,7 +961,7 @@ arping_path = '/usr/sbin' .pcppass - Since follow_master_command script has to execute PCP command without entering the + Since follow_primary_command script has to execute PCP command without entering the password, we create .pcppass in the home directory of Pgpool-II startup user (root user). @@ -1059,14 +1059,14 @@ arping_path = '/usr/sbin' Switching active/standby watchdog - Confirm the watchdog status by using pcp_watchdog_info. The Pgpool-II server which is started first run as MASTER. + Confirm the watchdog status by using pcp_watchdog_info. The Pgpool-II server which is started first run as LEADER. # pcp_watchdog_info -h 192.168.137.150 -p 9898 -U pgpool Password: 3 YES server1:9999 Linux server1 server1 - server1:9999 Linux server1 server1 9999 9000 4 MASTER #The Pgpool-II server started first becames "MASTER". + server1:9999 Linux server1 server1 9999 9000 4 LEADER #The Pgpool-II server started first becames "LEADER". 
server2:9999 Linux server2 server2 9999 9000 7 STANDBY #run as standby server3:9999 Linux server3 server3 9999 9000 7 STANDBY #run as standby @@ -1083,7 +1083,7 @@ arping_path = '/usr/sbin' Password: 3 YES server2:9999 Linux server2 server2 - server2:9999 Linux server2 server2 9999 9000 4 MASTER #server2 is promoted to MASTER + server2:9999 Linux server2 server2 9999 9000 4 LEADER #server2 is promoted to LEADER server1:9999 Linux server1 server1 9999 9000 10 SHUTDOWN #server1 is stopped server3:9999 Linux server3 server3 9999 9000 7 STANDBY #server3 runs as STANDBY @@ -1098,7 +1098,7 @@ arping_path = '/usr/sbin' Password: 3 YES server2:9999 Linux server2 server2 - server2:9999 Linux server2 server2 9999 9000 4 MASTER + server2:9999 Linux server2 server2 9999 9000 4 LEADER server1:9999 Linux server1 server1 9999 9000 7 STANDBY server3:9999 Linux server3 server3 9999 9000 7 STANDBY diff --git a/doc/src/sgml/example-watchdog.sgml b/doc/src/sgml/example-watchdog.sgml index 30193544e..ac753a2ef 100644 --- a/doc/src/sgml/example-watchdog.sgml +++ b/doc/src/sgml/example-watchdog.sgml @@ -9,7 +9,7 @@ that 1 node for backend exists. You can use watchdog with Pgpool-II in any mode: replication mode, - master/slave mode and raw mode. + native replication mode and raw mode. This example uses use "osspc16" as an Active node and @@ -132,14 +132,14 @@ Log messages will show that Pgpool-II has the virtual IP address and starts watchdog process. - LOG: I am announcing my self as master/coordinator watchdog node + LOG: I am announcing my self as leader/coordinator watchdog node LOG: I am the cluster leader node DETAIL: our declare coordinator message is accepted by all nodes LOG: I am the cluster leader node. Starting escalation process LOG: escalation process started with PID:59449 LOG: watchdog process is initialized LOG: watchdog: escalation started - LOG: I am the master watchdog node + LOG: I am the leader watchdog node DETAIL: using the local backend node status @@ -229,8 +229,8 @@ LOG: watchdog node state changed from [JOINING] to [INITIALIZING] LOG: I am the only alive node in the watchdog cluster HINT: skipping stand for coordinator state - LOG: watchdog node state changed from [INITIALIZING] to [MASTER] - LOG: I am announcing my self as master/coordinator watchdog node + LOG: watchdog node state changed from [INITIALIZING] to [LEADER] + LOG: I am announcing my self as leader/coordinator watchdog node LOG: I am the cluster leader node DETAIL: our declare coordinator message is accepted by all nodes diff --git a/doc/src/sgml/failover.sgml b/doc/src/sgml/failover.sgml index 16198ff03..004d765fb 100644 --- a/doc/src/sgml/failover.sgml +++ b/doc/src/sgml/failover.sgml @@ -152,15 +152,15 @@ %M - Old master node ID + Old main node ID %m - New master node ID + New main node ID %H - Hostname of the new master node + Hostname of the new main node %P @@ -168,11 +168,11 @@ %r - Port number of the new master node + Port number of the new main node %R - Database cluster directory of the new master node + Database cluster directory of the new main node %N @@ -193,12 +193,12 @@ - The "master node" refers to a node which has the + The "main node" refers to a node which has the "youngest (or the smallest) node id" among live the database nodes. In streaming replication mode, this may be different from primary node. In , - %m is the new master node chosen + %m is the new main node chosen by Pgpool-II. It is the node being assigned the youngest (smallest) node id which is alive. 
For example if you have 3 nodes, namely node 0, 1, @@ -301,15 +301,15 @@ %M - Old master node ID + Old main node ID %m - New master node ID + New main node ID %H - Hostname of the new master node + Hostname of the new main node %P @@ -317,11 +317,11 @@ %r - Port number of the new master node + Port number of the new main node %R - Database cluster directory of the new master node + Database cluster directory of the new main node %N @@ -359,10 +359,10 @@ - - follow_master_command (string) + + follow_primary_command (string) - follow_master_command configuration parameter + follow_primary_command configuration parameter @@ -373,13 +373,13 @@ command will not be executed. This command also runs if a node promote request is issued by command. This works only - in Master Replication mode with streaming replication. + in Native Replication mode with streaming replication. Since the command is executed within a child process forked off by Pgpool-II after failover - is completed, execution of follow master command does not + is completed, execution of the follow primary command does not block the service of Pgpool-II. Here is pseudo code to illustrate how the command is executed: { if (the node is not the new primary) set down node status to shared memory status - memorize that follow master command is needed to execute + memorize that follow primary command needs to be executed } - if (we need to executed follow master command) + if (we need to execute follow primary command) fork a child process (within the child process) for each backend node if (the node status in shared memory is down) - execute follow master command + execute follow primary command @@ -405,8 +405,8 @@ with the backend specific information before executing the command. - - follow master command options +
+ follow primary command options @@ -434,7 +434,7 @@ %M - Old master node ID + Old main node ID %m @@ -475,19 +475,19 @@ - If follow_master_command is not empty, then after failover - on the primary node gets completed in Master Slave mode with streaming replication, + If follow_primary_command is not empty, then after failover + on the primary node is completed in Native Replication mode with streaming replication, Pgpool-II degenerates all nodes except the new primary and starts new child processes to be ready again to accept connections from the clients. After this, Pgpool-II executes the command configured - in the follow_master_command for each degenerated backend nodes. + in the follow_primary_command for each degenerated backend node. - Typically follow_master_command command - is used to recover the slave from the new primary by calling + Typically the follow_primary_command + is used to recover the standby from the new primary by calling the pcp_recovery_node command. In - the follow_master_command, it is + the follow_primary_command, it is recommended to check whether the target PostgreSQL node is running or not using pg_ctl since an already stopped node usually has problems or the administrator is maintaining the node. If the node is stopped, skip the node. If the node is running, stop the node first and recover it. A - complete follow_master_command example + complete follow_primary_command example can be found in .
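Purely as a rough sketch of that flow (this is not the shipped sample script; the PGHOME value, passwordless ssh as the postgres user, and use of .pcppass via the -w option are assumptions), a minimal follow primary script might look like this:

 #!/bin/bash
 # follow_primary.sh sketch: invoked once per detached backend node with
 # %d %h %p %D ... as arguments (see the options table above); the
 # remaining arguments (%m %H %M %P %r %R) are unused here.
 node_id=$1      # %d: DB node ID of the detached node
 host=$2         # %h: hostname of the detached node
 port=$3         # %p: port number of the detached node
 pgdata=$4       # %D: database cluster directory of the detached node
 PGHOME=/usr/pgsql-11

 # Skip nodes that are already stopped; they may be under maintenance.
 ssh postgres@$host $PGHOME/bin/pg_ctl -D $pgdata status >/dev/null 2>&1 || exit 0

 # Stop the running standby, then re-synchronize it from the new primary.
 ssh postgres@$host $PGHOME/bin/pg_ctl -D $pgdata -m fast stop
 pcp_recovery_node -h localhost -p 9898 -U pgpool -w -n $node_id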
diff --git a/doc/src/sgml/healthcheck.sgml index 220e57028..232bd6412 100644 --- a/doc/src/sgml/healthcheck.sgml +++ b/doc/src/sgml/healthcheck.sgml @@ -218,7 +218,7 @@ This setting can be useful in spotty networks, when it is expected that - health checks will fail occasionally even when the master node is fine. + health checks will fail occasionally even when the primary node is fine. diff --git a/doc/src/sgml/installation.sgml index c0afa95b0..6614e547f 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -66,19 +66,19 @@ database unavailability due to the Pgpool-II being down. Multiple Pgpool-II work together and monitor - each other. One of them is called "master" and it has a virtual + each other. One of them is called "leader" and it has a virtual IP. Clients do not need to be aware that there are multiple Pgpool-II because they always access the same VIP. (See for watchdog). If one of Pgpool-II goes down, another Pgpool-II takes over the - master role. + leader role. - Since it is not allowed to have multiple master, watchdog votes to - decide a new master. If there are even number of + Since it is not allowed to have multiple leaders, watchdog votes to + decide a new leader. If there is an even number of Pgpool-II, it is impossible to decide - the new master by voting. Thus we recommend to deploy + the new leader by voting. Thus we recommend deploying Pgpool-II in an odd number of 3 or more. diff --git a/doc/src/sgml/intro.sgml index 76df592f6..0ff24c129 100644 --- a/doc/src/sgml/intro.sgml +++ b/doc/src/sgml/intro.sgml @@ -100,7 +100,7 @@ If a database is replicated (because running in either - replication mode or master/slave mode), performing a SELECT + replication mode or native replication mode), performing a SELECT query on any server will return the same result. Pgpool-II takes advantage of the replication feature in order to reduce the load on each PostgreSQL server. It does that by diff --git a/doc/src/sgml/loadbalance.sgml index 61f7ff250..64bdda661 100644 --- a/doc/src/sgml/loadbalance.sgml +++ b/doc/src/sgml/loadbalance.sgml @@ -7,7 +7,7 @@ Pgpool-II load balancing of SELECT queries works with any clustering mode except raw mode. When enabled Pgpool-II sends the writing queries to the - primary node in Master Slave mode, all of the + primary node in Native Replication mode, all of the backend nodes in Replication mode, and other queries get load balanced among all backend nodes. To which node the load balancing mechanism sends read queries is decided at the session @@ -27,7 +27,7 @@ arbitrary comment (/*NO LOAD BALANCE*/ is usually used) before the SELECT statement. This will disable the load balance of the particular query and Pgpool-II - will send it to the master node (the primary node in Master Slave mode). + will send it to the main node (the primary node in Native Replication mode). @@ -52,7 +52,7 @@ - either in replication mode or master slave mode + either in replication mode or native replication mode @@ -76,13 +76,13 @@ transaction has not issued a write query yet (until a write query is issued, load balance is possible. Here "write query" means non SELECT DML or DDL. Before Pgpool-II 4.1, - SELECTs having write functions as specified in black or - white function list is not regarded as a write query.) + SELECTs having write functions as specified in the write or + read_only function list are not regarded as write queries.) - If black and white function list is empty, SELECTs having + If the write and read_only function lists are empty, SELECTs having functions are regarded as read only queries. @@ -106,13 +106,13 @@ it starts with "SELECT" or one of COPY TO STDOUT, EXPLAIN, EXPLAIN ANALYZE SELECT... = true will ignore leading white space. - (Except for SELECTs using writing functions specified in or - ) + (Except for SELECTs using writing functions specified in or + ) - in master slave mode, in addition to above, following conditions must be met: + in native replication mode, in addition to the above, the following conditions must be met: @@ -177,8 +177,8 @@ SELECT/WITH without writing functions. Volatile functions are regarded as writing functions. - You can define your own writing functions by using - or . + You can define your own writing functions by using + or . @@ -224,7 +224,7 @@ If writing queries appear, succeeding read queries may not be load balanced. i.e. sent to primary node (in streaming - replication mode) or master node (in other mode) depending on the + replication mode) or main node (in other mode) depending on the setting of . @@ -496,10 +496,10 @@ - - white_function_list (string) + + read_only_function_list (string) - white_function_list configuration parameter + read_only_function_list configuration parameter @@ -508,21 +508,21 @@ DO NOT update the database. SELECTs including functions not specified in this list are not load balanced. These are replicated among all the DB nodes in Replication mode, - sent to the primary node only in Master Slave mode. + sent to the primary node only in Native Replication mode. You can use regular expression to match function names, to which ^ and $ are automatically added.
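As a quick sanity check, the value in effect can be inspected through any client connection after editing pgpool.conf; a sketch (the pgpool port 9999 is an assumption):

 $ pgpool reload
 $ psql -p 9999 -c "PGPOOL SHOW read_only_function_list;" postgres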
- + Using regular expression If you have prefixed all your read only functions with 'get_' or 'select_', you can - set the like below: + set the like below: - white_function_list = 'get_.*,select_.*' + read_only_function_list = 'get_.*,select_.*' @@ -535,10 +535,10 @@ #For example: #If the queries sometimes use "f1()" and other times "public.f1()" - #to refer the function f1 then the white_function_list + #to refer to the function f1 then the read_only_function_list #would be configured as follows. - white_function_list = "f1,public.f1" + read_only_function_list = "f1,public.f1" @@ -547,7 +547,7 @@ - If this parameter and + If this parameter and is an empty string, the function's volatile property will be checked. If the property is volatile, the function is regarded as a writing function. This is a convenient and recommended way. However this @@ -564,10 +564,10 @@ - - black_function_list (string) + + write_function_list (string) - black_function_list configuration parameter + write_function_list configuration parameter @@ -577,21 +577,21 @@ SELECTs including functions specified in this list are not load balanced. These are replicated among all the DB nodes in Replication mode, - sent to the primary node only in Master Slave mode. + sent to the primary node only in Native Replication mode. You can use regular expression to match function names, to which ^ and $ are automatically added. - + Using regular expression If you have prefixed all your updating functions with 'set_', 'update_', 'delete_' or 'insert_', you can - set the like below: + set the like below: - black_function_list = 'nextval,setval,set_.*,update_.*,delete_.*,insert_.*' + write_function_list = 'nextval,setval,set_.*,update_.*,delete_.*,insert_.*' @@ -604,10 +604,10 @@ #For example: #If the queries sometimes use "f1()" and other times "public.f1()" - #to refer the function f1 then the black_function_list + #to refer to the function f1 then the write_function_list #would be configured as follows. - black_function_list = "f1,public.f1" + write_function_list = "f1,public.f1" @@ -616,21 +616,21 @@ - and + and are mutually exclusive and only one of the two lists can be set in the configuration. - + Configuring using <literal>nextval()</literal> and <literal>setval()</literal> to land on proper backend Prior to Pgpool-II V3.0, nextval() and setval() were known as functions writing to the database. - You can configure this by setting - and as follows + You can configure this by setting + and as follows - white_function_list = '' - black_function_list = 'nextval,setval,lastval,currval' + read_only_function_list = '' + write_function_list = 'nextval,setval,lastval,currval' @@ -650,7 +650,7 @@ - If this parameter and + If this parameter and is an empty string, the function's volatile property will be checked. If the property is volatile, the function is regarded as a writing function. This is a convenient and recommended way. However this @@ -667,10 +667,10 @@ - - black_query_pattern_list (string) + + primary_routing_query_pattern_list (string) - black_query_pattern_list configuration parameter + primary_routing_query_pattern_list configuration parameter @@ -679,7 +679,7 @@ should be sent to primary node. SQL that matches patterns specified in this list are not load balanced. - Only Master Slave mode is supported. + Only Native Replication mode is supported. You can use regular expression to match SQL patterns, @@ -691,11 +691,11 @@ SQL pattern specified in this parameter is case-insensitive.
- + Using regular expression If the following SQL should be sent to the primary node only, you can - set the like below: + set the like below: @@ -717,15 +717,15 @@ - black_query_pattern_list = 'SELECT \* FROM table_name1\;;SELECT col1, col2 FROM table_name2 WHERE col1 LIKE \'%a%\'\;;.*table_name3.*' + primary_routing_query_pattern_list = 'SELECT \* FROM table_name1\;;SELECT col1, col2 FROM table_name2 WHERE col1 LIKE \'%a%\'\;;.*table_name3.*' If SQL matches both and - , + , setting is ignored and the SQL will be sent only to the primary node. @@ -1063,7 +1063,7 @@ To configure the dependency on the function, - The function must be present in the + The function must be present in the diff --git a/doc/src/sgml/memcache.sgml index 679bf5ff4..42d9549a6 100644 --- a/doc/src/sgml/memcache.sgml +++ b/doc/src/sgml/memcache.sgml @@ -35,7 +35,7 @@ However, VIEWs and SELECTs accessing unlogged tables can be cached by specifying in - the . + the . @@ -229,10 +229,10 @@ - - white_memqcache_table_list (string) + + cache_safe_memqcache_table_list (string) - white_memqcache_table_list configuration parameter + cache_safe_memqcache_table_list configuration parameter @@ -243,7 +243,7 @@ Pgpool-II. This parameter only applies to VIEWs and SELECTs accessing unlogged tables. Regular tables can be cached unless specified - by . + by . @@ -259,10 +259,10 @@ #For example: #If the queries sometimes use "table1" and other times "public.table1" - #to refer the table1 then the white_memqcache_table_list + #to refer to table1 then the cache_safe_memqcache_table_list #would be configured as follows. - white_memqcache_table_list = "table1,public.table1" + cache_safe_memqcache_table_list = "table1,public.table1" @@ -274,10 +274,10 @@ - - black_memqcache_table_list (string) + + cache_unsafe_memqcache_table_list (string) - black_memqcache_table_list configuration parameter + cache_unsafe_memqcache_table_list configuration parameter @@ -300,10 +300,10 @@ #For example: #If the queries sometimes use "table1" and other times "public.table1" - #to refer the table1 then the black_memqcache_table_list + #to refer to table1 then the cache_unsafe_memqcache_table_list #would be configured as follows. - black_memqcache_table_list = "table1,public.table1" + cache_unsafe_memqcache_table_list = "table1,public.table1" @@ -314,8 +314,8 @@ - black_memqcache_table_list - precedence over + cache_unsafe_memqcache_table_list + precedence over diff --git a/doc/src/sgml/misc-config.sgml index 2e5a68d35..918b32fdd 100644 --- a/doc/src/sgml/misc-config.sgml +++ b/doc/src/sgml/misc-config.sgml @@ -121,11 +121,11 @@ The target node to send queries to create relation cache - entries. If set to master, queries will - be sent to master (primary) node. This is the default and + entries. If set to primary, queries will + be sent to the primary node. This is the default and recommended for most users because the query could get the - latest information. If you want to lower the load of master - (primary) node, you can set the parameter to + latest information. If you want to lower the load of the + primary node, you can set the parameter to load_balance_node, which will send queries to the load balance node. This is especially useful for such a system @@ -167,9 +167,9 @@ check in the SELECT statements.
To check the temporary table Pgpool-II queries the system catalog of - primary/master PostgreSQL backend + primary/main PostgreSQL backend if catalog is specified, which - increases the load on the primary/master server. + increases the load on the primary/main server. If trace is @@ -220,8 +220,8 @@ Setting to on, enables the unlogged table check in the SELECT statements. To check the unlogged table Pgpool-II - queries the system catalog of primary/master PostgreSQL backend which increases - the load on the primary/master server. + queries the system catalog of the primary/main PostgreSQL backend which increases + the load on the primary/main server. If you are absolutely sure that your system never uses the unlogged tables (for example, you are using 9.0 or earlier version of PostgreSQL) then you can safely turn off the check_unlogged_table. diff --git a/doc/src/sgml/online-recovery.sgml index 9cd2471d0..98e7aee28 100644 --- a/doc/src/sgml/online-recovery.sgml +++ b/doc/src/sgml/online-recovery.sgml @@ -31,7 +31,7 @@ Make sure that autovacuum is stopped on the - master node (the first node which is up and running) before starting the + main node (the first node which is up and running) before starting the online recovery. Autovacuum can change the contents of the database which can cause the inconsistency after the online recovery. @@ -115,14 +115,14 @@ - Path to the database cluster of the master(primary) node. + Path to the database cluster of the main (primary) node. The script example can be found in . - The script path and filename are hard coded, $PGDATA/pgpool_remote_start is executed on master(primary) node. + The script path and filename are hard coded; $PGDATA/pgpool_remote_start is executed on the main (primary) node. @@ -221,7 +221,7 @@ - Specifies a command to be run by master (primary) node at the + Specifies a command to be run by the main (primary) node at the first stage of online recovery. The command file must be placed in the database cluster directory for security reasons. For example, if recovery_1st_stage_command = @@ -236,7 +236,7 @@ - Path to the database cluster of the master (primary) node. + Path to the database cluster of the main (primary) node. @@ -251,7 +251,7 @@ - Port number of the master (primary) node (Pgpool-II 3.4 or after). + Port number of the main (primary) node (Pgpool-II 3.4 or after). @@ -306,7 +306,7 @@ - Specifies a command to be run by master node at the second + Specifies a command to be run by the main node at the second stage of online recovery. This command is required only , so for other modes you don't need to provide a command file. The command file must be placed in the @@ -323,7 +323,7 @@ - Path to the database cluster of the master(primary) node. + Path to the database cluster of the main (primary) node. @@ -338,7 +338,7 @@ - Port number of the master (primary) node (Pgpool-II 3.4 or after). + Port number of the main (primary) node (Pgpool-II 3.4 or after).
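Tying the first-stage parameters above together, here is a minimal pg_basebackup-based sketch of a recovery_1st_stage script (this is not the shipped sample, which uses rsync; the PGHOME value, passwordless ssh as the postgres user, and a replication user named repl are assumptions):

 #!/bin/bash
 # recovery_1st_stage sketch, executed on the main (primary) node.
 main_db_cluster=$1     # path to the database cluster of the main node
 dest_host=$2           # host to be recovered
 dest_cluster=$3        # path to the database cluster to be recovered
 port=$4                # port number of the main node
 PGHOME=/usr/pgsql-11

 # Rebuild the target node from a fresh base backup of this node.
 ssh postgres@$dest_host "rm -rf $dest_cluster && \
     $PGHOME/bin/pg_basebackup -h $(hostname) -p $port -U repl -D $dest_cluster -X stream"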
diff --git a/doc/src/sgml/ref/pcp_promote_node.sgml index 537f18aac..927e71049 100644 --- a/doc/src/sgml/ref/pcp_promote_node.sgml +++ b/doc/src/sgml/ref/pcp_promote_node.sgml @@ -17,7 +17,7 @@ Pgpool-II documentation pcp_promote_node - promotes the given node as new master to Pgpool-II + promotes the given node as the new main node to Pgpool-II @@ -33,14 +33,14 @@ Pgpool-II documentation Description pcp_promote_node - promotes the given node as new master to Pgpool-II. In master/slave streaming replication only. Please note that this command does not actually promote standby PostgreSQL backend: it just changes the internal status of Pgpool-II and trigger failover and users have to promote standby PostgreSQL outside Pgpool-II. + promotes the given node as the new primary to Pgpool-II. In streaming replication mode only. Please note that this command does not actually promote the standby PostgreSQL backend: it just changes the internal status of Pgpool-II and triggers failover, and users have to promote the standby PostgreSQL outside Pgpool-II. pcp_promote_node executes the following. Please be - warned that if is set, + warned that if is set, the command will be executed. It is standard advice that you - disable before executing + disable before executing this command. @@ -67,8 +67,8 @@ Pgpool-II documentation - If is set, execute - against + If is set, execute + against PostgreSQL. @@ -88,7 +88,7 @@ Pgpool-II documentation - The index of backend node to promote as new master. + The index of the backend node to promote as the new main node. diff --git a/doc/src/sgml/ref/pcp_watchdog_info.sgml index 088a9df45..d9a29d3e7 100644 --- a/doc/src/sgml/ref/pcp_watchdog_info.sgml +++ b/doc/src/sgml/ref/pcp_watchdog_info.sgml @@ -79,7 +79,7 @@ Pgpool-II documentation 3 NO Linux_host1.localdomain_9991 host1 Linux_host1.localdomain_9991 host1 9991 9001 7 STANDBY - Linux_host2.localdomain_9992 host2 9992 9002 4 MASTER + Linux_host2.localdomain_9992 host2 9992 9002 4 LEADER Linux_host3.localdomain_9993 host3 9993 9003 7 STANDBY @@ -90,8 +90,8 @@ Pgpool-II documentation 1. Total watchdog nodes in the cluster 2. Is VIP up on the current node? - 3. Master node name - 4. Master node host + 3. Leader node name + 4. Leader node host Next is the list of watchdog nodes: @@ -116,8 +116,8 @@ Pgpool-II documentation Quorum state : QUORUM EXIST Alive Remote Nodes : 2 VIP up on local node : NO - Master Node Name : Linux_host2.localdomain_9992 - Master Host Name : localhost + Leader Node Name : Linux_host2.localdomain_9992 + Leader Host Name : localhost Watchdog Node Information Node Name : Linux_host1.localdomain_9991 @@ -136,7 +136,7 @@ Watchdog port : 9002 Node priority : 1 Status : 4 - Status Name : MASTER + Status Name : LEADER Node Name : Linux_host3.localdomain_9993 Host Name : host3 diff --git a/doc/src/sgml/ref/pgpool_setup.sgml index 3a0800e9d..c86ebec9b 100644 --- a/doc/src/sgml/ref/pgpool_setup.sgml +++ b/doc/src/sgml/ref/pgpool_setup.sgml @@ -301,7 +301,7 @@ temporarily start pgpool-II to create standby nodes recovery node 1...pcp_recovery_node -- Command Successful done.
-creating follow master script +creating follow primary script node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay | replication_state | replication_sync_state | last_status_change ---------+----------+-------+--------+-----------+---------+------------+-------------------+-------------------+-------------------+------------------------+--------------------- 0 | /tmp | 11002 | up | 0.500000 | primary | 0 | true | 0 | | | 2020-08-18 13:50:19 diff --git a/doc/src/sgml/ref/pgpool_show.sgml b/doc/src/sgml/ref/pgpool_show.sgml index b592beea2..cc097d0d2 100644 --- a/doc/src/sgml/ref/pgpool_show.sgml +++ b/doc/src/sgml/ref/pgpool_show.sgml @@ -132,10 +132,10 @@ PostgreSQL documentation - Show the current setting of the parameter : + Show the current setting of the parameter : - PGPOOL SHOW black_function_list; - black_function_list + PGPOOL SHOW write_function_list; + write_function_list --------------------- nextval,setval (1 row) diff --git a/doc/src/sgml/ref/show_pool_health_check_stats.sgml b/doc/src/sgml/ref/show_pool_health_check_stats.sgml index ce85516fd..ff2a0e34d 100644 --- a/doc/src/sgml/ref/show_pool_health_check_stats.sgml +++ b/doc/src/sgml/ref/show_pool_health_check_stats.sgml @@ -91,7 +91,7 @@ role Role of the node. Either primary or standby in streaming - replication mode. Either master or slave in other mode. + replication mode. Either main or replica in other mode. diff --git a/doc/src/sgml/ref/watchdog_setup.sgml b/doc/src/sgml/ref/watchdog_setup.sgml index 69f3147b3..9b932db84 100644 --- a/doc/src/sgml/ref/watchdog_setup.sgml +++ b/doc/src/sgml/ref/watchdog_setup.sgml @@ -249,7 +249,7 @@ Pgpool-II documentation recovery node 1...pcp_recovery_node -- Command Successful done. - creating follow master script + creating follow primary script Pager usage is off. node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay ---------+----------+-------+--------+-----------+---------+------------+-------------------+------------------- @@ -294,7 +294,7 @@ Pgpool-II documentation recovery node 1...pcp_recovery_node -- Command Successful done. - creating follow master script + creating follow primary script Pager usage is off. node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay ---------+----------+-------+--------+-----------+---------+------------+-------------------+------------------- @@ -339,7 +339,7 @@ Pgpool-II documentation recovery node 1...pcp_recovery_node -- Command Successful done. - creating follow master script + creating follow primary script Pager usage is off. 
node_id | hostname | port | status | lb_weight | role | select_cnt | load_balance_node | replication_delay ---------+----------+-------+--------+-----------+---------+------------+-------------------+------------------- @@ -389,8 +389,8 @@ Pgpool-II documentation Quorum state : QUORUM EXIST Alive Remote Nodes : 2 VIP up on local node : NO - Master Node Name : Linux_tishii-CF-SX3HE4BP_50004 - Master Host Name : localhost + Leader Node Name : Linux_tishii-CF-SX3HE4BP_50004 + Leader Host Name : localhost Watchdog Node Information Node Name : Linux_tishii-CF-SX3HE4BP_50000 @@ -409,7 +409,7 @@ Pgpool-II documentation Watchdog port : 50006 Node priority : 1 Status : 4 - Status Name : MASTER + Status Name : LEADER Node Name : Linux_tishii-CF-SX3HE4BP_50008 Host Name : localhost diff --git a/doc/src/sgml/release-3.4.sgml b/doc/src/sgml/release-3.4.sgml index 938534a52..7f92a5c7f 100644 --- a/doc/src/sgml/release-3.4.sgml +++ b/doc/src/sgml/release-3.4.sgml @@ -266,7 +266,7 @@ 2019-04-18 [233673c] --> - Fix 's broken default value. (Tatsuo Ishii) + Fix 's broken default value. (Tatsuo Ishii) diff --git a/doc/src/sgml/release-3.5.sgml b/doc/src/sgml/release-3.5.sgml index a25d33e07..1154673a7 100644 --- a/doc/src/sgml/release-3.5.sgml +++ b/doc/src/sgml/release-3.5.sgml @@ -669,7 +669,7 @@ 2019-04-18 [157c5fe] --> - Fix 's broken default value. (Tatsuo Ishii) + Fix 's broken default value. (Tatsuo Ishii) diff --git a/doc/src/sgml/release-3.6.sgml b/doc/src/sgml/release-3.6.sgml index c7711ce36..626509e49 100644 --- a/doc/src/sgml/release-3.6.sgml +++ b/doc/src/sgml/release-3.6.sgml @@ -924,7 +924,7 @@ 2019-04-18 [f3742eb] --> - Fix 's broken default value. (Tatsuo Ishii) + Fix 's broken default value. (Tatsuo Ishii) @@ -1537,7 +1537,7 @@ 2018-08-20 [900afee] --> - Doc: Change description "new master" to "new primary". (Bo Peng) + Doc: Change description "new master" to "new primary". (Bo Peng) diff --git a/doc/src/sgml/release-3.7.sgml b/doc/src/sgml/release-3.7.sgml index 62d1f050d..fc3526f1d 100644 --- a/doc/src/sgml/release-3.7.sgml +++ b/doc/src/sgml/release-3.7.sgml @@ -1086,7 +1086,7 @@ 2019-04-18 [56a1fd8] --> - Fix 's broken default value. (Tatsuo Ishii) + Fix 's broken default value. (Tatsuo Ishii) @@ -1725,7 +1725,7 @@ 2018-08-20 [57b43ae] --> - Doc: Change description "new master" to "new primary". (Bo Peng) + Doc: Change description "new master" to "new primary". (Bo Peng) diff --git a/doc/src/sgml/release-4.0.sgml b/doc/src/sgml/release-4.0.sgml index bf26ef2b8..360194530 100644 --- a/doc/src/sgml/release-4.0.sgml +++ b/doc/src/sgml/release-4.0.sgml @@ -1230,7 +1230,7 @@ 2019-04-18 [9b1bb1f] --> - Fix 's broken default value. (Tatsuo Ishii) + Fix 's broken default value. (Tatsuo Ishii) @@ -1416,10 +1416,10 @@ 2019-03-05 [5a8d511] --> - Doc: add more explanation to . (Tatsuo Ishii) + Doc: add more explanation to . (Tatsuo Ishii) - Add description how is executed etc. + Add description how is executed etc. @@ -2122,7 +2122,7 @@ 2018-06-14 [83906d1] --> - Add new parameter to enable + Add new parameter to enable specifying SQL patterns lists that should not be load-balanced. (Bo Peng) diff --git a/doc/src/sgml/release-4.1.sgml b/doc/src/sgml/release-4.1.sgml index 2dfe29b3c..34d4ffd85 100644 --- a/doc/src/sgml/release-4.1.sgml +++ b/doc/src/sgml/release-4.1.sgml @@ -1130,8 +1130,8 @@ sent to primary only until the end of this transaction in order to avoid the replication delay. 
However, the SELECTs having write functions specified - in or not specified - in are not regarded + in or not specified + in are not regarded as a write query and the subsequent read queries are still load balanced. This commit will disable load balance after a SELECT having functions specified in black function list @@ -1467,7 +1467,7 @@ Enhance explanation about - and . (tatsuo + and . (tatsuo ishii) diff --git a/doc/src/sgml/release-4.2.sgml b/doc/src/sgml/release-4.2.sgml index 5912d59d7..99b95f6d3 100644 --- a/doc/src/sgml/release-4.2.sgml +++ b/doc/src/sgml/release-4.2.sgml @@ -72,8 +72,8 @@ Now it is possible to omit and by looking at system catalog + linkend="guc-write-function-list"> and by looking at system catalog information. @@ -220,8 +220,8 @@ Author: Bo Peng - and are now both empty. + and are now both empty. @@ -438,8 +438,8 @@ Author: Bo Peng If a function included in SELECT/WITH has volatile property by checking system catalog, regard it a writing function. This feature is available only when the and the are both empty. The default of + linkend="guc-write-function-list"> and the are both empty. The default of the black_function_list and the white_function_list are now empty string so that this feature is enabled by default. @@ -460,8 +460,8 @@ Author: Bo Peng Also now schema qualified function names in and are allowed. Note that if you + linkend="guc-write-function-list"> and are allowed. Note that if you want to register schema qualified function names, you have to register function names without schema qualification as well. diff --git a/doc/src/sgml/restrictions.sgml b/doc/src/sgml/restrictions.sgml index ab8208248..19932f7dc 100644 --- a/doc/src/sgml/restrictions.sgml +++ b/doc/src/sgml/restrictions.sgml @@ -31,7 +31,7 @@ Multi-statement queries (multiple SQL commands on single line) are always sent to primary node (in streaming replication mode) - or master node (in other + or main node (in other modes). Usually Pgpool-II dispatch query to appropriate node, but it's not applied to multi-statement queries. @@ -43,7 +43,7 @@ Authentication/Access Controls - In the replication mode or master/slave mode, trust and pam methods are supported. md5 is also + In the replication mode or native replication mode, trust and pam methods are supported. md5 is also supported since Pgpool-II 3.0. md5 is supported by using an authentication file pool_passwd. scram-sha-256, cert, and clear text password is also supported since @@ -126,8 +126,8 @@ Creating/inserting/updating/deleting temporary tables are - always executed on the master (primary) in master slave mode. - SELECT on these tables is executed on master as well. However + always executed on the primary in native replication mode. + SELECT on these tables is executed on primary as well. However if the temporary table name is used as a literal in SELECT, there's no way to detect it, and the SELECT will be load balanced. That will trigger a "not found the table" error or @@ -142,7 +142,7 @@ SELECT 't1'::regclass::oid; In such that case Pgpool-II always - sends the query to master and will not cause the problem. + sends the query to primary and will not cause the problem. @@ -183,7 +183,7 @@ using CURRENT_TIMESTAMP, CURRENT_DATE, now() as their DEFAULT values will also be replicated correctly. This is done by replacing those - functions by constants fetched from master at query execution + functions by constants fetched from primary at query execution time. 
There are a few limitations however: diff --git a/doc/src/sgml/runtime.sgml index d70598c25..4f9915208 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -169,7 +169,7 @@ - master slave mode + slony mode @@ -187,7 +187,7 @@ There are six different running modes in Pgpool-II: streaming replication mode, logical - replication mode, master slave mode (slony mode), native + replication mode, slony mode, native replication mode, raw mode and snapshot isolation mode. In any mode, Pgpool-II provides connection pooling, and automatic fail over. @@ -225,7 +225,7 @@ - The master slave mode mode (slony mode) + The slony mode can be used with PostgreSQL servers operating Slony. In this mode, Slony/PostgreSQL is @@ -356,10 +356,10 @@ end of the parameter name (e.g. backend_hostname0). This number is referred to as "DB node ID", and it starts from 0. The backend which was given the DB node ID of 0 will be - called "Master DB". When multiple backends are defined, the - service can be continued even if the Master DB is down (not + called "Main DB". When multiple backends are defined, the + service can be continued even if the Main DB is down (not true in some modes). In this case, the youngest DB node ID - alive will be the new Master DB. + alive will be the new Main DB.
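For instance, a minimal two-backend definition might look like the following pgpool.conf sketch (host names and ports are assumptions); the backend numbered 0 is the Main DB, and if it goes down the youngest surviving node id, here node 1, becomes the new Main DB:

 backend_hostname0 = 'server1'
 backend_port0 = 5432
 backend_weight0 = 1
 backend_hostname1 = 'server2'
 backend_port1 = 5432
 backend_weight1 = 1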
diff --git a/doc/src/sgml/start.sgml index 9ca51ba88..ad9ed0890 100644 --- a/doc/src/sgml/start.sgml +++ b/doc/src/sgml/start.sgml @@ -266,23 +266,23 @@ # %h = host name # %p = port number # %D = database cluster path - # %m = new master node id - # %M = old master node id - # %H = new master node host name + # %m = new main node id + # %M = old main node id + # %H = new main node host name # %P = old primary node id - # %R = new master database cluster path - # %r = new master port number + # %R = new main database cluster path + # %r = new main port number # %% = '%' character failed_node_id=$1 failed_host_name=$2 failed_port=$3 failed_db_cluster=$4 - new_master_id=$5 - old_master_id=$6 - new_master_host_name=$7 + new_main_id=$5 + old_main_id=$6 + new_main_host_name=$7 old_primary_node_id=$8 - new_master_port_number=$9 - new_master_db_cluster=${10} + new_main_port_number=$9 + new_main_db_cluster=${10} mydir=/home/t-ishii/tmp/Tutorial log=$mydir/log/failover.log pg_ctl=/usr/local/pgsql/bin/pg_ctl cluster1=$mydir/data1 date >> $log - echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_master_id $new_master_id old_master_id $old_master_id new_master_host_name $new_master_host_name old_primary_node_id $old_primary_node_id new_master_port_number $new_master_port_number new_master_db_cluster $new_master_db_cluster" >> $log + echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_main_id $new_main_id old_main_id $old_main_id new_main_host_name $new_main_host_name old_primary_node_id $old_primary_node_id new_main_port_number $new_main_port_number new_main_db_cluster $new_main_db_cluster" >> $log - if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # master failed - ! new_primary_db_cluster=${mydir}/data"$new_master_id" + if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # main failed + ! new_primary_db_cluster=${mydir}/data"$new_main_id" echo $pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over $pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over sleep 2 @@ -349,7 +349,7 @@ DATADIR_BASE=/home/t-ishii/tmp/Tutorial PGSUPERUSER=t-ishii - master_db_cluster=$1 + main_db_cluster=$1 recovery_node_host_name=$2 DEST_CLUSTER=$3 PORT=$4 @@ -364,13 +364,13 @@ $psql -p $PORT -c "SELECT pg_start_backup('Streaming Replication', true)" postgres - echo "source: $master_db_cluster dest: $DEST_CLUSTER" >> $log + echo "source: $main_db_cluster dest: $DEST_CLUSTER" >> $log rsync -C -a -c --delete --exclude postgresql.conf --exclude postmaster.pid \ --exclude postmaster.opts --exclude pg_log \ --exclude recovery.conf --exclude recovery.done \ --exclude pg_xlog \ - $master_db_cluster/ $DEST_CLUSTER/ + $main_db_cluster/ $DEST_CLUSTER/ rm -fr $DEST_CLUSTER/pg_xlog mkdir $DEST_CLUSTER/pg_xlog diff --git a/doc/src/sgml/watchdog.sgml index b62020ffe..be177c8b2 100644 --- a/doc/src/sgml/watchdog.sgml +++ b/doc/src/sgml/watchdog.sgml @@ -397,7 +397,7 @@ Watchdog executes this command on the node that is escalated - to the master watchdog. + to the leader watchdog. This command is executed just before bringing up the @@ -417,10 +417,10 @@ - Watchdog executes this command on the master Pgpool-II - watchdog node when that node resigns from the master node responsibilities. - A master watchdog node can resign from being a master node, - when the master node Pgpool-II shuts down, detects a network + Watchdog executes this command on the leader Pgpool-II + watchdog node when that node resigns from the leader node responsibilities. + A leader watchdog node can resign from being a leader node, + when the leader node Pgpool-II shuts down, detects a network blackout or detects the loss of quorum. @@ -520,10 +520,10 @@ Although there are many similarities in quarantine and failover operations, they differ in a very fundamental way. The quarantine operation does not execute the - and silently detaches the problematic node, So in the case when the master backend node is quarantined, the - Pgpool-II will not promote the standby to take over the master responsibilities - and until the master node is quarantined the Pgpool-II will not have - any usable master backend node. + and silently detaches the problematic node. So in the case when the main backend node is quarantined, + Pgpool-II will not promote the standby to take over the main node responsibilities, + and as long as the main node remains quarantined Pgpool-II will not have + any usable main backend node. Moreover, unlike for the failed nodes, @@ -538,14 +538,14 @@ solved. - From Pgpool-II V4.1 onward, if the watchdog-master node + From Pgpool-II V4.1 onward, if the watchdog-leader node fails to build the consensus for primary backend node failover and the primary backend node gets into a - quarantine state, then it resigns from its master/coordinator responsibilities and lowers its wd_priority + quarantine state, then it resigns from its leader/coordinator responsibilities and lowers its wd_priority for the next leader election and lets the cluster elect a different new leader. - When the master node fails to build the consensus for standby backend node failure, it takes no action - and similarly quarantined standby backend nodes on watchdog-master do not trigger a new leader election. + When the leader node fails to build the consensus for standby backend node failure, it takes no action + and similarly quarantined standby backend nodes on the watchdog-leader do not trigger a new leader election.
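For reference, the consensus behavior described here is driven by the failover-consensus settings introduced in Pgpool-II 4.1; a sketch of a typical combination in pgpool.conf (the values are illustrative only, not a recommendation):

 failover_when_quorum_exists = on
 failover_require_consensus = on
 allow_multiple_failover_requests_from_node = off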
+ When the leader node fails to build the consensus for standby backend node failure, it takes no action + and similarly quarantined standby backend nodes on watchdog-leader do not trigger a new leader election. @@ -710,7 +710,7 @@ cluster goes into two separated networks (A, B) and (C, D). For (A, B) and (C, D) the quorum still exist since for both groups there are two live nodes out of 4. The two groups choose their - own master watchdog, which is a split-brain. + own leader watchdog, which is a split-brain. Default is off. @@ -844,15 +844,15 @@ This parameter can be used to elevate the local watchdog node priority in the elections - to select master watchdog node. + to select leader watchdog node. The node with the higher wd_priority value will get selected - as master watchdog node when cluster will be electing its new master node - in the event of old master watchdog node failure. + as leader watchdog node when cluster will be electing its new leader node + in the event of old leader watchdog node failure. wd_priority is also valid at the time of cluster startup. When some watchdog nodes start up at same time,a node with the higher wd_priority - value is selected as a master node. + value is selected as a leader node. So we should start watchdog nodes in order of wd_priority priority to prevent - unintended nodes from being selected as masters. + unintended nodes from being selected as leader. wd_priority is not available in versions prior to diff --git a/src/Makefile.am b/src/Makefile.am index 0b18141ae..deb839274 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -79,7 +79,7 @@ sysconf_DATA = sample/pgpool.conf.sample \ sample/pgpool.conf.sample-raw \ sample/pgpool.conf.sample-snapshot \ sample/scripts/failover.sh.sample \ - sample/scripts/follow_master.sh.sample \ + sample/scripts/follow_primary.sh.sample \ sample/scripts/pgpool_remote_start.sample \ sample/scripts/recovery_1st_stage.sample \ sample/scripts/recovery_2nd_stage.sample @@ -105,7 +105,7 @@ AM_YFLAGS = -d EXTRA_DIST = sample/pgpool.pam \ sample/scripts/failover.sh.sample \ - sample/scripts/follow_master.sh.sample \ + sample/scripts/follow_primary.sh.sample \ sample/scripts/pgpool_remote_start.sample \ sample/scripts/recovery_1st_stage.sample \ sample/scripts/recovery_2nd_stage.sample \ diff --git a/src/Makefile.in b/src/Makefile.in index d5535f30e..4a678f1fa 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -507,7 +507,7 @@ sysconf_DATA = sample/pgpool.conf.sample \ sample/pgpool.conf.sample-raw \ sample/pgpool.conf.sample-snapshot \ sample/scripts/failover.sh.sample \ - sample/scripts/follow_master.sh.sample \ + sample/scripts/follow_primary.sh.sample \ sample/scripts/pgpool_remote_start.sample \ sample/scripts/recovery_1st_stage.sample \ sample/scripts/recovery_2nd_stage.sample @@ -528,7 +528,7 @@ pgpool_LDADD = -L@PGSQL_LIB_DIR@ -lpq parser/libsql-parser.a \ AM_YFLAGS = -d EXTRA_DIST = sample/pgpool.pam \ sample/scripts/failover.sh.sample \ - sample/scripts/follow_master.sh.sample \ + sample/scripts/follow_primary.sh.sample \ sample/scripts/pgpool_remote_start.sample \ sample/scripts/recovery_1st_stage.sample \ sample/scripts/recovery_2nd_stage.sample \ diff --git a/src/auth/pool_auth.c b/src/auth/pool_auth.c index bc0462680..b60842dde 100644 --- a/src/auth/pool_auth.c +++ b/src/auth/pool_auth.c @@ -345,7 +345,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) StartupPacket *sp; - protoMajor = MASTER_CONNECTION(cp)->sp->major; + protoMajor = MAIN_CONNECTION(cp)->sp->major; kind = 
pool_read_kind(cp); if (kind < 0) @@ -433,7 +433,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) msglen = htonl(0); pool_write_and_flush(frontend, &msglen, sizeof(msglen)); - MASTER(cp)->auth_kind = AUTH_REQ_OK; + MAIN(cp)->auth_kind = AUTH_REQ_OK; } /* clear text password authentication? */ @@ -498,7 +498,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) */ if (!RAW_MODE && NUM_BACKENDS > 1) { - if (get_auth_password(MASTER(cp), frontend, 0, + if (get_auth_password(MAIN(cp), frontend, 0, &password, &passwordType) == false) { /* @@ -514,7 +514,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) errhint("you can disable this behavior by setting allow_clear_text_frontend_auth to off"))); authenticate_frontend_clear_text(frontend); /* now check again if we have a password now */ - if (get_auth_password(MASTER(cp), frontend, 0, + if (get_auth_password(MAIN(cp), frontend, 0, &password, &passwordType) == false) { ereport(ERROR, @@ -560,7 +560,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) char *password; PasswordType passwordType = PASSWORD_TYPE_UNKNOWN; - if (get_auth_password(MASTER(cp), frontend, 0, + if (get_auth_password(MAIN(cp), frontend, 0, &password, &passwordType) == false) { /* @@ -575,7 +575,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) errhint("you can disable this behavior by setting allow_clear_text_frontend_auth to off"))); authenticate_frontend_clear_text(frontend); /* now check again if we have a password now */ - if (get_auth_password(MASTER(cp), frontend, 0, + if (get_auth_password(MAIN(cp), frontend, 0, &password, &passwordType) == false) { ereport(ERROR, @@ -675,7 +675,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) break; case 'N': - if (pool_extract_error_message(false, MASTER(cp), protoMajor, true, &message) == 1) + if (pool_extract_error_message(false, MAIN(cp), protoMajor, true, &message) == 1) { ereport(NOTICE, (errmsg("notice from backend"), @@ -693,7 +693,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) /* process error message */ case 'E': - if (pool_extract_error_message(false, MASTER(cp), protoMajor, true, &message) == 1) + if (pool_extract_error_message(false, MAIN(cp), protoMajor, true, &message) == 1) { ereport(LOG, (errmsg("backend throws an error message"), @@ -758,7 +758,7 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) /* * OK, read pid and secret key */ - sp = MASTER_CONNECTION(cp)->sp; + sp = MAIN_CONNECTION(cp)->sp; pid = -1; for (i = 0; i < NUM_BACKENDS; i++) @@ -826,7 +826,7 @@ pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) */ if (!frontend->frontend_authenticated) { - switch (MASTER(cp)->auth_kind) + switch (MAIN(cp)->auth_kind) { case AUTH_REQ_OK: /* trust */ @@ -834,26 +834,26 @@ pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) case AUTH_REQ_PASSWORD: /* clear text password */ - do_clear_text_password(MASTER(cp), frontend, 1, protoMajor); + do_clear_text_password(MAIN(cp), frontend, 1, protoMajor); break; case AUTH_REQ_CRYPT: /* crypt password */ - do_crypt(MASTER(cp), frontend, 1, protoMajor); + do_crypt(MAIN(cp), frontend, 1, protoMajor); break; case AUTH_REQ_MD5: /* md5 password */ - authenticate_frontend_md5(MASTER(cp), frontend, 1, protoMajor); + authenticate_frontend_md5(MAIN(cp), frontend, 1, protoMajor); break; case AUTH_REQ_SASL: /* SCRAM */ - authenticate_frontend_SCRAM(MASTER(cp), frontend, 1); + 
authenticate_frontend_SCRAM(MAIN(cp), frontend, 1); break; default: ereport(ERROR, (errmsg("authentication failed"), - errdetail("unknown authentication request code %d", MASTER(cp)->auth_kind))); + errdetail("unknown authentication request code %d", MAIN(cp)->auth_kind))); } } @@ -867,7 +867,7 @@ pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) msglen = htonl(0); pool_write_and_flush(frontend, &msglen, sizeof(msglen)); - pool_send_backend_key_data(frontend, MASTER_CONNECTION(cp)->pid, MASTER_CONNECTION(cp)->key, protoMajor); + pool_send_backend_key_data(frontend, MAIN_CONNECTION(cp)->pid, MAIN_CONNECTION(cp)->key, protoMajor); return 0; } @@ -885,11 +885,11 @@ pool_send_auth_fail(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp) protoMajor = MAJOR(cp); - messagelen = strlen(MASTER_CONNECTION(cp)->sp->user) + 100; + messagelen = strlen(MAIN_CONNECTION(cp)->sp->user) + 100; errmessage = (char *) palloc(messagelen + 1); snprintf(errmessage, messagelen, "password authentication failed for user \"%s\"", - MASTER_CONNECTION(cp)->sp->user); + MAIN_CONNECTION(cp)->sp->user); if (send_error_to_frontend) pool_send_fatal_message(frontend, protoMajor, "XX000", errmessage, "", "", __FILE__, __LINE__); @@ -1100,7 +1100,7 @@ do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, in /* if authenticated, save info */ if (kind == AUTH_REQ_OK) { - if (IS_MASTER_NODE_ID(backend->db_node_id)) + if (IS_MAIN_NODE_ID(backend->db_node_id)) { send_auth_ok(frontend, protoMajor); } @@ -1136,8 +1136,8 @@ do_crypt(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int memcpy(salt, backend->salt, sizeof(salt)); } - /* master? */ - if (IS_MASTER_NODE_ID(backend->db_node_id)) + /* main? */ + if (IS_MAIN_NODE_ID(backend->db_node_id)) { pool_write(frontend, "R", 1); /* authentication */ if (protoMajor == PROTO_MAJOR_V3) @@ -1780,8 +1780,8 @@ do_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int pr errdetail("unable to get the password"))); } - /* master? */ - if (IS_MASTER_NODE_ID(backend->db_node_id) && frontend->frontend_authenticated == false) + /* main? */ + if (IS_MAIN_NODE_ID(backend->db_node_id) && frontend->frontend_authenticated == false) { /* * If frontend is not authenticated and do it it first. but if we have @@ -1834,7 +1834,7 @@ do_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int pr if (!reauth && kind == 0) { - if (IS_MASTER_NODE_ID(backend->db_node_id)) + if (IS_MAIN_NODE_ID(backend->db_node_id)) { /* Send auth ok to frontend */ send_auth_ok(frontend, protoMajor); @@ -2107,8 +2107,8 @@ do_SCRAM(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoMajor, errdetail("password not found"))); } - /* master? */ - if (frontend && IS_MASTER_NODE_ID(backend->db_node_id) && frontend->frontend_authenticated == false) + /* main? */ + if (frontend && IS_MAIN_NODE_ID(backend->db_node_id) && frontend->frontend_authenticated == false) { /* * If frontend is not authenticated and do it it first. 
but if we have diff --git a/src/config/pool_config.c b/src/config/pool_config.c index a7340e73f..112c0f572 100644 --- a/src/config/pool_config.c +++ b/src/config/pool_config.c @@ -1856,7 +1856,7 @@ int pool_init_config(void) /* * Add regex expression to patterns array - * The supported type are: black_function_list and white_function_list + * The supported type are: write_function_list and read_only_function_list * Return 0 on error, 1 on success */ int add_regex_pattern(const char *type, char *s) @@ -1868,16 +1868,16 @@ int add_regex_pattern(const char *type, char *s) /* Add extended regex search */ regex_flags |= REG_EXTENDED; /* Fill the pattern type */ - if (strcmp(type, "black_function_list") == 0 || - strcmp(type, "black_query_pattern_list") == 0 || - strcmp(type, "black_memqcache_table_list") == 0) + if (strcmp(type, "write_function_list") == 0 || + strcmp(type, "primary_routing_query_pattern_list") == 0 || + strcmp(type, "cache_unsafe_memqcache_table_list") == 0) { - currItem.type = BLACKLIST; + currItem.type = WRITELIST; } - else if (strcmp(type, "white_function_list") == 0 || - strcmp(type, "white_memqcache_table_list") == 0) + else if (strcmp(type, "read_only_function_list") == 0 || + strcmp(type, "cache_safe_memqcache_table_list") == 0) { - currItem.type = WHITELIST; + currItem.type = READONLYLIST; } else { @@ -1913,23 +1913,23 @@ int add_regex_pattern(const char *type, char *s) ereport(WARNING, (errmsg("unable to add regex pattern for \"%s\", invalid pattern: \"%s\"", type,currItem.pattern))); } - else if ((strcmp(type, "white_function_list") == 0 || - strcmp(type, "black_function_list") == 0) && + else if ((strcmp(type, "read_only_function_list") == 0 || + strcmp(type, "write_function_list") == 0) && growFunctionPatternArray(currItem) < 0) { ereport(WARNING, (errmsg("unable to add regex pattern for \"%s\", unable to allocate new pattern", type))); return 0; } - else if ((strcmp(type, "white_memqcache_table_list") == 0 || - strcmp(type, "black_memqcache_table_list") == 0) && + else if ((strcmp(type, "cache_safe_memqcache_table_list") == 0 || + strcmp(type, "cache_unsafe_memqcache_table_list") == 0) && growMemqcacheTablePatternArray(currItem) < 0) { ereport(WARNING, (errmsg("unable to add regex pattern for \"%s\", unable to allocate new pattern", type))); return 0; } - else if (strcmp(type, "black_query_pattern_list") == 0 && + else if (strcmp(type, "primary_routing_query_pattern_list") == 0 && growQueryPatternArray(currItem) < 0) { ereport(WARNING, @@ -2310,12 +2310,12 @@ char *pool_flag_to_str(unsigned short flag) else if (POOL_DISALLOW_TO_FAILOVER(flag)) snprintf(buf, sizeof(buf), "DISALLOW_TO_FAILOVER"); - if (POOL_ALWAYS_MASTER & flag) + if (POOL_ALWAYS_PRIMARY & flag) { if (*buf == '\0') - snprintf(buf, sizeof(buf), "ALWAYS_MASTER"); + snprintf(buf, sizeof(buf), "ALWAYS_PRIMARY"); else - snprintf(buf+strlen(buf), sizeof(buf), "|ALWAYS_MASTER"); + snprintf(buf+strlen(buf), sizeof(buf), "|ALWAYS_PRIMARY"); } return buf; diff --git a/src/config/pool_config.l b/src/config/pool_config.l index 026ae6e92..f9a502d3b 100644 --- a/src/config/pool_config.l +++ b/src/config/pool_config.l @@ -125,7 +125,7 @@ int pool_init_config(void) /* * Add regex expression to patterns array - * The supported type are: black_function_list and white_function_list + * The supported type are: write_function_list and read_only_function_list * Return 0 on error, 1 on success */ int add_regex_pattern(const char *type, char *s) @@ -137,16 +137,16 @@ int add_regex_pattern(const char *type, char *s) /* Add 
extended regex search */ regex_flags |= REG_EXTENDED; /* Fill the pattern type */ - if (strcmp(type, "black_function_list") == 0 || - strcmp(type, "black_query_pattern_list") == 0 || - strcmp(type, "black_memqcache_table_list") == 0) + if (strcmp(type, "write_function_list") == 0 || + strcmp(type, "primary_routing_query_pattern_list") == 0 || + strcmp(type, "cache_unsafe_memqcache_table_list") == 0) { - currItem.type = BLACKLIST; + currItem.type = WRITELIST; } - else if (strcmp(type, "white_function_list") == 0 || - strcmp(type, "white_memqcache_table_list") == 0) + else if (strcmp(type, "read_only_function_list") == 0 || + strcmp(type, "cache_safe_memqcache_table_list") == 0) { - currItem.type = WHITELIST; + currItem.type = READONLYLIST; } else { @@ -182,23 +182,23 @@ int add_regex_pattern(const char *type, char *s) ereport(WARNING, (errmsg("unable to add regex pattern for \"%s\", invalid pattern: \"%s\"", type,currItem.pattern))); } - else if ((strcmp(type, "white_function_list") == 0 || - strcmp(type, "black_function_list") == 0) && + else if ((strcmp(type, "read_only_function_list") == 0 || + strcmp(type, "write_function_list") == 0) && growFunctionPatternArray(currItem) < 0) { ereport(WARNING, (errmsg("unable to add regex pattern for \"%s\", unable to allocate new pattern", type))); return 0; } - else if ((strcmp(type, "white_memqcache_table_list") == 0 || - strcmp(type, "black_memqcache_table_list") == 0) && + else if ((strcmp(type, "cache_safe_memqcache_table_list") == 0 || + strcmp(type, "cache_unsafe_memqcache_table_list") == 0) && growMemqcacheTablePatternArray(currItem) < 0) { ereport(WARNING, (errmsg("unable to add regex pattern for \"%s\", unable to allocate new pattern", type))); return 0; } - else if (strcmp(type, "black_query_pattern_list") == 0 && + else if (strcmp(type, "primary_routing_query_pattern_list") == 0 && growQueryPatternArray(currItem) < 0) { ereport(WARNING, @@ -579,12 +579,12 @@ char *pool_flag_to_str(unsigned short flag) else if (POOL_DISALLOW_TO_FAILOVER(flag)) snprintf(buf, sizeof(buf), "DISALLOW_TO_FAILOVER"); - if (POOL_ALWAYS_MASTER & flag) + if (POOL_ALWAYS_PRIMARY & flag) { if (*buf == '\0') - snprintf(buf, sizeof(buf), "ALWAYS_MASTER"); + snprintf(buf, sizeof(buf), "ALWAYS_PRIMARY"); else - snprintf(buf+strlen(buf), sizeof(buf), "|ALWAYS_MASTER"); + snprintf(buf+strlen(buf), sizeof(buf), "|ALWAYS_PRIMARY"); } return buf; diff --git a/src/config/pool_config_variables.c b/src/config/pool_config_variables.c index 28a5ad41d..b5fab2ee5 100644 --- a/src/config/pool_config_variables.c +++ b/src/config/pool_config_variables.c @@ -49,7 +49,7 @@ #endif #define default_reset_query_list "ABORT;DISCARD ALL" -#define default_black_function_list "nextval,setval" +#define default_write_function_list "nextval,setval" #define EMPTY_CONFIG_GENERIC {NULL, 0, 0, NULL, 0, false, 0, 0, 0, 0, NULL, NULL} #define EMPTY_CONFIG_BOOL {EMPTY_CONFIG_GENERIC, NULL, false, NULL, NULL, NULL, false} @@ -229,7 +229,7 @@ static const struct config_enum_entry backend_clustering_mode_options[] = { {NULL, 0, false} }; -static const struct config_enum_entry master_slave_sub_mode_options[] = { +static const struct config_enum_entry native_replication_sub_mode_options[] = { {"slony", SLONY_MODE, false}, {"stream", STREAM_MODE, false}, {"logical", LOGICAL_MODE, false}, @@ -279,7 +279,7 @@ static const struct config_enum_entry disable_load_balance_on_write_options[] = }; static const struct config_enum_entry relcache_query_target_options[] = { - {"master", RELQTARGET_MASTER, false}, + 
{"primary", RELQTARGET_PRIMARY, false}, {"load_balance_node", RELQTARGET_LOAD_BALANCE_NODE, false}, {NULL, 0, false} }; @@ -482,7 +482,7 @@ static struct config_bool ConfigureNamesBool[] = { {"replication_stop_on_mismatch", CFGCXT_RELOAD, REPLICATION_CONFIG, - "Starts degeneration and stops replication, If there's a data mismatch between master and secondary.", + "Starts degeneration and stops replication, If there's a data mismatch between primary and secondary.", CONFIG_VAR_TYPE_BOOL, false, 0 }, &g_pool_config.replication_stop_on_mismatch, @@ -492,7 +492,7 @@ static struct config_bool ConfigureNamesBool[] = { {"failover_if_affected_tuples_mismatch", CFGCXT_RELOAD, REPLICATION_CONFIG, - "Starts degeneration, If there's a data mismatch between master and secondary.", + "Starts degeneration, If there's a data mismatch between primary and secondary.", CONFIG_VAR_TYPE_BOOL, false, 0 }, &g_pool_config.failover_if_affected_tuples_mismatch, @@ -511,11 +511,11 @@ static struct config_bool ConfigureNamesBool[] = }, { - {"master_slave_mode", CFGCXT_INIT, MASTER_SLAVE_CONFIG, - "Enables Master/Slave mode.", + {"native_replication_mode", CFGCXT_INIT, NATIVE_REPLICATION_CONFIG, + "Enables Native Replication mode.", CONFIG_VAR_TYPE_BOOL, false, 0 }, - &g_pool_config.master_slave_mode, + &g_pool_config.native_replication_mode, false, NULL, NULL, NULL }, @@ -622,7 +622,7 @@ static struct config_bool ConfigureNamesBool[] = { {"clear_memqcache_on_escalation", CFGCXT_RELOAD, WATCHDOG_CONFIG, - "Clears the query cache in the shared memory when pgpool-II escaltes to master watchdog node.", + "Clears the query cache in the shared memory when pgpool-II escaltes to leader watchdog node.", CONFIG_VAR_TYPE_BOOL, false, 0 }, &g_pool_config.clear_memqcache_on_escalation, @@ -934,11 +934,11 @@ static struct config_string ConfigureNamesString[] = }, { - {"follow_master_command", CFGCXT_RELOAD, FAILOVER_CONFIG, - "Command to execute in master/slave streaming replication mode after a master node failover.", + {"follow_primary_command", CFGCXT_RELOAD, FAILOVER_CONFIG, + "Command to execute in streaming replication mode after a primary node failover.", CONFIG_VAR_TYPE_STRING, false, 0 }, - &g_pool_config.follow_master_command, + &g_pool_config.follow_primary_command, "", NULL, NULL, NULL, NULL }, @@ -1005,7 +1005,7 @@ static struct config_string ConfigureNamesString[] = { {"wd_escalation_command", CFGCXT_RELOAD, WATCHDOG_CONFIG, - "Command to execute when watchdog node becomes cluster master/leader node.", + "Command to execute when watchdog node becomes cluster leader node.", CONFIG_VAR_TYPE_STRING, false, 0 }, &g_pool_config.wd_escalation_command, @@ -1015,7 +1015,7 @@ static struct config_string ConfigureNamesString[] = { {"wd_de_escalation_command", CFGCXT_RELOAD, WATCHDOG_CONFIG, - "Command to execute when watchdog node resigns from the cluster master/leader node.", + "Command to execute when watchdog node resigns from the cluster leader node.", CONFIG_VAR_TYPE_STRING, false, 0 }, &g_pool_config.wd_de_escalation_command, @@ -1035,7 +1035,7 @@ static struct config_string ConfigureNamesString[] = { {"delegate_IP", CFGCXT_INIT, WATCHDOG_CONFIG, - "Delegate IP address to be used when pgpool node become a watchdog cluster master/leader.", + "Delegate IP address to be used when pgpool node become a watchdog cluster leader.", CONFIG_VAR_TYPE_STRING, false, 0 }, &g_pool_config.delegate_IP, @@ -1313,12 +1313,12 @@ static struct config_string_list ConfigureNamesStringList[] = }, { - {"white_function_list", CFGCXT_RELOAD, 
CONNECTION_POOL_CONFIG, + {"read_only_function_list", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG, "list of functions that does not writes to database.", CONFIG_VAR_TYPE_STRING_LIST, false, 0 }, - &g_pool_config.white_function_list, - &g_pool_config.num_white_function_list, + &g_pool_config.read_only_function_list, + &g_pool_config.num_read_only_function_list, NULL, ",", true, @@ -1326,24 +1326,24 @@ static struct config_string_list ConfigureNamesStringList[] = }, { - {"black_function_list", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG, + {"write_function_list", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG, "list of functions that writes to database.", CONFIG_VAR_TYPE_STRING_LIST, false, 0 }, - &g_pool_config.black_function_list, - &g_pool_config.num_black_function_list, - (const char *) default_black_function_list, + &g_pool_config.write_function_list, + &g_pool_config.num_write_function_list, + (const char *) default_write_function_list, ",", true, NULL, NULL, NULL }, { - {"white_memqcache_table_list", CFGCXT_RELOAD, CACHE_CONFIG, + {"cache_safe_memqcache_table_list", CFGCXT_RELOAD, CACHE_CONFIG, "list of tables to be cached.", CONFIG_VAR_TYPE_STRING_LIST, false, 0 }, - &g_pool_config.white_memqcache_table_list, - &g_pool_config.num_white_memqcache_table_list, + &g_pool_config.cache_safe_memqcache_table_list, + &g_pool_config.num_cache_safe_memqcache_table_list, NULL, ",", true, @@ -1351,12 +1351,12 @@ static struct config_string_list ConfigureNamesStringList[] = }, { - {"black_memqcache_table_list", CFGCXT_RELOAD, CACHE_CONFIG, + {"cache_unsafe_memqcache_table_list", CFGCXT_RELOAD, CACHE_CONFIG, "list of tables should not be cached.", CONFIG_VAR_TYPE_STRING_LIST, false, 0 }, - &g_pool_config.black_memqcache_table_list, - &g_pool_config.num_black_memqcache_table_list, + &g_pool_config.cache_unsafe_memqcache_table_list, + &g_pool_config.num_cache_unsafe_memqcache_table_list, NULL, ",", true, @@ -1364,12 +1364,12 @@ static struct config_string_list ConfigureNamesStringList[] = }, { - {"black_query_pattern_list", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG, + {"primary_routing_query_pattern_list", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG, "list of query patterns that should be sent to primary node.", CONFIG_VAR_TYPE_STRING_LIST, false, 0 }, - &g_pool_config.black_query_pattern_list, - &g_pool_config.num_black_query_pattern_list, + &g_pool_config.primary_routing_query_pattern_list, + &g_pool_config.num_primary_routing_query_pattern_list, NULL, ";", true, @@ -1673,7 +1673,7 @@ static struct config_string_array ConfigureNamesStringArray[] = CONFIG_VAR_TYPE_STRING_ARRAY, true, 0, MAX_NUM_BACKENDS }, NULL, - "", /* for ALWAYS_MASTER */ + "", /* for ALWAYS_PRIMARY */ EMPTY_CONFIG_STRING, BackendFlagsAssignFunc, NULL, BackendFlagsShowFunc, BackendSlotEmptyCheckFunc }, @@ -2113,7 +2113,7 @@ static struct config_int ConfigureNamesInt[] = static struct config_enum ConfigureNamesEnum[] = { { - {"backend_clustering_mode", CFGCXT_INIT, MASTER_SLAVE_CONFIG, + {"backend_clustering_mode", CFGCXT_INIT, NATIVE_REPLICATION_CONFIG, "backend clustering mode.", CONFIG_VAR_TYPE_ENUM, false, 0 }, @@ -2171,18 +2171,18 @@ static struct config_enum ConfigureNamesEnum[] = }, { - {"master_slave_sub_mode", CFGCXT_INIT, MASTER_SLAVE_CONFIG, - "master/slave sub mode.", + {"native_replication_sub_mode", CFGCXT_INIT, NATIVE_REPLICATION_CONFIG, + "native replication sub mode.", CONFIG_VAR_TYPE_ENUM, false, 0 }, - (int *) &g_pool_config.master_slave_sub_mode, + (int *) &g_pool_config.native_replication_sub_mode, STREAM_MODE, - 
master_slave_sub_mode_options, + native_replication_sub_mode_options, NULL, NULL, NULL, NULL }, { - {"log_standby_delay", CFGCXT_RELOAD, MASTER_SLAVE_CONFIG, + {"log_standby_delay", CFGCXT_RELOAD, NATIVE_REPLICATION_CONFIG, "When to log standby delay.", CONFIG_VAR_TYPE_ENUM, false, 0 }, @@ -2231,7 +2231,7 @@ static struct config_enum ConfigureNamesEnum[] = CONFIG_VAR_TYPE_ENUM, false, 0 }, (int *) &g_pool_config.relcache_query_target, - RELQTARGET_MASTER, + RELQTARGET_PRIMARY, relcache_query_target_options, NULL, NULL, NULL, NULL }, @@ -2813,7 +2813,7 @@ initialize_variables_with_default(struct config_generic *gconf) } else { - if (strcmp(gconf->name, "black_query_pattern_list") == 0) + if (strcmp(gconf->name, "primary_routing_query_pattern_list") == 0) { *conf->variable = get_list_from_string_regex_delim(newval, conf->seperator, conf->list_elements_count); } @@ -3803,7 +3803,7 @@ setConfigOptionVar(struct config_generic *record, const char *name, int index_va pfree(*conf->variable); } - if (strcmp(name, "black_query_pattern_list") == 0) + if (strcmp(name, "primary_routing_query_pattern_list") == 0) { *conf->variable = get_list_from_string_regex_delim(newval, conf->seperator, conf->list_elements_count); } @@ -4120,9 +4120,9 @@ BackendFlagsAssignFunc(ConfigContext context, char *newval, int index, int eleve disallow_to_failover_is_specified = true; } - else if ((!strcmp(flags[i], "ALWAYS_MASTER"))) + else if ((!strcmp(flags[i], "ALWAYS_PRIMARY"))) { - flag |= POOL_ALWAYS_MASTER; + flag |= POOL_ALWAYS_PRIMARY; } else @@ -4293,12 +4293,12 @@ BackendFlagsShowFunc(int index) else if (POOL_DISALLOW_TO_FAILOVER(flag)) snprintf(buffer, sizeof(buffer), "DISALLOW_TO_FAILOVER"); - if (POOL_ALWAYS_MASTER & flag) + if (POOL_ALWAYS_PRIMARY & flag) { if (*buffer == '\0') - snprintf(buffer, sizeof(buffer), "ALWAYS_MASTER"); + snprintf(buffer, sizeof(buffer), "ALWAYS_PRIMARY"); else - snprintf(buffer+strlen(buffer), sizeof(buffer), "|ALWAYS_MASTER"); + snprintf(buffer+strlen(buffer), sizeof(buffer), "|ALWAYS_PRIMARY"); } return buffer; } diff --git a/src/context/pool_query_context.c b/src/context/pool_query_context.c index 2c91762d1..d82923e0c 100644 --- a/src/context/pool_query_context.c +++ b/src/context/pool_query_context.c @@ -59,12 +59,12 @@ typedef enum static POOL_DEST send_to_where(Node *node, char *query); static void where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node); static char *remove_read_write(int len, const char *contents, int *rewritten_len); -static void set_virtual_master_node(POOL_QUERY_CONTEXT *query_context); +static void set_virtual_main_node(POOL_QUERY_CONTEXT *query_context); static void set_load_balance_info(POOL_QUERY_CONTEXT *query_context); static bool is_in_list(char *name, List *list); -static bool is_select_object_in_temp_black_list(Node *node, void *context); -static bool add_object_into_temp_black_list(Node *node, void *context); +static bool is_select_object_in_temp_write_list(Node *node, void *context); +static bool add_object_into_temp_write_list(Node *node, void *context); static void dml_adaptive(Node *node, char *query); static char* get_associated_object_from_dml_adaptive_relations (char *left_token, DBObjectTypes object_type); @@ -157,8 +157,8 @@ pool_start_query(POOL_QUERY_CONTEXT * query_context, char *query, int len, Node query_context->original_query = pstrdup(query); query_context->rewritten_query = NULL; query_context->parse_tree = node; - query_context->virtual_master_node_id = my_master_node_id; - query_context->load_balance_node_id = 
my_master_node_id; + query_context->virtual_main_node_id = my_main_node_id; + query_context->load_balance_node_id = my_main_node_id; query_context->is_cache_safe = false; query_context->num_original_params = -1; if (pool_config->memory_cache_enabled) @@ -304,7 +304,7 @@ pool_is_node_to_be_sent_in_current_query(int node_id) POOL_SESSION_CONTEXT *sc; if (RAW_MODE) - return node_id == REAL_MASTER_NODE_ID; + return node_id == REAL_MAIN_NODE_ID; sc = pool_get_session_context(true); if (!sc) @@ -318,10 +318,10 @@ pool_is_node_to_be_sent_in_current_query(int node_id) } /* - * Returns virtual master DB node id, + * Returns virtual main DB node id, */ int -pool_virtual_master_db_node_id(void) +pool_virtual_main_db_node_id(void) { POOL_SESSION_CONTEXT *sc; @@ -344,19 +344,19 @@ pool_virtual_master_db_node_id(void) sc = pool_get_session_context(true); if (!sc) { - return REAL_MASTER_NODE_ID; + return REAL_MAIN_NODE_ID; } if (sc->in_progress && sc->query_context) { - int node_id = sc->query_context->virtual_master_node_id; + int node_id = sc->query_context->virtual_main_node_id; if (SL_MODE) { /* - * Make sure that virtual_master_node_id is either primary node id + * Make sure that virtual_main_node_id is either primary node id * or load balance node id. If not, it is likely that - * virtual_master_node_id is not set up yet. Let's use the primary + * virtual_main_node_id is not set up yet. Let's use the primary * node id. except for the special case where we need to send the * query to the node which is not primary nor the load balance * node. Currently there is only one special such case that is @@ -369,7 +369,7 @@ pool_virtual_master_db_node_id(void) */ ereport(DEBUG5, - (errmsg("pool_virtual_master_db_node_id: virtual_master_node_id:%d load_balance_node_id:%d PRIMARY_NODE_ID:%d", + (errmsg("pool_virtual_main_db_node_id: virtual_main_node_id:%d load_balance_node_id:%d PRIMARY_NODE_ID:%d", node_id, sc->load_balance_node_id, PRIMARY_NODE_ID))); if (node_id != sc->query_context->load_balance_node_id && node_id != PRIMARY_NODE_ID) @@ -387,15 +387,15 @@ pool_virtual_master_db_node_id(void) } /* - * No query context exists. If in master/slave mode, returns primary node - * if exists. Otherwise returns my_master_node_id, which represents the - * last REAL_MASTER_NODE_ID. + * No query context exists. If in native replication mode, returns primary node + * if exists. Otherwise returns my_main_node_id, which represents the + * last REAL_MAIN_NODE_ID. */ - if (MASTER_SLAVE) + if (NATIVE_REPLICATION) { return PRIMARY_NODE_ID; } - return my_master_node_id; + return my_main_node_id; } /* @@ -416,7 +416,7 @@ pool_force_query_node_to_backend(POOL_QUERY_CONTEXT * query_context, int backend { if (query_context->where_to_send[i]) { - query_context->virtual_master_node_id = i; + query_context->virtual_main_node_id = i; break; } } @@ -443,17 +443,17 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) pool_clear_node_to_be_sent(query_context); /* - * If there is "NO LOAD BALANCE" comment, we send only to master node. + * If there is "NO LOAD BALANCE" comment, we send only to main node. */ if (!strncasecmp(query, NO_LOAD_BALANCE, NO_LOAD_BALANCE_COMMENT_SZ)) { pool_set_node_to_be_sent(query_context, - MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID); + NATIVE_REPLICATION ? 
PRIMARY_NODE_ID : REAL_MAIN_NODE_ID); for (i = 0; i < NUM_BACKENDS; i++) { if (query_context->where_to_send[i]) { - query_context->virtual_master_node_id = i; + query_context->virtual_main_node_id = i; break; } } @@ -461,16 +461,16 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) } /* - * In raw mode, we send only to master node. Simple enough. + * In raw mode, we send only to main node. Simple enough. */ if (RAW_MODE) { - pool_set_node_to_be_sent(query_context, REAL_MASTER_NODE_ID); + pool_set_node_to_be_sent(query_context, REAL_MAIN_NODE_ID); } - else if (MASTER_SLAVE && query_context->is_multi_statement) + else if (NATIVE_REPLICATION && query_context->is_multi_statement) { /* - * If we are in master/slave mode and we have multi statement query, + * If we are in native replication mode and we have a multi statement query, * we should send it to primary server only. Otherwise it is possible * to send a write query to standby servers because we only use the * first element of the multi statement query and don't care about the @@ -483,7 +483,7 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) */ pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID); } - else if (MASTER_SLAVE) + else if (NATIVE_REPLICATION) { POOL_DEST dest; @@ -606,10 +606,10 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID); } /* - * When query match the query patterns in black_query_pattern_list, we - * send only to master node. + * When the query matches the query patterns in primary_routing_query_pattern_list, we + * send it only to the primary node. */ - else if (pattern_compare(query, BLACKLIST, "black_query_pattern_list") == 1) + else if (pattern_compare(query, WRITELIST, "primary_routing_query_pattern_list") == 1) { pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID); } @@ -625,7 +625,7 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID); } - else if (is_select_object_in_temp_black_list(node, query)) + else if (is_select_object_in_temp_write_list(node, query)) { pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID); } @@ -665,7 +665,7 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) if (pool_config->backend_clustering_mode == CM_SNAPSHOT_ISOLATION && pool_config->load_balance_mode) { - if (TSTATE(backend, MASTER_NODE_ID) == 'T') + if (TSTATE(backend, MAIN_NODE_ID) == 'T') { /* * We are in an explicit transaction. If the transaction is @@ -675,11 +675,11 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) { /* Ok, we can load balance. We are done! */ set_load_balance_info(query_context); - set_virtual_master_node(query_context); + set_virtual_main_node(query_context); return; } } - else if (TSTATE(backend, MASTER_NODE_ID) == 'I') + else if (TSTATE(backend, MAIN_NODE_ID) == 'I') { /* * We are out side transaction. 
If default transaction is read only, @@ -689,7 +689,7 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) POOL_SELECT_RESULT *res; bool load_balance = false; - do_query(CONNECTION(backend, MASTER_NODE_ID), si_query, &res, MAJOR(backend)); + do_query(CONNECTION(backend, MAIN_NODE_ID), si_query, &res, MAJOR(backend)); if (res) { if (res->data[0] && !strcmp(res->data[0], "on")) @@ -699,13 +699,13 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) free_select_result(res); } - per_node_statement_log(backend, MASTER_NODE_ID, si_query); + per_node_statement_log(backend, MAIN_NODE_ID, si_query); if (load_balance) { /* Ok, we can load balance. We are done! */ set_load_balance_info(query_context); - set_virtual_master_node(query_context); + set_virtual_main_node(query_context); return; } } @@ -726,7 +726,7 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) * isolation level is not SERIALIZABLE) we might be able to load * balance. */ - else if (TSTATE(backend, MASTER_NODE_ID) == 'I' || + else if (TSTATE(backend, MAIN_NODE_ID) == 'I' || (!pool_is_writing_transaction() && !pool_is_failed_transaction() && pool_get_transaction_isolation() != POOL_SERIALIZABLE)) @@ -735,8 +735,8 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) } else { - /* only send to master node */ - pool_set_node_to_be_sent(query_context, REAL_MASTER_NODE_ID); + /* only send to main node */ + pool_set_node_to_be_sent(query_context, REAL_MAIN_NODE_ID); } } else @@ -744,8 +744,8 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) if (is_select_query(node, query) && !pool_config->replicate_select && !pool_has_function_call(node)) { - /* only send to master node */ - pool_set_node_to_be_sent(query_context, REAL_MASTER_NODE_ID); + /* only send to main node */ + pool_set_node_to_be_sent(query_context, REAL_MAIN_NODE_ID); } else { @@ -784,8 +784,8 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node) where_to_send_deallocate(query_context, node); } - /* Set virtual master node according to the where_to_send map. */ - set_virtual_master_node(query_context); + /* Set virtual main node according to the where_to_send map. */ + set_virtual_main_node(query_context); return; } @@ -820,7 +820,7 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context, /* * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in - * master/slave mode, we send BEGIN to slaves/standbys instead. + * native replication mode, we send BEGIN to standbys instead. * original_query which is BEGIN READ WRITE is sent to primary. * rewritten_query which is BEGIN is sent to standbys. */ @@ -853,10 +853,10 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context, continue; /* - * If in master/slave mode, we do not send COMMIT/ABORT to - * slaves/standbys if it's in I(idle) state. + * If in native replication mode, we do not send COMMIT/ABORT to + * standbys if it's in I(idle) state. */ - if (is_commit && MASTER_SLAVE && !IS_MASTER_NODE_ID(i) && TSTATE(backend, i) == 'I') + if (is_commit && NATIVE_REPLICATION && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I') { pool_unset_node_to_be_sent(query_context, i); continue; @@ -904,10 +904,10 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context, #ifdef NOT_USED /* - * If in master/slave mode, we do not send COMMIT/ABORT to - * slaves/standbys if it's in I(idle) state. 
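Note: earlier in pool_where_to_send() a literal "NO LOAD BALANCE" prefix comment pins a statement to a single node. A minimal libpq sketch of a client relying on that behavior, assuming the comment text /*NO LOAD BALANCE*/ and an illustrative host/port; none of this is part of the patch:

    #include <stdio.h>
    #include <libpq-fe.h>

    int main(void)
    {
        /* Illustrative Pgpool-II endpoint. */
        PGconn *conn = PQconnectdb("host=localhost port=9999 dbname=postgres");

        if (PQstatus(conn) != CONNECTION_OK)
        {
            fprintf(stderr, "%s", PQerrorMessage(conn));
            PQfinish(conn);
            return 1;
        }

        /* The leading comment asks Pgpool-II to skip load balancing, so the
         * SELECT goes to one node only (the primary in streaming mode). */
        PGresult *res = PQexec(conn, "/*NO LOAD BALANCE*/ SELECT 1");

        if (PQresultStatus(res) == PGRES_TUPLES_OK)
            printf("result: %s\n", PQgetvalue(res, 0, 0));
        PQclear(res);
        PQfinish(conn);
        return 0;
    }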
+ * If in native replication mode, we do not send COMMIT/ABORT to + * standbys if it's in I(idle) state. */ - if (is_commit && MASTER_SLAVE && !IS_MASTER_NODE_ID(i) && TSTATE(backend, i) == 'I') + if (is_commit && NATIVE_REPLICATION && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I') { continue; } @@ -924,8 +924,8 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context, wait_for_query_response_with_trans_cleanup(frontend, CONNECTION(backend, i), MAJOR(backend), - MASTER_CONNECTION(backend)->pid, - MASTER_CONNECTION(backend)->key); + MAIN_CONNECTION(backend)->pid, + MAIN_CONNECTION(backend)->key); /* * Check if some error detected. If so, emit log. This is useful when @@ -973,7 +973,7 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context, /* * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in - * master/slave mode, we send BEGIN to slaves/standbys instead. + * native replication mode, we send BEGIN to standbys instead. * original_query which is BEGIN READ WRITE is sent to primary. * rewritten_query which is BEGIN is sent to standbys. */ @@ -1114,10 +1114,10 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context, continue; /* - * If in master/slave mode, we do not send COMMIT/ABORT to - * slaves/standbys if it's in I(idle) state. + * If in native replication mode, we do not send COMMIT/ABORT to + * standbys if it's in I(idle) state. */ - if (is_commit && MASTER_SLAVE && !IS_MASTER_NODE_ID(i) && TSTATE(backend, i) == 'I') + if (is_commit && NATIVE_REPLICATION && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I') { continue; } @@ -1133,8 +1133,8 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context, wait_for_query_response_with_trans_cleanup(frontend, CONNECTION(backend, i), MAJOR(backend), - MASTER_CONNECTION(backend)->pid, - MASTER_CONNECTION(backend)->key); + MAIN_CONNECTION(backend)->pid, + MAIN_CONNECTION(backend)->key); /* * Check if some error detected. If so, emit log. This is useful @@ -1153,7 +1153,7 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context, /* * From syntactically analysis decide the statement to be sent to the - * primary, the standby or either or both in master/slave+HR/SR mode. + * primary, the standby or either or both in native replication+HR/SR mode. */ static POOL_DEST send_to_where(Node *node, char *query) @@ -1738,15 +1738,15 @@ is_serializable(TransactionStmt *node) /* * If the query is BEGIN READ WRITE or - * BEGIN ... SERIALIZABLE in master/slave mode, - * we send BEGIN to slaves/standbys instead. + * BEGIN ... SERIALIZABLE in native replication mode, + * we send BEGIN to standbys instead. * original_query which is BEGIN READ WRITE is sent to primary. * rewritten_query which is BEGIN is sent to standbys. */ bool pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT * query_context) { - return (MASTER_SLAVE && + return (NATIVE_REPLICATION && is_start_transaction_query(query_context->parse_tree) && (is_read_write((TransactionStmt *) query_context->parse_tree) || is_serializable((TransactionStmt *) query_context->parse_tree))); @@ -2061,10 +2061,10 @@ pool_is_transaction_read_only(Node *node) } /* - * Set virtual master node according to the where_to_send map. + * Set virtual main node according to the where_to_send map. 
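The set_virtual_main_node() helper renamed below simply picks the first backend flagged in the where_to_send map. A stand-alone sketch of that selection with stand-in names, not pgpool's actual code:

    #include <stdio.h>

    #define NUM_BACKENDS 4               /* stand-in for the configured count */

    /* Return the first node flagged in a where_to_send-style map,
     * or -1 if no node is selected. */
    static int first_node_to_be_sent(const char *where_to_send)
    {
        for (int i = 0; i < NUM_BACKENDS; i++)
            if (where_to_send[i])
                return i;
        return -1;
    }

    int main(void)
    {
        char map[NUM_BACKENDS] = {0};

        map[2] = 1;                      /* suppose only backend 2 gets the query */
        printf("virtual main node: %d\n", first_node_to_be_sent(map));
        return 0;
    }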
*/ static void -set_virtual_master_node(POOL_QUERY_CONTEXT *query_context) +set_virtual_main_node(POOL_QUERY_CONTEXT *query_context) { int i; @@ -2072,7 +2072,7 @@ set_virtual_master_node(POOL_QUERY_CONTEXT *query_context) { if (query_context->where_to_send[i]) { - query_context->virtual_master_node_id = i; + query_context->virtual_main_node_id = i; break; } } @@ -2121,10 +2121,10 @@ is_in_list(char *name, List *list) } /* - * Check if the relname of SelectStmt is in the temp black list. + * Check if the relname of SelectStmt is in the temp write list. */ static bool -is_select_object_in_temp_black_list(Node *node, void *context) +is_select_object_in_temp_write_list(Node *node, void *context) { if (node == NULL || pool_config->disable_load_balance_on_write != DLBOW_DML_ADAPTIVE) return false; @@ -2137,13 +2137,13 @@ is_select_object_in_temp_black_list(Node *node, void *context) if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && session_context->is_in_transaction) { ereport(DEBUG1, - (errmsg("is_select_object_in_temp_black_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname))); + (errmsg("is_select_object_in_temp_write_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname))); - return is_in_list(rgv->relname, session_context->transaction_temp_black_list); + return is_in_list(rgv->relname, session_context->transaction_temp_write_list); } } - return raw_expression_tree_walker(node, is_select_object_in_temp_black_list, context); + return raw_expression_tree_walker(node, is_select_object_in_temp_write_list, context); } static char* @@ -2173,7 +2173,7 @@ get_associated_object_from_dml_adaptive_relations /* * Check the object relationship list. - * If find the name in the list, will add related objects to the transaction temp black list. + * If find the name in the list, will add related objects to the transaction temp write list. */ void check_object_relationship_list(char *name, bool is_func_name) @@ -2191,8 +2191,8 @@ check_object_relationship_list(char *name, bool is_func_name) if (right_token) { MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context); - session_context->transaction_temp_black_list = - lappend(session_context->transaction_temp_black_list, pstrdup(right_token)); + session_context->transaction_temp_write_list = + lappend(session_context->transaction_temp_write_list, pstrdup(right_token)); MemoryContextSwitchTo(old_context); } } @@ -2201,10 +2201,10 @@ check_object_relationship_list(char *name, bool is_func_name) } /* - * Find the relname and add it to the transaction temp black list. + * Find the relname and add it to the transaction temp write list. 
*/ static bool -add_object_into_temp_black_list(Node *node, void *context) +add_object_into_temp_write_list(Node *node, void *context) { if (node == NULL) return false; @@ -2214,17 +2214,17 @@ add_object_into_temp_black_list(Node *node, void *context) RangeVar *rgv = (RangeVar *) node; ereport(DEBUG5, - (errmsg("add_object_into_temp_black_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname))); + (errmsg("add_object_into_temp_write_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname))); POOL_SESSION_CONTEXT *session_context = pool_get_session_context(false); MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context); - if (!is_in_list(rgv->relname, session_context->transaction_temp_black_list)) + if (!is_in_list(rgv->relname, session_context->transaction_temp_write_list)) { ereport(DEBUG1, - (errmsg("add \"%s\" into transaction_temp_black_list", rgv->relname))); + (errmsg("add \"%s\" into transaction_temp_write_list", rgv->relname))); - session_context->transaction_temp_black_list = lappend(session_context->transaction_temp_black_list, pstrdup(rgv->relname)); + session_context->transaction_temp_write_list = lappend(session_context->transaction_temp_write_list, pstrdup(rgv->relname)); } MemoryContextSwitchTo(old_context); @@ -2232,7 +2232,7 @@ add_object_into_temp_black_list(Node *node, void *context) check_object_relationship_list(rgv->relname, false); } - return raw_expression_tree_walker(node, add_object_into_temp_black_list, context); + return raw_expression_tree_walker(node, add_object_into_temp_write_list, context); } /* @@ -2253,28 +2253,28 @@ dml_adaptive(Node *node, char *query) { session_context->is_in_transaction = true; - if (session_context->transaction_temp_black_list != NIL) - list_free_deep(session_context->transaction_temp_black_list); + if (session_context->transaction_temp_write_list != NIL) + list_free_deep(session_context->transaction_temp_write_list); - session_context->transaction_temp_black_list = NIL; + session_context->transaction_temp_write_list = NIL; } else if(is_commit_or_rollback_query(node)) { session_context->is_in_transaction = false; - if (session_context->transaction_temp_black_list != NIL) - list_free_deep(session_context->transaction_temp_black_list); + if (session_context->transaction_temp_write_list != NIL) + list_free_deep(session_context->transaction_temp_write_list); - session_context->transaction_temp_black_list = NIL; + session_context->transaction_temp_write_list = NIL; } MemoryContextSwitchTo(old_context); return; } - /* If non-selectStmt, find the relname and add it to the transaction temp black list. */ + /* If non-selectStmt, find the relname and add it to the transaction temp write list. */ if (!is_select_query(node, query)) - add_object_into_temp_black_list(node, query); + add_object_into_temp_write_list(node, query); } } diff --git a/src/context/pool_session_context.c b/src/context/pool_session_context.c index b4d4b1f2e..f8550e7dc 100644 --- a/src/context/pool_session_context.c +++ b/src/context/pool_session_context.c @@ -102,7 +102,7 @@ pool_init_session_context(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * bac } else { - node_id = SL_MODE ? PRIMARY_NODE_ID : MASTER_NODE_ID; + node_id = SL_MODE ? 
PRIMARY_NODE_ID : MAIN_NODE_ID; } session_context->load_balance_node_id = node_id; @@ -169,8 +169,8 @@ pool_init_session_context(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * bac pool_temp_tables_init(); #ifdef NOT_USED - /* Initialize preferred master node id */ - pool_reset_preferred_master_node_id(); + /* Initialize preferred main node id */ + pool_reset_preferred_main_node_id(); #endif /* Snapshot isolation state */ @@ -540,7 +540,7 @@ dml_adaptive_init(void) if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE) { session_context->is_in_transaction = false; - session_context->transaction_temp_black_list = NIL; + session_context->transaction_temp_write_list = NIL; } } @@ -549,8 +549,8 @@ dml_adaptive_destroy(void) { if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && session_context) { - if (session_context->transaction_temp_black_list != NIL) - list_free_deep(session_context->transaction_temp_black_list); + if (session_context->transaction_temp_write_list != NIL) + list_free_deep(session_context->transaction_temp_write_list); } } @@ -849,7 +849,7 @@ pool_get_transaction_isolation(void) /* No cached data is available. Ask backend. */ - do_query(MASTER(session_context->backend), + do_query(MAIN(session_context->backend), "SELECT current_setting('transaction_isolation')", &res, MAJOR(session_context->backend)); error_context_stack = callback.previous; @@ -1824,33 +1824,33 @@ pool_unset_suspend_reading_from_frontend(void) #ifdef NOT_USED /* - * Set preferred "master" node id. + * Set preferred "main" node id. * Only used for SimpleForwardToFrontend. */ void -pool_set_preferred_master_node_id(int node_id) +pool_set_preferred_main_node_id(int node_id) { - session_context->preferred_master_node_id = node_id; + session_context->preferred_main_node_id = node_id; } /* - * Return preferred "master" node id. + * Return preferred "main" node id. * Only used for SimpleForwardToFrontend. */ int -pool_get_preferred_master_node_id(void) +pool_get_preferred_main_node_id(void) { - return session_context->preferred_master_node_id; + return session_context->preferred_main_node_id; } /* - * Reset preferred "master" node id. + * Reset preferred "main" node id. * Only used for SimpleForwardToFrontend. 
*/ void -pool_reset_preferred_master_node_id(void) +pool_reset_preferred_main_node_id(void) { - session_context->preferred_master_node_id = -1; + session_context->preferred_main_node_id = -1; } #endif diff --git a/src/include/context/pool_query_context.h b/src/include/context/pool_query_context.h index 73fef91ec..b732c90fd 100644 --- a/src/include/context/pool_query_context.h +++ b/src/include/context/pool_query_context.h @@ -63,7 +63,7 @@ typedef struct bool where_to_send[MAX_NUM_BACKENDS]; /* DB node map to send * query */ int load_balance_node_id; /* load balance node id per statement */ - int virtual_master_node_id; /* the 1st DB node to send query */ + int virtual_main_node_id; /* the 1st DB node to send query */ POOL_QUERY_STATE query_state[MAX_NUM_BACKENDS]; /* for extended query * protocol */ bool is_cache_safe; /* true if SELECT is safe to cache */ diff --git a/src/include/context/pool_session_context.h b/src/include/context/pool_session_context.h index df234de31..6aec6e5b9 100644 --- a/src/include/context/pool_session_context.h +++ b/src/include/context/pool_session_context.h @@ -288,13 +288,13 @@ typedef struct bool is_in_transaction; /* - * Current transaction temp black list + * Current transaction temp write list */ - List *transaction_temp_black_list; + List *transaction_temp_write_list; #ifdef NOT_USED - /* Preferred "master" node id. Only used for SimpleForwardToFrontend. */ - int preferred_master_node_id; + /* Preferred "main" node id. Only used for SimpleForwardToFrontend. */ + int preferred_main_node_id; #endif /* Whether snapshot is aquired in this transaction. Only used by Snapshot Isolation mode. */ @@ -386,9 +386,9 @@ extern void pool_temp_tables_remove_pending(void); extern void pool_temp_tables_dump(void); #ifdef NOT_USED -extern void pool_set_preferred_master_node_id(int node_id); -extern int pool_get_preferred_master_node_id(void); -extern void pool_reset_preferred_master_node_id(void); +extern void pool_set_preferred_main_node_id(int node_id); +extern int pool_get_preferred_main_node_id(void); +extern void pool_reset_preferred_main_node_id(void); #endif #ifdef NOT_USED diff --git a/src/include/pcp/libpcp_ext.h b/src/include/pcp/libpcp_ext.h index e816656f1..c0d88a5fa 100644 --- a/src/include/pcp/libpcp_ext.h +++ b/src/include/pcp/libpcp_ext.h @@ -69,8 +69,8 @@ typedef struct typedef enum { - ROLE_MASTER, - ROLE_SLAVE, + ROLE_MAIN, + ROLE_REPLICA, ROLE_PRIMARY, ROLE_STANDBY } SERVER_ROLE; diff --git a/src/include/pcp/pcp.h b/src/include/pcp/pcp.h index 6df3eb12e..aa7bd89c4 100644 --- a/src/include/pcp/pcp.h +++ b/src/include/pcp/pcp.h @@ -54,8 +54,8 @@ typedef struct PCPWDClusterInfo int quorumStatus; int aliveNodeCount; bool escalated; - char masterNodeName[WD_MAX_HOST_NAMELEN]; - char masterHostName[WD_MAX_HOST_NAMELEN]; + char leaderNodeName[WD_MAX_HOST_NAMELEN]; + char leaderHostName[WD_MAX_HOST_NAMELEN]; int nodeCount; PCPWDNodeInfo nodeList[1]; } PCPWDClusterInfo; diff --git a/src/include/pool.h b/src/include/pool.h index 1aa44d37f..b30e65b92 100644 --- a/src/include/pool.h +++ b/src/include/pool.h @@ -295,13 +295,13 @@ extern int pool_get_major_version(void); * no query is in progress and the DB node is healthy */ extern bool pool_is_node_to_be_sent_in_current_query(int node_id); -extern int pool_virtual_master_db_node_id(void); +extern int pool_virtual_main_db_node_id(void); extern BACKEND_STATUS * my_backend_status[]; -extern int my_master_node_id; +extern int my_main_node_id; #define VALID_BACKEND(backend_id) \ - ((RAW_MODE && (backend_id) == 
REAL_MASTER_NODE_ID) || \ + ((RAW_MODE && (backend_id) == REAL_MAIN_NODE_ID) || \ (pool_is_node_to_be_sent_in_current_query((backend_id)) && \ ((*(my_backend_status[(backend_id)]) == CON_UP) || \ (*(my_backend_status[(backend_id)]) == CON_CONNECT_WAIT)))) @@ -320,15 +320,15 @@ extern int my_master_node_id; * The first DB node id appears in pgpool.conf or the first "live" DB * node otherwise. */ -#define REAL_MASTER_NODE_ID (Req_info->master_node_id) +#define REAL_MAIN_NODE_ID (Req_info->main_node_id) /* * The primary node id in streaming replication mode. If not in the * mode or there's no primary node, this macro returns - * REAL_MASTER_NODE_ID. + * REAL_MAIN_NODE_ID. */ #define PRIMARY_NODE_ID (Req_info->primary_node_id >=0 && VALID_BACKEND_RAW(Req_info->primary_node_id) ? \ - Req_info->primary_node_id:REAL_MASTER_NODE_ID) + Req_info->primary_node_id:REAL_MAIN_NODE_ID) #define IS_PRIMARY_NODE_ID(node_id) (node_id == PRIMARY_NODE_ID) /* @@ -338,14 +338,14 @@ extern int my_master_node_id; #define REAL_PRIMARY_NODE_ID (Req_info->primary_node_id) /* - * "Virtual" master node id. It's same as REAL_MASTER_NODE_ID if not + * "Virtual" main node id. It's same as REAL_MAIN_NODE_ID if not * in load balance mode. If in load balance, it's the first load * balance node. */ -#define MASTER_NODE_ID (pool_virtual_master_db_node_id()) -#define IS_MASTER_NODE_ID(node_id) (MASTER_NODE_ID == (node_id)) -#define MASTER_CONNECTION(p) ((p)->slots[MASTER_NODE_ID]) -#define MASTER(p) MASTER_CONNECTION(p)->con +#define MAIN_NODE_ID (pool_virtual_main_db_node_id()) +#define IS_MAIN_NODE_ID(node_id) (MAIN_NODE_ID == (node_id)) +#define MAIN_CONNECTION(p) ((p)->slots[MAIN_NODE_ID]) +#define MAIN(p) MAIN_CONNECTION(p)->con /* * Backend node status in streaming replication mode. 
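The PRIMARY_NODE_ID macro above quietly degrades to REAL_MAIN_NODE_ID when no healthy primary is registered. A stand-alone model of that fallback, where the arguments stand in for the Req_info fields and the VALID_BACKEND_RAW() check:

    #include <stdio.h>

    static int primary_node_id(int primary, int primary_is_valid, int real_main)
    {
        /* Mirrors the macro: a registered, healthy primary wins,
         * otherwise fall back to the main (youngest live) node. */
        return (primary >= 0 && primary_is_valid) ? primary : real_main;
    }

    int main(void)
    {
        printf("%d\n", primary_node_id(-1, 0, 2));   /* no primary -> node 2 */
        printf("%d\n", primary_node_id(0, 1, 2));    /* healthy primary -> 0 */
        return 0;
    }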
@@ -361,13 +361,13 @@ typedef enum /* Clustering mode macros */ #define REPLICATION (pool_config->backend_clustering_mode == CM_NATIVE_REPLICATION || \ pool_config->backend_clustering_mode == CM_SNAPSHOT_ISOLATION) -#define MASTER_SLAVE (pool_config->backend_clustering_mode == CM_STREAMING_REPLICATION || \ +#define NATIVE_REPLICATION (pool_config->backend_clustering_mode == CM_STREAMING_REPLICATION || \ pool_config->backend_clustering_mode == CM_LOGICAL_REPLICATION || \ pool_config->backend_clustering_mode == CM_SLONY) #define STREAM (pool_config->backend_clustering_mode == CM_STREAMING_REPLICATION) #define LOGICAL (pool_config->backend_clustering_mode == CM_LOGICAL_REPLICATION) #define SLONY (pool_config->backend_clustering_mode == CM_SLONY) -#define DUAL_MODE (REPLICATION || MASTER_SLAVE) +#define DUAL_MODE (REPLICATION || NATIVE_REPLICATION) #define RAW_MODE (pool_config->backend_clustering_mode == CM_RAW) #define SL_MODE (STREAM || LOGICAL) /* streaming or logical replication mode */ @@ -441,7 +441,7 @@ typedef struct POOL_REQUEST_NODE request[MAX_REQUEST_QUEUE_SIZE]; int request_queue_head; int request_queue_tail; - int master_node_id; /* the youngest node id which is not in down + int main_node_id; /* the youngest node id which is not in down * status */ int primary_node_id; /* the primary node id in streaming * replication mode */ @@ -623,7 +623,7 @@ extern int write_status_file(void); extern POOL_NODE_STATUS * verify_backend_node_status(POOL_CONNECTION_POOL_SLOT * *slots); extern POOL_NODE_STATUS * pool_get_node_status(void); extern void pool_set_backend_status_changed_time(int backend_id); -extern int get_next_master_node(void); +extern int get_next_main_node(void); diff --git a/src/include/pool_config.h b/src/include/pool_config.h index 156d9d935..5020aa4cd 100644 --- a/src/include/pool_config.h +++ b/src/include/pool_config.h @@ -43,11 +43,11 @@ #include "utils/regex_array.h" /* - * Regex support in white and black list function + * Regex support in write and readonly list function */ #include -#define BLACKLIST 0 -#define WHITELIST 1 +#define WRITELIST 0 +#define READONLYLIST 1 #define PATTERN_ARR_SIZE 16 /* Default length of regex array: 16 patterns */ typedef struct { @@ -57,12 +57,12 @@ typedef struct regex_t regexv; } RegPattern; -typedef enum MasterSlaveSubModes +typedef enum NativeReplicationSubModes { SLONY_MODE = 1, STREAM_MODE, LOGICAL_MODE -} MasterSlaveSubModes; +} NativeReplicationSubModes; typedef enum ClusteringModes { @@ -106,7 +106,7 @@ typedef enum DLBOW_OPTION typedef enum RELQTARGET_OPTION { - RELQTARGET_MASTER = 1, + RELQTARGET_PRIMARY = 1, RELQTARGET_LOAD_BALANCE_NODE } RELQTARGET_OPTION; @@ -123,7 +123,7 @@ typedef enum CHECK_TEMP_TABLE_OPTION * Flags for backendN_flag */ #define POOL_FAILOVER (1 << 0) /* allow or disallow failover */ -#define POOL_ALWAYS_MASTER (1 << 1) /* this backend is always master */ +#define POOL_ALWAYS_PRIMARY (1 << 1) /* this backend is always primary */ #define POOL_DISALLOW_TO_FAILOVER(x) ((unsigned short)(x) & POOL_FAILOVER) #define POOL_ALLOW_TO_FAILOVER(x) (!(POOL_DISALLOW_TO_FAILOVER(x))) @@ -252,7 +252,7 @@ typedef struct bool load_balance_mode; /* load balance mode */ bool replication_stop_on_mismatch; /* if there's a data mismatch - * between master and + * between primary and * secondary start * degeneration to stop * replication mode */ @@ -275,10 +275,10 @@ typedef struct * balancing is disabled. 
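Entries of the renamed lists are compiled with REG_EXTENDED, as in add_regex_pattern() earlier in the patch. A minimal sketch of compiling one write_function_list entry and testing a function name against it; the anchoring and the sample names are illustrative assumptions:

    #include <stdio.h>
    #include <regex.h>

    int main(void)
    {
        regex_t re;
        const char *pattern = "^nextval$";   /* one hypothetical list entry */
        const char *func = "nextval";

        if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB) != 0)
            return 1;                        /* invalid pattern */
        printf("%s %s\n", func,
               regexec(&re, func, 0, NULL, 0) == 0 ? "matches" : "does not match");
        regfree(&re);
        return 0;
    }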
*/ char **reset_query_list; /* comma separated list of queries to be * issued at the end of session */ - char **white_function_list; /* list of functions with no side + char **read_only_function_list; /* list of functions with no side * effects */ - char **black_function_list; /* list of functions with side effects */ - char **black_query_pattern_list; /* list of query patterns that + char **write_function_list; /* list of functions with side effects */ + char **primary_routing_query_pattern_list; /* list of query patterns that * should be sent to primary node */ char *log_line_prefix; /* printf-style string to output at * beginning of each log line */ @@ -297,8 +297,8 @@ typedef struct bool log_truncate_on_rotation; int log_file_mode; - bool master_slave_mode; /* operate in master/slave mode */ - MasterSlaveSubModes master_slave_sub_mode; /* either "slony" or "stream" */ + bool native_replication_mode; /* operate in native replication mode */ + NativeReplicationSubModes native_replication_sub_mode; /* either "slony" or "stream" */ int64 delay_threshold; /* If the standby server delays more than * delay_threshold, any query goes to the * primary only. The unit is in bytes. 0 @@ -328,7 +328,7 @@ typedef struct char *sr_check_database; /* PostgreSQL database name for streaming * replication check */ char *failover_command; /* execute command when failover happens */ - char *follow_master_command; /* execute command when failover is + char *follow_primary_command; /* execute command when failover is * ended */ char *failback_command; /* execute command when failback happens */ @@ -378,16 +378,16 @@ typedef struct /* followings till syslog, does not exist in the configuration file */ int num_reset_queries; /* number of queries in reset_query_list */ - int num_white_function_list; /* number of functions in - * white_function_list */ - int num_black_function_list; /* number of functions in - * black_function_list */ - int num_white_memqcache_table_list; /* number of functions in - * white_memqcache_table_list */ - int num_black_memqcache_table_list; /* number of functions in - * black_memqcache_table_list */ - int num_black_query_pattern_list; /* number of query patterns in - * black_query_pattern_list */ + int num_read_only_function_list; /* number of functions in + * read_only_function_list */ + int num_write_function_list; /* number of functions in + * write_function_list */ + int num_cache_safe_memqcache_table_list; /* number of functions in + * cache_safe_memqcache_table_list */ + int num_cache_unsafe_memqcache_table_list; /* number of functions in + * cache_unsafe_memqcache_table_list */ + int num_primary_routing_query_pattern_list; /* number of query patterns in + * primary_routing_query_pattern_list */ int num_wd_monitoring_interfaces_list; /* number of items in * wd_monitoring_interfaces_list */ /* ssl configuration */ @@ -415,13 +415,13 @@ typedef struct * followings are for regex support and do not exist in the configuration * file */ - RegPattern *lists_patterns; /* Precompiled regex patterns for black/white + RegPattern *lists_patterns; /* Precompiled regex patterns for write/readonly * lists */ int pattc; /* number of regexp pattern */ int current_pattern_size; /* size of the regex pattern array */ RegPattern *lists_query_patterns; /* Precompiled regex patterns for - * black query pattern lists */ + * primary routing query pattern lists */ int query_pattc; /* number of regexp pattern */ int current_query_pattern_size; /* size of the regex pattern array */ @@ -454,11 +454,11 @@ 
typedef struct * by default */ char *memqcache_oiddir; /* Temporary work directory to record * table oids */ - char **white_memqcache_table_list; /* list of tables to memqcache */ - char **black_memqcache_table_list; /* list of tables not to memqcache */ + char **cache_safe_memqcache_table_list; /* list of tables to memqcache */ + char **cache_unsafe_memqcache_table_list; /* list of tables not to memqcache */ RegPattern *lists_memqcache_table_patterns; /* Precompiled regex patterns - * for black/white lists */ + * for cache safe/unsafe lists */ int memqcache_table_pattc; /* number of regexp pattern */ int current_memqcache_table_pattern_size; /* size of the regex * pattern array */ @@ -543,7 +543,7 @@ typedef struct char *wd_escalation_command; /* Executes this command at escalation * on new active pgpool. */ char *wd_de_escalation_command; /* Executes this command when - * master pgpool goes down. */ + * leader pgpool goes down. */ int wd_priority; /* watchdog node priority, during leader * election */ int pgpool_node_id; /* pgpool (watchdog) node id */ diff --git a/src/include/pool_config_variables.h b/src/include/pool_config_variables.h index 9c4ec0028..23e9fbd6f 100644 --- a/src/include/pool_config_variables.h +++ b/src/include/pool_config_variables.h @@ -35,7 +35,7 @@ typedef enum LOAD_BALANCE_CONFIG, REPLICATION_CONFIG, STREAMING_REPLICATION_CONFIG, - MASTER_SLAVE_CONFIG, + NATIVE_REPLICATION_CONFIG, WATCHDOG_CONFIG, SSL_CONFIG, FAILOVER_CONFIG, diff --git a/src/include/protocol/pool_proto_modules.h b/src/include/protocol/pool_proto_modules.h index 0d84e89d3..8191e84c9 100644 --- a/src/include/protocol/pool_proto_modules.h +++ b/src/include/protocol/pool_proto_modules.h @@ -161,8 +161,8 @@ extern int check_copy_from_stdin(Node *node); /* returns non 0 if this is a extern void query_ps_status(char *query, POOL_CONNECTION_POOL * backend); /* show ps status */ extern POOL_STATUS start_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node *node); extern POOL_STATUS end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend); -extern int detect_deadlock_error(POOL_CONNECTION * master, int major); -extern int detect_serialization_error(POOL_CONNECTION * master, int major, bool unread); +extern int detect_deadlock_error(POOL_CONNECTION * backend, int major); +extern int detect_serialization_error(POOL_CONNECTION * backend, int major, bool unread); extern int detect_active_sql_transaction_error(POOL_CONNECTION * backend, int major); extern int detect_query_cancel_error(POOL_CONNECTION * backend, int major); extern int detect_idle_in_transaction_sesion_timeout_error(POOL_CONNECTION * backend, int major); diff --git a/src/include/query_cache/pool_memqcache.h b/src/include/query_cache/pool_memqcache.h index 8ebf132af..971e6317e 100644 --- a/src/include/query_cache/pool_memqcache.h +++ b/src/include/query_cache/pool_memqcache.h @@ -242,8 +242,8 @@ extern int pool_fetch_cache(POOL_CONNECTION_POOL * backend, const char *query, c extern int pool_catalog_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_t datalen); extern bool pool_is_likely_select(char *query); -extern bool pool_is_table_in_black_list(const char *table_name); -extern bool pool_is_table_in_white_list(const char *table_name); +extern bool pool_is_table_in_unsafe_list(const char *table_name); +extern bool pool_is_table_in_safe_list(const char *table_name); extern bool pool_is_allow_to_cache(Node *node, char *query); extern int 
pool_extract_table_oids(Node *node, int **oidsp); extern int pool_extract_withclause_oids(Node *with, int *oidsp); diff --git a/src/include/watchdog/watchdog.h b/src/include/watchdog/watchdog.h index 073920ee5..8c7247cfa 100644 --- a/src/include/watchdog/watchdog.h +++ b/src/include/watchdog/watchdog.h @@ -176,7 +176,7 @@ typedef struct WatchdogNode int standby_nodes_count; /* number of standby nodes joined the * cluster only applicable when this * WatchdogNode is the - * master/coordinator node */ + * leader/coordinator node */ int quorum_status; /* quorum status on the node */ bool escalated; /* true if the Watchdog node has performed * escalation */ diff --git a/src/include/watchdog/wd_internal_commands.h b/src/include/watchdog/wd_internal_commands.h index d8d701475..afe17df0c 100644 --- a/src/include/watchdog/wd_internal_commands.h +++ b/src/include/watchdog/wd_internal_commands.h @@ -40,7 +40,7 @@ extern WDFailoverCMDResults wd_promote_backend(int node_id, unsigned char flags) extern WdCommandResult wd_execute_cluster_command(char* clusterCommand, int nArgs, WDExecCommandArg *wdExecCommandArg); -extern WDPGBackendStatus * get_pg_backend_status_from_master_wd_node(void); +extern WDPGBackendStatus * get_pg_backend_status_from_leader_wd_node(void); extern WD_STATES wd_internal_get_watchdog_local_node_state(void); extern int wd_internal_get_watchdog_quorum_state(void); diff --git a/src/include/watchdog/wd_ipc_defines.h b/src/include/watchdog/wd_ipc_defines.h index b7ccb7865..e66c4551d 100644 --- a/src/include/watchdog/wd_ipc_defines.h +++ b/src/include/watchdog/wd_ipc_defines.h @@ -38,7 +38,7 @@ typedef enum WDFailoverCMDResults FAILOVER_RES_WILL_BE_DONE, FAILOVER_RES_NOT_ALLOWED, FAILOVER_RES_INVALID_FUNCTION, - FAILOVER_RES_MASTER_REJECTED, + FAILOVER_RES_LEADER_REJECTED, FAILOVER_RES_BUILDING_CONSENSUS, FAILOVER_RES_CONSENSUS_MAY_FAIL, FAILOVER_RES_TIMEOUT @@ -66,13 +66,13 @@ typedef enum WDValueDataType #define WD_IPC_FAILOVER_COMMAND 'f' #define WD_IPC_ONLINE_RECOVERY_COMMAND 'r' #define WD_FAILOVER_LOCKING_REQUEST 's' -#define WD_GET_MASTER_DATA_REQUEST 'd' +#define WD_GET_LEADER_DATA_REQUEST 'd' #define WD_GET_RUNTIME_VARIABLE_VALUE 'v' #define WD_FAILOVER_INDICATION 'i' #define WD_COMMAND_RESTART_CLUSTER "RESTART_CLUSTER" -#define WD_COMMAND_REELECT_MASTER "REELECT_MASTER" +#define WD_COMMAND_REELECT_LEADER "REELECT_LEADER" #define WD_COMMAND_SHUTDOWN_CLUSTER "SHUTDOWN_CLUSTER" #define WD_COMMAND_RELOAD_CONFIG_CLUSTER "RELOAD_CONFIG_CLUSTER" diff --git a/src/include/watchdog/wd_json_data.h b/src/include/watchdog/wd_json_data.h index 6bd460632..4a45c4e06 100644 --- a/src/include/watchdog/wd_json_data.h +++ b/src/include/watchdog/wd_json_data.h @@ -30,7 +30,7 @@ /* * The structure to hold the parsed PG backend node status data fetched - * from the master watchdog node + * from the leader watchdog node */ typedef struct WDPGBackendStatus { diff --git a/src/libs/pcp/pcp.c b/src/libs/pcp/pcp.c index 865dd0299..44e16baa5 100644 --- a/src/libs/pcp/pcp.c +++ b/src/libs/pcp/pcp.c @@ -1424,7 +1424,7 @@ pcp_recovery_node(PCPConnInfo * pcpConn, int nid) } /* -------------------------------- - * pcp_promote_node - promote a node given by the argument as new pgpool's master + * pcp_promote_node - promote a node given by the argument as new pgpool's main node * * return 0 on success, -1 otherwise * -------------------------------- @@ -1437,7 +1437,7 @@ pcp_promote_node(PCPConnInfo * pcpConn, int nid) /* -------------------------------- - * and promote a node given by the argument as new pgpool's 
master + * and promote a node given by the argument as new pgpool's main node * * return 0 on success, -1 otherwise * -------------------------------- @@ -1563,21 +1563,21 @@ process_watchdog_info_response(PCPConnInfo * pcpConn, char *buf, int len) } wd_cluster_info->escalated = tempVal == 0 ? false : true; - ptr = json_get_string_value_for_key(root, "MasterNodeName"); + ptr = json_get_string_value_for_key(root, "LeaderNodeName"); if (ptr == NULL) { json_value_free(root); goto INVALID_RESPONSE; } - strncpy(wd_cluster_info->masterNodeName, ptr, sizeof(wd_cluster_info->masterNodeName) - 1); + strncpy(wd_cluster_info->leaderNodeName, ptr, sizeof(wd_cluster_info->leaderNodeName) - 1); - ptr = json_get_string_value_for_key(root, "MasterHostName"); + ptr = json_get_string_value_for_key(root, "LeaderHostName"); if (ptr == NULL) { json_value_free(root); goto INVALID_RESPONSE; } - strncpy(wd_cluster_info->masterHostName, ptr, sizeof(wd_cluster_info->masterHostName) - 1); + strncpy(wd_cluster_info->leaderHostName, ptr, sizeof(wd_cluster_info->leaderHostName) - 1); /* Get watchdog nodes data */ for (i = 0; i < nodeCount; i++) diff --git a/src/main/pgpool_main.c b/src/main/pgpool_main.c index 1e085aeff..21f1ad574 100644 --- a/src/main/pgpool_main.c +++ b/src/main/pgpool_main.c @@ -125,7 +125,7 @@ static void wakeup_children(void); static void reload_config(void); static int pool_pause(struct timeval *timeout); static void kill_all_children(int sig); -static pid_t fork_follow_child(int old_master, int new_primary, int old_primary); +static pid_t fork_follow_child(int old_main_node, int new_primary, int old_primary); static int read_status_file(bool discard_status); static RETSIGTYPE exit_handler(int sig); static RETSIGTYPE reap_handler(int sig); @@ -136,7 +136,7 @@ static RETSIGTYPE wakeup_handler(int sig); static void initialize_shared_mem_objects(bool clear_memcache_oidmaps); static int trigger_failover_command(int node, const char *command_line, - int old_master, int new_master, int old_primary); + int old_main_node, int new_main_node, int old_primary); static int find_primary_node(void); static int find_primary_node_repeatedly(void); static void terminate_all_childrens(int sig); @@ -206,7 +206,7 @@ static pid_t wd_lifecheck_pid = 0; /* pid for child process handling watchdog * lifecheck */ BACKEND_STATUS *my_backend_status[MAX_NUM_BACKENDS]; /* Backend status buffer */ -int my_master_node_id; /* Master node id buffer */ +int my_main_node_id; /* Main node id buffer */ /* * Dummy varibale to suppress compiler warnings by discarding return values @@ -1151,11 +1151,11 @@ static RETSIGTYPE exit_handler(int sig) } /* - * Calculate next valid master node id. + * Calculate next valid main node id. * If no valid node found, returns -1. */ int -get_next_master_node(void) +get_next_main_node(void) { int i; @@ -1163,7 +1163,7 @@ get_next_master_node(void) { /* * Do not use VALID_BACKEND macro in raw mode. VALID_BACKEND return - * true only if the argument is master node id. In other words, + * true only if the argument is main node id. In other words, * standby nodes are false. So need to check backend status with * VALID_BACKEND_RAW. 
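The key rename in process_watchdog_info_response() implies the watchdog-info JSON now carries "LeaderNodeName"/"LeaderHostName". A hedged illustration with invented values; only the two key names are taken from the hunk above:

    #include <stdio.h>

    int main(void)
    {
        static const char *example_cluster_json =
            "{\"LeaderNodeName\": \"pgpool0\", \"LeaderHostName\": \"192.0.2.10\"}";

        puts(example_cluster_json);
        return 0;
    }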
*/ @@ -1249,8 +1249,8 @@ sigusr1_interupt_processor(void) if (wd_internal_get_watchdog_local_node_state() == WD_STANDBY) { ereport(LOG, - (errmsg("master watchdog has performed failover"), - errdetail("syncing the backend states from the MASTER watchdog node"))); + (errmsg("leader watchdog has performed failover"), + errdetail("syncing the backend states from the LEADER watchdog node"))); sync_backend_from_watchdog(); } } @@ -1265,7 +1265,7 @@ sigusr1_interupt_processor(void) { ereport(LOG, (errmsg("we have joined the watchdog cluster as STANDBY node"), - errdetail("syncing the backend states from the MASTER watchdog node"))); + errdetail("syncing the backend states from the LEADER watchdog node"))); sync_backend_from_watchdog(); } } @@ -1318,7 +1318,7 @@ failover(void) j, k; int node_id; - int new_master; + int new_main_node; int new_primary = -1; int nodes[MAX_NUM_BACKENDS]; bool need_to_restart_children = true; @@ -1414,13 +1414,13 @@ failover(void) wd_failover_start(); /* - * if not in replication mode/master slave mode, we treat this a + * if not in replication mode/native replication mode, we treat this as a * restart request. otherwise we need to check if we have already * failovered. */ ereport(DEBUG1, (errmsg("failover handler"), - errdetail("starting to select new master node"))); + errdetail("starting to select new main node"))); node_id = node_id_set[0]; /* failback request? */ @@ -1464,11 +1464,11 @@ failover(void) search_primary = false; /* - * recalculate the master node id after setting the backend + * recalculate the main node id after setting the backend * status of quarantined node, this will bring us to the old - * master_node_id that was beofre the quarantine state + * main_node_id that was before the quarantine state */ - Req_info->master_node_id = get_next_master_node(); + Req_info->main_node_id = get_next_main_node(); if (Req_info->primary_node_id == -1 && BACKEND_INFO(node_id).role == ROLE_PRIMARY) { @@ -1515,7 +1515,7 @@ failover(void) (void) write_status_file(); trigger_failover_command(node_id, pool_config->failback_command, - MASTER_NODE_ID, get_next_master_node(), PRIMARY_NODE_ID); + MAIN_NODE_ID, get_next_main_node(), PRIMARY_NODE_ID); } sync_required = true; @@ -1596,9 +1596,9 @@ failover(void) } } - new_master = get_next_master_node(); + new_main_node = get_next_main_node(); - if (new_master < 0) + if (new_main_node < 0) { ereport(LOG, (errmsg("failover: no valid backend node found"))); @@ -1732,7 +1732,7 @@ failover(void) if (nodes[i]) { trigger_failover_command(i, pool_config->failover_command, - MASTER_NODE_ID, new_master, REAL_PRIMARY_NODE_ID); + MAIN_NODE_ID, new_main_node, REAL_PRIMARY_NODE_ID); sync_required = true; } } @@ -1796,7 +1796,7 @@ failover(void) } /* - * If follow_master_command is provided and in master/slave streaming + * If follow_primary_command is provided and in streaming * replication mode, we start degenerating all backends as they are * not replicated anymore. 
*/ @@ -1804,7 +1804,7 @@ failover(void) if (STREAM) { - if (*pool_config->follow_master_command != '\0' || + if (*pool_config->follow_primary_command != '\0' || reqkind == PROMOTE_NODE_REQUEST) { /* only if the failover is against the current primary */ @@ -1842,8 +1842,8 @@ failover(void) } else { - /* update new master node */ - new_master = get_next_master_node(); + /* update new primary node */ + new_main_node = get_next_main_node(); ereport(LOG, (errmsg("failover: %d follow backends have been degenerated", follow_cnt))); } @@ -1851,9 +1851,9 @@ failover(void) } } - if ((follow_cnt > 0) && (*pool_config->follow_master_command != '\0')) + if ((follow_cnt > 0) && (*pool_config->follow_primary_command != '\0')) { - follow_pid = fork_follow_child(Req_info->master_node_id, new_primary, + follow_pid = fork_follow_child(Req_info->primary_node_id, new_primary, Req_info->primary_node_id); } @@ -1874,12 +1874,12 @@ failover(void) ereport(LOG, (errmsg("failover: set new primary node: %d", Req_info->primary_node_id))); - if (new_master >= 0) + if (new_main_node >= 0) { - Req_info->master_node_id = new_master; + Req_info->main_node_id = new_main_node; sync_required = true; ereport(LOG, - (errmsg("failover: set new master node: %d", Req_info->master_node_id))); + (errmsg("failover: set new main node: %d", Req_info->main_node_id))); } @@ -2651,14 +2651,14 @@ pool_sleep(unsigned int second) */ static int trigger_failover_command(int node, const char *command_line, - int old_master, int new_master, int old_primary) + int old_main_node, int new_main_node, int old_primary) { int r = 0; String *exec_cmd; char port_buf[6]; char buf[2]; BackendInfo *info; - BackendInfo *newmaster; + BackendInfo *newmain; BackendInfo *oldprimary; if (command_line == NULL || (strlen(command_line) == 0)) @@ -2703,43 +2703,43 @@ trigger_failover_command(int node, const char *command_line, string_append_char(exec_cmd, info->backend_hostname); break; - case 'H': /* new master host name */ - newmaster = pool_get_node_info(new_master); - if (newmaster) - string_append_char(exec_cmd, newmaster->backend_hostname); + case 'H': /* new main host name */ + newmain = pool_get_node_info(new_main_node); + if (newmain) + string_append_char(exec_cmd, newmain->backend_hostname); else - /* no valid new master */ + /* no valid new main */ string_append_char(exec_cmd, "\"\""); break; - case 'm': /* new master node id */ - snprintf(port_buf, sizeof(port_buf), "%d", new_master); + case 'm': /* new main node id */ + snprintf(port_buf, sizeof(port_buf), "%d", new_main_node); string_append_char(exec_cmd, port_buf); break; - case 'r': /* new master port */ - newmaster = pool_get_node_info(get_next_master_node()); - if (newmaster) + case 'r': /* new main node port */ + newmain = pool_get_node_info(get_next_main_node()); + if (newmain) { - snprintf(port_buf, sizeof(port_buf), "%d", newmaster->backend_port); + snprintf(port_buf, sizeof(port_buf), "%d", newmain->backend_port); string_append_char(exec_cmd, port_buf); } else - /* no valid new master */ + /* no valid new main node */ string_append_char(exec_cmd, "\"\""); break; - case 'R': /* new master database directory */ - newmaster = pool_get_node_info(get_next_master_node()); - if (newmaster) - string_append_char(exec_cmd, newmaster->backend_data_directory); + case 'R': /* new main database directory */ + newmain = pool_get_node_info(get_next_main_node()); + if (newmain) + string_append_char(exec_cmd, newmain->backend_data_directory); else - /* no valid new master */ + /* no valid new main */ 
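trigger_failover_command() expands %-specifiers such as %m (new main node id), %M (old main node id) and %H (new main host) before running failover_command or follow_primary_command. An illustrative expander, not pgpool's String API; the template, script path and values are invented:

    #include <stdio.h>
    #include <string.h>

    static void expand_command(const char *tmpl, int new_main, int old_main,
                               const char *new_host, char *out, size_t outlen)
    {
        size_t n = 0;

        for (const char *p = tmpl; *p != '\0' && n + 64 < outlen; p++)
        {
            if (*p == '%' && p[1] != '\0')
            {
                switch (*++p)
                {
                    case 'm':
                        n += snprintf(out + n, outlen - n, "%d", new_main);
                        break;
                    case 'M':
                        n += snprintf(out + n, outlen - n, "%d", old_main);
                        break;
                    case 'H':
                        n += snprintf(out + n, outlen - n, "%s", new_host);
                        break;
                    default:             /* unknown specifier: keep it as-is */
                        out[n++] = '%';
                        out[n++] = *p;
                        break;
                }
            }
            else
                out[n++] = *p;
        }
        out[n < outlen ? n : outlen - 1] = '\0';
    }

    int main(void)
    {
        char cmd[256];

        expand_command("/etc/pgpool/follow_primary.sh %m %H %M",
                       1, 0, "db1.example.com", cmd, sizeof(cmd));
        puts(cmd);   /* /etc/pgpool/follow_primary.sh 1 db1.example.com 0 */
        return 0;
    }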
string_append_char(exec_cmd, "\"\""); break; - case 'M': /* old master node id */ - snprintf(port_buf, sizeof(port_buf), "%d", old_master); + case 'M': /* old main node id */ + snprintf(port_buf, sizeof(port_buf), "%d", old_main_node); string_append_char(exec_cmd, port_buf); break; @@ -3096,15 +3096,15 @@ find_primary_node(void) } /* - * First check for "ALWAYS_MASTER" flags exists. If so, do not perform + * First check for "ALWAYS_PRIMARY" flags exists. If so, do not perform * actual primary node check and just returns the node id. */ for (i = 0; i < NUM_BACKENDS; i++) { - if (POOL_ALWAYS_MASTER & BACKEND_INFO(i).flag) + if (POOL_ALWAYS_PRIMARY & BACKEND_INFO(i).flag) { ereport(DEBUG1, - (errmsg("find_primary_node: ALWAYS_MASTER flag found. Returns node id: %d", i))); + (errmsg("find_primary_node: ALWAYS_PRIMARY flag found. Returns node id: %d", i))); return i; } } @@ -3230,7 +3230,7 @@ find_primary_node_repeatedly(void) * fork a follow child */ pid_t -fork_follow_child(int old_master, int new_primary, int old_primary) +fork_follow_child(int old_main_node, int new_primary, int old_primary) { pid_t pid; int i; @@ -3249,8 +3249,8 @@ fork_follow_child(int old_master, int new_primary, int old_primary) bkinfo = pool_get_node_info(i); if (bkinfo->backend_status == CON_DOWN) - trigger_failover_command(i, pool_config->follow_master_command, - old_master, new_primary, old_primary); + trigger_failover_command(i, pool_config->follow_primary_command, + old_main_node, new_primary, old_primary); } exit(0); } @@ -3309,7 +3309,7 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps) /* * Initialize backend status area. From now on, VALID_BACKEND macro can be - * used. (get_next_master_node() uses VALID_BACKEND) + * used. (get_next_main_node() uses VALID_BACKEND) */ for (i = 0; i < MAX_NUM_BACKENDS; i++) @@ -3318,7 +3318,7 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps) } /* initialize Req_info */ - Req_info->master_node_id = get_next_master_node(); + Req_info->main_node_id = get_next_main_node(); Req_info->conn_counter = 0; Req_info->switching = false; Req_info->request_queue_head = Req_info->request_queue_tail = -1; @@ -3785,7 +3785,7 @@ update_backend_quarantine_status(void) /* * The function fetch the current status of all configured backend - * nodes from the MASTER/COORDINATOR watchdog Pgpool-II and synchronize the + * nodes from the LEADER/COORDINATOR watchdog Pgpool-II and synchronize the * local backend states with the cluster wide status of each node. 
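Returning to the trigger_failover_command() switch above: it substitutes % escapes (%H new main host, %m new main node id, %r new main port, %R new main data directory, %M old main node id, among others) into the user-configured command string before execution. The following is a rough, self-contained illustration of that substitution idea for a small subset of escapes; sketch_expand() and its fixed escape set are hypothetical, while the real code appends to pgpool's String buffer and handles many more escapes.

    #include <stdio.h>
    #include <string.h>

    /* Expand a tiny subset of the escapes handled above: %d (failed node id),
     * %m (new main node id), %M (old main node id).  Purely illustrative. */
    static void
    sketch_expand(const char *tmpl, int failed, int new_main, int old_main,
                  char *out, size_t outlen)
    {
        size_t      n = 0;
        const char *p;

        for (p = tmpl; *p && n + 16 < outlen; p++)
        {
            if (*p == '%' && p[1])
            {
                switch (*++p)
                {
                    case 'd': n += snprintf(out + n, outlen - n, "%d", failed);   break;
                    case 'm': n += snprintf(out + n, outlen - n, "%d", new_main); break;
                    case 'M': n += snprintf(out + n, outlen - n, "%d", old_main); break;
                    default:  out[n++] = *p; break;
                }
            }
            else
                out[n++] = *p;
        }
        out[n] = '\0';
    }

For example, sketch_expand("failover.sh %d %M %m", 1, 0, 1, buf, sizeof(buf)) yields "failover.sh 1 1 0".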
 *
 * Latter in the funcrtion after syncing the backend node status the function
@@ -3809,14 +3809,14 @@ sync_backend_from_watchdog(void)
 /*
 * Ask the watchdog to get all the backend states from the
- * Master/Coordinator Pgpool-II node
+ * Leader/Coordinator Pgpool-II node
 */
- WDPGBackendStatus *backendStatus = get_pg_backend_status_from_master_wd_node();
+ WDPGBackendStatus *backendStatus = get_pg_backend_status_from_leader_wd_node();
 if (!backendStatus)
 {
 ereport(WARNING,
- (errmsg("failed to get the backend status from the master watchdog node"),
+ (errmsg("failed to get the backend status from the leader watchdog node"),
 errdetail("using the local backend node status")));
 return;
 }
@@ -3824,21 +3824,21 @@ sync_backend_from_watchdog(void)
 {
 /*
 * -ve node count is returned by watchdog when the node itself is a
- * master and in that case we need to use the loacl backend node
+ * leader and in that case we need to use the local backend node
 * status
 */
 ereport(LOG,
- (errmsg("I am the master watchdog node"),
+ (errmsg("I am the leader watchdog node"),
 errdetail("using the local backend node status")));
 pfree(backendStatus);
 return;
 }
 ereport(LOG,
- (errmsg("master watchdog node \"%s\" returned status for %d backend nodes", backendStatus->nodeName, backendStatus->node_count)));
+ (errmsg("leader watchdog node \"%s\" returned status for %d backend nodes", backendStatus->nodeName, backendStatus->node_count)));
 ereport(DEBUG1,
- (errmsg("primary node on master watchdog node \"%s\" is %d", backendStatus->nodeName, backendStatus->primary_node_id)));
+ (errmsg("primary node on leader watchdog node \"%s\" is %d", backendStatus->nodeName, backendStatus->primary_node_id)));
 /*
@@ -3858,7 +3858,7 @@ sync_backend_from_watchdog(void)
 node_status_was_changed_to_down = true;
 ereport(LOG,
 (errmsg("backend:%d is set to down status", i),
- errdetail("backend:%d is DOWN on cluster master \"%s\"", i, backendStatus->nodeName)));
+ errdetail("backend:%d is DOWN on cluster leader \"%s\"", i, backendStatus->nodeName)));
 down_node_ids[down_node_ids_index++] = i;
 }
 }
@@ -3877,7 +3877,7 @@ sync_backend_from_watchdog(void)
 ereport(LOG,
 (errmsg("backend:%d is set to UP status", i),
- errdetail("backend:%d is UP on cluster master \"%s\"", i, backendStatus->nodeName)));
+ errdetail("backend:%d is UP on cluster leader \"%s\"", i, backendStatus->nodeName)));
 }
 }
@@ -3885,7 +3885,7 @@ sync_backend_from_watchdog(void)
 /*
 * Update primary node id info on the shared memory area if it's different
- * from the one on master watchdog node. This should be done only in streaming
+ * from the one on leader watchdog node. This should be done only in streaming
 * or logical replication mode.
 */
 if (SL_MODE && Req_info->primary_node_id != backendStatus->primary_node_id)
 {
 /* Do not produce this log message if we are starting up the Pgpool-II */
 if (processState != INITIALIZING)
 ereport(LOG,
- (errmsg("primary node:%d on master watchdog node \"%s\" is different from local primary node:%d",
+ (errmsg("primary node:%d on leader watchdog node \"%s\" is different from local primary node:%d",
 backendStatus->primary_node_id, backendStatus->nodeName, Req_info->primary_node_id)));
 /*
- * master node returns primary_node_id = -1 when the primary node is
- * in quarantine state on the master. So we will not update our
+ * leader node returns primary_node_id = -1 when the primary node is
+ * in quarantine state on the leader.
So we will not update our * primary node id when the status of current primary node is not - * CON_DOWN while primary_node_id sent by master watchdong node is -1 + * CON_DOWN while primary_node_id sent by leader watchdong node is -1 * * Note that Req_info->primary_node_id could be -2, which is the * initial value. So we need to avoid crash by checking the value is @@ -3910,7 +3910,7 @@ sync_backend_from_watchdog(void) backendStatus->primary_node_id == -1 && BACKEND_INFO(Req_info->primary_node_id).backend_status != CON_DOWN) { ereport(LOG, - (errmsg("primary node:%d on master watchdog node \"%s\" seems to be quarantined", + (errmsg("primary node:%d on leader watchdog node \"%s\" seems to be quarantined", Req_info->primary_node_id, backendStatus->nodeName), errdetail("keeping the current primary"))); } @@ -3925,7 +3925,7 @@ sync_backend_from_watchdog(void) if (reload_maste_node_id) { - Req_info->master_node_id = get_next_master_node(); + Req_info->main_node_id = get_next_main_node(); } /* We don't need to do anything else if the Pgpool-II is starting up */ diff --git a/src/main/pool_internal_comms.c b/src/main/pool_internal_comms.c index 89a532940..e8b33105f 100644 --- a/src/main/pool_internal_comms.c +++ b/src/main/pool_internal_comms.c @@ -209,7 +209,7 @@ degenerate_backend_set_ex(int *node_id_set, int count, unsigned char flags, bool } else if (res == FAILOVER_RES_WILL_BE_DONE) { - /* we will receive a sync request from master watchdog node */ + /* we will receive a sync request from leader watchdog node */ ereport(LOG, (errmsg("degenerate backend request for %d node(s) from pid [%d], will be handled by watchdog" ,node_count, getpid()))); diff --git a/src/pcp_con/pcp_worker.c b/src/pcp_con/pcp_worker.c index 24edf19f8..b315afa61 100644 --- a/src/pcp_con/pcp_worker.c +++ b/src/pcp_con/pcp_worker.c @@ -872,10 +872,10 @@ inform_node_info(PCP_CONNECTION * frontend, char *buf) } else { - if (Req_info->master_node_id == node_id) - role = ROLE_MASTER; + if (Req_info->main_node_id == node_id) + role = ROLE_MAIN; else - role = ROLE_SLAVE; + role = ROLE_REPLICA; } snprintf(role_str, sizeof(role_str), "%d", role); diff --git a/src/pcp_con/recovery.c b/src/pcp_con/recovery.c index faee71a2d..34f746f8c 100644 --- a/src/pcp_con/recovery.c +++ b/src/pcp_con/recovery.c @@ -45,7 +45,7 @@ #define SECOND_STAGE 1 static void exec_checkpoint(PGconn *conn); -static void exec_recovery(PGconn *conn, BackendInfo * master_backend, BackendInfo * recovery_backend, char stage, int recovery_node); +static void exec_recovery(PGconn *conn, BackendInfo * main_backend, BackendInfo * recovery_backend, char stage, int recovery_node); static void exec_remote_start(PGconn *conn, BackendInfo * backend); static PGconn *connect_backend_libpq(BackendInfo * backend); static void check_postmaster_started(BackendInfo * backend); @@ -57,7 +57,7 @@ extern volatile sig_atomic_t pcp_worker_wakeup_request; /* * Start online recovery. * "recovery_node" is the node to be recovered. - * Master or primary node is chosen in this function. + * Main or primary node is chosen in this function. */ void start_recovery(int recovery_node) @@ -85,8 +85,8 @@ start_recovery(int recovery_node) ereport(ERROR, (errmsg("node recovery failed, node id: %d is alive", recovery_node))); - /* select master/primary node */ - node_id = MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID; + /* select main/primary node */ + node_id = NATIVE_REPLICATION ? 
PRIMARY_NODE_ID : REAL_MAIN_NODE_ID; backend = &pool_config->backend_desc->backend_info[node_id]; /* get node info to be recovered */ @@ -95,7 +95,7 @@ start_recovery(int recovery_node) conn = connect_backend_libpq(backend); if (conn == NULL) ereport(ERROR, - (errmsg("node recovery failed, unable to connect to master node: %d ", node_id))); + (errmsg("node recovery failed, unable to connect to main node: %d ", node_id))); PG_TRY(); { @@ -234,7 +234,7 @@ exec_checkpoint(PGconn *conn) * Call pgpool_recovery() function. */ static void -exec_recovery(PGconn *conn, BackendInfo * master_backend, BackendInfo * recovery_backend, char stage, int recovery_node) +exec_recovery(PGconn *conn, BackendInfo * main_backend, BackendInfo * recovery_backend, char stage, int recovery_node) { PGresult *result; char *hostname; @@ -263,7 +263,7 @@ exec_recovery(PGconn *conn, BackendInfo * master_backend, BackendInfo * recovery script, hostname, recovery_backend->backend_data_directory, - master_backend->backend_port, + main_backend->backend_port, recovery_node, recovery_backend->backend_port ); diff --git a/src/pgpool.spec b/src/pgpool.spec index 9daf31fa3..211d0a07c 100644 --- a/src/pgpool.spec +++ b/src/pgpool.spec @@ -269,7 +269,7 @@ fi %{_sysconfdir}/%{short_name}/pool_hba.conf.sample %defattr(755,postgres,postgres,-) %{_sysconfdir}/%{short_name}/failover.sh.sample -%{_sysconfdir}/%{short_name}/follow_master.sh.sample +%{_sysconfdir}/%{short_name}/follow_primary.sh.sample %{_sysconfdir}/%{short_name}/pgpool_remote_start.sample %{_sysconfdir}/%{short_name}/recovery_1st_stage.sample %{_sysconfdir}/%{short_name}/recovery_2nd_stage.sample diff --git a/src/protocol/CommandComplete.c b/src/protocol/CommandComplete.c index 41ec0ab65..f4339a280 100644 --- a/src/protocol/CommandComplete.c +++ b/src/protocol/CommandComplete.c @@ -121,11 +121,11 @@ CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool } /* - * Otherwise just read from master node. + * Otherwise just read from main node. */ else { - con = MASTER(backend); + con = MAIN(backend); if (pool_read(con, &len, sizeof(len)) < 0) return POOL_END; @@ -196,7 +196,7 @@ CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool } query = session_context->query_context->query_w_hex; node = pool_get_parse_tree(); - state = TSTATE(backend, MASTER_NODE_ID); + state = TSTATE(backend, MAIN_NODE_ID); pool_handle_query_cache(backend, query, node, state); } } @@ -350,7 +350,7 @@ handle_query_context(POOL_CONNECTION_POOL * backend) /* Is this a temporary table? */ if (stmt->relation->relpersistence == 't') { - if (TSTATE(backend, MASTER_NODE_ID ) == 'T') /* Are we inside a transaction? */ + if (TSTATE(backend, MAIN_NODE_ID ) == 'T') /* Are we inside a transaction? */ { state = TEMP_TABLE_CREATING; } @@ -375,7 +375,7 @@ handle_query_context(POOL_CONNECTION_POOL * backend) ListCell *cell; ListCell *next; - if (TSTATE(backend, MASTER_NODE_ID ) == 'T') /* Are we inside a transaction? */ + if (TSTATE(backend, MAIN_NODE_ID ) == 'T') /* Are we inside a transaction? */ { state = TEMP_TABLE_DROPPING; } @@ -440,14 +440,14 @@ static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION * frontend, POOL_CONNE rows = extract_ntuples(packet); /* - * Save number of affected tuples of master node. + * Save number of affected tuples of main node. 
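Before the code resumes: the check that follows compares each backend's affected-row count against the main node's, which is what makes mismatched tuples detectable in replication mode. A compact, self-contained sketch of that consistency rule, with stand-in arrays in place of pgpool's session context (illustrative only):

    #include <stdbool.h>

    /* Return -1 when all live backends report the same number of affected
     * tuples as the main node, otherwise the id of the first disagreeing
     * node.  ntuples[] and live[] are stand-ins for the session state;
     * the real code also builds an error message for the frontend. */
    static int
    sketch_first_mismatch(const int *ntuples, const bool *live,
                          int nbackends, int main_node)
    {
        int i;

        for (i = 0; i < nbackends; i++)
        {
            if (i == main_node || !live[i])
                continue;
            if (ntuples[i] != ntuples[main_node])
                return i;
        }
        return -1;
    }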
*/ - session_context->ntuples[MASTER_NODE_ID] = rows; + session_context->ntuples[MAIN_NODE_ID] = rows; for (i = 0; i < NUM_BACKENDS; i++) { - if (!IS_MASTER_NODE_ID(i)) + if (!IS_MAIN_NODE_ID(i)) { if (!VALID_BACKEND(i)) { @@ -468,7 +468,7 @@ static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION * frontend, POOL_CONNE { ereport(DEBUG1, (errmsg("processing command complete"), - errdetail("length does not match between backends master(%d) %d th backend(%d)", + errdetail("length does not match between backends main(%d) %d th backend(%d)", len, i, packetlen))); } @@ -500,7 +500,7 @@ static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION * frontend, POOL_CONNE string_append_char(msg, "\""); pool_send_error_message(frontend, MAJOR(backend), "XX001", msg->data, "", - "check data consistency between master and other db node", __FILE__, __LINE__); + "check data consistency between main and other db node", __FILE__, __LINE__); ereport(LOG, (errmsg("%s", msg->data))); free_string(msg); diff --git a/src/protocol/child.c b/src/protocol/child.c index 4b2bd43e7..7b167cb4f 100644 --- a/src/protocol/child.c +++ b/src/protocol/child.c @@ -400,7 +400,7 @@ do_child(int *fds) /* * show ps status */ - sp = MASTER_CONNECTION(backend)->sp; + sp = MAIN_CONNECTION(backend)->sp; snprintf(psbuf, sizeof(psbuf), "%s %s %s idle", sp->user, sp->database, remote_ps_data); set_ps_display(psbuf, false); @@ -492,7 +492,7 @@ backend_cleanup(POOL_CONNECTION * volatile *frontend, POOL_CONNECTION_POOL * vol if (backend == NULL) return false; - sp = MASTER_CONNECTION(backend)->sp; + sp = MAIN_CONNECTION(backend)->sp; /* * cach connection if connection cache configuration parameter is enabled @@ -809,8 +809,8 @@ connect_using_existing_connection(POOL_CONNECTION * frontend, if (VALID_BACKEND(i)) if (do_command(frontend, CONNECTION(backend, i), command_buf, MAJOR(backend), - MASTER_CONNECTION(backend)->pid, - MASTER_CONNECTION(backend)->key, 0) != POOL_CONTINUE) + MAIN_CONNECTION(backend)->pid, + MAIN_CONNECTION(backend)->key, 0) != POOL_CONTINUE) { ereport(ERROR, (errmsg("unable to process command for backend connection"), @@ -818,7 +818,7 @@ connect_using_existing_connection(POOL_CONNECTION * frontend, } } - pool_add_param(&MASTER(backend)->params, "application_name", sp->application_name); + pool_add_param(&MAIN(backend)->params, "application_name", sp->application_name); set_application_name_with_string(sp->application_name); } @@ -835,7 +835,7 @@ connect_using_existing_connection(POOL_CONNECTION * frontend, len = htonl(5); pool_write(frontend, &len, sizeof(len)); - tstate = TSTATE(backend, MASTER_NODE_ID); + tstate = TSTATE(backend, MAIN_NODE_ID); pool_write(frontend, &tstate, 1); } @@ -1144,19 +1144,19 @@ static RETSIGTYPE close_idle_connection(int sig) for (j = 0; j < pool_config->max_pool; j++, p++) { - if (!MASTER_CONNECTION(p)) + if (!MAIN_CONNECTION(p)) continue; - if (!MASTER_CONNECTION(p)->sp) + if (!MAIN_CONNECTION(p)->sp) continue; - if (MASTER_CONNECTION(p)->sp->user == NULL) + if (MAIN_CONNECTION(p)->sp->user == NULL) continue; - if (MASTER_CONNECTION(p)->closetime > 0) /* idle connection? */ + if (MAIN_CONNECTION(p)->closetime > 0) /* idle connection? 
*/ { #ifdef NOT_USED ereport(DEBUG1, (errmsg("closing idle connection"), - errdetail("user: %s database: %s", MASTER_CONNECTION(p)->sp->user, MASTER_CONNECTION(p)->sp->database))); + errdetail("user: %s database: %s", MAIN_CONNECTION(p)->sp->user, MAIN_CONNECTION(p)->sp->database))); #endif pool_send_frontend_exits(p); @@ -1231,7 +1231,7 @@ send_params(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend) sendlen; index = 0; - while (pool_get_param(&MASTER(backend)->params, index++, &name, &value) == 0) + while (pool_get_param(&MAIN(backend)->params, index++, &name, &value) == 0) { pool_write(frontend, "S", 1); len = sizeof(sendlen) + strlen(name) + 1 + strlen(value) + 1; @@ -1392,7 +1392,7 @@ check_stop_request(void) } /* - * Initialize my backend status and master node id. + * Initialize my backend status and main node id. * We copy the backend status to private area so that * they are not changed while I am alive. */ @@ -1411,7 +1411,7 @@ pool_initialize_private_backend_status(void) my_backend_status[i] = &private_backend_status[i]; } - my_master_node_id = REAL_MASTER_NODE_ID; + my_main_node_id = REAL_MAIN_NODE_ID; } static void @@ -1968,7 +1968,7 @@ retry_startup: * however we should make sure that the startup packet contents are * identical. OPTION data and others might be different. */ - if (sp->len != MASTER_CONNECTION(backend)->sp->len) + if (sp->len != MAIN_CONNECTION(backend)->sp->len) { ereport(DEBUG1, (errmsg("selecting backend connection"), @@ -1976,7 +1976,7 @@ retry_startup: found = 0; } - else if (memcmp(sp->startup_packet, MASTER_CONNECTION(backend)->sp->startup_packet, sp->len) != 0) + else if (memcmp(sp->startup_packet, MAIN_CONNECTION(backend)->sp->startup_packet, sp->len) != 0) { ereport(DEBUG1, (errmsg("selecting backend connection"), diff --git a/src/protocol/pool_connection_pool.c b/src/protocol/pool_connection_pool.c index cb5026713..f00b5638c 100644 --- a/src/protocol/pool_connection_pool.c +++ b/src/protocol/pool_connection_pool.c @@ -115,18 +115,18 @@ pool_get_cp(char *user, char *database, int protoMajor, int check_socket) for (i = 0; i < pool_config->max_pool; i++) { - if (MASTER_CONNECTION(connection_pool) && - MASTER_CONNECTION(connection_pool)->sp && - MASTER_CONNECTION(connection_pool)->sp->major == protoMajor && - MASTER_CONNECTION(connection_pool)->sp->user != NULL && - strcmp(MASTER_CONNECTION(connection_pool)->sp->user, user) == 0 && - strcmp(MASTER_CONNECTION(connection_pool)->sp->database, database) == 0) + if (MAIN_CONNECTION(connection_pool) && + MAIN_CONNECTION(connection_pool)->sp && + MAIN_CONNECTION(connection_pool)->sp->major == protoMajor && + MAIN_CONNECTION(connection_pool)->sp->user != NULL && + strcmp(MAIN_CONNECTION(connection_pool)->sp->user, user) == 0 && + strcmp(MAIN_CONNECTION(connection_pool)->sp->database, database) == 0) { int sock_broken = 0; int j; /* mark this connection is under use */ - MASTER_CONNECTION(connection_pool)->closetime = 0; + MAIN_CONNECTION(connection_pool)->closetime = 0; for (j = 0; j < NUM_BACKENDS; j++) { connection_pool->info[j].counter++; @@ -259,7 +259,7 @@ pool_create_cp(void) for (i = 0; i < pool_config->max_pool; i++) { - if (MASTER_CONNECTION(p) == NULL) + if (MAIN_CONNECTION(p) == NULL) { ret = new_connection(p); if (ret) @@ -277,7 +277,7 @@ pool_create_cp(void) * discard it. 
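The eviction this comment announces picks the slot with the smallest closetime, i.e. the pooled connection that has been idle the longest. A self-contained sketch of that victim selection; MAX_POOL and slot_closetime are stand-ins for pool_config->max_pool and MAIN_CONNECTION(p)->closetime:

    #include <time.h>

    #define MAX_POOL 4                       /* stand-in for pool_config->max_pool */
    static time_t slot_closetime[MAX_POOL];  /* stand-in for per-slot closetime */

    /* Pick the slot that has been idle the longest (smallest closetime),
     * the victim discarded below when every pool slot is occupied. */
    static int
    sketch_oldest_slot(void)
    {
        int oldest = 0;
        int i;

        for (i = 1; i < MAX_POOL; i++)
        {
            if (slot_closetime[i] < slot_closetime[oldest])
                oldest = i;
        }
        return oldest;
    }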
*/ oldestp = p = pool_connection_pool; - closetime = MASTER_CONNECTION(p)->closetime; + closetime = MAIN_CONNECTION(p)->closetime; pool_index = 0; for (i = 0; i < pool_config->max_pool; i++) @@ -285,13 +285,13 @@ pool_create_cp(void) ereport(DEBUG1, (errmsg("creating connection pool"), errdetail("user: %s database: %s closetime: %ld", - MASTER_CONNECTION(p)->sp->user, - MASTER_CONNECTION(p)->sp->database, - MASTER_CONNECTION(p)->closetime))); + MAIN_CONNECTION(p)->sp->user, + MAIN_CONNECTION(p)->sp->database, + MAIN_CONNECTION(p)->closetime))); - if (MASTER_CONNECTION(p)->closetime < closetime) + if (MAIN_CONNECTION(p)->closetime < closetime) { - closetime = MASTER_CONNECTION(p)->closetime; + closetime = MAIN_CONNECTION(p)->closetime; oldestp = p; pool_index = i; } @@ -305,8 +305,8 @@ pool_create_cp(void) (errmsg("creating connection pool"), errdetail("discarding old %zd th connection. user: %s database: %s", oldestp - pool_connection_pool, - MASTER_CONNECTION(p)->sp->user, - MASTER_CONNECTION(p)->sp->database))); + MAIN_CONNECTION(p)->sp->user, + MAIN_CONNECTION(p)->sp->database))); for (i = 0; i < NUM_BACKENDS; i++) { @@ -360,14 +360,14 @@ pool_connection_pool_timer(POOL_CONNECTION_POOL * backend) /* look for any other timeout */ for (i = 0; i < pool_config->max_pool; i++, p++) { - if (!MASTER_CONNECTION(p)) + if (!MAIN_CONNECTION(p)) continue; - if (!MASTER_CONNECTION(p)->sp) + if (!MAIN_CONNECTION(p)->sp) continue; - if (MASTER_CONNECTION(p)->sp->user == NULL) + if (MAIN_CONNECTION(p)->sp->user == NULL) continue; - if (p != backend && MASTER_CONNECTION(p)->closetime) + if (p != backend && MAIN_CONNECTION(p)->closetime) return; } @@ -409,30 +409,30 @@ pool_backend_timer(void) for (i = 0; i < pool_config->max_pool; i++, p++) { - if (!MASTER_CONNECTION(p)) + if (!MAIN_CONNECTION(p)) continue; - if (!MASTER_CONNECTION(p)->sp) + if (!MAIN_CONNECTION(p)->sp) continue; - if (MASTER_CONNECTION(p)->sp->user == NULL) + if (MAIN_CONNECTION(p)->sp->user == NULL) continue; /* timer expire? 
*/ - if (MASTER_CONNECTION(p)->closetime) + if (MAIN_CONNECTION(p)->closetime) { int freed = 0; ereport(DEBUG1, (errmsg("backend timer handler called"), errdetail("expire time: %ld", - MASTER_CONNECTION(p)->closetime + pool_config->connection_life_time))); + MAIN_CONNECTION(p)->closetime + pool_config->connection_life_time))); - if (now >= (MASTER_CONNECTION(p)->closetime + pool_config->connection_life_time)) + if (now >= (MAIN_CONNECTION(p)->closetime + pool_config->connection_life_time)) { /* discard expired connection */ ereport(DEBUG1, (errmsg("backend timer handler called"), errdetail("expired user: \"%s\" database: \"%s\"", - MASTER_CONNECTION(p)->sp->user, MASTER_CONNECTION(p)->sp->database))); + MAIN_CONNECTION(p)->sp->user, MAIN_CONNECTION(p)->sp->database))); pool_send_frontend_exits(p); for (j = 0; j < NUM_BACKENDS; j++) @@ -457,8 +457,8 @@ pool_backend_timer(void) else { /* look for nearest timer */ - if (MASTER_CONNECTION(p)->closetime < nearest) - nearest = MASTER_CONNECTION(p)->closetime; + if (MAIN_CONNECTION(p)->closetime < nearest) + nearest = MAIN_CONNECTION(p)->closetime; } } } @@ -931,16 +931,16 @@ static POOL_CONNECTION_POOL * new_connection(POOL_CONNECTION_POOL * p) /* set down status to local status area */ *(my_backend_status[i]) = CON_DOWN; - /* if master_node_id is not updated, then update it */ - if (Req_info->master_node_id == i) + /* if main_node_id is not updated, then update it */ + if (Req_info->main_node_id == i) { - int old_master = Req_info->master_node_id; + int old_main = Req_info->main_node_id; - Req_info->master_node_id = get_next_master_node(); + Req_info->main_node_id = get_next_main_node(); ereport(LOG, - (errmsg("master node %d is down. Update master node to %d", - old_master, Req_info->master_node_id))); + (errmsg("main node %d is down. Update main node to %d", + old_main, Req_info->main_node_id))); } /* @@ -1048,11 +1048,11 @@ close_all_backend_connections(void) for (i = 0; i < pool_config->max_pool; i++, p++) { - if (!MASTER_CONNECTION(p)) + if (!MAIN_CONNECTION(p)) continue; - if (!MASTER_CONNECTION(p)->sp) + if (!MAIN_CONNECTION(p)->sp) continue; - if (MASTER_CONNECTION(p)->sp->user == NULL) + if (MAIN_CONNECTION(p)->sp->user == NULL) continue; pool_send_frontend_exits(p); } diff --git a/src/protocol/pool_pg_utils.c b/src/protocol/pool_pg_utils.c index 50bcd4111..9a4114131 100644 --- a/src/protocol/pool_pg_utils.c +++ b/src/protocol/pool_pg_utils.c @@ -319,7 +319,7 @@ select_load_balancing_node(void) */ if (SL_MODE && pool_config->redirect_dbnames) { - char *database = MASTER_CONNECTION(ses->backend)->sp->database; + char *database = MAIN_CONNECTION(ses->backend)->sp->database; /* * Check to see if the database matches any of @@ -346,7 +346,7 @@ select_load_balancing_node(void) */ if (SL_MODE && pool_config->redirect_app_names) { - char *app_name = MASTER_CONNECTION(ses->backend)->sp->application_name; + char *app_name = MAIN_CONNECTION(ses->backend)->sp->application_name; /* * Check only if application name is set. Old applications may not @@ -417,7 +417,7 @@ select_load_balancing_node(void) } /* Choose a backend in random manner with weight */ - selected_slot = MASTER_NODE_ID; + selected_slot = MAIN_NODE_ID; total_weight = 0.0; for (i = 0; i < NUM_BACKENDS; i++) @@ -625,14 +625,14 @@ Pgversion(POOL_CONNECTION_POOL * backend) * standby: any of standby node * numeric: physical node id * - * If specified node does exist, returns MASTER_NODE_ID. If "standby" is + * If specified node does exist, returns MAIN_NODE_ID. 
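As a side note on the doc comment being rewritten here: choose_db_node_id() maps a redirect token to a node id. A simplified, hypothetical sketch of the mapping the comment describes ("primary" to the primary node, "standby" to -1 so the caller picks a standby itself, a number to that node id, anything else to the main node); the two constants are stand-ins for PRIMARY_NODE_ID and MAIN_NODE_ID:

    #include <stdlib.h>
    #include <string.h>

    #define SKETCH_PRIMARY_NODE 0   /* stand-in for PRIMARY_NODE_ID */
    #define SKETCH_MAIN_NODE    0   /* stand-in for MAIN_NODE_ID */

    static int
    sketch_choose_db_node_id(const char *str)
    {
        if (strcmp(str, "primary") == 0)
            return SKETCH_PRIMARY_NODE;
        if (strcmp(str, "standby") == 0)
            return -1;              /* caller chooses one of the standbys */
        if (str[0] >= '0' && str[0] <= '9')
            return atoi(str);       /* physical node id */
        return SKETCH_MAIN_NODE;
    }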
If "standby" is * specified, returns -1. Caller should choose one of standby nodes * appropriately. */ static int choose_db_node_id(char *str) { - int node_id = MASTER_NODE_ID; + int node_id = MAIN_NODE_ID; if (!strcmp("primary", str) && PRIMARY_NODE_ID >= 0) { diff --git a/src/protocol/pool_process_query.c b/src/protocol/pool_process_query.c index ca6c172f4..43bb4585f 100644 --- a/src/protocol/pool_process_query.c +++ b/src/protocol/pool_process_query.c @@ -83,15 +83,15 @@ static int reset_backend(POOL_CONNECTION_POOL * backend, int qcnt); static char *get_insert_command_table_name(InsertStmt *node); static bool is_cache_empty(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend); static bool is_panic_or_fatal_error(char *message, int major); -static int extract_message(POOL_CONNECTION * master, char *error_code, int major, char class, bool unread); -static int detect_postmaster_down_error(POOL_CONNECTION * master, int major); +static int extract_message(POOL_CONNECTION * backend, char *error_code, int major, char class, bool unread); +static int detect_postmaster_down_error(POOL_CONNECTION * backend, int major); static bool is_internal_transaction_needed(Node *node); static bool pool_has_insert_lock(void); static POOL_STATUS add_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table); static bool has_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table, bool for_update); static POOL_STATUS insert_oid_into_insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table); static POOL_STATUS read_packets_and_process(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int reset_request, int *state, short *num_fields, bool *cont); -static bool is_all_slaves_command_complete(unsigned char *kind_list, int num_backends, int master); +static bool is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int main_node); static bool pool_process_notice_message_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int backend_idx, char kind); /* @@ -297,11 +297,11 @@ pool_process_query(POOL_CONNECTION * frontend, else { /* - * If we have pending data in master, we need to process + * If we have pending data in main, we need to process * it */ - if (pool_ssl_pending(MASTER(backend)) || - !pool_read_buffer_is_empty(MASTER(backend))) + if (pool_ssl_pending(MAIN(backend)) || + !pool_read_buffer_is_empty(MAIN(backend))) { status = ProcessBackendResponse(frontend, backend, &state, &num_fields); if (status != POOL_CONTINUE) @@ -318,10 +318,10 @@ pool_process_query(POOL_CONNECTION * frontend, !pool_read_buffer_is_empty(CONNECTION(backend, i))) { /* - * If we have pending data in master, we need + * If we have pending data in main, we need * to process it */ - if (IS_MASTER_NODE_ID(i)) + if (IS_MAIN_NODE_ID(i)) { status = ProcessBackendResponse(frontend, backend, &state, &num_fields); if (status != POOL_CONTINUE) @@ -335,7 +335,7 @@ pool_process_query(POOL_CONNECTION * frontend, char *string; /* - * If master does not have pending data, + * If main does not have pending data, * we discard one packet from other * backend */ @@ -344,21 +344,21 @@ pool_process_query(POOL_CONNECTION * frontend, if (kind == 'A') { - if (MASTER_SLAVE) + if (NATIVE_REPLICATION) { int sendlen; /* - * In master slave mode we may + * In native replication mode we may * send the query to the standby * node and the NOTIFY comes back * only from primary node. 
But
 * since we have sent the query to
 * the standby, so the current
- * MASTER_NODE_ID will be pointing
+ * MAIN_NODE_ID will be pointing
 * to the standby node. And we
 * will get stuck if we keep
- * waiting for the current master
+ * waiting for the current main
 * node (standby) in this case to
 * send us the NOTIFY message. see
 * "0000116: LISTEN Notifications
@@ -387,21 +387,21 @@ pool_process_query(POOL_CONNECTION * frontend,
 * sent to all backends. However
 * the order of arrival of
 * 'Notification response' is not
- * necessarily the master first
- * and then slaves. So if it
- * arrives slave first, we should
- * try to read from master, rather
+ * necessarily the main first
+ * and then standbys. So if it
+ * arrives standby first, we should
+ * try to read from main, rather
 * than just discard it.
 */
 pool_unread(CONNECTION(backend, i), &kind, sizeof(kind));
 ereport(LOG,
 (errmsg("pool process query"),
- errdetail("received %c packet from backend %d. Don't dicard and read %c packet from master", kind, i, kind)));
+ errdetail("received %c packet from backend %d. Don't discard and read %c packet from main", kind, i, kind)));
- pool_read_with_error(CONNECTION(backend, MASTER_NODE_ID), &kind, sizeof(kind),
+ pool_read_with_error(CONNECTION(backend, MAIN_NODE_ID), &kind, sizeof(kind),
 "reading message kind from backend");
- pool_unread(CONNECTION(backend, MASTER_NODE_ID), &kind, sizeof(kind));
+ pool_unread(CONNECTION(backend, MAIN_NODE_ID), &kind, sizeof(kind));
 }
 }
 else if (SL_MODE)
@@ -704,13 +704,13 @@ SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
 int sendlen;
 int i;
- pool_read(MASTER(backend), &len, sizeof(len));
+ pool_read(MAIN(backend), &len, sizeof(len));
 len = ntohl(len);
 len -= 4;
 len1 = len;
- p = pool_read2(MASTER(backend), len);
+ p = pool_read2(MAIN(backend), len);
 if (p == NULL)
 ereport(ERROR,
 (errmsg("unable to forward message to frontend"),
@@ -719,11 +719,11 @@ SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
 memcpy(p1, p, len);
 /*
- * If we received a notification message in master/slave mode, other
+ * If we received a notification message in native replication mode, other
 * backends will not receive the message. So we should skip other nodes
 * otherwise we will hang in pool_read.
*/
- if (!MASTER_SLAVE || kind != 'A')
+ if (!NATIVE_REPLICATION || kind != 'A')
 {
 for (i = 0; i < NUM_BACKENDS; i++)
 {
@@ -732,7 +732,7 @@ SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
 continue;
#endif
- if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i))
+ if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i))
 {
#ifdef NOT_USED
 if (use_sync_map == POOL_SYNC_MAP_IS_VALID && !pool_is_set_sync_map(i))
@@ -755,7 +755,7 @@ SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
 if (len != len1)
 {
 ereport(DEBUG1,
- (errmsg("SimpleForwardToFrontend: length does not match between backends master(%d) %d th backend(%d) kind:(%c)",
+ (errmsg("SimpleForwardToFrontend: length does not match between backends main(%d) %d th backend(%d) kind:(%c)",
 len, i, len1, kind)));
 }
 }
@@ -904,7 +904,7 @@ ParameterStatus(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
 (errmsg("unable to process parameter status"),
 errdetail("read on backend failed")));
- len = len_array[MASTER_NODE_ID];
+ len = len_array[MAIN_NODE_ID];
 sendlen = htonl(len);
 pool_write(frontend, &sendlen, sizeof(sendlen));
@@ -927,7 +927,7 @@ ParameterStatus(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
 (errmsg("process parameter status"),
 errdetail("backend:%d name:\"%s\" value:\"%s\"", i, name, value)));
- if (IS_MASTER_NODE_ID(i))
+ if (IS_MAIN_NODE_ID(i))
 {
 len1 = len;
 int pos;
@@ -942,7 +942,7 @@ ParameterStatus(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
 }
#ifdef DEBUG
- pool_param_debug_print(&MASTER(backend)->params);
+ pool_param_debug_print(&MAIN(backend)->params);
#endif
 }
 }
@@ -1613,7 +1613,7 @@ retry_read_packet:
 * from backend and discard it until we get Error response.
 *
 * We need to sync transaction status in transaction block.
- * SELECT query is sent to master only.
+ * SELECT query is sent to main node only.
 * If SELECT is error, we must abort transaction on other nodes.
 */
 void
@@ -1667,11 +1667,11 @@ do_error_command(POOL_CONNECTION * backend, int major)
 /*
 * Send invalid portal execution to specified DB node to abort current
- * transaction. Pgpool-II sends a SELECT query to master node only in
- * load balance mode. Problem is, if the query failed, master node
+ * transaction. Pgpool-II sends a SELECT query to main node only in
+ * load balance mode. Problem is, if the query failed, main node
 * goes to abort status while other nodes remain normal status. To
 * sync transaction status in each node, we send error query to other
- * than master node to ket them go into abort status.
+ * than main node to let them go into abort status.
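To make that recovery step concrete: once the main node's transaction is aborted, every other live node is driven into the same state so that per-node transaction statuses agree again. A minimal sketch under that assumption; backend_alive and send_error_query() are stand-ins for pgpool's liveness test and the invalid portal execution the comment describes:

    #include <stdbool.h>

    static bool backend_alive[8];        /* stand-in liveness flags */

    static void
    send_error_query(int node)           /* hypothetical stand-in */
    {
        /* the real code executes an invalid portal on node "node" */
        (void) node;
    }

    /* Push every live node other than the main one into abort status. */
    static void
    sketch_sync_abort_status(int nbackends, int main_node)
    {
        int i;

        for (i = 0; i < nbackends; i++)
        {
            if (i == main_node || !backend_alive[i])
                continue;
            send_error_query(i);
        }
    }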
*/ void do_error_execute_command(POOL_CONNECTION_POOL * backend, int node_id, int major) @@ -2671,30 +2671,30 @@ insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *qu } } - per_node_statement_log(backend, MASTER_NODE_ID, qbuf); + per_node_statement_log(backend, MAIN_NODE_ID, qbuf); if (lock_kind == 1) { if (pool_get_session_context(true) && pool_is_doing_extended_query_message()) { - do_query(MASTER(backend), qbuf, &result, MAJOR(backend)); + do_query(MAIN(backend), qbuf, &result, MAJOR(backend)); } else { - status = do_command(frontend, MASTER(backend), qbuf, MAJOR(backend), MASTER_CONNECTION(backend)->pid, - MASTER_CONNECTION(backend)->key, 0); + status = do_command(frontend, MAIN(backend), qbuf, MAJOR(backend), MAIN_CONNECTION(backend)->pid, + MAIN_CONNECTION(backend)->key, 0); } } else if (lock_kind == 2) { - do_query(MASTER(backend), qbuf, &result, MAJOR(backend)); + do_query(MAIN(backend), qbuf, &result, MAJOR(backend)); } else { POOL_SELECT_RESULT *result; /* issue row lock command */ - do_query(MASTER(backend), qbuf, &result, MAJOR(backend)); + do_query(MAIN(backend), qbuf, &result, MAJOR(backend)); if (status == POOL_CONTINUE) { /* does oid exist in insert_lock table? */ @@ -2707,10 +2707,10 @@ insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *qu status = add_lock_target(frontend, backend, table); if (status == POOL_CONTINUE) { - per_node_statement_log(backend, MASTER_NODE_ID, qbuf); + per_node_statement_log(backend, MAIN_NODE_ID, qbuf); /* issue row lock command */ - do_query(MASTER(backend), qbuf, &result, MAJOR(backend)); + do_query(MAIN(backend), qbuf, &result, MAJOR(backend)); if (!(result && result->data[0] && !strcmp(result->data[0], "1"))) ereport(FATAL, @@ -2729,18 +2729,18 @@ insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *qu /* try to lock table finally, if row lock failed */ lock_kind = 1; snprintf(qbuf, sizeof(qbuf), "LOCK TABLE %s IN SHARE ROW EXCLUSIVE MODE", table); - per_node_statement_log(backend, MASTER_NODE_ID, qbuf); + per_node_statement_log(backend, MAIN_NODE_ID, qbuf); if (pool_get_session_context(true) && pool_is_doing_extended_query_message()) { - do_query(MASTER(backend), qbuf, &result, MAJOR(backend)); + do_query(MAIN(backend), qbuf, &result, MAJOR(backend)); if (result) free_select_result(result); } else { - status = do_command(frontend, MASTER(backend), qbuf, MAJOR(backend), MASTER_CONNECTION(backend)->pid, - MASTER_CONNECTION(backend)->key, 0); + status = do_command(frontend, MAIN(backend), qbuf, MAJOR(backend), MAIN_CONNECTION(backend)->pid, + MAIN_CONNECTION(backend)->key, 0); } } } @@ -2754,11 +2754,11 @@ insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *qu for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i)) + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i)) { if (deadlock_detected) status = do_command(frontend, CONNECTION(backend, i), POOL_ERROR_QUERY, PROTO_MAJOR_V3, - MASTER_CONNECTION(backend)->pid, MASTER_CONNECTION(backend)->key, 0); + MAIN_CONNECTION(backend)->pid, MAIN_CONNECTION(backend)->key, 0); else { if (lock_kind == 1) @@ -2766,14 +2766,14 @@ insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *qu per_node_statement_log(backend, i, qbuf); if (pool_get_session_context(true) && pool_is_doing_extended_query_message()) { - do_query(MASTER(backend), qbuf, &result, MAJOR(backend)); + do_query(MAIN(backend), qbuf, &result, MAJOR(backend)); if (result) 
free_select_result(result); } else { status = do_command(frontend, CONNECTION(backend, i), qbuf, PROTO_MAJOR_V3, - MASTER_CONNECTION(backend)->pid, MASTER_CONNECTION(backend)->key, 0); + MAIN_CONNECTION(backend)->pid, MAIN_CONNECTION(backend)->key, 0); } } else if (lock_kind == 2) @@ -2845,10 +2845,10 @@ static POOL_STATUS add_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_P ereport(LOG, (errmsg("add lock target: not lock the row where reloid is 0"))); - per_node_statement_log(backend, MASTER_NODE_ID, "LOCK TABLE pgpool_catalog.insert_lock IN SHARE ROW EXCLUSIVE MODE"); + per_node_statement_log(backend, MAIN_NODE_ID, "LOCK TABLE pgpool_catalog.insert_lock IN SHARE ROW EXCLUSIVE MODE"); - if (do_command(frontend, MASTER(backend), "LOCK TABLE pgpool_catalog.insert_lock IN SHARE ROW EXCLUSIVE MODE", - PROTO_MAJOR_V3, MASTER_CONNECTION(backend)->pid, MASTER_CONNECTION(backend)->key, 0) != POOL_CONTINUE) + if (do_command(frontend, MAIN(backend), "LOCK TABLE pgpool_catalog.insert_lock IN SHARE ROW EXCLUSIVE MODE", + PROTO_MAJOR_V3, MAIN_CONNECTION(backend)->pid, MAIN_CONNECTION(backend)->key, 0) != POOL_CONTINUE) ereport(ERROR, (errmsg("unable to add lock target"), errdetail("do_command returned DEADLOCK status"))); @@ -2918,8 +2918,8 @@ has_lock_target(POOL_CONNECTION * frontend, snprintf(qbuf, sizeof(qbuf), "SELECT 1 FROM pgpool_catalog.insert_lock WHERE reloid = 0%s", suffix); } - per_node_statement_log(backend, MASTER_NODE_ID, qbuf); - do_query(MASTER(backend), qbuf, &result, MAJOR(backend)); + per_node_statement_log(backend, MAIN_NODE_ID, qbuf); + do_query(MAIN(backend), qbuf, &result, MAJOR(backend)); if (result && result->data[0] && !strcmp(result->data[0], "1")) { free_select_result(result); @@ -2956,9 +2956,9 @@ static POOL_STATUS insert_oid_into_insert_lock(POOL_CONNECTION * frontend, snprintf(qbuf, sizeof(qbuf), "INSERT INTO pgpool_catalog.insert_lock VALUES (0)"); } - per_node_statement_log(backend, MASTER_NODE_ID, qbuf); - status = do_command(frontend, MASTER(backend), qbuf, PROTO_MAJOR_V3, - MASTER_CONNECTION(backend)->pid, MASTER_CONNECTION(backend)->key, 0); + per_node_statement_log(backend, MAIN_NODE_ID, qbuf); + status = do_command(frontend, MAIN(backend), qbuf, PROTO_MAJOR_V3, + MAIN_CONNECTION(backend)->pid, MAIN_CONNECTION(backend)->key, 0); return status; } @@ -3156,17 +3156,17 @@ read_kind_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * ba } /* - * returns true if all slaves status are 'C' (Command Complete) + * returns true if all standbys status are 'C' (Command Complete) */ static bool -is_all_slaves_command_complete(unsigned char *kind_list, int num_backends, int master) +is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int main_node) { int i; int ok = true; for (i = 0; i < num_backends; i++) { - if (i == master || kind_list[i] == 0) + if (i == main_node || kind_list[i] == 0) continue; if (kind_list[i] != 'C') { @@ -3277,16 +3277,16 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen } } - if (MASTER_SLAVE) + if (NATIVE_REPLICATION) { ereport(DEBUG5, (errmsg("reading backend data packet kind"), - errdetail("master node id: %d", MASTER_NODE_ID))); + errdetail("main node id: %d", MAIN_NODE_ID))); - read_kind_from_one_backend(frontend, backend, (char *) &kind, MASTER_NODE_ID); + read_kind_from_one_backend(frontend, backend, (char *) &kind, MAIN_NODE_ID); /* - * If we received a notification message in master/slave mode, other + * If we received a notification message in native 
replication mode, other * backends will not receive the message. So we should skip other * nodes otherwise we will hang in pool_read. */ @@ -3296,13 +3296,13 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen ereport(DEBUG5, (errmsg("reading backend data packet kind"), - errdetail("received notification message for master node %d", - MASTER_NODE_ID))); + errdetail("received notification message for main node %d", + MAIN_NODE_ID))); if (msg) pool_pending_message_free_pending_message(msg); return; } - pool_unread(CONNECTION(backend, MASTER_NODE_ID), &kind, sizeof(kind)); + pool_unread(CONNECTION(backend, MAIN_NODE_ID), &kind, sizeof(kind)); } for (i = 0; i < NUM_BACKENDS; i++) @@ -3367,7 +3367,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen (errmsg("reading backend data packet kind"), errdetail("parameter name: %s value: \"%s\"", p, value))); - if (IS_MASTER_NODE_ID(i)) + if (IS_MAIN_NODE_ID(i)) { int pos; pool_add_param(&CONNECTION(backend, i)->params, p, value); @@ -3458,7 +3458,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen /* * If we are in streaming replication mode and kind = 'Z' (ready for - * query) on master and kind on standby is not 'Z', it is likely that + * query) on primary and kind on standby is not 'Z', it is likely that * following scenario happened. * * FE=>Parse("BEGIN") FE=>Bind FE=>Execute("BEGIN"); @@ -3505,11 +3505,11 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen * cases it is possible that similar issue could happen since returned * messages do not follow the sequence recorded in the pending * messages because the backend ignores requests till sync message is - * received. In this case we need to re-sync either master or standby. - * So we check not only the standby but master node. + * received. In this case we need to re-sync either primary or standby. + * So we check not only the standby but primary node. */ - if (session_context->load_balance_node_id != MASTER_NODE_ID && - (kind_list[MASTER_NODE_ID] == 'Z' || + if (session_context->load_balance_node_id != MAIN_NODE_ID && + (kind_list[MAIN_NODE_ID] == 'Z' || kind_list[session_context->load_balance_node_id] == 'Z') && SL_MODE) { @@ -3517,10 +3517,10 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen char *buf; int len; - if (kind_list[MASTER_NODE_ID] == 'Z') + if (kind_list[MAIN_NODE_ID] == 'Z') s = CONNECTION(backend, session_context->load_balance_node_id); else - s = CONNECTION(backend, MASTER_NODE_ID); + s = CONNECTION(backend, MAIN_NODE_ID); /* skip len and contents corresponding standby data */ pool_read(s, &len, sizeof(len)); @@ -3583,19 +3583,19 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen } /* - * In master slave mode, if master gets an error at commit, while - * other slaves are normal at commit, we don't need to degenerate any + * In native replication mode, if primary gets an error at commit, while + * other standbys are normal at commit, we don't need to degenerate any * backend because it is likely that the error was caused by a * deferred trigger. 
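The tolerance rule spelled out above reduces to a small predicate: forgive an 'E' (error) reply from the primary/main node at COMMIT when every other responding backend reported 'C' (Command Complete). A self-contained sketch of just that predicate; the real code additionally verifies the statement is a COMMIT via is_commit_query():

    #include <stdbool.h>

    /* kind_list[i] == 0 means node i did not reply. */
    static bool
    sketch_forgive_commit_error(const unsigned char *kind_list,
                                int nbackends, int main_node)
    {
        int i;

        if (kind_list[main_node] != 'E')
            return false;           /* only the error-on-main case applies */

        for (i = 0; i < nbackends; i++)
        {
            if (i == main_node || kind_list[i] == 0)
                continue;
            if (kind_list[i] != 'C')
                return false;       /* a standby also misbehaved */
        }
        return true;                /* likely a deferred trigger; keep all nodes */
    }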
*/ - else if (MASTER_SLAVE && query_context->parse_tree && + else if (NATIVE_REPLICATION && query_context->parse_tree && is_commit_query(query_context->parse_tree) && - kind_list[MASTER_NODE_ID] == 'E' && - is_all_slaves_command_complete(kind_list, NUM_BACKENDS, MASTER_NODE_ID)) + kind_list[MAIN_NODE_ID] == 'E' && + is_all_standbys_command_complete(kind_list, NUM_BACKENDS, MAIN_NODE_ID)) { - *decided_kind = kind_list[MASTER_NODE_ID]; + *decided_kind = kind_list[MAIN_NODE_ID]; ereport(LOG, - (errmsg("reading backend data packet kind. Error on master while all slaves are normal"), + (errmsg("reading backend data packet kind. Error on primary while all standbys are normal"), errdetail("do not degenerate because it is likely caused by a delayed commit"))); if (SL_MODE && pool_is_doing_extended_query_message() && msg) @@ -3604,8 +3604,8 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen } else if (max_count <= NUM_BACKENDS / 2.0) { - /* no one gets majority. We trust master node's kind */ - trust_kind = kind_list[MASTER_NODE_ID]; + /* no one gets majority. We trust main node's kind */ + trust_kind = kind_list[MAIN_NODE_ID]; } else /* max_count > NUM_BACKENDS / 2.0 */ { @@ -3622,7 +3622,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen { /* degenerate */ ereport(WARNING, - (errmsg("packet kind of backend %d ['%c'] does not match with master/majority nodes packet kind ['%c']", i, kind_list[i], trust_kind))); + (errmsg("packet kind of backend %d ['%c'] does not match with main/majority nodes packet kind ['%c']", i, kind_list[i], trust_kind))); degenerate_node[degenerate_node_num++] = i; } } @@ -3850,7 +3850,7 @@ query_ps_status(char *query, POOL_CONNECTION_POOL * backend) if (*query == '\0') return; - sp = MASTER_CONNECTION(backend)->sp; + sp = MAIN_CONNECTION(backend)->sp; if (sp) i = snprintf(psbuf, sizeof(psbuf) - 1, "%s %s %s ", sp->user, sp->database, remote_ps_data); @@ -4044,7 +4044,7 @@ start_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * ba per_node_statement_log(backend, i, "BEGIN"); if (do_command(frontend, CONNECTION(backend, i), "BEGIN", MAJOR(backend), - MASTER_CONNECTION(backend)->pid, MASTER_CONNECTION(backend)->key, 0) != POOL_CONTINUE) + MAIN_CONNECTION(backend)->pid, MAIN_CONNECTION(backend)->key, 0) != POOL_CONTINUE) ereport(ERROR, (errmsg("unable to start the internal transaction"), errdetail("do_command returned DEADLOCK status"))); @@ -4077,10 +4077,10 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back PG_TRY(); { - /* We need to commit from secondary to master. */ + /* We need to commit from secondary to primary. 
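The commit ordering stated in this comment is easy to mis-read in the flattened diff, so here it is as a sketch: all non-main nodes are committed first, and the main node last, mirroring the two steps of end_internal_transaction() below. backend_alive and commit_on() are hypothetical stand-ins for the liveness test and the do_command("COMMIT") call:

    #include <stdbool.h>

    static bool backend_alive[8];    /* stand-in liveness flags */

    static void
    commit_on(int node)              /* hypothetical stand-in for do_command("COMMIT") */
    {
        (void) node;
    }

    static void
    sketch_commit_order(int nbackends, int main_node)
    {
        int i;

        /* COMMIT every node other than the main one first... */
        for (i = 0; i < nbackends; i++)
            if (i != main_node && backend_alive[i])
                commit_on(i);

        /* ...then the main node last. */
        commit_on(main_node);
    }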
*/ for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i) && + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) != 'I' && INTERNAL_TRANSACTION_STARTED(backend, i)) { @@ -4097,7 +4097,7 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back PG_TRY(); { if (do_command(frontend, CONNECTION(backend, i), "COMMIT", MAJOR(backend), - MASTER_CONNECTION(backend)->pid, MASTER_CONNECTION(backend)->key, 1) != POOL_CONTINUE) + MAIN_CONNECTION(backend)->pid, MAIN_CONNECTION(backend)->key, 1) != POOL_CONTINUE) { ereport(ERROR, (errmsg("unable to COMMIT the transaction"), @@ -4115,24 +4115,24 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back } } - /* Commit on master */ - if (TSTATE(backend, MASTER_NODE_ID) != 'I' && - INTERNAL_TRANSACTION_STARTED(backend, MASTER_NODE_ID)) + /* Commit on main */ + if (TSTATE(backend, MAIN_NODE_ID) != 'I' && + INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID)) { if (MAJOR(backend) == PROTO_MAJOR_V3) { /* * Skip rest of Ready for Query packet */ - pool_read(CONNECTION(backend, MASTER_NODE_ID), &len, sizeof(len)); - pool_read(CONNECTION(backend, MASTER_NODE_ID), &tstate, sizeof(tstate)); + pool_read(CONNECTION(backend, MAIN_NODE_ID), &len, sizeof(len)); + pool_read(CONNECTION(backend, MAIN_NODE_ID), &tstate, sizeof(tstate)); } - per_node_statement_log(backend, MASTER_NODE_ID, "COMMIT"); + per_node_statement_log(backend, MAIN_NODE_ID, "COMMIT"); PG_TRY(); { - if (do_command(frontend, MASTER(backend), "COMMIT", MAJOR(backend), - MASTER_CONNECTION(backend)->pid, MASTER_CONNECTION(backend)->key, 1) != POOL_CONTINUE) + if (do_command(frontend, MAIN(backend), "COMMIT", MAJOR(backend), + MAIN_CONNECTION(backend)->pid, MAIN_CONNECTION(backend)->key, 1) != POOL_CONTINUE) { ereport(ERROR, (errmsg("unable to COMMIT the transaction"), @@ -4141,11 +4141,11 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back } PG_CATCH(); { - INTERNAL_TRANSACTION_STARTED(backend, MASTER_NODE_ID) = false; + INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID) = false; PG_RE_THROW(); } PG_END_TRY(); - INTERNAL_TRANSACTION_STARTED(backend, MASTER_NODE_ID) = false; + INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID) = false; } } PG_CATCH(); @@ -4959,12 +4959,12 @@ SELECT_RETRY: } } - if (FD_ISSET(MASTER(backend)->fd, &exceptmask)) + if (FD_ISSET(MAIN(backend)->fd, &exceptmask)) ereport(FATAL, (errmsg("unable to read from backend socket"), errdetail("exception occured on backend socket"))); - else if (FD_ISSET(MASTER(backend)->fd, &readmask)) + else if (FD_ISSET(MAIN(backend)->fd, &readmask)) { status = ProcessBackendResponse(frontend, backend, state, num_fields); if (status != POOL_CONTINUE) @@ -4980,8 +4980,8 @@ void pool_dump_valid_backend(int backend_id) { ereport(LOG, - (errmsg("RAW_MODE:%d REAL_MASTER_NODE_ID:%d pool_is_node_to_be_sent_in_current_query:%d my_backend_status:%d", - RAW_MODE, REAL_MASTER_NODE_ID, pool_is_node_to_be_sent_in_current_query(backend_id), + (errmsg("RAW_MODE:%d REAL_MAIN_NODE_ID:%d pool_is_node_to_be_sent_in_current_query:%d my_backend_status:%d", + RAW_MODE, REAL_MAIN_NODE_ID, pool_is_node_to_be_sent_in_current_query(backend_id), *my_backend_status[backend_id]))); } @@ -5037,7 +5037,7 @@ pool_push_pending_data(POOL_CONNECTION * backend) /* * If we have not send the flush message to load balance node yet, send a * flush message to the load balance node. 
Otherwise only the non load - * balance node (usually the master node) produces response if we do not + * balance node (usually the primary node) produces response if we do not * send sync message to it yet. */ session_context = pool_get_session_context(false); diff --git a/src/protocol/pool_proto2.c b/src/protocol/pool_proto2.c index b7c85c34f..ffca996c3 100644 --- a/src/protocol/pool_proto2.c +++ b/src/protocol/pool_proto2.c @@ -58,11 +58,11 @@ AsciiRow(POOL_CONNECTION * frontend, return POOL_CONTINUE; /* NULL map */ - pool_read(MASTER(backend), nullmap, nbytes); + pool_read(MAIN(backend), nullmap, nbytes); memcpy(nullmap1, nullmap, nbytes); for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i)) + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i)) { pool_read(CONNECTION(backend, i), nullmap, nbytes); if (memcmp(nullmap, nullmap1, nbytes)) @@ -75,7 +75,7 @@ AsciiRow(POOL_CONNECTION * frontend, */ ereport(DEBUG1, (errmsg("processing ASCII row"), - errdetail("NULLMAP is different between master and backend no %d", i))); + errdetail("NULLMAP is different between main and backend no %d", i))); } } } @@ -94,7 +94,7 @@ AsciiRow(POOL_CONNECTION * frontend, if (mask & nullmap[i / 8]) { /* field size */ - if (pool_read(MASTER(backend), &size, sizeof(int)) < 0) + if (pool_read(MAIN(backend), &size, sizeof(int)) < 0) return POOL_END; size1 = ntohl(size) - 4; @@ -102,7 +102,7 @@ AsciiRow(POOL_CONNECTION * frontend, /* read and send actual data only when size > 0 */ if (size1 > 0) { - sendbuf = pool_read2(MASTER(backend), size1); + sendbuf = pool_read2(MAIN(backend), size1); if (sendbuf == NULL) return POOL_END; } @@ -119,7 +119,7 @@ AsciiRow(POOL_CONNECTION * frontend, for (j = 0; j < NUM_BACKENDS; j++) { - if (VALID_BACKEND(j) && !IS_MASTER_NODE_ID(j)) + if (VALID_BACKEND(j) && !IS_MAIN_NODE_ID(j)) { /* field size */ if (pool_read(CONNECTION(backend, j), &size, sizeof(int)) < 0) @@ -137,7 +137,7 @@ AsciiRow(POOL_CONNECTION * frontend, if (size != size1) ereport(DEBUG1, (errmsg("processing ASCII row"), - errdetail("size of field no %d does not match between master [size:%d] and backend no %d [size:%d]", + errdetail("size of field no %d does not match between main [size:%d] and backend no %d [size:%d]", i, ntohl(size), j, ntohl(size1)))); /* read and send actual data only when size > 0 */ @@ -183,13 +183,13 @@ BinaryRow(POOL_CONNECTION * frontend, return POOL_CONTINUE; /* NULL map */ - pool_read(MASTER(backend), nullmap, nbytes); + pool_read(MAIN(backend), nullmap, nbytes); if (pool_write(frontend, nullmap, nbytes) < 0) return POOL_END; memcpy(nullmap1, nullmap, nbytes); for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i)) + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i)) { pool_read(CONNECTION(backend, i), nullmap, nbytes); if (memcmp(nullmap, nullmap1, nbytes)) @@ -202,7 +202,7 @@ BinaryRow(POOL_CONNECTION * frontend, */ ereport(DEBUG1, (errmsg("processing binary row"), - errdetail("NULLMAP is different between master and backend no %d", i))); + errdetail("NULLMAP is different between main and backend no %d", i))); } } } @@ -218,11 +218,11 @@ BinaryRow(POOL_CONNECTION * frontend, if (mask & nullmap[i / 8]) { /* field size */ - if (pool_read(MASTER(backend), &size, sizeof(int)) < 0) + if (pool_read(MAIN(backend), &size, sizeof(int)) < 0) return POOL_END; for (j = 0; j < NUM_BACKENDS; j++) { - if (VALID_BACKEND(j) && !IS_MASTER_NODE_ID(j)) + if (VALID_BACKEND(j) && !IS_MAIN_NODE_ID(j)) { /* field size */ if (pool_read(CONNECTION(backend, i), 
&size, sizeof(int)) < 0) @@ -237,14 +237,14 @@ BinaryRow(POOL_CONNECTION * frontend, if (size != size1) ereport(DEBUG1, (errmsg("processing binary row"), - errdetail("size of field no %d does not match between master [size:%d] and backend no %d [size:%d]", + errdetail("size of field no %d does not match between main [size:%d] and backend no %d [size:%d]", i, ntohl(size), j, ntohl(size1)))); } buf = NULL; /* forward to frontend */ - if (IS_MASTER_NODE_ID(j)) + if (IS_MAIN_NODE_ID(j)) pool_write(frontend, &size, sizeof(int)); size = ntohl(size) - 4; @@ -255,7 +255,7 @@ BinaryRow(POOL_CONNECTION * frontend, if (buf == NULL) return POOL_END; - if (IS_MASTER_NODE_ID(j)) + if (IS_MAIN_NODE_ID(j)) { pool_write(frontend, buf, size); } @@ -283,20 +283,20 @@ CompletedResponse(POOL_CONNECTION * frontend, len1 = 0; /* read command tag */ - string = pool_read_string(MASTER(backend), &len, 0); + string = pool_read_string(MAIN(backend), &len, 0); if (string == NULL) return POOL_END; else if (!strncmp(string, "BEGIN", 5)) - TSTATE(backend, MASTER_NODE_ID) = 'T'; + TSTATE(backend, MAIN_NODE_ID) = 'T'; else if (!strncmp(string, "COMMIT", 6) || !strncmp(string, "ROLLBACK", 8)) - TSTATE(backend, MASTER_NODE_ID) = 'I'; + TSTATE(backend, MAIN_NODE_ID) = 'I'; len1 = len; string1 = pstrdup(string); for (i = 0; i < NUM_BACKENDS; i++) { - if (!VALID_BACKEND(i) || IS_MASTER_NODE_ID(i)) + if (!VALID_BACKEND(i) || IS_MAIN_NODE_ID(i)) continue; /* read command tag */ @@ -316,7 +316,7 @@ CompletedResponse(POOL_CONNECTION * frontend, { ereport(DEBUG1, (errmsg("processing completed response"), - errdetail("message length does not match between master(%d \"%s\",) and %d th server (%d \"%s\",)", + errdetail("message length does not match between main(%d \"%s\",) and %d th server (%d \"%s\",)", len, string, i, len1, string1))); /* we except INSERT, because INSERT response has OID */ @@ -349,7 +349,7 @@ CursorResponse(POOL_CONNECTION * frontend, int i; /* read cursor name */ - string = pool_read_string(MASTER(backend), &len, 0); + string = pool_read_string(MAIN(backend), &len, 0); if (string == NULL) return POOL_END; len1 = len; @@ -357,7 +357,7 @@ CursorResponse(POOL_CONNECTION * frontend, for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i)) + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i)) { /* read cursor name */ string = pool_read_string(CONNECTION(backend, i), &len, 0); @@ -371,8 +371,8 @@ CursorResponse(POOL_CONNECTION * frontend, ereport(FATAL, (return_code(2), errmsg("unable to process cursor response"), - errdetail("length does not match between master(%d) and %d th backend(%d)", len, i, len1), - errhint("master(%s) %d th backend(%s)", string1, i, string))); + errdetail("length does not match between main(%d) and %d th backend(%d)", len, i, len1), + errhint("main(%s) %d th backend(%s)", string1, i, string))); } } } @@ -487,7 +487,7 @@ FunctionResultResponse(POOL_CONNECTION * frontend, if (VALID_BACKEND(i)) { /* result value itself */ - if ((result = pool_read2(MASTER(backend), len)) == NULL) + if ((result = pool_read2(MAIN(backend), len)) == NULL) ereport(FATAL, (return_code(2), errmsg("unable to process function result response"), @@ -508,7 +508,7 @@ FunctionResultResponse(POOL_CONNECTION * frontend, if (VALID_BACKEND(i)) { /* unused ('0') */ - pool_read(MASTER(backend), &dummy, 1); + pool_read(MAIN(backend), &dummy, 1); } } pool_write(frontend, "0", 1); @@ -575,7 +575,7 @@ NotificationResponse(POOL_CONNECTION * frontend, errmsg("unable to process Notification response"), 
errdetail("reading from backend node %d failed", i))); - if (IS_MASTER_NODE_ID(i)) + if (IS_MAIN_NODE_ID(i)) { pid1 = pid; len1 = len; @@ -610,11 +610,11 @@ RowDescription(POOL_CONNECTION * frontend, len1; int i; - pool_read(MASTER(backend), &num_fields, sizeof(short)); + pool_read(MAIN(backend), &num_fields, sizeof(short)); num_fields1 = num_fields; for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i)) + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i)) { /* # of fields (could be 0) */ pool_read(CONNECTION(backend, i), &num_fields, sizeof(short)); @@ -623,7 +623,7 @@ RowDescription(POOL_CONNECTION * frontend, ereport(FATAL, (return_code(2), errmsg("unable to process row description"), - errdetail("num_fields does not match between backends master(%d) and %d th backend(%d)", + errdetail("num_fields does not match between backends main(%d) and %d th backend(%d)", num_fields, i, num_fields1))); } } @@ -638,7 +638,7 @@ RowDescription(POOL_CONNECTION * frontend, int j; /* field name */ - string = pool_read_string(MASTER(backend), &len, 0); + string = pool_read_string(MAIN(backend), &len, 0); if (string == NULL) return POOL_END; len1 = len; @@ -646,7 +646,7 @@ RowDescription(POOL_CONNECTION * frontend, for (j = 0; j < NUM_BACKENDS; j++) { - if (VALID_BACKEND(j) && !IS_MASTER_NODE_ID(j)) + if (VALID_BACKEND(j) && !IS_MAIN_NODE_ID(j)) { string = pool_read_string(CONNECTION(backend, j), &len, 0); if (string == NULL) @@ -660,21 +660,21 @@ RowDescription(POOL_CONNECTION * frontend, ereport(FATAL, (return_code(2), errmsg("unable to process row description"), - errdetail("field length does not match between backends master(%d) and %d th backend(%d)", + errdetail("field length does not match between backends main(%d) and %d th backend(%d)", ntohl(len), j, ntohl(len1)))); } } } /* type oid */ - pool_read(MASTER(backend), &oid, sizeof(int)); + pool_read(MAIN(backend), &oid, sizeof(int)); oid1 = oid; ereport(DEBUG1, (errmsg("processing ROW DESCRIPTION"), errdetail("type oid: %d", ntohl(oid)))); for (j = 0; j < NUM_BACKENDS; j++) { - if (VALID_BACKEND(j) && !IS_MASTER_NODE_ID(j)) + if (VALID_BACKEND(j) && !IS_MAIN_NODE_ID(j)) { pool_read(CONNECTION(backend, j), &oid, sizeof(int)); @@ -683,7 +683,7 @@ RowDescription(POOL_CONNECTION * frontend, { ereport(DEBUG1, (errmsg("processing ROW DESCRIPTION"), - errdetail("field oid does not match between backends master(%d) and %d th backend(%d)", + errdetail("field oid does not match between backends main(%d) and %d th backend(%d)", ntohl(oid), j, ntohl(oid1)))); } } @@ -691,18 +691,18 @@ RowDescription(POOL_CONNECTION * frontend, pool_write(frontend, &oid1, sizeof(int)); /* size */ - pool_read(MASTER(backend), &size, sizeof(short)); + pool_read(MAIN(backend), &size, sizeof(short)); size1 = size; for (j = 0; j < NUM_BACKENDS; j++) { - if (VALID_BACKEND(j) && !IS_MASTER_NODE_ID(j)) + if (VALID_BACKEND(j) && !IS_MAIN_NODE_ID(j)) { pool_read(CONNECTION(backend, j), &size, sizeof(short)); if (size1 != size) { ereport(FATAL, (errmsg("data among backends are different"), - errdetail("field size does not match between backends master(%d) and %d th backend(%d", ntohs(size), j, ntohs(size1)))); + errdetail("field size does not match between backends main(%d) and %d th backend(%d", ntohs(size), j, ntohs(size1)))); } } @@ -713,21 +713,21 @@ RowDescription(POOL_CONNECTION * frontend, pool_write(frontend, &size1, sizeof(short)); /* modifier */ - pool_read(MASTER(backend), &mod, sizeof(int)); + pool_read(MAIN(backend), &mod, sizeof(int)); 
ereport(DEBUG1, (errmsg("processing ROW DESCRIPTION"), errdetail("modifier: %d", ntohs(mod)))); mod1 = mod; for (j = 0; j < NUM_BACKENDS; j++) { - if (VALID_BACKEND(j) && !IS_MASTER_NODE_ID(j)) + if (VALID_BACKEND(j) && !IS_MAIN_NODE_ID(j)) { pool_read(CONNECTION(backend, j), &mod, sizeof(int)); if (mod != mod1) { ereport(DEBUG1, (errmsg("processing ROW DESCRIPTION"), - errdetail("modifier does not match between backends master(%d) and %d th backend(%d)", + errdetail("modifier does not match between backends main(%d) and %d th backend(%d)", ntohl(mod), j, ntohl(mod1)))); } } diff --git a/src/protocol/pool_proto_modules.c b/src/protocol/pool_proto_modules.c index bd3fb4c2c..80e53096a 100644 --- a/src/protocol/pool_proto_modules.c +++ b/src/protocol/pool_proto_modules.c @@ -91,7 +91,7 @@ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, POOL_SENT_MESSAGE * message, POOL_SENT_MESSAGE * bind_message); -static int *find_victim_nodes(int *ntuples, int nmembers, int master_node, int *number_of_nodes); +static int *find_victim_nodes(int *ntuples, int nmembers, int main_node, int *number_of_nodes); static POOL_STATUS close_standby_transactions(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend); @@ -228,7 +228,7 @@ SimpleQuery(POOL_CONNECTION * frontend, */ if (pool_config->memory_cache_enabled && is_likely_select && !pool_is_writing_transaction() && - TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) != 'E') + TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) != 'E') { bool foundp; @@ -278,7 +278,7 @@ SimpleQuery(POOL_CONNECTION * frontend, * execute, instead the original query will be sent to backends, * which may or may not cause an actual syntax errors. The command * will be sent to all backends in replication mode or - * master/primary in master/slave mode. + * primary in native replication mode. */ if (!strcmp(remote_host, "[local]")) { @@ -678,7 +678,7 @@ SimpleQuery(POOL_CONNECTION * frontend, /* * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE - * in master/slave mode, we send BEGIN to slaves/standbys + * in native replication mode, we send BEGIN to standbys * instead. original_query which is BEGIN READ WRITE is sent * to primary. rewritten_query which is BEGIN is sent to * standbys. @@ -697,9 +697,9 @@ SimpleQuery(POOL_CONNECTION * frontend, /* * Optimization effort: If there's only one session, we do not - * need to wait for the master node's response, and could execute + * need to wait for the main node's response, and could execute * the query concurrently. In snapshot isolation mode we cannot - * do this optimization because we need to wait for master's + * do this optimization because we need to wait for main node's * response first. 
*/ if (pool_config->num_init_children == 1 && @@ -711,11 +711,11 @@ SimpleQuery(POOL_CONNECTION * frontend, return status; } - /* Send the query to master node */ - pool_send_and_wait(query_context, 1, MASTER_NODE_ID); + /* Send the query to main node */ + pool_send_and_wait(query_context, 1, MAIN_NODE_ID); /* Check specific errors */ - specific_error = check_errors(backend, MASTER_NODE_ID); + specific_error = check_errors(backend, MAIN_NODE_ID); if (specific_error) { /* log error message */ @@ -733,7 +733,7 @@ SimpleQuery(POOL_CONNECTION * frontend, /* send query to other nodes */ query_context->rewritten_query = msg; query_context->rewritten_length = len; - pool_send_and_wait(query_context, -1, MASTER_NODE_ID); + pool_send_and_wait(query_context, -1, MAIN_NODE_ID); } else { @@ -747,19 +747,19 @@ SimpleQuery(POOL_CONNECTION * frontend, } /* - * Send the query to other than master node. + * Send the query to other than main node. */ - pool_send_and_wait(query_context, -1, MASTER_NODE_ID); + pool_send_and_wait(query_context, -1, MAIN_NODE_ID); } /* - * Send "COMMIT" or "ROLLBACK" to only master node if query is + * Send "COMMIT" or "ROLLBACK" to only main node if query is * "COMMIT" or "ROLLBACK" */ if (commit) { - pool_send_and_wait(query_context, 1, MASTER_NODE_ID); + pool_send_and_wait(query_context, 1, MAIN_NODE_ID); /* * If we are in the snapshot isolation mode, we need to declare @@ -776,7 +776,7 @@ SimpleQuery(POOL_CONNECTION * frontend, } else { - pool_send_and_wait(query_context, 1, MASTER_NODE_ID); + pool_send_and_wait(query_context, 1, MAIN_NODE_ID); } return POOL_CONTINUE; @@ -843,7 +843,7 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, ereport(DEBUG1, (errmsg("Execute: pool_is_writing_transaction: %d TSTATE: %c", pool_is_writing_transaction(), - TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID)))); + TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID)))); /* log query to log file if necessary */ if (pool_config->log_statement) @@ -853,7 +853,7 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, * Fetch memory cache if possible */ if (pool_config->memory_cache_enabled && !pool_is_writing_transaction() && - (TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) != 'E') + (TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) != 'E') && pool_is_likely_select(query)) { POOL_STATUS status; @@ -863,7 +863,7 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, #define STR_ALLOC_SIZE 1024 ereport(DEBUG1, (errmsg("Execute: pool_is_likely_select: true pool_is_writing_transaction: %d TSTATE: %c", pool_is_writing_transaction(), - TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID)))); + TSTATE(backend, NATIVE_REPLICATION ? 
PRIMARY_NODE_ID : REAL_MAIN_NODE_ID)))); len = strlen(query) + 1; search_query = MemoryContextStrdup(query_context->memory_context, query); @@ -985,11 +985,11 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, */ if (!commit) { - /* Send the query to master node */ - pool_extended_send_and_wait(query_context, "E", len, contents, 1, MASTER_NODE_ID, false); + /* Send the query to main node */ + pool_extended_send_and_wait(query_context, "E", len, contents, 1, MAIN_NODE_ID, false); /* Check specific errors */ - specific_error = check_errors(backend, MASTER_NODE_ID); + specific_error = check_errors(backend, MAIN_NODE_ID); if (specific_error) { /* log error message */ @@ -1005,7 +1005,7 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, memset(msg + len, 0, sizeof(int)); /* send query to other nodes */ - pool_extended_send_and_wait(query_context, "E", len, msg, -1, MASTER_NODE_ID, false); + pool_extended_send_and_wait(query_context, "E", len, msg, -1, MAIN_NODE_ID, false); } else { @@ -1018,16 +1018,16 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, si_commit_request(); } - pool_extended_send_and_wait(query_context, "E", len, contents, -1, MASTER_NODE_ID, false); + pool_extended_send_and_wait(query_context, "E", len, contents, -1, MAIN_NODE_ID, false); } /* - * send "COMMIT" or "ROLLBACK" to only master node if query is + * send "COMMIT" or "ROLLBACK" to only main node if query is * "COMMIT" or "ROLLBACK" */ if (commit) { - pool_extended_send_and_wait(query_context, "E", len, contents, 1, MASTER_NODE_ID, false); + pool_extended_send_and_wait(query_context, "E", len, contents, 1, MAIN_NODE_ID, false); /* * If we are in the snapshot isolation mode, we need to declare @@ -1047,8 +1047,8 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, if (!foundp) { - pool_extended_send_and_wait(query_context, "E", len, contents, 1, MASTER_NODE_ID, true); - pool_extended_send_and_wait(query_context, "E", len, contents, -1, MASTER_NODE_ID, true); + pool_extended_send_and_wait(query_context, "E", len, contents, 1, MAIN_NODE_ID, true); + pool_extended_send_and_wait(query_context, "E", len, contents, -1, MAIN_NODE_ID, true); } /* Add pending message */ @@ -1070,14 +1070,14 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, { ereport(DEBUG1, (errmsg("Execute: TSTATE:%c", - TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID)))); + TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID)))); /* * If the query was not READ SELECT, and we are in an explicit * transaction, remember that we had a write query in this * transaction. */ - if (TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) == 'T' || + if (TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) == 'T' || pool_config->disable_load_balance_on_write == DLBOW_ALWAYS) { /* @@ -1159,7 +1159,7 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, * execute, instead the original query will be sent to backends, * which may or may not cause an actual syntax errors. The command * will be sent to all backends in replication mode or - * master/primary in master/slave mode. + * primary in native replication mode. */ if (!strcmp(remote_host, "[local]")) { @@ -1315,14 +1315,14 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, } /* - * If the query is BEGIN READ WRITE in master/slave mode, we send - * BEGIN instead of it to slaves/standbys. 
original_query which is + * If the query is BEGIN READ WRITE in native replication mode, we send + * BEGIN to standbys instead. original_query which is * BEGIN READ WRITE is sent to primary. rewritten_query which is BEGIN * is sent to standbys. */ if (is_start_transaction_query(query_context->parse_tree) && is_read_write((TransactionStmt *) query_context->parse_tree) && - MASTER_SLAVE) + NATIVE_REPLICATION) { query_context->rewritten_query = pstrdup("BEGIN"); } @@ -1335,7 +1335,7 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, { char kind; - if (TSTATE(backend, MASTER_NODE_ID) != 'T') + if (TSTATE(backend, MAIN_NODE_ID) != 'T') { int i; @@ -1395,16 +1395,16 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, * We must synchronize because Parse message acquires table locks. */ ereport(DEBUG1, - (errmsg("Parse: waiting for master completing the query"))); - pool_extended_send_and_wait(query_context, "P", len, contents, 1, MASTER_NODE_ID, false); + (errmsg("Parse: waiting for main node completing the query"))); + pool_extended_send_and_wait(query_context, "P", len, contents, 1, MAIN_NODE_ID, false); /* * We must check deadlock error because a aborted transaction by * detecting deadlock isn't same on all nodes. If a transaction is - * aborted on master node, pgpool send a error query to another nodes. + * aborted on the main node, pgpool sends an error query to the other nodes. */ - deadlock_detected = detect_deadlock_error(MASTER(backend), MAJOR(backend)); + deadlock_detected = detect_deadlock_error(MAIN(backend), MAJOR(backend)); /* * Check if other than deadlock error detected. If so, emit log. This * PostgreSQL does not report what statement caused that error and * make users confused.
*/ - per_node_error_log(backend, MASTER_NODE_ID, stmt, "Parse: Error or notice message from backend: ", true); + per_node_error_log(backend, MAIN_NODE_ID, stmt, "Parse: Error or notice message from backend: ", true); if (deadlock_detected) { @@ -1425,9 +1425,9 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, pool_copy_prep_where(query_context->where_to_send, error_qc->where_to_send); ereport(LOG, - (errmsg("Parse: received deadlock error message from master node"))); + (errmsg("Parse: received deadlock error message from main node"))); - pool_send_and_wait(error_qc, -1, MASTER_NODE_ID); + pool_send_and_wait(error_qc, -1, MAIN_NODE_ID); pool_query_context_destroy(error_qc); @@ -1437,7 +1437,7 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, } else { - pool_extended_send_and_wait(query_context, "P", len, contents, -1, MASTER_NODE_ID, false); + pool_extended_send_and_wait(query_context, "P", len, contents, -1, MAIN_NODE_ID, false); } } else if (SL_MODE) @@ -1452,8 +1452,8 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, #ifdef NOT_USED pool_clear_sync_map(); #endif - pool_extended_send_and_wait(query_context, "P", len, contents, 1, MASTER_NODE_ID, true); - pool_extended_send_and_wait(query_context, "P", len, contents, -1, MASTER_NODE_ID, true); + pool_extended_send_and_wait(query_context, "P", len, contents, 1, MAIN_NODE_ID, true); + pool_extended_send_and_wait(query_context, "P", len, contents, -1, MAIN_NODE_ID, true); pool_add_sent_message(session_context->uncompleted_message); /* Add pending message */ @@ -1466,7 +1466,7 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, } else { - pool_extended_send_and_wait(query_context, "P", len, contents, 1, MASTER_NODE_ID, false); + pool_extended_send_and_wait(query_context, "P", len, contents, 1, MAIN_NODE_ID, false); } return POOL_CONTINUE; @@ -1549,7 +1549,7 @@ Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, * primary node. */ if (pool_config->load_balance_mode && pool_is_writing_transaction() && - TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) == 'T' && + TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) == 'T' && pool_config->disable_load_balance_on_write != DLBOW_OFF) { if (!SL_MODE) @@ -1563,7 +1563,7 @@ Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, } if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && - TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) == 'T') + TSTATE(backend, NATIVE_REPLICATION ? 
PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) == 'T') { pool_where_to_send(query_context, query_context->original_query, query_context->parse_tree); @@ -1601,7 +1601,7 @@ Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, } ereport(DEBUG1, - (errmsg("Bind: waiting for master completing the query"))); + (errmsg("Bind: waiting for main node completing the query"))); pool_set_query_in_progress(); @@ -1613,8 +1613,8 @@ Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, else nowait = false; - pool_extended_send_and_wait(query_context, "B", len, contents, 1, MASTER_NODE_ID, nowait); - pool_extended_send_and_wait(query_context, "B", len, contents, -1, MASTER_NODE_ID, nowait); + pool_extended_send_and_wait(query_context, "B", len, contents, 1, MAIN_NODE_ID, nowait); + pool_extended_send_and_wait(query_context, "B", len, contents, -1, MAIN_NODE_ID, nowait); if (SL_MODE) { @@ -1696,13 +1696,13 @@ Describe(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, * leads to "portal not found" etc. errors. */ ereport(DEBUG1, - (errmsg("Describe: waiting for master completing the query"))); + (errmsg("Describe: waiting for main node completing the query"))); nowait = (SL_MODE ? true : false); pool_set_query_in_progress(); - pool_extended_send_and_wait(query_context, "D", len, contents, 1, MASTER_NODE_ID, nowait); - pool_extended_send_and_wait(query_context, "D", len, contents, -1, MASTER_NODE_ID, nowait); + pool_extended_send_and_wait(query_context, "D", len, contents, 1, MAIN_NODE_ID, nowait); + pool_extended_send_and_wait(query_context, "D", len, contents, -1, MAIN_NODE_ID, nowait); if (SL_MODE) { @@ -1794,14 +1794,14 @@ Close(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, */ ereport(DEBUG1, - (errmsg("Close: waiting for master completing the query"))); + (errmsg("Close: waiting for main node completing the query"))); pool_set_query_in_progress(); if (!SL_MODE) { - pool_extended_send_and_wait(query_context, "C", len, contents, 1, MASTER_NODE_ID, false); - pool_extended_send_and_wait(query_context, "C", len, contents, -1, MASTER_NODE_ID, false); + pool_extended_send_and_wait(query_context, "C", len, contents, 1, MAIN_NODE_ID, false); + pool_extended_send_and_wait(query_context, "C", len, contents, -1, MAIN_NODE_ID, false); } else { @@ -1823,8 +1823,8 @@ Close(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, query_context->where_to_send[session_context->load_balance_node_id] = true; } - pool_extended_send_and_wait(query_context, "C", len, contents, 1, MASTER_NODE_ID, true); - pool_extended_send_and_wait(query_context, "C", len, contents, -1, MASTER_NODE_ID, true); + pool_extended_send_and_wait(query_context, "C", len, contents, 1, MAIN_NODE_ID, true); + pool_extended_send_and_wait(query_context, "C", len, contents, -1, MAIN_NODE_ID, true); /* Add pending message */ pmsg = pool_pending_message_create('C', len, contents); @@ -1936,7 +1936,7 @@ ReadyForQuery(POOL_CONNECTION * frontend, char msgbuf[128]; victim_nodes = find_victim_nodes(session_context->ntuples, NUM_BACKENDS, - MASTER_NODE_ID, &number_of_nodes); + MAIN_NODE_ID, &number_of_nodes); if (victim_nodes) { int i; @@ -2033,7 +2033,7 @@ ReadyForQuery(POOL_CONNECTION * frontend, /* if (pool_is_query_in_progress() && allow_close_transaction) */ if (allow_close_transaction) { - bool internal_transaction_started = INTERNAL_TRANSACTION_STARTED(backend, MASTER_NODE_ID); + bool internal_transaction_started = INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID); if (end_internal_transaction(frontend, backend) != 
POOL_CONTINUE) return POOL_END; @@ -2060,7 +2060,7 @@ ReadyForQuery(POOL_CONNECTION * frontend, * Set transaction state for each node */ state = TSTATE(backend, - MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID); + NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID); for (i = 0; i < NUM_BACKENDS; i++) { @@ -2076,9 +2076,9 @@ ReadyForQuery(POOL_CONNECTION * frontend, errdetail("transaction state '%c'(%02x)", state, state))); /* - * The transaction state to be returned to frontend is master's. + * The transaction state to be returned to frontend is main node's. */ - if (i == (MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID)) + if (i == (NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID)) { state = kind; } @@ -2154,7 +2154,7 @@ ReadyForQuery(POOL_CONNECTION * frontend, if (pool_is_doing_extended_query_message()) { if (session_context->query_context && - session_context->query_context->query_state[MASTER_NODE_ID] == POOL_EXECUTE_COMPLETE) + session_context->query_context->query_state[MAIN_NODE_ID] == POOL_EXECUTE_COMPLETE) { pool_handle_query_cache(backend, session_context->query_context->query_w_hex, node, state); if (session_context->query_context->query_w_hex) @@ -2183,7 +2183,7 @@ ReadyForQuery(POOL_CONNECTION * frontend, { if ((pool_is_doing_extended_query_message() && session_context->query_context && - session_context->query_context->query_state[MASTER_NODE_ID] != POOL_UNPARSED && + session_context->query_context->query_state[MAIN_NODE_ID] != POOL_UNPARSED && session_context->uncompleted_message) || (!pool_is_doing_extended_query_message() && session_context->uncompleted_message && session_context->uncompleted_message->kind != 0)) @@ -2226,12 +2226,12 @@ static POOL_STATUS close_standby_transactions(POOL_CONNECTION * frontend, if (CONNECTION_SLOT(backend, i) && TSTATE(backend, i) == 'T' && BACKEND_INFO(i).backend_status == CON_UP && - (MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) != i) + (NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) != i) { per_node_statement_log(backend, i, "COMMIT"); if (do_command(frontend, CONNECTION(backend, i), "COMMIT", MAJOR(backend), - MASTER_CONNECTION(backend)->pid, - MASTER_CONNECTION(backend)->key, 0) != POOL_CONTINUE) + MAIN_CONNECTION(backend)->pid, + MAIN_CONNECTION(backend)->key, 0) != POOL_CONTINUE) ereport(ERROR, (errmsg("unable to close standby transactions"), errdetail("do_command returned DEADLOCK status"))); @@ -2382,17 +2382,17 @@ ParameterDescription(POOL_CONNECTION * frontend, /* get number of parameters in original query */ num_params = session_context->query_context->num_original_params; - pool_read(MASTER(backend), &len, sizeof(len)); + pool_read(MAIN(backend), &len, sizeof(len)); len = ntohl(len); len -= sizeof(int32); len1 = len; /* number of parameters in rewritten query is just discarded */ - pool_read(MASTER(backend), &num_dmy, sizeof(int16)); + pool_read(MAIN(backend), &num_dmy, sizeof(int16)); len -= sizeof(int16); - p = pool_read2(MASTER(backend), len); + p = pool_read2(MAIN(backend), len); if (p == NULL) ereport(ERROR, (errmsg("ParameterDescription. connection error"), @@ -2404,7 +2404,7 @@ ParameterDescription(POOL_CONNECTION * frontend, for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i)) + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i)) { pool_read(CONNECTION(backend, i), &len, sizeof(len)); @@ -2420,7 +2420,7 @@ ParameterDescription(POOL_CONNECTION * frontend, if (len != len1) ereport(DEBUG1, (errmsg("ParameterDescription. 
backends does not match"), - errdetail("length does not match between backends master(%d) %d th backend(%d) kind:(%c)", len, i, len1, kind))); + errdetail("length does not match between backends main(%d) %d th backend(%d) kind:(%c)", len, i, len1, kind))); } } @@ -2954,8 +2954,8 @@ ProcessBackendResponse(POOL_CONNECTION * frontend, case 'E': /* ErrorResponse */ status = ErrorResponse3(frontend, backend); pool_unset_command_success(); - if (TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : - REAL_MASTER_NODE_ID) != 'I') + if (TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : + REAL_MAIN_NODE_ID) != 'I') { pool_set_failed_transaction(); @@ -3042,8 +3042,8 @@ ProcessBackendResponse(POOL_CONNECTION * frontend, case 'E': /* ErrorResponse */ status = ErrorResponse(frontend, backend); - if (TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : - REAL_MASTER_NODE_ID) != 'I') + if (TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : + REAL_MAIN_NODE_ID) != 'I') pool_set_failed_transaction(); break; @@ -3309,9 +3309,9 @@ raise_intentional_error_if_need(POOL_CONNECTION_POOL * backend) query_context = session_context->query_context; - if (MASTER_SLAVE && + if (NATIVE_REPLICATION && TSTATE(backend, PRIMARY_NODE_ID) == 'T' && - PRIMARY_NODE_ID != MASTER_NODE_ID && + PRIMARY_NODE_ID != MAIN_NODE_ID && query_context && is_select_query(query_context->parse_tree, query_context->original_query)) { @@ -3330,7 +3330,7 @@ raise_intentional_error_if_need(POOL_CONNECTION_POOL * backend) } if (REPLICATION && - TSTATE(backend, REAL_MASTER_NODE_ID) == 'T' && + TSTATE(backend, REAL_MAIN_NODE_ID) == 'T' && !pool_config->replicate_select && query_context && is_select_query(query_context->parse_tree, query_context->original_query)) @@ -3383,7 +3383,7 @@ check_errors(POOL_CONNECTION_POOL * backend, int backend_id) { /* - * Check dead lock error on the master node and abort transactions on all + * Check dead lock error on the main node and abort transactions on all * nodes if so. */ if (detect_deadlock_error(CONNECTION(backend, backend_id), MAJOR(backend)) == SPECIFIED_ERROR) @@ -3393,7 +3393,7 @@ check_errors(POOL_CONNECTION_POOL * backend, int backend_id) * Check serialization failure error and abort * transactions on all nodes if so. Otherwise we allow * data inconsistency among DB nodes. See following - * scenario: (M:master, S:slave) + * scenario: (M:main, S:replica) * * M:S1:BEGIN; * M:S2:BEGIN; @@ -3421,7 +3421,7 @@ check_errors(POOL_CONNECTION_POOL * backend, int backend_id) * * M:S1:BEGIN; * S:S1:BEGIN; - * M:S1:SELECT 1; <-- only sent to MASTER + * M:S1:SELECT 1; <-- only sent to MAIN * M:S1:SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; * S:S1:SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; * M: <-- error @@ -3449,10 +3449,10 @@ generate_error_message(char *prefix, int specific_error, char *query) return; static char *error_messages[] = { - "received deadlock error message from master node. query: %s", - "received serialization failure error message from master node. query: %s", + "received deadlock error message from main node. query: %s", + "received serialization failure error message from main node. query: %s", "received SET TRANSACTION ISOLATION LEVEL must be called before any query error. query: %s", - "received query cancel error message from master node. query: %s" + "received query cancel error message from main node. 
query: %s" }; String *msg; @@ -3515,8 +3515,8 @@ per_node_error_log(POOL_CONNECTION_POOL * backend, int node_id, char *query, cha } /* - * Send parse message to primary/master node and wait for reply if particular - * message is not yet parsed on the primary/master node but parsed on other + * Send parse message to primary/main node and wait for reply if particular + * message is not yet parsed on the primary/main node but parsed on other * node. Caller must provide the parse message data as "message". */ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend, @@ -3553,7 +3553,7 @@ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend, new_qc = pool_query_context_shallow_copy(qc); memset(new_qc->where_to_send, 0, sizeof(new_qc->where_to_send)); new_qc->where_to_send[PRIMARY_NODE_ID] = 1; - new_qc->virtual_master_node_id = PRIMARY_NODE_ID; + new_qc->virtual_main_node_id = PRIMARY_NODE_ID; new_qc->load_balance_node_id = PRIMARY_NODE_ID; /* @@ -3630,7 +3630,7 @@ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend, } else { - /* expect to send to master node only */ + /* expect to send to main node only */ for (i = 0; i < NUM_BACKENDS; i++) { if (qc->where_to_send[i] && statecmp(qc->query_state[i], POOL_PARSE_COMPLETE) < 0) @@ -3688,18 +3688,18 @@ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend, * Arguments: * ntuples: Array of number of affected tuples. -1 represents down node. * nmembers: Number of elements in ntuples. - * master_node: The master node id. Less than 0 means ignore this parameter. + * main_node: The main node id. Less than 0 means ignore this parameter. * number_of_nodes: Number of elements in victim nodes array. * - * Note: If no one wins and master_node >= 0, winner would be the - * master and other nodes who has same number of tuples as the master. + * Note: If no one wins and main_node >= 0, winner would be the + * main and other nodes who has same number of tuples as the main. * * Caution: Returned victim node array is allocated in static memory * of this function. Subsequent calls to this function will overwrite * the memory. */ static int * -find_victim_nodes(int *ntuples, int nmembers, int master_node, int *number_of_nodes) +find_victim_nodes(int *ntuples, int nmembers, int main_node, int *number_of_nodes) { static int victim_nodes[MAX_NUM_BACKENDS]; static int votes[MAX_NUM_BACKENDS]; @@ -3748,15 +3748,15 @@ find_victim_nodes(int *ntuples, int nmembers, int master_node, int *number_of_no /* Everyone is different */ if (maxvotes == 1) { - /* Master node is specified? */ - if (master_node < 0) + /* Main node is specified? */ + if (main_node < 0) return NULL; /* - * If master node is specified, let it and others who has same ntuples + * If main node is specified, let it and others who has same ntuples * win. */ - majority_ntuples = ntuples[master_node]; + majority_ntuples = ntuples[main_node]; } else { @@ -3774,15 +3774,15 @@ find_victim_nodes(int *ntuples, int nmembers, int master_node, int *number_of_no { /* No one wins */ - /* Master node is specified? */ - if (master_node < 0) + /* Main node is specified? */ + if (main_node < 0) return NULL; /* - * If master node is specified, let it and others who has same + * If main node is specified, let it and others who has same * ntuples win. 
*/ - majority_ntuples = ntuples[master_node]; + majority_ntuples = ntuples[main_node]; } } @@ -4127,7 +4127,7 @@ pool_at_command_success(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backe * transaction or disable_load_balance_on_write is 'ALWAYS', remember * that we had a write query in this transaction. */ - if (TSTATE(backend, MASTER_SLAVE ? PRIMARY_NODE_ID : REAL_MASTER_NODE_ID) == 'T' || + if (TSTATE(backend, NATIVE_REPLICATION ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) == 'T' || pool_config->disable_load_balance_on_write == DLBOW_ALWAYS) { /* @@ -4168,17 +4168,17 @@ pool_read_message_length(POOL_CONNECTION_POOL * cp) length0; int i; - /* read message from master node */ - pool_read(CONNECTION(cp, MASTER_NODE_ID), &length0, sizeof(length0)); + /* read message from main node */ + pool_read(CONNECTION(cp, MAIN_NODE_ID), &length0, sizeof(length0)); length0 = ntohl(length0); ereport(DEBUG5, (errmsg("reading message length"), - errdetail("slot: %d length: %d", MASTER_NODE_ID, length0))); + errdetail("slot: %d length: %d", MAIN_NODE_ID, length0))); for (i = 0; i < NUM_BACKENDS; i++) { - if (!VALID_BACKEND(i) || IS_MASTER_NODE_ID(i)) + if (!VALID_BACKEND(i) || IS_MAIN_NODE_ID(i)) { continue; } @@ -4221,25 +4221,25 @@ pool_read_message_length2(POOL_CONNECTION_POOL * cp) int i; static int length_array[MAX_CONNECTION_SLOTS]; - /* read message from master node */ - pool_read(CONNECTION(cp, MASTER_NODE_ID), &length0, sizeof(length0)); + /* read message from main node */ + pool_read(CONNECTION(cp, MAIN_NODE_ID), &length0, sizeof(length0)); length0 = ntohl(length0); - length_array[MASTER_NODE_ID] = length0; + length_array[MAIN_NODE_ID] = length0; ereport(DEBUG5, (errmsg("reading message length"), - errdetail("master slot: %d length: %d", MASTER_NODE_ID, length0))); + errdetail("main slot: %d length: %d", MAIN_NODE_ID, length0))); for (i = 0; i < NUM_BACKENDS; i++) { - if (VALID_BACKEND(i) && !IS_MASTER_NODE_ID(i)) + if (VALID_BACKEND(i) && !IS_MAIN_NODE_ID(i)) { pool_read(CONNECTION(cp, i), &length, sizeof(length)); length = ntohl(length); ereport(DEBUG5, (errmsg("reading message length"), - errdetail("master slot: %d length: %d", i, length))); + errdetail("main slot: %d length: %d", i, length))); if (length != length0) { @@ -4281,7 +4281,7 @@ pool_read_kind(POOL_CONNECTION_POOL * cp) pool_read(CONNECTION(cp, i), &kind, sizeof(kind)); - if (IS_MASTER_NODE_ID(i)) + if (IS_MAIN_NODE_ID(i)) { kind0 = kind; } @@ -4293,10 +4293,10 @@ pool_read_kind(POOL_CONNECTION_POOL * cp) if (kind0 == 'E') { - if (pool_extract_error_message(false, MASTER(cp), MAJOR(cp), true, &message) == 1) + if (pool_extract_error_message(false, MAIN(cp), MAJOR(cp), true, &message) == 1) { ereport(LOG, - (errmsg("pool_read_kind: error message from master backend:%s", message))); + (errmsg("pool_read_kind: error message from main backend:%s", message))); pfree(message); } } @@ -4311,7 +4311,7 @@ pool_read_kind(POOL_CONNECTION_POOL * cp) } ereport(ERROR, (errmsg("unable to read message kind"), - errdetail("kind does not match between master(%x) slot[%d] (%x)", kind0, i, kind))); + errdetail("kind does not match between main(%x) slot[%d] (%x)", kind0, i, kind))); } } } @@ -4336,7 +4336,7 @@ pool_read_int(POOL_CONNECTION_POOL * cp) continue; } pool_read(CONNECTION(cp, i), &data, sizeof(data)); - if (IS_MASTER_NODE_ID(i)) + if (IS_MAIN_NODE_ID(i)) { data0 = data; } @@ -4346,7 +4346,7 @@ pool_read_int(POOL_CONNECTION_POOL * cp) { ereport(ERROR, (errmsg("unable to read int value"), - errdetail("data does not match between between 
master(%x) slot[%d] (%x)", data0, i, data))); + errdetail("data does not match between main(%x) slot[%d] (%x)", data0, i, data))); } } @@ -4376,7 +4376,7 @@ si_get_snapshot(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node * deadlock. */ if (pool_config->backend_clustering_mode == CM_SNAPSHOT_ISOLATION && - TSTATE(backend, MASTER_NODE_ID) == 'T' && + TSTATE(backend, MAIN_NODE_ID) == 'T' && si_snapshot_aquire_command(node) && !si_snapshot_prepared() && frontend && frontend->no_forward == 0) diff --git a/src/query_cache/pool_memqcache.c b/src/query_cache/pool_memqcache.c index 263274769..61726c29b 100644 --- a/src/query_cache/pool_memqcache.c +++ b/src/query_cache/pool_memqcache.c @@ -801,7 +801,7 @@ pool_fetch_from_memory_cache(POOL_CONNECTION * frontend, /* * We keep previous transaction state. */ - state = MASTER(backend)->tstate; + state = MAIN(backend)->tstate; send_message(frontend, 'Z', 5, (char *) &state); } @@ -931,9 +931,9 @@ pool_is_allow_to_cache(Node *node, char *query) return false; /* - * Check black table list first. + * Check cache unsafe table list first. */ - if (pool_config->num_black_memqcache_table_list > 0) + if (pool_config->num_cache_unsafe_memqcache_table_list > 0) { /* * Extract oids in from clause of SELECT, and check if SELECT to them @@ -946,7 +946,7 @@ pool_is_allow_to_cache(Node *node, char *query) { ereport(DEBUG1, (errmsg("memcache: checking if node is allowed to cache: check table_names[%d] = \"%s\"", i, ctx.table_names[i]))); - if (pool_is_table_in_black_list(ctx.table_names[i]) == true) + if (pool_is_table_in_unsafe_list(ctx.table_names[i]) == true) { ereport(DEBUG1, (errmsg("memcache: node is not allowed to cache"))); @@ -998,7 +998,7 @@ pool_is_allow_to_cache(Node *node, char *query) - * If the table is in the while list, allow to cache even if it is VIEW or + * If the table is in the cache_safe list, allow to cache even if it is VIEW or * unlogged table. */ - if (pool_config->num_white_memqcache_table_list > 0) + if (pool_config->num_cache_safe_memqcache_table_list > 0) { if (num_oids < 0) num_oids = pool_extract_table_oids_from_select_stmt(node, &ctx); @@ -1013,7 +1013,7 @@ pool_is_allow_to_cache(Node *node, char *query) (errmsg("memcache: checking if node is allowed to cache: check table_names[%d] = \"%s\"", i, table))); if (is_view(table) || is_unlogged_table(table)) { - if (pool_is_table_in_white_list(table) == false) + if (pool_is_table_in_safe_list(table) == false) { ereport(DEBUG1, (errmsg("memcache: node is not allowed to cache"))); @@ -1066,11 +1066,11 @@ pool_is_allow_to_cache(Node *node, char *query) - * Return true If the SELECTed table is in back list. + * Return true if the SELECTed table is in the cache_unsafe list. */ bool -pool_is_table_in_black_list(const char *table_name) +pool_is_table_in_unsafe_list(const char *table_name) { - if (pool_config->num_black_memqcache_table_list > 0 && - pattern_compare((char *) table_name, BLACKLIST, "black_memqcache_table_list") == 1) + if (pool_config->num_cache_unsafe_memqcache_table_list > 0 && + pattern_compare((char *) table_name, WRITELIST, "cache_unsafe_memqcache_table_list") == 1) { return true; } @@ -1079,13 +1079,13 @@ pool_is_table_in_black_list(const char *table_name) } /* - * Return true If the SELECTed table is in white list. + * Return true if the SELECTed table is in the cache_safe list.
*/ bool -pool_is_table_in_white_list(const char *table_name) +pool_is_table_in_safe_list(const char *table_name) { - if (pool_config->num_white_memqcache_table_list > 0 && - pattern_compare((char *) table_name, WHITELIST, "white_memqcache_table_list") == 1) + if (pool_config->num_cache_safe_memqcache_table_list > 0 && + pattern_compare((char *) table_name, READONLYLIST, "cache_safe_memqcache_table_list") == 1) { return true; } @@ -1523,7 +1523,7 @@ pool_get_database_oid(void) * Search relcache. */ oid = (int) (intptr_t) pool_search_relcache(relcache, backend, - MASTER_CONNECTION(backend)->sp->database); + MAIN_CONNECTION(backend)->sp->database); return oid; } @@ -1543,7 +1543,7 @@ pool_get_database_oid_from_dbname(char *dbname) backend = pool_get_session_context(false)->backend; snprintf(query, sizeof(query), DATABASE_TO_OID_QUERY, dbname); - do_query(MASTER(backend), query, &res, MAJOR(backend)); + do_query(MAIN(backend), query, &res, MAJOR(backend)); if (res->numrows != 1) { diff --git a/src/rewrite/pool_lobj.c b/src/rewrite/pool_lobj.c index 01e50a86f..38187fe14 100644 --- a/src/rewrite/pool_lobj.c +++ b/src/rewrite/pool_lobj.c @@ -172,9 +172,9 @@ pool_rewrite_lo_creat(char kind, char *packet, int packet_len, */ /* issue lock table command to lob_lock_table */ snprintf(qbuf, sizeof(qbuf), "LOCK TABLE %s IN SHARE ROW EXCLUSIVE MODE", pool_config->lobj_lock_table); - per_node_statement_log(backend, MASTER_NODE_ID, qbuf); - status = do_command(frontend, MASTER(backend), qbuf, MAJOR(backend), MASTER_CONNECTION(backend)->pid, - MASTER_CONNECTION(backend)->key, 0); + per_node_statement_log(backend, MAIN_NODE_ID, qbuf); + status = do_command(frontend, MAIN(backend), qbuf, MAJOR(backend), MAIN_CONNECTION(backend)->pid, + MAIN_CONNECTION(backend)->key, 0); if (status == POOL_END) { ereport(WARNING, @@ -185,7 +185,7 @@ pool_rewrite_lo_creat(char kind, char *packet, int packet_len, /* * If transaction state is E, do_command failed to execute command */ - if (TSTATE(backend, MASTER_NODE_ID) == 'E') + if (TSTATE(backend, MAIN_NODE_ID) == 'E') { ereport(LOG, (errmsg("failed while rewriting LO CREATE"), @@ -194,8 +194,8 @@ pool_rewrite_lo_creat(char kind, char *packet, int packet_len, } /* get max lobj id */ - per_node_statement_log(backend, MASTER_NODE_ID, GET_MAX_LOBJ_KEY); - do_query(MASTER(backend), GET_MAX_LOBJ_KEY, &result, MAJOR(backend)); + per_node_statement_log(backend, MAIN_NODE_ID, GET_MAX_LOBJ_KEY); + do_query(MAIN(backend), GET_MAX_LOBJ_KEY, &result, MAJOR(backend)); if (!result) { diff --git a/src/rewrite/pool_timestamp.c b/src/rewrite/pool_timestamp.c index 58ef5f68a..f218c4289 100644 --- a/src/rewrite/pool_timestamp.c +++ b/src/rewrite/pool_timestamp.c @@ -478,7 +478,7 @@ rewrite_timestamp_walker(Node *node, void *context) /* - * Get `now()' from MASTER node + * Get `now()' from MAIN node */ static char * get_current_timestamp(POOL_CONNECTION_POOL * backend) @@ -486,7 +486,7 @@ get_current_timestamp(POOL_CONNECTION_POOL * backend) POOL_SELECT_RESULT *res; static char timestamp[64]; - do_query(MASTER(backend), "SELECT now()", &res, MAJOR(backend)); + do_query(MAIN(backend), "SELECT now()", &res, MAJOR(backend)); if (res->numrows != 1) { @@ -1227,7 +1227,7 @@ makeStringConstFromQuery(POOL_CONNECTION_POOL * backend, char *expression) char *str; snprintf(query, sizeof(query), "SELECT %s", expression); - do_query(MASTER(backend), query, &res, MAJOR(backend)); + do_query(MAIN(backend), query, &res, MAJOR(backend)); if (res->numrows != 1) { diff --git a/src/sample/pgpool.conf.sample-logical 
b/src/sample/pgpool.conf.sample-logical index 8aa9b901c..3fd624d9d 100644 --- a/src/sample/pgpool.conf.sample-logical +++ b/src/sample/pgpool.conf.sample-logical @@ -84,7 +84,7 @@ backend_data_directory0 = '/data' backend_flag0 = 'ALLOW_TO_FAILOVER' # Controls various backend behavior # ALLOW_TO_FAILOVER, DISALLOW_TO_FAILOVER - # or ALWAYS_MASTER + # or ALWAYS_PRIMARY backend_application_name0 = 'server0' # walsender's application_name, used for "show pool_nodes" command #backend_hostname1 = 'host2' @@ -359,20 +359,20 @@ load_balance_mode = on # (change requires restart) ignore_leading_white_space = on # Ignore leading white spaces of each query -white_function_list = '' +read_only_function_list = '' # Comma separated list of function names # that don't write to database # Regexp are accepted -black_function_list = '' +write_function_list = '' # Comma separated list of function names # that write to database # Regexp are accepted - # If both white_function_list and black_function_list + # If both read_only_function_list and write_function_list # is empty, function's volatile property is checked. # If it's volatile, the function is regarded as a # writing function. -black_query_pattern_list = '' +primary_routing_query_pattern_list = '' # Semicolon separated list of query patterns # that should be sent to primary node # Regexp are accepted @@ -397,7 +397,7 @@ statement_level_load_balance = off # Enables statement level load balancing #------------------------------------------------------------------------------ -# MASTER/SLAVE MODE +# NATIVE REPLICATION MODE #------------------------------------------------------------------------------ # - Streaming - @@ -423,19 +423,19 @@ delay_threshold = 10000000 # - Special commands - -follow_master_command = '' - # Executes this command after master failover +follow_primary_command = '' + # Executes this command after main node failover # Special values: # %d = failed node id # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -493,12 +493,12 @@ failover_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -509,12 +509,12 @@ failback_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path 
# %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -665,7 +665,7 @@ wd_escalation_command = '' # Executes this command at escalation on new active pgpool. # (change requires restart) wd_de_escalation_command = '' - # Executes this command when master pgpool resigns from being master. + # Executes this command when leader pgpool resigns from being leader. # (change requires restart) # - Watchdog consensus settings for failover - @@ -806,10 +806,10 @@ check_temp_table = catalog check_unlogged_table = on # If on, enable unlogged table check in SELECT statements. - # This initiates queries against system catalog of primary/master - # thus increases load of master. + # This initiates queries against system catalog of primary/main + # thus increases load of primary. # If you are absolutely sure that your system never uses unlogged tables - # and you want to save access to primary/master, you could turn this off. + # and you want to save access to primary/main, you could turn this off. # Default is on. enable_shared_relcache = on # If on, relation cache stored in memory cache, @@ -817,7 +817,7 @@ enable_shared_relcache = on # Default is on. # (change requires restart) -relcache_query_target = master # Target node to send relcache queries. Default is master (primary) node. +relcache_query_target = primary # Target node to send relcache queries. Default is primary node. # If load_balance_node is specified, queries will be sent to load balance node. #------------------------------------------------------------------------------ # IN MEMORY QUERY MEMORY CACHE @@ -869,11 +869,11 @@ memqcache_cache_block_size = 1MB memqcache_oiddir = '/var/log/pgpool/oiddir' # Temporary work directory to record table oids # (change requires restart) -white_memqcache_table_list = '' +cache_safe_memqcache_table_list = '' # Comma separated list of table names to memcache # that don't write to database # Regexp are accepted -black_memqcache_table_list = '' +cache_unsafe_memqcache_table_list = '' # Comma separated list of table names not to memcache # that don't write to database # Regexp are accepted diff --git a/src/sample/pgpool.conf.sample-raw b/src/sample/pgpool.conf.sample-raw index 2141d5c8f..c250556c4 100644 --- a/src/sample/pgpool.conf.sample-raw +++ b/src/sample/pgpool.conf.sample-raw @@ -85,7 +85,7 @@ backend_data_directory0 = '/data' backend_flag0 = 'ALLOW_TO_FAILOVER' # Controls various backend behavior # ALLOW_TO_FAILOVER, DISALLOW_TO_FAILOVER - # or ALWAYS_MASTER + # or ALWAYS_PRIMARY backend_application_name0 = 'server0' # walsender's application_name, used for "show pool_nodes" command #backend_hostname1 = 'host2' @@ -360,20 +360,20 @@ load_balance_mode = on # (change requires restart) ignore_leading_white_space = on # Ignore leading white spaces of each query -white_function_list = '' +read_only_function_list = '' # Comma separated list of function names # that don't write to database # Regexp are accepted -black_function_list = '' +write_function_list = '' # Comma separated list of function names # that write to database # Regexp are accepted - # If both white_function_list and black_function_list + # If both read_only_function_list and write_function_list # is empty, function's volatile property is checked. # If it's volatile, the function is regarded as a # writing function. 
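# Illustrative values (editor's example, not shipped defaults; the
# function names are hypothetical): with the renamed parameters a site
# might set
#   read_only_function_list = 'get_.*,count_.*'
#   write_function_list = 'delete_.*,nextval,setval'
# so SELECTs calling get_*/count_* stay load-balanced, while SELECTs
# calling delete_*, nextval or setval are routed to the primary.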
-black_query_pattern_list = '' +primary_routing_query_pattern_list = '' # Semicolon separated list of query patterns # that should be sent to primary node # Regexp are accepted @@ -430,14 +430,14 @@ dml_adaptive_object_relationship_list= '' # # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view' # Note: function name in this list must also be present in - # the black_function_list + # the write_function_list # only valid for disable_load_balance_on_write = 'dml_adaptive'. statement_level_load_balance = off # Enables statement level load balancing #------------------------------------------------------------------------------ -# MASTER/SLAVE MODE +# NATIVE REPLICATION MODE #------------------------------------------------------------------------------ # - Streaming - @@ -463,19 +463,19 @@ delay_threshold = 10000000 # - Special commands - -follow_master_command = '' - # Executes this command after master failover +follow_primary_command = '' + # Executes this command after main node failover # Special values: # %d = failed node id # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -533,12 +533,12 @@ failover_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -549,12 +549,12 @@ failback_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -703,7 +703,7 @@ wd_escalation_command = '' # Executes this command at escalation on new active pgpool. # (change requires restart) wd_de_escalation_command = '' - # Executes this command when master pgpool resigns from being master. + # Executes this command when leader pgpool resigns from being leader. # (change requires restart) # - Watchdog consensus settings for failover - @@ -850,10 +850,10 @@ check_temp_table = catalog check_unlogged_table = on # If on, enable unlogged table check in SELECT statements. - # This initiates queries against system catalog of primary/master - # thus increases load of master. + # This initiates queries against system catalog of primary/main + # thus increases load of primary. 
# If you are absolutely sure that your system never uses unlogged tables - # and you want to save access to primary/master, you could turn this off. + # and you want to save access to primary/main, you could turn this off. # Default is on. enable_shared_relcache = on # If on, relation cache stored in memory cache, @@ -861,7 +861,7 @@ enable_shared_relcache = on # Default is on. # (change requires restart) -relcache_query_target = master # Target node to send relcache queries. Default is master (primary) node. +relcache_query_target = primary # Target node to send relcache queries. Default is primary node. # If load_balance_node is specified, queries will be sent to load balance node. #------------------------------------------------------------------------------ # IN MEMORY QUERY MEMORY CACHE @@ -913,11 +913,11 @@ memqcache_cache_block_size = 1MB memqcache_oiddir = '/var/log/pgpool/oiddir' # Temporary work directory to record table oids # (change requires restart) -white_memqcache_table_list = '' +cache_safe_memqcache_table_list = '' # Comma separated list of table names to memcache # that don't write to database # Regexp are accepted -black_memqcache_table_list = '' +cache_unsafe_memqcache_table_list = '' # Comma separated list of table names not to memcache # that don't write to database # Regexp are accepted diff --git a/src/sample/pgpool.conf.sample-replication b/src/sample/pgpool.conf.sample-replication index 41b572806..5a22093c6 100644 --- a/src/sample/pgpool.conf.sample-replication +++ b/src/sample/pgpool.conf.sample-replication @@ -355,20 +355,20 @@ load_balance_mode = on # (change requires restart) ignore_leading_white_space = on # Ignore leading white spaces of each query -white_function_list = '' +read_only_function_list = '' # Comma separated list of function names # that don't write to database # Regexp are accepted -black_function_list = '' +write_function_list = '' # Comma separated list of function names # that write to database # Regexp are accepted - # If both white_function_list and black_function_list + # If both read_only_function_list and write_function_list # is empty, function's volatile property is checked. # If it's volatile, the function is regarded as a # writing function. -black_query_pattern_list = '' +primary_routing_query_pattern_list = '' # Semicolon separated list of query patterns # that should be sent to primary node # Regexp are accepted @@ -425,14 +425,14 @@ dml_adaptive_object_relationship_list= '' # # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view' # Note: function name in this list must also be present in - # the black_function_list + # the write_function_list # only valid for disable_load_balance_on_write = 'dml_adaptive'. 
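# Illustrative value (editor's example, not a shipped default): the
# renamed primary_routing_query_pattern_list takes semicolon-separated
# regexps, e.g.
#   primary_routing_query_pattern_list = 'SELECT\s+nextval.*;SELECT\s+setval.*'
# which forces sequence-manipulating SELECTs to the primary node.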
statement_level_load_balance = off # Enables statement level load balancing #------------------------------------------------------------------------------ -# MASTER/SLAVE MODE +# NATIVE REPLICATION MODE #------------------------------------------------------------------------------ # - Streaming - @@ -459,19 +459,19 @@ delay_threshold = 0 # - Special commands - -follow_master_command = '' - # Executes this command after master failover +follow_primary_command = '' + # Executes this command after main node failover # Special values: # %d = failed node id # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -529,12 +529,12 @@ failover_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -545,12 +545,12 @@ failback_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -701,7 +701,7 @@ wd_escalation_command = '' # Executes this command at escalation on new active pgpool. # (change requires restart) wd_de_escalation_command = '' - # Executes this command when master pgpool resigns from being master. + # Executes this command when leader pgpool resigns from being leader. # (change requires restart) # - Watchdog consensus settings for failover - @@ -850,10 +850,10 @@ check_temp_table = catalog check_unlogged_table = on # If on, enable unlogged table check in SELECT statements. - # This initiates queries against system catalog of primary/master - # thus increases load of master. + # This initiates queries against system catalog of primary/main + # thus increases load of primary. # If you are absolutely sure that your system never uses unlogged tables - # and you want to save access to primary/master, you could turn this off. + # and you want to save access to primary/main, you could turn this off. # Default is on. enable_shared_relcache = on # If on, relation cache stored in memory cache, @@ -861,7 +861,7 @@ enable_shared_relcache = on # Default is on. # (change requires restart) -relcache_query_target = master # Target node to send relcache queries. Default is master (primary) node. 
+relcache_query_target = primary # Target node to send relcache queries. Default is primary node. # If load_balance_node is specified, queries will be sent to load balance node. #------------------------------------------------------------------------------ # IN MEMORY QUERY MEMORY CACHE @@ -913,11 +913,11 @@ memqcache_cache_block_size = 1MB memqcache_oiddir = '/var/log/pgpool/oiddir' # Temporary work directory to record table oids # (change requires restart) -white_memqcache_table_list = '' +cache_safe_memqcache_table_list = '' # Comma separated list of table names to memcache # that don't write to database # Regexp are accepted -black_memqcache_table_list = '' +cache_unsafe_memqcache_table_list = '' # Comma separated list of table names not to memcache # that don't write to database # Regexp are accepted diff --git a/src/sample/pgpool.conf.sample-slony b/src/sample/pgpool.conf.sample-slony index 071ba7b41..de9fc9101 100644 --- a/src/sample/pgpool.conf.sample-slony +++ b/src/sample/pgpool.conf.sample-slony @@ -84,7 +84,7 @@ backend_data_directory0 = '/data' backend_flag0 = 'ALLOW_TO_FAILOVER' # Controls various backend behavior # ALLOW_TO_FAILOVER, DISALLOW_TO_FAILOVER - # or ALWAYS_MASTER + # or ALWAYS_PRIMARY #backend_hostname1 = 'host2' #backend_port1 = 5433 #backend_weight1 = 1 @@ -356,20 +356,20 @@ load_balance_mode = on # (change requires restart) ignore_leading_white_space = on # Ignore leading white spaces of each query -white_function_list = '' +read_only_function_list = '' # Comma separated list of function names # that don't write to database # Regexp are accepted -black_function_list = '' +write_function_list = '' # Comma separated list of function names # that write to database # Regexp are accepted - # If both white_function_list and black_function_list + # If both read_only_function_list and write_function_list # is empty, function's volatile property is checked. # If it's volatile, the function is regarded as a # writing function. -black_query_pattern_list = '' +primary_routing_query_pattern_list = '' # Semicolon separated list of query patterns # that should be sent to primary node # Regexp are accepted @@ -426,14 +426,14 @@ dml_adaptive_object_relationship_list= '' # # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view' # Note: function name in this list must also be present in - # the black_function_list + # the write_function_list # only valid for disable_load_balance_on_write = 'dml_adaptive'. 
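# Illustrative sketch (editor's example; follow_primary.sh is a
# hypothetical site-provided script, not shipped with pgpool):
#   follow_primary_command = '/etc/pgpool/follow_primary.sh %d %h %p %D %m %H %r %R'
# Such a script would typically re-sync the detached node from the new
# main node (%H:%r) with pg_rewind or pg_basebackup and then re-attach
# node %d with pcp_attach_node.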
statement_level_load_balance = off # Enables statement level load balancing #------------------------------------------------------------------------------ -# MASTER/SLAVE MODE +# NATIVE REPLICATION MODE #------------------------------------------------------------------------------ # - Streaming - @@ -460,19 +460,19 @@ delay_threshold = 0 # - Special commands - -follow_master_command = '' - # Executes this command after master failover +follow_primary_command = '' + # Executes this command after main node failover # Special values: # %d = failed node id # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -530,12 +530,12 @@ failover_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -546,12 +546,12 @@ failback_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -701,7 +701,7 @@ wd_escalation_command = '' # Executes this command at escalation on new active pgpool. # (change requires restart) wd_de_escalation_command = '' - # Executes this command when master pgpool resigns from being master. + # Executes this command when leader pgpool resigns from being leader. # (change requires restart) # - Watchdog consensus settings for failover - @@ -849,10 +849,10 @@ check_temp_table = catalog check_unlogged_table = on # If on, enable unlogged table check in SELECT statements. - # This initiates queries against system catalog of primary/master - # thus increases load of master. + # This initiates queries against system catalog of primary/main + # thus increases load of primary. # If you are absolutely sure that your system never uses unlogged tables - # and you want to save access to primary/master, you could turn this off. + # and you want to save access to primary/main, you could turn this off. # Default is on. enable_shared_relcache = on # If on, relation cache stored in memory cache, @@ -860,7 +860,7 @@ enable_shared_relcache = on # Default is on. # (change requires restart) -relcache_query_target = master # Target node to send relcache queries. Default is master (primary) node. 
+relcache_query_target = primary # Target node to send relcache queries. Default is primary node. # If load_balance_node is specified, queries will be sent to load balance node. #------------------------------------------------------------------------------ # IN MEMORY QUERY MEMORY CACHE @@ -912,11 +912,11 @@ memqcache_cache_block_size = 1MB memqcache_oiddir = '/var/log/pgpool/oiddir' # Temporary work directory to record table oids # (change requires restart) -white_memqcache_table_list = '' +cache_safe_memqcache_table_list = '' # Comma separated list of table names to memcache # that don't write to database # Regexp are accepted -black_memqcache_table_list = '' +cache_unsafe_memqcache_table_list = '' # Comma separated list of table names not to memcache # that don't write to database # Regexp are accepted diff --git a/src/sample/pgpool.conf.sample-snapshot b/src/sample/pgpool.conf.sample-snapshot index 16a12d106..e8769ebf6 100644 --- a/src/sample/pgpool.conf.sample-snapshot +++ b/src/sample/pgpool.conf.sample-snapshot @@ -353,20 +353,20 @@ load_balance_mode = on # (change requires restart) ignore_leading_white_space = on # Ignore leading white spaces of each query -white_function_list = '' +read_only_function_list = '' # Comma separated list of function names # that don't write to database # Regexp are accepted -black_function_list = '' +write_function_list = '' # Comma separated list of function names # that write to database # Regexp are accepted - # If both white_function_list and black_function_list + # If both read_only_function_list and write_function_list # is empty, function's volatile property is checked. # If it's volatile, the function is regarded as a # writing function. -black_query_pattern_list = '' +primary_routing_query_pattern_list = '' # Semicolon separated list of query patterns # that should be sent to primary node # Regexp are accepted @@ -423,14 +423,14 @@ dml_adaptive_object_relationship_list= '' # # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view' # Note: function name in this list must also be present in - # the black_function_list + # the write_function_list # only valid for disable_load_balance_on_write = 'dml_adaptive'. 
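The in-memory query cache lists follow the same renaming; a minimal sketch mirroring the 006.memqcache regression test below, where cache_safe_v and cache_unsafe_t are illustrative object names:

    echo "memory_cache_enabled = on" >> etc/pgpool.conf
    # Objects in the safe list are cached even when pgpool cannot prove that is
    # safe (e.g. views); objects in the unsafe list are never cached.
    echo "cache_safe_memqcache_table_list = 'cache_safe_v'" >> etc/pgpool.conf
    echo "cache_unsafe_memqcache_table_list = 'cache_unsafe_t'" >> etc/pgpool.conf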
statement_level_load_balance = off # Enables statement level load balancing #------------------------------------------------------------------------------ -# MASTER/SLAVE MODE +# NATIVE REPLICATION MODE #------------------------------------------------------------------------------ # - Streaming - @@ -457,19 +457,19 @@ delay_threshold = 0 # - Special commands - -follow_master_command = '' - # Executes this command after master failover +follow_primary_command = '' + # Executes this command after main node failover # Special values: # %d = failed node id # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -527,12 +527,12 @@ failover_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -543,12 +543,12 @@ failback_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -699,7 +699,7 @@ wd_escalation_command = '' # Executes this command at escalation on new active pgpool. # (change requires restart) wd_de_escalation_command = '' - # Executes this command when master pgpool resigns from being master. + # Executes this command when leader pgpool resigns from being leader. # (change requires restart) # - Watchdog consensus settings for failover - @@ -832,10 +832,10 @@ check_temp_table = catalog check_unlogged_table = on # If on, enable unlogged table check in SELECT statements. - # This initiates queries against system catalog of primary/master - # thus increases load of master. + # This initiates queries against system catalog of primary/main + # thus increases load of primary. # If you are absolutely sure that your system never uses unlogged tables - # and you want to save access to primary/master, you could turn this off. + # and you want to save access to primary/main, you could turn this off. # Default is on. enable_shared_relcache = on # If on, relation cache stored in memory cache, @@ -843,7 +843,7 @@ enable_shared_relcache = on # Default is on. # (change requires restart) -relcache_query_target = master # Target node to send relcache queries. Default is master (primary) node. 
+relcache_query_target = primary # Target node to send relcache queries. Default is primary node. # If load_balance_node is specified, queries will be sent to load balance node. #------------------------------------------------------------------------------ # IN MEMORY QUERY MEMORY CACHE @@ -895,11 +895,11 @@ memqcache_cache_block_size = 1MB memqcache_oiddir = '/var/log/pgpool/oiddir' # Temporary work directory to record table oids # (change requires restart) -white_memqcache_table_list = '' +cache_safe_memqcache_table_list = '' # Comma separated list of table names to memcache # that don't write to database # Regexp are accepted -black_memqcache_table_list = '' +cache_unsafe_memqcache_table_list = '' # Comma separated list of table names not to memcache # that don't write to database # Regexp are accepted diff --git a/src/sample/pgpool.conf.sample-stream b/src/sample/pgpool.conf.sample-stream index 1529b4880..6ca78f5ea 100644 --- a/src/sample/pgpool.conf.sample-stream +++ b/src/sample/pgpool.conf.sample-stream @@ -85,7 +85,7 @@ backend_data_directory0 = '/data' backend_flag0 = 'ALLOW_TO_FAILOVER' # Controls various backend behavior # ALLOW_TO_FAILOVER, DISALLOW_TO_FAILOVER - # or ALWAYS_MASTER + # or ALWAYS_PRIMARY backend_application_name0 = 'server0' # walsender's application_name, used for "show pool_nodes" command #backend_hostname1 = 'host2' @@ -359,20 +359,20 @@ load_balance_mode = on # (change requires restart) ignore_leading_white_space = on # Ignore leading white spaces of each query -white_function_list = '' +read_only_function_list = '' # Comma separated list of function names # that don't write to database # Regexp are accepted -black_function_list = '' +write_function_list = '' # Comma separated list of function names # that write to database # Regexp are accepted - # If both white_function_list and black_function_list + # If both read_only_function_list and write_function_list # is empty, function's volatile property is checked. # If it's volatile, the function is regarded as a # writing function. -black_query_pattern_list = '' +primary_routing_query_pattern_list = '' # Semicolon separated list of query patterns # that should be sent to primary node # Regexp are accepted @@ -429,14 +429,14 @@ dml_adaptive_object_relationship_list= '' # # example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view' # Note: function name in this list must also be present in - # the black_function_list + # the write_function_list # only valid for disable_load_balance_on_write = 'dml_adaptive'. 
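The ALWAYS_MASTER backend flag renamed in these sample files becomes ALWAYS_PRIMARY; a minimal sketch of declaring such a backend, with illustrative host and port values:

    # ALWAYS_PRIMARY makes pgpool treat this backend as the primary without
    # probing it, for backends where pg_is_in_recovery() cannot be used
    # (e.g. Aurora). The other accepted flags are ALLOW_TO_FAILOVER and
    # DISALLOW_TO_FAILOVER.
    echo "backend_hostname0 = 'host1'" >> etc/pgpool.conf
    echo "backend_port0 = 5432" >> etc/pgpool.conf
    echo "backend_flag0 = 'ALWAYS_PRIMARY'" >> etc/pgpool.conf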
statement_level_load_balance = off # Enables statement level load balancing #------------------------------------------------------------------------------ -# MASTER/SLAVE MODE +# NATIVE REPLICATION MODE #------------------------------------------------------------------------------ # - Streaming - @@ -462,19 +462,19 @@ delay_threshold = 10000000 # - Special commands - -follow_master_command = '' - # Executes this command after master failover +follow_primary_command = '' + # Executes this command after main node failover # Special values: # %d = failed node id # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -532,12 +532,12 @@ failover_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -548,12 +548,12 @@ failback_command = '' # %h = failed node host name # %p = failed node port number # %D = failed node database cluster path - # %m = new master node id - # %H = new master node hostname - # %M = old master node id + # %m = new main node id + # %H = new main node hostname + # %M = old main node id # %P = old primary node id - # %r = new master port number - # %R = new master database cluster path + # %r = new main port number + # %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -703,7 +703,7 @@ wd_escalation_command = '' # Executes this command at escalation on new active pgpool. # (change requires restart) wd_de_escalation_command = '' - # Executes this command when master pgpool resigns from being master. + # Executes this command when leader pgpool resigns from being leader. # (change requires restart) # - Watchdog consensus settings for failover - @@ -834,10 +834,10 @@ check_temp_table = catalog check_unlogged_table = on # If on, enable unlogged table check in SELECT statements. - # This initiates queries against system catalog of primary/master - # thus increases load of master. + # This initiates queries against system catalog of primary/main + # thus increases load of primary. # If you are absolutely sure that your system never uses unlogged tables - # and you want to save access to primary/master, you could turn this off. + # and you want to save access to primary/main, you could turn this off. # Default is on. enable_shared_relcache = on # If on, relation cache stored in memory cache, @@ -845,7 +845,7 @@ enable_shared_relcache = on # Default is on. # (change requires restart) -relcache_query_target = master # Target node to send relcache queries. Default is master (primary) node. 
+relcache_query_target = primary # Target node to send relcache queries. Default is primary node. # If load_balance_node is specified, queries will be sent to load balance node. #------------------------------------------------------------------------------ # IN MEMORY QUERY MEMORY CACHE @@ -897,11 +897,11 @@ memqcache_cache_block_size = 1MB memqcache_oiddir = '/var/log/pgpool/oiddir' # Temporary work directory to record table oids # (change requires restart) -white_memqcache_table_list = '' +cache_safe_memqcache_table_list = '' # Comma separated list of table names to memcache # that don't write to database # Regexp are accepted -black_memqcache_table_list = '' +cache_unsafe_memqcache_table_list = '' # Comma separated list of table names not to memcache # that don't write to database # Regexp are accepted diff --git a/src/sample/pgpool_recovery b/src/sample/pgpool_recovery index 1579f91c7..d69e77ded 100644 --- a/src/sample/pgpool_recovery +++ b/src/sample/pgpool_recovery @@ -2,7 +2,7 @@ if [ $# -ne 4 ] then - echo "pgpool_recovery datadir remote_host remote_datadir master_port" + echo "pgpool_recovery datadir remote_host remote_datadir primary_port" exit 1 fi diff --git a/src/sample/scripts/failover.sh.sample b/src/sample/scripts/failover.sh.sample index 4317ce04c..8b4f46072 100755 --- a/src/sample/scripts/failover.sh.sample +++ b/src/sample/scripts/failover.sh.sample @@ -9,12 +9,12 @@ exec > >(logger -i -p local1.info) 2>&1 # %h = failed node hostname # %p = failed node port number # %D = failed node database cluster path -# %m = new master node id -# %H = new master node hostname -# %M = old master node id +# %m = new main node id +# %H = new main node hostname +# %M = old main node id # %P = old primary node id -# %r = new master port number -# %R = new master database cluster path +# %r = new main port number +# %R = new main database cluster path # %N = old primary node hostname # %S = old primary node port number # %% = '%' character @@ -23,31 +23,31 @@ FAILED_NODE_ID="$1" FAILED_NODE_HOST="$2" FAILED_NODE_PORT="$3" FAILED_NODE_PGDATA="$4" -NEW_MASTER_NODE_ID="$5" -NEW_MASTER_NODE_HOST="$6" -OLD_MASTER_NODE_ID="$7" +NEW_MAIN_NODE_ID="$5" +NEW_MAIN_NODE_HOST="$6" +OLD_MAIN_NODE_ID="$7" OLD_PRIMARY_NODE_ID="$8" -NEW_MASTER_NODE_PORT="$9" -NEW_MASTER_NODE_PGDATA="${10}" +NEW_MAIN_NODE_PORT="$9" +NEW_MAIN_NODE_PGDATA="${10}" OLD_PRIMARY_NODE_HOST="${11}" OLD_PRIMARY_NODE_PORT="${12}" PGHOME=/usr/pgsql-11 -logger -i -p local1.info failover.sh: start: failed_node_id=$FAILED_NODE_ID old_primary_node_id=$OLD_PRIMARY_NODE_ID failed_host=$FAILED_NODE_HOST new_master_host=$NEW_MASTER_NODE_HOST +logger -i -p local1.info failover.sh: start: failed_node_id=$FAILED_NODE_ID old_primary_node_id=$OLD_PRIMARY_NODE_ID failed_host=$FAILED_NODE_HOST new_main_host=$NEW_MAIN_NODE_HOST -## If there's no master node anymore, skip failover. -if [ $NEW_MASTER_NODE_ID -lt 0 ]; then +## If there's no main node anymore, skip failover. +if [ $NEW_MAIN_NODE_ID -lt 0 ]; then logger -i -p local1.info failover.sh: All nodes are down. Skipping failover. exit 0 fi ## Test passwrodless SSH -ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MASTER_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ls /tmp > /dev/null +ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ls /tmp > /dev/null if [ $? -ne 0 ]; then - logger -i -p local1.info failover.sh: passwrodless SSH to postgres@${NEW_MASTER_NODE_HOST} failed. 
Please setup passwrodless SSH. + logger -i -p local1.info failover.sh: passwordless SSH to postgres@${NEW_MAIN_NODE_HOST} failed. Please setup passwordless SSH. exit 1 fi @@ -68,15 +68,15 @@ if [ $FAILED_NODE_ID -ne $OLD_PRIMARY_NODE_ID ]; then fi ## Promote Standby node. -logger -i -p local1.info failover.sh: Primary node is down, promote standby node ${NEW_MASTER_NODE_HOST}. +logger -i -p local1.info failover.sh: Primary node is down, promote standby node ${NEW_MAIN_NODE_HOST}. ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ - postgres@${NEW_MASTER_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ${PGHOME}/bin/pg_ctl -D ${NEW_MASTER_NODE_PGDATA} -w promote + postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ${PGHOME}/bin/pg_ctl -D ${NEW_MAIN_NODE_PGDATA} -w promote if [ $? -ne 0 ]; then - logger -i -p local1.error failover.sh: new_master_host=$NEW_MASTER_NODE_HOST promote failed + logger -i -p local1.error failover.sh: new_main_host=$NEW_MAIN_NODE_HOST promote failed exit 1 fi -logger -i -p local1.info failover.sh: end: new_master_node_id=$NEW_MASTER_NODE_ID started as the primary node +logger -i -p local1.info failover.sh: end: new_main_node_id=$NEW_MAIN_NODE_ID started as the primary node exit 0 diff --git a/src/sample/scripts/follow_primary.sh.sample b/src/sample/scripts/follow_primary.sh.sample new file mode 100755 index 000000000..10939ca59 --- /dev/null +++ b/src/sample/scripts/follow_primary.sh.sample @@ -0,0 +1,171 @@ +#!/bin/bash +# This script is run after failover_command to synchronize the Standby with the new Primary. +# First try pg_rewind. If pg_rewind failed, use pg_basebackup. + +set -o xtrace +exec > >(logger -i -p local1.info) 2>&1 + +# Special values: +# %d = failed node id +# %h = failed node hostname +# %p = failed node port number +# %D = failed node database cluster path +# %m = new main node id +# %H = new main node hostname +# %M = old main node id +# %P = old primary node id +# %r = new main port number +# %R = new main database cluster path +# %N = old primary node hostname +# %S = old primary node port number +# %% = '%' character + +FAILED_NODE_ID="$1" +FAILED_NODE_HOST="$2" +FAILED_NODE_PORT="$3" +FAILED_NODE_PGDATA="$4" +NEW_MAIN_NODE_ID="$5" +NEW_MAIN_NODE_HOST="$6" +OLD_MAIN_NODE_ID="$7" +OLD_PRIMARY_NODE_ID="$8" +NEW_MAIN_NODE_PORT="$9" +NEW_MAIN_NODE_PGDATA="${10}" + +PGHOME=/usr/pgsql-11 +ARCHIVEDIR=/var/lib/pgsql/archivedir +REPLUSER=repl +PCP_USER=pgpool +PGPOOL_PATH=/usr/bin +PCP_PORT=9898 + +logger -i -p local1.info follow_primary.sh: start: Standby node ${FAILED_NODE_ID} + +## Test passwordless SSH +ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ls /tmp > /dev/null + +if [ $? -ne 0 ]; then + logger -i -p local1.info follow_primary.sh: passwordless SSH to postgres@${NEW_MAIN_NODE_HOST} failed. Please setup passwordless SSH. + exit 1 +fi + +## Get PostgreSQL major version +PGVERSION=`${PGHOME}/bin/initdb -V | awk '{print $3}' | sed 's/\..*//' | sed 's/\([0-9]*\)[a-zA-Z].*/\1/'` + +if [ $PGVERSION -ge 12 ]; then +RECOVERYCONF=${FAILED_NODE_PGDATA}/myrecovery.conf +else +RECOVERYCONF=${FAILED_NODE_PGDATA}/recovery.conf +fi + +## Check the status of Standby +ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ +postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool ${PGHOME}/bin/pg_ctl -w -D ${FAILED_NODE_PGDATA} status + + ## If Standby is running, synchronize it with the new Primary. if [ $?
-eq 0 ]; then + + logger -i -p local1.info follow_primary.sh: pg_rewind for $FAILED_NODE_ID + + # Create replication slot "${FAILED_NODE_HOST}" + ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool " + ${PGHOME}/bin/psql -p ${NEW_MAIN_NODE_PORT} -c \"SELECT pg_create_physical_replication_slot('${FAILED_NODE_HOST}');\" + " + + ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool " + + set -o errexit + + ${PGHOME}/bin/pg_ctl -w -m f -D ${FAILED_NODE_PGDATA} stop + + cat > ${RECOVERYCONF} << EOT +primary_conninfo = 'host=${NEW_MAIN_NODE_HOST} port=${NEW_MAIN_NODE_PORT} user=${REPLUSER} application_name=${FAILED_NODE_HOST} passfile=''/var/lib/pgsql/.pgpass''' +recovery_target_timeline = 'latest' +restore_command = 'scp ${NEW_MAIN_NODE_HOST}:${ARCHIVEDIR}/%f %p' +primary_slot_name = '${FAILED_NODE_HOST}' +EOT + + if [ ${PGVERSION} -ge 12 ]; then + touch ${FAILED_NODE_PGDATA}/standby.signal + else + echo \"standby_mode = 'on'\" >> ${RECOVERYCONF} + fi + + ${PGHOME}/bin/pg_rewind -D ${FAILED_NODE_PGDATA} --source-server=\"user=postgres host=${NEW_MAIN_NODE_HOST} port=${NEW_MAIN_NODE_PORT}\" + + " + + if [ $? -ne 0 ]; then + logger -i -p local1.error follow_primary.sh: end: pg_rewind failed. Try pg_basebackup. + + ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool " + + set -o errexit + + # Execute pg_basebackup + rm -rf ${FAILED_NODE_PGDATA} + rm -rf ${ARCHIVEDIR}/* + ${PGHOME}/bin/pg_basebackup -h ${NEW_MAIN_NODE_HOST} -U $REPLUSER -p ${NEW_MAIN_NODE_PORT} -D ${FAILED_NODE_PGDATA} -X stream + + if [ ${PGVERSION} -ge 12 ]; then + sed -i -e \"\\\$ainclude_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'\" \ + -e \"/^include_if_exists = '$(echo ${RECOVERYCONF} | sed -e 's/\//\\\//g')'/d\" ${FAILED_NODE_PGDATA}/postgresql.conf + fi + + cat > ${RECOVERYCONF} << EOT +primary_conninfo = 'host=${NEW_MAIN_NODE_HOST} port=${NEW_MAIN_NODE_PORT} user=${REPLUSER} application_name=${FAILED_NODE_HOST} passfile=''/var/lib/pgsql/.pgpass''' +recovery_target_timeline = 'latest' +restore_command = 'scp ${NEW_MAIN_NODE_HOST}:${ARCHIVEDIR}/%f %p' +primary_slot_name = '${FAILED_NODE_HOST}' +EOT + + if [ ${PGVERSION} -ge 12 ]; then + touch ${FAILED_NODE_PGDATA}/standby.signal + else + echo \"standby_mode = 'on'\" >> ${RECOVERYCONF} + fi + " + + if [ $? -ne 0 ]; then + # drop replication slot + ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool " + ${PGHOME}/bin/psql -p ${NEW_MAIN_NODE_PORT} -c \"SELECT pg_drop_replication_slot('${FAILED_NODE_HOST}')\" + " + + logger -i -p local1.error follow_primary.sh: end: pg_basebackup failed + exit 1 + fi + fi + + # start Standby node on ${FAILED_NODE_HOST} + ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ + postgres@${FAILED_NODE_HOST} -i ~/.ssh/id_rsa_pgpool $PGHOME/bin/pg_ctl -l /dev/null -w -D ${FAILED_NODE_PGDATA} start + + # If the Standby started successfully, attach this node + if [ $? -eq 0 ]; then + + # Run pcp_attach_node to attach the Standby node to Pgpool-II. + ${PGPOOL_PATH}/pcp_attach_node -w -h localhost -U $PCP_USER -p ${PCP_PORT} -n ${FAILED_NODE_ID} + + if [ $?
-ne 0 ]; then + logger -i -p local1.error follow_primary.sh: end: pcp_attach_node failed + exit 1 + fi + + # If the Standby failed to start, drop replication slot "${FAILED_NODE_HOST}" + else + + ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@${NEW_MAIN_NODE_HOST} -i ~/.ssh/id_rsa_pgpool \ + ${PGHOME}/bin/psql -p ${NEW_MAIN_NODE_PORT} -c "SELECT pg_drop_replication_slot('${FAILED_NODE_HOST}')" + + logger -i -p local1.error follow_primary.sh: end: follow primary command failed + exit 1 + fi + +else + logger -i -p local1.info follow_primary.sh: failed_node_id=${FAILED_NODE_ID} is not running. skipping follow primary command + exit 0 +fi + +logger -i -p local1.info follow_primary.sh: end: follow primary command complete +exit 0 diff --git a/src/sample/scripts/recovery_2nd_stage.sample b/src/sample/scripts/recovery_2nd_stage.sample index 64eae3260..b110fc64e 100755 --- a/src/sample/scripts/recovery_2nd_stage.sample +++ b/src/sample/scripts/recovery_2nd_stage.sample @@ -1,7 +1,7 @@ #! /bin/sh # Online recovery 2nd stage script # -DATADIR=$1 # master dabatase cluster +DATADIR=$1 # main database cluster DEST=$2 # hostname of the DB node to be recovered DESTDIR=$3 # database cluster of the DB node to be recovered PORT=$4 # PostgreSQL port number diff --git a/src/test/dml-adaptive-test/extended-query-test/test.sh b/src/test/dml-adaptive-test/extended-query-test/test.sh index 02f977672..83796cadb 100755 --- a/src/test/dml-adaptive-test/extended-query-test/test.sh +++ b/src/test/dml-adaptive-test/extended-query-test/test.sh @@ -226,7 +226,7 @@ backend_weight1 = 1 log_per_node_statement = on pid_file_name = '/__TEST_DIR__/pgpool.pid' -black_function_list = 'currval,lastval,nextval,setval,insert_tb_f_func' +write_function_list = 'currval,lastval,nextval,setval,insert_tb_f_func' disable_load_balance_on_write = 'dml_adaptive' dml_adaptive_object_relationship_list= 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:tb_v_view' diff --git a/src/test/dml-adaptive-test/simple-query-test/test.sh b/src/test/dml-adaptive-test/simple-query-test/test.sh index 345056fea..741609998 100755 --- a/src/test/dml-adaptive-test/simple-query-test/test.sh +++ b/src/test/dml-adaptive-test/simple-query-test/test.sh @@ -226,7 +226,7 @@ backend_weight1 = 1 log_per_node_statement = on pid_file_name = '/__TEST_DIR__/pgpool.pid' -black_function_list = 'currval,lastval,nextval,setval,insert_tb_f_func' +write_function_list = 'currval,lastval,nextval,setval,insert_tb_f_func' disable_load_balance_on_write = 'dml_adaptive' dml_adaptive_object_relationship_list= 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:tb_v_view' diff --git a/src/test/extended-query-test/tests/disable-load-balance-always-black-function.data b/src/test/extended-query-test/tests/disable-load-balance-always-black-function.data index c943ba141..260206c9a 100644 --- a/src/test/extended-query-test/tests/disable-load-balance-always-black-function.data +++ b/src/test/extended-query-test/tests/disable-load-balance-always-black-function.data @@ -5,14 +5,14 @@ ##backend_weight0 = 0 ##backend_weight1 = 1 -##black_function_list = 'f1' +##write_function_list = 'f1' # disable_load_balance_on_write = 'always' # Disable load balance after a WRITE function is issued.
## disable_load_balance_on_write = 'always' -# Create test white function +# Create test read_only function 'Q' "DROP FUNCTION IF EXISTS f1" 'Y' 'Q' "CREATE FUNCTION f1(INTEGER) returns INTEGER AS 'SELECT $1' LANGUAGE SQL" diff --git a/src/test/extended-query-test/tests/disable-load-balance-default-black-function.data b/src/test/extended-query-test/tests/disable-load-balance-default-black-function.data index 7a94243ec..34f203b8e 100644 --- a/src/test/extended-query-test/tests/disable-load-balance-default-black-function.data +++ b/src/test/extended-query-test/tests/disable-load-balance-default-black-function.data @@ -5,12 +5,12 @@ ##backend_weight0 = 0 ##backend_weight1 = 1 -##black_function_list = 'f1' +##write_function_list = 'f1' # Disable load balance in current transaction only. (disable_load_balance_on_write = 'transaction') # (default behavior) -# Create test white function +# Create test read_only function 'Q' "DROP FUNCTION IF EXISTS f1" 'Y' 'Q' "CREATE FUNCTION f1(INTEGER) returns INTEGER AS 'SELECT $1' LANGUAGE SQL" diff --git a/src/test/extended-query-test/tests/disable-load-balance-off-black-function.data b/src/test/extended-query-test/tests/disable-load-balance-off-black-function.data index d57adf6e7..beda99dea 100644 --- a/src/test/extended-query-test/tests/disable-load-balance-off-black-function.data +++ b/src/test/extended-query-test/tests/disable-load-balance-off-black-function.data @@ -5,7 +5,7 @@ ##backend_weight0 = 0 ##backend_weight1 = 1 -##black_function_list = 'f1' +##write_function_list = 'f1' # disable_load_balance_on_write = 'off' # Don't disable load balance even if a WRITE function is issued. @@ -13,7 +13,7 @@ ## disable_load_balance_on_write = 'off' -# Create test white function +# Create test read_only function 'Q' "DROP FUNCTION IF EXISTS f1" 'Y' 'Q' "CREATE FUNCTION f1(INTEGER) returns INTEGER AS 'SELECT $1' LANGUAGE SQL" diff --git a/src/test/extended-query-test/tests/disable-load-balance-simple-black-function.data b/src/test/extended-query-test/tests/disable-load-balance-simple-black-function.data index 176f8c97f..d16f7e020 100644 --- a/src/test/extended-query-test/tests/disable-load-balance-simple-black-function.data +++ b/src/test/extended-query-test/tests/disable-load-balance-simple-black-function.data @@ -5,12 +5,12 @@ ##backend_weight0 = 0 ##backend_weight1 = 1 -##black_function_list = 'f1' +##write_function_list = 'f1' # Disable load balance in current transaction only. 
(disable_load_balance_on_write = 'transaction') # (default behavior) -# Create test white function +# Create test read_only function 'Q' "DROP FUNCTION IF EXISTS f1" 'Y' 'Q' "CREATE FUNCTION f1(INTEGER) returns INTEGER AS 'SELECT $1' LANGUAGE SQL" diff --git a/src/test/extended-query-test/tests/disable-load-balance-trans-black-function.data b/src/test/extended-query-test/tests/disable-load-balance-trans-black-function.data index f353e6e71..d4b5443f9 100644 --- a/src/test/extended-query-test/tests/disable-load-balance-trans-black-function.data +++ b/src/test/extended-query-test/tests/disable-load-balance-trans-black-function.data @@ -5,14 +5,14 @@ ##backend_weight0 = 0 ##backend_weight1 = 1 -##black_function_list = 'f1' +##write_function_list = 'f1' # disable_load_balance_on_write = 'trans_transaction' # Disable load balance in next transaction ## disable_load_balance_on_write = 'trans_transaction' -# Create test white function +# Create test read_only function 'Q' "DROP FUNCTION IF EXISTS f1" 'Y' 'Q' "CREATE FUNCTION f1(INTEGER) returns INTEGER AS 'SELECT $1' LANGUAGE SQL" diff --git a/src/test/extended-query-test/tests/disable-load-balance-white-function.data b/src/test/extended-query-test/tests/disable-load-balance-white-function.data index 6399a903d..b66bd5de0 100644 --- a/src/test/extended-query-test/tests/disable-load-balance-white-function.data +++ b/src/test/extended-query-test/tests/disable-load-balance-white-function.data @@ -8,7 +8,7 @@ # Disable load balance in current transaction only. (disable_load_balance_on_write = 'transaction') # (default behavior) -# Create test white function +# Create test read_only function 'Q' "DROP FUNCTION IF EXISTS f1" 'Y' 'Q' "CREATE FUNCTION f1(INTEGER) returns INTEGER AS 'SELECT $1' LANGUAGE SQL" diff --git a/src/test/pgpool_setup.in b/src/test/pgpool_setup.in index e59e1321d..33caf3222 100644 --- a/src/test/pgpool_setup.in +++ b/src/test/pgpool_setup.in @@ -131,8 +131,8 @@ fi CONF=$BASEDIR/etc/pgpool.conf # failover script FAILOVER_SCRIPT=$BASEDIR/etc/failover.sh -# follow master script -FOLLOW_MASTER_SCRIPT=$BASEDIR/etc/follow_master.sh +# follow primary script +FOLLOW_PRIMARY_SCRIPT=$BASEDIR/etc/follow_primary.sh # pgpool_remote_start PGPOOL_REMOTE_START_SCRIPT=pgpool_remote_start # Start script name. This will be generated in this script. 
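pgpool_setup registers the generated scripts using the renamed placeholders; a hedged sketch of the resulting pgpool.conf entries (the script paths are illustrative), matching the %d %h %p %D %m %M %H %P %r %R argument order echoed by the hunks below:

    # %m/%H/%r/%R now stand for the new main node id/hostname/port/cluster path.
    echo "failover_command = '/path/to/failover.sh %d %h %p %D %m %M %H %P %r %R'" >> $CONF
    echo "follow_primary_command = '/path/to/follow_primary.sh %d %h %p %D %m %M %H %P %r %R'" >> $CONF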
@@ -157,23 +157,23 @@ cat >> $FAILOVER_SCRIPT <<'EOF' # %h = host name # %p = port number # %D = database cluster path -# %m = new master node id -# %M = old master node id -# %H = new master node host name +# %m = new main node id +# %M = old main node id +# %H = new main node host name # %P = old primary node id -# %R = new master database cluster path -# %r = new master port number +# %R = new main database cluster path +# %r = new main port number # %% = '%' character failed_node_id=$1 failed_host_name=$2 failed_port=$3 failed_db_cluster=$4 -new_master_id=$5 -old_master_id=$6 -new_master_host_name=$7 +new_main_id=$5 +old_main_id=$6 +new_main_host_name=$7 old_primary_node_id=$8 -new_master_port_number=$9 -new_master_db_cluster=${10} +new_main_port_number=$9 +new_main_db_cluster=${10} mydir=__MYDIR__ log=$mydir/log/failover.log pg_ctl=__PGBIN__/pg_ctl @@ -181,10 +181,10 @@ cluster0=$mydir/data0 cluster1=$mydir/data1 date >> $log -echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_master_id $new_master_id old_master_id $old_master_id new_master_host_name $new_master_host_name old_primary_node_id $old_primary_node_id new_master_port_number $new_master_port_number new_master_db_cluster $new_master_db_cluster" >> $log +echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_main_id $new_main_id old_main_id $old_main_id new_main_host_name $new_main_host_name old_primary_node_id $old_primary_node_id new_main_port_number $new_main_port_number new_main_db_cluster $new_main_db_cluster" >> $log -if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # master failed -! new_primary_db_cluster=${mydir}/data"$new_master_id" +if [ a"$failed_node_id" = a"$old_primary_node_id" ];then # main node failed +! new_primary_db_cluster=${mydir}/data"$new_main_id" echo $pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over $pg_ctl -D $new_primary_db_cluster promote >>$log # let standby take over sleep 2 @@ -203,34 +203,34 @@ chmod 755 $FAILOVER_SCRIPT } #------------------------------------------- -# create follow master script +# create follow primary script #------------------------------------------- -function create_follow_master_script() +function create_follow_primary_script() { -cat >> $FOLLOW_MASTER_SCRIPT <<'EOF' +cat >> $FOLLOW_PRIMARY_SCRIPT <<'EOF' #! /bin/sh # Execute command by failover. 
# special values: %d = node id # %h = host name # %p = port number # %D = database cluster path -# %m = new master node id -# %M = old master node id -# %H = new master node host name +# %m = new main node id +# %M = old main node id +# %H = new main node host name # %P = old primary node id -# %R = new master database cluster path -# %r = new master port number +# %R = new main database cluster path +# %r = new main port number # %% = '%' character failed_node_id=$1 failed_host_name=$2 failed_port=$3 failed_db_cluster=$4 -new_master_id=$5 -old_master_id=$6 -new_master_host_name=$7 +new_main_id=$5 +old_main_id=$6 +new_main_host_name=$7 old_primary_node_id=$8 -new_master_port_number=$9 -new_master_db_cluster=${10} +new_main_port_number=$9 +new_main_db_cluster=${10} mydir=__MYDIR__ log=$mydir/log/failover.log pg_ctl=__PGBIN__/pg_ctl @@ -241,7 +241,7 @@ PCP_PORT=__PCPPORT__ export PCPPASSFILE=__PCPPASSFILE__ date >> $log -echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_master_id $new_master_id old_master_id $old_master_id new_master_host_name $new_master_host_name old_primary_node_id $old_primary_node_id new_master_port_number $new_master_port_number new_master_db_cluster $new_master_db_cluster" >> $log +echo "failed_node_id $failed_node_id failed_host_name $failed_host_name failed_port $failed_port failed_db_cluster $failed_db_cluster new_main_id $new_main_id old_main_id $old_main_id new_main_host_name $new_main_host_name old_primary_node_id $old_primary_node_id new_main_port_number $new_main_port_number new_main_db_cluster $new_main_db_cluster" >> $log # Stop standby node if it's running $pg_ctl -D $failed_db_cluster status >/dev/null 2>&1 @@ -252,7 +252,7 @@ then # recovery the node pcp_recovery_node -w -h localhost -p $PCP_PORT -n $failed_node_id >> $log 2>&1 else - echo "$failed_db_cluster is not running. skipping follow master command." >> $log + echo "$failed_db_cluster is not running. skipping follow primary command." >> $log fi EOF @@ -265,9 +265,9 @@ EOF -e "/__PGBIN__/s@__PGBIN__@$PGBIN@" \ -e "/__PCPPASSFILE__/s@__PCPPASSFILE__@$PCP_PASS_FILE@" \ -e "/__PCPPORT__/s/__PCPPORT__/$PCP_PORT/" \ - $FOLLOW_MASTER_SCRIPT + $FOLLOW_PRIMARY_SCRIPT -chmod 755 $FOLLOW_MASTER_SCRIPT +chmod 755 $FOLLOW_PRIMARY_SCRIPT } #------------------------------------------- @@ -396,7 +396,7 @@ pg_rewind=__PGBIN__/pg_rewind DATADIR_BASE=__DATADIR_BASE__ PGSUPERUSER=__PGSUPERUSER__ -master_db_cluster=$1 +main_db_cluster=$1 recovery_node_host_name=$2 DEST_CLUSTER=$3 PORT=$4 @@ -419,7 +419,7 @@ cat >> $1/$SCRIPT <<'EOF' # First try pg_rewind # Make backup copy of postgresql.conf since pg_rewind blindly copies -# $master_db_cluster/postgresql.conf. +# $main_db_cluster/postgresql.conf. 
cp $DEST_CLUSTER/postgresql.conf /tmp/ echo "pg_rewind starts" >> $log $pg_rewind -P -D $DEST_CLUSTER --source-server="port=$PORT user=$PGSUPERUSER dbname=postgres" >> $log 2>&1 @@ -438,13 +438,13 @@ if [ $pg_rewind_failed = "true" ];then $psql -p $PORT -c "SELECT pg_start_backup('Streaming Replication', true)" postgres -echo "source: $master_db_cluster dest: $DEST_CLUSTER" >> $log +echo "source: $main_db_cluster dest: $DEST_CLUSTER" >> $log rsync -C -a -c --delete --exclude postgresql.conf --exclude postmaster.pid \ --exclude postmaster.opts --exclude pg_log \ --exclude recovery.conf --exclude recovery.done \ --exclude pg_xlog pg_wal \ -$master_db_cluster/ $DEST_CLUSTER/ +$main_db_cluster/ $DEST_CLUSTER/ rm -fr $DEST_CLUSTER/pg_xlog mkdir $DEST_CLUSTER/pg_xlog @@ -532,7 +532,7 @@ EOF echo "export PGHOST=$PGSOCKET_DIR" >> $1/$SCRIPT cat >> $1/$SCRIPT <<'EOF' -master_db_cluster=$1 +main_db_cluster=$1 recovery_node_host_name=$2 DEST_CLUSTER=$3 PORT=$4 @@ -541,12 +541,12 @@ log=$DATADIR_BASE/log/recovery.log $psql -p $PORT -c "SELECT pg_start_backup('Native Replication', true)" postgres -echo "source: $master_db_cluster dest: $DEST_CLUSTER" > $log +echo "source: $main_db_cluster dest: $DEST_CLUSTER" > $log EOF if [ $PGVERSION -ge 120 ];then cat >> $1/$SCRIPT <<'EOF' -cat > $master_db_cluster/myrecovery.conf < $main_db_cluster/myrecovery.conf <> $1/$SCRIPT <<'EOF' -cat > $master_db_cluster/recovery.conf < $main_db_cluster/recovery.conf <> $CONF + echo "backend_flag$n = ALWAYS_PRIMARY" >> $CONF fi CLUSTER="data"`expr $n` @@ -1145,12 +1145,12 @@ EOF fi #------------------------------------------- -# create follow_masterfailover script +# create follow_primary failover script #------------------------------------------- if [ $MODE = "s" ];then - echo "creating follow master script" - create_follow_master_script >&5 2>&1 - echo "follow_master_command = '$FOLLOW_MASTER_SCRIPT %d %h %p %D %m %M %H %P %r %R'" >> $CONF + echo "creating follow primary script" + create_follow_primary_script >&5 2>&1 + echo "follow_primary_command = '$FOLLOW_PRIMARY_SCRIPT %d %h %p %D %m %M %H %P %r %R'" >> $CONF fi $PSQL -p $PGPOOL_PORT test </dev/null 2>&1; cd $dir -rm -fr $dir/tests/011.watchdoc_quorum_failover/master/ +rm -fr $dir/tests/011.watchdoc_quorum_failover/leader/ rm -fr $dir/tests/011.watchdoc_quorum_failover/standby/ rm -fr $dir/tests/011.watchdoc_quorum_failover/standby2/ -rm -fr $dir/tests/012.watchdog_failover_when_quorum_exists/master/ +rm -fr $dir/tests/012.watchdog_failover_when_quorum_exists/leader/ rm -fr $dir/tests/012.watchdog_failover_when_quorum_exists/standby/ rm -fr $dir/tests/012.watchdog_failover_when_quorum_exists/standby2/ -rm -fr $dir/tests/013.watchdoc_test_failover_require_consensus/master/ +rm -fr $dir/tests/013.watchdoc_test_failover_require_consensus/leader/ rm -fr $dir/tests/013.watchdoc_test_failover_require_consensus/standby/ rm -fr $dir/tests/013.watchdoc_test_failover_require_consensus/standby2/ -rm -fr $dir/tests/014.watchdoc_test_quorum_bypass/master/ -rm -fr $dir/tests/015.watchdoc_test_master_and_backend_fail/master/ +rm -fr $dir/tests/014.watchdoc_test_quorum_bypass/leader/ +rm -fr $dir/tests/015.watchdoc_test_master_and_backend_fail/leader/ rm -fr $dir/tests/015.watchdoc_test_master_and_backend_fail/standby/ rm -fr $dir/tests/015.watchdoc_test_master_and_backend_fail/standby2/ diff --git a/src/test/regression/libs.sh b/src/test/regression/libs.sh index f0e5939c2..40fdbee6a 100644 --- a/src/test/regression/libs.sh +++ b/src/test/regression/libs.sh @@ -16,7 +16,7 @@ 
function wait_for_pgpool_startup { } #------------------------------------------- -# wait for primary/master failover done +# wait for primary/main failover done #------------------------------------------- function wait_for_failover_done { timeout=20 @@ -25,7 +25,7 @@ function wait_for_failover_done { do $PGBIN/psql -p $PGPOOL_PORT -c "show pool_nodes" test >/dev/null 2>&1 if [ $? = 0 ];then - $PGBIN/psql -p $PGPOOL_PORT -c "show pool_nodes" test |egrep -i "primary|master">/dev/null 2>&1 + $PGBIN/psql -p $PGPOOL_PORT -c "show pool_nodes" test |egrep -i "primary|main">/dev/null 2>&1 if [ $? = 0 ];then break; fi diff --git a/src/test/regression/tests/001.load_balance/test.sh b/src/test/regression/tests/001.load_balance/test.sh index dbc658bef..675495a68 100755 --- a/src/test/regression/tests/001.load_balance/test.sh +++ b/src/test/regression/tests/001.load_balance/test.sh @@ -24,7 +24,7 @@ do echo "backend_weight0 = 0" >> etc/pgpool.conf echo "backend_weight1 = 1" >> etc/pgpool.conf - echo "black_function_list = 'f1,public.f2'" >> etc/pgpool.conf + echo "write_function_list = 'f1,public.f2'" >> etc/pgpool.conf ./startall @@ -52,25 +52,25 @@ EOF fi echo ok: load balance works. -# check if black function list worked +# check if write function list worked fgrep "SELECT f1(1);" log/pgpool.log |grep "DB node id: 0">/dev/null 2>&1 if [ $? != 0 ];then # expected result not found - echo fail: black function is sent to node 1. + echo fail: write function is sent to node 1. ./shutdownall exit 1 fi fgrep "SELECT public.f2(1);" log/pgpool.log |grep "DB node id: 0">/dev/null 2>&1 if [ $? != 0 ];then # expected result not found - echo fail: black function is sent to node 1. + echo fail: write function is sent to node 1. ./shutdownall exit 1 fi - echo ok: black function list works. + echo ok: write function list works. - echo "white_function_list = 'f1,public.f2'" >> etc/pgpool.conf - echo "black_function_list = ''" >> etc/pgpool.conf + echo "read_only_function_list = 'f1,public.f2'" >> etc/pgpool.conf + echo "write_function_list = ''" >> etc/pgpool.conf ./pgpool_reload sleep $st @@ -80,26 +80,26 @@ SELECT f1(1); -- this does load balance SELECT public.f2(1); -- this does load balance EOF -# check if white function list worked +# check if read_only function list worked fgrep "SELECT f1(1);" log/pgpool.log |grep "DB node id: 1">/dev/null 2>&1 if [ $? != 0 ];then # expected result not found - echo fail: white function is sent to zero-weight node. + echo fail: read_only function is sent to zero-weight node. ./shutdownall exit 1 fi fgrep "SELECT public.f2(1);" log/pgpool.log |grep "DB node id: 1">/dev/null 2>&1 if [ $? != 0 ];then # expected result not found - echo fail: white function is sent to zero-weight node. + echo fail: read_only function is sent to zero-weight node. ./shutdownall exit 1 fi - echo ok: white function list works. + echo ok: read_only function list works. -# check if black query pattern list worked +# check if primary routing query pattern list worked ./shutdownall - echo "black_query_pattern_list = 'SELECT \'a\'\;;SELECT 1\;;SELECT \'\;\'\;;SELECT \* FROM t1\;;^.*t2.*\;$;^.*f1.*$'" >> etc/pgpool.conf + echo "primary_routing_query_pattern_list = 'SELECT \'a\'\;;SELECT 1\;;SELECT \'\;\'\;;SELECT \* FROM t1\;;^.*t2.*\;$;^.*f1.*$'" >> etc/pgpool.conf ./startall wait_for_pgpool_startup @@ -115,9 +115,9 @@ EOF echo "$queries" | while read query; do $PSQL test -c "$query" - # If master-slave mode, all queries are sent to primary node only. 
- # If query match both black_query_pattern_list and white_function_list, - # white_function_list will be ignored, and query is sent to primary node only. + # In native replication mode, all queries are sent to primary node only. + # If a query matches both primary_routing_query_pattern_list and read_only_function_list, + # read_only_function_list will be ignored, and query is sent to primary node only. # # If replication node, all queries are load-blanced. if [[ $mode = "s" ]];then @@ -129,7 +129,7 @@ EOF if [ $? != 0 ];then # expected result not found - echo "fail: black query: ${query} is load-blanced." + echo "fail: primary routing query: ${query} is load-balanced." ./shutdownall exit 1 fi @@ -138,12 +138,12 @@ EOF if [ $? -eq 1 ]; then exit 1 fi - echo ok: black query pattern list works. + echo ok: primary routing query pattern list works. # check if statement level load balance worked ./shutdownall - echo "white_function_list = ''" >> etc/pgpool.conf - echo "black_function_list = ''" >> etc/pgpool.conf + echo "read_only_function_list = ''" >> etc/pgpool.conf + echo "write_function_list = ''" >> etc/pgpool.conf echo "statement_level_load_balance = on" >> etc/pgpool.conf echo "log_min_messages = debug1" >> etc/pgpool.conf @@ -165,7 +165,7 @@ EOF echo ok: statement level load balance works. # in replication mode if load_balance_mode = off, SELECT query inside -# an explicit transaction should be sent to master only. +# an explicit transaction should be sent to main node only. if [ $mode = "r" ];then ./shutdownall echo "load_balance_mode = off" >> etc/pgpool.conf @@ -184,7 +184,7 @@ EOF fgrep "SELECT 1;" log/pgpool.log |grep "DB node id: 1">/dev/null 2>&1 if [ $? != 0 ];then # the SELECT should not be executed on node 1 - echo ok: select is sent to only master when not load-blanced. + echo ok: select is sent to only main node when not load-balanced. ok=1 fi # the SELECT should be executed on node 0 @@ -193,8 +193,8 @@ EOF # in replication mode if load_balance_mode = off, SELECT query # including writing function should be sent to all the nodes. # per [pgpool-general: 2221]. - echo "black_function_list = 'f1'" >> etc/pgpool.conf - echo "white_function_list = ''" >> etc/pgpool.conf + echo "write_function_list = 'f1'" >> etc/pgpool.conf + echo "read_only_function_list = ''" >> etc/pgpool.conf ./pgpool_reload sleep $st $PSQL test </dev/null 2>&1 if [ $? = 0 ];then # the SELECT should be executed on node 0 & 1 - echo ok: black function is sent to all nodes. + echo ok: write function is sent to all nodes. ok=`expr $ok + 1` fi # the SELECT should be executed on node 0 @@ -217,12 +217,12 @@ EOF fi # ------------------------------------------------------------------------------- -# check the case when black_function_list and white_function_list are both empty. +# check the case when write_function_list and read_only_function_list are both empty. # In this case pg_proc.provolatile is checked. If it is 'v' (volatile), then the # function is regarded doing writes.
# ------------------------------------------------------------------------------- - echo "black_function_list = ''" >> etc/pgpool.conf - echo "white_function_list = ''" >> etc/pgpool.conf + echo "write_function_list = ''" >> etc/pgpool.conf + echo "read_only_function_list = ''" >> etc/pgpool.conf ./pgpool_reload sleep $st diff --git a/src/test/regression/tests/003.failover/create_expected.sql b/src/test/regression/tests/003.failover/create_expected.sql index f1753562f..ea39f39a8 100644 --- a/src/test/regression/tests/003.failover/create_expected.sql +++ b/src/test/regression/tests/003.failover/create_expected.sql @@ -16,8 +16,8 @@ CREATE TEMP TABLE tmp ( INSERT INTO tmp VALUES ('0',:dir,'11002','up','0.500000','primary','0','false','0','','','XXXX-XX-XX XX:XX:XX','s'), ('1',:dir,'11003','down','0.500000','standby','0','false','0','','','XXXX-XX-XX XX:XX:XX','s'), -('0',:dir,'11002','up','0.500000','master','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'), -('1',:dir,'11003','down','0.500000','slave','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'); +('0',:dir,'11002','up','0.500000','main','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'), +('1',:dir,'11003','down','0.500000','replica','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'); SELECT node_id,hostname,port,status,lb_weight,role,select_cnt,load_balance_node,replication_delay,replication_state, replication_sync_state, last_status_change FROM tmp diff --git a/src/test/regression/tests/003.failover/create_expected_node0.sql b/src/test/regression/tests/003.failover/create_expected_node0.sql index 0a7af89ec..5a12e5784 100644 --- a/src/test/regression/tests/003.failover/create_expected_node0.sql +++ b/src/test/regression/tests/003.failover/create_expected_node0.sql @@ -16,8 +16,8 @@ CREATE TEMP TABLE tmp ( INSERT INTO tmp VALUES ('0',:dir,'11002','down','0.500000','standby','0','false','0','','','XXXX-XX-XX XX:XX:XX','s'), ('1',:dir,'11003','up','0.500000','primary','0','false','0','','','XXXX-XX-XX XX:XX:XX','s'), -('0',:dir,'11002','down','0.500000','slave','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'), -('1',:dir,'11003','up','0.500000','master','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'); +('0',:dir,'11002','down','0.500000','replica','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'), +('1',:dir,'11003','up','0.500000','main','0','false','0','','','XXXX-XX-XX XX:XX:XX','r'); SELECT node_id,hostname,port,status,lb_weight,role,select_cnt,load_balance_node,replication_delay,replication_state, replication_sync_state, last_status_change FROM tmp diff --git a/src/test/regression/tests/004.watchdog/.gitignore b/src/test/regression/tests/004.watchdog/.gitignore index c30a6fd70..c1185f35c 100644 --- a/src/test/regression/tests/004.watchdog/.gitignore +++ b/src/test/regression/tests/004.watchdog/.gitignore @@ -1,2 +1,2 @@ -master/ +leader/ standby/ diff --git a/src/test/regression/tests/004.watchdog/leader.conf b/src/test/regression/tests/004.watchdog/leader.conf new file mode 100644 index 000000000..4c95a80ab --- /dev/null +++ b/src/test/regression/tests/004.watchdog/leader.conf @@ -0,0 +1,18 @@ +# leader watchdog +use_watchdog = on +wd_interval = 1 +wd_priority = 2 + +hostname0 = 'localhost' +wd_port0 = 21004 +pgpool_port0 = 11000 +hostname1 = 'localhost' +wd_port1 = 21104 +pgpool_port1 = 11100 + +heartbeat_hostname0 = 'localhost' +heartbeat_port0 = 21005 +heartbeat_hostname1 = 'localhost' +heartbeat_port1 = 21105 + +enable_consensus_with_half_votes = on diff --git a/src/test/regression/tests/004.watchdog/test.sh
b/src/test/regression/tests/004.watchdog/test.sh index 599775dd9..a9748d03f 100755 --- a/src/test/regression/tests/004.watchdog/test.sh +++ b/src/test/regression/tests/004.watchdog/test.sh @@ -2,31 +2,31 @@ #------------------------------------------------------------------- # test script for watchdog source $TESTLIBS -MASTER_DIR=master +LEADER_DIR=leader STANDBY_DIR=standby success_count=0 -rm -fr $MASTER_DIR +rm -fr $LEADER_DIR rm -fr $STANDBY_DIR -mkdir $MASTER_DIR +mkdir $LEADER_DIR mkdir $STANDBY_DIR -# dir in master directory -cd $MASTER_DIR +# dir in leader directory +cd $LEADER_DIR -# create master environment -echo -n "creating master pgpool..." +# create leader environment +echo -n "creating leader pgpool..." $PGPOOL_SETUP -m n -n 1 -p 11000|| exit 1 -echo "master setup done." +echo "leader setup done." # copy the configurations from to standby cp -r etc ../$STANDBY_DIR/ source ./bashrc.ports -cat ../master.conf >> etc/pgpool.conf +cat ../leader.conf >> etc/pgpool.conf echo 0 > etc/pgpool_node_id ./startall @@ -42,27 +42,27 @@ cd .. mkdir $STANDBY_DIR/log echo -n "creating standby pgpool..." cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf -# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf +# since we are using the same pgpool-II conf as of leader. so change the pid file path in standby pgpool conf echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf echo 1 > $STANDBY_DIR/etc/pgpool_node_id # start the stnadby pgpool-II by hand $PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY_DIR/etc/pcp.conf -a $STANDBY_DIR/etc/pool_hba.conf > $STANDBY_DIR/log/pgpool.log 2>&1 & # First test check if both pgpool-II have found their correct place in watchdog cluster. -echo "Waiting for the pgpool master..." +echo "Waiting for the pgpool leader..." for i in 1 2 3 4 5 6 7 8 9 10 do - grep "I am the cluster leader node. Starting escalation process" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1 + grep "I am the cluster leader node. Starting escalation process" $LEADER_DIR/log/pgpool.log > /dev/null 2>&1 if [ $? = 0 ];then success_count=$(( success_count + 1 )) - echo "Master brought up successfully." + echo "Leader brought up successfully." break; fi echo "[check] $i times" sleep 2 done -# now check if standby has successfully joined connected to the master. +# now check if standby has successfully joined connected to the leader. echo "Waiting for the standby to join cluster..." for i in 1 2 3 4 5 6 7 8 9 10 do @@ -76,16 +76,16 @@ do sleep 2 done -# step 2 stop master pgpool and see if standby take over -$PGPOOL_INSTALL_DIR/bin/pgpool -f $MASTER_DIR/etc/pgpool.conf -m f stop +# step 2 stop leader pgpool and see if standby take over +$PGPOOL_INSTALL_DIR/bin/pgpool -f $LEADER_DIR/etc/pgpool.conf -m f stop -echo "Checking if the Standby pgpool-II detected the master shutdown..." +echo "Checking if the Standby pgpool-II detected the leader shutdown..." for i in 1 2 3 4 5 6 7 8 9 10 do grep " is shutting down" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1 if [ $? = 0 ];then success_count=$(( success_count + 1 )) - echo "Master shutdown detected." + echo "Leader shutdown detected." break; fi echo "[check] $i times" @@ -94,13 +94,13 @@ done # Finally see if standby take over -echo "Checking if the Standby pgpool-II takes over the master responsibility..." +echo "Checking if the Standby pgpool-II takes over the leader responsibility..." 
for i in 1 2 3 4 5 6 7 8 9 10 do grep "I am the cluster leader node. Starting escalation process" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1 if [ $? = 0 ];then success_count=$(( success_count + 1 )) - echo "Standby successfully became the new master." + echo "Standby successfully became the new leader." break; fi echo "[check] $i times" @@ -109,7 +109,7 @@ done # we are done. Just stop the standby pgpool-II $PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop -cd master +cd leader ./shutdownall echo "$success_count out of 4 successfull"; diff --git a/src/test/regression/tests/005.jdbc/CreateTempTableTest.java b/src/test/regression/tests/005.jdbc/CreateTempTableTest.java index 0045a2ae7..7c530c166 100644 --- a/src/test/regression/tests/005.jdbc/CreateTempTableTest.java +++ b/src/test/regression/tests/005.jdbc/CreateTempTableTest.java @@ -1,5 +1,5 @@ /* - * Test for master/slave mode + temporary tables + * Test for native replication mode + temporary tables */ import java.sql.*; diff --git a/src/test/regression/tests/006.memqcache/test.sh b/src/test/regression/tests/006.memqcache/test.sh index 50ab7d2b7..840ede6ac 100755 --- a/src/test/regression/tests/006.memqcache/test.sh +++ b/src/test/regression/tests/006.memqcache/test.sh @@ -22,8 +22,8 @@ do echo "done." echo "memory_cache_enabled = on" >> etc/pgpool.conf - echo "white_memqcache_table_list = 'white_v'" >> etc/pgpool.conf - echo "black_memqcache_table_list = 'black_t'" >> etc/pgpool.conf + echo "cache_safe_memqcache_table_list = 'cache_safe_v'" >> etc/pgpool.conf + echo "cache_unsafe_memqcache_table_list = 'cache_unsafe_t'" >> etc/pgpool.conf source ./bashrc.ports @@ -41,22 +41,22 @@ do $PSQL test < /dev/null || success=false - grep "fetched from cache" log/pgpool.log | grep black_t > /dev/null && success=false + grep "fetched from cache" log/pgpool.log | grep cache_unsafe_t > /dev/null && success=false grep "fetched from cache" log/pgpool.log | grep normal_v > /dev/null && success=false - grep "fetched from cache" log/pgpool.log | grep white_v > /dev/null || success=false + grep "fetched from cache" log/pgpool.log | grep cache_safe_v > /dev/null || success=false grep "fetched from cache" log/pgpool.log | grep with_modify > /dev/null && success=false grep "fetched from cache" log/pgpool.log | grep immutable_func > /dev/null || success=false grep "fetched from cache" log/pgpool.log | grep volatile_func > /dev/null && success=false diff --git a/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c b/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c index 4c8d4e48b..0b7ad28e2 100644 --- a/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c +++ b/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c @@ -10,7 +10,7 @@ #include "rewrite/pool_timestamp.h" #include "parser/parser.h" -/* for get_current_timestamp() (MASTER() macro) */ +/* for get_current_timestamp() (MAIN() macro) */ POOL_REQUEST_INFO _req_info; POOL_REQUEST_INFO *Req_info = &_req_info; @@ -48,7 +48,7 @@ TSRel rc[2] = { }; int -pool_virtual_master_db_node_id(void) +pool_virtual_main_db_node_id(void) { return 0; } diff --git a/src/test/regression/tests/011.watchdog_quorum_failover/.gitignore b/src/test/regression/tests/011.watchdog_quorum_failover/.gitignore index 76d02bbfb..e31af86fa 100644 --- a/src/test/regression/tests/011.watchdog_quorum_failover/.gitignore +++ b/src/test/regression/tests/011.watchdog_quorum_failover/.gitignore @@ -1,3 +1,3 @@ -master/ +leader/ standby/ standby2/ diff --git 
a/src/test/regression/tests/011.watchdog_quorum_failover/leader.conf b/src/test/regression/tests/011.watchdog_quorum_failover/leader.conf new file mode 100644 index 000000000..b35906ca2 --- /dev/null +++ b/src/test/regression/tests/011.watchdog_quorum_failover/leader.conf @@ -0,0 +1,21 @@ +# leader watchdog +use_watchdog = on +wd_interval = 1 +wd_priority = 5 + +hostname0 = 'localhost' +wd_port0 = 21004 +pgpool_port0 = 11000 +hostname1 = 'localhost' +wd_port1 = 21104 +pgpool_port1 = 11100 +hostname2 = 'localhost' +wd_port2 = 21204 +pgpool_port2 = 11200 + +heartbeat_hostname0 = 'localhost' +heartbeat_port0 = 21005 +heartbeat_hostname1 = 'localhost' +heartbeat_port1 = 21105 +heartbeat_hostname2 = 'localhost' +heartbeat_port2 = 21205 diff --git a/src/test/regression/tests/011.watchdog_quorum_failover/test.sh b/src/test/regression/tests/011.watchdog_quorum_failover/test.sh index 74843aa45..7ca98d36a 100755 --- a/src/test/regression/tests/011.watchdog_quorum_failover/test.sh +++ b/src/test/regression/tests/011.watchdog_quorum_failover/test.sh @@ -6,7 +6,7 @@ # must be defined before compiling main/health_check.c. source $TESTLIBS -MASTER_DIR=master +LEADER_DIR=leader STANDBY_DIR=standby STANDBY2_DIR=standby2 num_tests=9 @@ -14,32 +14,32 @@ success_count=0 PSQL=$PGBIN/psql PG_CTL=$PGBIN/pg_ctl -rm -fr $MASTER_DIR +rm -fr $LEADER_DIR rm -fr $STANDBY_DIR rm -fr $STANDBY2_DIR -mkdir $MASTER_DIR +mkdir $LEADER_DIR mkdir $STANDBY_DIR mkdir $STANDBY2_DIR -# dir in master directory -cd $MASTER_DIR +# dir in leader directory +cd $LEADER_DIR -# create master environment -echo -n "creating master pgpool and PostgreSQL clusters..." +# create leader environment +echo -n "creating leader pgpool and PostgreSQL clusters..." $PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1 -echo "master setup done." +echo "leader setup done." -# copy the configurations from master to standby +# copy the configurations from leader to standby cp -r etc ../$STANDBY_DIR/ -# copy the configurations from master to standby2 +# copy the configurations from leader to standby2 cp -r etc ../$STANDBY2_DIR/ source ./bashrc.ports -cat ../master.conf >> etc/pgpool.conf +cat ../leader.conf >> etc/pgpool.conf echo 0 > etc/pgpool_node_id ./startall @@ -55,7 +55,7 @@ cd .. mkdir $STANDBY_DIR/log echo -n "creating standby pgpool..." cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf -# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf +# since we are using the same pgpool-II conf as of leader. so change the pid file path in standby pgpool conf echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf echo 1 > $STANDBY_DIR/etc/pgpool_node_id @@ -67,7 +67,7 @@ $PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY mkdir $STANDBY2_DIR/log echo -n "creating standby2 pgpool..." cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf -# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf +# since we are using the same pgpool-II conf as of leader. 
diff --git a/src/test/regression/tests/011.watchdog_quorum_failover/test.sh b/src/test/regression/tests/011.watchdog_quorum_failover/test.sh
index 74843aa45..7ca98d36a 100755
--- a/src/test/regression/tests/011.watchdog_quorum_failover/test.sh
+++ b/src/test/regression/tests/011.watchdog_quorum_failover/test.sh
@@ -6,7 +6,7 @@
# must be defined before compiling main/health_check.c.
source $TESTLIBS
-MASTER_DIR=master
+LEADER_DIR=leader
STANDBY_DIR=standby
STANDBY2_DIR=standby2
num_tests=9
@@ -14,32 +14,32 @@ success_count=0
PSQL=$PGBIN/psql
PG_CTL=$PGBIN/pg_ctl
-rm -fr $MASTER_DIR
+rm -fr $LEADER_DIR
rm -fr $STANDBY_DIR
rm -fr $STANDBY2_DIR
-mkdir $MASTER_DIR
+mkdir $LEADER_DIR
mkdir $STANDBY_DIR
mkdir $STANDBY2_DIR
-# dir in master directory
-cd $MASTER_DIR
+# dir in leader directory
+cd $LEADER_DIR
-# create master environment
-echo -n "creating master pgpool and PostgreSQL clusters..."
+# create leader environment
+echo -n "creating leader pgpool and PostgreSQL clusters..."
$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
-echo "master setup done."
+echo "leader setup done."
-# copy the configurations from master to standby
+# copy the configurations from leader to standby
cp -r etc ../$STANDBY_DIR/
-# copy the configurations from master to standby2
+# copy the configurations from leader to standby2
cp -r etc ../$STANDBY2_DIR/
source ./bashrc.ports
-cat ../master.conf >> etc/pgpool.conf
+cat ../leader.conf >> etc/pgpool.conf
echo 0 > etc/pgpool_node_id
./startall
@@ -55,7 +55,7 @@ cd ..
mkdir $STANDBY_DIR/log
echo -n "creating standby pgpool..."
cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf
echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf
echo 1 > $STANDBY_DIR/etc/pgpool_node_id
@@ -67,7 +67,7 @@ $PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY
mkdir $STANDBY2_DIR/log
echo -n "creating standby2 pgpool..."
cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool3.pid'" >> $STANDBY2_DIR/etc/pgpool.conf
echo "logdir = $STANDBY2_DIR/log" >> $STANDBY2_DIR/etc/pgpool.conf
echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
@@ -75,20 +75,20 @@ echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
# First test check if both pgpool-II have found their correct place in watchdog cluster.
-echo "Waiting for the pgpool master..."
+echo "Waiting for the pgpool leader..."
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+    grep "I am the cluster leader node" $LEADER_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Master brought up successfully."
+        echo "Leader brought up successfully."
        break;
    fi
    echo "[check] $i times"
    sleep 2
done
-# now check if standby has successfully joined connected to the master.
+# now check if the standby has successfully joined and connected to the leader.
echo "Waiting for the standby to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -102,7 +102,7 @@ do
    sleep 2
done
-# now check if standby2 has successfully joined connected to the master.
+# now check if standby2 has successfully joined and connected to the leader.
echo "Waiting for the standby2 to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -148,11 +148,11 @@ if [ $n -eq 3 ];then
fi
# raise an real DB node 1 error
-$PG_CTL -D master/data1 -m f stop
-echo "Checking if master detects the shutdown error"
+$PG_CTL -D leader/data1 -m f stop
+echo "Checking if leader detects the shutdown error"
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep -i "failover" $MASTER_DIR/log/pgpool.log
+    grep -i "failover" $LEADER_DIR/log/pgpool.log
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
        echo "DB error detected."
@@ -184,16 +184,16 @@ do
    sleep 2
done
-# stop master pgpool and see if standby takes over the roll
-$PGPOOL_INSTALL_DIR/bin/pgpool -f $MASTER_DIR/etc/pgpool.conf -m f stop
+# stop leader pgpool and see if standby takes over the role
+$PGPOOL_INSTALL_DIR/bin/pgpool -f $LEADER_DIR/etc/pgpool.conf -m f stop
-echo "Checking if the Standby pgpool-II detected the master shutdown..."
+echo "Checking if the Standby pgpool-II detected the leader shutdown..."
for i in 1 2 3 4 5 6 7 8 9 10
do
    grep " is shutting down" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Master shutdown detected."
+        echo "Leader shutdown detected."
        break;
    fi
    echo "[check] $i times"
@@ -202,13 +202,13 @@ done
# Finally see if standby take over
-echo "Checking if the Standby pgpool-II takes over the master responsibility..."
+echo "Checking if the Standby pgpool-II takes over the leader responsibility..."
for i in 1 2 3 4 5 6 7 8 9 10
do
    grep "I am the cluster leader node" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Standby successfully became the new master."
+        echo "Standby successfully became the new leader."
        break;
    fi
    echo "[check] $i times"
@@ -218,7 +218,7 @@ done
# we are done. Just stop the standby pgpool-II
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY2_DIR/etc/pgpool.conf -m f stop
-cd master
+cd leader
./shutdownall
echo "$success_count out of $num_tests successfull";
diff --git a/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/.gitignore b/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/.gitignore
index 76d02bbfb..e31af86fa 100644
--- a/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/.gitignore
+++ b/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/.gitignore
@@ -1,3 +1,3 @@
-master/
+leader/
standby/
standby2/
diff --git a/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/leader.conf b/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/leader.conf
new file mode 100644
index 000000000..18d412be1
--- /dev/null
+++ b/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/leader.conf
@@ -0,0 +1,25 @@
+# leader watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = false
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+
+hostname0 = 'localhost'
+wd_port0 = 21004
+pgpool_port0 = 11000
+hostname1 = 'localhost'
+wd_port1 = 21104
+pgpool_port1 = 11100
+hostname2 = 'localhost'
+wd_port2 = 21204
+pgpool_port2 = 11200
+
+heartbeat_hostname0 = 'localhost'
+heartbeat_port0 = 21005
+heartbeat_hostname1 = 'localhost'
+heartbeat_port1 = 21105
+heartbeat_hostname2 = 'localhost'
+heartbeat_port2 = 21205
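With three watchdog nodes, the quorum needs at least two of them alive (more than half of three), so the test below raises a fake backend failure twice: once while the leader runs alone, expecting "Rejecting the failover request", and once after both standbys have joined, expecting the failover to actually run. The fake failure uses the HEALTHCHECK_DEBUG hook mentioned in the test headers:

    # simulate a health check failure of backend node 1; pgpool must be
    # built with HEALTHCHECK_DEBUG for this file to be honoured
    echo "1 down" > $LEADER_DIR/log/backend_down_request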
diff --git a/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/test.sh b/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/test.sh
index 819f5a28a..5ad89f6c2 100755
--- a/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/test.sh
+++ b/src/test/regression/tests/012.watchdog_failover_when_quorum_exists/test.sh
@@ -8,7 +8,7 @@
# test failover_when_quorum_exists
#
source $TESTLIBS
-MASTER_DIR=master
+LEADER_DIR=leader
STANDBY_DIR=standby
STANDBY2_DIR=standby2
num_tests=5
@@ -16,32 +16,32 @@ success_count=0
PSQL=$PGBIN/psql
PG_CTL=$PGBIN/pg_ctl
-rm -fr $MASTER_DIR
+rm -fr $LEADER_DIR
rm -fr $STANDBY_DIR
rm -fr $STANDBY2_DIR
-mkdir $MASTER_DIR
+mkdir $LEADER_DIR
mkdir $STANDBY_DIR
mkdir $STANDBY2_DIR
-# dir in master directory
-cd $MASTER_DIR
+# dir in leader directory
+cd $LEADER_DIR
-# create master environment
-echo -n "creating master pgpool and PostgreSQL clusters..."
+# create leader environment
+echo -n "creating leader pgpool and PostgreSQL clusters..."
$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
-echo "master setup done."
+echo "leader setup done."
-# copy the configurations from master to standby
+# copy the configurations from leader to standby
cp -r etc ../$STANDBY_DIR/
-# copy the configurations from master to standby2
+# copy the configurations from leader to standby2
cp -r etc ../$STANDBY2_DIR/
source ./bashrc.ports
-cat ../master.conf >> etc/pgpool.conf
+cat ../leader.conf >> etc/pgpool.conf
echo 0 > etc/pgpool_node_id
./startall
@@ -57,7 +57,7 @@ cd ..
mkdir $STANDBY_DIR/log
echo -n "creating standby pgpool..."
cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf
echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf
echo 1 > $STANDBY_DIR/etc/pgpool_node_id
@@ -69,7 +69,7 @@ echo 1 > $STANDBY_DIR/etc/pgpool_node_id
mkdir $STANDBY2_DIR/log
echo -n "creating standby2 pgpool..."
cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool3.pid'" >> $STANDBY2_DIR/etc/pgpool.conf
echo "logdir = $STANDBY2_DIR/log" >> $STANDBY2_DIR/etc/pgpool.conf
echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
@@ -77,13 +77,13 @@ echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
#$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
# First test check if both pgpool-II have found their correct place in watchdog cluster.
-echo "Waiting for the pgpool master..."
+echo "Waiting for the pgpool leader..."
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+    grep "I am the cluster leader node" $LEADER_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Master brought up successfully."
+        echo "Leader brought up successfully."
        break;
    fi
    echo "[check] $i times"
@@ -91,12 +91,12 @@ do
done
-# raise an artificial communication error on master for DB node 1
-echo "1 down" > $MASTER_DIR/log/backend_down_request
-echo "Checking if the Master rejects the failover because quorum is not present..."
+# raise an artificial communication error on leader for DB node 1
+echo "1 down" > $LEADER_DIR/log/backend_down_request
+echo "Checking if the Leader rejects the failover because quorum is not present..."
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep -i "Rejecting the failover request" $MASTER_DIR/log/pgpool.log
+    grep -i "Rejecting the failover request" $LEADER_DIR/log/pgpool.log
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
        echo "Fake DB error detected. and Failover rejected because of absence of quorum"
@@ -113,7 +113,7 @@ $PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY
# start the second stnadby pgpool-II by hand
$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
-# now check if standby1 has successfully joined connected to the master.
+# now check if standby1 has successfully joined and connected to the leader.
echo "Waiting for the standby1 to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -127,7 +127,7 @@ do
    sleep 2
done
-# now check if standby2 has successfully joined connected to the master.
+# now check if standby2 has successfully joined and connected to the leader.
echo "Waiting for the standby2 to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -142,7 +142,7 @@ do
done
# raise an artificial communication again to check if failover is executed this time
-echo "1 down" > $MASTER_DIR/log/backend_down_request
+echo "1 down" > $LEADER_DIR/log/backend_down_request
#give some time to pgpool-II to execute failover
sleep 5
# check to see if all Pgpool-II agrees that the failover request is
@@ -171,7 +171,7 @@ done
# we are done. Just stop the standby pgpool-II
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY2_DIR/etc/pgpool.conf -m f stop
-cd master
+cd leader
./shutdownall
echo "$success_count out of $num_tests successfull";
diff --git a/src/test/regression/tests/013.watchdog_failover_require_consensus/.gitignore b/src/test/regression/tests/013.watchdog_failover_require_consensus/.gitignore
index 76d02bbfb..e31af86fa 100644
--- a/src/test/regression/tests/013.watchdog_failover_require_consensus/.gitignore
+++ b/src/test/regression/tests/013.watchdog_failover_require_consensus/.gitignore
@@ -1,3 +1,3 @@
-master/
+leader/
standby/
standby2/
diff --git a/src/test/regression/tests/013.watchdog_failover_require_consensus/leader.conf b/src/test/regression/tests/013.watchdog_failover_require_consensus/leader.conf
new file mode 100644
index 000000000..fc59360ae
--- /dev/null
+++ b/src/test/regression/tests/013.watchdog_failover_require_consensus/leader.conf
@@ -0,0 +1,25 @@
+# leader watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+
+hostname0 = 'localhost'
+wd_port0 = 21004
+pgpool_port0 = 11000
+hostname1 = 'localhost'
+wd_port1 = 21104
+pgpool_port1 = 11100
+hostname2 = 'localhost'
+wd_port2 = 21204
+pgpool_port2 = 11200
+
+heartbeat_hostname0 = 'localhost'
+heartbeat_port0 = 21005
+heartbeat_hostname1 = 'localhost'
+heartbeat_port1 = 21105
+heartbeat_hostname2 = 'localhost'
+heartbeat_port2 = 21205
diff --git a/src/test/regression/tests/013.watchdog_failover_require_consensus/test.sh b/src/test/regression/tests/013.watchdog_failover_require_consensus/test.sh
index f0c77b501..d71fc7e5b 100755
--- a/src/test/regression/tests/013.watchdog_failover_require_consensus/test.sh
+++ b/src/test/regression/tests/013.watchdog_failover_require_consensus/test.sh
@@ -8,7 +8,7 @@
# test failover_require_consensus
#
source $TESTLIBS
-MASTER_DIR=master
+LEADER_DIR=leader
STANDBY_DIR=standby
STANDBY2_DIR=standby2
num_tests=7
@@ -16,32 +16,32 @@ success_count=0
PSQL=$PGBIN/psql
PG_CTL=$PGBIN/pg_ctl
-rm -fr $MASTER_DIR
+rm -fr $LEADER_DIR
rm -fr $STANDBY_DIR
rm -fr $STANDBY2_DIR
-mkdir $MASTER_DIR
+mkdir $LEADER_DIR
mkdir $STANDBY_DIR
mkdir $STANDBY2_DIR
-# dir in master directory
-cd $MASTER_DIR
+# dir in leader directory
+cd $LEADER_DIR
-# create master environment
-echo -n "creating master pgpool and PostgreSQL clusters..."
+# create leader environment
+echo -n "creating leader pgpool and PostgreSQL clusters..."
$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
-echo "master setup done."
+echo "leader setup done."
-# copy the configurations from master to standby
+# copy the configurations from leader to standby
cp -r etc ../$STANDBY_DIR/
-# copy the configurations from master to standby2
+# copy the configurations from leader to standby2
cp -r etc ../$STANDBY2_DIR/
source ./bashrc.ports
-cat ../master.conf >> etc/pgpool.conf
+cat ../leader.conf >> etc/pgpool.conf
echo 0 > etc/pgpool_node_id
./startall
@@ -57,7 +57,7 @@ cd ..
mkdir $STANDBY_DIR/log
echo -n "creating standby pgpool..."
cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf
echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf
echo 1 > $STANDBY_DIR/etc/pgpool_node_id
@@ -68,7 +68,7 @@ $PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY
mkdir $STANDBY2_DIR/log
echo -n "creating standby2 pgpool..."
cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool3.pid'" >> $STANDBY2_DIR/etc/pgpool.conf
echo "logdir = $STANDBY2_DIR/log" >> $STANDBY2_DIR/etc/pgpool.conf
echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
@@ -76,20 +76,20 @@ echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
# First test check if both pgpool-II have found their correct place in watchdog cluster.
-echo "Waiting for the pgpool master..."
+echo "Waiting for the pgpool leader..."
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+    grep "I am the cluster leader node" $LEADER_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Master brought up successfully."
+        echo "Leader brought up successfully."
        break;
    fi
    echo "[check] $i times"
    sleep 2
done
-# now check if standby1 has successfully joined connected to the master.
+# now check if standby1 has successfully joined and connected to the leader.
echo "Waiting for the standby1 to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -103,7 +103,7 @@ do
    sleep 2
done
-# now check if standby2 has successfully joined connected to the master.
+# now check if standby2 has successfully joined and connected to the leader.
echo "Waiting for the standby2 to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -125,19 +125,19 @@ do
    grep -i "building consensus for request" $STANDBY_DIR/log/pgpool.log
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Fake DB error generated and master is waiting for consensus"
+        echo "Fake DB error generated and leader is waiting for consensus"
        break;
    fi
    echo "[check] $i times"
    sleep 2
done
-echo "Checking if the Master receives the failover request and waiting for consensus..."
+echo "Checking if the Leader receives the failover request and is waiting for consensus..."
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep -i "failover request noted" $MASTER_DIR/log/pgpool.log
+    grep -i "failover request noted" $LEADER_DIR/log/pgpool.log
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Fake DB error delivered to master. and master is waiting for consensus"
+        echo "Fake DB error delivered to leader, and leader is waiting for consensus"
        break;
    fi
    echo "[check] $i times"
@@ -192,7 +192,7 @@ done
# we are done. Just stop the standby pgpool-II
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY2_DIR/etc/pgpool.conf -m f stop
-cd master
+cd leader
./shutdownall
echo "$success_count out of $num_tests successfull";
diff --git a/src/test/regression/tests/014.watchdog_test_quorum_bypass/.gitignore b/src/test/regression/tests/014.watchdog_test_quorum_bypass/.gitignore
index b9f13726d..b0a8a94bf 100644
--- a/src/test/regression/tests/014.watchdog_test_quorum_bypass/.gitignore
+++ b/src/test/regression/tests/014.watchdog_test_quorum_bypass/.gitignore
@@ -1 +1 @@
-master/
+leader/
diff --git a/src/test/regression/tests/014.watchdog_test_quorum_bypass/leader.conf b/src/test/regression/tests/014.watchdog_test_quorum_bypass/leader.conf
new file mode 100644
index 000000000..fc59360ae
--- /dev/null
+++ b/src/test/regression/tests/014.watchdog_test_quorum_bypass/leader.conf
@@ -0,0 +1,25 @@
+# leader watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+
+hostname0 = 'localhost'
+wd_port0 = 21004
+pgpool_port0 = 11000
+hostname1 = 'localhost'
+wd_port1 = 21104
+pgpool_port1 = 11100
+hostname2 = 'localhost'
+wd_port2 = 21204
+pgpool_port2 = 11200
+
+heartbeat_hostname0 = 'localhost'
+heartbeat_port0 = 21005
+heartbeat_hostname1 = 'localhost'
+heartbeat_port1 = 21105
+heartbeat_hostname2 = 'localhost'
+heartbeat_port2 = 21205
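Test 014 exercises the other side of the quorum rules: a detach requested by an operator over PCP is carried out even though failover_when_quorum_exists and failover_require_consensus are both enabled above. A sketch of that kind of call (the PCP port and user are assumptions based on the pgpool_setup defaults):

    # detach backend node 1 by hand; PCP requests bypass the
    # quorum/consensus gating that applies to automatic failover
    $PGPOOL_INSTALL_DIR/bin/pcp_detach_node -h localhost -p 11005 -U postgres -n 1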
diff --git a/src/test/regression/tests/014.watchdog_test_quorum_bypass/test.sh b/src/test/regression/tests/014.watchdog_test_quorum_bypass/test.sh
index 1ead1540b..e6a47a6d7 100755
--- a/src/test/regression/tests/014.watchdog_test_quorum_bypass/test.sh
+++ b/src/test/regression/tests/014.watchdog_test_quorum_bypass/test.sh
@@ -8,32 +8,32 @@
# test pcp_detach bypass failover_when_quorum_exists and failover_require_consensus
#
source $TESTLIBS
-MASTER_DIR=master
+LEADER_DIR=leader
num_tests=2
success_count=0
PSQL=$PGBIN/psql
PG_CTL=$PGBIN/pg_ctl
-rm -fr $MASTER_DIR
+rm -fr $LEADER_DIR
rm -fr $STANDBY_DIR
rm -fr $STANDBY2_DIR
-mkdir $MASTER_DIR
+mkdir $LEADER_DIR
mkdir $STANDBY_DIR
mkdir $STANDBY2_DIR
-# dir in master directory
-cd $MASTER_DIR
+# dir in leader directory
+cd $LEADER_DIR
-# create master environment
-echo -n "creating master pgpool and PostgreSQL clusters..."
+# create leader environment
+echo -n "creating leader pgpool and PostgreSQL clusters..."
$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
-echo "master setup done."
+echo "leader setup done."
source ./bashrc.ports
-cat ../master.conf >> etc/pgpool.conf
+cat ../leader.conf >> etc/pgpool.conf
echo 0 > etc/pgpool_node_id
./startall
@@ -44,14 +44,14 @@ wait_for_pgpool_startup
cd ..
-# First test check if pgpool-II became a master.
-echo "Waiting for the pgpool master..."
+# First test checks if pgpool-II became a leader.
+echo "Waiting for the pgpool leader..."
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+    grep "I am the cluster leader node" $LEADER_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Master brought up successfully."
+        echo "Leader brought up successfully."
        break;
    fi
    echo "[check] $i times"
@@ -69,7 +69,7 @@ if [ $? = 0 ];then
    success_count=$(( success_count + 1 ))
fi
-cd master
+cd leader
./shutdownall
echo "$success_count out of $num_tests successfull";
diff --git a/src/test/regression/tests/015.watchdog_master_and_backend_fail/.gitignore b/src/test/regression/tests/015.watchdog_master_and_backend_fail/.gitignore
index 76d02bbfb..e31af86fa 100644
--- a/src/test/regression/tests/015.watchdog_master_and_backend_fail/.gitignore
+++ b/src/test/regression/tests/015.watchdog_master_and_backend_fail/.gitignore
@@ -1,3 +1,3 @@
-master/
+leader/
standby/
standby2/
diff --git a/src/test/regression/tests/015.watchdog_master_and_backend_fail/leader.conf b/src/test/regression/tests/015.watchdog_master_and_backend_fail/leader.conf
new file mode 100644
index 000000000..fc59360ae
--- /dev/null
+++ b/src/test/regression/tests/015.watchdog_master_and_backend_fail/leader.conf
@@ -0,0 +1,25 @@
+# leader watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+
+hostname0 = 'localhost'
+wd_port0 = 21004
+pgpool_port0 = 11000
+hostname1 = 'localhost'
+wd_port1 = 21104
+pgpool_port1 = 11100
+hostname2 = 'localhost'
+wd_port2 = 21204
+pgpool_port2 = 11200
+
+heartbeat_hostname0 = 'localhost'
+heartbeat_port0 = 21005
+heartbeat_hostname1 = 'localhost'
+heartbeat_port1 = 21105
+heartbeat_hostname2 = 'localhost'
+heartbeat_port2 = 21205
diff --git a/src/test/regression/tests/015.watchdog_master_and_backend_fail/test.sh b/src/test/regression/tests/015.watchdog_master_and_backend_fail/test.sh
index f253d3380..527123c75 100755
--- a/src/test/regression/tests/015.watchdog_master_and_backend_fail/test.sh
+++ b/src/test/regression/tests/015.watchdog_master_and_backend_fail/test.sh
@@ -5,10 +5,10 @@
# Please note that to successfully run the test, "HEALTHCHECK_DEBUG"
# must be defined before compiling main/health_check.c.
#
-# test if master and backend goes down at same time Pgpool-II behaves as expected
+# test that Pgpool-II behaves as expected if the leader and a backend go down at the same time
#
source $TESTLIBS
-MASTER_DIR=master
+LEADER_DIR=leader
STANDBY_DIR=standby
STANDBY2_DIR=standby2
num_tests=6
@@ -16,32 +16,32 @@ success_count=0
PSQL=$PGBIN/psql
PG_CTL=$PGBIN/pg_ctl
-rm -fr $MASTER_DIR
+rm -fr $LEADER_DIR
rm -fr $STANDBY_DIR
rm -fr $STANDBY2_DIR
-mkdir $MASTER_DIR
+mkdir $LEADER_DIR
mkdir $STANDBY_DIR
mkdir $STANDBY2_DIR
-# dir in master directory
-cd $MASTER_DIR
+# dir in leader directory
+cd $LEADER_DIR
-# create master environment
-echo -n "creating master pgpool and PostgreSQL clusters..."
+# create leader environment
+echo -n "creating leader pgpool and PostgreSQL clusters..."
$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
-echo "master setup done."
+echo "leader setup done."
-# copy the configurations from master to standby
+# copy the configurations from leader to standby
cp -r etc ../$STANDBY_DIR/
-# copy the configurations from master to standby2
+# copy the configurations from leader to standby2
cp -r etc ../$STANDBY2_DIR/
source ./bashrc.ports
-cat ../master.conf >> etc/pgpool.conf
+cat ../leader.conf >> etc/pgpool.conf
echo 0 > etc/pgpool_node_id
./startall
@@ -57,7 +57,7 @@ cd ..
mkdir $STANDBY_DIR/log
echo -n "creating standby pgpool..."
cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf
echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf
echo 1 > $STANDBY_DIR/etc/pgpool_node_id
@@ -68,7 +68,7 @@ $PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY
mkdir $STANDBY2_DIR/log
echo -n "creating standby2 pgpool..."
cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf
-# since we are using the same pgpool-II conf as of master. so change the pid file path in standby pgpool conf
+# Since we are using the same pgpool-II conf as the leader's, change the pid file path in the standby pgpool conf
echo "pid_file_name = '$PWD/pgpool3.pid'" >> $STANDBY2_DIR/etc/pgpool.conf
echo "logdir = $STANDBY2_DIR/log" >> $STANDBY2_DIR/etc/pgpool.conf
echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
@@ -76,20 +76,20 @@ echo 2 > $STANDBY2_DIR/etc/pgpool_node_id
$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
# First test check if both pgpool-II have found their correct place in watchdog cluster.
-echo "Waiting for the pgpool master..."
+echo "Waiting for the pgpool leader..."
for i in 1 2 3 4 5 6 7 8 9 10
do
-    grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+    grep "I am the cluster leader node" $LEADER_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Master brought up successfully."
+        echo "Leader brought up successfully."
        break;
    fi
    echo "[check] $i times"
    sleep 2
done
-# now check if standby1 has successfully joined connected to the master.
+# now check if standby1 has successfully joined and connected to the leader.
echo "Waiting for the standby1 to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -103,7 +103,7 @@ do
    sleep 2
done
-# now check if standby2 has successfully joined connected to the master.
+# now check if standby2 has successfully joined and connected to the leader.
echo "Waiting for the standby2 to join cluster..."
for i in 1 2 3 4 5 6 7 8 9 10
do
@@ -117,18 +117,18 @@ do
    sleep 2
done
-#shutdown master and one PG server by hand
-$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $MASTER_DIR/etc/pgpool.conf -m f stop
-$PG_CTL -D $MASTER_DIR/data1 -m f stop
+#shutdown leader and one PG server by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $LEADER_DIR/etc/pgpool.conf -m f stop
+$PG_CTL -D $LEADER_DIR/data1 -m f stop
# First test check if both pgpool-II have found their correct place in watchdog cluster.
-echo "Waiting for the standby to become new master..."
+echo "Waiting for the standby to become the new leader..."
for i in 1 2 3 4 5 6 7 8 9 10
do
    grep "I am the cluster leader node" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Standby became new master successfully."
+        echo "Standby became the new leader successfully."
        break;
    fi
    echo "[check] $i times"
@@ -144,7 +144,7 @@ do
    grep " failover done" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
    if [ $? = 0 ];then
        success_count=$(( success_count + 1 ))
-        echo "Standby became new master successfully."
+        echo "Standby became the new leader successfully."
        break;
    fi
    echo "[check] $i times"
@@ -176,7 +176,7 @@ done
# we are done. Just stop the standby pgpool-II
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop
$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY2_DIR/etc/pgpool.conf -m f stop
-cd master
+cd leader
./shutdownall
echo "$success_count out of $num_tests successfull";
diff --git a/src/test/regression/tests/016.node_0_is_not_primary/test.sh b/src/test/regression/tests/016.node_0_is_not_primary/test.sh
index 677841621..5c365e4b3 100755
--- a/src/test/regression/tests/016.node_0_is_not_primary/test.sh
+++ b/src/test/regression/tests/016.node_0_is_not_primary/test.sh
@@ -37,12 +37,12 @@ do
    echo "Swapping the role of node 0 and node 1...done."
    $PSQL -c "show pool_nodes"
-    # make sure that node 1 is master/primary
-    test `$PSQL -c "show pool_nodes"|egrep "primary|master"|awk '{print $1}'` = 1 &&
-    test `$PSQL -c "show pool_nodes"|egrep "primary|master"|awk '{print $7}'` = "up"
+    # make sure that node 1 is main/primary
+    test `$PSQL -c "show pool_nodes"|egrep "primary|main"|awk '{print $1}'` = 1 &&
+    test `$PSQL -c "show pool_nodes"|egrep "primary|main"|awk '{print $7}'` = "up"
    if [ $? != 0 ]
    then
-        echo "node 1 is not primary or master"
+        echo "node 1 is not primary or main"
        ./shutdownall
        exit 1
    fi
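The role column checked above comes from get_nodes() in pool_process_reporting.c, which this patch changes to report main/replica (or primary/standby in streaming replication mode) rather than master/slave. A quick interactive check, with abridged, illustrative output:

    $PSQL -p 11000 -c "show pool_nodes" test
    #  node_id | hostname | port  | status | ... | role    | ...
    #  0       | /tmp     | 11002 | up     | ... | standby | ...
    #  1       | /tmp     | 11003 | up     | ... | primary | ...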
diff --git a/src/test/regression/tests/051.bug60/bug.sql b/src/test/regression/tests/051.bug60/bug.sql
index bfce7bdf7..134199bd7 100644
--- a/src/test/regression/tests/051.bug60/bug.sql
+++ b/src/test/regression/tests/051.bug60/bug.sql
@@ -1,7 +1,7 @@
-- This script shows the problem. When run in a psql connected to
--- pgpool with one master and one read-only replica, this will cause
+-- pgpool with one primary and one read-only replica, this will cause
-- the pgpool process to exit because the exception found in the
--- commit (in the master node) does not match the success code for the
+-- commit (in the primary node) does not match the success code for the
-- commit in the read-only replica.
-- The expected behavior would be to continue execution normally
diff --git a/src/test/regression/tests/067.bug231/test.sh b/src/test/regression/tests/067.bug231/test.sh
index 46629e84a..02b6a86de 100755
--- a/src/test/regression/tests/067.bug231/test.sh
+++ b/src/test/regression/tests/067.bug231/test.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
#-------------------------------------------------------------------
# test script for for bug 231 (packet kind of backend 1 ['3'] does not
-# match with master/majority nodes packet kind ['2']).
+# match with main/majority nodes packet kind ['2']).
#
source $TESTLIBS
TESTDIR=testdir
diff --git a/src/test/regression/tests/071.execute_and_deallocate/test.sh b/src/test/regression/tests/071.execute_and_deallocate/test.sh
index 4fbc895c4..982356ee0 100755
--- a/src/test/regression/tests/071.execute_and_deallocate/test.sh
+++ b/src/test/regression/tests/071.execute_and_deallocate/test.sh
@@ -26,7 +26,7 @@ do
    export PGPORT=$PGPOOL_PORT
-    # set backend_weight , loadbalance to slave only
+    # set backend_weight, load balancing to standby only
    echo "backend_weight0 = 0" >> etc/pgpool.conf
    echo "backend_weight1 = 1" >> etc/pgpool.conf
diff --git a/src/tools/pcp/pcp_frontend_client.c b/src/tools/pcp/pcp_frontend_client.c
index a992026c0..1b51adca3 100644
--- a/src/tools/pcp/pcp_frontend_client.c
+++ b/src/tools/pcp/pcp_frontend_client.c
@@ -90,7 +90,7 @@ struct AppTypes AllAppTypes[] =
    {"pcp_pool_status", PCP_POOL_STATUS, "h:p:U:wWvd", "display pgpool configuration and status"},
    {"pcp_proc_count", PCP_PROC_COUNT, "h:p:U:wWvd", "display the list of pgpool-II child process PIDs"},
    {"pcp_proc_info", PCP_PROC_INFO, "h:p:P:U:awWvd", "display a pgpool-II child process' information"},
-    {"pcp_promote_node", PCP_PROMOTE_NODE, "n:h:p:U:gwWvd", "promote a node as new master from pgpool-II"},
+    {"pcp_promote_node", PCP_PROMOTE_NODE, "n:h:p:U:gwWvd", "promote a node as the new main node from pgpool-II"},
    {"pcp_recovery_node", PCP_RECOVERY_NODE, "n:h:p:U:wWvd", "recover a node"},
    {"pcp_stop_pgpool", PCP_STOP_PGPOOL, "m:h:p:U:s:wWvda", "terminate pgpool-II"},
    {"pcp_watchdog_info", PCP_WATCHDOG_INFO, "n:h:p:U:wWvd", "display a pgpool-II watchdog's information"},
@@ -753,7 +753,7 @@ output_watchdog_info_result(PCPResultInfo * pcpResInfo, bool verbose)
    else if (cluster->quorumStatus == -1)
        quorumStatus = "QUORUM ABSENT";
    else if (cluster->quorumStatus == -2)
-        quorumStatus = "NO MASTER NODE";
+        quorumStatus = "NO LEADER NODE";
    else
        quorumStatus = "UNKNOWN";
@@ -763,8 +763,8 @@ output_watchdog_info_result(PCPResultInfo * pcpResInfo, bool verbose)
    printf("Quorum state : %s\n", quorumStatus);
    printf("Alive Remote Nodes : %d\n", cluster->aliveNodeCount);
    printf("VIP up on local node : %s\n", cluster->escalated ? "YES" : "NO");
-    printf("Master Node Name : %s\n", cluster->masterNodeName);
-    printf("Master Host Name : %s\n\n", cluster->masterHostName);
+    printf("Leader Node Name : %s\n", cluster->leaderNodeName);
+    printf("Leader Host Name : %s\n\n", cluster->leaderHostName);
    printf("Watchdog Node Information \n");
    for (i = 0; i < cluster->nodeCount; i++)
@@ -786,8 +786,8 @@ output_watchdog_info_result(PCPResultInfo * pcpResInfo, bool verbose)
    printf("%d %s %s %s\n\n",
        cluster->remoteNodeCount + 1,
        cluster->escalated ? "YES" : "NO",
-        cluster->masterNodeName,
-        cluster->masterHostName);
+        cluster->leaderNodeName,
+        cluster->leaderHostName);
    for (i = 0; i < cluster->nodeCount; i++)
    {
@@ -960,9 +960,9 @@ backend_status_to_string(BackendInfo * bi)
char *
role_to_str(SERVER_ROLE role)
{
-    static char *role_str[] = {"master", "slave", "primary", "standby"};
+    static char *role_str[] = {"main", "replica", "primary", "standby"};
-    if (role < ROLE_MASTER || role > ROLE_STANDBY)
+    if (role < ROLE_MAIN || role > ROLE_STANDBY)
        return "unknown";
    return role_str[role];
}
diff --git a/src/utils/pool_process_reporting.c b/src/utils/pool_process_reporting.c
index cda74d99d..af0db6b3e 100644
--- a/src/utils/pool_process_reporting.c
+++ b/src/utils/pool_process_reporting.c
@@ -488,40 +488,40 @@ get_config(int *nrows)
    StrNCpy(status[i].desc, "ignore leading white spaces", POOLCONFIG_MAXDESCLEN);
    i++;
-    StrNCpy(status[i].name, "white_function_list", POOLCONFIG_MAXNAMELEN);
+    StrNCpy(status[i].name, "read_only_function_list", POOLCONFIG_MAXNAMELEN);
    *(status[i].value) = '\0';
-    for (j = 0; j < pool_config->num_white_function_list; j++)
+    for (j = 0; j < pool_config->num_read_only_function_list; j++)
    {
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        strncat(status[i].value, pool_config->white_function_list[j], len);
+        strncat(status[i].value, pool_config->read_only_function_list[j], len);
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        if (j != pool_config->num_white_function_list - 1)
+        if (j != pool_config->num_read_only_function_list - 1)
            strncat(status[i].value, ",", len);
    }
    StrNCpy(status[i].desc, "functions those do not write to database", POOLCONFIG_MAXDESCLEN);
    i++;
-    StrNCpy(status[i].name, "black_function_list", POOLCONFIG_MAXNAMELEN);
+    StrNCpy(status[i].name, "write_function_list", POOLCONFIG_MAXNAMELEN);
    *(status[i].value) = '\0';
-    for (j = 0; j < pool_config->num_black_function_list; j++)
+    for (j = 0; j < pool_config->num_write_function_list; j++)
    {
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        strncat(status[i].value, pool_config->black_function_list[j], len);
+        strncat(status[i].value, pool_config->write_function_list[j], len);
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        if (j != pool_config->num_black_function_list - 1)
+        if (j != pool_config->num_write_function_list - 1)
            strncat(status[i].value, ",", len);
    }
    StrNCpy(status[i].desc, "functions those write to database", POOLCONFIG_MAXDESCLEN);
    i++;
-    StrNCpy(status[i].name, "black_query_pattern_list", POOLCONFIG_MAXNAMELEN);
+    StrNCpy(status[i].name, "primary_routing_query_pattern_list", POOLCONFIG_MAXNAMELEN);
    *(status[i].value) = '\0';
-    for (j = 0; j < pool_config->num_black_query_pattern_list; j++)
+    for (j = 0; j < pool_config->num_primary_routing_query_pattern_list; j++)
    {
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        strncat(status[i].value, pool_config->black_query_pattern_list[j], len);
+        strncat(status[i].value, pool_config->primary_routing_query_pattern_list[j], len);
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        if (j != pool_config->num_black_query_pattern_list - 1)
+        if (j != pool_config->num_primary_routing_query_pattern_list - 1)
            strncat(status[i].value, ";", len);
    }
    StrNCpy(status[i].desc, "query patterns that should be sent to primary node", POOLCONFIG_MAXDESCLEN);
@@ -542,16 +542,16 @@ get_config(int *nrows)
    StrNCpy(status[i].desc, "statement level load balancing", POOLCONFIG_MAXDESCLEN);
    i++;
-    /* MASTER/SLAVE MODE */
+    /* NATIVE REPLICATION MODE */
-    StrNCpy(status[i].name, "master_slave_mode", POOLCONFIG_MAXNAMELEN);
-    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->master_slave_mode);
-    StrNCpy(status[i].desc, "if true, operate in master/slave mode", POOLCONFIG_MAXDESCLEN);
+    StrNCpy(status[i].name, "native_replication_mode", POOLCONFIG_MAXNAMELEN);
+    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->native_replication_mode);
+    StrNCpy(status[i].desc, "if true, operate in native replication mode", POOLCONFIG_MAXDESCLEN);
    i++;
-    StrNCpy(status[i].name, "master_slave_sub_mode", POOLCONFIG_MAXNAMELEN);
-    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->master_slave_sub_mode);
-    StrNCpy(status[i].desc, "master/slave sub mode", POOLCONFIG_MAXDESCLEN);
+    StrNCpy(status[i].name, "native_replication_sub_mode", POOLCONFIG_MAXNAMELEN);
+    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->native_replication_sub_mode);
+    StrNCpy(status[i].desc, "native replication sub mode", POOLCONFIG_MAXDESCLEN);
    i++;
    /* - Streaming - */
@@ -581,9 +581,9 @@ get_config(int *nrows)
    i++;
    /* - Special commands - */
-    StrNCpy(status[i].name, "follow_master_command", POOLCONFIG_MAXNAMELEN);
-    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->follow_master_command);
-    StrNCpy(status[i].desc, "follow master command", POOLCONFIG_MAXDESCLEN);
+    StrNCpy(status[i].name, "follow_primary_command", POOLCONFIG_MAXNAMELEN);
+    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->follow_primary_command);
+    StrNCpy(status[i].desc, "follow primary command", POOLCONFIG_MAXDESCLEN);
    i++;
    StrNCpy(status[i].name, "database_redirect_preference_list", POOLCONFIG_MAXNAMELEN);
@@ -769,7 +769,7 @@ get_config(int *nrows)
    StrNCpy(status[i].name, "wd_de_escalation_command", POOLCONFIG_MAXNAMELEN);
    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->wd_de_escalation_command);
-    StrNCpy(status[i].desc, "command executed when master pgpool resigns occurs", POOLCONFIG_MAXDESCLEN);
+    StrNCpy(status[i].desc, "command executed when the leader pgpool resigns", POOLCONFIG_MAXDESCLEN);
    i++;
    StrNCpy(status[i].name, "trusted_servers", POOLCONFIG_MAXNAMELEN);
@@ -779,7 +779,7 @@ get_config(int *nrows)
    StrNCpy(status[i].name, "delegate_IP", POOLCONFIG_MAXNAMELEN);
    snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%s", pool_config->delegate_IP);
-    StrNCpy(status[i].desc, "delegate IP address of master pgpool", POOLCONFIG_MAXDESCLEN);
+    StrNCpy(status[i].desc, "delegate IP address of leader pgpool", POOLCONFIG_MAXDESCLEN);
    i++;
    StrNCpy(status[i].name, "wd_priority", POOLCONFIG_MAXNAMELEN);
@@ -949,27 +949,27 @@ get_config(int *nrows)
    StrNCpy(status[i].desc, "Number of SELECTs hitting query cache", POOLCONFIG_MAXDESCLEN);
    i++;
-    StrNCpy(status[i].name, "white_memqcache_table_list", POOLCONFIG_MAXNAMELEN);
+    StrNCpy(status[i].name, "cache_safe_memqcache_table_list", POOLCONFIG_MAXNAMELEN);
    *(status[i].value) = '\0';
-    for (j = 0; j < pool_config->num_white_memqcache_table_list; j++)
+    for (j = 0; j < pool_config->num_cache_safe_memqcache_table_list; j++)
    {
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        strncat(status[i].value, pool_config->white_memqcache_table_list[j], len);
+        strncat(status[i].value, pool_config->cache_safe_memqcache_table_list[j], len);
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        if (j != pool_config->num_white_memqcache_table_list - 1)
+        if (j != pool_config->num_cache_safe_memqcache_table_list - 1)
            strncat(status[i].value, ",", len);
    }
    StrNCpy(status[i].desc, "tables to memqcache", POOLCONFIG_MAXDESCLEN);
    i++;
-    StrNCpy(status[i].name, "black_memqcache_table_list", POOLCONFIG_MAXNAMELEN);
+    StrNCpy(status[i].name, "cache_unsafe_memqcache_table_list", POOLCONFIG_MAXNAMELEN);
    *(status[i].value) = '\0';
-    for (j = 0; j < pool_config->num_black_memqcache_table_list; j++)
+    for (j = 0; j < pool_config->num_cache_unsafe_memqcache_table_list; j++)
    {
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        strncat(status[i].value, pool_config->black_memqcache_table_list[j], len);
+        strncat(status[i].value, pool_config->cache_unsafe_memqcache_table_list[j], len);
        len = POOLCONFIG_MAXVALLEN - strlen(status[i].value);
-        if (j != pool_config->num_black_memqcache_table_list - 1)
+        if (j != pool_config->num_cache_unsafe_memqcache_table_list - 1)
            strncat(status[i].value, ",", len);
    }
    StrNCpy(status[i].desc, "tables not to memqcache", POOLCONFIG_MAXDESCLEN);
@@ -1311,10 +1311,10 @@ get_nodes(int *nrows)
    }
    else
    {
-        if (i == REAL_MASTER_NODE_ID)
-            snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "master");
+        if (i == REAL_MAIN_NODE_ID)
+            snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "main");
        else
-            snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "slave");
+            snprintf(nodes[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "replica");
    }
    /* status last changed */
@@ -1844,10 +1844,10 @@ get_health_check_stats(int *nrows)
    }
    else
    {
-        if (i == REAL_MASTER_NODE_ID)
-            snprintf(stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "master");
+        if (i == REAL_MAIN_NODE_ID)
+            snprintf(stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "main");
        else
-            snprintf(stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "slave");
+            snprintf(stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "replica");
    }
    /* status last changed */
@@ -1991,10 +1991,10 @@ get_backend_stats(int *nrows)
    }
    else
    {
-        if (i == REAL_MASTER_NODE_ID)
-            snprintf(backend_stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "master");
+        if (i == REAL_MAIN_NODE_ID)
+            snprintf(backend_stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "main");
        else
-            snprintf(backend_stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "slave");
+            snprintf(backend_stats[i].role, POOLCONFIG_MAXWEIGHTLEN, "%s", "replica");
    }
}
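The name/desc strings above feed the SHOW pool_status style reporting, so the renamed parameters are directly visible from a client session. A sketch, with the port taken from the regression layout and PGPOOL SHOW being the per-parameter variant documented in pgpool_show.sgml:

    # report one renamed parameter...
    $PSQL -p 11000 -c "PGPOOL SHOW write_function_list" test
    # ...or dump the whole configuration table
    $PSQL -p 11000 -c "SHOW pool_status" test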
diff --git a/src/utils/pool_relcache.c b/src/utils/pool_relcache.c
index abf5ed70f..fc2e35745 100644
--- a/src/utils/pool_relcache.c
+++ b/src/utils/pool_relcache.c
@@ -152,7 +152,7 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
    }
    else
    {
-        dbname = MASTER_CONNECTION(backend)->sp->database;
+        dbname = MAIN_CONNECTION(backend)->sp->database;
        /*
         * If in streaming replication mode, prefer to send query to the
@@ -161,7 +161,7 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
        if (STREAM && PRIMARY_NODE_ID >= 0)
            node_id = PRIMARY_NODE_ID;
        else
-            node_id = MASTER_NODE_ID;
+            node_id = MAIN_NODE_ID;
    }
    now = time(NULL);
diff --git a/src/utils/pool_select_walker.c b/src/utils/pool_select_walker.c
index 712577c57..da8d83ea5 100644
--- a/src/utils/pool_select_walker.c
+++ b/src/utils/pool_select_walker.c
@@ -56,7 +56,7 @@ static bool function_volatile_property(char *fname, FUNC_VOLATILE_PROPERTY prope
/*
 * Return true if this SELECT has function calls *and* supposed to
- * modify database. We check black/white function list to determine
+ * modify database. We check write/read_only function list to determine
 * whether the function modifies database.
 */
bool
@@ -194,11 +194,11 @@ pool_has_insertinto_or_locking_clause(Node *node)
}
/*
- * Search function name in whilelist or blacklist regex array
+ * Search function name in the read_only list or write list regex array
 * Return 1 on success (found in list)
 * Return 0 when not found in list
 * Return -1 if the given search type doesn't exist.
- * Search type supported are: WHITELIST and BLACKLIST
+ * Supported search types are: READONLYLIST and WRITELIST
 */
int
pattern_compare(char *str, const int type, const char *param_name)
@@ -210,21 +210,21 @@ pattern_compare(char *str, const int type, const char *param_name)
    RegPattern *lists_patterns;
    int *pattc;
-    if (strcmp(param_name, "white_function_list") == 0 ||
-        strcmp(param_name, "black_function_list") == 0)
+    if (strcmp(param_name, "read_only_function_list") == 0 ||
+        strcmp(param_name, "write_function_list") == 0)
    {
        lists_patterns = pool_config->lists_patterns;
        pattc = &pool_config->pattc;
    }
-    else if (strcmp(param_name, "white_memqcache_table_list") == 0 ||
-        strcmp(param_name, "black_memqcache_table_list") == 0)
+    else if (strcmp(param_name, "cache_safe_memqcache_table_list") == 0 ||
+        strcmp(param_name, "cache_unsafe_memqcache_table_list") == 0)
    {
        lists_patterns = pool_config->lists_memqcache_table_patterns;
        pattc = &pool_config->memqcache_table_pattc;
    }
-    else if (strcmp(param_name, "black_query_pattern_list") == 0)
+    else if (strcmp(param_name, "primary_routing_query_pattern_list") == 0)
    {
        lists_patterns = pool_config->lists_query_patterns;
        pattc = &pool_config->query_pattc;
@@ -253,18 +253,18 @@ pattern_compare(char *str, const int type, const char *param_name)
    {
        switch (type)
        {
-            /* return 1 if string matches whitelist pattern */
-            case WHITELIST:
+            /* return 1 if string matches readonly list pattern */
+            case READONLYLIST:
                ereport(DEBUG2,
-                    (errmsg("comparing function name in whitelist regex array"),
+                    (errmsg("comparing function name in readonly list regex array"),
                     errdetail("pattern_compare: %s (%s) matched: %s", param_name, lists_patterns[i].pattern, s)));
                result = 1;
                break;
-            /* return 1 if string matches blacklist pattern */
-            case BLACKLIST:
+            /* return 1 if string matches write list pattern */
+            case WRITELIST:
                ereport(DEBUG2,
-                    (errmsg("comparing function name in blacklist regex array"),
+                    (errmsg("comparing function name in write list regex array"),
                     errdetail("pattern_compare: %s (%s) matched: %s", param_name, lists_patterns[i].pattern, s)));
                result = 1;
@@ -279,7 +279,7 @@ pattern_compare(char *str, const int type, const char *param_name)
            break;
        }
        ereport(DEBUG2,
-            (errmsg("comparing function name in blacklist/whitelist regex array"),
+            (errmsg("comparing function name in write/readonly list regex array"),
             errdetail("pattern_compare: %s (%s) not matched: %s", param_name, lists_patterns[i].pattern, s)));
    }
@@ -373,11 +373,11 @@ function_call_walker(Node *node, void *context)
    }
    /*
-     * If both white_function_list and black_function_list is empty,
+     * If both read_only_function_list and write_function_list are empty,
     * check volatile property of the function in the system catalog.
     */
-    if (pool_config->num_white_function_list == 0 &&
-        pool_config->num_black_function_list == 0)
+    if (pool_config->num_read_only_function_list == 0 &&
+        pool_config->num_write_function_list == 0)
    {
        if (function_volatile_property(fname, FUNC_VOLATILE))
        {
@@ -388,22 +388,22 @@ function_call_walker(Node *node, void *context)
    }
    /*
-     * Check white list if any.
+     * Check read_only list if any.
     */
-    if (pool_config->num_white_function_list > 0)
+    if (pool_config->num_read_only_function_list > 0)
    {
-        /* Search function in the white list regex patterns */
-        if (pattern_compare(fname, WHITELIST, "white_function_list") == 1)
+        /* Search function in the read_only list regex patterns */
+        if (pattern_compare(fname, READONLYLIST, "read_only_function_list") == 1)
        {
            /*
-             * If the function is found in the white list, we can
+             * If the function is found in the read_only list, we can
             * ignore it
             */
            return raw_expression_tree_walker(node, function_call_walker, context);
        }
        /*
-         * Since the function was not found in white list, we have
+         * Since the function was not found in the read_only list, we have
         * found a writing function.
         */
        ctx->has_function_call = true;
@@ -411,12 +411,12 @@ function_call_walker(Node *node, void *context)
    }
    /*
-     * Check black list if any.
+     * Check write list if any.
     */
-    if (pool_config->num_black_function_list > 0)
+    if (pool_config->num_write_function_list > 0)
    {
-        /* Search function in the black list regex patterns */
-        if (pattern_compare(fname, BLACKLIST, "black_function_list") == 1)
+        /* Search function in the write list regex patterns */
+        if (pattern_compare(fname, WRITELIST, "write_function_list") == 1)
        {
            /* Found. */
            ctx->has_function_call = true;
@@ -916,7 +916,7 @@ pool_has_pgpool_regclass(void)
    char *user;
    backend = pool_get_session_context(false)->backend;
-    user = MASTER_CONNECTION(backend)->sp->user;
+    user = MAIN_CONNECTION(backend)->sp->user;
    if (!relcache)
    {
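function_call_walker() consults the renamed lists before falling back to the catalog's volatility lookup, so the long-standing configuration idiom for sequence functions carries over with the new names. A sketch in the regression tests' style:

    # SELECTs calling nextval()/setval() write to the database, so route
    # them to the primary by listing the functions in write_function_list
    echo "write_function_list = 'nextval,setval'" >> etc/pgpool.conf
    # anything not matching is then treated as read-only (the catalog
    # volatility check only applies when both lists are empty)
    echo "read_only_function_list = ''" >> etc/pgpool.conf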
'M' +#define CLUSTER_IAM_NOT_TRUE_LEADER 'X' +#define CLUSTER_IAM_RESIGNING_FROM_LEADER 'R' #define CLUSTER_NODE_INVALID_VERSION 'V' #define CLUSTER_NODE_REQUIRE_TO_RELOAD 'I' #define CLUSTER_NODE_APPEARING_LOST 'Y' #define CLUSTER_NODE_APPEARING_FOUND 'Z' -#define WD_MASTER_NODE getMasterWatchdogNode() +#define WD_LEADER_NODE getLeaderWatchdogNode() typedef struct packet_types { @@ -183,7 +183,7 @@ packet_types all_packet_types[] = { {WD_INFORM_I_AM_GOING_DOWN, "INFORM I AM GOING DOWN"}, {WD_ASK_FOR_POOL_CONFIG, "ASK FOR POOL CONFIG"}, {WD_POOL_CONFIG_DATA, "CONFIG DATA"}, - {WD_GET_MASTER_DATA_REQUEST, "DATA REQUEST FOR MASTER"}, + {WD_GET_LEADER_DATA_REQUEST, "DATA REQUEST FOR LEADER"}, {WD_GET_RUNTIME_VARIABLE_VALUE, "GET WD RUNTIME VARIABLE VALUE"}, {WD_CMD_REPLY_IN_DATA, "COMMAND REPLY IN DATA"}, {WD_FAILOVER_LOCKING_REQUEST, "FAILOVER LOCKING REQUEST"}, @@ -227,9 +227,9 @@ char *wd_state_names[] = { "LOADING", "JOINING", "INITIALIZING", - "MASTER", + "LEADER", "PARTICIPATING IN ELECTION", - "STANDING FOR MASTER", + "STANDING FOR LEADER", "STANDBY", "LOST", "IN NETWORK TROUBLE", @@ -337,19 +337,19 @@ typedef struct WDInterfaceStatus bool if_up; } WDInterfaceStatus; -typedef struct WDClusterMaster +typedef struct WDClusterLeader { - WatchdogNode *masterNode; + WatchdogNode *leaderNode; WatchdogNode **standbyNodes; int standby_nodes_count; bool holding_vip; -} WDClusterMasterInfo; +} WDClusterLeaderInfo; typedef struct wd_cluster { WatchdogNode *localNode; WatchdogNode *remoteNodes; - WDClusterMasterInfo clusterMasterInfo; + WDClusterLeaderInfo clusterLeaderInfo; int remoteNodeCount; int quorum_status; unsigned int nextCommandID; @@ -500,7 +500,7 @@ static void cluster_service_message_processor(WatchdogNode * wdNode, WDPacketDat static int get_cluster_node_count(void); static void clear_command_node_result(WDCommandNodeResult * nodeResult); -static inline bool is_local_node_true_master(void); +static inline bool is_local_node_true_leader(void); static inline WD_STATES get_local_node_state(void); static int set_state(WD_STATES newState); @@ -515,8 +515,8 @@ static int watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPack static int watchdog_state_machine_nw_error(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand); static int watchdog_state_machine_nw_isolation(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand); -static int I_am_master_and_cluser_in_split_brain(WatchdogNode * otherMasterNode); -static void handle_split_brain(WatchdogNode * otherMasterNode, WDPacketData * pkt); +static int I_am_leader_and_cluser_in_split_brain(WatchdogNode * otherLeaderNode); +static void handle_split_brain(WatchdogNode * otherLeaderNode, WDPacketData * pkt); static bool beacon_message_received_from_node(WatchdogNode * wdNode, WDPacketData * pkt); static void cleanUpIPCCommand(WDCommandData * ipcCommand); @@ -542,7 +542,7 @@ static IPC_CMD_PREOCESS_RES process_IPC_nodeList_command(WDCommandData * ipcComm static IPC_CMD_PREOCESS_RES process_IPC_get_runtime_variable_value_request(WDCommandData * ipcCommand); static IPC_CMD_PREOCESS_RES process_IPC_online_recovery(WDCommandData * ipcCommand); static IPC_CMD_PREOCESS_RES process_IPC_failover_indication(WDCommandData * ipcCommand); -static IPC_CMD_PREOCESS_RES process_IPC_data_request_from_master(WDCommandData * ipcCommand); +static IPC_CMD_PREOCESS_RES process_IPC_data_request_from_leader(WDCommandData * ipcCommand); static IPC_CMD_PREOCESS_RES 
process_IPC_failover_command(WDCommandData * ipcCommand); static IPC_CMD_PREOCESS_RES process_failover_command_on_coordinator(WDCommandData * ipcCommand); static IPC_CMD_PREOCESS_RES process_IPC_execute_cluster_command(WDCommandData * ipcCommand); @@ -578,8 +578,8 @@ static void update_interface_status(void); static bool any_interface_available(void); static WDPacketData * process_data_request(WatchdogNode * wdNode, WDPacketData * pkt); -static WatchdogNode * getMasterWatchdogNode(void); -static void set_cluster_master_node(WatchdogNode * wdNode); +static WatchdogNode * getLeaderWatchdogNode(void); +static void set_cluster_leader_node(WatchdogNode * wdNode); static void clear_standby_nodes_list(void); static int standby_node_left_cluster(WatchdogNode * wdNode); static int standby_node_join_cluster(WatchdogNode * wdNode); @@ -777,10 +777,10 @@ wd_cluster_initialize(void) idx++; } - g_cluster.clusterMasterInfo.masterNode = NULL; - g_cluster.clusterMasterInfo.standbyNodes = palloc0(sizeof(WatchdogNode *) * g_cluster.remoteNodeCount); - g_cluster.clusterMasterInfo.standby_nodes_count = 0; - g_cluster.clusterMasterInfo.holding_vip = false; + g_cluster.clusterLeaderInfo.leaderNode = NULL; + g_cluster.clusterLeaderInfo.standbyNodes = palloc0(sizeof(WatchdogNode *) * g_cluster.remoteNodeCount); + g_cluster.clusterLeaderInfo.standby_nodes_count = 0; + g_cluster.clusterLeaderInfo.holding_vip = false; g_cluster.quorum_status = -1; g_cluster.nextCommandID = 1; g_cluster.clusterInitialized = false; @@ -2027,8 +2027,8 @@ static IPC_CMD_PREOCESS_RES process_IPC_command(WDCommandData * ipcCommand) return process_IPC_failover_indication(ipcCommand); break; - case WD_GET_MASTER_DATA_REQUEST: - return process_IPC_data_request_from_master(ipcCommand); + case WD_GET_LEADER_DATA_REQUEST: + return process_IPC_data_request_from_leader(ipcCommand); break; case WD_GET_RUNTIME_VARIABLE_VALUE: @@ -2143,7 +2143,7 @@ static IPC_CMD_PREOCESS_RES process_IPC_get_runtime_variable_value_request(WDCom else if (strcasecmp(WD_RUNTIME_VAR_QUORUM_STATE, requestVarName) == 0) { jw_put_int(jNode, WD_JSON_KEY_VALUE_DATA_TYPE, VALUE_DATA_TYPE_INT); - jw_put_int(jNode, WD_JSON_KEY_VALUE_DATA, WD_MASTER_NODE ? WD_MASTER_NODE->quorum_status : -2); + jw_put_int(jNode, WD_JSON_KEY_VALUE_DATA, WD_LEADER_NODE ? 
WD_LEADER_NODE->quorum_status : -2); } else if (strcasecmp(WD_RUNTIME_VAR_ESCALATION_STATE, requestVarName) == 0) { @@ -2403,7 +2403,7 @@ service_expired_failovers(void) { /* lower my wd_priority for moment */ g_cluster.localNode->wd_priority = -1; - send_cluster_service_message(NULL, NULL, CLUSTER_IAM_RESIGNING_FROM_MASTER); + send_cluster_service_message(NULL, NULL, CLUSTER_IAM_RESIGNING_FROM_LEADER); set_state(WD_JOINING); } } @@ -2688,7 +2688,7 @@ static WDFailoverObject * add_failover(POOL_REQUEST_KIND reqKind, int *node_id_l } /* - * The function processes all failover commands on master node + * The function processes all failover commands on leader node */ static IPC_CMD_PREOCESS_RES process_failover_command_on_coordinator(WDCommandData * ipcCommand) { @@ -2806,7 +2806,7 @@ static IPC_CMD_PREOCESS_RES process_failover_command_on_coordinator(WDCommandDat static IPC_CMD_PREOCESS_RES process_IPC_failover_command(WDCommandData * ipcCommand) { - if (is_local_node_true_master()) + if (is_local_node_true_leader()) { ereport(LOG, (errmsg("watchdog received the failover command from local pgpool-II on IPC interface"))); @@ -2819,13 +2819,13 @@ static IPC_CMD_PREOCESS_RES process_IPC_failover_command(WDCommandData * ipcComm wd_packet_shallow_copy(&ipcCommand->sourcePacket, &ipcCommand->commandPacket); set_next_commandID_in_message(&ipcCommand->commandPacket); - ipcCommand->sendToNode = WD_MASTER_NODE; /* send the command to - * master node */ + ipcCommand->sendToNode = WD_LEADER_NODE; /* send the command to + * leader node */ if (send_command_packet_to_remote_nodes(ipcCommand, true) <= 0) { ereport(LOG, (errmsg("unable to process the failover command request received on IPC interface"), - errdetail("failed to forward the request to the master watchdog node \"%s\"", WD_MASTER_NODE->nodeName))); + errdetail("failed to forward the request to the leader watchdog node \"%s\"", WD_LEADER_NODE->nodeName))); return IPC_CMD_ERROR; } else @@ -2834,8 +2834,8 @@ static IPC_CMD_PREOCESS_RES process_IPC_failover_command(WDCommandData * ipcComm * we need to wait for the result */ ereport(LOG, - (errmsg("failover request from local pgpool-II node received on IPC interface is forwarded to master watchdog node \"%s\"", - WD_MASTER_NODE->nodeName), + (errmsg("failover request from local pgpool-II node received on IPC interface is forwarded to leader watchdog node \"%s\"", + WD_LEADER_NODE->nodeName), errdetail("waiting for the reply..."))); return IPC_CMD_PROCESSING; } @@ -2869,12 +2869,12 @@ static IPC_CMD_PREOCESS_RES process_IPC_online_recovery(WDCommandData * ipcComma { ereport(LOG, (errmsg("unable to process the online recovery request received on IPC interface"), - errdetail("failed to forward the request to the master watchdog node \"%s\"", WD_MASTER_NODE->nodeName))); + errdetail("failed to forward the request to the leader watchdog node \"%s\"", WD_LEADER_NODE->nodeName))); return IPC_CMD_ERROR; } ereport(LOG, - (errmsg("online recovery request from local pgpool-II node received on IPC interface is forwarded to master watchdog node \"%s\"", - WD_MASTER_NODE->nodeName), + (errmsg("online recovery request from local pgpool-II node received on IPC interface is forwarded to leader watchdog node \"%s\"", + WD_LEADER_NODE->nodeName), errdetail("waiting for the reply..."))); return IPC_CMD_PROCESSING; @@ -2889,7 +2889,7 @@ static IPC_CMD_PREOCESS_RES process_IPC_online_recovery(WDCommandData * ipcComma return IPC_CMD_TRY_AGAIN; } -static IPC_CMD_PREOCESS_RES process_IPC_data_request_from_master(WDCommandData 
* ipcCommand) +static IPC_CMD_PREOCESS_RES process_IPC_data_request_from_leader(WDCommandData * ipcCommand) { /* * if cluster or myself is not in stable state just return cluster in @@ -2907,12 +2907,12 @@ static IPC_CMD_PREOCESS_RES process_IPC_data_request_from_master(WDCommandData * wd_packet_shallow_copy(&ipcCommand->sourcePacket, &ipcCommand->commandPacket); set_next_commandID_in_message(&ipcCommand->commandPacket); - ipcCommand->sendToNode = WD_MASTER_NODE; + ipcCommand->sendToNode = WD_LEADER_NODE; if (send_command_packet_to_remote_nodes(ipcCommand, true) <= 0) { ereport(LOG, (errmsg("unable to process the get data request received on IPC interface"), - errdetail("failed to forward the request to the master watchdog node \"%s\"", WD_MASTER_NODE->nodeName))); + errdetail("failed to forward the request to the leader watchdog node \"%s\"", WD_LEADER_NODE->nodeName))); return IPC_CMD_ERROR; } else @@ -2921,17 +2921,17 @@ static IPC_CMD_PREOCESS_RES process_IPC_data_request_from_master(WDCommandData * * we need to wait for the result */ ereport(LOG, - (errmsg("get data request from local pgpool-II node received on IPC interface is forwarded to master watchdog node \"%s\"", - WD_MASTER_NODE->nodeName), + (errmsg("get data request from local pgpool-II node received on IPC interface is forwarded to leader watchdog node \"%s\"", + WD_LEADER_NODE->nodeName), errdetail("waiting for the reply..."))); return IPC_CMD_PROCESSING; } } - else if (is_local_node_true_master()) + else if (is_local_node_true_leader()) { /* - * This node is itself a master node, So send the empty result with OK + * This node is itself a leader node, so send the empty result with OK * tag */ return IPC_CMD_OK; @@ -3013,7 +3013,7 @@ static IPC_CMD_PREOCESS_RES process_IPC_failover_indication(WDCommandData * ipcC else { ereport(LOG, - (errmsg("received the failover indication from Pgpool-II on IPC interface, but only master can do failover"))); + (errmsg("received the failover indication from Pgpool-II on IPC interface, but only the leader can do failover"))); } reply_to_failove_command(ipcCommand, res, 0); @@ -3024,7 +3024,7 @@ static IPC_CMD_PREOCESS_RES process_IPC_failover_indication(WDCommandData * ipcC /* Failover start basically does nothing fency, It just sets the failover_in_progress * flag and inform all nodes that the failover is in progress. * - * only the local node that is a master can start the failover. + * only the local node that is the leader can start the failover.
*/ static WDFailoverCMDResults failover_start_indication(WDCommandData * ipcCommand) @@ -3032,7 +3032,7 @@ failover_start_indication(WDCommandData * ipcCommand) ereport(LOG, (errmsg("watchdog is informed of failover start by the main process"))); - /* only coordinator(master) node is allowed to process failover */ + /* only coordinator(leader) node is allowed to process failover */ if (get_local_node_state() == WD_COORDINATOR) { /* inform to all nodes about failover start */ @@ -3060,7 +3060,7 @@ failover_end_indication(WDCommandData * ipcCommand) ereport(LOG, (errmsg("watchdog is informed of failover end by the main process"))); - /* only coordinator(master) node is allowed to process failover */ + /* only coordinator(leader) node is allowed to process failover */ if (get_local_node_state() == WD_COORDINATOR) { send_message_of_type(NULL, WD_FAILOVER_END, NULL); @@ -3613,11 +3613,11 @@ static JsonNode * get_node_list_json(int id) JsonNode *jNode = jw_create_with_object(true); jw_put_int(jNode, "RemoteNodeCount", g_cluster.remoteNodeCount); - jw_put_int(jNode, "QuorumStatus", WD_MASTER_NODE ? WD_MASTER_NODE->quorum_status : -2); - jw_put_int(jNode, "AliveNodeCount", WD_MASTER_NODE ? WD_MASTER_NODE->standby_nodes_count : 0); + jw_put_int(jNode, "QuorumStatus", WD_LEADER_NODE ? WD_LEADER_NODE->quorum_status : -2); + jw_put_int(jNode, "AliveNodeCount", WD_LEADER_NODE ? WD_LEADER_NODE->standby_nodes_count : 0); jw_put_int(jNode, "Escalated", g_cluster.localNode->escalated); - jw_put_string(jNode, "MasterNodeName", WD_MASTER_NODE ? WD_MASTER_NODE->nodeName : "Not Set"); - jw_put_string(jNode, "MasterHostName", WD_MASTER_NODE ? WD_MASTER_NODE->hostname : "Not Set"); + jw_put_string(jNode, "LeaderNodeName", WD_LEADER_NODE ? WD_LEADER_NODE->nodeName : "Not Set"); + jw_put_string(jNode, "LeaderHostName", WD_LEADER_NODE ? 
WD_LEADER_NODE->hostname : "Not Set"); if (id < 0) { jw_put_int(jNode, "NodeCount", g_cluster.remoteNodeCount + 1); @@ -3868,28 +3868,28 @@ cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt) switch (pkt->data[0]) { - case CLUSTER_IAM_TRUE_MASTER: + case CLUSTER_IAM_TRUE_LEADER: { /* * The cluster was in split-brain and remote node thiks it is - * the worthy master + * the worthy leader */ if (get_local_node_state() == WD_COORDINATOR) { ereport(LOG, - (errmsg("remote node \"%s\" decided it is the true master", wdNode->nodeName), + (errmsg("remote node \"%s\" decided it is the true leader", wdNode->nodeName), errdetail("re-initializing the local watchdog cluster state because of split-brain"))); - send_cluster_service_message(NULL, pkt, CLUSTER_IAM_RESIGNING_FROM_MASTER); + send_cluster_service_message(NULL, pkt, CLUSTER_IAM_RESIGNING_FROM_LEADER); set_state(WD_JOINING); } - else if (WD_MASTER_NODE != NULL && WD_MASTER_NODE != wdNode) + else if (WD_LEADER_NODE != NULL && WD_LEADER_NODE != wdNode) { ereport(LOG, - (errmsg("remote node \"%s\" thinks it is a master/coordinator and I am causing the split-brain," \ - " but as per our record \"%s\" is the cluster master/coordinator", + (errmsg("remote node \"%s\" thinks it is a leader/coordinator and I am causing the split-brain," \ + " but as per our record \"%s\" is the cluster leader/coordinator", wdNode->nodeName, - WD_MASTER_NODE->nodeName), + WD_LEADER_NODE->nodeName), errdetail("restarting the cluster"))); send_cluster_service_message(NULL, pkt, CLUSTER_NEEDS_ELECTION); set_state(WD_JOINING); @@ -3897,12 +3897,12 @@ cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt) } break; - case CLUSTER_IAM_RESIGNING_FROM_MASTER: + case CLUSTER_IAM_RESIGNING_FROM_LEADER: { - if (WD_MASTER_NODE == wdNode) + if (WD_LEADER_NODE == wdNode) { ereport(LOG, - (errmsg("master/coordinator node \"%s\" decided to resigning from master, probably because of split-brain", + (errmsg("leader/coordinator node \"%s\" decided to resign from leader, probably because of split-brain", wdNode->nodeName), errdetail("re-initializing the local watchdog cluster state"))); @@ -3911,9 +3911,9 @@ cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt) else { ereport(LOG, - (errmsg("master/coordinator node \"%s\" decided to resign from master, probably because of split-brain", + (errmsg("leader/coordinator node \"%s\" decided to resign from leader, probably because of split-brain", wdNode->nodeName), - errdetail("It was not our coordinator/master anyway. ignoring the message"))); + errdetail("It was not our coordinator/leader anyway.
Ignoring the message"))); } } break; @@ -3940,12 +3940,12 @@ cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt) } break; - case CLUSTER_IAM_NOT_TRUE_MASTER: + case CLUSTER_IAM_NOT_TRUE_LEADER: { - if (WD_MASTER_NODE == wdNode) + if (WD_LEADER_NODE == wdNode) { ereport(LOG, - (errmsg("master/coordinator node \"%s\" decided it was not true master, probably because of split-brain", wdNode->nodeName), + (errmsg("leader/coordinator node \"%s\" decided it was not the true leader, probably because of split-brain", wdNode->nodeName), errdetail("re-initializing the local watchdog cluster state"))); set_state(WD_JOINING); @@ -3953,15 +3953,15 @@ cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt) else if (get_local_node_state() == WD_COORDINATOR) { ereport(LOG, - (errmsg("node \"%s\" was also thinking it was a master/coordinator and decided to resign", wdNode->nodeName), + (errmsg("node \"%s\" was also thinking it was a leader/coordinator and decided to resign", wdNode->nodeName), errdetail("cluster is recovering from split-brain"))); } else { ereport(LOG, - (errmsg("master/coordinator node \"%s\" decided to resign from master, probably because of split-brain", + (errmsg("leader/coordinator node \"%s\" decided to resign from leader, probably because of split-brain", wdNode->nodeName), - errdetail("but it was not our coordinator/master anyway. ignoring the message"))); + errdetail("but it was not our coordinator/leader anyway. Ignoring the message"))); } } break; @@ -4095,7 +4095,7 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt) cluster_service_message_processor(wdNode, pkt); break; - case WD_GET_MASTER_DATA_REQUEST: + case WD_GET_LEADER_DATA_REQUEST: replyPkt = process_data_request(wdNode, pkt); break; @@ -4171,15 +4171,15 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt) if (wdNode->state == WD_COORDINATOR) { - if (WD_MASTER_NODE == NULL) + if (WD_LEADER_NODE == NULL) { - set_cluster_master_node(wdNode); + set_cluster_leader_node(wdNode); } - else if (WD_MASTER_NODE != wdNode) + else if (WD_LEADER_NODE != wdNode) { ereport(LOG, (errmsg("\"%s\" is the coordinator as per our record but \"%s\" is also announcing as a coordinator", - WD_MASTER_NODE->nodeName, wdNode->nodeName), + WD_LEADER_NODE->nodeName, wdNode->nodeName), errdetail("cluster is in the split-brain"))); if (get_local_node_state() != WD_COORDINATOR) @@ -4196,16 +4196,16 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt) /* * okay the contention is between me and the other * node try to figure out which node is the worthy - * master + * leader */ ereport(LOG, (errmsg("I am the coordinator but \"%s\" is also announcing as a coordinator", wdNode->nodeName), - errdetail("trying to figure out the best contender for the master/coordinator node"))); + errdetail("trying to figure out the best contender for the leader/coordinator node"))); handle_split_brain(wdNode, pkt); } } - else if (WD_MASTER_NODE == wdNode && oldQuorumStatus != wdNode->quorum_status) + else if (WD_LEADER_NODE == wdNode && oldQuorumStatus != wdNode->quorum_status) { /* inform Pgpool main about quorum status changes */ register_watchdog_quorum_change_interupt(); @@ -4213,10 +4213,10 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt) } /* - * if the info message is from master node. Make sure we are - * in sync with the master node state + * if the info message is from the leader node.
Make sure we are + * in sync with the leader node state */ - else if (WD_MASTER_NODE == wdNode) + else if (WD_LEADER_NODE == wdNode) { if (wdNode->state != WD_COORDINATOR) { @@ -4267,7 +4267,7 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt) /* * if I am coordinator reply with accept, otherwise reject */ - if (g_cluster.localNode == WD_MASTER_NODE) + if (g_cluster.localNode == WD_LEADER_NODE) { replyPkt = get_minimum_message(WD_ACCEPT_MESSAGE, pkt); } @@ -4284,24 +4284,24 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt) * if the message is received from coordinator reply with * info, otherwise reject */ - if (WD_MASTER_NODE != NULL && wdNode != WD_MASTER_NODE) + if (WD_LEADER_NODE != NULL && wdNode != WD_LEADER_NODE) { ereport(LOG, (errmsg("\"%s\" is our coordinator node, but \"%s\" is also announcing as a coordinator", - WD_MASTER_NODE->nodeName, wdNode->nodeName), + WD_LEADER_NODE->nodeName, wdNode->nodeName), errdetail("broadcasting the cluster in split-brain message"))); send_cluster_service_message(NULL, pkt, CLUSTER_IN_SPLIT_BRAIN); } - else if (WD_MASTER_NODE != NULL) + else if (WD_LEADER_NODE != NULL) { replyPkt = get_mynode_info_message(pkt); beacon_message_received_from_node(wdNode, pkt); } /* - * if (WD_MASTER_NODE == NULL) * do not reply to beacon if we are not connected to - * any master node + * if (WD_LEADER_NODE == NULL) * do not reply to beacon if we are not connected to - * any leader node */ } break; @@ -5114,9 +5114,9 @@ static inline WD_STATES get_local_node_state(void) } static inline bool -is_local_node_true_master(void) +is_local_node_true_leader(void) { - return (get_local_node_state() == WD_COORDINATOR && WD_MASTER_NODE == g_cluster.localNode); + return (get_local_node_state() == WD_COORDINATOR && WD_LEADER_NODE == g_cluster.localNode); } /* @@ -5202,7 +5202,7 @@ wd_commands_packet_processor(WD_EVENTS event, WatchdogNode * wdNode, WDPacketDat if (pkt->type == WD_ACCEPT_MESSAGE) reply_to_failove_command(ipcCommand, FAILOVER_RES_PROCEED, 0); else - reply_to_failove_command(ipcCommand, FAILOVER_RES_MASTER_REJECTED, 0); + reply_to_failove_command(ipcCommand, FAILOVER_RES_LEADER_REJECTED, 0); return true; } @@ -5299,11 +5299,11 @@ watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pk /* Inform the node, that it is lost for us */ send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_APPEARING_LOST); } - if (wdNode == WD_MASTER_NODE) + if (wdNode == WD_LEADER_NODE) { ereport(LOG, (errmsg("watchdog cluster has lost the coordinator node"))); - set_cluster_master_node(NULL); + set_cluster_leader_node(NULL); } /* close all socket connections to the node */ @@ -5579,7 +5579,7 @@ watchdog_state_machine_joining(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD switch (event) { case WD_EVENT_WD_STATE_CHANGED: - set_cluster_master_node(NULL); + set_cluster_leader_node(NULL); try_connecting_with_all_unreachable_nodes(); send_cluster_command(NULL, WD_REQ_INFO_MESSAGE, 4); set_timeout(MAX_SECS_WAIT_FOR_REPLY_FROM_NODE); @@ -5663,10 +5663,10 @@ watchdog_state_machine_initializing(WD_EVENTS event, WatchdogNode * wdNode, WDPa case WD_EVENT_TIMEOUT: { /* - * If master node exists in cluser, Join it otherwise try - * becoming a master + * If a leader node exists in the cluster, join it, otherwise try + * becoming a leader */ - if (WD_MASTER_NODE) + if (WD_LEADER_NODE) { /* * we found the coordinator node in network.
Just join the @@ -5761,7 +5761,7 @@ watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode * wdNode, WDPa { ereport(LOG, (errmsg("our stand for coordinator request is rejected by node \"%s\"",wdNode->nodeName), - errdetail("we might be in partial network isolation and cluster already have a valid master"), + errdetail("we might be in partial network isolation and the cluster already has a valid leader"), errhint("please verify the watchdog life-check and network is working properly"))); set_state(WD_NETWORK_ISOLATION); } @@ -5853,9 +5853,9 @@ watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode * wdNode, WDPa } /* - * Event handler for the coordinator/master state. + * Event handler for the coordinator/leader state. * The function handels all the event received when the local - * node is the master/coordinator node. + * node is the leader/coordinator node. */ static int watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand) @@ -5870,7 +5870,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac set_timeout(MAX_SECS_WAIT_FOR_REPLY_FROM_NODE); update_missed_beacon_count(NULL,true); ereport(LOG, - (errmsg("I am announcing my self as master/coordinator watchdog node"))); + (errmsg("I am announcing myself as the leader/coordinator watchdog node"))); for (i = 0; i < g_cluster.remoteNodeCount; i++) { @@ -5908,7 +5908,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac (errmsg("I am the cluster leader node"), errdetail("our declare coordinator message is accepted by all nodes"))); - set_cluster_master_node(g_cluster.localNode); + set_cluster_leader_node(g_cluster.localNode); register_watchdog_state_change_interupt(); /* @@ -5979,8 +5979,8 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac case WD_EVENT_CLUSTER_QUORUM_CHANGED: { - /* make sure we are accepted as master */ - if (WD_MASTER_NODE == g_cluster.localNode) + /* make sure we are accepted as leader */ + if (WD_LEADER_NODE == g_cluster.localNode) { if (g_cluster.quorum_status == -1) { @@ -5988,7 +5988,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac (errmsg("We have lost the quorum"))); /* - * We have lost the quorum, stay as a master node but + * We have lost the quorum, stay as a leader node but * perform de-escalation.
As keeping the VIP may * result in split-brain */ @@ -6030,7 +6030,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac * We do have some IP addresses assigned so its not a * total black-out check if we still have the VIP assigned */ - if (g_cluster.clusterMasterInfo.holding_vip == true) + if (g_cluster.clusterLeaderInfo.holding_vip == true) { ListCell *lc; bool vip_exists = false; @@ -6087,10 +6087,10 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac { /* * Since data version 1.1 we support CLUSTER_NODE_REQUIRE_TO_RELOAD - * which makes the standby nodes to re-send the join master node + * which makes the standby nodes re-send the join message to the leader node */ ereport(DEBUG1, - (errmsg("asking remote node \"%s\" to rejoin master", wdNode->nodeName), + (errmsg("asking remote node \"%s\" to rejoin the leader", wdNode->nodeName), errdetail("watchdog data version %s",WD_MESSAGE_DATA_VERSION))); send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_REQUIRE_TO_RELOAD); @@ -6121,16 +6121,16 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac (errmsg("remote node \"%s\" is reachable again", wdNode->nodeName), errdetail("trying to add it back as a standby"))); wdNode->node_lost_reason = NODE_LOST_UNKNOWN_REASON; - /* If I am the cluster master. Ask for the node info and to re-send the join message */ + /* If I am the cluster leader, ask for the node info and to re-send the join message */ send_message_of_type(wdNode, WD_REQ_INFO_MESSAGE, NULL); if (wdNode->wd_data_major_version >= 1 && wdNode->wd_data_minor_version >= 1) { /* * Since data version 1.1 we support CLUSTER_NODE_REQUIRE_TO_RELOAD - * which makes the standby nodes to re-send the join master node + * which makes the standby nodes re-send the join message to the leader node */ ereport(DEBUG1, - (errmsg("asking remote node \"%s\" to rejoin master", wdNode->nodeName), + (errmsg("asking remote node \"%s\" to rejoin the leader", wdNode->nodeName), errdetail("watchdog data version %s",WD_MESSAGE_DATA_VERSION))); send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_REQUIRE_TO_RELOAD); @@ -6185,13 +6185,13 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac /* * we are not able to decide which should be * the best candidate to stay as - * master/coordinator node This could also + * leader/coordinator node. This could also * happen if the remote node is using the * older version of Pgpool-II which send the * empty beacon messages. */ ereport(LOG, - (errmsg("We are in split brain, and not able to decide the best candidate for master/coordinator"), + (errmsg("We are in split brain, and are not able to decide the best candidate for leader/coordinator"), errdetail("re-initializing the local watchdog cluster state"))); send_cluster_service_message(wdNode, pkt, CLUSTER_NEEDS_ELECTION); @@ -6260,7 +6260,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac * and incorrect from the other pgpool-II nodes in the cluster. So the ideal solution * for the situation is to make the pgpool-II main process aware of the network black out * and when the network recovers the pgpool-II asks the watchdog to sync again the state of - * all configured backend nodes from the master pgpool-II node. But to implement this lot + * all configured backend nodes from the leader pgpool-II node.
But to implement this a lot * of time is required, So until that time we are just opting for the easiest solution here * which is to commit a suicide as soon an the network becomes unreachable */ @@ -6329,7 +6329,7 @@ watchdog_state_machine_nw_error(WD_EVENTS event, WatchdogNode * wdNode, WDPacket /* * we could end up in tis state if we were connected to the - * master node as standby and got lost on the master. + * leader node as standby and got lost on the leader. * Here we just wait for BEACON_MESSAGE_INTERVAL_SECONDS * and retry to join the cluster. */ @@ -6401,107 +6401,107 @@ beacon_message_received_from_node(WatchdogNode * wdNode, WDPacketData * pkt) } /* - * This function decides the best contender for a coordinator/master node + * This function decides the best contender for a coordinator/leader node * when the remote node info states it is a coordinator while - * the local node is also in the master/coordinator state. + * the local node is also in the leader/coordinator state. * * return: - * -1 : remote node is the best candidate to remain as master - * 0 : both local and remote nodes are not worthy master or error - * 1 : local node should remain as the master/coordinator + * -1 : remote node is the best candidate to remain as leader + * 0 : neither the local nor the remote node is a worthy leader, or an error occurred + * 1 : local node should remain as the leader/coordinator */ static int -I_am_master_and_cluser_in_split_brain(WatchdogNode * otherMasterNode) +I_am_leader_and_cluser_in_split_brain(WatchdogNode * otherLeaderNode) { if (get_local_node_state() != WD_COORDINATOR) return 0; - if (otherMasterNode->state != WD_COORDINATOR) + if (otherLeaderNode->state != WD_COORDINATOR) return 0; - if (otherMasterNode->current_state_time.tv_sec == 0) + if (otherLeaderNode->current_state_time.tv_sec == 0) { ereport(LOG, - (errmsg("not enough data to decide the master node"), - errdetail("the watchdog node:\"%s\" is using the older version of Pgpool-II", otherMasterNode->nodeName))); + (errmsg("not enough data to decide the leader node"), + errdetail("the watchdog node:\"%s\" is using an older version of Pgpool-II", otherLeaderNode->nodeName))); return 0; } - /* Decide which node should stay as master */ - if (otherMasterNode->escalated != g_cluster.localNode->escalated) + /* Decide which node should stay as leader */ + if (otherLeaderNode->escalated != g_cluster.localNode->escalated) { - if (otherMasterNode->escalated == true && g_cluster.localNode->escalated == false) + if (otherLeaderNode->escalated == true && g_cluster.localNode->escalated == false) { - /* remote node stays as the master */ + /* remote node stays as the leader */ ereport(LOG, - (errmsg("remote node:\"%s\" is best suitable to stay as master because it is escalated and I am not", - otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" is best suited to stay as leader because it is escalated and I am not", + otherLeaderNode->nodeName))); return -1; } else { - /* local node stays as master */ + /* local node stays as leader */ ereport(LOG, - (errmsg("remote node:\"%s\" should step down from master because it is not escalated", - otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" should step down as leader because it is not escalated", + otherLeaderNode->nodeName))); return 1; } } - else if (otherMasterNode->quorum_status != g_cluster.quorum_status) + else if (otherLeaderNode->quorum_status != g_cluster.quorum_status) { - if (otherMasterNode->quorum_status > g_cluster.quorum_status) + if (otherLeaderNode->quorum_status >
g_cluster.quorum_status) { /* quorum of remote node is in better state */ ereport(LOG, - (errmsg("remote node:\"%s\" is best suitable to stay as master because it holds the quorum" - ,otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" is best suited to stay as leader because it holds the quorum" + ,otherLeaderNode->nodeName))); return -1; } else { - /* local node stays as master */ + /* local node stays as leader */ ereport(LOG, - (errmsg("remote node:\"%s\" should step down from master because it does not hold the quorum" - ,otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" should step down as leader because it does not hold the quorum" + ,otherLeaderNode->nodeName))); return 1; } } - else if (otherMasterNode->standby_nodes_count != g_cluster.clusterMasterInfo.standby_nodes_count) + else if (otherLeaderNode->standby_nodes_count != g_cluster.clusterLeaderInfo.standby_nodes_count) { - if (otherMasterNode->standby_nodes_count > g_cluster.clusterMasterInfo.standby_nodes_count) + if (otherLeaderNode->standby_nodes_count > g_cluster.clusterLeaderInfo.standby_nodes_count) { /* remote node has more alive nodes */ ereport(LOG, - (errmsg("remote node:\"%s\" is best suitable to stay as master because it has more connected standby nodes" - ,otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" is best suited to stay as leader because it has more connected standby nodes" + ,otherLeaderNode->nodeName))); return -1; } else { - /* local node stays as master */ + /* local node stays as leader */ ereport(LOG, - (errmsg("remote node:\"%s\" should step down from master because we have more connected standby nodes" - ,otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" should step down as leader because we have more connected standby nodes" + ,otherLeaderNode->nodeName))); return 1; } } else /* decide on which node is the older mater */ { - if (otherMasterNode->current_state_time.tv_sec < g_cluster.localNode->current_state_time.tv_sec) + if (otherLeaderNode->current_state_time.tv_sec < g_cluster.localNode->current_state_time.tv_sec) { /* remote node has more alive nodes */ ereport(LOG, - (errmsg("remote node:\"%s\" is best suitable to stay as master because it is the older master" - ,otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" is best suited to stay as leader because it is the older leader" + ,otherLeaderNode->nodeName))); return -1; } else { - /* local node should keep the master status */ + /* local node should keep the leader status */ ereport(LOG, - (errmsg("remote node:\"%s\" should step down from master because we are the older master" - ,otherMasterNode->nodeName))); + (errmsg("remote node:\"%s\" should step down as leader because we are the older leader" + ,otherLeaderNode->nodeName))); return 1; } @@ -6510,42 +6510,42 @@ I_am_master_and_cluser_in_split_brain(WatchdogNode * otherMasterNode) } static void -handle_split_brain(WatchdogNode * otherMasterNode, WDPacketData * pkt) +handle_split_brain(WatchdogNode * otherLeaderNode, WDPacketData * pkt) { - int decide_master = I_am_master_and_cluser_in_split_brain(otherMasterNode); + int decide_leader = I_am_leader_and_cluser_in_split_brain(otherLeaderNode); - if (decide_master == 0) + if (decide_leader == 0) { /* * we are not able to decide which should be the best candidate to - * stay as master/coordinator node This could also happen if the + * stay as leader/coordinator node. This could also happen if the * remote node is using the older version of Pgpool-II which send the * empty beacon
messages. */ ereport(LOG, - (errmsg("We are in split brain, and not able to decide the best candidate for master/coordinator"), + (errmsg("We are in split brain, and are not able to decide the best candidate for leader/coordinator"), errdetail("re-initializing the local watchdog cluster state"))); - send_cluster_service_message(otherMasterNode, pkt, CLUSTER_NEEDS_ELECTION); + send_cluster_service_message(otherLeaderNode, pkt, CLUSTER_NEEDS_ELECTION); set_state(WD_JOINING); } - else if (decide_master == -1) + else if (decide_leader == -1) { - /* Remote node is the best candidate for the master node */ + /* Remote node is the best candidate for the leader node */ ereport(LOG, - (errmsg("We are in split brain, and \"%s\" node is the best candidate for master/coordinator" - ,otherMasterNode->nodeName), + (errmsg("We are in split brain, and node \"%s\" is the best candidate for leader/coordinator" + ,otherLeaderNode->nodeName), errdetail("re-initializing the local watchdog cluster state"))); - /* broadcast the message about I am not the true master node */ - send_cluster_service_message(NULL, pkt, CLUSTER_IAM_NOT_TRUE_MASTER); + /* broadcast the message that I am not the true leader node */ + send_cluster_service_message(NULL, pkt, CLUSTER_IAM_NOT_TRUE_LEADER); set_state(WD_JOINING); } else { - /* I am the best candidate for the master node */ + /* I am the best candidate for the leader node */ ereport(LOG, - (errmsg("We are in split brain, and I am the best candidate for master/coordinator"), - errdetail("asking the remote node \"%s\" to step down", otherMasterNode->nodeName))); - send_cluster_service_message(otherMasterNode, pkt, CLUSTER_IAM_TRUE_MASTER); + (errmsg("We are in split brain, and I am the best candidate for leader/coordinator"), + errdetail("asking the remote node \"%s\" to step down", otherLeaderNode->nodeName))); + send_cluster_service_message(otherLeaderNode, pkt, CLUSTER_IAM_TRUE_LEADER); } } @@ -6583,7 +6583,7 @@ start_escalated_node(void) ereport(LOG, (errmsg("escalation process started with PID:%d", g_cluster.escalation_pid))); if (strlen(g_cluster.localNode->delegate_ip) > 0) - g_cluster.clusterMasterInfo.holding_vip = true; + g_cluster.clusterLeaderInfo.holding_vip = true; } else { @@ -6617,7 +6617,7 @@ resign_from_escalated_node(void) (errmsg("escalation process does not exited in time"), errdetail("starting the de-escalation anyway"))); g_cluster.de_escalation_pid = fork_plunging_process(); - g_cluster.clusterMasterInfo.holding_vip = false; + g_cluster.clusterLeaderInfo.holding_vip = false; g_cluster.localNode->escalated = false; reset_watchdog_node_escalated(); } @@ -6697,7 +6697,7 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD switch (event) { case WD_EVENT_WD_STATE_CHANGED: - send_cluster_command(WD_MASTER_NODE, WD_JOIN_COORDINATOR_MESSAGE, 5); + send_cluster_command(WD_LEADER_NODE, WD_JOIN_COORDINATOR_MESSAGE, 5); /* Also reset my priority as per the original configuration */ g_cluster.localNode->wd_priority = pool_config->wd_priority; set_timeout(BEACON_MESSAGE_INTERVAL_SECONDS); @@ -6710,9 +6710,9 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD case WD_EVENT_WD_STATE_REQUIRE_RELOAD: ereport(LOG, - (errmsg("re-sending join coordinator message to master node: \"%s\"", WD_MASTER_NODE->nodeName))); + (errmsg("re-sending join coordinator message to leader node: \"%s\"", WD_LEADER_NODE->nodeName))); - send_cluster_command(WD_MASTER_NODE, WD_JOIN_COORDINATOR_MESSAGE, 5); +
send_cluster_command(WD_LEADER_NODE, WD_JOIN_COORDINATOR_MESSAGE, 5); break; case WD_EVENT_COMMAND_FINISHED: @@ -6726,7 +6726,7 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD ereport(LOG, (errmsg("successfully joined the watchdog cluster as standby node"), - errdetail("our join coordinator request is accepted by cluster leader node \"%s\"", WD_MASTER_NODE->nodeName))); + errdetail("our join coordinator request is accepted by cluster leader node \"%s\"", WD_LEADER_NODE->nodeName))); } else { @@ -6734,10 +6734,10 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD (errmsg("our join coordinator is rejected by node \"%s\"", wdNode->nodeName), errhint("rejoining the cluster."))); - if (WD_MASTER_NODE->has_lost_us) + if (WD_LEADER_NODE->has_lost_us) { ereport(LOG, - (errmsg("master node \"%s\" thinks we are lost, and \"%s\" is not letting us join",WD_MASTER_NODE->nodeName,wdNode->nodeName), + (errmsg("leader node \"%s\" thinks we are lost, and \"%s\" is not letting us join",WD_LEADER_NODE->nodeName,wdNode->nodeName), errhint("please verify the watchdog life-check and network is working properly"))); set_state(WD_NETWORK_ISOLATION); } @@ -6757,10 +6757,10 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD * removed from it's standby list * So re-Join the cluster */ - if (WD_MASTER_NODE == wdNode) + if (WD_LEADER_NODE == wdNode) { ereport(LOG, - (errmsg("we are lost on the master node \"%s\"",wdNode->nodeName))); + (errmsg("we are lost on the leader node \"%s\"",wdNode->nodeName))); set_state(WD_JOINING); } } @@ -6772,10 +6772,10 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD * we have lost one remote connected node check if the node * was coordinator */ - if (WD_MASTER_NODE == NULL) + if (WD_LEADER_NODE == NULL) { ereport(LOG, - (errmsg("We have lost the cluster master node \"%s\"", wdNode->nodeName))); + (errmsg("We have lost the cluster leader node \"%s\"", wdNode->nodeName))); set_state(WD_JOINING); } } @@ -6790,10 +6790,10 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD /* In case we received the ADD node message from * our coordinator. 
Reset the cluster state */ - if (wdNode == WD_MASTER_NODE) + if (wdNode == WD_LEADER_NODE) { ereport(LOG, - (errmsg("received ADD NODE message from the master node \"%s\"", wdNode->nodeName), + (errmsg("received ADD NODE message from the leader node \"%s\"", wdNode->nodeName), errdetail("re-joining the cluster"))); set_state(WD_JOINING); } @@ -6809,7 +6809,7 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD case WD_STAND_FOR_COORDINATOR_MESSAGE: { - if (WD_MASTER_NODE == NULL) + if (WD_LEADER_NODE == NULL) { reply_with_minimal_message(wdNode, WD_ACCEPT_MESSAGE, pkt); set_state(WD_PARTICIPATE_IN_ELECTION); @@ -6817,27 +6817,27 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD else { ereport(LOG, - (errmsg("We are connected to master node \"%s\" and another node \"%s\" is trying to become a master",WD_MASTER_NODE->nodeName, wdNode->nodeName))); + (errmsg("We are connected to leader node \"%s\" and another node \"%s\" is trying to become a leader",WD_LEADER_NODE->nodeName, wdNode->nodeName))); reply_with_minimal_message(wdNode, WD_ERROR_MESSAGE, pkt); - /* Ask master to re-send its node info */ - send_message_of_type(WD_MASTER_NODE, WD_REQ_INFO_MESSAGE, NULL); + /* Ask leader to re-send its node info */ + send_message_of_type(WD_LEADER_NODE, WD_REQ_INFO_MESSAGE, NULL); } } break; case WD_DECLARE_COORDINATOR_MESSAGE: { - if (wdNode != WD_MASTER_NODE) + if (wdNode != WD_LEADER_NODE) { /* - * we already have a master node and we got a - * new node trying to be master + * we already have a leader node and we got a + * new node trying to be leader */ ereport(LOG, - (errmsg("We are connected to master node \"%s\" and another node \"%s\" is trying to declare itself as a master",WD_MASTER_NODE->nodeName, wdNode->nodeName))); + (errmsg("We are connected to leader node \"%s\" and another node \"%s\" is trying to declare itself as a leader",WD_LEADER_NODE->nodeName, wdNode->nodeName))); reply_with_minimal_message(wdNode, WD_ERROR_MESSAGE, pkt); - /* Ask master to re-send its node info */ - send_message_of_type(WD_MASTER_NODE, WD_REQ_INFO_MESSAGE, NULL); + /* Ask leader to re-send its node info */ + send_message_of_type(WD_LEADER_NODE, WD_REQ_INFO_MESSAGE, NULL); } } @@ -6849,11 +6849,11 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD * if the message is received from coordinator * reply with info, otherwise reject */ - if (wdNode != WD_MASTER_NODE) + if (wdNode != WD_LEADER_NODE) { ereport(LOG, (errmsg("\"%s\" is our coordinator node, but \"%s\" is also announcing as a coordinator", - WD_MASTER_NODE->nodeName, wdNode->nodeName), + WD_LEADER_NODE->nodeName, wdNode->nodeName), errdetail("broadcasting the cluster in split-brain message"))); send_cluster_service_message(NULL, pkt, CLUSTER_IN_SPLIT_BRAIN); @@ -6879,35 +6879,35 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD /* * before returning from the function make sure that we are connected with - * the master node + * the leader node */ - if (WD_MASTER_NODE) + if (WD_LEADER_NODE) { struct timeval currTime; gettimeofday(&currTime, NULL); - int last_rcv_sec = WD_TIME_DIFF_SEC(currTime, WD_MASTER_NODE->last_rcv_time); + int last_rcv_sec = WD_TIME_DIFF_SEC(currTime, WD_LEADER_NODE->last_rcv_time); if (last_rcv_sec >= (3 * BEACON_MESSAGE_INTERVAL_SECONDS)) { - /* we have missed atleast two beacons from master node */ + /* we have missed atleast two beacons from leader node */ ereport(WARNING, - (errmsg("we have not 
received a beacon message from master node \"%s\" and it has not replied to our info request", - WD_MASTER_NODE->nodeName), + (errmsg("we have not received a beacon message from leader node \"%s\" and it has not replied to our info request", + WD_LEADER_NODE->nodeName), errdetail("re-initializing the cluster"))); set_state(WD_JOINING); } else if (last_rcv_sec >= (2 * BEACON_MESSAGE_INTERVAL_SECONDS)) { /* - * We have not received a last becacon from master ask for the - * node info from master node + * We have not received the last beacon from the leader, ask for the + * node info from the leader node */ ereport(WARNING, - (errmsg("we have not received a beacon message from master node \"%s\"", - WD_MASTER_NODE->nodeName), - errdetail("requesting info message from master node"))); - send_message_of_type(WD_MASTER_NODE, WD_REQ_INFO_MESSAGE, NULL); + (errmsg("we have not received a beacon message from leader node \"%s\"", + WD_LEADER_NODE->nodeName), + errdetail("requesting info message from leader node"))); + send_message_of_type(WD_LEADER_NODE, WD_REQ_INFO_MESSAGE, NULL); } } return 0; @@ -6930,11 +6930,11 @@ update_quorum_status(void) { int quorum_status = g_cluster.quorum_status; - if (g_cluster.clusterMasterInfo.standby_nodes_count > get_minimum_remote_nodes_required_for_quorum()) + if (g_cluster.clusterLeaderInfo.standby_nodes_count > get_minimum_remote_nodes_required_for_quorum()) { g_cluster.quorum_status = 1; } - else if (g_cluster.clusterMasterInfo.standby_nodes_count == get_minimum_remote_nodes_required_for_quorum()) + else if (g_cluster.clusterLeaderInfo.standby_nodes_count == get_minimum_remote_nodes_required_for_quorum()) { if (g_cluster.remoteNodeCount % 2 != 0) { @@ -7374,7 +7374,7 @@ verify_pool_configurations(WatchdogNode * wdNode, POOL_CONFIG * config) WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, failover_if_affected_tuples_mismatch); WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, failover_on_backend_error); WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, replicate_select); - WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, master_slave_mode); + WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, native_replication_mode); WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, connection_cache); WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, insert_lock); WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_BOOL(config, wdNode, memory_cache_enabled); @@ -7577,7 +7577,7 @@ check_and_report_IPC_authentication(WDCommandData * ipcCommand) case WD_IPC_FAILOVER_COMMAND: case WD_IPC_ONLINE_RECOVERY_COMMAND: case WD_EXECUTE_CLUSTER_COMMAND: - case WD_GET_MASTER_DATA_REQUEST: + case WD_GET_LEADER_DATA_REQUEST: /* only allowed internaly. */ internal_client_only = true; break; @@ -7778,27 +7778,27 @@ send_command_packet_to_remote_nodes(WDCommandData * ipcCommand, bool source_incl } static void -set_cluster_master_node(WatchdogNode * wdNode) +set_cluster_leader_node(WatchdogNode * wdNode) { - if (WD_MASTER_NODE != wdNode) + if (WD_LEADER_NODE != wdNode) { if (wdNode == NULL) ereport(LOG, - (errmsg("removing the %s node \"%s\" from watchdog cluster master", - (g_cluster.localNode == WD_MASTER_NODE) ? "local" : "remote", - WD_MASTER_NODE->nodeName))); + (errmsg("removing the %s node \"%s\" from watchdog cluster leader", + (g_cluster.localNode == WD_LEADER_NODE) ?
"local" : "remote", + WD_LEADER_NODE->nodeName))); else ereport(LOG, - (errmsg("setting the %s node \"%s\" as watchdog cluster master", + (errmsg("setting the %s node \"%s\" as watchdog cluster leader", (g_cluster.localNode == wdNode) ? "local" : "remote", wdNode->nodeName))); - g_cluster.clusterMasterInfo.masterNode = wdNode; + g_cluster.clusterLeaderInfo.leaderNode = wdNode; } } -static WatchdogNode * getMasterWatchdogNode(void) +static WatchdogNode * getLeaderWatchdogNode(void) { - return g_cluster.clusterMasterInfo.masterNode; + return g_cluster.clusterLeaderInfo.leaderNode; } static int @@ -7809,24 +7809,24 @@ standby_node_join_cluster(WatchdogNode * wdNode) int i; /* First check if the node is already in the List */ - for (i = 0; i < g_cluster.clusterMasterInfo.standby_nodes_count; i++) + for (i = 0; i < g_cluster.clusterLeaderInfo.standby_nodes_count; i++) { - WatchdogNode *node = g_cluster.clusterMasterInfo.standbyNodes[i]; + WatchdogNode *node = g_cluster.clusterLeaderInfo.standbyNodes[i]; if (node && node == wdNode) { /* The node is already in the standby list */ - return g_cluster.clusterMasterInfo.standby_nodes_count; + return g_cluster.clusterLeaderInfo.standby_nodes_count; } } /* okay the node is not in the list */ ereport(LOG, (errmsg("adding watchdog node \"%s\" to the standby list", wdNode->nodeName))); - g_cluster.clusterMasterInfo.standbyNodes[g_cluster.clusterMasterInfo.standby_nodes_count] = wdNode; - g_cluster.clusterMasterInfo.standby_nodes_count++; + g_cluster.clusterLeaderInfo.standbyNodes[g_cluster.clusterLeaderInfo.standby_nodes_count] = wdNode; + g_cluster.clusterLeaderInfo.standby_nodes_count++; } - g_cluster.localNode->standby_nodes_count = g_cluster.clusterMasterInfo.standby_nodes_count; - return g_cluster.clusterMasterInfo.standby_nodes_count; + g_cluster.localNode->standby_nodes_count = g_cluster.clusterLeaderInfo.standby_nodes_count; + return g_cluster.clusterLeaderInfo.standby_nodes_count; } static int @@ -7836,19 +7836,19 @@ standby_node_left_cluster(WatchdogNode * wdNode) { int i; bool removed = false; - int standby_nodes_count = g_cluster.clusterMasterInfo.standby_nodes_count; + int standby_nodes_count = g_cluster.clusterLeaderInfo.standby_nodes_count; for (i = 0; i < standby_nodes_count; i++) { - WatchdogNode *node = g_cluster.clusterMasterInfo.standbyNodes[i]; + WatchdogNode *node = g_cluster.clusterLeaderInfo.standbyNodes[i]; if (node) { if (removed) { /* move this to previous index */ - g_cluster.clusterMasterInfo.standbyNodes[i - 1] = node; - g_cluster.clusterMasterInfo.standbyNodes[i] = NULL; + g_cluster.clusterLeaderInfo.standbyNodes[i - 1] = node; + g_cluster.clusterLeaderInfo.standbyNodes[i] = NULL; } else if (node == wdNode) { @@ -7858,15 +7858,15 @@ standby_node_left_cluster(WatchdogNode * wdNode) ereport(LOG, (errmsg("removing watchdog node \"%s\" from the standby list", wdNode->nodeName))); - g_cluster.clusterMasterInfo.standbyNodes[i] = NULL; - g_cluster.clusterMasterInfo.standby_nodes_count--; + g_cluster.clusterLeaderInfo.standbyNodes[i] = NULL; + g_cluster.clusterLeaderInfo.standby_nodes_count--; removed = true; } } } } - g_cluster.localNode->standby_nodes_count = g_cluster.clusterMasterInfo.standby_nodes_count; - return g_cluster.clusterMasterInfo.standby_nodes_count; + g_cluster.localNode->standby_nodes_count = g_cluster.clusterLeaderInfo.standby_nodes_count; + return g_cluster.clusterLeaderInfo.standby_nodes_count; } static void @@ -7876,12 +7876,12 @@ clear_standby_nodes_list(void) ereport(DEBUG1, (errmsg("removing all watchdog 
nodes from the standby list"), - errdetail("standby list contains %d nodes", g_cluster.clusterMasterInfo.standby_nodes_count))); + errdetail("standby list contains %d nodes", g_cluster.clusterLeaderInfo.standby_nodes_count))); for (i = 0; i < g_cluster.remoteNodeCount; i++) { - g_cluster.clusterMasterInfo.standbyNodes[i] = NULL; + g_cluster.clusterLeaderInfo.standbyNodes[i] = NULL; } - g_cluster.clusterMasterInfo.standby_nodes_count = 0; + g_cluster.clusterLeaderInfo.standby_nodes_count = 0; g_cluster.localNode->standby_nodes_count = 0; } @@ -7931,7 +7931,7 @@ static void update_missed_beacon_count(WDCommandData* ipcCommand, bool clear) * Node down request file. In the file, each line consists of watchdog * debug command. The possible commands are same as the defines below * for example to stop Pgpool-II from sending the reply to beacon messages - * from the master node write DO_NOT_REPLY_TO_BEACON in watchdog_debug_requests + * from the leader node write DO_NOT_REPLY_TO_BEACON in watchdog_debug_requests * * * echo "DO_NOT_REPLY_TO_BEACON" > pgpool_logdir/watchdog_debug_requests diff --git a/src/watchdog/wd_internal_commands.c b/src/watchdog/wd_internal_commands.c index e85b49cda..f5dacaaa4 100644 --- a/src/watchdog/wd_internal_commands.c +++ b/src/watchdog/wd_internal_commands.c @@ -104,16 +104,16 @@ wd_ipc_initialize_data(void) /* * function gets the PG backend status of all attached nodes from - * the master watchdog node. + * the leader watchdog node. */ WDPGBackendStatus * -get_pg_backend_status_from_master_wd_node(void) +get_pg_backend_status_from_leader_wd_node(void) { unsigned int *shared_key = get_ipc_shared_key(); char *data = get_data_request_json(WD_DATE_REQ_PG_BACKEND_DATA, shared_key ? *shared_key : 0, pool_config->wd_authkey); - WDIPCCmdResult *result = issue_command_to_watchdog(WD_GET_MASTER_DATA_REQUEST, + WDIPCCmdResult *result = issue_command_to_watchdog(WD_GET_LEADER_DATA_REQUEST, WD_DEFAULT_IPC_COMMAND_TIMEOUT, data, strlen(data), true); @@ -122,14 +122,14 @@ get_pg_backend_status_from_master_wd_node(void) if (result == NULL) { ereport(WARNING, - (errmsg("get backend node status from master watchdog failed"), + (errmsg("get backend node status from leader watchdog failed"), errdetail("issue command to watchdog returned NULL"))); return NULL; } if (result->type == WD_IPC_CMD_CLUSTER_IN_TRAN) { ereport(WARNING, - (errmsg("get backend node status from master watchdog failed"), + (errmsg("get backend node status from leader watchdog failed"), errdetail("watchdog cluster is not in stable state"), errhint("try again when the cluster is fully initialized"))); FreeCmdResult(result); @@ -138,7 +138,7 @@ get_pg_backend_status_from_master_wd_node(void) else if (result->type == WD_IPC_CMD_TIMEOUT) { ereport(WARNING, - (errmsg("get backend node status from master watchdog failed"), + (errmsg("get backend node status from leader watchdog failed"), errdetail("ipc command timeout"))); FreeCmdResult(result); return NULL; @@ -149,7 +149,7 @@ get_pg_backend_status_from_master_wd_node(void) /* * Watchdog returns the zero length data when the node itself is a - * master watchdog node + * leader watchdog node */ if (result->length <= 0) { @@ -165,7 +165,7 @@ get_pg_backend_status_from_master_wd_node(void) } ereport(WARNING, - (errmsg("get backend node status from master watchdog failed"))); + (errmsg("get backend node status from leader watchdog failed"))); FreeCmdResult(result); return NULL; } @@ -432,7 +432,7 @@ wd_issue_failover_command(char *func_name, int *node_id_set, int count, 
unsigned * now watchdog can respond to the request in following ways. * * 1 - It can tell the caller to procees with failover. This - * happens when the current node is the master watchdog node. + * happens when the current node is the leader watchdog node. * * 2 - It can tell the caller to failover not allowed * this happens when either cluster does not have the quorum diff --git a/src/watchdog/wd_json_data.c b/src/watchdog/wd_json_data.c index 9c8ff044f..69719099d 100644 --- a/src/watchdog/wd_json_data.c +++ b/src/watchdog/wd_json_data.c @@ -76,7 +76,7 @@ get_pool_config_from_json(char *json_data, int data_len) goto ERROR_EXIT; if (json_get_bool_value_for_key(root, "replicate_select", &config->replicate_select)) goto ERROR_EXIT; - if (json_get_bool_value_for_key(root, "master_slave_mode", &config->master_slave_mode)) + if (json_get_bool_value_for_key(root, "native_replication_mode", &config->native_replication_mode)) goto ERROR_EXIT; if (json_get_bool_value_for_key(root, "connection_cache", &config->connection_cache)) goto ERROR_EXIT; @@ -108,7 +108,7 @@ get_pool_config_from_json(char *json_data, int data_len) goto ERROR_EXIT; if (json_get_int_value_for_key(root, "pgpool_node_id", &config->pgpool_node_id)) goto ERROR_EXIT; - if (json_get_int_value_for_key(root, "master_slave_sub_mode", (int *) &config->master_slave_sub_mode)) + if (json_get_int_value_for_key(root, "native_replication_sub_mode", (int *) &config->native_replication_sub_mode)) goto ERROR_EXIT; if (json_get_bool_value_for_key(root, "failover_when_quorum_exists", &config->failover_when_quorum_exists)) @@ -192,7 +192,7 @@ get_pool_config_json(void) jw_put_bool(jNode, "replication_stop_on_mismatch", pool_config->replication_stop_on_mismatch); jw_put_bool(jNode, "failover_if_affected_tuples_mismatch", pool_config->failover_if_affected_tuples_mismatch); jw_put_bool(jNode, "replicate_select", pool_config->replicate_select); - jw_put_bool(jNode, "master_slave_mode", pool_config->master_slave_mode); + jw_put_bool(jNode, "native_replication_mode", pool_config->native_replication_mode); jw_put_bool(jNode, "connection_cache", pool_config->connection_cache); jw_put_int(jNode, "health_check_timeout", pool_config->health_check_timeout); jw_put_int(jNode, "health_check_period", pool_config->health_check_period); @@ -208,7 +208,7 @@ get_pool_config_json(void) jw_put_bool(jNode, "clear_memqcache_on_escalation", pool_config->clear_memqcache_on_escalation); jw_put_int(jNode, "wd_priority", pool_config->wd_priority); jw_put_int(jNode, "pgpool_node_id", pool_config->pgpool_node_id); - jw_put_int(jNode, "master_slave_sub_mode", pool_config->master_slave_sub_mode); + jw_put_int(jNode, "native_replication_sub_mode", pool_config->native_replication_sub_mode); jw_put_bool(jNode, "failover_when_quorum_exists", pool_config->failover_when_quorum_exists); jw_put_bool(jNode, "failover_require_consensus", pool_config->failover_require_consensus); -- 2.39.5
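Reviewer note (not part of the applied patch): the renamed I_am_leader_and_cluser_in_split_brain() arbitrates a split-brain by comparing the two contending coordinators on four criteria in a fixed order: escalation state first, then quorum state, then the number of connected standbys, and finally the age of the coordinator state, with the older leader winning ties. Below is a minimal standalone C sketch of that decision order; the types, field names, and values are hypothetical stand-ins, not the actual pgpool-II WatchdogNode structures.

/*
 * Illustrative sketch only -- hypothetical types and names, not the real
 * pgpool-II API. arbitrate() mirrors the decision order in the patch and
 * returns -1 if the remote contender should stay leader, 1 if the local
 * node should, and 0 if no decision can be made.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	bool		escalated;		/* node has acquired the delegate/virtual IP */
	int			quorum_status;	/* -1 = lost, 0 = on the edge, 1 = held */
	int			standby_count;	/* standbys connected to this coordinator */
	long		state_time;		/* when the node entered the coordinator state */
} Contender;

static int
arbitrate(const Contender *remote, const Contender *local)
{
	if (remote->state_time == 0)
		return 0;				/* remote runs an old version: not enough data */
	if (remote->escalated != local->escalated)
		return remote->escalated ? -1 : 1;	/* escalated node wins */
	if (remote->quorum_status != local->quorum_status)
		return (remote->quorum_status > local->quorum_status) ? -1 : 1;
	if (remote->standby_count != local->standby_count)
		return (remote->standby_count > local->standby_count) ? -1 : 1;
	/* last resort: the longer-standing (older) leader wins */
	return (remote->state_time < local->state_time) ? -1 : 1;
}

int
main(void)
{
	Contender	remote = {true, 1, 2, 1000};
	Contender	local = {false, 1, 2, 900};

	/* remote is escalated and local is not, so remote should stay leader */
	printf("decision: %d\n", arbitrate(&remote, &local));
	return 0;
}

Checking escalation first means the node that already holds the virtual IP keeps the leadership when everything else differs only there, which plausibly avoids an unnecessary VIP hand-off while the cluster recovers from the split-brain.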