From e9ad137732f4515e8509168daba5a0eed4769d38 Mon Sep 17 00:00:00 2001
From: Bo Peng
Date: Tue, 22 Sep 2020 23:02:37 +0900
Subject: [PATCH] Doc: update "Pgpool-II + Watchdog Setup Example".

---
 doc.ja/src/sgml/example-cluster.sgml |   8 +-
 doc/src/sgml/example-cluster.sgml    | 139 ++++++++++++++-------------
 2 files changed, 74 insertions(+), 73 deletions(-)

diff --git a/doc.ja/src/sgml/example-cluster.sgml b/doc.ja/src/sgml/example-cluster.sgml
index b46cc4212..d6ebb3fa0 100644
--- a/doc.ja/src/sgml/example-cluster.sgml
+++ b/doc.ja/src/sgml/example-cluster.sgml
@@ -606,7 +606,7 @@ PGHOME=/usr/pgsql-13
 PCPコマンドを使用するにはユーザ認証が必要になるので、ユーザ名とmd5ハッシュに変換されたパスワードを"username:encrypted password"の形式でpcp.confファイルに設定します。
- follow_primary.shで"pgpool" userがPCP_USERに指定している場合、
+ follow_primary.shでpgpoolユーザがPCP_USERに指定されている場合、
 # cat /etc/pgpool-II/follow_primary.sh
 ...
 PCP_USER=pgpool
 ...
- 以下のコマンドを実行し、ハッシュ化されたpgpoolユーザのパスワードエントリを/etc/pgpool-II/pcp.confに追加します。
+ 以下のようにコマンドを利用し、ハッシュ化されたpgpoolユーザのパスワードエントリを/etc/pgpool-II/pcp.confに追加します。
 [全サーバ]# echo 'pgpool:'`pg_md5 PCPコマンドパスワード` >> /etc/pgpool-II/pcp.conf
@@ -625,7 +625,7 @@ PCP_USER=pgpool
 [全サーバ]# su - postgres
-[全サーバ]$ echo 'localhost:9898:pgpool:<pgpool user's password>' > ~/.pcppass
+[全サーバ]$ echo 'localhost:9898:pgpool:<pgpool user password>' > ~/.pcppass
 [全サーバ]$ chmod 600 ~/.pcppass
@@ -1018,7 +1018,7 @@ OPTS=" -D -n"
 次にプライマリであるserver1のPostgreSQLを停止し、フェイルオーバするかどうか確認してみます。
- [server1]$ pg_ctl -D /var/lib/pgsql/11/data -m immediate stop
+ [server1]$ pg_ctl -D /var/lib/pgsql/13/data -m immediate stop
 ノード1を停止後、フェイルオーバが発生し、server2がプライマリに昇格したことを確認します。
diff --git a/doc/src/sgml/example-cluster.sgml b/doc/src/sgml/example-cluster.sgml
index 2af82df19..ba5c167d2 100644
--- a/doc/src/sgml/example-cluster.sgml
+++ b/doc/src/sgml/example-cluster.sgml
@@ -557,7 +557,7 @@ port = 9999
                                    # Port number for backend 0
 backend_weight0 = 1
                                    # Weight for backend 0 (only in load balancing mode)
- backend_data_directory0 = '/var/lib/pgsql/11/data'
+ backend_data_directory0 = '/var/lib/pgsql/13/data'
                                    # Data directory for backend 0
 backend_flag0 = 'ALLOW_TO_FAILOVER'
                                    # Controls various backend behavior
@@ -644,7 +644,7 @@ PGHOME=/usr/pgsql-13
 in format "username:encrypted password".
- if "pgpool" user is specified in PCP_USER in follow_primary.sh,
+ if the pgpool user is specified in PCP_USER in follow_primary.sh,
 # cat /etc/pgpool-II/follow_primary.sh
@@ -653,19 +653,19 @@ PCP_USER=pgpool
 ...
- then we create the encrypted password entry for pgpool user as below:
+ then we use the following command to create the encrypted password entry for the pgpool user:
 [all servers]# echo 'pgpool:'`pg_md5 PCP password` >> /etc/pgpool-II/pcp.conf
- Since follow_primary_command script has to execute PCP command without entering the
+ Since the follow_primary.sh script must execute PCP commands without entering a
 password, we need to create .pcppass in the home directory of
 Pgpool-II startup user (postgres user) on each server.
 [all servers]# su - postgres
-[all servers]$ echo 'localhost:9898:pgpool:<pgpool user's password>' > ~/.pcppass
+[all servers]$ echo 'localhost:9898:pgpool:<pgpool user password>' > ~/.pcppass
 [all servers]$ chmod 600 ~/.pcppass
@@ -698,7 +698,7 @@ recovery_1st_stage_command = 'recovery_1st_stage'
 [server1]# cp -p /etc/pgpool-II/recovery_1st_stage.sample /var/lib/pgsql/13/data/recovery_1st_stage
 [server1]# cp -p /etc/pgpool-II/pgpool_remote_start.sample /var/lib/pgsql/13/data/pgpool_remote_start
-[server1]# chown postgres:postgres /var/lib/pgsql/11/data/{recovery_1st_stage,pgpool_remote_start}
+[server1]# chown postgres:postgres /var/lib/pgsql/13/data/{recovery_1st_stage,pgpool_remote_start}
 Basically, it should work if you change PGHOME according to the PostgreSQL installation directory.
@@ -816,8 +816,9 @@ delegate_IP = '192.168.137.150'
- If installed from RPM, the postgres user has been configured to run
- ip/arping via sudo without a password.
+ If Pgpool-II is installed using RPM, the postgres
+ user has been configured to run ip/arping via sudo without
+ a password.
 postgres ALL=NOPASSWD: /sbin/ip
 postgres ALL=NOPASSWD: /usr/sbin/arping
@@ -1017,16 +1018,16 @@ OPTS=" -D -n"
 cluster directory of PostgreSQL primary server (server1).
- # pcp_recovery_node -h 192.168.137.150 -p 9898 -U pgpool -n 1
- Password:
- pcp_recovery_node -- Command Successful
+# pcp_recovery_node -h 192.168.137.150 -p 9898 -U pgpool -n 1
+Password:
+pcp_recovery_node -- Command Successful
- # pcp_recovery_node -h 192.168.137.150 -p 9898 -U pgpool -n 2
- Password:
- pcp_recovery_node -- Command Successful
+# pcp_recovery_node -h 192.168.137.150 -p 9898 -U pgpool -n 2
+Password:
+pcp_recovery_node -- Command Successful
- After executing pcp_recovery_node command,
+ After executing pcp_recovery_node command,
 verify that server2 and server3 are started as PostgreSQL standby servers.
@@ -1048,13 +1049,13 @@ OPTS=" -D -n"
 Confirm the watchdog status by using pcp_watchdog_info. The Pgpool-II server which is started first runs as LEADER.
- # pcp_watchdog_info -h 192.168.137.150 -p 9898 -U pgpool
- Password:
- 3 YES server1:9999 Linux server1 server1
+# pcp_watchdog_info -h 192.168.137.150 -p 9898 -U pgpool
+Password:
+3 YES server1:9999 Linux server1 server1
- server1:9999 Linux server1 server1 9999 9000 4 LEADER #The Pgpool-II server started first becomes "LEADER".
- server2:9999 Linux server2 server2 9999 9000 7 STANDBY #run as standby
- server3:9999 Linux server3 server3 9999 9000 7 STANDBY #run as standby
+server1:9999 Linux server1 server1 9999 9000 4 LEADER #The Pgpool-II server started first becomes "LEADER".
+server2:9999 Linux server2 server2 9999 9000 7 STANDBY #run as standby
+server3:9999 Linux server3 server3 9999 9000 7 STANDBY #run as standby
 Stop active server server1, then server2 or
 server3 will be promoted to the new leader. To stop
 Pgpool-II, we can stop the Pgpool-II
 service or shutdown the whole system. Here, we stop the Pgpool-II service.
- [server1]# systemctl stop pgpool.service
+[server1]# systemctl stop pgpool.service
- # pcp_watchdog_info -p 9898 -h 192.168.137.150 -U pgpool
- Password:
- 3 YES server2:9999 Linux server2 server2
+# pcp_watchdog_info -p 9898 -h 192.168.137.150 -U pgpool
+Password:
+3 YES server2:9999 Linux server2 server2
- server2:9999 Linux server2 server2 9999 9000 4 LEADER #server2 is promoted to LEADER
- server1:9999 Linux server1 server1 9999 9000 10 SHUTDOWN #server1 is stopped
- server3:9999 Linux server3 server3 9999 9000 7 STANDBY #server3 runs as STANDBY
+server2:9999 Linux server2 server2 9999 9000 4 LEADER #server2 is promoted to LEADER
+server1:9999 Linux server1 server1 9999 9000 10 SHUTDOWN #server1 is stopped
+server3:9999 Linux server3 server3 9999 9000 7 STANDBY #server3 runs as STANDBY
 Restart Pgpool-II on server1, which we stopped earlier, and verify that server1 runs as a standby.
- [server1]# systemctl start pgpool.service
+[server1]# systemctl start pgpool.service
- [server1]# pcp_watchdog_info -p 9898 -h 192.168.137.150 -U pgpool
- Password:
- 3 YES server2:9999 Linux server2 server2
+[server1]# pcp_watchdog_info -p 9898 -h 192.168.137.150 -U pgpool
+Password:
+3 YES server2:9999 Linux server2 server2
- server2:9999 Linux server2 server2 9999 9000 4 LEADER
- server1:9999 Linux server1 server1 9999 9000 7 STANDBY
- server3:9999 Linux server3 server3 9999 9000 7 STANDBY
+server2:9999 Linux server2 server2 9999 9000 4 LEADER
+server1:9999 Linux server1 server1 9999 9000 7 STANDBY
+server3:9999 Linux server3 server3 9999 9000 7 STANDBY
@@ -1111,7 +1112,7 @@ OPTS=" -D -n"
 server1, and verify automatic failover.
- [server1]$ pg_ctl -D /var/lib/pgsql/11/data -m immediate stop
+[server1]$ pg_ctl -D /var/lib/pgsql/13/data -m immediate stop
 After stopping PostgreSQL on server1,
@@ -1133,38 +1134,38 @@
- [server3]# psql -h server3 -p 5432 -U pgpool postgres -c "select pg_is_in_recovery()"
- pg_is_in_recovery
- -------------------
- t
+[server3]# psql -h server3 -p 5432 -U pgpool postgres -c "select pg_is_in_recovery()"
+pg_is_in_recovery
+-------------------
+t
- [server2]# psql -h server2 -p 5432 -U pgpool postgres -c "select pg_is_in_recovery()"
- pg_is_in_recovery
- -------------------
- f
+[server2]# psql -h server2 -p 5432 -U pgpool postgres -c "select pg_is_in_recovery()"
+pg_is_in_recovery
+-------------------
+f
- [server2]# psql -h server2 -p 5432 -U pgpool postgres -c "select * from pg_stat_replication" -x
- -[ RECORD 1 ]----+------------------------------
- pid | 11059
- usesysid | 16392
- usename | repl
- application_name | server3
- client_addr | 192.168.137.103
- client_hostname |
- client_port | 48694
- backend_start | 2019-08-06 11:36:07.479161+09
- backend_xmin |
- state | streaming
- sent_lsn | 0/75000148
- write_lsn | 0/75000148
- flush_lsn | 0/75000148
- replay_lsn | 0/75000148
- write_lag |
- flush_lag |
- replay_lag |
- sync_priority | 0
- sync_state | async
- reply_time | 2019-08-06 11:42:59.823961+09
+[server2]# psql -h server2 -p 5432 -U pgpool postgres -c "select * from pg_stat_replication" -x
+-[ RECORD 1 ]----+------------------------------
+pid | 11059
+usesysid | 16392
+usename | repl
+application_name | server3
+client_addr | 192.168.137.103
+client_hostname |
+client_port | 48694
+backend_start | 2019-08-06 11:36:07.479161+09
+backend_xmin |
+state | streaming
+sent_lsn | 0/75000148
+write_lsn | 0/75000148
+flush_lsn | 0/75000148
+replay_lsn | 0/75000148
+write_lag |
+flush_lag |
+replay_lag |
+sync_priority | 0
+sync_state | async
+reply_time | 2019-08-06 11:42:59.823961+09
@@ -1178,9 +1179,9 @@ OPTS=" -D -n"
 exist in the database cluster directory of the current primary server server2.
- # pcp_recovery_node -h 192.168.137.150 -p 9898 -U pgpool -n 0
- Password:
- pcp_recovery_node -- Command Successful
+# pcp_recovery_node -h 192.168.137.150 -p 9898 -U pgpool -n 0
+Password:
+pcp_recovery_node -- Command Successful
 Then verify that server1 is started as a standby.
-- 
2.39.5
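Note for reviewers trying the updated example: the following is a minimal shell sketch of the PCP credential setup and the status checks the revised documentation walks through. It assumes the values used in the example (virtual IP 192.168.137.150, PCP port 9898, Pgpool-II port 9999, the pgpool PCP user and the postgres startup user); the password value pcp_secret is a placeholder introduced here for illustration only, not part of the patch.

#!/bin/bash
# Sketch of the PCP credential setup and cluster checks from the example above.
# pcp_secret is a placeholder PCP password; replace it with your own value.

PCP_PASS=pcp_secret

# Append the md5-hashed PCP password for the pgpool user (run as root on all servers).
echo "pgpool:$(pg_md5 "$PCP_PASS")" >> /etc/pgpool-II/pcp.conf

# Let the postgres user run PCP commands without a prompt, as follow_primary.sh requires.
su - postgres -c "echo 'localhost:9898:pgpool:${PCP_PASS}' > ~/.pcppass && chmod 600 ~/.pcppass"

# Check the watchdog status through the virtual IP (prompts for the PCP password) ...
pcp_watchdog_info -h 192.168.137.150 -p 9898 -U pgpool

# ... or without a prompt as the postgres user, since ~/.pcppass holds a localhost entry.
su - postgres -c "pcp_watchdog_info -h localhost -p 9898 -U pgpool -w"

# Check backend status as Pgpool-II sees it: one primary and two streaming standbys.
psql -h 192.168.137.150 -p 9999 -U pgpool postgres -c "show pool_nodes"

After a failover test like the one in the example, the same two checks should show the promoted node as LEADER/primary and, once pcp_recovery_node has been run, the recovered nodes back in "up" state.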