[pgpool-general: 9012] Re: Massive performance and latency issues when remote replica is attached to cluster.
Brian Ismay
jismay at cenic.org
Thu Feb 1 02:22:28 JST 2024
I am not aware of anything in my configuration being especially
non-standard.
Here is a sanitized version of my config.
--------------------------------------------------------------
backend_clustering_mode = 'streaming_replication'
listen_addresses = '*'
port = 1234
socket_dir = '/var/run/postgresql'
reserved_connections = 0
pcp_listen_addresses = '*'
pcp_port = 9898
pcp_socket_dir = '/var/run/postgresql'
listen_backlog_multiplier = 2
serialize_accept = on
backend_hostname0 = '10.1.1.3'
backend_port0 = 1234
backend_weight0 = 1
backend_data_directory0 = '/var/lib/pgsql/12/data'
backend_flag0 = 'ALLOW_TO_FAILOVER'
backend_application_name0 = 'walreceiver'
backend_hostname1 = '10.1.1.2'
backend_port1 = 1234
backend_weight1 = 1
backend_data_directory1 = '/var/lib/pgsql/12/data'
backend_flag1 = 'ALLOW_TO_FAILOVER'
backend_application_name1 = 'walreceiver'
backend_hostname2 = '10.1.1.17'
backend_port2 = 1234
backend_weight2 = 0
backend_data_directory2 = '/var/lib/pgsql/12/data'
backend_flag2 = 'ALLOW_TO_FAILOVER'
backend_application_name2 = 'walreceiver'
enable_pool_hba = on
pool_passwd = 'pool_passwd'
authentication_timeout = 1min
allow_clear_text_frontend_auth = on
ssl = off
num_init_children = 600
max_pool = 4
child_life_time = 5min
child_max_connections = 20
connection_life_time = 60
client_idle_limit = 0
log_destination = 'stderr'
log_line_prefix = '%t: pid %p: '   # printf-style string to output at beginning of each log line
log_connections = on
log_disconnections = on
log_hostname = on
log_statement = on
log_per_node_statement = on
log_client_messages = off
log_standby_delay = 'if_over_threshold'
syslog_facility = 'LOCAL0'
syslog_ident = 'pgpool'
log_error_verbosity = verbose
logging_collector = on
log_directory = '/var/log/pgpool'
log_filename = 'pgpool-%a.log'
log_truncate_on_rotation = on
log_rotation_age = 1d
log_rotation_size = 0
pid_file_name = '/var/run/pgpool/pgpool.pid'
logdir = '/var/log/pgpool'
connection_cache = on
reset_query_list = 'ABORT; DISCARD ALL'
replicate_select = off
insert_lock = off
lobj_lock_table = ''
replication_stop_on_mismatch = off
failover_if_affected_tuples_mismatch = off
load_balance_mode = off
ignore_leading_white_space = on
read_only_function_list = ''
write_function_list = ''
primary_routing_query_pattern_list = '.*'
app_name_redirect_preference_list = ''
allow_sql_comments = off
disable_load_balance_on_write = 'transaction'
dml_adaptive_object_relationship_list = ''
statement_level_load_balance = on
sr_check_period = 10
sr_check_user = 'monitor'
sr_check_password = '*****'
sr_check_database = 'monitor'
delay_threshold = 10240
follow_primary_command = '/etc/pgpool-II/follow_master.sh %d %h %p %D %m %M %H %P %r %R'
health_check_period = 1
health_check_timeout = 20
health_check_user = 'monitor'
health_check_password = '*****'
health_check_database = ''
health_check_max_retries = 3
health_check_retry_delay = 1
connect_timeout = 10000
failover_command = '/etc/pgpool-II/failover.sh %d %h %p %D %m %H %M %P %r %R %N %S'
failback_command = ''
failover_on_backend_error = on
detach_false_primary = off
search_primary_node_timeout = 5min
recovery_user = 'postgres'
recovery_password = ''
recovery_1st_stage_command = 'recovery_1st_stage'
recovery_2nd_stage_command = ''
recovery_timeout = 90
client_idle_limit_in_recovery = 0
auto_failback = off
auto_failback_interval = 1min
use_watchdog = on
trusted_servers = 'node1,node2,node3'
ping_path = '/bin'
hostname0 = 'node01'
wd_port0 = 9000
pgpool_port0 = 1234
hostname1 = 'node02'
wd_port1 = 9000
pgpool_port1 = 1234
hostname2 = 'node03'
wd_port2 = 9000
pgpool_port2 = 1234
hostname3 = 'node04'
wd_port3 = 9000
pgpool_port3 = 1234
wd_priority = 4
wd_authkey = ''
wd_ipc_socket_dir = '/var/run/postgresql'
delegate_IP = '10.1.1.1'
if_cmd_path = '/sbin'
if_up_cmd = '/usr/bin/sudo /sbin/ip addr add $_IP_$/24 dev eth1 label eth1:0'
if_down_cmd = '/usr/bin/sudo /sbin/ip addr del $_IP_$/24 dev eth1'
arping_path = '/usr/sbin'
arping_cmd = '/usr/bin/sudo /usr/sbin/arping -U $_IP_$ -w 1 -I eth1'
clear_memqcache_on_escalation = on
wd_escalation_command = ''
wd_de_escalation_command = ''
failover_when_quorum_exists = on
failover_require_consensus = on
allow_multiple_failover_requests_from_node = on
enable_consensus_with_half_votes = on
wd_monitoring_interfaces_list = ''
wd_lifecheck_method = 'heartbeat'
wd_interval = 10
heartbeat_hostname0 = 'node01'
heartbeat_port0 = 9694
heartbeat_device0 = 'eth1'
heartbeat_hostname1 = 'node02'
heartbeat_port1 = 9694
heartbeat_device1 = 'eth1'
heartbeat_hostname2 = 'node03'
heartbeat_port2 = 9694
heartbeat_device2 = 'eth1'
heartbeat_hostname3 = 'node04'
heartbeat_port3 = 9694
heartbeat_device3 = 'eth1'
wd_heartbeat_keepalive = 2
wd_heartbeat_deadtime = 30
wd_life_point = 3
wd_lifecheck_query = 'SELECT 1'
wd_lifecheck_dbname = 'template1'
wd_lifecheck_user = 'nobody'
wd_lifecheck_password = ''
relcache_expire = 0
relcache_size = 256
check_temp_table = catalog
check_unlogged_table = on
enable_shared_relcache = on
relcache_query_target = primary
memory_cache_enabled = off
memqcache_method = 'shmem'
memqcache_memcached_host = 'localhost'
memqcache_memcached_port = 11211
memqcache_total_size = 64MB
memqcache_max_num_cache = 1000000
memqcache_expire = 0
memqcache_auto_cache_invalidation = on
memqcache_maxcache = 400kB
memqcache_cache_block_size = 1MB
memqcache_oiddir = '/var/log/pgpool/oiddir'
cache_safe_memqcache_table_list = ''
cache_unsafe_memqcache_table_list = ''
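
In case it helps anyone reproduce that view, dumping just the active (uncommented) settings from pgpool.conf can be done with something like the quick sketch below; the path is an assumption based on the default RPM layout, so adjust as needed:

# Print only uncommented, non-blank lines so the effective configuration
# can be reviewed at a glance. The path below is an assumption.
grep -Ev '^[[:space:]]*(#|$)' /etc/pgpool-II/pgpool.conf
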
--
Brian Ismay
jismay at cenic.org
On Fri, Jan 26, 2024 at 4:16 PM Tatsuo Ishii <ishii at sraoss.co.jp> wrote:
> > Sure thing:
> >
> > postgres=# show pool_nodes;
> >  node_id | hostname  | port | status | lb_weight | role    | select_cnt | load_balance_node | replication_delay | replication_state | replication_sync_state | last_status_change
> > ---------+-----------+------+--------+-----------+---------+------------+-------------------+-------------------+-------------------+------------------------+---------------------
> >  0       | 10.1.1.3  | 1234 | up     | 0.500000  | standby | 0          | false             | 0                 |                   |                        | 2024-01-23 10:09:51
> >  1       | 10.1.1.2  | 1234 | up     | 0.500000  | primary | 82024640   | true              | 0                 |                   |                        | 2024-01-23 10:09:51
> >  2       | 10.1.1.17 | 1234 | up     | 0.000000  | standby | 0          | false             | 0                 |                   |                        | 2024-01-23 10:09:51
> > (3 rows)
>
> All SELECTs were sent to node 1 as expected. However,
> replication_state and replication_sync_state are shown as NULL. Is
> that intended?
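
As a side note, as far as I understand pgpool fills those two columns from pg_stat_replication on the primary, matched by application_name against the backend_application_name settings. A quick way to cross-check what it should be seeing is to query the primary directly; this is only a sketch, and the host/port/user are assumptions taken from the config above:

# Run against the current primary (10.1.1.2 here); lists the WAL senders
# whose application_name should line up with backend_application_name0..2.
psql -h 10.1.1.2 -p 1234 -U monitor -d postgres \
  -c "SELECT application_name, state, sync_state FROM pg_stat_replication;"
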
>
> Moreover, I noticed that in the log BEGIN and END were sent to all
> nodes. This is unexpected. Do you have any non-standard configuration
> in pgpool.conf?
>
> Best regards,
> --
> Tatsuo Ishii
> SRA OSS LLC
> English: http://www.sraoss.co.jp/index_en/
> Japanese:http://www.sraoss.co.jp
>
> > postgres=# PGPOOL SHOW load_balance_mode;
> > load_balance_mode
> > -------------------
> > off
> > (1 row)
> >
> > --
> > Brian
> >
> > On Thu, Jan 25, 2024 at 5:55 PM Tatsuo Ishii <ishii at sraoss.co.jp> wrote:
> >
> >> OK. To make sure, can you share the result of the following commands?
> >>
> >> 1) show pool_nodes
> >>
> >> 2) pgpool show load_balance_mode
> >>
> >> Best regards,
> >> --
> >> Tatsuo Ishii
> >> SRA OSS LLC
> >> English: http://www.sraoss.co.jp/index_en/
> >> Japanese:http://www.sraoss.co.jp
> >>
> >> > I actually did have the load_balance_mode set to ON, but I caught that
> >> > before I sent the initial data-dump to the list.
> >> >
> >> > --
> >> > Brian
> >> >
> >> >> > On Wed, Jan 24, 2024 at 10:34 PM Tatsuo Ishii <ishii at sraoss.co.jp> wrote:
> >> >
> >> >> > Brian,
> >> >> >
> >> >> >> > I have looked into the log and found that some queries were sent to the remote node (node id 2).
> >> >> >> > This is pretty strange because you said:
> >> >> >> >> Load-balancing is turned off.
> >> >> >> >
> >> >> >> > In this case every query should be sent to the primary node only (node id 1).
> >> >> >
> >> >> > Are you sure that you set load_balance_node to off?
> >> >> ~~~~~~~~~~~~~~~~
> >> >> I meant "load_balance_mode".
> >> >>
> >> >> > Best regards,
> >> >> > --
> >> >> > Tatsuo Ishii
> >> >> > SRA OSS LLC
> >> >> > English: http://www.sraoss.co.jp/index_en/
> >> >> > Japanese:http://www.sraoss.co.jp
> >> >> >
> >> >> >> Tatsuo,
> >> >> >>
> >> >> >> As requested, I re-ran pgbench with the log_per_node_statement option turned on.
> >> >> >> I included just a small excerpt, but everything I saw looked to be the same pattern.
> >> >> >>
> >> >> >> $ pgbench -h 10.1.1.1 -p1234 -U pgbench -c 75 -j 12 -t 500 -P 15 -r pgbench1 -M extended
> >> >> >> starting vacuum...end.
> >> >> >> progress: 15.0 s, 1044.2 tps, lat 69.690 ms stddev 7.344
> >> >> >> progress: 30.0 s, 1060.1 tps, lat 69.940 ms stddev 7.635
> >> >> >> transaction type: <builtin: TPC-B (sort of)>
> >> >> >> scaling factor: 100
> >> >> >> query mode: extended
> >> >> >> number of clients: 75
> >> >> >> number of threads: 12
> >> >> >> number of transactions per client: 500
> >> >> >> number of transactions actually processed: 37500/37500
> >> >> >> latency average = 69.725 ms
> >> >> >> latency stddev = 7.464 ms
> >> >> >> tps = 1003.125524 (including connections establishing)
> >> >> >> tps = 1004.224113 (excluding connections establishing)
> >> >> >> statement latencies in milliseconds:
> >> >> >> 0.001 \set aid random(1, 100000 * :scale)
> >> >> >> 0.001 \set bid random(1, 1 * :scale)
> >> >> >> 0.000 \set tid random(1, 10 * :scale)
> >> >> >> 0.000 \set delta random(-5000, 5000)
> >> >> >> 11.958 BEGIN;
> >> >> >> 8.933 UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;
> >> >> >> 8.921 SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
> >> >> >> 9.230 UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;
> >> >> >> 10.102 UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;
> >> >> >> 8.907 INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);
> >> >> >> 11.671 END;
> >> >> >>
> >> >> >>
> >> >> >>
> >> >> >>
> >> >> >> -----------------------
> >> >> >>
> >> >> >> Looped through the logfile to grab the connections coming from pgbench and then find those:
> >> >> >>
> >> >> >> for i in `cat pgpool-Tue.log | grep "connecting\ host" | grep "10\.1\.1\.150" | awk '{print $4}' | sed "s/://"`; do cat pgpool-Tue.log | grep $i; done | less
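
The same extraction can also be done in a single pass over the log, which is noticeably faster on a large file; just a sketch, reusing the same filename and client address as above:

# Build a list of "pid NNNN:" tokens for the pgpool children that accepted
# connections from the pgbench client, then pull all of their log lines in
# one pass instead of re-reading the log once per PID.
grep 'connecting host=10\.1\.1\.150' pgpool-Tue.log \
  | awk '{print "pid", $4}' | sort -u > /tmp/pgbench_pids.txt
grep -F -f /tmp/pgbench_pids.txt pgpool-Tue.log | less
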
> >> >> >>
> >> >> >> A snippet from there:
> >> >> >> NOTE: node_id 0 and 1 are the "local" db_nodes
> >> >> >> node_id 2 is the "remote" db_node
> >> >> >> node_id 1 is the current primary.
> >> >> >>
> >> >> >>
> >> >> >>
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: new connection received
> >> >> >> 2024-01-23 10:10:03: pid 4507: DETAIL: connecting host=10.1.1.150 port=52802
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: child.c:1860
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: select count(*) from pgbench_branches
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:224
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: SELECT pg_catalog.version()
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: SELECT count(*) FROM pg_catalog.pg_proc AS p, pg_catalog.pg_namespace AS n WHERE p.proname = 'count' AND n.oid = p.pronamespace AND n.nspname ~ '.*' AND p.provolatile = 'v'
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: select count(*) from pgbench_branches
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: vacuum pgbench_branches
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:224
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: vacuum pgbench_branches
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: vacuum pgbench_tellers
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:224
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: vacuum pgbench_tellers
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: truncate pgbench_history
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:224
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: truncate pgbench_history
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: DISCARD ALL
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:224
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: DISCARD ALL
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: DISCARD ALL
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: DISCARD ALL
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: frontend disconnection: session time: 0:00:00.215 user=pgbench database=pgbench1 host=10.1.1.150 port=52802
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: child.c:2076
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: new connection received
> >> >> >> 2024-01-23 10:10:03: pid 4507: DETAIL: connecting host=10.1.1.150 port=52828
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: child.c:1860
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: Parse: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Parse: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: Parse: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: Bind: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Bind: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: Bind: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:914
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: Execute: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Execute: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: Execute: BEGIN;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Parse: UPDATE pgbench_accounts SET abalance = abalance + $1 WHERE aid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Bind: UPDATE pgbench_accounts SET abalance = abalance + $1 WHERE aid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: UPDATE pgbench_accounts SET abalance = abalance + $1 WHERE aid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:914
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Execute: UPDATE pgbench_accounts SET abalance = abalance + $1 WHERE aid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Parse: SELECT abalance FROM pgbench_accounts WHERE aid = $1;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Bind: SELECT abalance FROM pgbench_accounts WHERE aid = $1;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: SELECT abalance FROM pgbench_accounts WHERE aid = $1;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:914
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Execute: SELECT abalance FROM pgbench_accounts WHERE aid = $1;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Parse: UPDATE pgbench_tellers SET tbalance = tbalance + $1 WHERE tid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Bind: UPDATE pgbench_tellers SET tbalance = tbalance + $1 WHERE tid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: UPDATE pgbench_tellers SET tbalance = tbalance + $1 WHERE tid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:914
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Execute: UPDATE pgbench_tellers SET tbalance = tbalance + $1 WHERE tid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Parse: UPDATE pgbench_branches SET bbalance = bbalance + $1 WHERE bid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Bind: UPDATE pgbench_branches SET bbalance = bbalance + $1 WHERE bid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: UPDATE pgbench_branches SET bbalance = bbalance + $1 WHERE bid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:914
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Execute: UPDATE pgbench_branches SET bbalance = bbalance + $1 WHERE bid = $2;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Parse: INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES ($1, $2, $3, $4, CURRENT_TIMESTAMP);
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Bind: INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES ($1, $2, $3, $4, CURRENT_TIMESTAMP);
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES ($1, $2, $3, $4, CURRENT_TIMESTAMP);
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:914
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Execute: INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES ($1, $2, $3, $4, CURRENT_TIMESTAMP);
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: Parse: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Parse: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: Parse: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: Bind: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Bind: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: Bind: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: D message
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: statement: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:914
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 0 backend pid: 19865 statement: Execute: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 1 backend pid: 6857 statement: Execute: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOG: DB node id: 2 backend pid: 17091 statement: Execute: END;
> >> >> >> 2024-01-23 10:10:03: pid 4507: LOCATION: pool_proto_modules.c:3596
> >> >> >>
> >> >> >> --
> >> >> >> Brian Ismay
> >> >> >> jismay at cenic.org
> >> >> >>
> >> >> >> On Sat, Jan 20, 2024 at 10:39 PM Tatsuo Ishii <ishii at sraoss.co.jp> wrote:
> >> >> >>
> >> >> >>> > Any recommendations would be greatly appreciated. So far, after
> >> >> >>> > making multiple tuning and settings changes there have been no
> >> >> >>> > meaningful changes.
> >> >> >>> >
> >> >> >>> > What we are highly suspicious of is that the latency numbers seem
> >> >> >>> > to go from around 0.2 ms to around 8 ms. This is very close to
> >> >> >>> > the round-trip time between our datacenters.
> >> >> >>>
> >> >> >>> > statement latencies in milliseconds:
> >> >> >>> > 0.003 \set aid random(1, 100000 * :scale)
> >> >> >>> > 0.001 \set bid random(1, 1 * :scale)
> >> >> >>> > 0.001 \set tid random(1, 10 * :scale)
> >> >> >>> > 0.001 \set delta random(-5000, 5000)
> >> >> >>> > 8.844 BEGIN;
> >> >> >>> > 9.695 UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;
> >> >> >>> > 8.878 SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
> >> >> >>> > 685.873 UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;
> >> >> >>> > 92.049 UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;
> >> >> >>> > 8.826 INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);
> >> >> >>> > 8.847 END;
> >> >> >>>
> >> >> >>> If BEGIN and END were sent to the remote standby node, then the
> >> >> >>> latency numbers make sense. But theoretically that should not
> >> >> >>> happen because you turned off load balance mode. In this case
> >> >> >>> everything should be sent to the primary except connection
> >> >> >>> establishment. But even if load balance mode is off, pgpool
> >> >> >>> establishes connections to both the primary and the standbys.
> >> >> >>> However, by default pgbench establishes a connection only once at
> >> >> >>> the beginning of a session, meaning the connection overhead should
> >> >> >>> only happen once per session. Thus I cannot think of a plausible
> >> >> >>> reason for the latency numbers.
> >> >> >>>
> >> >> >>> Anyway, can you turn on log_per_node_statement and check whether
> >> >> >>> anything is sent to the standby? I want to confirm that first.
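
For what it's worth, one way to make connection overhead visible on its own would be a pgbench run with -C, which reconnects for every transaction; this is only a sketch reusing the host/port and client counts from the earlier runs:

# -C forces a new connection per transaction, so per-connection cost shows
# up directly in the reported latency; smaller -t keeps the run short.
pgbench -h 10.1.1.1 -p 1234 -U pgbench -C -c 75 -j 12 -t 100 -P 15 -r -M extended pgbench1
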
> >> >> >>>
> >> >> >>> Best regards,
> >> >> >>> --
> >> >> >>> Tatsuo Ishii
> >> >> >>> SRA OSS LLC
> >> >> >>> English: http://www.sraoss.co.jp/index_en/
> >> >> >>> Japanese:http://www.sraoss.co.jp
> >> >> >>>
> >> >> > _______________________________________________
> >> >> > pgpool-general mailing list
> >> >> > pgpool-general at pgpool.net
> >> >> > http://www.pgpool.net/mailman/listinfo/pgpool-general
> >> >> _______________________________________________
> >> >> pgpool-general mailing list
> >> >> pgpool-general at pgpool.net
> >> >> http://www.pgpool.net/mailman/listinfo/pgpool-general
> >> >>
> >>
> _______________________________________________
> pgpool-general mailing list
> pgpool-general at pgpool.net
> http://www.pgpool.net/mailman/listinfo/pgpool-general
>