[pgpool-hackers: 3831] Re: 答复: Re: discuss of pgpool enhancement
Tatsuo Ishii
ishii at sraoss.co.jp
Sat Sep 26 13:11:37 JST 2020
Here are some comments on the patch.
- The patch lacks a switch to turn off the feature to preserve
existing behavior.
- The patch lacks changes to pgpool_setup. As a result, pgpool.conf
and postgresql.conf created by pgpool_setup are now inconsistent:
max_children = 120 vs. max_connections = 100
- Why is the "pool_remain" variable declared as int *? It seems a plain
"int" is enough. Also, you should declare the variable with
"volatile".
- The "pool_remain" variable is counted up/down separately from
connection_count_up() and connection_count_down(). I think you can
do the counting inside those functions because the timing is
identical.
- Compiler warning: protocol/child.c:441:4: warning: implicit
declaration of function ‘register_fork_new_service_req’
[-Wimplicit-function-declaration]
diff --git a/src/config/pool_config_variables.c b/src/config/pool_config_variables.c
old mode 100644
new mode 100755
index b5fab2ee..3e090459
--- a/src/config/pool_config_variables.c
+++ b/src/config/pool_config_variables.c
@@ -1801,12 +1801,34 @@ static struct config_int ConfigureNamesInt[] =
},
{
- {"num_init_children", CFGCXT_INIT, CONNECTION_POOL_CONFIG,
- "Number of children pre-forked for client connections.",
+ {"max_children", CFGCXT_INIT, CONNECTION_POOL_CONFIG,
+ "Number of servers we can create most.",
CONFIG_VAR_TYPE_INT, false, 0
},
- &g_pool_config.num_init_children,
- 32,
+ &g_pool_config.max_children,
+ 150,
+ 1, INT_MAX,
+ NULL, NULL, NULL
+ },
+
+ {
+ {"min_spare_children", CFGCXT_INIT, CONNECTION_POOL_CONFIG,
+ "Minimum number of spare servers.",
+ CONFIG_VAR_TYPE_INT, false, 0
+ },
+ &g_pool_config.min_spare_children,
+ 5,
+ 1, INT_MAX,
+ NULL, NULL, NULL
+ },
+
+ {
+ {"max_spare_children", CFGCXT_INIT, CONNECTION_POOL_CONFIG,
+ "Maximum number of spare servers.",
+ CONFIG_VAR_TYPE_INT, false, 0
+ },
+ &g_pool_config.max_spare_children,
+ 10,
1, INT_MAX,
NULL, NULL, NULL
},
diff --git a/src/context/pool_process_context.c b/src/context/pool_process_context.c
old mode 100644
new mode 100755
index 46139f5a..6098143b
--- a/src/context/pool_process_context.c
+++ b/src/context/pool_process_context.c
@@ -110,14 +110,15 @@ pool_coninfo_size(void)
{
int size;
- size = pool_config->num_init_children *
+
+ size = pool_config->max_children *
pool_config->max_pool *
MAX_NUM_BACKENDS *
sizeof(ConnectionInfo);
-
+
ereport(DEBUG1,
- (errmsg("pool_coninfo_size: num_init_children (%d) * max_pool (%d) * MAX_NUM_BACKENDS (%d) * sizeof(ConnectionInfo) (%zu) = %d bytes requested for shared memory",
- pool_config->num_init_children,
+ (errmsg("pool_coninfo_size: max_children (%d) * max_pool (%d) * MAX_NUM_BACKENDS (%d) * sizeof(ConnectionInfo) (%zu) = %d bytes requested for shared memory",
+ pool_config->max_children,
pool_config->max_pool,
MAX_NUM_BACKENDS,
sizeof(ConnectionInfo),
@@ -133,7 +134,7 @@ pool_coninfo_num(void)
{
int nelm;
- nelm = pool_config->num_init_children *
+ nelm = pool_config->max_children *
pool_config->max_pool *
MAX_NUM_BACKENDS;
@@ -147,7 +148,7 @@ pool_coninfo_num(void)
ConnectionInfo *
pool_coninfo(int child, int connection_pool, int backend)
{
- if (child < 0 || child >= pool_config->num_init_children)
+ if (child < 0 || child >=pool_config->max_children)
{
ereport(WARNING,
(errmsg("failed to get connection info, invalid child number: %d", child)));
@@ -183,7 +184,7 @@ pool_coninfo_pid(int pid, int connection_pool, int backend)
int child = -1;
int i;
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
if (process_info[i].pid == pid)
{
@@ -195,7 +196,7 @@ pool_coninfo_pid(int pid, int connection_pool, int backend)
if (child < 0)
elog(ERROR, "failed to get child pid, invalid child PID:%d", pid);
- if (child < 0 || child >= pool_config->num_init_children)
+ if (child < 0 || child >= pool_config->max_children)
elog(ERROR, "failed to get child pid, invalid child no:%d", child);
if (connection_pool < 0 || connection_pool >= pool_config->max_pool)
@@ -227,28 +228,33 @@ pool_coninfo_backend_pid(int backend_pid, int *backend_node_id)
ereport(DEBUG1,
(errmsg("searching for the connection with backend pid:%d", backend_pid)));
- for (child = 0; child < pool_config->num_init_children; child++)
+ for (child = 0; child < pool_config->max_children; child++)
{
int pool;
- ProcessInfo *pi = pool_get_process_info(process_info[child].pid);
-
- for (pool = 0; pool < pool_config->max_pool; pool++)
+
+ if (process_info[child].pid)
{
- int backend_id;
+ ProcessInfo *pi = pool_get_process_info(process_info[child].pid);
- for (backend_id = 0; backend_id < NUM_BACKENDS; backend_id++)
+ for (pool = 0; pool < pool_config->max_pool; pool++)
{
- int poolBE = pool * MAX_NUM_BACKENDS + backend_id;
+ int backend_id;
- if (ntohl(pi->connection_info[poolBE].pid) == backend_pid)
+ for (backend_id = 0; backend_id < NUM_BACKENDS; backend_id++)
{
- ereport(DEBUG1,
- (errmsg("found for the connection with backend pid:%d on backend node %d", backend_pid, backend_id)));
- *backend_node_id = backend_id;
- return &pi->connection_info[poolBE];
+ int poolBE = pool * MAX_NUM_BACKENDS + backend_id;
+
+ if (ntohl(pi->connection_info[poolBE].pid) == backend_pid)
+ {
+ ereport(DEBUG1,
+ (errmsg("found for the connection with backend pid:%d on backend node %d", backend_pid, backend_id)));
+ *backend_node_id = backend_id;
+ return &pi->connection_info[poolBE];
+ }
}
}
}
+
}
return NULL;
}
diff --git a/src/include/main/pool_internal_comms.h b/src/include/main/pool_internal_comms.h
old mode 100644
new mode 100755
index a90a642c..a67c899a
--- a/src/include/main/pool_internal_comms.h
+++ b/src/include/main/pool_internal_comms.h
@@ -42,5 +42,5 @@ extern void register_backend_state_sync_req_interupt(void);
extern void register_inform_quarantine_nodes_req(void);
extern bool register_node_operation_request(POOL_REQUEST_KIND kind,
int *node_id_set, int count, unsigned char flags);
-
+extern void register_fork_new_service_req(void);
#endif /* pool_internal_comms_h */
diff --git a/src/include/pool.h b/src/include/pool.h
old mode 100644
new mode 100755
index d1c1fd66..51b29bc9
--- a/src/include/pool.h
+++ b/src/include/pool.h
@@ -379,7 +379,7 @@ typedef enum
#define Min(x, y) ((x) < (y) ? (x) : (y))
-#define MAX_NUM_SEMAPHORES 7
+#define MAX_NUM_SEMAPHORES 8
#define CONN_COUNTER_SEM 0
#define REQUEST_INFO_SEM 1
#define SHM_CACHE_SEM 2
@@ -387,6 +387,7 @@ typedef enum
#define PCP_REQUEST_SEM 4
#define ACCEPT_FD_SEM 5
#define SI_CRITICAL_REGION_SEM 6
+#define CONNECTION_POOL_REMAIN_SEM 7
#define MAX_REQUEST_QUEUE_SIZE 10
#define MAX_SEC_WAIT_FOR_CLUSTER_TRANSATION 10 /* time in seconds to keep
@@ -531,8 +532,8 @@ typedef struct
{
uint32 commit_counter; /* number of committing children */
uint32 snapshot_counter; /* number of snapshot acquiring children */
- pid_t *snapshot_waiting_children; /* array size is num_init_children */
- pid_t *commit_waiting_children; /* array size is num_init_children */
+ pid_t *snapshot_waiting_children; /* array size is max_children */
+ pid_t *commit_waiting_children; /* array size is max_children */
} SI_ManageInfo;
/*
@@ -575,6 +576,9 @@ extern BACKEND_STATUS private_backend_status[MAX_NUM_BACKENDS];
extern char remote_host[]; /* client host */
extern char remote_port[]; /* client port */
+extern int *pool_remain;
+
+
/*
* public functions
*/
diff --git a/src/include/pool_config.h b/src/include/pool_config.h
old mode 100644
new mode 100755
index 5020aa4c..798af054
--- a/src/include/pool_config.h
+++ b/src/include/pool_config.h
@@ -210,7 +210,10 @@ typedef struct
char *socket_dir; /* pgpool socket directory */
char *wd_ipc_socket_dir; /* watchdog command IPC socket directory */
char *pcp_socket_dir; /* PCP socket directory */
- int num_init_children; /* # of children initially pre-forked */
+ int max_children; /* Maximum number of child to
+ * accept connections */
+ int min_spare_children; /* Minimum number of spare children */
+ int max_spare_children; /* Maximum number of spare children */
int listen_backlog_multiplier; /* determines the size of the
* connection queue */
int reserved_connections; /* # of reserved connections */
diff --git a/src/main/main.c b/src/main/main.c
old mode 100644
new mode 100755
index 1c83cef7..c598486c
--- a/src/main/main.c
+++ b/src/main/main.c
@@ -229,6 +229,7 @@ main(int argc, char **argv)
pool_get_config(conf_file, CFGCXT_INIT);
+
/*
* Override debug level if command line -d arg is given adjust the
* log_min_message config variable
diff --git a/src/main/pgpool_main.c b/src/main/pgpool_main.c
old mode 100644
new mode 100755
index 21f1ad57..63542612
--- a/src/main/pgpool_main.c
+++ b/src/main/pgpool_main.c
@@ -72,6 +72,7 @@ typedef enum
* of watchdog cluster */
SIG_INFORM_QURANTINE_NODES, /* notify main about send degenerate requests
* for all quarantine nodes */
+ SIG_FORK_NEW_SERVICE, /* signal main to fork new service */
MAX_INTERUPTS /* Must be last! */
} User1SignalReason;
@@ -152,6 +153,8 @@ static struct sockaddr_un pcp_un_addr; /* unix domain socket path for PCP */
ProcessInfo *process_info = NULL; /* Per child info table on shmem */
volatile User1SignalSlot *user1SignalSlot = NULL; /* User 1 signal slot on
* shmem */
+int *pool_remain = NULL; /* remain pool number for connection */
Why is this counter declared as int *? It seems defining it as just
"int" is enough. Also, you need to add a "volatile" qualifier to this
because it resides in a shared memory area.
+
struct timeval random_start_time;
/*
@@ -167,7 +170,7 @@ BACKEND_STATUS private_backend_status[MAX_NUM_BACKENDS];
/*
* shmem connection info table
* this is a three dimension array. i.e.:
- * con_info[pool_config->num_init_children][pool_config->max_pool][MAX_NUM_BACKENDS]
+ * con_info[pool_config->max_children][pool_config->max_pool][MAX_NUM_BACKENDS]
*/
ConnectionInfo *con_info;
@@ -397,12 +400,14 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
*/
POOL_SETMASK(&BlockSig);
/* fork the children */
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->min_spare_children; i++)
{
process_info[i].pid = fork_a_child(fds, i);
process_info[i].start_time = time(NULL);
}
+ *pool_remain = pool_config->min_spare_children;
+
/* create pipe for delivering event */
if (pipe(pipe_fds) < 0)
{
@@ -592,6 +597,11 @@ register_inform_quarantine_nodes_req(void)
{
signal_user1_to_parent_with_reason(SIG_INFORM_QURANTINE_NODES);
}
+void
+register_fork_new_service_req(void)
+{
+ signal_user1_to_parent_with_reason(SIG_FORK_NEW_SERVICE);
+}
static void
signal_user1_to_parent_with_reason(User1SignalReason reason)
@@ -839,7 +849,7 @@ create_inet_domain_sockets(const char *hostname, const int port)
errdetail("bind on socket failed with error \"%s\"", strerror(errno))));
}
- backlog = pool_config->num_init_children * pool_config->listen_backlog_multiplier;
+ backlog = pool_config->max_spare_children * pool_config->listen_backlog_multiplier;
if (backlog > PGPOOLMAXLITSENQUEUELENGTH)
backlog = PGPOOLMAXLITSENQUEUELENGTH;
@@ -939,7 +949,7 @@ create_inet_domain_socket(const char *hostname, const int port)
errdetail("bind on host:\"%s\" server:\"%s\" failed with error \"%s\"", host, servname, strerror(saved_errno))));
}
- backlog = pool_config->num_init_children * pool_config->listen_backlog_multiplier;
+ backlog = pool_config->max_spare_children * pool_config->listen_backlog_multiplier;
if (backlog > PGPOOLMAXLITSENQUEUELENGTH)
backlog = PGPOOLMAXLITSENQUEUELENGTH;
@@ -1030,7 +1040,7 @@ terminate_all_childrens(int sig)
return;
}
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
pid_t pid = process_info[i].pid;
@@ -1284,6 +1294,34 @@ sigusr1_interupt_processor(void)
failover();
}
}
+ if (user1SignalSlot->signalFlags[SIG_FORK_NEW_SERVICE])
+ {
+ int i;
+ int j = 0;
+ ereport(LOG,
+ (errmsg("Pgpool-II parent process has received restart request")));
+ user1SignalSlot->signalFlags[SIG_FORK_NEW_SERVICE] = false;
+
+ POOL_SETMASK(&BlockSig);
+
+ /* fork the children */
+ for (i = 0; i < pool_config->max_children; i++)
+ {
+ if (process_info[i].pid == 0)
+ {
+ process_info[i].pid = fork_a_child(fds, i);
+ process_info[i].start_time = time(NULL);
+ j++;
+ }
+ if (j >= pool_config->min_spare_children)
+ break;
+ }
+
+ pool_semaphore_lock(CONNECTION_POOL_REMAIN_SEM);
+ *pool_remain += pool_config->min_spare_children;
+ pool_semaphore_unlock(CONNECTION_POOL_REMAIN_SEM);
+
+ }
}
/* returns true if all backends are down */
@@ -1663,7 +1701,7 @@ failover(void)
need_to_restart_children = true;
partial_restart = true;
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
bool restart = false;
@@ -1704,9 +1742,10 @@ failover(void)
(errmsg("Restart all children")));
/* kill all children */
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
pid_t pid = process_info[i].pid;
+
if (pid)
{
@@ -1886,7 +1925,7 @@ failover(void)
/* Kill children and restart them if needed */
if (need_to_restart_children)
{
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
/*
* Try to kill pgpool child because previous kill signal may
@@ -1945,9 +1984,10 @@ failover(void)
* Set restart request to each child. Children will exit(1)
* whenever they are convenient.
*/
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
- process_info[i].need_to_restart = 1;
+ if (process_info[i].pid)
+ process_info[i].need_to_restart = 1;
}
}
@@ -2376,7 +2416,7 @@ reaper(void)
if (found == false)
{
/* look for exiting child's pid */
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
if (pid == process_info[i].pid)
{
@@ -2465,12 +2505,27 @@ int *
pool_get_process_list(int *array_size)
{
int *array;
+ int cnt = 0;
int i;
- *array_size = pool_config->num_init_children;
+ for (i=0;i < pool_config->max_children;i++)
+ {
+ if (process_info[i].pid != 0)
+ cnt++;
+ }
+ *array_size = cnt;
+ cnt = 0;
array = palloc0(*array_size * sizeof(int));
- for (i = 0; i < *array_size; i++)
- array[i] = process_info[i].pid;
+ for (i = 0; i < pool_config->max_children; i++)
+ {
+ if (process_info[i].pid != 0)
+ {
+ array[cnt] = process_info[i].pid;
+ cnt++;
+ }
+
+ }
+
return array;
}
@@ -2483,7 +2538,7 @@ pool_get_process_info(pid_t pid)
{
int i;
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
if (process_info[i].pid == pid)
return &process_info[i];
@@ -2550,7 +2605,7 @@ kill_all_children(int sig)
if (process_info)
{
/* kill all children */
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
pid_t pid = process_info[i].pid;
@@ -3283,22 +3338,24 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps)
con_info = pool_shared_memory_create(size);
memset(con_info, 0, size);
- size = pool_config->num_init_children * (sizeof(ProcessInfo));
+ size = pool_config->max_children * (sizeof(ProcessInfo));
ereport(DEBUG1,
- (errmsg("ProcessInfo: num_init_children (%d) * sizeof(ProcessInfo) (%zu) = %d bytes requested for shared memory",
- pool_config->num_init_children,
+ (errmsg("ProcessInfo: max_children (%d) * sizeof(ProcessInfo) (%zu) = %d bytes requested for shared memory",
+ pool_config->max_children,
sizeof(ProcessInfo),
size)));
process_info = pool_shared_memory_create(size);
memset(process_info, 0, size);
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
process_info[i].connection_info = pool_coninfo(i, 0, 0);
}
+ pool_remain = pool_shared_memory_create(sizeof(int));
+
The "sizeof" expression is wrong. Shouldn't it be sizeof(int *)? Although
I doubt it needs to be int *, as I said elsewhere.
user1SignalSlot = pool_shared_memory_create(sizeof(User1SignalSlot));
/* create fail over/switch over event area */
Req_info = pool_shared_memory_create(sizeof(POOL_REQUEST_INFO));
@@ -3397,7 +3454,7 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps)
size = MAXALIGN(sizeof(SI_ManageInfo));
si_manage_info = pool_shared_memory_create(size);
memset((void *)si_manage_info, 0, size);
- size = MAXALIGN(pool_config->num_init_children * sizeof(pid_t));
+ size = MAXALIGN(pool_config->max_children * sizeof(pid_t));
si_manage_info->snapshot_waiting_children = pool_shared_memory_create(size);
si_manage_info->commit_waiting_children = pool_shared_memory_create(size);
}
@@ -3997,7 +4054,7 @@ sync_backend_from_watchdog(void)
/* Kill children and restart them if needed */
if (need_to_restart_children)
{
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
bool restart = false;
@@ -4057,9 +4114,10 @@ sync_backend_from_watchdog(void)
* Set restart request to each child. Children will exit(1) whenever
* they are convenient.
*/
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
- process_info[i].need_to_restart = 1;
+ if (process_info[i].pid)
+ process_info[i].need_to_restart = 1;
}
}
diff --git a/src/pcp_con/pcp_worker.c b/src/pcp_con/pcp_worker.c
old mode 100644
new mode 100755
index b315afa6..68642c0a
--- a/src/pcp_con/pcp_worker.c
+++ b/src/pcp_con/pcp_worker.c
@@ -675,7 +675,7 @@ inform_process_info(PCP_CONNECTION * frontend, char *buf)
{
int proc_id;
int wsize;
- int num_proc = pool_config->num_init_children;
+ int num_proc = pool_config->max_children;
int i;
proc_id = atoi(buf);
diff --git a/src/protocol/child.c b/src/protocol/child.c
old mode 100644
new mode 100755
index 7b167cb4..62365775
--- a/src/protocol/child.c
+++ b/src/protocol/child.c
@@ -344,7 +344,7 @@ do_child(int *fds)
* Check if max connections from clients execeeded.
*/
con_count = connection_count_up();
- if (con_count > (pool_config->num_init_children - pool_config->reserved_connections))
+ if (con_count > (pool_config->max_children - pool_config->reserved_connections))
{
POOL_CONNECTION * cp;
cp = pool_open(front_end_fd, false);
@@ -427,6 +427,19 @@ do_child(int *fds)
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
+ pool_semaphore_lock(CONNECTION_POOL_REMAIN_SEM);
+ *pool_remain -= 1;
+ pool_semaphore_unlock(CONNECTION_POOL_REMAIN_SEM);
+
+ ereport(DEBUG1,
+ (errmsg("pool remain is %d",*pool_remain)));
+
+ if (*pool_remain < pool_config->min_spare_children)
+ {
+ /* send signal to parent process to fork child process */
+ register_fork_new_service_req();
+
+ }
/* query process loop */
for (;;)
{
@@ -455,6 +468,10 @@ do_child(int *fds)
if (pool_config->log_disconnections)
log_disconnections(sp->database, sp->user);
+ pool_semaphore_lock(CONNECTION_POOL_REMAIN_SEM);
+ *pool_remain += 1;
+ pool_semaphore_unlock(CONNECTION_POOL_REMAIN_SEM);
+
timeout.tv_sec = pool_config->child_life_time;
timeout.tv_usec = 0;
@@ -462,6 +479,14 @@ do_child(int *fds)
if (pool_config->child_max_connections > 0)
connections_count++;
+ /* check if number of spare children exceeds max_spare_children*/
+ if (*pool_remain > pool_config->max_spare_children)
+ {
+ *pool_remain -= 1;
+ process_info[my_proc_id].pid = 0;
+ child_exit(POOL_EXIT_NO_RESTART);
+ }
+
/* check if maximum connections count for this child reached */
if ((pool_config->child_max_connections > 0) &&
(connections_count >= pool_config->child_max_connections))
@@ -474,6 +499,8 @@ do_child(int *fds)
child_exit(POOL_EXIT_NO_RESTART);
}
+
+
/* -------------------------------------------------------------------
* private functions
* -------------------------------------------------------------------
@@ -868,7 +895,7 @@ cancel_request(CancelPacket * sp)
(errmsg("Cancel request received")));
/* look for cancel key from shmem info */
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->max_children; i++)
{
for (j = 0; j < pool_config->max_pool; j++)
{
diff --git a/src/protocol/pool_pg_utils.c b/src/protocol/pool_pg_utils.c
old mode 100644
new mode 100755
index 9a411413..1f9aabdd
--- a/src/protocol/pool_pg_utils.c
+++ b/src/protocol/pool_pg_utils.c
@@ -790,7 +790,7 @@ si_snapshot_aquired(void)
if (si_manage_info->snapshot_counter == 0)
{
/* wakeup all waiting children */
- for (i = 0; i < pool_config->num_init_children ; i++)
+ for (i = 0; i < pool_config->max_children ; i++)
{
pid_t pid = si_manage_info->snapshot_waiting_children[i];
if (pid > 0)
@@ -864,7 +864,7 @@ si_commit_done(void)
if (si_manage_info->commit_counter == 0)
{
/* wakeup all waiting children */
- for (i = 0; i < pool_config->num_init_children ; i++)
+ for (i = 0; i < pool_config->max_children ; i++)
{
pid_t pid = si_manage_info->commit_waiting_children[i];
if (pid > 0)
diff --git a/src/protocol/pool_proto_modules.c b/src/protocol/pool_proto_modules.c
old mode 100644
new mode 100755
index 80e53096..183426fb
--- a/src/protocol/pool_proto_modules.c
+++ b/src/protocol/pool_proto_modules.c
@@ -702,7 +702,7 @@ SimpleQuery(POOL_CONNECTION * frontend,
* do this optimization because we need to wait for main node's
* response first.
*/
- if (pool_config->num_init_children == 1 &&
+ if (pool_config->max_children == 1 &&
pool_config->backend_clustering_mode != CM_SNAPSHOT_ISOLATION)
{
/* Send query to all DB nodes at once */
diff --git a/src/sample/pgpool.conf.sample-logical b/src/sample/pgpool.conf.sample-logical
index ba3a0a31..c74f258a 100644
--- a/src/sample/pgpool.conf.sample-logical
+++ b/src/sample/pgpool.conf.sample-logical
@@ -156,13 +156,23 @@ ssl_dh_params_file = ''
# - Concurrent session and pool size -
-num_init_children = 32
- # Number of concurrent sessions allowed
- # (change requires restart)
max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
+max_children = 120
+ # Maximum number of concurrent connections
+ # (change requires restart)
+
+min_spare_children = 5
+ # Minimum number of spare child processes waiting for connection
+ # (change requires restart)
+
+max_spare_children = 10
+ # Maximum number of spare child processes waiting for connection
+ # (change requires restart)
+
+
# - Life time -
child_life_time = 5min
diff --git a/src/sample/pgpool.conf.sample-raw b/src/sample/pgpool.conf.sample-raw
index 6d3cf52f..90d6096f 100644
--- a/src/sample/pgpool.conf.sample-raw
+++ b/src/sample/pgpool.conf.sample-raw
@@ -157,13 +157,23 @@ ssl_dh_params_file = ''
# - Concurrent session and pool size -
-num_init_children = 32
- # Number of concurrent sessions allowed
- # (change requires restart)
max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
+max_children = 120
+ # Maximum number of concurrent connections
+ # (change requires restart)
+
+min_spare_children = 5
+ # Minimum number of spare child processes waiting for connection
+ # (change requires restart)
+
+max_spare_children = 10
+ # Maximum number of spare child processes waiting for connection
+ # (change requires restart)
+
+
# - Life time -
child_life_time = 5min
diff --git a/src/sample/pgpool.conf.sample-replication b/src/sample/pgpool.conf.sample-replication
index 08d11ecd..a9a9f75a 100644
--- a/src/sample/pgpool.conf.sample-replication
+++ b/src/sample/pgpool.conf.sample-replication
@@ -152,13 +152,23 @@ ssl_dh_params_file = ''
# - Concurrent session and pool size -
-num_init_children = 32
- # Number of concurrent sessions allowed
- # (change requires restart)
max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
+max_children = 120
+ # Maximum number of concurrent connections
+ # (change requires restart)
+
+min_spare_children = 5
+ # Minimum number of spare child processes waiting for connection
+ # (change requires restart)
+
+max_spare_children = 10
+ # Maximum number of spare child processes waiting for connection
+ # (change requires restart)
+
+
# - Life time -
child_life_time = 5min
diff --git a/src/sample/pgpool.conf.sample-slony b/src/sample/pgpool.conf.sample-slony
index 004bdac4..4fd670f2 100644
--- a/src/sample/pgpool.conf.sample-slony
+++ b/src/sample/pgpool.conf.sample-slony
@@ -153,13 +153,23 @@ ssl_dh_params_file = ''
# - Concurrent session and pool size -
-num_init_children = 32
- # Number of concurrent sessions allowed
- # (change requires restart)
max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
+max_children = 120
+ # Maximum number of concurrent connections
+ # (change requires restart)
+
+min_spare_children = 5
+ # Minimum number of spare child processes waiting for connection
+ # (change requires restart)
+
+max_spare_children = 10
+ # Maximum number of spare child processes waiting for connection
+ # (change requires restart)
+
+
# - Life time -
child_life_time = 5min
diff --git a/src/sample/pgpool.conf.sample-snapshot b/src/sample/pgpool.conf.sample-snapshot
index 8cf4123c..0bf8d677 100644
--- a/src/sample/pgpool.conf.sample-snapshot
+++ b/src/sample/pgpool.conf.sample-snapshot
@@ -152,13 +152,22 @@ ssl_dh_params_file = ''
# - Concurrent session and pool size -
-num_init_children = 32
- # Number of concurrent sessions allowed
- # (change requires restart)
max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
+max_children = 120
+ # Maximum number of concurrent connections
+ # (change requires restart)
+
+min_spare_children = 5
+ # Minimum number of spare child processes waiting for connection
+ # (change requires restart)
+
+max_spare_children = 10
+ # Maximum number of spare child processes waiting for connection
+ # (change requires restart)
+
# - Life time -
child_life_time = 5min
diff --git a/src/sample/pgpool.conf.sample-stream b/src/sample/pgpool.conf.sample-stream
index 96630d5f..3fdca7c8 100644
--- a/src/sample/pgpool.conf.sample-stream
+++ b/src/sample/pgpool.conf.sample-stream
@@ -157,13 +157,23 @@ ssl_dh_params_file = ''
# - Concurrent session and pool size -
-num_init_children = 32
- # Number of concurrent sessions allowed
- # (change requires restart)
max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
+max_children = 120
+ # Maximum number of concurrent connections
+ # (change requires restart)
+
+min_spare_children = 5
+ # Minimum number of spare child processes waiting for connection
+ # (change requires restart)
+
+max_spare_children = 10
+ # Maximum number of spare child processes waiting for connection
+ # (change requires restart)
+
+
# - Life time -
child_life_time = 5min
diff --git a/src/tools/pcp/pcp_frontend_client.c b/src/tools/pcp/pcp_frontend_client.c
old mode 100644
new mode 100755
index 1b51adca..52833e36
--- a/src/tools/pcp/pcp_frontend_client.c
+++ b/src/tools/pcp/pcp_frontend_client.c
@@ -719,7 +719,9 @@ output_procinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
if (process_info->connection_info->create_time)
strftime(strcreatetime, 128, "%Y-%m-%d %H:%M:%S", localtime(&process_info->connection_info->create_time));
- printf(frmt,
+ if (process_info->pid != 0)
+ {
+ printf(frmt,
process_info->connection_info->database,
process_info->connection_info->user,
strstarttime,
@@ -731,6 +733,7 @@ output_procinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
process_info->connection_info->connected,
process_info->pid,
process_info->connection_info->backend_id);
+ }
}
if (printed == false)
printf("No process information available\n\n");
diff --git a/src/utils/pool_process_reporting.c b/src/utils/pool_process_reporting.c
old mode 100644
new mode 100755
index af0db6b3..d2c79a11
--- a/src/utils/pool_process_reporting.c
+++ b/src/utils/pool_process_reporting.c
@@ -290,10 +290,6 @@ get_config(int *nrows)
/* POOLS */
/* - Pool size - */
- StrNCpy(status[i].name, "num_init_children", POOLCONFIG_MAXNAMELEN);
- snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->num_init_children);
- StrNCpy(status[i].desc, "# of children initially pre-forked", POOLCONFIG_MAXDESCLEN);
- i++;
StrNCpy(status[i].name, "listen_backlog_multiplier", POOLCONFIG_MAXNAMELEN);
snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->listen_backlog_multiplier);
@@ -315,6 +311,21 @@ get_config(int *nrows)
StrNCpy(status[i].desc, "max # of connection pool per child", POOLCONFIG_MAXDESCLEN);
i++;
+ StrNCpy(status[i].name, "max_children", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->max_children);
+ StrNCpy(status[i].desc, "max # of connections in pool", POOLCONFIG_MAXDESCLEN);
+ i++;
+
+ StrNCpy(status[i].name, "min_spare_children", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->min_spare_children);
+ StrNCpy(status[i].desc, "min # of spare children waitting for connection", POOLCONFIG_MAXDESCLEN);
+ i++;
+
+ StrNCpy(status[i].name, "max_spare_children", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->max_spare_children);
+ StrNCpy(status[i].desc, "max # of spare children waitting for connection", POOLCONFIG_MAXDESCLEN);
+ i++;
+
/* - Life time - */
StrNCpy(status[i].name, "child_life_time", POOLCONFIG_MAXNAMELEN);
snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->child_life_time);
@@ -1384,45 +1395,49 @@ get_pools(int *nrows)
int lines = 0;
POOL_REPORT_POOLS *pools = palloc(
- pool_config->num_init_children * pool_config->max_pool * NUM_BACKENDS * sizeof(POOL_REPORT_POOLS)
+ pool_config->max_children * pool_config->max_pool * NUM_BACKENDS * sizeof(POOL_REPORT_POOLS)
);
- for (child = 0; child < pool_config->num_init_children; child++)
+ for (child = 0; child < pool_config->max_children; child++)
{
proc_id = process_info[child].pid;
- pi = pool_get_process_info(proc_id);
-
- for (pool = 0; pool < pool_config->max_pool; pool++)
+ if (proc_id)
{
- for (backend_id = 0; backend_id < NUM_BACKENDS; backend_id++)
+ pi = pool_get_process_info(proc_id);
+
+ for (pool = 0; pool < pool_config->max_pool; pool++)
{
- poolBE = pool * MAX_NUM_BACKENDS + backend_id;
- pools[lines].pool_pid = proc_id;
- pools[lines].start_time = pi->start_time;
- pools[lines].pool_id = pool;
- pools[lines].backend_id = backend_id;
- if (strlen(pi->connection_info[poolBE].database) == 0)
- {
- StrNCpy(pools[lines].database, "", POOLCONFIG_MAXIDENTLEN);
- StrNCpy(pools[lines].username, "", POOLCONFIG_MAXIDENTLEN);
- pools[lines].create_time = 0;
- pools[lines].pool_majorversion = 0;
- pools[lines].pool_minorversion = 0;
- }
- else
+ for (backend_id = 0; backend_id < NUM_BACKENDS; backend_id++)
{
- StrNCpy(pools[lines].database, pi->connection_info[poolBE].database, POOLCONFIG_MAXIDENTLEN);
- StrNCpy(pools[lines].username, pi->connection_info[poolBE].user, POOLCONFIG_MAXIDENTLEN);
- pools[lines].create_time = pi->connection_info[poolBE].create_time;
- pools[lines].pool_majorversion = pi->connection_info[poolBE].major;
- pools[lines].pool_minorversion = pi->connection_info[poolBE].minor;
+ poolBE = pool * MAX_NUM_BACKENDS + backend_id;
+ pools[lines].pool_pid = proc_id;
+ pools[lines].start_time = pi->start_time;
+ pools[lines].pool_id = pool;
+ pools[lines].backend_id = backend_id;
+ if (strlen(pi->connection_info[poolBE].database) == 0)
+ {
+ StrNCpy(pools[lines].database, "", POOLCONFIG_MAXIDENTLEN);
+ StrNCpy(pools[lines].username, "", POOLCONFIG_MAXIDENTLEN);
+ pools[lines].create_time = 0;
+ pools[lines].pool_majorversion = 0;
+ pools[lines].pool_minorversion = 0;
+ }
+ else
+ {
+ StrNCpy(pools[lines].database, pi->connection_info[poolBE].database, POOLCONFIG_MAXIDENTLEN);
+ StrNCpy(pools[lines].username, pi->connection_info[poolBE].user, POOLCONFIG_MAXIDENTLEN);
+ pools[lines].create_time = pi->connection_info[poolBE].create_time;
+ pools[lines].pool_majorversion = pi->connection_info[poolBE].major;
+ pools[lines].pool_minorversion = pi->connection_info[poolBE].minor;
+ }
+ pools[lines].pool_counter = pi->connection_info[poolBE].counter;
+ pools[lines].pool_backendpid = ntohl(pi->connection_info[poolBE].pid);
+ pools[lines].pool_connected = pi->connection_info[poolBE].connected;
+ lines++;
}
- pools[lines].pool_counter = pi->connection_info[poolBE].counter;
- pools[lines].pool_backendpid = ntohl(pi->connection_info[poolBE].pid);
- pools[lines].pool_connected = pi->connection_info[poolBE].connected;
- lines++;
}
}
+
}
*nrows = lines;
@@ -1580,37 +1595,53 @@ pools_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
POOL_REPORT_PROCESSES *
get_processes(int *nrows)
{
- int child;
+ int child = 0;
int pool;
int poolBE;
ProcessInfo *pi = NULL;
int proc_id;
+ int i,
+ cnt = 0;
- POOL_REPORT_PROCESSES *processes = palloc(pool_config->num_init_children * sizeof(POOL_REPORT_PROCESSES));
-
- for (child = 0; child < pool_config->num_init_children; child++)
+ for (i=0;i<pool_config->max_children;i++)
{
- proc_id = process_info[child].pid;
- pi = pool_get_process_info(proc_id);
+ if (process_info[i].pid != 0)
+ cnt++;
+ }
- snprintf(processes[child].pool_pid, POOLCONFIG_MAXCOUNTLEN, "%d", proc_id);
- strftime(processes[child].start_time, POOLCONFIG_MAXDATELEN, "%Y-%m-%d %H:%M:%S", localtime(&pi->start_time));
- StrNCpy(processes[child].database, "", POOLCONFIG_MAXIDENTLEN);
- StrNCpy(processes[child].username, "", POOLCONFIG_MAXIDENTLEN);
- StrNCpy(processes[child].create_time, "", POOLCONFIG_MAXDATELEN);
- StrNCpy(processes[child].pool_counter, "", POOLCONFIG_MAXCOUNTLEN);
+ POOL_REPORT_PROCESSES *processes = palloc(cnt * sizeof(POOL_REPORT_PROCESSES));
+
+
+ for (i = 0; i < pool_config->max_children; i++)
+ {
+ proc_id = process_info[i].pid;
- for (pool = 0; pool < pool_config->max_pool; pool++)
+ if(proc_id)
{
- poolBE = pool * MAX_NUM_BACKENDS;
- if (pi->connection_info[poolBE].connected && strlen(pi->connection_info[poolBE].database) > 0 && strlen(pi->connection_info[poolBE].user) > 0)
+
+ pi = pool_get_process_info(proc_id);
+
+ snprintf(processes[child].pool_pid, POOLCONFIG_MAXCOUNTLEN, "%d", proc_id);
+ strftime(processes[child].start_time, POOLCONFIG_MAXDATELEN, "%Y-%m-%d %H:%M:%S", localtime(&pi->start_time));
+ StrNCpy(processes[child].database, "", POOLCONFIG_MAXIDENTLEN);
+ StrNCpy(processes[child].username, "", POOLCONFIG_MAXIDENTLEN);
+ StrNCpy(processes[child].create_time, "", POOLCONFIG_MAXDATELEN);
+ StrNCpy(processes[child].pool_counter, "", POOLCONFIG_MAXCOUNTLEN);
+
+ for (pool = 0; pool < pool_config->max_pool; pool++)
{
- StrNCpy(processes[child].database, pi->connection_info[poolBE].database, POOLCONFIG_MAXIDENTLEN);
- StrNCpy(processes[child].username, pi->connection_info[poolBE].user, POOLCONFIG_MAXIDENTLEN);
- strftime(processes[child].create_time, POOLCONFIG_MAXDATELEN, "%Y-%m-%d %H:%M:%S", localtime(&pi->connection_info[poolBE].create_time));
- snprintf(processes[child].pool_counter, POOLCONFIG_MAXCOUNTLEN, "%d", pi->connection_info[poolBE].counter);
+ poolBE = pool * MAX_NUM_BACKENDS;
+ if (pi->connection_info[poolBE].connected && strlen(pi->connection_info[poolBE].database) > 0 && strlen(pi->connection_info[poolBE].user) > 0)
+ {
+ StrNCpy(processes[child].database, pi->connection_info[poolBE].database, POOLCONFIG_MAXIDENTLEN);
+ StrNCpy(processes[child].username, pi->connection_info[poolBE].user, POOLCONFIG_MAXIDENTLEN);
+ strftime(processes[child].create_time, POOLCONFIG_MAXDATELEN, "%Y-%m-%d %H:%M:%S", localtime(&pi->connection_info[poolBE].create_time));
+ snprintf(processes[child].pool_counter, POOLCONFIG_MAXCOUNTLEN, "%d", pi->connection_info[poolBE].counter);
+ }
}
+ child++;
}
+
}
*nrows = child;
diff --git a/src/watchdog/watchdog.c b/src/watchdog/watchdog.c
old mode 100644
new mode 100755
index 29cd1473..8e6f8e22
--- a/src/watchdog/watchdog.c
+++ b/src/watchdog/watchdog.c
@@ -7351,7 +7351,6 @@ verify_pool_configurations(WatchdogNode * wdNode, POOL_CONFIG * config)
{
int i;
- WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, num_init_children);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, listen_backlog_multiplier);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, child_life_time);
WD_VERIFY_RECEIVED_CONFIG_PARAMETER_VAL_INT(config, wdNode, connection_life_time);
diff --git a/src/watchdog/wd_json_data.c b/src/watchdog/wd_json_data.c
old mode 100644
new mode 100755
index 69719099..f10ed53d
--- a/src/watchdog/wd_json_data.c
+++ b/src/watchdog/wd_json_data.c
@@ -48,8 +48,6 @@ get_pool_config_from_json(char *json_data, int data_len)
if (root == NULL || root->type != json_object)
goto ERROR_EXIT;
- if (json_get_int_value_for_key(root, "num_init_children", &config->num_init_children))
- goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "listen_backlog_multiplier", &config->listen_backlog_multiplier))
goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "child_life_time", &config->child_life_time))
@@ -62,6 +60,12 @@ get_pool_config_from_json(char *json_data, int data_len)
goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "max_pool", &config->max_pool))
goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "max_children", &config->max_children))
+ goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "min_spare_children", &config->min_spare_children))
+ goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "max_spare_children", &config->max_spare_children))
+ goto ERROR_EXIT;
if (json_get_bool_value_for_key(root, "replication_mode", &config->replication_mode))
goto ERROR_EXIT;
if (json_get_bool_value_for_key(root, "enable_pool_hba", &config->enable_pool_hba))
@@ -178,13 +182,15 @@ get_pool_config_json(void)
JsonNode *jNode = jw_create_with_object(true);
- jw_put_int(jNode, "num_init_children", pool_config->num_init_children);
jw_put_int(jNode, "listen_backlog_multiplier", pool_config->listen_backlog_multiplier);
jw_put_int(jNode, "child_life_time", pool_config->child_life_time);
jw_put_int(jNode, "connection_life_time", pool_config->connection_life_time);
jw_put_int(jNode, "child_max_connections", pool_config->child_max_connections);
jw_put_int(jNode, "client_idle_limit", pool_config->client_idle_limit);
jw_put_int(jNode, "max_pool", pool_config->max_pool);
+ jw_put_int(jNode, "max_children", pool_config->max_children);
+ jw_put_int(jNode, "min_spare_children", pool_config->min_spare_children);
+ jw_put_int(jNode, "max_spare_children", pool_config->max_spare_children);
jw_put_bool(jNode, "replication_mode", pool_config->replication_mode);
jw_put_bool(jNode, "enable_pool_hba", pool_config->enable_pool_hba);
jw_put_bool(jNode, "load_balance_mode", pool_config->load_balance_mode);
>> Thanks. I am going to look into this.
>
> Can you please add documentation and tests for this? Otherwise it is
> hard to understand what the patch is trying to do. Also we are about
> to release alpha. So it is required to include documentation and tests
> to accept the patch.
>
>> In the mean time there are some white space errors in the patch. I
>> would appreciate if you fix them.
>>
>> $ git apply ~/on_demand_spawn.patch
>> /home/t-ishii/on_demand_spawn.patch:63: trailing whitespace.
>>
>> /home/t-ishii/on_demand_spawn.patch:119: trailing whitespace.
>>
>> /home/t-ishii/on_demand_spawn.patch:222: trailing whitespace.
>> int max_children; /* Maximum number of child to
>> /home/t-ishii/on_demand_spawn.patch:343: trailing whitespace.
>>
>> /home/t-ishii/on_demand_spawn.patch:360: trailing whitespace.
>>
>> warning: squelched 6 whitespace errors
>> warning: 11 lines add whitespace errors.
>>
>> Best regards,
>> --
>> Tatsuo Ishii
>> SRA OSS, Inc. Japan
>> English: http://www.sraoss.co.jp/index_en.php
>> Japanese:http://www.sraoss.co.jp
>>
>> From: 周建身 <zhoujianshen at highgo.com>
>> Subject: 答复: [pgpool-hackers: 3712] Re: discuss of pgpool enhancement
>> Date: Fri, 25 Sep 2020 10:28:34 +0000
>> Message-ID: <6d11923a4276453a8a89c4a87abb6958 at EX02.highgo.com>
>>
>>> Hello Tatsuo and Usama,
>>> I have worked on this topic of on-demand spawning of child processes. In this patch, I use the parameters max_children, max_spare_children, min_spare_children
>>>
>>> # min_spare_children: minimum number of server processes which are kept spare
>>> # max_spare_children: maximum number of server processes which are kept spare
>>> # max_children: maximum number of server processes allowed to start
>>> My patch is in the attachment.
>>> Any comments and suggestions are welcome.
>>>
>>> Thanks
>>> Best regards
>>> Zhou Jianshen
>>> zhoujianshen at highgo.com
>>> ________________________________________
>>> 发件人: Tatsuo Ishii <ishii at sraoss.co.jp>
>>> 发送时间: 2020年7月14日 19:40
>>> 收件人: m.usama at gmail.com
>>> 抄送: 周建身; Muhammad; pgpool-hackers at pgpool.net
>>> 主题: Re: [pgpool-hackers: 3712] Re: discuss of pgpool enhancement
>>>
>>>> Hi Jianshen,
>>>>
>>>> I think it is a very good idea to have on-demand spawning of the
>>>> child processes and it will enable us to effectively configure the
>>>> Pgpool-II in environments where we get instantaneous connection spikes.
>>>> Currently we have to configure the Pgpool's num_init_children to a
>>>> value of "maximum number of connections expected" and most of the
>>>> time, 50 to 60 percent of child processes keep sitting idle and
>>>> consuming system resources.
>>>>
>>>> Similarly, I also agree with having an option of the global connection
>>>> pool and I believe that will enable us to have less number of opened backend
>>>> connections and also in the future we can build a different type of pooling
>>>> options on that like transaction pooling and similar features.
>>>
>>> Not sure about this. Suppose we have num_init_children = 4 and max_pool
>>> = 1. We have 4 users u1, u2, u3 and u4. For simplicity, all those
>>> users connect to the same database.
>>>
>>> Current Pgpool-II:
>>> 1. u1 connects to pgpool child p1 and creates connection pool p_u1.
>>> 2. u2 connects to pgpool child p2 and creates connection pool p_u2.
>>> 3. u3 connects to pgpool child p3 and creates connection pool p_u3.
>>> 4. u4 connects to pgpool child p4 and creates connection pool p_u4.
>>>
>>> Pgpool-II with global connection pooling.
>>> 1. u1 connects to pgpool child p1 and creates connection pool p_u1.
>>> 2. u2 connects to pgpool child p2 and creates connection pool p_u2.
>>> 3. u3 connects to pgpool child p3 and creates connection pool p_u3.
>>> 4. u4 connects to pgpool child p4 and creates connection pool p_u4.
>>>
>>> So there's no difference with/without global connection pooling in the
>>> end.
>>>
>>> The case when global connection pooling wins would be when the number of
>>> kinds of users and concurrent connections are much smaller than
>>> num_init_children. For example, if there's only one user u1 and
>>> there's only one concurrent connection, we will have:
>>>
>>> Current Pgpool-II:
>>> 1. u1 connects to pgpool child p1 and creates connection pool p_u1.
>>> 2. u1 connects to pgpool child p2 and creates connection pool p_u2.
>>> 3. u1 connects to pgpool child p3 and creates connection pool p_u3.
>>> 4. u1 connects to pgpool child p4 and creates connection pool p_u4.
>>>
>>> Pgpool-II with global connection pooling.
>>> 1. u1 connects to pgpool child p1 and creates connection pool p_u1.
>>> 2. u1 connects to pgpool child p2 and reuses connection pool p_u1.
>>> 3. u1 connects to pgpool child p3 and reuses connection pool p_u1.
>>> 4. u1 connects to pgpool child p4 and reuses connection pool p_u1.
>>>
>>> Thus global connection pooling wins by having only 1 connection pool if
>>> the number of kinds of users and concurrent connections are much smaller
>>> than num_init_children.
>>>
>>> But the question is, if we have only 1 concurrent session,
>>> num_init_children = 1 would be enough in the first place. In this case
>>> we will have similar result with current Pgpool-II.
>>>
>>> 1. u1 connects to pgpool child p1 and creates connection pool p_u1.
>>> 2. u1 connects to pgpool child p1 and reuses connection pool p_u1.
>>> 3. u1 connects to pgpool child p1 and reuses connection pool p_u1.
>>> 4. u1 connects to pgpool child p1 and reuses connection pool p_u1.
>>>
>>> So there's no point to have global connection pool here.
>>>
>>>> IMHO we should take both of these features as a separate project.
>>>> We can start with on-demand child spawning feature and once we have
>>>> that in Pgpool-II we build the global connection pool option on top of that.
>>>>
>>>> So if you are interested in working on that, you can send the proposal and
>>>> include the details like how are you planning to manage the
>>>> child-process-pool
>>>> and when will the Pgpool-II spawn and destroy the child processes?
>>>> My idea would be to make child-process-pool as much configurable as
>>>> possible.
>>>> Some of the configuration parameters I can think of for the purpose are.
>>>>
>>>> CPP_batch_size /* number of child
>>>> process we will spawn when required */
>>>>
>>>> CPP_downscale_trigger /* number of idle child
>>>> process in Pgpool-II to start
>>>> * killing
>>>> the idle child process */
>>>>
>>>> CPP_upscale_trigger /* number of idle child
>>>> process in Pgpool-II to start
>>>> * spawning
>>>> new child process */
>>>>
>>>> CPP here stands for CHILD-PROCESS-POOL and these are just my thoughts and
>>>> you may want to choose
>>>> different names and/or different types of configurations altogether.
>>>
>>> Apache already has similar parameters:
>>>
>>> -------------------------------------------------------------------------
>>> # prefork MPM
>>> # StartServers: number of server processes to start
>>> # MinSpareServers: minimum number of server processes which are kept spare
>>> # MaxSpareServers: maximum number of server processes which are kept spare
>>> # MaxRequestWorkers: maximum number of server processes allowed to start
>>>
>>> StartServers 5
>>> MinSpareServers 5
>>> MaxSpareServers 10
>>> MaxRequestWorkers 150
>>> -------------------------------------------------------------------------
>>>
>>> I think our num_init_children looks similar to StartServers. So all we
>>> have to have are MinSpareServers, MaxSpareServers, and
>>> MaxRequestWorkers. (Probably we should rename them to more appropriate
>>> ones).
>>>
>>>> Looking forward to getting an actual proposal.
>>>>
>>>> Thanks
>>>> Best regards
>>>> Muhammad Usama
>>>>
>>>>
>>>>
>>>> On Mon, Jul 13, 2020 at 2:56 PM 周建身 <zhoujianshen at highgo.com> wrote:
>>>>
>>>>> Hello Usama and Hackers,
>>>>>
>>>>> I have tested the pgpool connection pool.And I think there are some
>>>>> parts need to be enhanced.
>>>>>
>>>>> When you set the parameter num_init_children = 32, only 32 child
>>>>> processes will be forked and waiting for connections from clients. Each
>>>>> child process can only receive one client connection; therefore, only 32
>>>>> clients can connect to pgpool at the same time. The extra
>>>>> connections can only wait for a previous connection to
>>>>> be disconnected. So, can we change the waiting connection structure of
>>>>> pgpool? When pgpool starts, we can fork ten child processes to wait for
>>>>> clients to connect. When a child process receives a connection request,
>>>>> it creates a new child process to maintain the session connection.
>>>>>
>>>>> there is also another one which should be enhance is the connection
>>>>> pool.Now, for each connection, the child process can only receive one
>>>>> client connection. Therefore, the connection pool in the child process does
>>>>> not play a global reuse effect.And each connection will re-initialize the
>>>>> connection pool. Therefore we should implement a global connection pool to
>>>>> achieve the effect of back end reuse.However ,we should confirm how many
>>>>> connections the global connection pool should maintain.And also we should
>>>>> confirm that if the connection pool is full,how should we respond to the
>>>>> arrival of new connections.I can come up with two kind of solutions.
>>>>>
>>>>> The first one is waiting until the connection in the connection pool
>>>>> disconnected.and then receive the new connection.The second one,We should
>>>>> check the number of connection and the last access time of the connection
>>>>> in connection pool.And we replace the connection which has the oldest
>>>>> access time in the connection pool to the new connection.or we periodic
>>>>> detection the access time of each connection,and throw away the connections
>>>>> whose access time exceed a certain value.then we can use the extra space in
>>>>> connection pool.
>>>>>
>>>>> In my opinion,these two aspects need to be enhanced.How about your
>>>>> opinion.And what do you think we need to do to enhance these two
>>>>> aspects.Any suggestions and comments are welcome.
>>>>>
>>>>>
>>>>>
>>>>> Thanks
>>>>> Best regards
>>>>> Zhou Jianshen
>>>>> zhoujianshen at highgo.com
>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> _______________________________________________
>>>>> pgpool-hackers mailing list
>>>>> pgpool-hackers at pgpool.net
>>>>> http://www.pgpool.net/mailman/listinfo/pgpool-hackers
>>>>>
>> _______________________________________________
>> pgpool-hackers mailing list
>> pgpool-hackers at pgpool.net
>> http://www.pgpool.net/mailman/listinfo/pgpool-hackers
> _______________________________________________
> pgpool-hackers mailing list
> pgpool-hackers at pgpool.net
> http://www.pgpool.net/mailman/listinfo/pgpool-hackers
More information about the pgpool-hackers
mailing list