static char *build_client_final_message(fe_scram_state *state);
static bool verify_server_signature(fe_scram_state *state);
static void calculate_client_proof(fe_scram_state *state,
- const char *client_final_message_without_proof,
- uint8 *result);
+ const char *client_final_message_without_proof,
+ uint8 *result);
static void read_client_first_message(scram_state *state, char *input);
static void read_client_final_message(scram_state *state, char *input);
static bool verify_client_proof(scram_state *state);
static bool verify_final_nonce(scram_state *state);
static bool parse_scram_verifier(const char *verifier, int *iterations,
- char **salt, uint8 *stored_key, uint8 *server_key);
+ char **salt, uint8 *stored_key, uint8 *server_key);
static void mock_scram_verifier(const char *username, int *iterations,
- char **salt, uint8 *stored_key, uint8 *server_key);
+ char **salt, uint8 *stored_key, uint8 *server_key);
static bool is_scram_printable(char *p);
static char *sanitize_char(char c);
static char *GetMockAuthenticationNonce(void);
char *server_signature_base64;
int siglen;
scram_HMAC_ctx ctx;
- char *res;
+ char *res;
/* calculate ServerSignature */
scram_HMAC_init(&ctx, state->ServerKey, SCRAM_KEY_LEN);
#define MAX_SASL_PAYLOAD_LEN 1024
-static void pool_send_backend_key_data(POOL_CONNECTION * frontend, int pid,
+static void pool_send_backend_key_data(POOL_CONNECTION *frontend, int pid,
char *key, int32 keylen, int protoMajor);
-static int do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor);
-static void pool_send_auth_fail(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp);
-static int do_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor,
- char *storedPassword, PasswordType passwordType);
-static void send_md5auth_request(POOL_CONNECTION * frontend, int protoMajor, char *salt);
-static int read_password_packet(POOL_CONNECTION * frontend, int protoMajor, char *password, int *pwdSize);
-static int send_password_packet(POOL_CONNECTION * backend, int protoMajor, char *password);
-static int send_auth_ok(POOL_CONNECTION * frontend, int protoMajor);
-static void sendAuthRequest(POOL_CONNECTION * frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen);
-
-static int pg_SASL_continue(POOL_CONNECTION * backend, char *payload, int payloadlen, void *sasl_state, bool final);
-static void *pg_SASL_init(POOL_CONNECTION * backend, char *payload, int payloadlen, char *username, char *storedPassword);
-static bool do_SCRAM(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoMajor, int message_length,
- char *username, char *storedPassword, PasswordType passwordType);
-static void authenticate_frontend_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor);
-static void authenticate_frontend_cert(POOL_CONNECTION * frontend);
-static void authenticate_frontend_SCRAM(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth);
-static void authenticate_frontend_clear_text(POOL_CONNECTION * frontend);
-static bool get_auth_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth,
- char **password, PasswordType *passwordType);
-static void ProcessNegotiateProtocol(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp);
+static int do_clear_text_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor);
+static void pool_send_auth_fail(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp);
+static int do_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor,
+ char *storedPassword, PasswordType passwordType);
+static void send_md5auth_request(POOL_CONNECTION *frontend, int protoMajor, char *salt);
+static int read_password_packet(POOL_CONNECTION *frontend, int protoMajor, char *password, int *pwdSize);
+static int send_password_packet(POOL_CONNECTION *backend, int protoMajor, char *password);
+static int send_auth_ok(POOL_CONNECTION *frontend, int protoMajor);
+static void sendAuthRequest(POOL_CONNECTION *frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen);
+
+static int pg_SASL_continue(POOL_CONNECTION *backend, char *payload, int payloadlen, void *sasl_state, bool final);
+static void *pg_SASL_init(POOL_CONNECTION *backend, char *payload, int payloadlen, char *username, char *storedPassword);
+static bool do_SCRAM(POOL_CONNECTION *frontend, POOL_CONNECTION *backend, int protoMajor, int message_length,
+ char *username, char *storedPassword, PasswordType passwordType);
+static void authenticate_frontend_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor);
+static void authenticate_frontend_cert(POOL_CONNECTION *frontend);
+static void authenticate_frontend_SCRAM(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth);
+static void authenticate_frontend_clear_text(POOL_CONNECTION *frontend);
+static bool get_auth_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth,
+ char **password, PasswordType *passwordType);
+static void ProcessNegotiateProtocol(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp);
/*
* Do authentication. Assuming the only caller is
* make_persistent_db_connection().
*/
void
-connection_do_auth(POOL_CONNECTION_POOL_SLOT * cp, char *password)
+connection_do_auth(POOL_CONNECTION_POOL_SLOT *cp, char *password)
{
char kind;
int length;
switch (kind)
{
- char *p;
- int32 keylen;
+ char *p;
+ int32 keylen;
case 'K': /* backend key data */
keydata_done = true;
{
ereport(ERROR,
(errmsg("failed to authenticate"),
- errdetail("invalid backend key data length. received %d bytes exceeding %d",
+ errdetail("invalid backend key data length. received %d bytes exceeding %d",
ntohl(length), MAX_CANCELKEY_LENGTH)));
}
* 0.
*/
int
-pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
+pool_do_auth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
signed char kind;
int pid;
int i;
int message_length = 0;
StartupPacket *sp;
- int32 keylen; /* cancel key length */
- char cancel_key[MAX_CANCELKEY_LENGTH];
- char *p;
+ int32 keylen; /* cancel key length */
+ char cancel_key[MAX_CANCELKEY_LENGTH];
+ char *p;
protoMajor = MAIN_CONNECTION(cp)->sp->major;
&password, &passwordType) == false)
{
/*
- * We do not have any password, we can still get the password
- * from client using plain text authentication if it is
- * allowed by user
+ * We do not have any password, we can still get the password from
+ * client using plain text authentication if it is allowed by user
*/
if (frontend->pool_hba == NULL && pool_config->allow_clear_text_frontend_auth)
{
}
}
else
- keylen = 4;
+ keylen = 4;
elog(DEBUG1, "cancel key length: %d", keylen);
* throws ereport.
*/
int
-pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
+pool_do_reauth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
int protoMajor;
int msglen;
- POOL_CONNECTION_POOL_SLOT *sp;
+ POOL_CONNECTION_POOL_SLOT *sp;
protoMajor = MAJOR(cp);
* send authentication failure message text to frontend
*/
static void
-pool_send_auth_fail(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
+pool_send_auth_fail(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
int messagelen;
char *errmessage;
* Send backend key data to frontend.
*/
static void
-pool_send_backend_key_data(POOL_CONNECTION * frontend, int pid,
+pool_send_backend_key_data(POOL_CONNECTION *frontend, int pid,
char *key, int32 keylen, int protoMajor)
{
char kind;
}
static void
-authenticate_frontend_clear_text(POOL_CONNECTION * frontend)
+authenticate_frontend_clear_text(POOL_CONNECTION *frontend)
{
static int size;
char password[MAX_PASSWORD_SIZE];
* perform clear text password authentication
*/
static int
-do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor)
+do_clear_text_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor)
{
static int size;
char *pwd = NULL;
else if (!rtn || frontend->pwd_size == 0)
{
/*
- * We do not have any password, we can still get the password
- * from client using plain text authentication if it is
- * allowed by user
+ * We do not have any password, we can still get the password from
+ * client using plain text authentication if it is allowed by user
*/
if (frontend->pool_hba == NULL ||
frontend->pool_hba->auth_method == uaPassword ||
- pool_config->allow_clear_text_frontend_auth )
+ pool_config->allow_clear_text_frontend_auth)
{
ereport(DEBUG1,
- (errmsg("using clear text authentication with frontend"),
+ (errmsg("using clear text authentication with frontend"),
errdetail("backend is using password authentication")));
authenticate_frontend_clear_text(frontend);
{
ereport(FATAL,
(return_code(2),
- errmsg("clear text password authentication failed"),
- errdetail("unable to get the password for user: \"%s\"", frontend->username)));
+ errmsg("clear text password authentication failed"),
+ errdetail("unable to get the password for user: \"%s\"", frontend->username)));
}
}
}
* password in the pool_passwd file.
*/
static void
-authenticate_frontend_SCRAM(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth)
+authenticate_frontend_SCRAM(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth)
{
void *scram_opaq;
char *output = NULL;
PasswordType storedPasswordType = PASSWORD_TYPE_UNKNOWN;
char *storedPassword = NULL;
- if (get_auth_password(backend, frontend, reauth,&storedPassword, &storedPasswordType) == false)
+ if (get_auth_password(backend, frontend, reauth, &storedPassword, &storedPasswordType) == false)
{
ereport(FATAL,
(return_code(2),
* Authenticate frontend using pool_hba.conf
*/
void
-authenticate_frontend(POOL_CONNECTION * frontend)
+authenticate_frontend(POOL_CONNECTION *frontend)
{
switch (frontend->pool_hba->auth_method)
{
#ifdef USE_SSL
static void
-authenticate_frontend_cert(POOL_CONNECTION * frontend)
+authenticate_frontend_cert(POOL_CONNECTION *frontend)
{
if (frontend->client_cert_loaded == true && frontend->cert_cn)
{
}
#else
static void
-authenticate_frontend_cert(POOL_CONNECTION * frontend)
+authenticate_frontend_cert(POOL_CONNECTION *frontend)
{
ereport(ERROR,
(errmsg("CERT authentication failed"),
#endif
static void
-authenticate_frontend_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor)
+authenticate_frontend_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor)
{
char salt[4];
static int size;
PasswordType storedPasswordType = PASSWORD_TYPE_UNKNOWN;
char *storedPassword = NULL;
- if (get_auth_password(backend, frontend, reauth,&storedPassword, &storedPasswordType) == false)
+ if (get_auth_password(backend, frontend, reauth, &storedPassword, &storedPasswordType) == false)
{
ereport(FATAL,
(return_code(2),
* it.
*/
static bool
-get_auth_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth,
+get_auth_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth,
char **password, PasswordType *passwordType)
{
/* First preference is to use the pool_passwd file */
* perform MD5 authentication
*/
static int
-do_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor,
+do_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor,
char *storedPassword, PasswordType passwordType)
{
char salt[4];
* Send an authentication request packet to the frontend.
*/
static void
-sendAuthRequest(POOL_CONNECTION * frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen)
+sendAuthRequest(POOL_CONNECTION *frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen)
{
int kind = htonl(auth_req_type);
* Send md5 authentication request packet to frontend
*/
static void
-send_md5auth_request(POOL_CONNECTION * frontend, int protoMajor, char *salt)
+send_md5auth_request(POOL_CONNECTION *frontend, int protoMajor, char *salt)
{
sendAuthRequest(frontend, protoMajor, AUTH_REQ_MD5, salt, 4);
}
* Read password packet from frontend
*/
static int
-read_password_packet(POOL_CONNECTION * frontend, int protoMajor, char *password, int *pwdSize)
+read_password_packet(POOL_CONNECTION *frontend, int protoMajor, char *password, int *pwdSize)
{
int size;
* "password" must be null-terminated.
*/
static int
-send_password_packet(POOL_CONNECTION * backend, int protoMajor, char *password)
+send_password_packet(POOL_CONNECTION *backend, int protoMajor, char *password)
{
int size;
int len;
* Send auth ok to frontend
*/
static int
-send_auth_ok(POOL_CONNECTION * frontend, int protoMajor)
+send_auth_ok(POOL_CONNECTION *frontend, int protoMajor)
{
int msglen;
}
static bool
-do_SCRAM(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoMajor, int message_length,
+do_SCRAM(POOL_CONNECTION *frontend, POOL_CONNECTION *backend, int protoMajor, int message_length,
char *username, char *storedPassword, PasswordType passwordType)
{
/* read the packet first */
}
static void *
-pg_SASL_init(POOL_CONNECTION * backend, char *payload, int payloadlen, char *username, char *storedPassword)
+pg_SASL_init(POOL_CONNECTION *backend, char *payload, int payloadlen, char *username, char *storedPassword)
{
char *initialresponse = NULL;
int initialresponselen;
* the protocol.
*/
static int
-pg_SASL_continue(POOL_CONNECTION * backend, char *payload, int payloadlen, void *sasl_state, bool final)
+pg_SASL_continue(POOL_CONNECTION *backend, char *payload, int payloadlen, void *sasl_state, bool final)
{
char *output;
int outputlen;
static void
ProcessNegotiateProtocol(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
- int32 len;
- int32 savelen;
- int32 protoMajor;
- int32 protoMinor;
- int32 protov;
- bool forwardMsg = false;
- int i;
+ int32 len;
+ int32 savelen;
+ int32 protoMajor;
+ int32 protoMinor;
+ int32 protov;
+ bool forwardMsg = false;
+ int i;
elog(DEBUG1, "Forwarding NegotiateProtocol message to frontend");
pool_write(frontend, "v", 1); /* forward message kind */
- savelen = len = pool_read_int(cp); /* message length including self */
+ savelen = len = pool_read_int(cp); /* message length including self */
pool_write(frontend, &len, 4); /* forward message length */
- len = ntohl(len) - 4; /* length of rest of the message */
- protov = pool_read_int(cp); /* read protocol version */
- protoMajor = PG_PROTOCOL_MAJOR(ntohl(protov)); /* protocol major version */
+ len = ntohl(len) - 4; /* length of rest of the message */
+ protov = pool_read_int(cp); /* read protocol version */
+ protoMajor = PG_PROTOCOL_MAJOR(ntohl(protov)); /* protocol major version */
protoMinor = PG_PROTOCOL_MINOR(ntohl(protov)); /* protocol minor version */
pool_write(frontend, &protov, 4); /* forward protocol version */
elog(DEBUG1, "protocol verion offered: major: %d minor: %d", protoMajor, protoMinor);
{
if (VALID_BACKEND(i))
{
- POOL_CONNECTION_POOL_SLOT *sp;
- char *p;
- char *np;
- Size nplen;
+ POOL_CONNECTION_POOL_SLOT *sp;
+ char *p;
+ char *np;
+ Size nplen;
p = pool_read2(CONNECTION(cp, i), len);
if (!forwardMsg)
{
- pool_write_and_flush(frontend, p, len); /* forward rest of message */
+ pool_write_and_flush(frontend, p, len); /* forward rest of
+ * message */
forwardMsg = true;
}
/* save negotiated protocol version */
sp->negotiated_minor = protoMinor;
/* save negotiated protocol message */
- nplen = 1 + /* message kind */
+ nplen = 1 + /* message kind */
sizeof(savelen) + /* message length */
sizeof(protov) + /* protocol version */
- len; /* rest of message */
+ len; /* rest of message */
/* allocate message area */
sp->negotiateProtocolMsg = MemoryContextAlloc(TopMemoryContext, nplen);
np = sp->negotiateProtocolMsg;
static HbaToken *make_hba_token(const char *token, bool quoted);
static bool parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
- int elevel, char **err_msg);
+ int elevel, char **err_msg);
static MemoryContext tokenize_file(const char *filename, FILE *file,
- List **tok_lines, int elevel);
-static void sendAuthRequest(POOL_CONNECTION * frontend, AuthRequest areq);
-static void auth_failed(POOL_CONNECTION * frontend);
-static bool hba_getauthmethod(POOL_CONNECTION * frontend);
-static bool check_hba(POOL_CONNECTION * frontend);
+ List **tok_lines, int elevel);
+static void sendAuthRequest(POOL_CONNECTION *frontend, AuthRequest areq);
+static void auth_failed(POOL_CONNECTION *frontend);
+static bool hba_getauthmethod(POOL_CONNECTION *frontend);
+static bool check_hba(POOL_CONNECTION *frontend);
static bool check_user(char *user, List *tokens);
static bool check_db(const char *dbname, const char *user, List *tokens);
static List *tokenize_inc_file(List *tokens,
- const char *outer_filename,
- const char *inc_filename,
- int elevel,
- char **err_msg);
+ const char *outer_filename,
+ const char *inc_filename,
+ int elevel,
+ char **err_msg);
static bool
- check_hostname(POOL_CONNECTION * frontend, const char *hostname);
+ check_hostname(POOL_CONNECTION *frontend, const char *hostname);
static bool
check_ip(SockAddr *raddr, struct sockaddr *addr, struct sockaddr *mask);
static bool
check_same_host_or_net(SockAddr *raddr, IPCompareMethod method);
static void check_network_callback(struct sockaddr *addr, struct sockaddr *netmask,
- void *cb_data);
+ void *cb_data);
static HbaLine *parse_hba_line(TokenizedLine *tok_line, int elevel);
static bool pg_isblank(const char c);
static bool next_token(char **lineptr, char *buf, int bufsz,
- bool *initial_quote, bool *terminating_comma,
- int elevel, char **err_msg);
+ bool *initial_quote, bool *terminating_comma,
+ int elevel, char **err_msg);
static List *next_field_expand(const char *filename, char **lineptr,
- int elevel, char **err_msg);
+ int elevel, char **err_msg);
#ifdef NOT_USED
static POOL_STATUS CheckUserExist(char *username);
#endif
#define PGPOOL_PAM_SERVICE "pgpool" /* Service name passed to PAM */
-static POOL_STATUS CheckPAMAuth(POOL_CONNECTION * frontend, char *user, char *password);
+static POOL_STATUS CheckPAMAuth(POOL_CONNECTION *frontend, char *user, char *password);
static int pam_passwd_conv_proc(int num_msg, const struct pam_message **msg, struct pam_response **resp, void *appdata_ptr);
static struct pam_conv pam_passwd_conv = {
};
static char *pam_passwd = NULL; /* Workaround for Solaris 2.6 brokenness */
-static POOL_CONNECTION * pam_frontend_kludge; /* Workaround for passing
+static POOL_CONNECTION *pam_frontend_kludge; /* Workaround for passing
* POOL_CONNECTION *frontend
* into pam_passwd_conv_proc */
#endif /* USE_PAM */
* so declare a prototype here in "#if defined(USE_PAM or USE_LDAP)" to avoid
* compilation warning.
*/
-static char *recv_password_packet(POOL_CONNECTION * frontend);
+static char *recv_password_packet(POOL_CONNECTION *frontend);
#endif /* USE_PAM or USE_LDAP */
/*
}
#ifdef USE_LDAP
+
/*
* Check if the selected authentication method has any mandatory arguments
* that are not set.
* do frontend <-> pgpool authentication based on pool_hba.conf
*/
void
-ClientAuthentication(POOL_CONNECTION * frontend)
+ClientAuthentication(POOL_CONNECTION *frontend)
{
POOL_STATUS status = POOL_END;
MemoryContext oldContext;
/*
* Get the password for the user if it is stored in the pool_password
- * file
- * authentication process is called in the temporary memory
- * context, but password mappings has to live till the life time
- * of frontend connection, so call the pool_get_user_credentials in
+ * file authentication process is called in the temporary memory
+ * context, but password mappings has to live till the life time of
+ * frontend connection, so call the pool_get_user_credentials in
* ProcessLoopContext memory context
*/
oldContext = MemoryContextSwitchTo(ProcessLoopContext);
#ifdef USE_SSL
ereport(FATAL,
- (return_code(2),
+ (return_code(2),
errmsg("client authentication failed"),
errdetail("no pool_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, frontend->username, frontend->database,
static void
-sendAuthRequest(POOL_CONNECTION * frontend, AuthRequest areq)
+sendAuthRequest(POOL_CONNECTION *frontend, AuthRequest areq)
{
int wsize; /* number of bytes to write */
int areq_nbo; /* areq in network byte order */
* Returns NULL if couldn't get password, else palloc'd string.
*/
static char *
-recv_password_packet(POOL_CONNECTION * frontend)
+recv_password_packet(POOL_CONNECTION *frontend)
{
int rsize;
char *passwd;
* Tell the user the authentication failed.
*/
static void
-auth_failed(POOL_CONNECTION * frontend)
+auth_failed(POOL_CONNECTION *frontend)
{
int messagelen;
char *errmessage;
* we return true and method = uaReject.
*/
static bool
-hba_getauthmethod(POOL_CONNECTION * frontend)
+hba_getauthmethod(POOL_CONNECTION *frontend)
{
if (check_hba(frontend))
return true;
* request.
*/
static bool
-check_hba(POOL_CONNECTION * frontend)
+check_hba(POOL_CONNECTION *frontend)
{
ListCell *line;
HbaLine *hba;
* Check to see if a connecting IP matches a given host name.
*/
static bool
-check_hostname(POOL_CONNECTION * frontend, const char *hostname)
+check_hostname(POOL_CONNECTION *frontend, const char *hostname)
{
struct addrinfo *gai_result,
*gai;
/*
* Check authentication against PAM.
*/
-static POOL_STATUS CheckPAMAuth(POOL_CONNECTION * frontend, char *user, char *password)
+static POOL_STATUS
+CheckPAMAuth(POOL_CONNECTION *frontend, char *user, char *password)
{
int retval;
pam_handle_t *pamh = NULL;
* later used inside the PAM conversation to pass the password to the
* authentication module.
*/
- pam_passwd_conv.appdata_ptr = (char *) password; /* from password above,
- * not allocated */
+ pam_passwd_conv.appdata_ptr = (char *) password; /* from password above,
+ * not allocated */
/* Optionally, one can set the service name in pool_hba.conf */
if (frontend->pool_hba->pamservice && frontend->pool_hba->pamservice[0] != '\0')
/*
* Check authentication against LDAP.
*/
-static POOL_STATUS CheckLDAPAuth(POOL_CONNECTION * frontend)
+static POOL_STATUS
+CheckLDAPAuth(POOL_CONNECTION *frontend)
{
char *passwd;
LDAP *ldap;
passwd = recv_password_packet(frontend);
if (passwd == NULL)
- return -2; /* client wouldn't send password */
+ return -2; /* client wouldn't send password */
if (InitializeLDAPConnection(frontend, &ldap) == -1)
{
#endif /* USE_LDAP */
#ifdef NOT_USED
-static POOL_STATUS CheckUserExist(char *username)
+static POOL_STATUS
+CheckUserExist(char *username)
{
char *passwd;
if (pool_passwd_filename == NULL)
{
- saved_passwd_filename[0] = '\0'; /* indicate pool_passwd is disabled */
+ saved_passwd_filename[0] = '\0'; /* indicate pool_passwd is
+ * disabled */
return;
}
{
#define LINE_LEN \
MAX_USER_NAME_LEN + 1 + MAX_POOL_PASSWD_LEN + 2
- char linebuf[LINE_LEN];
- char *writebuf = NULL;
- int len;
- bool updated = false;
+ char linebuf[LINE_LEN];
+ char *writebuf = NULL;
+ int len;
+ bool updated = false;
if (!passwd_fd)
ereport(ERROR,
while (!feof(passwd_fd) && !ferror(passwd_fd))
{
- char *t = linebuf;
- int len;
+ char *t = linebuf;
+ int len;
if (fgets(linebuf, sizeof(linebuf), passwd_fd) == NULL)
break;
strcat(writebuf, linebuf);
}
- if(!writebuf)
+ if (!writebuf)
return 0;
fclose(passwd_fd);
{
if (strlen(saved_passwd_filename))
ereport(ERROR,
- (errmsg("unable to get password, password file descriptor is NULL")));
+ (errmsg("unable to get password, password file descriptor is NULL")));
else
return NULL;
}
{
if (strlen(saved_passwd_filename))
ereport(WARNING,
- (errmsg("unable to get password, password file descriptor is NULL")));
+ (errmsg("unable to get password, password file descriptor is NULL")));
return NULL;
}
}
void
-delete_passwordMapping(PasswordMapping * pwdMapping)
+delete_passwordMapping(PasswordMapping *pwdMapping)
{
if (!pwdMapping)
return;
PasswordMapping *password_mapping = NULL;
/*
- * if the password specified in config is empty string or NULL look for the
- * password in pool_passwd file
+ * if the password specified in config is empty string or NULL look for
+ * the password in pool_passwd file
*/
if (password_in_config == NULL || strlen(password_in_config) == 0)
{
/* convert the TEXT prefixed password to plain text password */
passwordType = PASSWORD_TYPE_PLAINTEXT;
if (password)
- password = (char*)(password + strlen(PASSWORD_TEXT_PREFIX));
+ password = (char *) (password + strlen(PASSWORD_TEXT_PREFIX));
}
if (password && strlen(password) && (passwordType != PASSWORD_TYPE_PLAINTEXT &&
return NULL;
/*
- * To prevent file-swapping due to file race conditions,
- * we open the key file before checking it by stat().
+ * To prevent file-swapping due to file race conditions, we open the key
+ * file before checking it by stat().
*/
/* If password file cannot be opened, ignore it. */
- if ( (fp = fopen(key_file_path, "r")) == NULL)
+ if ((fp = fopen(key_file_path, "r")) == NULL)
return NULL;
if (fstat(fileno(fp), &stat_buf) != 0)
PasswordType passwordType = PASSWORD_TYPE_UNKNOWN;
/*
- * if the password specified in config is empty string or NULL look for the
- * password in pool_passwd file
+ * if the password specified in config is empty string or NULL look for
+ * the password in pool_passwd file
*/
if (password_in_config == NULL || strlen(password_in_config) == 0)
{
PasswordMapping *password_mapping = NULL;
+
password_mapping = pool_get_user_credentials(username);
if (password_mapping == NULL)
{
passwordType = get_password_type(password_in_config);
}
- /* if the password type is MD5 hash return -1*/
+ /* if the password type is MD5 hash return -1 */
if (passwordType == PASSWORD_TYPE_MD5)
{
return -1;
static void sort_config_vars(void);
static bool setConfigOptionArrayVarWithConfigDefault(struct config_generic *record, const char *name,
- const char *value, ConfigContext context, int elevel);
+ const char *value, ConfigContext context, int elevel);
static bool setConfigOption(const char *name, const char *value,
- ConfigContext context, GucSource source, int elevel);
+ ConfigContext context, GucSource source, int elevel);
static bool setConfigOptionVar(struct config_generic *record, const char *name, int index_val,
- const char *value, ConfigContext context, GucSource source, int elevel);
+ const char *value, ConfigContext context, GucSource source, int elevel);
static bool get_index_in_var_name(struct config_generic *record,
- const char *name, int *index, int elevel);
+ const char *name, int *index, int elevel);
static bool MakeUserRedirectListRegex(char *newval, int elevel);
static bool MakeDBRedirectListRegex(char *newval, int elevel);
static bool MakeAppRedirectListRegex(char *newval, int elevel);
static bool MakeDMLAdaptiveObjectRelationList(char *newval, int elevel);
-static char* getParsedToken(char *token, DBObjectTypes *object_type);
+static char *getParsedToken(char *token, DBObjectTypes *object_type);
static bool check_redirect_node_spec(char *node_spec);
static char **get_list_from_string(const char *str, const char *delimi, int *n);
#ifndef POOL_PRIVATE
static void convert_int_from_base_unit(int64 base_value, int base_unit,
- int64 *value, const char **unit);
+ int64 *value, const char **unit);
/* These functions are used to provide Hints for enum type config parameters and
static char *ShowOption(struct config_generic *record, int index, int elevel);
static char *config_enum_get_options(struct config_enum *record, const char *prefix,
- const char *suffix, const char *separator);
-static void send_row_description_for_detail_view(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-static int send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record,
- POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-static int send_array_type_variable_to_frontend(struct config_generic *record,
- POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+ const char *suffix, const char *separator);
+static void send_row_description_for_detail_view(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+static int send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record,
+ POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+static int send_array_type_variable_to_frontend(struct config_generic *record,
+ POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
#endif
};
static const struct config_enum_entry check_temp_table_options[] = {
- {"catalog", CHECK_TEMP_CATALOG, false}, /* search system catalogs */
- {"trace", CHECK_TEMP_TRACE, false}, /* tracing temp tables */
- {"none", CHECK_TEMP_NONE, false}, /* do not check temp tables */
- {"on", CHECK_TEMP_ON, false}, /* same as CHECK_TEMP_CATALOG. Just for backward compatibility. */
- {"off", CHECK_TEMP_OFF, false}, /* same as CHECK_TEMP_NONE. Just for backward compatibility. */
+ {"catalog", CHECK_TEMP_CATALOG, false}, /* search system catalogs */
+ {"trace", CHECK_TEMP_TRACE, false}, /* tracing temp tables */
+ {"none", CHECK_TEMP_NONE, false}, /* do not check temp tables */
+ {"on", CHECK_TEMP_ON, false}, /* same as CHECK_TEMP_CATALOG. Just for
+ * backward compatibility. */
+ {"off", CHECK_TEMP_OFF, false}, /* same as CHECK_TEMP_NONE. Just for
+ * backward compatibility. */
{NULL, 0, false}
};
static const struct config_enum_entry log_backend_messages_options[] = {
- {"none", BGMSG_NONE, false}, /* turn off logging */
- {"terse", BGMSG_TERSE, false}, /* terse logging (repeated messages are collapsed into count */
+ {"none", BGMSG_NONE, false}, /* turn off logging */
+ {"terse", BGMSG_TERSE, false}, /* terse logging (repeated messages are
+ * collapsed into count */
{"verbose", BGMSG_VERBOSE, false}, /* always log each message */
{NULL, 0, false}
};
{
{"health_check_test", CFGCXT_INIT, HEALTH_CHECK_CONFIG,
- "If on, enable health check testing.",
- CONFIG_VAR_TYPE_BOOL, false, 0
+ "If on, enable health check testing.",
+ CONFIG_VAR_TYPE_BOOL, false, 0
},
&g_pool_config.health_check_test,
false,
CONFIG_VAR_TYPE_STRING_ARRAY, true, VAR_PART_OF_GROUP, MAX_NUM_BACKENDS
},
NULL,
- "", /* for ALWAYS_PRIMARY */
+ "", /* for ALWAYS_PRIMARY */
EMPTY_CONFIG_STRING,
BackendFlagsAssignFunc, NULL, BackendFlagsShowFunc, BackendSlotEmptyCheckFunc
},
CONFIG_VAR_TYPE_INT, false, GUC_UNIT_MIN
},
&g_pool_config.log_rotation_age,
- 1440,/*1 day*/
+ 1440, /* 1 day */
0, INT_MAX,
NULL, NULL, NULL
},
},
&g_pool_config.log_rotation_size,
10 * 1024,
- 0, INT_MAX/1024,
+ 0, INT_MAX / 1024,
NULL, NULL, NULL
},
{
for (token = strtok(temp_string, delimi); token != NULL; token = strtok(NULL, delimi))
{
- int i;
+ int i;
/* skip leading whitespace */
while (isspace(*token))
/* skip trailing whitespace */
i = strlen(token) - 1;
- while (i >= 0 && isspace(token[i])) {
+ while (i >= 0 && isspace(token[i]))
+ {
token[i] = '\0';
i--;
}
}
else if (*str_temp == *delimi)
{
- char *output = (char *) palloc(j + 1);
+ char *output = (char *) palloc(j + 1);
+
StrNCpy(output, buf, j + 1);
/* replace escape character of "'" */
if (value != NULL)
{
- int64 newval64;
+ int64 newval64;
const char *hintmsg;
if (!parse_int(value, &newval64,
hintmsg ? errhint("%s", _(hintmsg)) : 0));
return false;
}
- newval = (int)newval64;
+ newval = (int) newval64;
}
else if (source == PGC_S_DEFAULT)
{
if (value != NULL)
{
- int64 newval64;
+ int64 newval64;
const char *hintmsg;
if (!parse_int(value, &newval64,
hintmsg ? errhint("%s", _(hintmsg)) : 0));
return false;
}
- newval = (int)newval64;
+ newval = (int) newval64;
}
else if (source == PGC_S_DEFAULT)
{
if (*buffer == '\0')
snprintf(buffer, sizeof(buffer), "ALWAYS_PRIMARY");
else
- snprintf(buffer+strlen(buffer), sizeof(buffer), "|ALWAYS_PRIMARY");
+ snprintf(buffer + strlen(buffer), sizeof(buffer), "|ALWAYS_PRIMARY");
}
return buffer;
}
static bool
WdIFSlotEmptyCheckFunc(int index)
{
- return (g_pool_config.hb_ifs[index].dest_port == 0);
+ return (g_pool_config.hb_ifs[index].dest_port == 0);
}
static const char *
g_pool_config.failover_on_backend_error = newval;
return true;
}
+
/*
* Throws warning for if someone uses the removed delegate_IP
* configuration parameter and set the value to delegate_ip
g_pool_config.delegate_ip = newval;
return true;
}
+
/*
* Check DB node spec. node spec should be either "primary", "standby" or
* numeric DB node id.
static bool
config_post_processor(ConfigContext context, int elevel)
{
- double total_weight = 0.0;
+ double total_weight = 0.0;
sig_atomic_t local_num_backends = 0;
- int i;
+ int i;
/* read from pgpool_node_id */
SetPgpoolNodeId(elevel);
/*
* Quarantine state in native replication mode is dangerous and it can
- * potentially cause data inconsistency.
- * So as per the discussions, we agreed on disallowing setting
- * failover_when_quorum_exists in native replication mode
+ * potentially cause data inconsistency. So as per the discussions, we
+ * agreed on disallowing setting failover_when_quorum_exists in native
+ * replication mode
*/
if (pool_config->failover_when_quorum_exists && pool_config->replication_mode)
}
/*
- * Verify the minimum and maximum number of spare children configuration when
- * dynamic process management is enabled
+ * Verify the minimum and maximum number of spare children configuration
+ * when dynamic process management is enabled
*/
if (g_pool_config.process_management == PM_DYNAMIC)
{
ereport(elevel,
(errmsg("invalid configuration, max_spare_children:%d must be greater than min_spare_children:%d",
- pool_config->max_spare_children,pool_config->min_spare_children)));
+ pool_config->max_spare_children, pool_config->min_spare_children)));
return false;
}
if (pool_config->num_init_children < pool_config->max_spare_children)
{
ereport(elevel,
(errmsg("invalid configuration, max_spare_children:%d can't be greater than num_init_children:%d",
- pool_config->max_spare_children,pool_config->num_init_children)));
+ pool_config->max_spare_children, pool_config->num_init_children)));
return false;
}
}
static bool
MakeDMLAdaptiveObjectRelationList(char *newval, int elevel)
{
- int i;
- int elements_count = 0;
- char **rawList = get_list_from_string(newval, ",", &elements_count);
+ int i;
+ int elements_count = 0;
+ char **rawList = get_list_from_string(newval, ",", &elements_count);
if (rawList == NULL || elements_count == 0)
{
for (i = 0; i < elements_count; i++)
{
- char *kvstr = rawList[i];
- char *left_token = strtok(kvstr, ":");
- char *right_token = strtok(NULL, ":");
+ char *kvstr = rawList[i];
+ char *left_token = strtok(kvstr, ":");
+ char *right_token = strtok(NULL, ":");
DBObjectTypes object_type;
ereport(DEBUG5,
(errmsg("dml_adaptive_init"),
- errdetail("%s -- left_token[%s] right_token[%s]", kvstr, left_token, right_token)));
+ errdetail("%s -- left_token[%s] right_token[%s]", kvstr, left_token, right_token)));
pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.name =
- getParsedToken(left_token, &object_type);
+ getParsedToken(left_token, &object_type);
pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.object_type = object_type;
pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.name =
- getParsedToken(right_token,&object_type);
+ getParsedToken(right_token, &object_type);
pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.object_type = object_type;
pfree(kvstr);
}
* We also remove the trailing spaces from the function type token
* and return the palloc'd copy of token in new_token
*/
-static char*
+static char *
getParsedToken(char *token, DBObjectTypes *object_type)
{
- int len;
+ int len;
+
*object_type = OBJECT_TYPE_UNKNOWN;
if (!token)
len = strlen(token);
if (len > strlen("*()"))
{
- int namelen = len - 2;
+ int namelen = len - 2;
+
/* check if token ends with () */
- if (strcmp(token + namelen,"()") == 0)
+ if (strcmp(token + namelen, "()") == 0)
{
/*
- * Remove the Parentheses from end of
- * token name
+ * Remove the Parentheses from end of token name
*/
- char *new_token;
- int new_len = strlen(token) - 2;
+ char *new_token;
+ int new_len = strlen(token) - 2;
+
new_token = palloc(new_len + 1);
- strncpy(new_token,token,new_len);
+ strncpy(new_token, token, new_len);
new_token[new_len] = '\0';
*object_type = OBJECT_TYPE_FUNCTION;
return new_token;
SetPgpoolNodeId(int elevel)
{
char pgpool_node_id_file[POOLMAXPATHLEN + 1];
- FILE *fd;
- int length;
- int i;
+ FILE *fd;
+ int length;
+ int i;
if (g_pool_config.use_watchdog)
{
snprintf(pgpool_node_id_file, sizeof(pgpool_node_id_file), "%s/%s", config_file_dir, NODE_ID_FILE_NAME);
#define MAXLINE 10
- char readbuf[MAXLINE];
+ char readbuf[MAXLINE];
fd = fopen(pgpool_node_id_file, "r");
if (!fd)
static bool
SetHBDestIfFunc(int elevel)
{
- int idx = 0;
- char **addrs;
- char **if_names;
- int i, j,
- n_addr,
- n_if_name;
+ int idx = 0;
+ char **addrs;
+ char **if_names;
+ int i,
+ j,
+ n_addr,
+ n_if_name;
g_pool_config.num_hb_dest_if = 0;
/*
* g_pool_config.hb_ifs is the information for sending/receiving heartbeat
- * for all nodes specified in pgpool.conf.
- * If it is local pgpool node information, set dest_port to g_pool_config.wd_heartbeat_port
- * and ignore addr and if_name.
- * g_pool_config.hb_dest_if is the heartbeat destination information.
+ * for all nodes specified in pgpool.conf. If it is local pgpool node
+ * information, set dest_port to g_pool_config.wd_heartbeat_port and
+ * ignore addr and if_name. g_pool_config.hb_dest_if is the heartbeat
+ * destination information.
*/
for (i = 0; i < WD_MAX_IF_NUM; i++)
{
continue;
}
- WdHbIf *hbNodeInfo = &g_pool_config.hb_ifs[i];
+ WdHbIf *hbNodeInfo = &g_pool_config.hb_ifs[i];
addrs = get_list_from_string(hbNodeInfo->addr, ";", &n_addr);
if_names = get_list_from_string(hbNodeInfo->if_name, ";", &n_if_name);
{
strlcpy(g_pool_config.hb_dest_if[idx].addr, addrs[j], WD_MAX_HOST_NAMELEN - 1);
g_pool_config.hb_dest_if[idx].dest_port = hbNodeInfo->dest_port;
- if (n_if_name > j )
+ if (n_if_name > j)
{
strlcpy(g_pool_config.hb_dest_if[idx].if_name, if_names[j], WD_MAX_IF_NAME_LEN - 1);
pfree(if_names[j]);
*result = (int64) val;
return true;
}
+
/*
* Convert a value from one of the human-friendly units ("kB", "min" etc.)
* to the given base unit. 'value' and 'unit' are the input value and unit
}
bool
-set_config_option_for_session(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *name, const char *value)
+set_config_option_for_session(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *name, const char *value)
{
bool ret;
MemoryContext oldCxt = MemoryContextSwitchTo(TopMemoryContext);
}
bool
-reset_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+reset_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int i;
int elevel = (frontend == NULL) ? FATAL : FRONTEND_ONLY_ERROR;
* Handle "pgpool show all" command.
*/
bool
-report_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+report_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int i;
int num_rows = 0;
* Handle "pgpool show" command.
*/
bool
-report_config_variable(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *var_name)
+report_config_variable(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *var_name)
{
int index = 0;
char *value;
}
static int
-send_array_type_variable_to_frontend(struct config_generic *record, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+send_array_type_variable_to_frontend(struct config_generic *record, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
if (record->dynamic_array_var)
{
}
static int
-send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int k,
index;
}
static void
-send_row_description_for_detail_view(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+send_row_description_for_detail_view(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"item", "value", "description"};
#include "pool_config.h" /* remove me afterwards */
static POOL_PROCESS_CONTEXT process_context_d;
-static POOL_PROCESS_CONTEXT * process_context;
+static POOL_PROCESS_CONTEXT *process_context;
/*
* Initialize per process context
size_t
pool_coninfo_size(void)
{
- size_t size;
+ size_t size;
size = pool_config->num_init_children *
pool_config->max_pool *
* This flag is used to handle pg_terminate_backend()
*/
/*
 * Set the swallow_termination flag on the connection.  Per the comment
 * above, this flag is used when handling pg_terminate_backend(): the
 * expected disconnect should be swallowed rather than treated as a
 * backend failure (NOTE(review): confirm against flag consumers).
 */
void
pool_set_connection_will_be_terminated(ConnectionInfo *connInfo)
{
	connInfo->swallow_termination = 1;
}
/*
 * Clear the swallow_termination flag on the connection; counterpart of
 * pool_set_connection_will_be_terminated().
 */
void
pool_unset_connection_will_be_terminated(ConnectionInfo *connInfo)
{
	connInfo->swallow_termination = 0;
}
POOL_STANDBY,
POOL_EITHER,
POOL_BOTH
-} POOL_DEST;
+} POOL_DEST;
#define CHECK_QUERY_CONTEXT_IS_VALID \
do { \
} while (0)
static POOL_DEST send_to_where(Node *node);
-static void where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node);
-static void where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node *node);
-static void where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query, Node *node);
+static void where_to_send_deallocate(POOL_QUERY_CONTEXT *query_context, Node *node);
+static void where_to_send_main_replica(POOL_QUERY_CONTEXT *query_context, char *query, Node *node);
+static void where_to_send_native_replication(POOL_QUERY_CONTEXT *query_context, char *query, Node *node);
static char *remove_read_write(int len, const char *contents, int *rewritten_len);
static void set_virtual_main_node(POOL_QUERY_CONTEXT *query_context);
static bool is_select_object_in_temp_write_list(Node *node, void *context);
static bool add_object_into_temp_write_list(Node *node, void *context);
static void dml_adaptive(Node *node, char *query);
-static char* get_associated_object_from_dml_adaptive_relations
- (char *left_token, DBObjectTypes object_type);
+static char *get_associated_object_from_dml_adaptive_relations
+ (char *left_token, DBObjectTypes object_type);
/*
* Create and initialize per query session context
* Destroy query context
*/
void
-pool_query_context_destroy(POOL_QUERY_CONTEXT * query_context)
+pool_query_context_destroy(POOL_QUERY_CONTEXT *query_context)
{
POOL_SESSION_CONTEXT *session_context;
* Perform shallow copy of given query context. Used in parse_before_bind.
*/
POOL_QUERY_CONTEXT *
-pool_query_context_shallow_copy(POOL_QUERY_CONTEXT * query_context)
+pool_query_context_shallow_copy(POOL_QUERY_CONTEXT *query_context)
{
POOL_QUERY_CONTEXT *qc;
MemoryContext memory_context;
* Start query
*/
void
-pool_start_query(POOL_QUERY_CONTEXT * query_context, char *query, int len, Node *node)
+pool_start_query(POOL_QUERY_CONTEXT *query_context, char *query, int len, Node *node)
{
POOL_SESSION_CONTEXT *session_context;
* Specify DB node to send query
*/
void
-pool_set_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
+pool_set_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
* Unspecified DB node to send query
*/
void
-pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
+pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
* Clear DB node map
*/
void
-pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
+pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT *query_context)
{
CHECK_QUERY_CONTEXT_IS_VALID;
* Set all DB node map entry
*/
void
-pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
+pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT *query_context)
{
int i;
POOL_SESSION_CONTEXT *sc;
if (SL_MODE)
{
/*
- * If load balance mode is disabled, only send to the primary node.
- * If primary node does not exist, send to the main node.
+ * If load balance mode is disabled, only send to the primary
+ * node. If primary node does not exist, send to the main
+ * node.
*/
if (!pool_config->load_balance_mode)
{
continue;
}
else
+
/*
* If the node is not primary node nor load balance node,
* there's no point to send query except statement level
*/
if (!pool_config->statement_level_load_balance &&
i != PRIMARY_NODE_ID && i != sc->load_balance_node_id)
- continue;
+ continue;
}
query_context->where_to_send[i] = true;
}
* Return true if multiple nodes are targets
*/
bool
-pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
+pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT *query_context)
{
int i;
int cnt = 0;
* Return if the DB node is needed to send query
*/
bool
-pool_is_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
+pool_is_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
int
pool_virtual_main_db_node_id(void)
{
- volatile POOL_REQUEST_INFO *my_req;
+ volatile POOL_REQUEST_INFO *my_req;
POOL_SESSION_CONTEXT *sc;
/*
- * Check whether failover is in progress and we are child process.
- * If so, we will wait for failover to finish.
+ * Check whether failover is in progress and we are child process. If so,
+ * we will wait for failover to finish.
*/
my_req = Req_info;
if (processType == PT_CHILD && my_req->switching)
POOL_SETMASK(&BlockSig);
ereport(WARNING,
(errmsg("failover/failback is in progress"),
- errdetail("executing failover or failback on backend"),
+ errdetail("executing failover or failback on backend"),
errhint("In a moment you should be able to reconnect to the database")));
POOL_SETMASK(&UnBlockSig);
#endif
+
/*
* Wait for failover to finish
*/
if (wait_for_failover_to_finish() == -2)
+
/*
- * Waiting for failover/failback to finish was timed out.
- * Time to exit this process (and session disconnection).
+ * Waiting for failover/failback to finish was timed out. Time to
+ * exit this process (and session disconnection).
*/
child_exit(POOL_EXIT_AND_RESTART);
}
{
/*
* We used to return REAL_MAIN_NODE_ID here. Problem with it is, it
- * is possible that REAL_MAIN_NODE_ID could be changed
- * anytime. Suppose REAL_MAIN_NODE_ID == my_main_node_id == 1. Then
- * due to failback, REAL_MAIN_NODE_ID is changed to 0. Then
+ * is possible that REAL_MAIN_NODE_ID could be changed anytime.
+ * Suppose REAL_MAIN_NODE_ID == my_main_node_id == 1. Then due to
+ * failback, REAL_MAIN_NODE_ID is changed to 0. Then
* MAIN_CONNECTION(cp) will return NULL and any reference to it will
* cause segmentation fault. To prevent the issue we should return
* my_main_node_id instead.
}
/*
- * No query context exists. If in streaming replication mode, returns primary node
- * if exists. Otherwise returns my_main_node_id, which represents the
- * last REAL_MAIN_NODE_ID.
+ * No query context exists. If in streaming replication mode, returns
+ * primary node if exists. Otherwise returns my_main_node_id, which
+ * represents the last REAL_MAIN_NODE_ID.
*/
if (MAIN_REPLICA)
{
* Set the destination for the current query to the specific backend node.
*/
void
-pool_force_query_node_to_backend(POOL_QUERY_CONTEXT * query_context, int backend_id)
+pool_force_query_node_to_backend(POOL_QUERY_CONTEXT *query_context, int backend_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
* Decide where to send queries(thus expecting response)
*/
void
-pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
+pool_where_to_send(POOL_QUERY_CONTEXT *query_context, char *query, Node *node)
{
CHECK_QUERY_CONTEXT_IS_VALID;
if (query_context->is_multi_statement)
{
/*
- * If we are in streaming replication mode and we have multi statement query,
- * we should send it to primary server only. Otherwise it is possible
- * to send a write query to standby servers because we only use the
- * first element of the multi statement query and don't care about the
- * rest. Typical situation where we are bugged by this is,
- * "BEGIN;DELETE FROM table;END". Note that from pgpool-II 3.1.0
- * transactional statements such as "BEGIN" is unconditionally sent to
- * all nodes(see send_to_where() for more details). Someday we might
+ * If we are in streaming replication mode and we have multi
+ * statement query, we should send it to primary server only.
+ * Otherwise it is possible to send a write query to standby
+ * servers because we only use the first element of the multi
+ * statement query and don't care about the rest. Typical
+ * situation where we are bugged by this is, "BEGIN;DELETE FROM
+ * table;END". Note that from pgpool-II 3.1.0 transactional
+ * statements such as "BEGIN" is unconditionally sent to all
+ * nodes(see send_to_where() for more details). Someday we might
* be able to understand all part of multi statement queries, but
* until that day we need this band aid.
*/
* >0: send to this node_id
*/
POOL_STATUS
-pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
+pool_send_and_wait(POOL_QUERY_CONTEXT *query_context,
int send_type, int node_id)
{
POOL_SESSION_CONTEXT *session_context;
string = NULL;
/*
- * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in
- * streaming replication mode, we send BEGIN to standbys instead.
- * The original_query which is BEGIN READ WRITE is sent to primary.
- * The rewritten_query BEGIN is sent to standbys.
+ * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in streaming
+ * replication mode, we send BEGIN to standbys instead. The original_query
+ * which is BEGIN READ WRITE is sent to primary. The rewritten_query BEGIN
+ * is sent to standbys.
*/
if (pool_need_to_treat_as_if_default_transaction(query_context))
{
continue;
/*
- * If we are in streaming replication mode or logical replication mode,
- * we do not send COMMIT/ABORT to standbys if it's in I (idle) state.
+ * If we are in streaming replication mode or logical replication
+ * mode, we do not send COMMIT/ABORT to standbys if it's in I (idle)
+ * state.
*/
if (is_commit && MAIN_REPLICA && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I')
{
* >0: send to this node_id
*/
POOL_STATUS
-pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context,
+pool_extended_send_and_wait(POOL_QUERY_CONTEXT *query_context,
char *kind, int len, char *contents,
int send_type, int node_id, bool nowait)
{
rewritten_begin = NULL;
/*
- * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in
- * streaming replication mode, we send BEGIN to standbys instead.
- * The original_query which is BEGIN READ WRITE is sent to primary.
- * The rewritten_query BEGIN is sent to standbys.
+ * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in streaming
+ * replication mode, we send BEGIN to standbys instead. The original_query
+ * which is BEGIN READ WRITE is sent to primary. The rewritten_query BEGIN
+ * is sent to standbys.
*/
if (pool_need_to_treat_as_if_default_transaction(query_context))
{
* From syntactically analysis decide the statement to be sent to the
* primary, the standby or either or both in native replication+HR/SR mode.
*/
-static POOL_DEST send_to_where(Node *node)
+static POOL_DEST
+send_to_where(Node *node)
{
/* From storage/lock.h */
if (is_start_transaction_query(node))
{
/*
- * But actually, we send BEGIN to standby if it's BEGIN READ
- * WRITE or START TRANSACTION READ WRITE
+ * But actually, we send BEGIN to standby if it's BEGIN READ WRITE
+ * or START TRANSACTION READ WRITE
*/
if (is_read_write((TransactionStmt *) node))
return POOL_BOTH;
/*
- * Other TRANSACTION start commands are sent to both primary
- * and standby
+ * Other TRANSACTION start commands are sent to both primary and
+ * standby
*/
else
return POOL_BOTH;
}
return POOL_BOTH;
}
+
/*
* 2PC commands
*/
/*
* SET TRANSACTION ISOLATION LEVEL SERIALIZABLE or SET SESSION
- * CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE or
- * SET transaction_isolation TO 'serializable' SET
+ * CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE or SET
+ * transaction_isolation TO 'serializable' SET
* default_transaction_isolation TO 'serializable'
*/
else if (is_set_transaction_serializable(node))
}
/*
- * Check "SET TRANSACTION READ WRITE" "SET SESSION CHARACTERISTICS
- * AS TRANSACTION READ WRITE"
+ * Check "SET TRANSACTION READ WRITE" "SET SESSION CHARACTERISTICS AS
+ * TRANSACTION READ WRITE"
*/
else if (((VariableSetStmt *) node)->kind == VAR_SET_MULTI &&
(!strcmp(((VariableSetStmt *) node)->name, "TRANSACTION") ||
*/
static
void
-where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node)
+where_to_send_deallocate(POOL_QUERY_CONTEXT *query_context, Node *node)
{
DeallocateStmt *d = NULL;
ExecuteStmt *e = NULL;
- char *name;
+ char *name;
POOL_SENT_MESSAGE *msg;
if (IsA(node, DeallocateStmt))
else
{
/*
- * prepared statement was not found.
- * There are two cases when this could happen.
- * (1) mistakes by client. In this case backend will return ERROR
- * anyway.
- * (2) previous query was issued as multi-statement query. e.g.
- * SELECT 1\;PREPARE foo AS SELECT 1;
+ * prepared statement was not found. There are two cases when this
+ * could happen. (1) mistakes by client. In this case backend will
+ * return ERROR anyway. (2) previous query was issued as
+ * multi-statement query. e.g. SELECT 1\;PREPARE foo AS SELECT 1;
* In this case pgpool does not know anything about the prepared
* statement "foo".
*/
* The rewritten_query BEGIN is sent to standbys.
*/
bool
-pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT * query_context)
+pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT *query_context)
{
return (MAIN_REPLICA &&
is_start_transaction_query(query_context->parse_tree) &&
* Set query state, if a current state is before it than the specified state.
*/
void
-pool_set_query_state(POOL_QUERY_CONTEXT * query_context, POOL_QUERY_STATE state)
+pool_set_query_state(POOL_QUERY_CONTEXT *query_context, POOL_QUERY_STATE state)
{
int i;
static void
set_virtual_main_node(POOL_QUERY_CONTEXT *query_context)
{
- int i;
+ int i;
for (i = 0; i < NUM_BACKENDS; i++)
{
set_load_balance_info(POOL_QUERY_CONTEXT *query_context)
{
POOL_SESSION_CONTEXT *session_context;
+
session_context = pool_get_session_context(false);
if (pool_config->statement_level_load_balance)
if (name == NULL || list == NIL)
return false;
- ListCell *cell;
- foreach (cell, list)
+ ListCell *cell;
+
+ foreach(cell, list)
{
- char *cell_name = (char *)lfirst(cell);
+ char *cell_name = (char *) lfirst(cell);
+
if (strcasecmp(name, cell_name) == 0)
{
ereport(DEBUG1,
if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && session_context->is_in_transaction)
{
ereport(DEBUG1,
- (errmsg("is_select_object_in_temp_write_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname)));
+ (errmsg("is_select_object_in_temp_write_list: \"%s\", found relation \"%s\"", (char *) context, rgv->relname)));
return is_in_list(rgv->relname, session_context->transaction_temp_write_list);
}
return raw_expression_tree_walker(node, is_select_object_in_temp_write_list, context);
}
-static char*
-get_associated_object_from_dml_adaptive_relations
- (char *left_token, DBObjectTypes object_type)
+static char *get_associated_object_from_dml_adaptive_relations
+ (char *left_token, DBObjectTypes object_type)
{
- int i;
- char *right_token = NULL;
+ int i;
+ char *right_token = NULL;
+
if (!pool_config->parsed_dml_adaptive_object_relationship_list)
return NULL;
- for (i=0 ;; i++)
+ for (i = 0;; i++)
{
if (pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.name == NULL)
break;
if (session_context->is_in_transaction)
{
- char *right_token =
- get_associated_object_from_dml_adaptive_relations
- (name, is_func_name? OBJECT_TYPE_FUNCTION : OBJECT_TYPE_RELATION);
+ char *right_token =
+ get_associated_object_from_dml_adaptive_relations
+ (name, is_func_name ? OBJECT_TYPE_FUNCTION : OBJECT_TYPE_RELATION);
if (right_token)
{
MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context);
+
session_context->transaction_temp_write_list =
lappend(session_context->transaction_temp_write_list, pstrdup(right_token));
MemoryContextSwitchTo(old_context);
RangeVar *rgv = (RangeVar *) node;
ereport(DEBUG5,
- (errmsg("add_object_into_temp_write_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname)));
+ (errmsg("add_object_into_temp_write_list: \"%s\", found relation \"%s\"", (char *) context, rgv->relname)));
POOL_SESSION_CONTEXT *session_context = pool_get_session_context(false);
MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context);
session_context->transaction_temp_write_list = NIL;
}
- else if(is_commit_or_rollback_query(node))
+ else if (is_commit_or_rollback_query(node))
{
session_context->is_in_transaction = false;
return;
}
- /* If non-selectStmt, find the relname and add it to the transaction temp write list. */
+ /*
+ * If non-selectStmt, find the relname and add it to the transaction
+ * temp write list.
+ */
if (!is_select_query(node, query))
add_object_into_temp_write_list(node, query);
* replication mode and slony mode. Called by pool_where_to_send.
*/
static void
-where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
+where_to_send_main_replica(POOL_QUERY_CONTEXT *query_context, char *query, Node *node)
{
POOL_DEST dest;
POOL_SESSION_CONTEXT *session_context;
if (is_tx_started_by_multi_statement_query())
{
/*
- * If we are in an explicit transaction and the transaction
- * was started by a multi statement query, we should send
- * query to primary node only (which was supposed to be sent
- * to all nodes) until the transaction gets committed or
- * aborted.
+ * If we are in an explicit transaction and the transaction was
+ * started by a multi statement query, we should send query to
+ * primary node only (which was supposed to be sent to all nodes)
+ * until the transaction gets committed or aborted.
*/
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
{
/*
* If (we are outside of an explicit transaction) OR (the
- * transaction has not issued a write query yet, AND
- * transaction isolation level is not SERIALIZABLE) we might
- * be able to load balance.
+ * transaction has not issued a write query yet, AND transaction
+ * isolation level is not SERIALIZABLE) we might be able to load
+ * balance.
*/
ereport(DEBUG1,
*/
/*
- * If system catalog is used in the SELECT, we prefer to
- * send to the primary. Example: SELECT * FROM pg_class
- * WHERE relname = 't1'; Because 't1' is a constant, it's
- * hard to recognize as table name. Most use case such
- * query is against system catalog, and the table name can
- * be a temporary table, it's best to query against
- * primary system catalog. Please note that this test must
- * be done *before* test using pool_has_temp_table.
+ * If system catalog is used in the SELECT, we prefer to send
+ * to the primary. Example: SELECT * FROM pg_class WHERE
+ * relname = 't1'; Because 't1' is a constant, it's hard to
+ * recognize as table name. Most use case such query is
+ * against system catalog, and the table name can be a
+ * temporary table, it's best to query against primary system
+ * catalog. Please note that this test must be done *before*
+ * test using pool_has_temp_table.
*/
if (pool_has_system_catalog(node))
{
}
/*
- * If temporary table is used in the SELECT, we prefer to
- * send to the primary.
+ * If temporary table is used in the SELECT, we prefer to send
+ * to the primary.
*/
else if (pool_config->check_temp_table && pool_has_temp_table(node))
{
}
/*
- * If unlogged table is used in the SELECT, we prefer to
- * send to the primary.
+ * If unlogged table is used in the SELECT, we prefer to send
+ * to the primary.
*/
else if (pool_config->check_unlogged_table && pool_has_unlogged_table(node))
{
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
+
/*
- * When query match the query patterns in primary_routing_query_pattern_list, we
- * send only to main node.
+ * When query match the query patterns in
+ * primary_routing_query_pattern_list, we send only to main
+ * node.
*/
else if (pattern_compare(query, WRITELIST, "primary_routing_query_pattern_list") == 1)
{
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
+
/*
- * If a writing function call is used, we prefer to send
- * to the primary.
+ * If a writing function call is used, we prefer to send to
+ * the primary.
*/
else if (pool_has_function_call(node))
{
/*
* As streaming replication delay is too much, if
- * prefer_lower_delay_standby is true then elect new
- * load balance node which is lowest delayed,
- * false then send to the primary.
+ * prefer_lower_delay_standby is true then elect new load
+ * balance node which is lowest delayed, false then send
+ * to the primary.
*/
if (STREAM && check_replication_delay(session_context->load_balance_node_id))
{
if (pool_config->prefer_lower_delay_standby)
{
- int new_load_balancing_node = select_load_balancing_node();
+ int new_load_balancing_node = select_load_balancing_node();
session_context->load_balance_node_id = new_load_balancing_node;
session_context->query_context->load_balance_node_id = session_context->load_balance_node_id;
* Called by pool_where_to_send.
*/
static void
-where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
+where_to_send_native_replication(POOL_QUERY_CONTEXT *query_context, char *query, Node *node)
{
POOL_SESSION_CONTEXT *session_context;
POOL_CONNECTION_POOL *backend;
* from syntactical point of view).
*/
elog(DEBUG1, "Maybe: load balance mode: %d is_select_query: %d",
- pool_config->load_balance_mode, is_select_query(node, query));
+ pool_config->load_balance_mode, is_select_query(node, query));
if (pool_config->load_balance_mode &&
is_select_query(node, query) &&
else if (TSTATE(backend, MAIN_NODE_ID) == 'I')
{
/*
- * We are out side transaction. If default transaction is read only,
- * we can load balance.
+ * We are out side transaction. If default transaction is read
+ * only, we can load balance.
*/
- static char *si_query = "SELECT current_setting('transaction_read_only')";
+ static char *si_query = "SELECT current_setting('transaction_read_only')";
POOL_SELECT_RESULT *res;
- bool load_balance = false;
+ bool load_balance = false;
do_query(CONNECTION(backend, MAIN_NODE_ID), si_query, &res, MAJOR(backend));
if (res)
}
}
}
-
+
/*
* If a writing function call is used or replicate_select is true, we
* have to send to all nodes since the function may modify database.
}
/*
- * If (we are outside of an explicit transaction) OR (the
- * transaction has not issued a write query yet, AND transaction
- * isolation level is not SERIALIZABLE) we might be able to load
- * balance.
+ * If (we are outside of an explicit transaction) OR (the transaction
+ * has not issued a write query yet, AND transaction isolation level
+ * is not SERIALIZABLE) we might be able to load balance.
*/
else if (TSTATE(backend, MAIN_NODE_ID) == 'I' ||
(!pool_is_writing_transaction() &&
int
wait_for_failover_to_finish(void)
{
-#define MAX_FAILOVER_WAIT 30 /* waiting for failover finish timeout in seconds */
+#define MAX_FAILOVER_WAIT 30 /* waiting for failover finish timeout in
+ * seconds */
- volatile POOL_REQUEST_INFO *my_req;
- int ret = 0;
- int i;
+ volatile POOL_REQUEST_INFO *my_req;
+ int ret = 0;
+ int i;
/*
* Wait for failover to finish
*/
- for (i = 0;i < MAX_FAILOVER_WAIT; i++)
+ for (i = 0; i < MAX_FAILOVER_WAIT; i++)
{
my_req = Req_info;
if (my_req->switching == 0)
return ret;
- ret = -1; /* failover/failback finished */
+ ret = -1; /* failover/failback finished */
sleep(1);
}
- return -2; /* timed out */
+ return -2; /* timed out */
}
#include "context/pool_session_context.h"
static POOL_SESSION_CONTEXT session_context_d;
-static POOL_SESSION_CONTEXT * session_context = NULL;
+static POOL_SESSION_CONTEXT *session_context = NULL;
static void GetTranIsolationErrorCb(void *arg);
static void init_sent_message_list(void);
-static POOL_PENDING_MESSAGE * copy_pending_message(POOL_PENDING_MESSAGE * message);
-static void dump_sent_message(char *caller, POOL_SENT_MESSAGE * m);
+static POOL_PENDING_MESSAGE *copy_pending_message(POOL_PENDING_MESSAGE *message);
+static void dump_sent_message(char *caller, POOL_SENT_MESSAGE *m);
static void dml_adaptive_init(void);
static void dml_adaptive_destroy(void);
* Initialize per session context
*/
void
-pool_init_session_context(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+pool_init_session_context(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
session_context = &session_context_d;
ProcessInfo *process_info;
/* Initialize temp tables */
pool_temp_tables_init();
-
+
/* Snapshot isolation state */
session_context->si_state = SI_NO_SNAPSHOT;
* Destroy sent message
*/
void
-pool_sent_message_destroy(POOL_SENT_MESSAGE * message)
+pool_sent_message_destroy(POOL_SENT_MESSAGE *message)
{
bool in_progress;
POOL_QUERY_CONTEXT *qc = NULL;
}
static void
-dump_sent_message(char *caller, POOL_SENT_MESSAGE * m)
+dump_sent_message(char *caller, POOL_SENT_MESSAGE *m)
{
ereport(DEBUG5,
(errmsg("called by %s: sent message: address: %p kind: %c name: =%s= state:%d",
POOL_SENT_MESSAGE *
pool_create_sent_message(char kind, int len, char *contents,
int num_tsparams, const char *name,
- POOL_QUERY_CONTEXT * query_context)
+ POOL_QUERY_CONTEXT *query_context)
{
POOL_SENT_MESSAGE *msg;
* Add a sent message to sent message list
*/
void
-pool_add_sent_message(POOL_SENT_MESSAGE * message)
+pool_add_sent_message(POOL_SENT_MESSAGE *message)
{
POOL_SENT_MESSAGE *old_msg;
POOL_SENT_MESSAGE_LIST *msglist;
* Find a sent message by query context.
*/
POOL_SENT_MESSAGE *
-pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT * query_context)
+pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT *query_context)
{
int i;
POOL_SENT_MESSAGE_LIST *msglist;
* Set message state to POOL_SENT_MESSAGE_STATE to POOL_SENT_MESSAGE_CLOSED.
*/
void
-pool_set_sent_message_state(POOL_SENT_MESSAGE * message)
+pool_set_sent_message_state(POOL_SENT_MESSAGE *message)
{
ereport(DEBUG5,
(errmsg("pool_set_sent_message_state: name:%s kind:%c previous state: %d",
pool_set_writing_transaction(void)
{
/*
- * If disable_transaction_on_write is 'off' or 'dml_adaptive', then never turn on writing
- * transaction flag.
+ * If disable_load_balance_on_write is 'off' or 'dml_adaptive', then never
+ * turn on writing transaction flag.
*/
if (pool_config->disable_load_balance_on_write != DLBOW_OFF && pool_config->disable_load_balance_on_write != DLBOW_DML_ADAPTIVE)
{
* is used. Returns true if it is not used.
*/
bool
-can_query_context_destroy(POOL_QUERY_CONTEXT * qc)
+can_query_context_destroy(POOL_QUERY_CONTEXT *qc)
{
int i;
int count = 0;
* message was sent.
*/
void
-pool_pending_message_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context)
+pool_pending_message_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context)
{
memcpy(message->node_ids, query_context->where_to_send, sizeof(message->node_ids));
* which indicates which backend nodes the message was sent.
*/
void
-pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context)
+pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context)
{
int i;
* Set query field of message.
*/
void
-pool_pending_message_query_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context)
+pool_pending_message_query_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context)
{
StrNCpy(message->query, query_context->original_query, sizeof(message->query));
}
* Add one message to the tail of the list.
*/
void
-pool_pending_message_add(POOL_PENDING_MESSAGE * message)
+pool_pending_message_add(POOL_PENDING_MESSAGE *message)
{
MemoryContext old_context;
* close message.
*/
char
-pool_get_close_message_spec(POOL_PENDING_MESSAGE * msg)
+pool_get_close_message_spec(POOL_PENDING_MESSAGE *msg)
{
return *msg->contents;
}
* The returned pointer is within "msg".
*/
char *
-pool_get_close_message_name(POOL_PENDING_MESSAGE * msg)
+pool_get_close_message_name(POOL_PENDING_MESSAGE *msg)
{
return (msg->contents) + 1;
}
* Perform deep copy of POOL_PENDING_MESSAGE object in the current memory
* context except the query context.
*/
-static POOL_PENDING_MESSAGE * copy_pending_message(POOL_PENDING_MESSAGE * message)
+static POOL_PENDING_MESSAGE *
+copy_pending_message(POOL_PENDING_MESSAGE *message)
{
POOL_PENDING_MESSAGE *msg;
* context except the query context.
*/
void
-pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE * message)
+pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE *message)
{
if (message == NULL)
return;
* Set previous message.
*/
void
-pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE * message)
+pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE *message)
{
if (!session_context)
{
* pool_pending_message_free_pending_message.
*/
POOL_PENDING_MESSAGE *
-pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT * qc)
+pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT *qc)
{
List *msgs;
POOL_PENDING_MESSAGE *msg;
* the pending message is one of primary or standby node.
*/
int
-pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE * msg)
+pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE *msg)
{
int backend_id = -1;
int i;
{
ListCell *cell;
ListCell *next;
- int cnt = 0;
- int i;
+ int cnt = 0;
+ int i;
if (!session_context)
{
foreach(msg_item, session_context->pending_messages)
{
POOL_PENDING_MESSAGE *msg = (POOL_PENDING_MESSAGE *) lfirst(msg_item);
+
msg->flush_pending = true;
ereport(DEBUG5,
(errmsg("pool_pending_message_set_flush_request: msg: %s",
* If the table already exists, just replace state.
*/
void
-pool_temp_tables_add(char * tablename, POOL_TEMP_TABLE_STATE state)
+pool_temp_tables_add(char *tablename, POOL_TEMP_TABLE_STATE state)
{
MemoryContext old_context;
- POOL_TEMP_TABLE * table;
+ POOL_TEMP_TABLE *table;
if (!session_context)
ereport(ERROR,
*/
POOL_TEMP_TABLE *
-pool_temp_tables_find(char * tablename)
+pool_temp_tables_find(char *tablename)
{
ListCell *cell;
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
+
if (strcmp(tablename, table->tablename) == 0)
return table;
}
* the table state.
*/
void
-pool_temp_tables_delete(char * tablename, POOL_TEMP_TABLE_STATE state)
+pool_temp_tables_delete(char *tablename, POOL_TEMP_TABLE_STATE state)
{
- POOL_TEMP_TABLE * table;
+ POOL_TEMP_TABLE *table;
MemoryContext old_context;
if (!session_context)
Retry:
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
if (table->state == TEMP_TABLE_CREATING)
{
Retry:
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
if (table->state == TEMP_TABLE_CREATING || table->state == TEMP_TABLE_DROPPING)
{
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
+
ereport(DEBUG1,
(errmsg("pool_temp_tables_dump: table %s state: %d",
table->tablename, table->state)));
#ifndef pool_auth_h
#define pool_auth_h
-extern void connection_do_auth(POOL_CONNECTION_POOL_SLOT * cp, char *password);
-extern int pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern int pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp);
-extern void authenticate_frontend(POOL_CONNECTION * frontend);
+extern void connection_do_auth(POOL_CONNECTION_POOL_SLOT *cp, char *password);
+extern int pool_do_auth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern int pool_do_reauth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp);
+extern void authenticate_frontend(POOL_CONNECTION *frontend);
extern void pool_random_salt(char *md5Salt);
extern void pool_random(void *buf, size_t len);
-#endif /* pool_auth_h */
+#endif /* pool_auth_h */
char *ldapprefix;
char *ldapsuffix;
/* Additional LDAP option with pgpool */
- bool backend_use_passwd; /* If true, pgpool use same password to auth backend */
+ bool backend_use_passwd; /* If true, pgpool use same password to
+ * auth backend */
};
extern bool load_hba(char *hbapath);
-extern void ClientAuthentication(POOL_CONNECTION * frontend);
+extern void ClientAuthentication(POOL_CONNECTION *frontend);
#endif /* POOL_HBA_H */
#define POOL_PASSWD_FILENAME "pool_passwd"
#define POOL_PASSWD_LEN 35
-#define MAX_POOL_PASSWD_LEN 132 /* In case of TEXT prefix(4byte) and plain text password(128byte)*/
+#define MAX_POOL_PASSWD_LEN 132 /* In case of TEXT prefix(4byte) and plain
+ * text password(128byte) */
#define MAX_USER_NAME_LEN 128
#define MAX_PGPASS_LEN 128
* pgpool-II child main process */
POOL_PASSWD_RW, /* open pool_passwd in read/write mode. used
* by pg_md5 command */
-} POOL_PASSWD_MODE;
+} POOL_PASSWD_MODE;
typedef enum PasswordType
{
char *userName;
char *password;
PasswordType passwordType;
-} UserPassword;
+} UserPassword;
typedef struct PasswordMapping
{
UserPassword pgpoolUser;
UserPassword backendUser;
bool mappedUser;
-} PasswordMapping;
+} PasswordMapping;
-extern PasswordMapping * pool_get_user_credentials(char *username);
+extern PasswordMapping *pool_get_user_credentials(char *username);
extern PasswordType get_password_type(const char *shadow_pass);
extern void pool_init_pool_passwd(char *pool_passwd_filename, POOL_PASSWD_MODE mode);
extern int pool_create_passwdent(char *username, char *passwd);
extern char *get_decrypted_password(const char *shadow_pass);
extern char *read_pool_key(char *key_file_path);
extern char *get_pgpool_config_user_password(char *username, char *password_in_config);
-extern void delete_passwordMapping(PasswordMapping * pwdMapping);
-extern int check_password_type_is_not_md5(char *username, char *password_in_config);
+extern void delete_passwordMapping(PasswordMapping *pwdMapping);
+extern int check_password_type_is_not_md5(char *username, char *password_in_config);
#endif /* POOL_PASSWD_H */
extern void scram_HMAC_final(uint8 *result, scram_HMAC_ctx *ctx);
extern void scram_SaltedPassword(const char *password, const char *salt,
- int saltlen, int iterations, uint8 *result);
+ int saltlen, int iterations, uint8 *result);
extern void scram_H(const uint8 *str, int len, uint8 *result);
extern void scram_ClientKey(const uint8 *salted_password, uint8 *result);
extern void scram_ServerKey(const uint8 *salted_password, uint8 *result);
extern char *scram_build_verifier(const char *salt, int saltlen, int iterations,
- const char *password);
+ const char *password);
#endif /* SCRAM_COMMON_H */
/* Routines dedicated to authentication */
extern void *pg_be_scram_init(const char *username, const char *shadow_pass);
-extern int pg_be_scram_exchange(void *opaq, char *input, int inputlen,
- char **output, int *outputlen, char **logdetail);
+extern int pg_be_scram_exchange(void *opaq, char *input, int inputlen,
+ char **output, int *outputlen, char **logdetail);
/* Routines to handle and check SCRAM-SHA-256 verifier */
extern char *pg_be_scram_build_verifier(const char *password);
extern bool scram_verify_plain_password(const char *username,
- const char *password, const char *verifier);
+ const char *password, const char *verifier);
extern void *pg_fe_scram_init(const char *username, const char *password);
extern void pg_fe_scram_exchange(void *opaq, char *input, int inputlen,
- char **output, int *outputlen,
- bool *done, bool *success);
+ char **output, int *outputlen,
+ bool *done, bool *success);
extern void pg_fe_scram_free(void *opaq);
extern char *pg_fe_scram_build_verifier(const char *password);
#ifndef POOL_PROCESS_CONTEXT_H
#define POOL_PROCESS_CONTEXT_H
-//#include "pool.h"
+/* #include "pool.h" */
#include "pcp/libpcp_ext.h"
#include "utils/pool_signal.h"
unsigned int last_alarm_second;
unsigned int undo_alarm_second;
-} POOL_PROCESS_CONTEXT;
+} POOL_PROCESS_CONTEXT;
extern void pool_init_process_context(void);
-extern POOL_PROCESS_CONTEXT * pool_get_process_context(void);
-extern ProcessInfo * pool_get_my_process_info(void);
+extern POOL_PROCESS_CONTEXT *pool_get_process_context(void);
+extern ProcessInfo *pool_get_my_process_info(void);
extern void pool_increment_local_session_id(void);
-extern size_t pool_coninfo_size(void);
+extern size_t pool_coninfo_size(void);
extern int pool_coninfo_num(void);
-extern ConnectionInfo * pool_coninfo(int child, int connection_pool, int backend);
-extern ConnectionInfo * pool_coninfo_pid(int pid, int connection_pool, int backend);
+extern ConnectionInfo *pool_coninfo(int child, int connection_pool, int backend);
+extern ConnectionInfo *pool_coninfo_pid(int pid, int connection_pool, int backend);
extern void pool_coninfo_set_frontend_connected(int proc_id, int pool_index);
extern void pool_coninfo_unset_frontend_connected(int proc_id, int pool_index);
-extern ConnectionInfo * pool_coninfo_backend_pid(int backend_pid, int *backend_node_id);
-extern void pool_set_connection_will_be_terminated(ConnectionInfo * connInfo);
-extern void pool_unset_connection_will_be_terminated(ConnectionInfo * connInfo);
+extern ConnectionInfo *pool_coninfo_backend_pid(int backend_pid, int *backend_node_id);
+extern void pool_set_connection_will_be_terminated(ConnectionInfo *connInfo);
+extern void pool_unset_connection_will_be_terminated(ConnectionInfo *connInfo);
extern void pool_alarm(pool_sighandler_t handler, unsigned int second);
extern void pool_undo_alarm(void);
POOL_PARSE_COMPLETE,
POOL_BIND_COMPLETE,
POOL_EXECUTE_COMPLETE
-} POOL_QUERY_STATE;
+} POOL_QUERY_STATE;
/*
* Query context:
Node *rewritten_parse_tree; /* rewritten raw parser output if any */
bool where_to_send[MAX_NUM_BACKENDS]; /* DB node map to send
* query */
- int load_balance_node_id; /* load balance node id per statement */
- int virtual_main_node_id; /* the 1st DB node to send query */
+ int load_balance_node_id; /* load balance node id per statement */
+ int virtual_main_node_id; /* the 1st DB node to send query */
POOL_QUERY_STATE query_state[MAX_NUM_BACKENDS]; /* for extended query
* protocol */
bool is_cache_safe; /* true if SELECT is safe to cache */
* extended query, do not commit cache if
* this flag is true. */
- bool atEnd; /* if true all rows have been already
- * fetched from the portal */
+ bool atEnd; /* if true all rows have been already fetched
+ * from the portal */
- bool partial_fetch; /* if true some rows have been fetched by
- * an execute with non 0 row option */
+ bool partial_fetch; /* if true some rows have been fetched by an
+ * execute with non 0 row option */
MemoryContext memory_context; /* memory context for query context */
-} POOL_QUERY_CONTEXT;
+} POOL_QUERY_CONTEXT;
-extern POOL_QUERY_CONTEXT * pool_init_query_context(void);
-extern void pool_query_context_destroy(POOL_QUERY_CONTEXT * query_context);
-extern POOL_QUERY_CONTEXT * pool_query_context_shallow_copy(POOL_QUERY_CONTEXT * query_context);
-extern void pool_start_query(POOL_QUERY_CONTEXT * query_context, char *query, int len, Node *node);
-extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern bool pool_is_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern void pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern void pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT * query_context);
-extern void pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context);
-extern bool pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT * query_context);
-extern void pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node);
-extern POOL_STATUS pool_send_and_wait(POOL_QUERY_CONTEXT * query_context, int send_type, int node_id);
-extern POOL_STATUS pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context, char *kind, int len, char *contents, int send_type, int node_id, bool nowait);
+extern POOL_QUERY_CONTEXT *pool_init_query_context(void);
+extern void pool_query_context_destroy(POOL_QUERY_CONTEXT *query_context);
+extern POOL_QUERY_CONTEXT *pool_query_context_shallow_copy(POOL_QUERY_CONTEXT *query_context);
+extern void pool_start_query(POOL_QUERY_CONTEXT *query_context, char *query, int len, Node *node);
+extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern bool pool_is_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern void pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern void pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT *query_context);
+extern void pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT *query_context);
+extern bool pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT *query_context);
+extern void pool_where_to_send(POOL_QUERY_CONTEXT *query_context, char *query, Node *node);
+extern POOL_STATUS pool_send_and_wait(POOL_QUERY_CONTEXT *query_context, int send_type, int node_id);
+extern POOL_STATUS pool_extended_send_and_wait(POOL_QUERY_CONTEXT *query_context, char *kind, int len, char *contents, int send_type, int node_id, bool nowait);
extern Node *pool_get_parse_tree(void);
extern char *pool_get_query_string(void);
extern bool is_set_transaction_serializable(Node *node);
extern bool is_start_transaction_query(Node *node);
extern bool is_read_write(TransactionStmt *node);
extern bool is_serializable(TransactionStmt *node);
-extern bool pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT * query_context);
+extern bool pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT *query_context);
extern bool is_savepoint_query(Node *node);
extern bool is_2pc_transaction_query(Node *node);
-extern void pool_set_query_state(POOL_QUERY_CONTEXT * query_context, POOL_QUERY_STATE state);
+extern void pool_set_query_state(POOL_QUERY_CONTEXT *query_context, POOL_QUERY_STATE state);
extern int statecmp(POOL_QUERY_STATE s1, POOL_QUERY_STATE s2);
extern bool pool_is_cache_safe(void);
extern void pool_set_cache_safe(void);
extern void pool_set_cache_exceeded(void);
extern void pool_unset_cache_exceeded(void);
extern bool pool_is_transaction_read_only(Node *node);
-extern void pool_force_query_node_to_backend(POOL_QUERY_CONTEXT * query_context, int backend_id);
+extern void pool_force_query_node_to_backend(POOL_QUERY_CONTEXT *query_context, int backend_id);
extern void check_object_relationship_list(char *name, bool is_func_name);
-extern int wait_for_failover_to_finish(void);
+extern int wait_for_failover_to_finish(void);
#endif /* POOL_QUERY_CONTEXT_H */
POOL_READ_COMMITTED, /* Read committed */
POOL_REPEATABLE_READ, /* Repeatable read */
POOL_SERIALIZABLE /* Serializable */
-} POOL_TRANSACTION_ISOLATION;
+} POOL_TRANSACTION_ISOLATION;
/*
* Return values for pool_use_sync_map
POOL_SENT_MESSAGE_CREATED, /* initial state of sent message */
POOL_SENT_MESSAGE_CLOSED /* sent message closed but close complete
* message has not arrived yet */
-} POOL_SENT_MESSAGE_STATE;
+} POOL_SENT_MESSAGE_STATE;
/*
* Message content of extended query
int param_offset; /* Offset from contents where actual bind
* parameters are stored. This is meaningful
* only when is_cache_safe is true. */
-} POOL_SENT_MESSAGE;
+} POOL_SENT_MESSAGE;
/*
* List of POOL_SENT_MESSAGE (XXX this should have been implemented using a
int capacity; /* capacity of list */
int size; /* number of elements */
POOL_SENT_MESSAGE **sent_messages;
-} POOL_SENT_MESSAGE_LIST;
+} POOL_SENT_MESSAGE_LIST;
/*
* Received message queue used in extended query/streaming replication mode.
POOL_DESCRIBE,
POOL_CLOSE,
POOL_SYNC
-} POOL_MESSAGE_TYPE;
+} POOL_MESSAGE_TYPE;
typedef struct
{
bool not_forward_to_frontend; /* Do not forward response from
* backend to frontend. This is
* used by parse_before_bind() */
- bool node_ids[MAX_NUM_BACKENDS]; /* backend node map which this message was sent to */
+ bool node_ids[MAX_NUM_BACKENDS]; /* backend node map which this
+ * message was sent to */
POOL_QUERY_CONTEXT *query_context; /* query context */
+
/*
* If "flush" message arrives, this flag is set to true until all buffered
* message for frontend are sent out.
*/
bool flush_pending;
- bool is_tx_started_by_multi_statement; /* true if an explicit transaction has been started by
- multi statement query */
-} POOL_PENDING_MESSAGE;
+ bool is_tx_started_by_multi_statement; /* true if an explicit
+ * transaction has been
+ * started by multi
+ * statement query */
+} POOL_PENDING_MESSAGE;
-typedef enum {
- TEMP_TABLE_CREATING = 1, /* temp table creating, not committed yet. */
- TEMP_TABLE_DROPPING, /* temp table dropping, not committed yet. */
- TEMP_TABLE_CREATE_COMMITTED, /* temp table created and committed. */
- TEMP_TABLE_DROP_COMMITTED, /* temp table dropped and committed. */
-} POOL_TEMP_TABLE_STATE;
+typedef enum
+{
+ TEMP_TABLE_CREATING = 1, /* temp table creating, not committed yet. */
+ TEMP_TABLE_DROPPING, /* temp table dropping, not committed yet. */
+ TEMP_TABLE_CREATE_COMMITTED, /* temp table created and committed. */
+ TEMP_TABLE_DROP_COMMITTED, /* temp table dropped and committed. */
+} POOL_TEMP_TABLE_STATE;
-typedef struct {
+typedef struct
+{
char tablename[MAX_IDENTIFIER_LEN]; /* temporary table name */
- POOL_TEMP_TABLE_STATE state; /* see above */
-} POOL_TEMP_TABLE;
+ POOL_TEMP_TABLE_STATE state; /* see above */
+} POOL_TEMP_TABLE;
typedef enum
int preferred_main_node_id;
#endif
- /* Whether snapshot is acquired in this transaction. Only used by Snapshot Isolation mode. */
+ /*
+ * Whether snapshot is acquired in this transaction. Only used by Snapshot
+ * Isolation mode.
+ */
SI_STATE si_state;
/* Whether transaction is read only. Only used by Snapshot Isolation mode. */
SI_STATE transaction_read_only;
* transaction has been
* started by a
* multi-statement-query */
+
/*
- * True if query cache feature disabled until session ends.
- * This is set when SET ROLE/SET SESSION AUTHORIZATION executed.
+ * True if query cache feature disabled until session ends. This is set
+ * when SET ROLE/SET SESSION AUTHORIZATION executed.
*/
- bool query_cache_disabled;
+ bool query_cache_disabled;
+
/*
* True if query cache feature disabled until current transaction ends.
* This is set when REVOKE executed in a transaction.
*/
- bool query_cache_disabled_tx;
+ bool query_cache_disabled_tx;
-} POOL_SESSION_CONTEXT;
+} POOL_SESSION_CONTEXT;
-extern void pool_init_session_context(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern void pool_init_session_context(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
extern void pool_session_context_destroy(void);
-extern POOL_SESSION_CONTEXT * pool_get_session_context(bool noerror);
+extern POOL_SESSION_CONTEXT *pool_get_session_context(bool noerror);
extern int pool_get_local_session_id(void);
extern bool pool_is_query_in_progress(void);
extern void pool_set_query_in_progress(void);
extern bool pool_is_ignore_till_sync(void);
extern void pool_set_ignore_till_sync(void);
extern void pool_unset_ignore_till_sync(void);
-extern POOL_SENT_MESSAGE * pool_create_sent_message(char kind, int len, char *contents,
- int num_tsparams, const char *name,
- POOL_QUERY_CONTEXT * query_context);
-extern void pool_add_sent_message(POOL_SENT_MESSAGE * message);
+extern POOL_SENT_MESSAGE *pool_create_sent_message(char kind, int len, char *contents,
+ int num_tsparams, const char *name,
+ POOL_QUERY_CONTEXT *query_context);
+extern void pool_add_sent_message(POOL_SENT_MESSAGE *message);
extern bool pool_remove_sent_message(char kind, const char *name);
extern void pool_remove_sent_messages(char kind);
extern void pool_clear_sent_message_list(void);
-extern void pool_sent_message_destroy(POOL_SENT_MESSAGE * message);
-extern POOL_SENT_MESSAGE * pool_get_sent_message(char kind, const char *name, POOL_SENT_MESSAGE_STATE state);
-extern void pool_set_sent_message_state(POOL_SENT_MESSAGE * message);
+extern void pool_sent_message_destroy(POOL_SENT_MESSAGE *message);
+extern POOL_SENT_MESSAGE *pool_get_sent_message(char kind, const char *name, POOL_SENT_MESSAGE_STATE state);
+extern void pool_set_sent_message_state(POOL_SENT_MESSAGE *message);
extern void pool_zap_query_context_in_sent_messages(POOL_QUERY_CONTEXT *query_context);
-extern POOL_SENT_MESSAGE * pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT * query_context);
+extern POOL_SENT_MESSAGE *pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT *query_context);
extern void pool_unset_writing_transaction(void);
extern void pool_set_writing_transaction(void);
extern bool pool_is_writing_transaction(void);
extern void pool_set_command_success(void);
extern bool pool_is_command_success(void);
extern void pool_copy_prep_where(bool *src, bool *dest);
-extern bool can_query_context_destroy(POOL_QUERY_CONTEXT * qc);
+extern bool can_query_context_destroy(POOL_QUERY_CONTEXT *qc);
extern void pool_pending_messages_init(void);
extern void pool_pending_messages_destroy(void);
-extern POOL_PENDING_MESSAGE * pool_pending_message_create(char kind, int len, char *contents);
-extern void pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE * message);
-extern void pool_pending_message_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context);
-extern void pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context);
-extern void pool_pending_message_query_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context);
-extern void pool_pending_message_add(POOL_PENDING_MESSAGE * message);
-extern POOL_PENDING_MESSAGE * pool_pending_message_head_message(void);
-extern POOL_PENDING_MESSAGE * pool_pending_message_pull_out(void);
-extern POOL_PENDING_MESSAGE * pool_pending_message_get(POOL_MESSAGE_TYPE type);
-extern char pool_get_close_message_spec(POOL_PENDING_MESSAGE * msg);
-extern char *pool_get_close_message_name(POOL_PENDING_MESSAGE * msg);
+extern POOL_PENDING_MESSAGE *pool_pending_message_create(char kind, int len, char *contents);
+extern void pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE *message);
+extern void pool_pending_message_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context);
+extern void pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context);
+extern void pool_pending_message_query_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context);
+extern void pool_pending_message_add(POOL_PENDING_MESSAGE *message);
+extern POOL_PENDING_MESSAGE *pool_pending_message_head_message(void);
+extern POOL_PENDING_MESSAGE *pool_pending_message_pull_out(void);
+extern POOL_PENDING_MESSAGE *pool_pending_message_get(POOL_MESSAGE_TYPE type);
+extern char pool_get_close_message_spec(POOL_PENDING_MESSAGE *msg);
+extern char *pool_get_close_message_name(POOL_PENDING_MESSAGE *msg);
extern void pool_pending_message_reset_previous_message(void);
-extern void pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE * message);
-extern POOL_PENDING_MESSAGE * pool_pending_message_get_previous_message(void);
+extern void pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE *message);
+extern POOL_PENDING_MESSAGE *pool_pending_message_get_previous_message(void);
extern bool pool_pending_message_exists(void);
extern const char *pool_pending_message_type_to_string(POOL_MESSAGE_TYPE type);
extern void pool_check_pending_message_and_reply(POOL_MESSAGE_TYPE type, char kind);
-extern POOL_PENDING_MESSAGE * pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT * qc);
-extern int pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE * msg);
+extern POOL_PENDING_MESSAGE *pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT *qc);
+extern int pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE *msg);
extern int pool_pending_message_get_message_num_by_backend_id(int backend_id);
extern void pool_pending_message_set_flush_request(void);
extern void dump_pending_message(void);
extern void pool_temp_tables_init(void);
extern void pool_temp_tables_destroy(void);
-extern void pool_temp_tables_add(char * tablename, POOL_TEMP_TABLE_STATE state);
-extern POOL_TEMP_TABLE * pool_temp_tables_find(char * tablename);
-extern void pool_temp_tables_delete(char * tablename, POOL_TEMP_TABLE_STATE state);
-extern void pool_temp_tables_commit_pending(void);
-extern void pool_temp_tables_remove_pending(void);
-extern void pool_temp_tables_dump(void);
+extern void pool_temp_tables_add(char *tablename, POOL_TEMP_TABLE_STATE state);
+extern POOL_TEMP_TABLE *pool_temp_tables_find(char *tablename);
+extern void pool_temp_tables_delete(char *tablename, POOL_TEMP_TABLE_STATE state);
+extern void pool_temp_tables_commit_pending(void);
+extern void pool_temp_tables_remove_pending(void);
+extern void pool_temp_tables_dump(void);
extern bool is_tx_started_by_multi_statement_query(void);
extern void set_tx_started_by_multi_statement_query(void);
/*
* Health check statistics per node
*/
-typedef struct {
- uint64 total_count; /* total count of health check */
- uint64 success_count; /* total count of successful health check */
- uint64 fail_count; /* total count of failed health check */
- uint64 skip_count; /* total count of skipped health check */
- uint64 retry_count; /* total count of health check retries */
- uint32 max_retry_count; /* max retry count in a health check session */
- uint64 total_health_check_duration; /* sum of health check duration */
- int32 max_health_check_duration; /* maximum duration spent for a health check session in milli seconds */
- int32 min_health_check_duration; /* minimum duration spent for a health check session in milli seconds */
- time_t last_health_check; /* last health check timestamp */
- time_t last_successful_health_check; /* last successful health check timestamp */
- time_t last_skip_health_check; /* last skipped health check timestamp */
- time_t last_failed_health_check; /* last failed health check timestamp */
+typedef struct
+{
+ uint64 total_count; /* total count of health check */
+ uint64 success_count; /* total count of successful health check */
+ uint64 fail_count; /* total count of failed health check */
+ uint64 skip_count; /* total count of skipped health check */
+ uint64 retry_count; /* total count of health check retries */
+ uint32 max_retry_count; /* max retry count in a health check
+ * session */
+ uint64 total_health_check_duration; /* sum of health check
+ * duration */
+ int32 max_health_check_duration; /* maximum duration spent for a
+ * health check session in milli
+ * seconds */
+ int32 min_health_check_duration; /* minimum duration spent for a
+ * health check session in milli
+ * seconds */
+ time_t last_health_check; /* last health check timestamp */
+ time_t last_successful_health_check; /* last successful health
+ * check timestamp */
+ time_t last_skip_health_check; /* last skipped health check timestamp */
+ time_t last_failed_health_check; /* last failed health check
+ * timestamp */
} POOL_HEALTH_CHECK_STATISTICS;
-extern volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats area in shared memory */
+extern volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats
+ * area in shared memory */
extern void do_health_check_child(int *node_id);
-extern size_t health_check_stats_shared_memory_size(void);
-extern void health_check_stats_init(POOL_HEALTH_CHECK_STATISTICS *addr);
+extern size_t health_check_stats_shared_memory_size(void);
+extern void health_check_stats_init(POOL_HEALTH_CHECK_STATISTICS *addr);
-#endif /* health_check_h */
+#endif /* health_check_h */
int32 pid; /* writer's pid */
char is_last; /* last chunk of message? 't' or 'f' ('T' or
* 'F' for CSV case) */
- char data[]; /* data payload starts here */
+ char data[]; /* data payload starts here */
} PipeProtoHeader;
typedef union
extern void register_inform_quarantine_nodes_req(void);
extern bool register_node_operation_request(POOL_REQUEST_KIND kind,
int *node_id_set, int count, unsigned char flags);
-#endif /* pool_internal_comms_h */
+#endif /* pool_internal_comms_h */
EXPLAIN_FORMAT_XML,
EXPLAIN_FORMAT_JSON,
EXPLAIN_FORMAT_YAML
-} ExplainFormat;
+} ExplainFormat;
typedef struct ExplainState
{
List *rtable_names; /* alias names for RTEs */
List *deparse_cxt; /* context list for deparsing expressions */
Bitmapset *printed_subplans; /* ids of SubPlans we've printed */
-} ExplainState;
+} ExplainState;
/* Hook for plugins to get control in ExplainOneQuery() */
typedef void (*ExplainOneQuery_hook_type) (Query *query,
IntoClause *into,
- ExplainState *es,
+ ExplainState * es,
const char *queryString,
ParamListInfo params);
extern PGDLLIMPORT ExplainOneQuery_hook_type ExplainOneQuery_hook;
extern void ExplainQuery(ExplainStmt *stmt, const char *queryString,
- ParamListInfo params, DestReceiver *dest);
+ ParamListInfo params, DestReceiver * dest);
-extern ExplainState *NewExplainState(void);
+extern ExplainState * NewExplainState(void);
extern TupleDesc ExplainResultDesc(ExplainStmt *stmt);
extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into,
- ExplainState *es,
- const char *queryString, ParamListInfo params);
+ ExplainState * es,
+ const char *queryString, ParamListInfo params);
-extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into,
- ExplainState *es, const char *queryString,
- ParamListInfo params, const instr_time *planduration);
+extern void ExplainOnePlan(PlannedStmt * plannedstmt, IntoClause *into,
+ ExplainState * es, const char *queryString,
+ ParamListInfo params, const instr_time * planduration);
-extern void ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc);
-extern void ExplainPrintTriggers(ExplainState *es, QueryDesc *queryDesc);
+extern void ExplainPrintPlan(ExplainState * es, QueryDesc * queryDesc);
+extern void ExplainPrintTriggers(ExplainState * es, QueryDesc * queryDesc);
-extern void ExplainQueryText(ExplainState *es, QueryDesc *queryDesc);
+extern void ExplainQueryText(ExplainState * es, QueryDesc * queryDesc);
-extern void ExplainBeginOutput(ExplainState *es);
-extern void ExplainEndOutput(ExplainState *es);
-extern void ExplainSeparatePlans(ExplainState *es);
+extern void ExplainBeginOutput(ExplainState * es);
+extern void ExplainEndOutput(ExplainState * es);
+extern void ExplainSeparatePlans(ExplainState * es);
extern void ExplainPropertyList(const char *qlabel, List *data,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyListNested(const char *qlabel, List *data,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyText(const char *qlabel, const char *value,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyInteger(const char *qlabel, int value,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyLong(const char *qlabel, long value,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyFloat(const char *qlabel, double value, int ndigits,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyBool(const char *qlabel, bool value,
- ExplainState *es);
+ ExplainState * es);
#endif /* EXPLAIN_H */
{
NodeTag type;
const char *extnodename; /* identifier of ExtensibleNodeMethods */
-} ExtensibleNode;
+} ExtensibleNode;
/*
* node_size is the size of an extensible node of this type in bytes.
void (*nodeOut) (struct StringInfoData *str,
const struct ExtensibleNode *node);
void (*nodeRead) (struct ExtensibleNode *node);
-} ExtensibleNodeMethods;
+} ExtensibleNodeMethods;
-extern void RegisterExtensibleNodeMethods(const ExtensibleNodeMethods *method);
+extern void RegisterExtensibleNodeMethods(const ExtensibleNodeMethods * method);
extern const ExtensibleNodeMethods *GetExtensibleNodeMethods(const char *name,
- bool missing_ok);
+ bool missing_ok);
/*
* Flags for custom paths, indicating what capabilities the resulting scan
/* from parser.c */
-extern int base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
- core_yyscan_t yyscanner);
-extern int minimal_base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
+extern int base_yylex(YYSTYPE *lvalp, YYLTYPE * llocp,
core_yyscan_t yyscanner);
+extern int minimal_base_yylex(YYSTYPE *lvalp, YYLTYPE * llocp,
+ core_yyscan_t yyscanner);
/* from gram.y */
extern void parser_init(base_yy_extra_type *yyext);
T_Invalid = 0,
/* pgpool Extension */
- T_PgpoolVariableSetStmt,
- T_PgpoolVariableShowStmt,
- T_PgpoolQueryCacheStmt,
+ T_PgpoolVariableSetStmt,
+ T_PgpoolVariableShowStmt,
+ T_PgpoolQueryCacheStmt,
#include "nodetags.h"
} NodeTag;
extern void outBitmapset(struct StringInfoData *str,
const struct Bitmapset *bms);
extern void outDatum(struct StringInfoData *str, uintptr_t value,
- int typlen, bool typbyval);
+ int typlen, bool typbyval);
extern char *nodeToString(const void *obj);
extern char *nodeToStringWithLocations(const void *obj);
extern char *bmsToString(const struct Bitmapset *bms);
AGG_SORTED, /* grouped agg, input must be sorted */
AGG_HASHED, /* grouped agg, use internal hashtable */
AGG_MIXED, /* grouped agg, hash and sort both used */
-} AggStrategy;
+} AggStrategy;
/*
* AggSplit -
SETOPCMD_INTERSECT_ALL,
SETOPCMD_EXCEPT,
SETOPCMD_EXCEPT_ALL,
-} SetOpCmd;
+} SetOpCmd;
typedef enum SetOpStrategy
{
SETOP_SORTED, /* input must be sorted */
SETOP_HASHED, /* use internal hashtable */
-} SetOpStrategy;
+} SetOpStrategy;
/*
* OnConflictAction -
List *lowerdatums; /* List of PartitionRangeDatums */
List *upperdatums; /* List of PartitionRangeDatums */
- ParseLoc location; /* token location, or -1 if unknown */
+ ParseLoc location; /* token location, or -1 if unknown */
} PartitionBoundSpec;
/*
typedef struct SinglePartitionSpec
{
NodeTag type;
-} SinglePartitionSpec;
+} SinglePartitionSpec;
/*
* PartitionCmd - info for ALTER TABLE/INDEX ATTACH/DETACH PARTITION commands
Bitmapset *selectedCols; /* columns needing SELECT permission */
Bitmapset *insertedCols; /* columns needing INSERT permission */
Bitmapset *updatedCols; /* columns needing UPDATE permission */
-} RTEPermissionInfo;
+} RTEPermissionInfo;
/*
* RangeTblFunction -
NodeTag type;
bool is_grant; /* true = GRANT, false = REVOKE */
GrantTargetType targtype; /* type of the grant target */
- ObjectType objtype; /* kind of object being operated on */
+ ObjectType objtype; /* kind of object being operated on */
List *objects; /* list of RangeVar nodes, ObjectWithArgs
* nodes, or plain names (as String values) */
List *privileges; /* list of AccessPriv nodes */
Oid langOid; /* OID of selected language */
bool langIsTrusted; /* trusted property of the language */
bool atomic; /* atomic execution context */
-} InlineCodeBlock;
+} InlineCodeBlock;
/* ----------------------
* CALL statement
NodeTag type;
bool atomic;
-} CallContext;
+} CallContext;
/* ----------------------
* Alter Object Rename Statement
extern List *get_dummy_write_query_tree(void);
extern List *get_dummy_read_query_tree(void);
-extern Node * get_dummy_insert_query_node(void);
+extern Node *get_dummy_insert_query_node(void);
#endif /* PARSER_H */
* the format of pg_class relation.
* ----------------
*/
-typedef FormData_pg_class *Form_pg_class;
+typedef FormData_pg_class * Form_pg_class;
DECLARE_UNIQUE_INDEX_PKEY(pg_class_oid_index, 2662, ClassOidIndexId, pg_class, btree(oid oid_ops));
DECLARE_UNIQUE_INDEX(pg_class_relname_nsp_index, 2663, ClassNameNspIndexId, pg_class, btree(relname name_ops, relnamespace oid_ops));
#define RELKIND_COMPOSITE_TYPE 'c' /* composite type */
#define RELKIND_FOREIGN_TABLE 'f' /* foreign table */
#define RELKIND_PARTITIONED_TABLE 'p' /* partitioned table */
-#define RELKIND_PARTITIONED_INDEX 'I' /* partitioned index */
+#define RELKIND_PARTITIONED_INDEX 'I' /* partitioned index */
#endif /* NOT_USED_IN_PGPOOL */
const List *l2;
int i1; /* current element indexes */
int i2;
-} ForBothCellState;
+} ForBothCellState;
typedef struct ForThreeState
{
const List *l2;
const List *l3;
int i; /* common element index */
-} ForThreeState;
+} ForThreeState;
typedef struct ForFourState
{
const List *l3;
const List *l4;
int i; /* common element index */
-} ForFourState;
+} ForFourState;
typedef struct ForFiveState
{
const List *l4;
const List *l5;
int i; /* common element index */
-} ForFiveState;
+} ForFiveState;
/*
* These routines are small enough, and used often enough, to justify being
* the format of pg_trigger relation.
* ----------------
*/
-typedef FormData_pg_trigger *Form_pg_trigger;
+typedef FormData_pg_trigger * Form_pg_trigger;
DECLARE_TOAST(pg_trigger, 2336, 2337);
uint8 b4_4_lower; /* min/max allowed value for 4th input byte */
uint8 b4_4_upper;
-} pg_mb_radix_tree;
+} pg_mb_radix_tree;
/*
* UTF-8 to local code conversion map (for combined characters)
uint32 utf1; /* UTF-8 code 1 */
uint32 utf2; /* UTF-8 code 2 */
uint32 code; /* local code */
-} pg_utf_to_local_combined;
+} pg_utf_to_local_combined;
/*
* local code to UTF-8 conversion map (for combined characters)
uint32 code; /* local code */
uint32 utf1; /* UTF-8 code 1 */
uint32 utf2; /* UTF-8 code 2 */
-} pg_local_to_utf_combined;
+} pg_local_to_utf_combined;
/*
* callback function for algorithmic encoding conversions (in either direction)
extern int UtfToLocal(const unsigned char *utf, int len,
unsigned char *iso,
- const pg_mb_radix_tree *map,
- const pg_utf_to_local_combined *cmap, int cmapsize,
+ const pg_mb_radix_tree * map,
+ const pg_utf_to_local_combined * cmap, int cmapsize,
utf_local_conversion_func conv_func,
int encoding, bool noError);
extern int LocalToUtf(const unsigned char *iso, int len,
unsigned char *utf,
- const pg_mb_radix_tree *map,
- const pg_local_to_utf_combined *cmap, int cmapsize,
+ const pg_mb_radix_tree * map,
+ const pg_local_to_utf_combined * cmap, int cmapsize,
utf_local_conversion_func conv_func,
int encoding, bool noError);
const unsigned char *tab, bool noError);
#ifdef WIN32
-extern WCHAR *pgwin32_message_to_UTF16(const char *str, int len, int *utf16len);
+extern WCHAR * pgwin32_message_to_UTF16(const char *str, int len, int *utf16len);
#endif
#endif /* PG_WCHAR_H */
* for portability. Don't use "offsetof(struct s, f[0])", as this doesn't
* work with MSVC and with C++ compilers.
*/
-#define FLEXIBLE_ARRAY_MEMBER /* empty */
+#define FLEXIBLE_ARRAY_MEMBER /* empty */
#endif /* POOL_PARSER_H */
/* Entry points in parser/scan.l */
extern core_yyscan_t scanner_init(const char *str,
- int slen,
+ int slen,
core_yy_extra_type *yyext,
const ScanKeywordList *keywordlist,
const uint16 *keyword_tokens);
extern void scanner_finish(core_yyscan_t yyscanner);
-extern int core_yylex(core_YYSTYPE *yylval_param, YYLTYPE *yylloc_param,
+extern int core_yylex(core_YYSTYPE *yylval_param, YYLTYPE * yylloc_param,
core_yyscan_t yyscanner);
extern int scanner_errposition(int location, core_yyscan_t yyscanner);
extern void setup_scanner_errposition_callback(ScannerCallbackState *scbstate,
CON_CONNECT_WAIT, /* waiting for connection starting */
CON_UP, /* up and running */
CON_DOWN /* down, disconnected */
-} BACKEND_STATUS;
+} BACKEND_STATUS;
/* backend status name strings */
#define BACKEND_STATUS_CON_UNUSED "unused"
typedef struct
{
BACKEND_STATUS status[MAX_NUM_BACKENDS];
-} BackendStatusRecord;
+} BackendStatusRecord;
typedef enum
{
ROLE_REPLICA,
ROLE_PRIMARY,
ROLE_STANDBY
-} SERVER_ROLE;
+} SERVER_ROLE;
/*
* PostgreSQL backend descriptor. Placed on shared memory area.
char backend_hostname[MAX_DB_HOST_NAMELEN]; /* backend host name */
int backend_port; /* backend port numbers */
BACKEND_STATUS backend_status; /* backend status */
- char pg_backend_status[NAMEDATALEN]; /* backend status examined by show pool_nodes and pcp_node_info*/
+ char pg_backend_status[NAMEDATALEN]; /* backend status examined by
+ * show pool_nodes and
+ * pcp_node_info */
time_t status_changed_time; /* backend status changed time */
double backend_weight; /* normalized backend load balance ratio */
double unnormalized_weight; /* described parameter */
char backend_data_directory[MAX_PATH_LENGTH];
- char backend_application_name[NAMEDATALEN]; /* application_name for walreceiver */
+ char backend_application_name[NAMEDATALEN]; /* application_name for
+ * walreceiver */
unsigned short flag; /* various flags */
bool quarantine; /* true if node is CON_DOWN because of
* quarantine */
uint64 standby_delay; /* The replication delay against the primary */
- bool standby_delay_by_time; /* true if standby_delay is measured in microseconds, not bytes */
+ bool standby_delay_by_time; /* true if standby_delay is measured
+ * in microseconds, not bytes */
SERVER_ROLE role; /* Role of server. used by pcp_node_info and
* failover() to keep track of quarantined
* primary node */
- char pg_role[NAMEDATALEN]; /* backend role examined by show pool_nodes and pcp_node_info*/
- char replication_state [NAMEDATALEN]; /* "state" from pg_stat_replication */
- char replication_sync_state [NAMEDATALEN]; /* "sync_state" from pg_stat_replication */
-} BackendInfo;
+ char pg_role[NAMEDATALEN]; /* backend role examined by show
+ * pool_nodes and pcp_node_info */
+ char replication_state[NAMEDATALEN]; /* "state" from
+ * pg_stat_replication */
+ char replication_sync_state[NAMEDATALEN]; /* "sync_state" from
+ * pg_stat_replication */
+} BackendInfo;
typedef struct
{
* reloading pgpool.conf. */
BackendInfo backend_info[MAX_NUM_BACKENDS];
-} BackendDesc;
+} BackendDesc;
typedef enum
{
IDLE,
IDLE_IN_TRANS,
CONNECTING
-} ProcessStatus;
+} ProcessStatus;
/*
* mamimum cancel key length
int major; /* protocol major version */
int minor; /* protocol minor version */
int pid; /* backend process id */
- char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+ char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
int32 keylen; /* cancel key length */
int counter; /* used counter */
time_t create_time; /* connection creation time */
- time_t client_connection_time; /* client connection time */
+ time_t client_connection_time; /* client connection time */
time_t client_disconnection_time; /* client last disconnection time */
int client_idle_duration; /* client idle duration time (s) */
int load_balancing_node; /* load balancing node */
* it should not be treated as a backend node failure. This flag is used
* to handle pg_terminate_backend()
*/
-} ConnectionInfo;
+} ConnectionInfo;
/*
* process information
{
pid_t pid; /* OS's process id */
time_t start_time; /* fork() time */
- char connected; /* if not 0 this process is already used*/
+ char connected; /* if not 0 this process is already used */
int wait_for_connect; /* waiting time for client connection (s) */
ConnectionInfo *connection_info; /* head of the connection info for
* this process */
- int client_connection_count; /* how many times clients used this process */
- ProcessStatus status;
- char client_host[NI_MAXHOST]; /* client host. Only valid if status != WAIT_FOR_CONNECT */
- char client_port[NI_MAXSERV]; /* client port. Only valid if status != WAIT_FOR_CONNECT */
- char statement[MAXSTMTLEN]; /* the last statement sent to backend */
- uint64 node_ids[2]; /* "statement" is sent to the node id (bitmap) */
+ int client_connection_count; /* how many times clients used
+ * this process */
+ ProcessStatus status;
+ char client_host[NI_MAXHOST]; /* client host. Only valid if
+ * status != WAIT_FOR_CONNECT */
+ char client_port[NI_MAXSERV]; /* client port. Only valid if
+ * status != WAIT_FOR_CONNECT */
+ char statement[MAXSTMTLEN]; /* the last statement sent to backend */
+ uint64 node_ids[2]; /* "statement" is sent to the node id (bitmap) */
bool need_to_restart; /* If non 0, exit this child process as
* soon as current session ends. Typical
* case this flag being set is failback a
* node in streaming replication mode. */
bool exit_if_idle;
- int pooled_connections; /* Total number of pooled connections
- * by this child */
-} ProcessInfo;
+ int pooled_connections; /* Total number of pooled connections by
+ * this child */
+} ProcessInfo;
/*
* reporting types
char name[POOLCONFIG_MAXNAMELEN + 1];
char value[POOLCONFIG_MAXVALLEN + 1];
char desc[POOLCONFIG_MAXDESCLEN + 1];
-} POOL_REPORT_CONFIG;
+} POOL_REPORT_CONFIG;
/* nodes report struct */
typedef struct
char rep_state[POOLCONFIG_MAXWEIGHTLEN + 1];
char rep_sync_state[POOLCONFIG_MAXWEIGHTLEN + 1];
char last_status_change[POOLCONFIG_MAXDATELEN];
-} POOL_REPORT_NODES;
+} POOL_REPORT_NODES;
/* processes report struct */
typedef struct
char backend_connection_time[POOLCONFIG_MAXDATELEN + 1];
char pool_counter[POOLCONFIG_MAXCOUNTLEN + 1];
char status[POOLCONFIG_MAXPROCESSSTATUSLEN + 1];
-} POOL_REPORT_PROCESSES;
+} POOL_REPORT_PROCESSES;
/* pools reporting struct */
typedef struct
char client_host[NI_MAXHOST];
char client_port[NI_MAXSERV];
char statement[MAXSTMTLEN];
-} POOL_REPORT_POOLS;
+} POOL_REPORT_POOLS;
/* version struct */
typedef struct
{
char version[POOLCONFIG_MAXVALLEN + 1];
-} POOL_REPORT_VERSION;
+} POOL_REPORT_VERSION;
/* health check statistics report struct */
typedef struct
char status[POOLCONFIG_MAXSTATLEN + 1];
char role[POOLCONFIG_MAXWEIGHTLEN + 1];
char last_status_change[POOLCONFIG_MAXDATELEN];
- char total_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char success_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char fail_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char skip_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char retry_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char average_retry_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char max_retry_count[POOLCONFIG_MAXCOUNTLEN+1];
- char max_health_check_duration[POOLCONFIG_MAXCOUNTLEN+1];
- char min_health_check_duration[POOLCONFIG_MAXCOUNTLEN+1];
- char average_health_check_duration[POOLCONFIG_MAXLONGCOUNTLEN+1];
+ char total_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char success_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char fail_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char skip_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char retry_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char average_retry_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char max_retry_count[POOLCONFIG_MAXCOUNTLEN + 1];
+ char max_health_check_duration[POOLCONFIG_MAXCOUNTLEN + 1];
+ char min_health_check_duration[POOLCONFIG_MAXCOUNTLEN + 1];
+ char average_health_check_duration[POOLCONFIG_MAXLONGCOUNTLEN + 1];
char last_health_check[POOLCONFIG_MAXDATELEN];
char last_successful_health_check[POOLCONFIG_MAXDATELEN];
char last_skip_health_check[POOLCONFIG_MAXDATELEN];
char last_failed_health_check[POOLCONFIG_MAXDATELEN];
-} POOL_HEALTH_CHECK_STATS;
+} POOL_HEALTH_CHECK_STATS;
/* show backend statistics report struct */
typedef struct
char update_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
char delete_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
char ddl_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char other_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char panic_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char fatal_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char error_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
-} POOL_BACKEND_STATS;
+ char other_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+ char panic_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+ char fatal_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+ char error_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+} POOL_BACKEND_STATS;
typedef enum
{
extern PCPResultInfo * pcp_health_check_stats(PCPConnInfo * pcpCon, int nid);
extern PCPResultInfo * pcp_process_count(PCPConnInfo * pcpConn);
extern PCPResultInfo * pcp_process_info(PCPConnInfo * pcpConn, int pid);
-extern PCPResultInfo * pcp_reload_config(PCPConnInfo * pcpConn,char command_scope);
-extern PCPResultInfo * pcp_log_rotate(PCPConnInfo * pcpConn,char command_scope);
+extern PCPResultInfo * pcp_reload_config(PCPConnInfo * pcpConn, char command_scope);
+extern PCPResultInfo * pcp_log_rotate(PCPConnInfo * pcpConn, char command_scope);
extern PCPResultInfo * pcp_invalidate_query_cache(PCPConnInfo * pcpConn);
extern PCPResultInfo * pcp_detach_node(PCPConnInfo * pcpConn, int nid);
extern char *role_to_str(SERVER_ROLE role);
-extern int * pool_health_check_stats_offsets(int *n);
-extern int * pool_report_pools_offsets(int *n);
+extern int *pool_health_check_stats_offsets(int *n);
+extern int *pool_report_pools_offsets(int *n);
/* ------------------------------
* pcp_error.c
char nodeName[WD_MAX_HOST_NAMELEN];
char hostName[WD_MAX_HOST_NAMELEN]; /* host name */
char stateName[WD_MAX_HOST_NAMELEN]; /* state name */
- char membership_status_string[WD_MAX_HOST_NAMELEN]; /* membership status of this node */
+ char membership_status_string[WD_MAX_HOST_NAMELEN]; /* membership status of
+ * this node */
int wd_port; /* watchdog port */
int wd_priority; /* node priority in leader election */
int pgpool_port; /* pgpool port */
int po; /* pending data offset */
int bufsz; /* pending data buffer size */
int len; /* pending data length */
-} PCP_CONNECTION;
+} PCP_CONNECTION;
-extern PCP_CONNECTION * pcp_open(int fd);
-extern void pcp_close(PCP_CONNECTION * pc);
-extern int pcp_read(PCP_CONNECTION * pc, void *buf, int len);
-extern int pcp_write(PCP_CONNECTION * pc, void *buf, int len);
-extern int pcp_flush(PCP_CONNECTION * pc);
+extern PCP_CONNECTION *pcp_open(int fd);
+extern void pcp_close(PCP_CONNECTION *pc);
+extern int pcp_read(PCP_CONNECTION *pc, void *buf, int len);
+extern int pcp_write(PCP_CONNECTION *pc, void *buf, int len);
+extern int pcp_flush(PCP_CONNECTION *pc);
#define UNIX_DOMAIN_PATH "/tmp"
extern bool pcp_mark_recovery_in_progress(void);
-#endif /* pcp_worker_h */
+#endif /* pcp_worker_h */
extern void start_recovery(int recovery_node);
extern void finish_recovery(void);
-extern int wait_connection_closed(void);
-extern int ensure_conn_counter_validity(void);
+extern int wait_connection_closed(void);
+extern int ensure_conn_counter_validity(void);
-#endif /* recovery_h */
+#endif /* recovery_h */
POOL_ERROR,
POOL_FATAL,
POOL_DEADLOCK
-} POOL_STATUS;
+} POOL_STATUS;
typedef enum
{
POOL_SOCKET_VALID,
POOL_SOCKET_ERROR,
POOL_SOCKET_EOF
-} POOL_SOCKET_STATE;
+} POOL_SOCKET_STATE;
/*
* Imported from src/include/libpq/pqcomm.h as of PostgreSQL 18.
char options[SM_OPTIONS]; /* Optional additional args */
char unused[SM_UNUSED]; /* Unused */
char tty[SM_TTY]; /* Tty for debug output */
-} StartupPacket_v2;
+} StartupPacket_v2;
/* startup packet info */
typedef struct
{
int protoVersion; /* Protocol version */
int pid; /* backend process id */
- char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
-} CancelPacket;
+ char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+} CancelPacket;
#define MAX_PASSWORD_SIZE 1024
PasswordMapping *passwordMapping;
ConnectionInfo *con_info; /* shared memory coninfo used for handling the
* query containing pg_terminate_backend */
-} POOL_CONNECTION;
+} POOL_CONNECTION;
/*
* connection pool structure
{
StartupPacket *sp; /* startup packet info */
int pid; /* backend pid */
- char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+ char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+
/*
- * Cancel key length. In protocol version 3.0, it is 4.
- * In 3.2 or later, the maximum length is 256.
+ * Cancel key length. In protocol version 3.0, it is 4. In 3.2 or later,
+ * the maximum length is 256.
*/
int32 keylen;
POOL_CONNECTION *con;
time_t closetime; /* absolute time in second when the connection
* closed if 0, that means the connection is
* under use. */
+
/*
* Protocol version after negotiation. If nplen == 0, no negotiation has
* been done.
*/
int negotiated_major;
int negotiated_minor;
- char *negotiateProtocolMsg; /* Raw NegotiateProtocol messag */
+ char *negotiateProtocolMsg; /* Raw NegotiateProtocol messag */
int32 nplen; /* message length of NegotiateProtocol messag */
-} POOL_CONNECTION_POOL_SLOT;
+} POOL_CONNECTION_POOL_SLOT;
typedef struct
{
*/
ConnectionInfo *info;
POOL_CONNECTION_POOL_SLOT *slots[MAX_NUM_BACKENDS];
-} POOL_CONNECTION_POOL;
+} POOL_CONNECTION_POOL;
/* Defined in pool_session_context.h */
extern bool pool_is_node_to_be_sent_in_current_query(int node_id);
extern int pool_virtual_main_db_node_id(void);
-extern BACKEND_STATUS * my_backend_status[];
+extern BACKEND_STATUS *my_backend_status[];
extern int my_main_node_id;
#define VALID_BACKEND(backend_id) \
POOL_NODE_STATUS_PRIMARY, /* primary node */
POOL_NODE_STATUS_STANDBY, /* standby node */
POOL_NODE_STATUS_INVALID /* invalid node (split brain, stand alone) */
-} POOL_NODE_STATUS;
+} POOL_NODE_STATUS;
/* Clustering mode macros */
#define REPLICATION (pool_config->backend_clustering_mode == CM_NATIVE_REPLICATION || \
#define ACCEPT_FD_SEM 4
#define SI_CRITICAL_REGION_SEM 5
#define FOLLOW_PRIMARY_SEM 6
-#define MAIN_EXIT_HANDLER_SEM 7 /* used in exit_hander in pgpool main process */
+#define MAIN_EXIT_HANDLER_SEM 7 /* used in exit_hander in pgpool main
+ * process */
#define MAX_REQUEST_QUEUE_SIZE 10
-#define MAX_SEC_WAIT_FOR_CLUSTER_TRANSACTION 10 /* time in seconds to keep
+#define MAX_SEC_WAIT_FOR_CLUSTER_TRANSACTION 10 /* time in seconds to keep
* retrying for a watchdog
* command if the cluster is
* not in stable state */
CLOSE_IDLE_REQUEST,
PROMOTE_NODE_REQUEST,
NODE_QUARANTINE_REQUEST
-} POOL_REQUEST_KIND;
+} POOL_REQUEST_KIND;
#define REQ_DETAIL_SWITCHOVER 0x00000001 /* failover due to switch over */
#define REQ_DETAIL_WATCHDOG 0x00000002 /* failover req from watchdog */
* require majority vote */
#define REQ_DETAIL_UPDATE 0x00000008 /* failover req is just an update
* node status request */
-#define REQ_DETAIL_PROMOTE 0x00000010 /* failover req is actually promoting the specified standby node.
- * current primary will be detached */
+#define REQ_DETAIL_PROMOTE 0x00000010 /* failover req is actually
+ * promoting the specified standby
+ * node. current primary will be
+ * detached */
typedef struct
{
unsigned char request_details; /* option flags kind */
int node_id[MAX_NUM_BACKENDS]; /* request node id */
int count; /* request node ids count */
-} POOL_REQUEST_NODE;
+} POOL_REQUEST_NODE;
typedef struct
{
POOL_REQUEST_NODE request[MAX_REQUEST_QUEUE_SIZE];
int request_queue_head;
int request_queue_tail;
- int main_node_id; /* the youngest node id which is not in down
+ int main_node_id; /* the youngest node id which is not in down
* status */
int primary_node_id; /* the primary node id in streaming
* replication mode */
- int conn_counter; /* number of connections from clients to pgpool */
+ int conn_counter; /* number of connections from clients to
+ * pgpool */
bool switching; /* it true, failover or failback is in
* progress */
- /* greater than 0 if follow primary command or detach_false_primary in
- * execution */
+
+ /*
+ * greater than 0 if follow primary command or detach_false_primary in
+ * execution
+ */
bool follow_primary_count;
- bool follow_primary_lock_pending; /* watchdog process can't wait
- * for follow_primary lock acquisition
- * in case it is held at the time of
- * request.
- * This flag indicates that lock was requested
- * by watchdog coordinator and next contender should
- * wait for the coordinator to release the lock
- */
- bool follow_primary_lock_held_remotely; /* true when lock is held by
- watchdog coordinator*/
- bool follow_primary_ongoing; /* true if follow primary command is ongoing */
- bool query_cache_invalidate_request; /* true if pcp_invalidate_query_cache requested */
-} POOL_REQUEST_INFO;
+ bool follow_primary_lock_pending; /* watchdog process can't wait
+ * for follow_primary lock
+ * acquisition in case it is
+ * held at the time of
+ * request. This flag
+ * indicates that lock was
+ * requested by watchdog
+ * coordinator and next
+ * contender should wait for
+ * the coordinator to release
+ * the lock */
+ bool follow_primary_lock_held_remotely; /* true when lock is held
+ * by watchdog coordinator */
+ bool follow_primary_ongoing; /* true if follow primary command is
+ * ongoing */
+ bool query_cache_invalidate_request; /* true if
+ * pcp_invalidate_query_cache
+ * requested */
+} POOL_REQUEST_INFO;
/* description of row. corresponding to RowDescription message */
typedef struct
int typeoid; /* data type oid */
int size; /* data length minus means variable data type */
int mod; /* data type modifier */
-} AttrInfo;
+} AttrInfo;
typedef struct
{
int num_attrs; /* number of attributes */
AttrInfo *attrinfo;
-} RowDesc;
+} RowDesc;
typedef struct
{
* excluding termination null */
char **data; /* actual row character data terminated with
* null */
-} POOL_SELECT_RESULT;
+} POOL_SELECT_RESULT;
/*
* recovery mode
PT_PCP_WORKER,
PT_HEALTH_CHECK,
PT_LOGGER,
- PT_LAST_PTYPE /* last ptype marker. any ptype must be above this. */
-} ProcessType;
+ PT_LAST_PTYPE /* last ptype marker. any ptype must be above
+ * this. */
+} ProcessType;
typedef enum
BACKEND_CONNECTING,
PROCESSING,
EXITING
-} ProcessState;
+} ProcessState;
/*
* Snapshot isolation manage area in shared memory
*/
typedef struct
{
- uint32 commit_counter; /* number of committing children */
+ uint32 commit_counter; /* number of committing children */
uint32 snapshot_counter; /* number of snapshot acquiring children */
- pid_t *snapshot_waiting_children; /* array size is num_init_children */
- pid_t *commit_waiting_children; /* array size is num_init_children */
+ pid_t *snapshot_waiting_children; /* array size is num_init_children */
+ pid_t *commit_waiting_children; /* array size is num_init_children */
} SI_ManageInfo;
/*
extern pid_t myProcPid; /* process pid */
extern ProcessType processType;
extern ProcessState processState;
-extern bool reset_query_error; /* true if error occurs in reset queries */
+extern bool reset_query_error; /* true if error occurs in reset queries */
extern void set_application_name(ProcessType ptype);
extern void set_application_name_with_string(char *string);
extern void set_application_name_with_suffix(ProcessType ptype, int suffix);
extern char *get_application_name(void);
extern char *get_application_name_for_process(ProcessType ptype);
-void SetProcessGlobalVariables(ProcessType pType);
+void SetProcessGlobalVariables(ProcessType pType);
extern volatile SI_ManageInfo *si_manage_info;
extern volatile sig_atomic_t sigusr2_received;
extern volatile sig_atomic_t health_check_timer_expired; /* non 0 if health check
* timer expired */
extern int my_proc_id; /* process table id (!= UNIX's PID) */
-extern ProcessInfo * process_info; /* shmem process information table */
-extern ConnectionInfo * con_info; /* shmem connection info table */
-extern POOL_REQUEST_INFO * Req_info;
+extern ProcessInfo *process_info; /* shmem process information table */
+extern ConnectionInfo *con_info; /* shmem connection info table */
+extern POOL_REQUEST_INFO *Req_info;
extern volatile sig_atomic_t *InRecovery;
extern volatile sig_atomic_t got_sighup;
extern volatile sig_atomic_t exit_request;
extern void do_child(int *fds);
extern void child_exit(int code);
-extern void cancel_request(CancelPacket * sp, int32 len);
+extern void cancel_request(CancelPacket *sp, int32 len);
extern void check_stop_request(void);
extern void pool_initialize_private_backend_status(void);
extern int send_to_pg_frontend(char *data, int len, bool flush);
extern void *pool_shared_memory_create(size_t size);
extern void pool_shmem_exit(int code);
extern void initialize_shared_memory_main_segment(size_t size);
-extern void * pool_shared_memory_segment_get_chunk(size_t size);
+extern void *pool_shared_memory_segment_get_chunk(size_t size);
/* pgpool_main.c*/
-extern BackendInfo * pool_get_node_info(int node_number);
+extern BackendInfo *pool_get_node_info(int node_number);
extern int pool_get_node_count(void);
extern int *pool_get_process_list(int *array_size);
-extern ProcessInfo * pool_get_process_info(pid_t pid);
+extern ProcessInfo *pool_get_process_info(pid_t pid);
extern void pool_sleep(unsigned int second);
extern int PgpoolMain(bool discard_status, bool clear_memcache_oidmaps);
extern int pool_send_to_frontend(char *data, int len, bool flush);
extern int pool_frontend_exists(void);
extern pid_t pool_waitpid(int *status);
extern int write_status_file(void);
-extern POOL_NODE_STATUS * verify_backend_node_status(POOL_CONNECTION_POOL_SLOT * *slots);
-extern POOL_NODE_STATUS * pool_get_node_status(void);
+extern POOL_NODE_STATUS *verify_backend_node_status(POOL_CONNECTION_POOL_SLOT **slots);
+extern POOL_NODE_STATUS *pool_get_node_status(void);
extern void pool_set_backend_status_changed_time(int backend_id);
extern int get_next_main_node(void);
extern bool pool_acquire_follow_primary_lock(bool block, bool remote_reques);
/* pool_worker_child.c */
extern void do_worker_child(void);
-extern int get_query_result(POOL_CONNECTION_POOL_SLOT * *slots, int backend_id, char *query, POOL_SELECT_RESULT * *res);
+extern int get_query_result(POOL_CONNECTION_POOL_SLOT **slots, int backend_id, char *query, POOL_SELECT_RESULT **res);
/* utils/pg_strong_random.c */
-void pg_strong_random_init(void);
-bool pg_strong_random(void *buf, size_t len);
+void pg_strong_random_init(void);
+bool pg_strong_random(void *buf, size_t len);
#endif /* POOL_H */
int type;
int flag;
regex_t regexv;
-} RegPattern;
+} RegPattern;
typedef enum ProcessManagementModes
{
PM_STATIC = 1,
PM_DYNAMIC
-} ProcessManagementModes;
+} ProcessManagementModes;
typedef enum ProcessManagementSstrategies
{
PM_STRATEGY_AGGRESSIVE = 1,
PM_STRATEGY_GENTLE,
PM_STRATEGY_LAZY
-} ProcessManagementSstrategies;
+} ProcessManagementSstrategies;
typedef enum NativeReplicationSubModes
{
CM_SLONY,
CM_RAW,
CM_SNAPSHOT_ISOLATION
-} ClusteringModes;
+} ClusteringModes;
typedef enum LogStandbyDelayModes
{
LSD_ALWAYS = 1,
LSD_OVER_THRESHOLD,
LSD_NONE
-} LogStandbyDelayModes;
+} LogStandbyDelayModes;
typedef enum MemCacheMethod
{
SHMEM_CACHE = 1,
MEMCACHED_CACHE
-} MemCacheMethod;
+} MemCacheMethod;
typedef enum WdLifeCheckMethod
{
LIFECHECK_BY_QUERY = 1,
LIFECHECK_BY_HB,
LIFECHECK_BY_EXTERNAL
-} WdLifeCheckMethod;
+} WdLifeCheckMethod;
typedef enum DLBOW_OPTION
{
DLBOW_TRANS_TRANSACTION,
DLBOW_ALWAYS,
DLBOW_DML_ADAPTIVE
-} DLBOW_OPTION;
+} DLBOW_OPTION;
typedef enum RELQTARGET_OPTION
{
RELQTARGET_PRIMARY = 1,
RELQTARGET_LOAD_BALANCE_NODE
-} RELQTARGET_OPTION;
+} RELQTARGET_OPTION;
typedef enum CHECK_TEMP_TABLE_OPTION
{
CHECK_TEMP_NONE,
CHECK_TEMP_ON,
CHECK_TEMP_OFF,
-} CHECK_TEMP_TABLE_OPTION;
+} CHECK_TEMP_TABLE_OPTION;
/* log_backend_messages */
typedef enum BGMSG_OPTION
char hostname[WD_MAX_HOST_NAMELEN]; /* host name */
int pgpool_port; /* pgpool port */
int wd_port; /* watchdog port */
-} WdNodeInfo;
+} WdNodeInfo;
typedef struct WdNodesConfig
{
int num_wd; /* number of watchdogs */
- WdNodeInfo wd_node_info[MAX_WATCHDOG_NUM];
-} WdNodesConfig;
+ WdNodeInfo wd_node_info[MAX_WATCHDOG_NUM];
+} WdNodesConfig;
typedef struct
char addr[WD_MAX_HOST_NAMELEN];
char if_name[WD_MAX_IF_NAME_LEN];
int dest_port;
-} WdHbIf;
+} WdHbIf;
#define WD_INFO(wd_id) (pool_config->wd_nodes.wd_node_info[(wd_id)])
#define WD_HB_IF(if_id) (pool_config->hb_dest_if[(if_id)])
* retries */
int connect_timeout; /* timeout value before giving up
* connecting to backend */
-} HealthCheckParams;
+} HealthCheckParams;
/*
* For dml adaptive object relations
OBJECT_TYPE_FUNCTION,
OBJECT_TYPE_RELATION,
OBJECT_TYPE_UNKNOWN
-} DBObjectTypes;
+} DBObjectTypes;
typedef struct
{
- char *name;
+ char *name;
DBObjectTypes object_type;
-} DBObject;
+} DBObject;
typedef struct
{
DBObject left_token;
DBObject right_token;
-} DBObjectRelation;
+} DBObjectRelation;
/*
* configuration parameters
*/
typedef struct
{
- ClusteringModes backend_clustering_mode; /* Backend clustering mode */
- ProcessManagementModes process_management;
+ ClusteringModes backend_clustering_mode; /* Backend clustering mode */
+ ProcessManagementModes process_management;
ProcessManagementSstrategies process_management_strategy;
- char **listen_addresses; /* hostnames/IP addresses to listen on */
+ char **listen_addresses; /* hostnames/IP addresses to listen on */
int port; /* port # to bind */
- char **pcp_listen_addresses; /* PCP listen address to listen on */
+ char **pcp_listen_addresses; /* PCP listen address to listen on */
int pcp_port; /* PCP port # to bind */
- char **unix_socket_directories; /* pgpool socket directories */
- char *unix_socket_group; /* owner group of pgpool sockets */
+ char **unix_socket_directories; /* pgpool socket directories */
+ char *unix_socket_group; /* owner group of pgpool sockets */
int unix_socket_permissions; /* pgpool sockets permissions */
char *wd_ipc_socket_dir; /* watchdog command IPC socket directory */
- char **pcp_socket_dir; /* PCP socket directory */
- int num_init_children; /* Maximum number of child to
- * accept connections */
- int min_spare_children; /* Minimum number of idle children */
- int max_spare_children; /* Minimum number of idle children */
+ char **pcp_socket_dir; /* PCP socket directory */
+ int num_init_children; /* Maximum number of child to accept
+ * connections */
+ int min_spare_children; /* Minimum number of idle children */
+	int			max_spare_children; /* Maximum number of idle children */
int listen_backlog_multiplier; /* determines the size of the
* connection queue */
int reserved_connections; /* # of reserved connections */
char *pid_file_name; /* pid file name */
bool replication_mode; /* replication mode */
bool log_connections; /* logs incoming connections */
- bool log_disconnections; /* logs closing connections */
+ bool log_disconnections; /* logs closing connections */
bool log_pcp_processes; /* logs pcp processes */
bool log_hostname; /* resolve hostname */
bool enable_pool_hba; /* enables pool_hba.conf file
* false, just abort the
* transaction to keep
								 * the consistency. */
- bool auto_failback; /* If true, backend node reattach,
- * when backend node detached and
+ bool auto_failback; /* If true, backend node reattach, when
+ * backend node detached and
* replication_status is 'stream' */
- int auto_failback_interval; /* min interval of executing auto_failback */
+ int auto_failback_interval; /* min interval of executing
+ * auto_failback */
bool replicate_select; /* replicate SELECT statement when load
* balancing is disabled. */
char **reset_query_list; /* comma separated list of queries to be
* issued at the end of session */
char **read_only_function_list; /* list of functions with no side
- * effects */
+ * effects */
char **write_function_list; /* list of functions with side effects */
- char **primary_routing_query_pattern_list; /* list of query patterns that
- * should be sent to primary node */
+ char **primary_routing_query_pattern_list; /* list of query patterns
+ * that should be sent to
+ * primary node */
char *log_line_prefix; /* printf-style string to output at
* beginning of each log line */
int log_error_verbosity; /* controls how much detail about
bool logging_collector;
int log_rotation_age;
int log_rotation_size;
- char *log_directory;
- char *log_filename;
+ char *log_directory;
+ char *log_filename;
bool log_truncate_on_rotation;
int log_file_mode;
* greater than 0 to enable the
* functionality. */
- int delay_threshold_by_time; /* If the standby server delays more than
- * delay_threshold_in_time, any query goes to the
- * primary only. The unit is in seconds. 0
- * disables the check. Default is 0.
- * If delay_threshold_in_time is greater than 0,
- * delay_threshold will be ignored.
- * Note that health_check_period required to be
- * greater than 0 to enable the
- * functionality. */
+ int delay_threshold_by_time; /* If the standby server delays
+ * more than
+ * delay_threshold_in_time, any
+ * query goes to the primary only.
+ * The unit is in seconds. 0
+ * disables the check. Default is
+ * 0. If delay_threshold_in_time
+ * is greater than 0,
+ * delay_threshold will be
+ * ignored. Note that
+ * health_check_period required to
+ * be greater than 0 to enable the
+ * functionality. */
bool prefer_lower_delay_standby;
char *sr_check_database; /* PostgreSQL database name for streaming
* replication check */
char *failover_command; /* execute command when failover happens */
- char *follow_primary_command; /* execute command when failover is
+ char *follow_primary_command; /* execute command when failover is
* ended */
char *failback_command; /* execute command when failback happens */
- bool failover_on_backend_error; /* If true, trigger fail over when
+ bool failover_on_backend_error; /* If true, trigger fail over when
* writing to the backend
* communication socket fails.
* This is the same behavior of
* set to false, pgpool will
* report an error and disconnect
* the session. */
- bool failover_on_backend_shutdown; /* If true, trigger fail over
- when backend is going down */
+ bool failover_on_backend_shutdown; /* If true, trigger fail over
+ * when backend is going down */
bool detach_false_primary; /* If true, detach false primary */
char *recovery_user; /* PostgreSQL user name for online recovery */
char *recovery_password; /* PostgreSQL user password for online
bool log_statement; /* logs all SQL statements */
bool log_per_node_statement; /* logs per node detailed SQL
* statements */
- bool notice_per_node_statement; /* logs notice message for per node detailed SQL
- * statements */
+ bool notice_per_node_statement; /* logs notice message for per
+ * node detailed SQL statements */
bool log_client_messages; /* If true, logs any client messages */
int log_backend_messages; /* logs any backend messages */
char *lobj_lock_table; /* table name to lock for rewriting
/* followings till syslog, does not exist in the configuration file */
int num_reset_queries; /* number of queries in reset_query_list */
- int num_listen_addresses; /* number of entries in listen_addresses */
- int num_pcp_listen_addresses; /* number of entries in pcp_listen_addresses */
- int num_unix_socket_directories; /* number of entries in unix_socket_directories */
- int num_pcp_socket_directories; /* number of entries in pcp_socket_dir */
+ int num_listen_addresses; /* number of entries in
+ * listen_addresses */
+ int num_pcp_listen_addresses; /* number of entries in
+ * pcp_listen_addresses */
+ int num_unix_socket_directories; /* number of entries in
+ * unix_socket_directories */
+ int num_pcp_socket_directories; /* number of entries in
+ * pcp_socket_dir */
int num_read_only_function_list; /* number of functions in
- * read_only_function_list */
+ * read_only_function_list */
int num_write_function_list; /* number of functions in
* write_function_list */
- int num_cache_safe_memqcache_table_list; /* number of functions in
- * cache_safe_memqcache_table_list */
- int num_cache_unsafe_memqcache_table_list; /* number of functions in
- * cache_unsafe_memqcache_table_list */
- int num_primary_routing_query_pattern_list; /* number of query patterns in
- * primary_routing_query_pattern_list */
+ int num_cache_safe_memqcache_table_list; /* number of functions
+ * in
+ * cache_safe_memqcache_table_list */
+ int num_cache_unsafe_memqcache_table_list; /* number of functions
+ * in
+ * cache_unsafe_memqcache_table_list */
+ int num_primary_routing_query_pattern_list; /* number of query
+ * patterns in
+ * primary_routing_query_pattern_list */
int num_wd_monitoring_interfaces_list; /* number of items in
* wd_monitoring_interfaces_list */
/* ssl configuration */
char *ssl_ca_cert; /* path to root (CA) certificate */
char *ssl_ca_cert_dir; /* path to directory containing CA
* certificates */
- char *ssl_crl_file; /* path to the SSL certificate revocation list file */
+ char *ssl_crl_file; /* path to the SSL certificate revocation list
+ * file */
char *ssl_ciphers; /* allowed ssl ciphers */
- bool ssl_prefer_server_ciphers; /*Use SSL cipher preferences, rather than the client's*/
+ bool ssl_prefer_server_ciphers; /* Use SSL cipher preferences,
+ * rather than the client's */
char *ssl_ecdh_curve; /* the curve to use in ECDH key exchange */
- char *ssl_dh_params_file; /* path to the Diffie-Hellman parameters contained file */
- char *ssl_passphrase_command; /* path to the Diffie-Hellman parameters contained file */
+ char *ssl_dh_params_file; /* path to the Diffie-Hellman parameters
+ * contained file */
+ char *ssl_passphrase_command; /* path to the Diffie-Hellman
+ * parameters contained file */
int64 relcache_expire; /* relation cache life time in seconds */
int relcache_size; /* number of relation cache life entry */
- CHECK_TEMP_TABLE_OPTION check_temp_table; /* how to check temporary table */
+ CHECK_TEMP_TABLE_OPTION check_temp_table; /* how to check temporary
+ * table */
bool check_unlogged_table; /* enable unlogged table check */
- bool enable_shared_relcache; /* If true, relation cache stored in memory cache */
- RELQTARGET_OPTION relcache_query_target; /* target node to send relcache queries */
+ bool enable_shared_relcache; /* If true, relation cache stored in
+ * memory cache */
+ RELQTARGET_OPTION relcache_query_target; /* target node to send
+ * relcache queries */
/*
* followings are for regex support and do not exist in the configuration
* file
*/
- RegPattern *lists_patterns; /* Precompiled regex patterns for write/readonly
- * lists */
+ RegPattern *lists_patterns; /* Precompiled regex patterns for
+ * write/readonly lists */
int pattc; /* number of regexp pattern */
int current_pattern_size; /* size of the regex pattern array */
* by default */
char *memqcache_oiddir; /* Temporary work directory to record
* table oids */
- char **cache_safe_memqcache_table_list; /* list of tables to memqcache */
- char **cache_unsafe_memqcache_table_list; /* list of tables not to memqcache */
+ char **cache_safe_memqcache_table_list; /* list of tables to
+ * memqcache */
+ char **cache_unsafe_memqcache_table_list; /* list of tables not to
+ * memqcache */
RegPattern *lists_memqcache_table_patterns; /* Precompiled regex patterns
* for cache safe/unsafe lists */
* user_redirect_preference_list =
* 'postgres:primary,user[0-4]:1,user[5-9]:2'
*/
- char *user_redirect_preference_list; /* raw string in
- * pgpool.conf */
- RegArray *redirect_usernames; /* Precompiled regex patterns for db
+ char *user_redirect_preference_list; /* raw string in pgpool.conf */
+ RegArray *redirect_usernames; /* Precompiled regex patterns for db
* preference list */
- Left_right_tokens *user_redirect_tokens; /* db redirect for dbname and node
- * string */
+ Left_right_tokens *user_redirect_tokens; /* db redirect for dbname and
+ * node string */
/*
* database_redirect_preference_list =
* will not be load balanced
* until the session ends. */
- char *dml_adaptive_object_relationship_list; /* objects relationship list*/
+ char *dml_adaptive_object_relationship_list; /* objects relationship
+ * list */
DBObjectRelation *parsed_dml_adaptive_object_relationship_list;
- bool statement_level_load_balance; /* if on, select load balancing node per statement */
+ bool statement_level_load_balance; /* if on, select load
+ * balancing node per
+ * statement */
/*
* add for watchdog
* failover requests to
* build consensus */
bool enable_consensus_with_half_votes;
- /* apply majority rule for consensus
- * and quorum computation at 50% of
- * votes in a cluster with an even
- * number of nodes.
- */
+
+ /*
+ * apply majority rule for consensus and quorum computation at 50% of
+ * votes in a cluster with an even number of nodes.
+ */
bool wd_remove_shutdown_nodes;
- /* revoke membership of properly shutdown watchdog
- * nodes.
- */
- int wd_lost_node_removal_timeout;
- /* timeout in seconds to revoke membership of
- * LOST watchdog nodes
- */
- int wd_no_show_node_removal_timeout;
- /* time in seconds to revoke membership of
- * NO-SHOW watchdog node
- */
+
+ /*
+ * revoke membership of properly shutdown watchdog nodes.
+ */
+ int wd_lost_node_removal_timeout;
+
+ /*
+ * timeout in seconds to revoke membership of LOST watchdog nodes
+ */
+ int wd_no_show_node_removal_timeout;
+
+ /*
+ * time in seconds to revoke membership of NO-SHOW watchdog node
+ */
WdLifeCheckMethod wd_lifecheck_method; /* method of lifecheck.
* 'heartbeat' or 'query' */
* leader pgpool goes down. */
int wd_priority; /* watchdog node priority, during leader
* election */
- int pgpool_node_id; /* pgpool (watchdog) node id */
+ int pgpool_node_id; /* pgpool (watchdog) node id */
WdNodesConfig wd_nodes; /* watchdog lists */
char *trusted_servers; /* icmp reachable server list (A,B,C) */
- char *trusted_server_command; /* Executes this command when upper servers are observed */
+ char *trusted_server_command; /* Executes this command when upper
+ * servers are observed */
char *delegate_ip; /* delegate IP address */
int wd_interval; /* lifecheck interval (sec) */
char *wd_authkey; /* Authentication key for watchdog
* signal (sec) */
int wd_heartbeat_deadtime; /* Deadtime interval for heartbeat
* signal (sec) */
- WdHbIf hb_ifs[WD_MAX_IF_NUM]; /* heartbeat interfaces of all watchdog nodes */
- WdHbIf hb_dest_if[WD_MAX_IF_NUM]; /* heartbeat destination interfaces */
- int num_hb_dest_if; /* number of interface devices */
+ WdHbIf hb_ifs[WD_MAX_IF_NUM]; /* heartbeat interfaces of all
+ * watchdog nodes */
+ WdHbIf hb_dest_if[WD_MAX_IF_NUM]; /* heartbeat destination
+ * interfaces */
+ int num_hb_dest_if; /* number of interface devices */
char **wd_monitoring_interfaces_list; /* network interface name list
* to be monitored by watchdog */
- bool health_check_test; /* if on, enable health check testing */
+ bool health_check_test; /* if on, enable health check testing */
-} POOL_CONFIG;
+} POOL_CONFIG;
-extern POOL_CONFIG * pool_config;
-extern char config_file_dir[]; /* directory path of config file pgpool.conf */
+extern POOL_CONFIG *pool_config;
+extern char config_file_dir[]; /* directory path of config file pgpool.conf */
typedef enum
{
CFGCXT_RELOAD,
CFGCXT_PCP,
CFGCXT_SESSION
-} ConfigContext;
+} ConfigContext;
typedef struct ConfigVariable
{
extern bool pool_get_config(const char *config_file, ConfigContext context);
extern int eval_logical(const char *str);
extern char *pool_flag_to_str(unsigned short flag);
-extern char *backend_status_to_str(BackendInfo * bi);
+extern char *backend_status_to_str(BackendInfo *bi);
/* methods used for regexp support */
extern int add_regex_pattern(const char *type, char *s);
WATCHDOG_LIFECHECK,
GENERAL_CONFIG,
CACHE_CONFIG
-} config_group;
+} config_group;
typedef enum
{
CONFIG_VAR_TYPE_DOUBLE_ARRAY,
CONFIG_VAR_TYPE_STRING_ARRAY,
CONFIG_VAR_TYPE_GROUP
-} config_type;
+} config_type;
/*
* The possible values of an enum variable are specified by an array of
#define DEFAULT_FOR_NO_VALUE_ARRAY_VAR 0x0020
/* From PG's src/include/utils/guc.h */
-#define GUC_UNIT_KB 0x1000 /* value is in kilobytes */
-#define GUC_UNIT_BLOCKS 0x2000 /* value is in blocks */
-#define GUC_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */
-#define GUC_UNIT_MB 0x4000 /* value is in megabytes */
-#define GUC_UNIT_BYTE 0x8000 /* value is in bytes */
-#define GUC_UNIT_MEMORY 0xF000 /* mask for size-related units */
-
-#define GUC_UNIT_MS 0x10000 /* value is in milliseconds */
-#define GUC_UNIT_S 0x20000 /* value is in seconds */
-#define GUC_UNIT_MIN 0x30000 /* value is in minutes */
-#define GUC_UNIT_TIME 0xF0000 /* mask for time-related units */
+#define GUC_UNIT_KB 0x1000 /* value is in kilobytes */
+#define GUC_UNIT_BLOCKS 0x2000 /* value is in blocks */
+#define GUC_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */
+#define GUC_UNIT_MB 0x4000 /* value is in megabytes */
+#define GUC_UNIT_BYTE 0x8000 /* value is in bytes */
+#define GUC_UNIT_MEMORY 0xF000 /* mask for size-related units */
+
+#define GUC_UNIT_MS 0x10000 /* value is in milliseconds */
+#define GUC_UNIT_S 0x20000 /* value is in seconds */
+#define GUC_UNIT_MIN 0x30000 /* value is in minutes */
+#define GUC_UNIT_TIME 0xF0000 /* mask for time-related units */
#define GUC_UNIT (GUC_UNIT_MEMORY | GUC_UNIT_TIME)
/*
extern void InitializeConfigOptions(void);
extern bool set_one_config_option(const char *name, const char *value,
- ConfigContext context, GucSource source, int elevel);
+ ConfigContext context, GucSource source, int elevel);
extern bool set_config_options(ConfigVariable *head_p,
- ConfigContext context, GucSource source, int elevel);
+ ConfigContext context, GucSource source, int elevel);
#ifndef POOL_PRIVATE
-extern bool report_config_variable(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *var_name);
-extern bool report_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern bool set_config_option_for_session(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *name, const char *value);
-bool reset_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern bool report_config_variable(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *var_name);
+extern bool report_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern bool set_config_option_for_session(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *name, const char *value);
+bool reset_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
#endif
#endif /* POOL_CONFIG_VARIABLES_H */
{
LOAD_UNSELECTED = 0,
LOAD_SELECTED
-} LOAD_BALANCE_STATUS;
+} LOAD_BALANCE_STATUS;
extern int assert_enabled;
extern void ExceptionalCondition(const char *conditionName,
- const char *errorType,
- const char *fileName, int lineNumber) __attribute__((noreturn));
+ const char *errorType,
+ const char *fileName, int lineNumber) __attribute__((noreturn));
#define MAXIMUM_ALIGNOF 8
#ifndef pool_connection_pool_h
#define pool_connection_pool_h
-extern POOL_CONNECTION_POOL * pool_connection_pool; /* connection pool */
+extern POOL_CONNECTION_POOL *pool_connection_pool; /* connection pool */
-extern int pool_init_cp(void);
-extern POOL_CONNECTION_POOL * pool_create_cp(void);
-extern POOL_CONNECTION_POOL * pool_get_cp(char *user, char *database, int protoMajor, int check_socket);
+extern int pool_init_cp(void);
+extern POOL_CONNECTION_POOL *pool_create_cp(void);
+extern POOL_CONNECTION_POOL *pool_get_cp(char *user, char *database, int protoMajor, int check_socket);
extern void pool_discard_cp(char *user, char *database, int protoMajor);
extern void pool_backend_timer(void);
-extern void pool_connection_pool_timer(POOL_CONNECTION_POOL * backend);
+extern void pool_connection_pool_timer(POOL_CONNECTION_POOL *backend);
extern RETSIGTYPE pool_backend_timer_handler(int sig);
extern int connect_inet_domain_socket(int slot, bool retry);
extern int connect_unix_domain_socket(int slot, bool retry);
extern void update_pooled_connection_count(void);
extern int in_use_backend_id(POOL_CONNECTION_POOL *pool);
-#endif /* pool_connection_pool_h */
+#endif /* pool_connection_pool_h */
*/
typedef struct
{
- short major; /* major version number in up to 3 digits decimal.
- * Examples: 120, 110, 100, 96.
- */
- short minor; /* minor version number in up to 2 digits decimal.
- * Examples: 0, 1, 2, 10, 23.
- */
- char version_string[MAX_PG_VERSION_STRING+1]; /* original version string */
-} PGVersion;
+ short major; /* major version number in up to 3 digits
+ * decimal. Examples: 120, 110, 100, 96. */
+ short minor; /* minor version number in up to 2 digits
+ * decimal. Examples: 0, 1, 2, 10, 23. */
+ char version_string[MAX_PG_VERSION_STRING + 1]; /* original version
+ * string */
+} PGVersion;
-extern void send_startup_packet(POOL_CONNECTION_POOL_SLOT * cp);
+extern void send_startup_packet(POOL_CONNECTION_POOL_SLOT *cp);
extern void pool_free_startup_packet(StartupPacket *sp);
-extern POOL_CONNECTION_POOL_SLOT * make_persistent_db_connection(
- int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
-extern POOL_CONNECTION_POOL_SLOT * make_persistent_db_connection_noerror(
- int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
-extern void discard_persistent_db_connection(POOL_CONNECTION_POOL_SLOT * cp);
+extern POOL_CONNECTION_POOL_SLOT *make_persistent_db_connection(
+ int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
+extern POOL_CONNECTION_POOL_SLOT *make_persistent_db_connection_noerror(
+ int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
+extern void discard_persistent_db_connection(POOL_CONNECTION_POOL_SLOT *cp);
extern int select_load_balancing_node(void);
-extern PGVersion *Pgversion(POOL_CONNECTION_POOL * backend);
+extern PGVersion *Pgversion(POOL_CONNECTION_POOL *backend);
/* pool_pg_utils.c */
extern bool si_snapshot_prepared(void);
extern void si_commit_done(void);
extern int check_replication_delay(int node_id);
-#endif /* pool_pg_utils_h */
+#endif /* pool_pg_utils_h */
extern void reset_variables(void);
extern void reset_connection(void);
-extern void per_node_statement_log(POOL_CONNECTION_POOL * backend,
+extern void per_node_statement_log(POOL_CONNECTION_POOL *backend,
int node_id, char *query);
-extern int pool_extract_error_message(bool read_kind, POOL_CONNECTION * backend,
+extern int pool_extract_error_message(bool read_kind, POOL_CONNECTION *backend,
int major, bool unread, char **message);
-extern POOL_STATUS do_command(POOL_CONNECTION * frontend, POOL_CONNECTION * backend,
+extern POOL_STATUS do_command(POOL_CONNECTION *frontend, POOL_CONNECTION *backend,
char *query, int protoMajor, int pid, char *key, int keylen, int no_ready_for_query);
-extern void do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, int major);
-extern void free_select_result(POOL_SELECT_RESULT * result);
+extern void do_query(POOL_CONNECTION *backend, char *query, POOL_SELECT_RESULT **result, int major);
+extern void free_select_result(POOL_SELECT_RESULT *result);
extern int compare(const void *p1, const void *p2);
-extern void do_error_execute_command(POOL_CONNECTION_POOL * backend, int node_id, int major);
-extern POOL_STATUS pool_discard_packet_contents(POOL_CONNECTION_POOL * cp);
+extern void do_error_execute_command(POOL_CONNECTION_POOL *backend, int node_id, int major);
+extern POOL_STATUS pool_discard_packet_contents(POOL_CONNECTION_POOL *cp);
extern void pool_dump_valid_backend(int backend_id);
-extern bool pool_push_pending_data(POOL_CONNECTION * backend);
+extern bool pool_push_pending_data(POOL_CONNECTION *backend);
-extern void pool_send_frontend_exits(POOL_CONNECTION_POOL * backend);
-extern POOL_STATUS ParameterStatus(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern void pool_send_frontend_exits(POOL_CONNECTION_POOL *backend);
+extern POOL_STATUS ParameterStatus(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern void pool_send_error_message(POOL_CONNECTION * frontend, int protoMajor,
- char *code,
- char *message,
- char *detail,
- char *hint,
- char *file,
- int line);
-extern void pool_send_fatal_message(POOL_CONNECTION * frontend, int protoMajor,
- char *code,
- char *message,
- char *detail,
- char *hint,
- char *file,
- int line);
-extern void pool_send_severity_message(POOL_CONNECTION * frontend, int protoMajor,
- char *code,
- char *message,
- char *detail,
- char *hint,
- char *file,
- char *severity,
- int line);
+extern void pool_send_error_message(POOL_CONNECTION *frontend, int protoMajor,
+ char *code,
+ char *message,
+ char *detail,
+ char *hint,
+ char *file,
+ int line);
+extern void pool_send_fatal_message(POOL_CONNECTION *frontend, int protoMajor,
+ char *code,
+ char *message,
+ char *detail,
+ char *hint,
+ char *file,
+ int line);
+extern void pool_send_severity_message(POOL_CONNECTION *frontend, int protoMajor,
+ char *code,
+ char *message,
+ char *detail,
+ char *hint,
+ char *file,
+ char *severity,
+ int line);
-extern POOL_STATUS SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern POOL_STATUS SimpleForwardToBackend(char kind, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int len, char *contents);
+extern POOL_STATUS SimpleForwardToFrontend(char kind, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern POOL_STATUS SimpleForwardToBackend(char kind, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int len, char *contents);
-extern POOL_STATUS pool_process_query(POOL_CONNECTION * frontend,
-POOL_CONNECTION_POOL * backend,
-int reset_request);
-extern bool is_backend_cache_empty(POOL_CONNECTION_POOL * backend);
-extern void pool_send_readyforquery(POOL_CONNECTION * frontend);
+extern POOL_STATUS pool_process_query(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ int reset_request);
+extern bool is_backend_cache_empty(POOL_CONNECTION_POOL *backend);
+extern void pool_send_readyforquery(POOL_CONNECTION *frontend);
extern char *extract_error_kind(char *message, int major);
-#endif /* pool_process_query_h */
+#endif /* pool_process_query_h */
/*
* modules defined in pool_proto_modules.c
*/
-extern POOL_STATUS SimpleQuery(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS SimpleQuery(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Execute(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Execute(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Parse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Parse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Bind(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Bind(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Describe(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Describe(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Close(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Close(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS FunctionCall3(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS FunctionCall3(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS ReadyForQuery(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend, bool send_ready, bool cache_commit);
+extern POOL_STATUS ReadyForQuery(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend, bool send_ready, bool cache_commit);
-extern POOL_STATUS ParseComplete(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ParseComplete(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS BindComplete(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS BindComplete(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CloseComplete(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CloseComplete(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ParameterDescription(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ParameterDescription(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ErrorResponse3(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ErrorResponse3(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CopyInResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CopyInResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CopyOutResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CopyOutResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CopyDataRows(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend, int copyin);
+extern POOL_STATUS CopyDataRows(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend, int copyin);
-extern POOL_STATUS FunctionCall(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS FunctionCall(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ProcessFrontendResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ProcessFrontendResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ProcessBackendResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS ProcessBackendResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int *state, short *num_fields);
-extern void handle_query_context(POOL_CONNECTION_POOL * backend);;
+extern void handle_query_context(POOL_CONNECTION_POOL *backend);
extern void pool_emit_log_for_message_length_diff(int *length_array, char *name);
-extern void per_node_statement_notice(POOL_CONNECTION_POOL * backend, int node_id, char *query);
+extern void per_node_statement_notice(POOL_CONNECTION_POOL *backend, int node_id, char *query);
extern void log_backend_messages(unsigned char kind, int backend_id);
/*
* modules defined in pool_proto2.c
*/
-extern POOL_STATUS AsciiRow(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS AsciiRow(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
short num_fields);
-extern POOL_STATUS BinaryRow(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS BinaryRow(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
short num_fields);
-extern POOL_STATUS CompletedResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CompletedResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CursorResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CursorResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern void EmptyQueryResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern void EmptyQueryResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS FunctionResultResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS FunctionResultResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS NotificationResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS NotificationResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern int RowDescription(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- short *result);
+extern int RowDescription(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ short *result);
-extern void wait_for_query_response_with_trans_cleanup(POOL_CONNECTION * frontend, POOL_CONNECTION * backend,
+extern void wait_for_query_response_with_trans_cleanup(POOL_CONNECTION *frontend, POOL_CONNECTION *backend,
int protoVersion, int pid, char *key, int keylen);
-extern POOL_STATUS wait_for_query_response(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoVersion);
+extern POOL_STATUS wait_for_query_response(POOL_CONNECTION *frontend, POOL_CONNECTION *backend, int protoVersion);
extern bool is_select_query(Node *node, char *sql);
extern bool is_commit_query(Node *node);
extern bool is_rollback_query(Node *node);
extern bool is_rollback_to_query(Node *node);
extern bool is_strict_query(Node *node); /* returns non 0 if this is strict
* query */
-extern int need_insert_lock(POOL_CONNECTION_POOL * backend, char *query, Node *node);
-extern POOL_STATUS insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *query, InsertStmt *node, int lock_kind);
+extern int need_insert_lock(POOL_CONNECTION_POOL *backend, char *query, Node *node);
+extern POOL_STATUS insert_lock(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *query, InsertStmt *node, int lock_kind);
extern char *parse_copy_data(char *buf, int len, char delimiter, int col_id);
extern int check_copy_from_stdin(Node *node); /* returns non 0 if this is a
* COPY FROM STDIN */
-extern void query_ps_status(char *query, POOL_CONNECTION_POOL * backend); /* show ps status */
-extern POOL_STATUS start_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node *node);
-extern POOL_STATUS end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern int detect_deadlock_error(POOL_CONNECTION * backend, int major);
-extern int detect_serialization_error(POOL_CONNECTION * backend, int major, bool unread);
-extern int detect_active_sql_transaction_error(POOL_CONNECTION * backend, int major);
-extern int detect_query_cancel_error(POOL_CONNECTION * backend, int major);
-extern int detect_idle_in_transaction_session_timeout_error(POOL_CONNECTION * backend, int major);
-extern int detect_idle_session_timeout_error(POOL_CONNECTION * backend, int major);
-extern bool is_partition_table(POOL_CONNECTION_POOL * backend, Node *node);
-extern POOL_STATUS pool_discard_packet(POOL_CONNECTION_POOL * cp);
-extern void query_cache_register(char kind, POOL_CONNECTION * frontend, char *database, char *data, int data_len);
+extern void query_ps_status(char *query, POOL_CONNECTION_POOL *backend); /* show ps status */
+extern POOL_STATUS start_internal_transaction(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, Node *node);
+extern POOL_STATUS end_internal_transaction(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern int detect_deadlock_error(POOL_CONNECTION *backend, int major);
+extern int detect_serialization_error(POOL_CONNECTION *backend, int major, bool unread);
+extern int detect_active_sql_transaction_error(POOL_CONNECTION *backend, int major);
+extern int detect_query_cancel_error(POOL_CONNECTION *backend, int major);
+extern int detect_idle_in_transaction_session_timeout_error(POOL_CONNECTION *backend, int major);
+extern int detect_idle_session_timeout_error(POOL_CONNECTION *backend, int major);
+extern bool is_partition_table(POOL_CONNECTION_POOL *backend, Node *node);
+extern POOL_STATUS pool_discard_packet(POOL_CONNECTION_POOL *cp);
+extern void query_cache_register(char kind, POOL_CONNECTION *frontend, char *database, char *data, int data_len);
extern int is_drop_database(Node *node); /* returns non 0 if this is a DROP
* DATABASE command */
-extern void send_simplequery_message(POOL_CONNECTION * backend, int len, char *string, int major);
-extern POOL_STATUS send_extended_protocol_message(POOL_CONNECTION_POOL * backend,
+extern void send_simplequery_message(POOL_CONNECTION *backend, int len, char *string, int major);
+extern POOL_STATUS send_extended_protocol_message(POOL_CONNECTION_POOL *backend,
int node_id, char *kind,
int len, char *string);
-extern int synchronize(POOL_CONNECTION * cp);
-extern void read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *decided_kind);
-extern void read_kind_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *kind, int node);
-extern void do_error_command(POOL_CONNECTION * backend, int major);
-extern void raise_intentional_error_if_need(POOL_CONNECTION_POOL * backend);
+extern int synchronize(POOL_CONNECTION *cp);
+extern void read_kind_from_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *decided_kind);
+extern void read_kind_from_one_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *kind, int node);
+extern void do_error_command(POOL_CONNECTION *backend, int major);
+extern void raise_intentional_error_if_need(POOL_CONNECTION_POOL *backend);
-extern void pool_at_command_success(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern void pool_at_command_success(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
/*
* modules defined in CommandComplete.c
*/
-extern POOL_STATUS CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool command_complete);
+extern POOL_STATUS CommandComplete(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, bool command_complete);
-extern int pool_read_message_length(POOL_CONNECTION_POOL * cp);
-extern int *pool_read_message_length2(POOL_CONNECTION_POOL * cp);
-extern signed char pool_read_kind(POOL_CONNECTION_POOL * cp);
-extern int pool_read_int(POOL_CONNECTION_POOL * cp);
+extern int pool_read_message_length(POOL_CONNECTION_POOL *cp);
+extern int *pool_read_message_length2(POOL_CONNECTION_POOL *cp);
+extern signed char pool_read_kind(POOL_CONNECTION_POOL *cp);
+extern int pool_read_int(POOL_CONNECTION_POOL *cp);
/* pool_proto2.c */
-extern POOL_STATUS ErrorResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ErrorResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern void NoticeResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern void NoticeResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern char per_node_error_log(POOL_CONNECTION_POOL * backend, int node_id,
+extern char per_node_error_log(POOL_CONNECTION_POOL *backend, int node_id,
char *query, char *prefix, bool unread);
extern void init_pi_set(void);
* block". Each block is assigned a "cache block id", which is
* starting with 0.
*/
-typedef char *POOL_CACHE_BLOCK; /* pointer to cache block */
+typedef char *POOL_CACHE_BLOCK; /* pointer to cache block */
typedef unsigned int POOL_CACHE_BLOCKID; /* cache block id */
typedef unsigned int POOL_CACHE_ITEMID; /* cache item id */
{
POOL_CACHE_BLOCKID blockid;
POOL_CACHE_ITEMID itemid;
-} POOL_CACHEID; /* cache id */
+} POOL_CACHEID; /* cache id */
/*
* Each block has management space called "cache block header" at the
unsigned char flags; /* flags. see above */
unsigned int num_items; /* number of items */
unsigned int free_bytes; /* total free space in bytes */
-} POOL_CACHE_BLOCK_HEADER;
+} POOL_CACHE_BLOCK_HEADER;
typedef struct
{
char query_hash[POOL_MD5_HASHKEYLEN];
-} POOL_QUERY_HASH;
+} POOL_QUERY_HASH;
#define POOL_ITEM_USED 0x0001 /* is this item used? */
#define POOL_ITEM_HAS_NEXT 0x0002 /* is this item has "next" item? */
POOL_CACHEID next; /* next cache item if any */
unsigned int offset; /* item offset in this block */
unsigned char flags; /* flags. see above */
-} POOL_CACHE_ITEM_POINTER;
+} POOL_CACHE_ITEM_POINTER;
/*
* Each block holds several "cache item", which consists of variable
unsigned int total_length; /* total length in bytes including myself */
time_t timestamp; /* cache creation time */
int64 expire; /* cache expire duration in seconds */
-} POOL_CACHE_ITEM_HEADER;
+} POOL_CACHE_ITEM_HEADER;
typedef struct
{
POOL_CACHE_ITEM_HEADER header; /* cache item header */
char data[1]; /* variable length data follows */
-} POOL_CACHE_ITEM;
+} POOL_CACHE_ITEM;
/*
* Possible the largest free space size in bytes
extern int memcached_connect(void);
extern void memcached_disconnect(void);
-extern void memqcache_register(char kind, POOL_CONNECTION * frontend, char *data, int data_len);
+extern void memqcache_register(char kind, POOL_CONNECTION *frontend, char *data, int data_len);
/*
* Cache key
POOL_CACHEID cacheid; /* cache key (shmem configuration) */
char hashkey[POOL_MD5_HASHKEYLEN]; /* cache key (memcached
* configuration) */
-} POOL_CACHEKEY;
+} POOL_CACHEKEY;
/*
* Internal buffer structure
size_t bufsize; /* buffer size */
size_t buflen; /* used length */
char *buf; /* buffer */
-} POOL_INTERNAL_BUFFER;
+} POOL_INTERNAL_BUFFER;
/*
* Temporary query cache buffer
POOL_INTERNAL_BUFFER *buffer;
int num_oids;
POOL_INTERNAL_BUFFER *oids;
-} POOL_TEMP_QUERY_CACHE;
+} POOL_TEMP_QUERY_CACHE;
/*
* Temporary query cache buffer array
int num_caches;
int array_size;
POOL_TEMP_QUERY_CACHE *caches[1]; /* actual data continues... */
-} POOL_QUERY_CACHE_ARRAY;
+} POOL_QUERY_CACHE_ARRAY;
/*
* Query cache statistics structure. This area must be placed on shared
time_t start_time; /* start time when the statistics begins */
long long int num_selects; /* number of successful SELECTs */
long long int num_cache_hits; /* number of SELECTs extracted from cache */
-} POOL_QUERY_CACHE_STATS;
+} POOL_QUERY_CACHE_STATS;
/*
* Shared memory cache stats interface.
* fragment(unusable) cache
* entries */
POOL_QUERY_CACHE_STATS cache_stats;
-} POOL_SHMEM_STATS;
+} POOL_SHMEM_STATS;
/*--------------------------------------------------------------------------------
* On shared memory hash table implementation
struct POOL_HASH_ELEMENT *next; /* link to next entry */
POOL_QUERY_HASH hashkey; /* MD5 hash key */
POOL_CACHEID cacheid; /* logical location of this cache element */
-} POOL_HASH_ELEMENT;
+} POOL_HASH_ELEMENT;
typedef uint32 POOL_HASH_KEY;
{
POOL_HASH_KEY hashkey; /* hash key */
POOL_HASH_ELEMENT *element; /* hash element */
-} POOL_HEADER_ELEMENT;
+} POOL_HEADER_ELEMENT;
/* Hash header */
typedef struct
long nhash; /* number of hash keys (power of 2) */
uint32 mask; /* mask for hash function */
POOL_HEADER_ELEMENT elements[1]; /* actual hash elements follows */
-} POOL_HASH_HEADER;
+} POOL_HASH_HEADER;
typedef enum
{
extern int pool_hash_init(int nelements);
extern size_t pool_hash_size(int nelements);
-extern POOL_CACHEID * pool_hash_search(POOL_QUERY_HASH * key);
-extern int pool_hash_delete(POOL_QUERY_HASH * key);
+extern POOL_CACHEID *pool_hash_search(POOL_QUERY_HASH *key);
+extern int pool_hash_delete(POOL_QUERY_HASH *key);
extern uint32 hash_any(unsigned char *k, int keylen);
-extern POOL_STATUS pool_fetch_from_memory_cache(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS pool_fetch_from_memory_cache(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
char *contents, bool use_fake_cache, bool *foundp);
-extern int pool_fetch_cache(POOL_CONNECTION_POOL * backend, const char *query, char **buf, size_t *len);
-extern int pool_catalog_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_t datalen);
+extern int pool_fetch_cache(POOL_CONNECTION_POOL *backend, const char *query, char **buf, size_t *len);
+extern int pool_catalog_commit_cache(POOL_CONNECTION_POOL *backend, char *query, char *data, size_t datalen);
extern bool pool_is_likely_select(char *query);
extern bool pool_is_table_in_unsafe_list(const char *table_name);
extern int pool_init_fsmm(size_t size);
extern void pool_allocate_fsmm_clock_hand(void);
-extern POOL_QUERY_CACHE_ARRAY * pool_create_query_cache_array(void);
-extern void pool_discard_query_cache_array(POOL_QUERY_CACHE_ARRAY * cache_array);
+extern POOL_QUERY_CACHE_ARRAY *pool_create_query_cache_array(void);
+extern void pool_discard_query_cache_array(POOL_QUERY_CACHE_ARRAY *cache_array);
-extern POOL_TEMP_QUERY_CACHE * pool_create_temp_query_cache(char *query);
-extern void pool_handle_query_cache(POOL_CONNECTION_POOL * backend, char *query, Node *node, char state,
- bool partial_fetch);
+extern POOL_TEMP_QUERY_CACHE *pool_create_temp_query_cache(char *query);
+extern void pool_handle_query_cache(POOL_CONNECTION_POOL *backend, char *query, Node *node, char state,
+ bool partial_fetch);
extern int pool_init_memqcache_stats(void);
-extern POOL_QUERY_CACHE_STATS * pool_get_memqcache_stats(void);
+extern POOL_QUERY_CACHE_STATS *pool_get_memqcache_stats(void);
extern void pool_reset_memqcache_stats(void);
extern long long int pool_stats_count_up_num_selects(long long int num);
extern long long int pool_stats_count_up_num_cache_hits(void);
extern long long int pool_tmp_stats_count_up_num_selects(void);
extern long long int pool_tmp_stats_get_num_selects(void);
extern void pool_tmp_stats_reset_num_selects(void);
-extern POOL_SHMEM_STATS * pool_get_shmem_storage_stats(void);
+extern POOL_SHMEM_STATS *pool_get_shmem_storage_stats(void);
-extern POOL_TEMP_QUERY_CACHE * pool_get_current_cache(void);
-extern POOL_TEMP_QUERY_CACHE * pool_get_current_cache(void);
-extern void pool_discard_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache);
+extern POOL_TEMP_QUERY_CACHE *pool_get_current_cache(void);
+extern void pool_discard_temp_query_cache(POOL_TEMP_QUERY_CACHE *temp_cache);
extern void pool_discard_current_temp_query_cache(void);
extern void pool_shmem_lock(POOL_MEMQ_LOCK_TYPE type);
extern void clear_query_cache(void);
-extern bool query_cache_delete_by_stmt(char *query, POOL_CONNECTION_POOL * backend);
+extern bool query_cache_delete_by_stmt(char *query, POOL_CONNECTION_POOL *backend);
#endif /* POOL_MEMQCACHE_H */
#define POOL_LOBJ_H
#include "pool.h"
-extern char *pool_rewrite_lo_creat(char kind, char *packet, int packet_len, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int *len);
+extern char *pool_rewrite_lo_creat(char kind, char *packet, int packet_len, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int *len);
#endif /* POOL_LOBJ_H */
#include "parser/nodes.h"
#include "context/pool_session_context.h"
-extern char *rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node, bool rewrite_to_params, POOL_SENT_MESSAGE * message);
-extern char *bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend, POOL_SENT_MESSAGE * message, const char *orig_msg, int *len);
+extern char *rewrite_timestamp(POOL_CONNECTION_POOL *backend, Node *node, bool rewrite_to_params, POOL_SENT_MESSAGE *message);
+extern char *bind_rewrite_timestamp(POOL_CONNECTION_POOL *backend, POOL_SENT_MESSAGE *message, const char *orig_msg, int *len);
extern bool isSystemType(Node *node, const char *name);
#endif /* POOL_TIMESTAMP_H */
extern bool message_level_is_interesting(int elevel);
extern bool errstart(int elevel, const char *filename, int lineno,
- const char *funcname, const char *domain);
+ const char *funcname, const char *domain);
extern void errfinish(int dummy,...);
#define errcode(sqlerrcode) \
extern int get_return_code(void);
extern int
-errmsg(const char *fmt,...)
+ errmsg(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errmsg_internal(const char *fmt,...)
+ errmsg_internal(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errmsg_plural(const char *fmt_singular, const char *fmt_plural,
- unsigned long n,...)
+ errmsg_plural(const char *fmt_singular, const char *fmt_plural,
+ unsigned long n,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
extern int
-errdetail(const char *fmt,...)
+ errdetail(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errdetail_internal(const char *fmt,...)
+ errdetail_internal(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errdetail_log(const char *fmt,...)
+ errdetail_log(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errdetail_plural(const char *fmt_singular, const char *fmt_plural,
- unsigned long n,...)
+ errdetail_plural(const char *fmt_singular, const char *fmt_plural,
+ unsigned long n,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
extern int
-errhint(const char *fmt,...)
+ errhint(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
/*
* errcontext() is typically called in error context callback functions, not
extern int set_errcontext_domain(const char *domain);
extern int
-errcontext_msg(const char *fmt,...)
+ errcontext_msg(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int errhidestmt(bool hide_stmt);
extern void elog_start(const char *filename, int lineno, const char *funcname);
extern void
-elog_finish(int elevel, const char *fmt,...)
+ elog_finish(int elevel, const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
/* Support for attaching context information to error reports */
* safely (memory context, GUC load etc)
*/
extern void
-write_stderr(const char *fmt,...)
+ write_stderr(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void shmem_exit(int code);
void on_exit_reset(void);
#ifdef __GNUC__
extern int
-errhint(const char *fmt,...)
-__attribute__((format(printf, 1, 2)));
+ errhint(const char *fmt,...)
+ __attribute__((format(printf, 1, 2)));
extern int
-errdetail(const char *fmt,...)
-__attribute__((format(printf, 1, 2)));
+ errdetail(const char *fmt,...)
+ __attribute__((format(printf, 1, 2)));
extern void
-errmsg(const char *fmt,...)
-__attribute__((format(printf, 1, 2)));
+ errmsg(const char *fmt,...)
+ __attribute__((format(printf, 1, 2)));
#else
extern int errhint(const char *fmt,...);
extern int errdetail(const char *fmt,...);
#endif
extern bool errstart(int elevel, const char *filename, int lineno,
- const char *funcname, const char *domain);
+ const char *funcname, const char *domain);
extern void errfinish(int dummy,...);
/*
#ifndef HAVE_GETOPT_LONG
-extern int getopt_long(int argc, char *const argv[],
- const char *optstring,
- const struct option *longopts, int *longindex);
+extern int getopt_long(int argc, char *const argv[],
+ const char *optstring,
+ const struct option *longopts, int *longindex);
#endif
#endif /* GETOPT_LONG_H */
size_t value_extra; /* how much extra space to allocate for
* values? */
- } json_settings;
+ } json_settings;
#define json_enable_comments 0x01
json_boolean,
json_null
- } json_type;
+ } json_type;
extern const struct _json_value json_value_none;
struct _json_value *value;
- } json_object_entry;
+ } json_object_entry;
typedef struct _json_value
{
#endif
- } json_value;
+ } json_value;
json_value *json_parse(const json_char * json,
size_t length);
#define json_error_max 128
- json_value *json_parse_ex(json_settings * settings,
+ json_value *json_parse_ex(json_settings *settings,
const json_char * json,
size_t length,
char *error);
/* Not usually necessary, unless you used a custom mem_alloc and now want to
* use a custom mem_free.
*/
- void json_value_free_ex(json_settings * settings,
+ void json_value_free_ex(json_settings *settings,
json_value *);
#endif
/* pgpool-II extensions */
-json_value *json_get_value_for_key(json_value * source, const char *key);
-int json_get_int_value_for_key(json_value * source, const char *key, int *value);
-int json_get_long_value_for_key(json_value * source, const char *key, long *value);
-char *json_get_string_value_for_key(json_value * source, const char *key);
-int json_get_bool_value_for_key(json_value * source, const char *key, bool *value);
+json_value *json_get_value_for_key(json_value *source, const char *key);
+int json_get_int_value_for_key(json_value *source, const char *key, int *value);
+int json_get_long_value_for_key(json_value *source, const char *key, long *value);
+char *json_get_string_value_for_key(json_value *source, const char *key);
+int json_get_bool_value_for_key(json_value *source, const char *key, bool *value);
#endif
{
JWOBJECT,
JWARRAY
-} JWElementType;
+} JWElementType;
typedef struct JWStack
{
JWElementType elementType;
int elementCount;
-} JWStack;
+} JWStack;
typedef struct JsonNode
{
bool pretty;
int stack_ptr;
JWStack stack[MAX_STACK_DEPTH];
-} JsonNode;
+} JsonNode;
-extern JsonNode * jw_create(JWElementType rootElement, bool pretty_output);
-extern JsonNode * jw_create_with_array(bool pretty_output);
-extern JsonNode * jw_create_with_object(bool pretty_output);
-extern bool jw_put_string(JsonNode * jNode, char *key, char *value);
-extern bool jw_put_int(JsonNode * jNode, char *key, int value);
-extern bool jw_put_bool(JsonNode * jNode, char *key, bool value);
-extern bool jw_put_long(JsonNode * jNode, char *key, long value);
-extern bool jw_put_null(JsonNode * jNode, char *key);
-extern bool jw_put_string_value(JsonNode * jNode, char *value);
-extern bool jw_put_int_value(JsonNode * jNode, int value);
-extern bool jw_put_bool_value(JsonNode * jNode, bool value);
-extern bool jw_put_long_value(JsonNode * jNode, long value);
-extern bool jw_put_null_value(JsonNode * jNode);
-extern bool jw_start_element(JsonNode * jNode, JWElementType element, char *key);
-extern bool jw_start_array(JsonNode * jNode, char *key);
-extern bool jw_start_object(JsonNode * jNode, char *key);
-extern bool jw_end_element(JsonNode * jNode);
-extern bool jw_finish_document(JsonNode * jNode);
-extern char *jw_get_json_string(JsonNode * jNode);
-extern int jw_get_json_length(JsonNode * jNode);
-extern void jw_destroy(JsonNode * jNode);
+extern JsonNode *jw_create(JWElementType rootElement, bool pretty_output);
+extern JsonNode *jw_create_with_array(bool pretty_output);
+extern JsonNode *jw_create_with_object(bool pretty_output);
+extern bool jw_put_string(JsonNode *jNode, char *key, char *value);
+extern bool jw_put_int(JsonNode *jNode, char *key, int value);
+extern bool jw_put_bool(JsonNode *jNode, char *key, bool value);
+extern bool jw_put_long(JsonNode *jNode, char *key, long value);
+extern bool jw_put_null(JsonNode *jNode, char *key);
+extern bool jw_put_string_value(JsonNode *jNode, char *value);
+extern bool jw_put_int_value(JsonNode *jNode, int value);
+extern bool jw_put_bool_value(JsonNode *jNode, bool value);
+extern bool jw_put_long_value(JsonNode *jNode, long value);
+extern bool jw_put_null_value(JsonNode *jNode);
+extern bool jw_start_element(JsonNode *jNode, JWElementType element, char *key);
+extern bool jw_start_array(JsonNode *jNode, char *key);
+extern bool jw_start_object(JsonNode *jNode, char *key);
+extern bool jw_end_element(JsonNode *jNode);
+extern bool jw_finish_document(JsonNode *jNode);
+extern char *jw_get_json_string(JsonNode *jNode);
+extern int jw_get_json_length(JsonNode *jNode);
+extern void jw_destroy(JsonNode *jNode);
#endif
extern void MemoryContextResetChildren(MemoryContext context);
extern void MemoryContextDeleteChildren(MemoryContext context);
extern void MemoryContextSetParent(MemoryContext context,
- MemoryContext new_parent);
+ MemoryContext new_parent);
extern Size GetMemoryChunkSpace(void *pointer);
extern MemoryContext MemoryContextGetParent(MemoryContext context);
extern bool MemoryContextIsEmpty(MemoryContext context);
extern void MemoryContextStats(MemoryContext context);
extern void MemoryContextStatsDetail(MemoryContext context, int max_children);
extern void MemoryContextAllowInCriticalSection(MemoryContext context,
- bool allow);
+ bool allow);
#ifdef MEMORY_CONTEXT_CHECKING
extern void MemoryContextCheck(MemoryContext context);
* specific creation routines, and noplace else.
*/
extern MemoryContext MemoryContextCreate(NodeTag tag, Size size,
- MemoryContextMethods *methods,
- MemoryContext parent,
- const char *name);
+ MemoryContextMethods *methods,
+ MemoryContext parent,
+ const char *name);
/*
/* aset.c */
extern MemoryContext AllocSetContextCreate(MemoryContext parent,
- const char *name,
- Size minContextSize,
- Size initBlockSize,
- Size maxBlockSize);
+ const char *name,
+ Size minContextSize,
+ Size initBlockSize,
+ Size maxBlockSize);
/* slab.c */
extern MemoryContext SlabContextCreate(MemoryContext parent,
- const char *name,
- Size blockSize,
- Size chunkSize);
+ const char *name,
+ Size blockSize,
+ Size chunkSize);
/*
* Recommended default alloc parameters, suitable for "ordinary" contexts
extern void *MemoryContextAlloc(MemoryContext context, Size size);
extern void *MemoryContextAllocZero(MemoryContext context, Size size);
extern void *MemoryContextAllocExtended(MemoryContext context,
- Size size, int flags);
+ Size size, int flags);
extern void *palloc(Size size);
extern void *palloc0(Size size);
/* Registration of memory context reset/delete callbacks */
extern void MemoryContextRegisterResetCallback(MemoryContext context,
- MemoryContextCallback *cb);
+ MemoryContextCallback *cb);
/*
* These are like standard strdup() except the copied string is
#include "pool_type.h"
-extern int SockAddr_cidr_mask(struct sockaddr_storage *mask,
- char *numbits, int family);
+extern int SockAddr_cidr_mask(struct sockaddr_storage *mask,
+ char *numbits, int family);
typedef void (*PgIfAddrCallback) (struct sockaddr *addr, struct sockaddr *netmask, void *cb_data);
-extern int getaddrinfo_all(const char *hostname, const char *servname,
- const struct addrinfo *hintp,
- struct addrinfo **result);
+extern int getaddrinfo_all(const char *hostname, const char *servname,
+ const struct addrinfo *hintp,
+ struct addrinfo **result);
extern void freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai);
-extern int getnameinfo_all(const struct sockaddr_storage *addr, int salen,
- char *node, int nodelen,
- char *service, int servicelen,
- int flags);
+extern int getnameinfo_all(const struct sockaddr_storage *addr, int salen,
+ char *node, int nodelen,
+ char *service, int servicelen,
+ int flags);
-extern int rangeSockAddr(const struct sockaddr_storage *addr,
- const struct sockaddr_storage *netaddr,
- const struct sockaddr_storage *netmask);
+extern int rangeSockAddr(const struct sockaddr_storage *addr,
+ const struct sockaddr_storage *netaddr,
+ const struct sockaddr_storage *netmask);
/* imported from PostgreSQL getaddrinfo.c */
int num; /* number of entries */
char **names; /* parameter names */
char **values; /* values */
-} ParamStatus;
+} ParamStatus;
-extern int pool_init_params(ParamStatus * params);
-extern void pool_discard_params(ParamStatus * params);
-extern char *pool_find_name(ParamStatus * params, char *name, int *pos);
-extern int pool_get_param(ParamStatus * params, int index, char **name, char **value);
-extern int pool_add_param(ParamStatus * params, char *name, char *value);
-extern void pool_param_debug_print(ParamStatus * params);
+extern int pool_init_params(ParamStatus *params);
+extern void pool_discard_params(ParamStatus *params);
+extern char *pool_find_name(ParamStatus *params, char *name, int *pos);
+extern int pool_get_param(ParamStatus *params, int index, char **name, char **value);
+extern int pool_add_param(ParamStatus *params, char *name, char *value);
+extern void pool_param_debug_print(ParamStatus *params);
-#endif /* pool_params_h */
+#endif /* pool_params_h */
#ifndef POOL_PROCESS_REPORTING_H
#define POOL_PROCESS_REPORTING_H
-extern void send_row_description(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
- short num_fields, char **field_names);
-extern void send_complete_and_ready(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *message, const int num_rows);
-extern POOL_REPORT_CONFIG * get_config(int *nrows);
-extern POOL_REPORT_POOLS * get_pools(int *nrows);
-extern POOL_REPORT_PROCESSES * get_processes(int *nrows);
-extern POOL_REPORT_NODES * get_nodes(int *nrows, int node_id);
-extern POOL_REPORT_VERSION * get_version(void);
+extern void send_row_description(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
+ short num_fields, char **field_names);
+extern void send_complete_and_ready(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *message, const int num_rows);
+extern POOL_REPORT_CONFIG *get_config(int *nrows);
+extern POOL_REPORT_POOLS *get_pools(int *nrows);
+extern POOL_REPORT_PROCESSES *get_processes(int *nrows);
+extern POOL_REPORT_NODES *get_nodes(int *nrows, int node_id);
+extern POOL_REPORT_VERSION *get_version(void);
extern POOL_HEALTH_CHECK_STATS *get_health_check_stats(int *nrows);
extern POOL_BACKEND_STATS *get_backend_stats(int *nrows);
-extern void config_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void pools_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void processes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void nodes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void version_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void cache_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void show_health_check_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void show_backend_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern void config_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void pools_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void processes_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void nodes_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void version_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void cache_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void show_health_check_stats(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void show_backend_stats(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
-extern void send_config_var_detail_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *name, const char *value, const char *description);
-extern void send_config_var_value_only_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *value);
+extern void send_config_var_detail_row(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *name, const char *value, const char *description);
+extern void send_config_var_value_only_row(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *value);
extern char *get_backend_status_string(BACKEND_STATUS status);
-extern int * pool_report_pools_offsets(int *n);
+extern int *pool_report_pools_offsets(int *n);
#endif
int refcnt; /* reference count */
int session_id; /* LocalSessionId */
time_t expire; /* cache expiration absolute time in seconds */
-} PoolRelCache;
+} PoolRelCache;
#define MAX_QUERY_LENGTH 1500
typedef struct
bool no_cache_if_zero; /* if register func returns 0, do not
* cache the data */
PoolRelCache *cache; /* cache data */
-} POOL_RELCACHE;
+} POOL_RELCACHE;
-extern POOL_RELCACHE * pool_create_relcache(int cachesize, char *sql,
- func_ptr register_func, func_ptr unregister_func,
- bool issessionlocal);
-extern void pool_discard_relcache(POOL_RELCACHE * relcache);
-extern void *pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, char *table);
+extern POOL_RELCACHE *pool_create_relcache(int cachesize, char *sql,
+ func_ptr register_func, func_ptr unregister_func,
+ bool issessionlocal);
+extern void pool_discard_relcache(POOL_RELCACHE *relcache);
+extern void *pool_search_relcache(POOL_RELCACHE *relcache, POOL_CONNECTION_POOL *backend, char *table);
extern char *remove_quotes_and_schema_from_relname(char *table);
-extern void *int_register_func(POOL_SELECT_RESULT * res);
+extern void *int_register_func(POOL_SELECT_RESULT *res);
extern void *int_unregister_func(void *data);
-extern void *string_register_func(POOL_SELECT_RESULT * res);
+extern void *string_register_func(POOL_SELECT_RESULT *res);
extern void *string_unregister_func(void *data);
extern bool SplitIdentifierString(char *rawstring, char separator, Node **namelist);
bool row_security; /* true if row security enabled */
int num_oids; /* number of oids */
int table_oids[POOL_MAX_SELECT_OIDS]; /* table oids */
- char table_names[POOL_MAX_SELECT_OIDS][NAMEDATALEN]; /* table names */
-} SelectContext;
+ char table_names[POOL_MAX_SELECT_OIDS][NAMEDATALEN]; /* table names */
+} SelectContext;
extern int pool_get_terminate_backend_pid(Node *node);
extern bool pool_has_function_call(Node *node);
extern bool pool_has_to_regclass(void);
extern bool raw_expression_tree_walker(Node *node, bool (*walker) (), void *context);
extern int pool_table_name_to_oid(char *table_name);
-extern int pool_extract_table_oids_from_select_stmt(Node *node, SelectContext * ctx);
+extern int pool_extract_table_oids_from_select_stmt(Node *node, SelectContext *ctx);
extern RangeVar *makeRangeVarFromNameList(List *names);
extern char *make_table_name_from_rangevar(RangeVar *rangevar);
extern char *make_function_name_from_funccall(FuncCall *fcall);
#endif
-extern void pool_ssl_negotiate_serverclient(POOL_CONNECTION * cp);
-extern void pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp);
-extern void pool_ssl_close(POOL_CONNECTION * cp);
-extern int pool_ssl_read(POOL_CONNECTION * cp, void *buf, int size);
-extern int pool_ssl_write(POOL_CONNECTION * cp, const void *buf, int size);
-extern bool pool_ssl_pending(POOL_CONNECTION * cp);
+extern void pool_ssl_negotiate_serverclient(POOL_CONNECTION *cp);
+extern void pool_ssl_negotiate_clientserver(POOL_CONNECTION *cp);
+extern void pool_ssl_close(POOL_CONNECTION *cp);
+extern int pool_ssl_read(POOL_CONNECTION *cp, void *buf, int size);
+extern int pool_ssl_write(POOL_CONNECTION *cp, const void *buf, int size);
+extern bool pool_ssl_pending(POOL_CONNECTION *cp);
extern int SSL_ServerSide_init(void);
-#endif /* pool_ssl_h */
+#endif /* pool_ssl_h */
(connection)->len = 0; \
} while (0)
-extern POOL_CONNECTION * pool_open(int fd, bool backend_connection);
-extern void pool_close(POOL_CONNECTION * cp);
-extern int pool_read(POOL_CONNECTION * cp, void *buf, int len);
-extern void pool_read_with_error(POOL_CONNECTION * cp, void *buf, int len,
- const char *err_context);
+extern POOL_CONNECTION *pool_open(int fd, bool backend_connection);
+extern void pool_close(POOL_CONNECTION *cp);
+extern int pool_read(POOL_CONNECTION *cp, void *buf, int len);
+extern void pool_read_with_error(POOL_CONNECTION *cp, void *buf, int len,
+ const char *err_context);
-extern char *pool_read2(POOL_CONNECTION * cp, int len);
-extern int pool_write(POOL_CONNECTION * cp, void *buf, int len);
-extern int pool_write_noerror(POOL_CONNECTION * cp, void *buf, int len);
-extern int pool_flush(POOL_CONNECTION * cp);
-extern int pool_flush_noerror(POOL_CONNECTION * cp);
-extern int pool_flush_it(POOL_CONNECTION * cp);
-extern void pool_write_and_flush(POOL_CONNECTION * cp, void *buf, int len);
-extern int pool_write_and_flush_noerror(POOL_CONNECTION * cp, void *buf, int len);
-extern char *pool_read_string(POOL_CONNECTION * cp, int *len, int line);
-extern int pool_unread(POOL_CONNECTION * cp, void *data, int len);
-extern int pool_push(POOL_CONNECTION * cp, void *data, int len);
-extern void pool_pop(POOL_CONNECTION * cp, int *len);
-extern int pool_stacklen(POOL_CONNECTION * cp);
+extern char *pool_read2(POOL_CONNECTION *cp, int len);
+extern int pool_write(POOL_CONNECTION *cp, void *buf, int len);
+extern int pool_write_noerror(POOL_CONNECTION *cp, void *buf, int len);
+extern int pool_flush(POOL_CONNECTION *cp);
+extern int pool_flush_noerror(POOL_CONNECTION *cp);
+extern int pool_flush_it(POOL_CONNECTION *cp);
+extern void pool_write_and_flush(POOL_CONNECTION *cp, void *buf, int len);
+extern int pool_write_and_flush_noerror(POOL_CONNECTION *cp, void *buf, int len);
+extern char *pool_read_string(POOL_CONNECTION *cp, int *len, int line);
+extern int pool_unread(POOL_CONNECTION *cp, void *data, int len);
+extern int pool_push(POOL_CONNECTION *cp, void *data, int len);
+extern void pool_pop(POOL_CONNECTION *cp, int *len);
+extern int pool_stacklen(POOL_CONNECTION *cp);
-extern void pool_set_db_node_id(POOL_CONNECTION * con, int db_node_id);
+extern void pool_set_db_node_id(POOL_CONNECTION *con, int db_node_id);
extern void pool_set_timeout(int timeoutval);
extern int pool_get_timeout(void);
-extern int pool_check_fd(POOL_CONNECTION * cp);
+extern int pool_check_fd(POOL_CONNECTION *cp);
#endif /* POOL_STREAM_H */
#include "pool.h"
#include <netdb.h>
-extern char remote_ps_data[NI_MAXHOST + NI_MAXSERV + 2]; /* used for set_ps_display */
+extern char remote_ps_data[NI_MAXHOST + NI_MAXSERV + 2]; /* used for
+ * set_ps_display */
extern char **save_ps_display_args(int argc, char **argv);
extern void init_ps_display(const char *username, const char *dbname,
- const char *host_info, const char *initial_str);
+ const char *host_info, const char *initial_str);
extern void set_ps_display(const char *activity, bool force);
extern const char *get_ps_display(int *displen);
-extern void pool_ps_idle_display(POOL_CONNECTION_POOL * backend);
+extern void pool_ps_idle_display(POOL_CONNECTION_POOL *backend);
-#endif /* ps_status_h */
+#endif /* ps_status_h */
int size; /* regex array size */
int pos; /* next regex array index position */
regex_t **regex; /* regular expression array */
-} RegArray;
+} RegArray;
RegArray *create_regex_array(void);
-int add_regex_array(RegArray * ar, char *pattern);
-int regex_array_match(RegArray * ar, char *pattern);
-void destroy_regex_array(RegArray * ar);
+int add_regex_array(RegArray *ar, char *pattern);
+int regex_array_match(RegArray *ar, char *pattern);
+void destroy_regex_array(RegArray *ar);
/*
* String left-right token type
char *left_token;
char *right_token;
double weight_token;
-} Left_right_token;
+} Left_right_token;
typedef struct
{
int pos;
int size;
Left_right_token *token;
-} Left_right_tokens;
+} Left_right_tokens;
Left_right_tokens *create_lrtoken_array(void);
-void extract_string_tokens2(char *str, char *delimi, char delimi2, Left_right_tokens * lrtokens);
+void extract_string_tokens2(char *str, char *delimi, char delimi2, Left_right_tokens *lrtokens);
#endif
/* Interface routines for SHA224/256/384/512 */
extern void pg_sha224_init(pg_sha224_ctx *ctx);
extern void pg_sha224_update(pg_sha224_ctx *ctx, const uint8 *input0,
- size_t len);
+ size_t len);
extern void pg_sha224_final(pg_sha224_ctx *ctx, uint8 *dest);
extern void pg_sha256_init(pg_sha256_ctx *ctx);
extern void pg_sha256_update(pg_sha256_ctx *ctx, const uint8 *input0,
- size_t len);
+ size_t len);
extern void pg_sha256_final(pg_sha256_ctx *ctx, uint8 *dest);
extern void pg_sha384_init(pg_sha384_ctx *ctx);
extern void pg_sha384_update(pg_sha384_ctx *ctx,
- const uint8 *, size_t len);
+ const uint8 *, size_t len);
extern void pg_sha384_final(pg_sha384_ctx *ctx, uint8 *dest);
extern void pg_sha512_init(pg_sha512_ctx *ctx);
extern void pg_sha512_update(pg_sha512_ctx *ctx, const uint8 *input0,
- size_t len);
+ size_t len);
extern void pg_sha512_final(pg_sha512_ctx *ctx, uint8 *dest);
#endif /* _PG_SHA2_H_ */
#define SSL_UTILS_H
extern void calculate_hmac_sha256(const char *data, int len, char *buf);
-extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
- const char *password, unsigned char *plaintext);
-extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
- const char *password, unsigned char *ciphertext);
+extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
+ const char *password, unsigned char *plaintext);
+extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
+ const char *password, unsigned char *ciphertext);
#endif
#ifndef statistics_h
#define statistics_h
-extern size_t stat_shared_memory_size(void);
-extern void stat_set_stat_area(void *address);
-extern void stat_init_stat_area(void);
-extern void stat_count_up(int backend_node_id, Node *parsetree);
-extern void error_stat_count_up(int backend_node_id, char *str);
-extern uint64 stat_get_select_count(int backend_node_id);
-extern uint64 stat_get_insert_count(int backend_node_id);
-extern uint64 stat_get_update_count(int backend_node_id);
-extern uint64 stat_get_delete_count(int backend_node_id);
-extern uint64 stat_get_ddl_count(int backend_node_id);
-extern uint64 stat_get_other_count(int backend_node_id);
-extern uint64 stat_get_panic_count(int backend_node_id);
-extern uint64 stat_get_fatal_count(int backend_node_id);
-extern uint64 stat_get_error_count(int backend_node_id);
+extern size_t stat_shared_memory_size(void);
+extern void stat_set_stat_area(void *address);
+extern void stat_init_stat_area(void);
+extern void stat_count_up(int backend_node_id, Node *parsetree);
+extern void error_stat_count_up(int backend_node_id, char *str);
+extern uint64 stat_get_select_count(int backend_node_id);
+extern uint64 stat_get_insert_count(int backend_node_id);
+extern uint64 stat_get_update_count(int backend_node_id);
+extern uint64 stat_get_delete_count(int backend_node_id);
+extern uint64 stat_get_ddl_count(int backend_node_id);
+extern uint64 stat_get_other_count(int backend_node_id);
+extern uint64 stat_get_panic_count(int backend_node_id);
+extern uint64 stat_get_fatal_count(int backend_node_id);
+extern uint64 stat_get_error_count(int backend_node_id);
-#endif /* statistics_h */
+#endif /* statistics_h */
* years */
int32 day; /* days, after time for alignment */
int32 month; /* months and years, after time for alignment */
-} Interval;
+} Interval;
/* Limits on the "precision" option (typmod) for these data types */
#define PGPOOLVERSION "tasukiboshi"
-
WD_SHUTDOWN,
WD_ADD_MESSAGE_SENT,
WD_NETWORK_ISOLATION
-} WD_STATES;
+} WD_STATES;
typedef enum
{
WD_SOCK_CONNECTED,
WD_SOCK_ERROR,
WD_SOCK_CLOSED
-} WD_SOCK_STATE;
+} WD_SOCK_STATE;
typedef enum
{
WD_EVENT_WD_STATE_REQUIRE_RELOAD,
WD_EVENT_I_AM_APPEARING_LOST,
WD_EVENT_I_AM_APPEARING_FOUND
-} WD_EVENTS;
+} WD_EVENTS;
/*
* If you add a new lost reason. Remember to add entry in
* wd_node_lost_reasons (watchdog.c)
*/
-typedef enum {
+typedef enum
+{
NODE_LOST_UNKNOWN_REASON = 0,
NODE_LOST_BY_LIFECHECK,
NODE_LOST_BY_SEND_FAILURE,
* wd_cluster_membership_status (watchdog.c)
*/
-typedef enum {
+typedef enum
+{
WD_NODE_MEMBERSHIP_ACTIVE,
WD_NODE_REVOKED_SHUTDOWN,
WD_NODE_REVOKED_NO_SHOW,
WD_NODE_REVOKED_LOST
-}WD_NODE_MEMBERSHIP_STATUS;
+} WD_NODE_MEMBERSHIP_STATUS;
typedef struct SocketConnection
{
struct timeval tv; /* connect time of socket */
char addr[48]; /* ip address of socket connection */
WD_SOCK_STATE sock_state; /* current state of socket */
-} SocketConnection;
+} SocketConnection;
typedef struct WatchdogNode
{
* from the node */
struct timeval last_sent_time; /* timestamp when last packet was sent on
* the node */
- struct timeval lost_time; /* timestamp when the remote node was lost on coordinator
- */
- WD_NODE_MEMBERSHIP_STATUS membership_status; /* status of node membership
- *in watchdog cluster
- Only valid for remote nodes */
- bool has_lost_us; /*
- * True when this remote node thinks
- * we are lost
- */
- int sending_failures_count; /* number of times we have failed
- * to send message to the node.
- * Gets reset after successful sent
- */
- int missed_beacon_count; /* number of times the node has
- * failed to reply for beacon.
- * message
- */
+ struct timeval lost_time; /* timestamp when the remote node was lost on
+ * coordinator */
+ WD_NODE_MEMBERSHIP_STATUS membership_status; /* status of node
+ * membership in watchdog
+ * cluster Only valid for
+ * remote nodes */
+ bool has_lost_us; /* True when this remote node thinks we are
+ * lost */
+ int sending_failures_count; /* number of times we have failed to
+ * send message to the node. Gets
+ * reset after successful sent */
+ int missed_beacon_count; /* number of times the node has failed
+ * to reply for beacon. message */
WD_NODE_LOST_REASONS node_lost_reason;
- char pgp_version[MAX_VERSION_STR_LEN]; /* Pgpool-II version */
- int wd_data_major_version; /* watchdog messaging version major*/
- int wd_data_minor_version; /* watchdog messaging version minor*/
+ char pgp_version[MAX_VERSION_STR_LEN]; /* Pgpool-II version */
+ int wd_data_major_version; /* watchdog messaging version major */
+ int wd_data_minor_version; /* watchdog messaging version minor */
char nodeName[WD_MAX_NODE_NAMELEN]; /* name of this node */
char hostname[WD_MAX_HOST_NAMELEN]; /* host name */
int pgpool_port; /* pgpool port */
int wd_priority; /* watchdog priority */
char delegate_ip[WD_MAX_HOST_NAMELEN]; /* delegate IP */
- int pgpool_node_id; /* pgpool node id specified in pgpool_node_id file */
+ int pgpool_node_id; /* pgpool node id specified in pgpool_node_id
+ * file */
int standby_nodes_count; /* number of standby nodes joined the
* cluster only applicable when this
* WatchdogNode is the
* initiated by remote */
SocketConnection client_socket; /* socket connections for this node
* initiated by local */
-} WatchdogNode;
+} WatchdogNode;
/*
* Argument for WD Exec cluster command
typedef struct WDExecCommandArg
{
- char arg_name[WD_MAX_ARG_NAME_LEN];
- char arg_value[WD_MAX_ARG_VALUE_LEN];
-} WDExecCommandArg;
+ char arg_name[WD_MAX_ARG_NAME_LEN];
+ char arg_value[WD_MAX_ARG_VALUE_LEN];
+} WDExecCommandArg;
extern pid_t initialize_watchdog(void);
int wd_priority; /* node priority */
char delegate_ip[WD_MAX_HOST_NAMELEN]; /* delegate IP */
int id;
-} WDNodeInfo;
+} WDNodeInfo;
typedef struct WDGenericData
{
bool boolVal;
long longVal;
} data;
-} WDGenericData;
+} WDGenericData;
-extern WDGenericData * get_wd_runtime_variable_value(char *wd_authkey, char *varName);
+extern WDGenericData *get_wd_runtime_variable_value(char *wd_authkey, char *varName);
extern WD_STATES get_watchdog_local_node_state(char *wd_authkey);
extern int get_watchdog_quorum_state(char *wd_authkey);
extern char *wd_get_watchdog_nodes_json(char *wd_authkey, int nodeID);
extern void set_wd_command_timeout(int sec);
-extern char* get_request_json(char *key, char *value, char *authKey);
-extern WDNodeInfo *parse_watchdog_node_info_from_wd_node_json(json_value * source);
+extern char *get_request_json(char *key, char *value, char *authKey);
+extern WDNodeInfo *parse_watchdog_node_info_from_wd_node_json(json_value *source);
#endif /* WD_COMMANDS_H */
WD_INVALID_LOCK,
/* currently we have only one lock */
WD_FOLLOW_PRIMARY_LOCK
-}WD_LOCK_STANDBY_TYPE;
+} WD_LOCK_STANDBY_TYPE;
extern WdCommandResult wd_start_recovery(void);
extern WDFailoverCMDResults wd_degenerate_backend_set(int *node_id_set, int count, unsigned char flags);
extern WDFailoverCMDResults wd_promote_backend(int node_id, unsigned char flags);
-extern WdCommandResult wd_execute_cluster_command(char* clusterCommand,List *argsList);
+extern WdCommandResult wd_execute_cluster_command(char *clusterCommand, List *argsList);
-extern WDPGBackendStatus * get_pg_backend_status_from_leader_wd_node(void);
+extern WDPGBackendStatus *get_pg_backend_status_from_leader_wd_node(void);
extern WD_STATES wd_internal_get_watchdog_local_node_state(void);
extern int wd_internal_get_watchdog_quorum_state(void);
COMMAND_OK,
COMMAND_FAILED,
COMMAND_TIMEOUT
-} WdCommandResult;
+} WdCommandResult;
typedef struct WDIPCCmdResult
char type;
int length;
char *data;
-} WDIPCCmdResult;
+} WDIPCCmdResult;
extern void wd_ipc_conn_initialize(void);
extern size_t estimate_ipc_socket_addr_len(void);
extern char *get_watchdog_ipc_address(void);
-extern WDIPCCmdResult * issue_command_to_watchdog(char type, int timeout_sec, char *data, int data_len, bool blocking);
+extern WDIPCCmdResult *issue_command_to_watchdog(char type, int timeout_sec, char *data, int data_len, bool blocking);
-extern void FreeCmdResult(WDIPCCmdResult * res);
+extern void FreeCmdResult(WDIPCCmdResult *res);
#endif /* WD_IPC_CONN_H */
FAILOVER_RES_BUILDING_CONSENSUS,
FAILOVER_RES_CONSENSUS_MAY_FAIL,
FAILOVER_RES_TIMEOUT
-} WDFailoverCMDResults;
+} WDFailoverCMDResults;
typedef enum WDValueDataType
{
VALUE_DATA_TYPE_STRING,
VALUE_DATA_TYPE_BOOL,
VALUE_DATA_TYPE_LONG
-} WDValueDataType;
+} WDValueDataType;
/* IPC MESSAGES TYPES */
#define WD_REGISTER_FOR_NOTIFICATION '0'
BACKEND_STATUS backend_status[MAX_NUM_BACKENDS];
char nodeName[WD_MAX_HOST_NAMELEN]; /* name of the watchdog node
* that sent the data */
-} WDPGBackendStatus;
+} WDPGBackendStatus;
-extern WatchdogNode * get_watchdog_node_from_json(char *json_data, int data_len, char **authkey);
-extern char *get_watchdog_node_info_json(WatchdogNode * wdNode, char *authkey);
-extern POOL_CONFIG * get_pool_config_from_json(char *json_data, int data_len);
+extern WatchdogNode *get_watchdog_node_from_json(char *json_data, int data_len, char **authkey);
+extern char *get_watchdog_node_info_json(WatchdogNode *wdNode, char *authkey);
+extern POOL_CONFIG *get_pool_config_from_json(char *json_data, int data_len);
extern char *get_pool_config_json(void);
extern char *get_lifecheck_node_status_change_json(int nodeID, int nodeStatus, char *message, char *authKey);
extern bool parse_node_status_json(char *json_data, int data_len, int *nodeID, int *nodeStatus, char **message);
extern bool parse_beacon_message_json(char *json_data, int data_len, int *state,
- long *seconds_since_node_startup,
- long *seconds_since_current_state,
- int *quorumStatus,
- int *standbyNodesCount,
- bool *escalated);
-extern char *get_beacon_message_json(WatchdogNode * wdNode);
+ long *seconds_since_node_startup,
+ long *seconds_since_current_state,
+ int *quorumStatus,
+ int *standbyNodesCount,
+ bool *escalated);
+extern char *get_beacon_message_json(WatchdogNode *wdNode);
extern char *get_wd_node_function_json(char *func_name, int *node_id_set, int count, unsigned char flags, unsigned int sharedKey, char *authKey);
extern bool parse_wd_node_function_json(char *json_data, int data_len, char **func_name, int **node_id_set, int *count, unsigned char *flags);
extern char *get_wd_simple_message_json(char *message);
-extern WDPGBackendStatus * get_pg_backend_node_status_from_json(char *json_data, int data_len);
-extern char *get_backend_node_status_json(WatchdogNode * wdNode);
+extern WDPGBackendStatus *get_pg_backend_node_status_from_json(char *json_data, int data_len);
+extern char *get_backend_node_status_json(WatchdogNode *wdNode);
extern char *get_simple_request_json(char *key, char *value, unsigned int sharedKey, char *authKey);
extern char *get_data_request_json(char *request_type, unsigned int sharedKey, char *authKey);
extern bool
-parse_wd_exec_cluster_command_json(char *json_data, int data_len,
- char **clusterCommand, List **args_list);
+ parse_wd_exec_cluster_command_json(char *json_data, int data_len,
+ char **clusterCommand, List **args_list);
-extern char *
-get_wd_exec_cluster_command_json(char *clusterCommand,List *args_list,
- unsigned int sharedKey, char *authKey);
+extern char *get_wd_exec_cluster_command_json(char *clusterCommand, List *args_list,
+ unsigned int sharedKey, char *authKey);
#endif
NODE_EMPTY,
NODE_DEAD,
NODE_ALIVE
-} NodeStates;
+} NodeStates;
typedef struct LifeCheckNode
{
NodeStates nodeState;
- int ID;
+ int ID;
WD_STATES wdState;
char stateName[128];
char hostName[WD_MAX_HOST_NAMELEN];
int retry_lives;
struct timeval hb_send_time; /* send time */
struct timeval hb_last_recv_time; /* recv time */
-} LifeCheckNode;
+} LifeCheckNode;
typedef struct lifeCheckCluster
{
int nodeCount;
struct LifeCheckNode *lifeCheckNodes;
-} LifeCheckCluster;
+} LifeCheckCluster;
-extern LifeCheckCluster * gslifeCheckCluster; /* lives in shared memory */
+extern LifeCheckCluster *gslifeCheckCluster; /* lives in shared memory */
/* wd_lifecheck.c */
/* wd_heartbeat.c */
-extern pid_t wd_hb_receiver(int fork_wait_time, WdHbIf * hb_if);
-extern pid_t wd_hb_sender(int fork_wait_time, WdHbIf * hb_if);
+extern pid_t wd_hb_receiver(int fork_wait_time, WdHbIf *hb_if);
+extern pid_t wd_hb_sender(int fork_wait_time, WdHbIf *hb_if);
#endif
extern int watchdog_thread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
extern char *string_replace(const char *string, const char *pattern, const char *replacement);
extern void wd_calc_hash(const char *str, int len, char *buf);
-extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
- const char *password, unsigned char *plaintext);
-extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
- const char *password, unsigned char *ciphertext);
+extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
+ const char *password, unsigned char *plaintext);
+extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
+ const char *password, unsigned char *ciphertext);
/* wd_escalation.c */
extern pid_t fork_escalation_process(void);
static void setResultIntData(PCPResultInfo * res, unsigned int slotno, int value);
static void process_node_info_response(PCPConnInfo * pcpConn, char *buf, int len);
-static void process_health_check_stats_response(PCPConnInfo * pcpConn, char *buf, int len);
+static void process_health_check_stats_response(PCPConnInfo * pcpConn, char *buf, int len);
static void process_command_complete_response(PCPConnInfo * pcpConn, char *buf, int len);
static void process_watchdog_info_response(PCPConnInfo * pcpConn, char *buf, int len);
static void process_process_info_response(PCPConnInfo * pcpConn, char *buf, int len);
struct addrinfo hints;
/*
- * getaddrinfo() requires a string because it also accepts service names,
- * such as "http".
+ * getaddrinfo() requires a string because it also accepts service
+ * names, such as "http".
*/
if (asprintf(&portstr, "%d", port) == -1)
{
pcpConn->connState = PCP_CONNECTION_BAD;
return pcpConn;
}
- break; /* successfully connected */
+ break; /* successfully connected */
}
/* no address available */
password = password_from_file;
/*
- * If reading password from .pcppass file fails, try to read it from prompt.
+ * If reading password from .pcppass file fails, try to read it from
+ * prompt.
*/
if (password == NULL || *password == '\0')
password = simple_prompt("Password: ", 100, false);
pcp_internal_error(pcpConn, "invalid PCP connection");
return NULL;
}
- if (command_scope == 'l') /*local only*/
+ if (command_scope == 'l') /* local only */
pcp_write(pcpConn->pcpConn, "T", 1);
else
pcp_write(pcpConn->pcpConn, "t", 1);
static void
process_node_info_response(PCPConnInfo * pcpConn, char *buf, int len)
{
- char *index;
+ char *index;
BackendInfo *backend_info = NULL;
if (strcmp(buf, "ArraySize") == 0)
}
PCPResultInfo *
-pcp_reload_config(PCPConnInfo * pcpConn,char command_scope)
+pcp_reload_config(PCPConnInfo * pcpConn, char command_scope)
{
- int wsize;
+ int wsize;
+
/*
* pcp packet format for pcp_reload_config
* z[size][command_scope]
*/
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
- pcp_internal_error(pcpConn, "invalid PCP connection");
- return NULL;
+ pcp_internal_error(pcpConn, "invalid PCP connection");
+ return NULL;
}
pcp_write(pcpConn->pcpConn, "Z", 1);
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
pcp_write(pcpConn->pcpConn, &command_scope, sizeof(char));
if (PCPFlush(pcpConn) < 0)
- return NULL;
+ return NULL;
if (pcpConn->Pfdebug)
- fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
+ fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
return process_pcp_response(pcpConn, 'Z');
}
PCPResultInfo *
-pcp_log_rotate(PCPConnInfo * pcpConn,char command_scope)
+pcp_log_rotate(PCPConnInfo * pcpConn, char command_scope)
{
- int wsize;
+ int wsize;
+
/*
* pcp packet format for pcp_log_rotate
* v[size][command_scope]
*/
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
- pcp_internal_error(pcpConn, "invalid PCP connection");
- return NULL;
+ pcp_internal_error(pcpConn, "invalid PCP connection");
+ return NULL;
}
pcp_write(pcpConn->pcpConn, "V", 1);
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
pcp_write(pcpConn->pcpConn, &command_scope, sizeof(char));
if (PCPFlush(pcpConn) < 0)
- return NULL;
+ return NULL;
if (pcpConn->Pfdebug)
- fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
+ fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
return process_pcp_response(pcpConn, 'V');
}
PCPResultInfo *
pcp_invalidate_query_cache(PCPConnInfo * pcpConn)
{
- int wsize;
+ int wsize;
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
- pcp_internal_error(pcpConn, "invalid PCP connection");
- return NULL;
+ pcp_internal_error(pcpConn, "invalid PCP connection");
+ return NULL;
}
pcp_write(pcpConn->pcpConn, "G", 1);
wsize = htonl(sizeof(int));
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
if (PCPFlush(pcpConn) < 0)
- return NULL;
+ return NULL;
if (pcpConn->Pfdebug)
- fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"G\", len=%d\n", ntohl(wsize));
+ fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"G\", len=%d\n", ntohl(wsize));
return process_pcp_response(pcpConn, 'G');
}
* len: length of the data
*/
static void
-process_health_check_stats_response
-(PCPConnInfo * pcpConn, char *buf, int len)
+ process_health_check_stats_response
+ (PCPConnInfo * pcpConn, char *buf, int len)
{
POOL_HEALTH_CHECK_STATS *stats;
- int *offsets;
- int n;
- int i;
- char *p;
- int maxstr;
- char c[] = "CommandComplete";
+ int *offsets;
+ int n;
+ int i;
+ char *p;
+ int maxstr;
+ char c[] = "CommandComplete";
if (strcmp(buf, c) != 0)
{
/* Allocate health stats memory */
stats = palloc0(sizeof(POOL_HEALTH_CHECK_STATS));
- p = (char *)stats;
+ p = (char *) stats;
/* Calculate total packet length */
offsets = pool_health_check_stats_offsets(&n);
for (i = 0; i < n; i++)
{
- if (i == n -1)
+ if (i == n - 1)
maxstr = sizeof(POOL_HEALTH_CHECK_STATS) - offsets[i];
else
maxstr = offsets[i + 1] - offsets[i];
- StrNCpy(p + offsets[i], buf, maxstr -1);
+ StrNCpy(p + offsets[i], buf, maxstr - 1);
buf += strlen(buf) + 1;
}
process_process_info_response(PCPConnInfo * pcpConn, char *buf, int len)
{
char *index;
- int *offsets;
- int i, n;
+ int *offsets;
+ int i,
+ n;
int maxstr;
- char *p;
- POOL_REPORT_POOLS *pools = NULL;
+ char *p;
+ POOL_REPORT_POOLS *pools = NULL;
offsets = pool_report_pools_offsets(&n);
goto INVALID_RESPONSE;
pools = palloc0(sizeof(POOL_REPORT_POOLS));
- p = (char *)pools;
+ p = (char *) pools;
buf += strlen(buf) + 1;
for (i = 0; i < n; i++)
{
- if (i == n -1)
+ if (i == n - 1)
maxstr = sizeof(POOL_REPORT_POOLS) - offsets[i];
else
maxstr = offsets[i + 1] - offsets[i];
- StrNCpy(p + offsets[i], buf, maxstr -1);
+ StrNCpy(p + offsets[i], buf, maxstr - 1);
buf += strlen(buf) + 1;
}
int wsize;
char node_id[16];
char *sendchar;
- char *switchover_option; /* n: just change node status, s: switchover primary */
+ char *switchover_option; /* n: just change node status, s:
+ * switchover primary */
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
pcp_write(pcpConn->pcpConn, sendchar, 1);
/* calculate send buffer size */
- wsize = sizeof(char); /* protocol. 'j' or 'J' */
+ wsize = sizeof(char); /* protocol. 'j' or 'J' */
wsize += strlen(node_id); /* node id + space */
- wsize += sizeof(char); /* promote option */
- wsize += sizeof(int); /* buffer length */
+ wsize += sizeof(char); /* promote option */
+ wsize += sizeof(int); /* buffer length */
wsize = htonl(wsize);
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
#include "auth/md5.h"
#include "auth/pool_hba.h"
-volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats area in shared memory */
+volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats
+ * area in shared memory */
-static POOL_CONNECTION_POOL_SLOT * slot;
+static POOL_CONNECTION_POOL_SLOT *slot;
static volatile sig_atomic_t reload_config_request = 0;
static volatile sig_atomic_t restart_request = 0;
volatile POOL_HEALTH_CHECK_STATISTICS *stats;
sigjmp_buf local_sigjmp_buf;
MemoryContext HealthCheckMemoryContext;
char psbuffer[NI_MAXHOST];
- static struct timeval start_time;
- static struct timeval end_time;
- long diff_t;
+ static struct timeval start_time;
+ static struct timeval end_time;
+ long diff_t;
+
+ POOL_HEALTH_CHECK_STATISTICS mystat;
- POOL_HEALTH_CHECK_STATISTICS mystat;
stats = &health_check_stats[*node_id];
/* Set application name */
{
MemoryContextSwitchTo(HealthCheckMemoryContext);
MemoryContextResetAndDeleteChildren(HealthCheckMemoryContext);
+
/*
- * Since HealthCheckMemoryContext is used for "slot", we need to clear it
- * so that new slot is allocated later on.
+ * Since HealthCheckMemoryContext is used for "slot", we need to clear
+ * it so that new slot is allocated later on.
*/
slot = NULL;
- bool skipped = false;
+ bool skipped = false;
CHECK_REQUEST;
stats->success_count++;
stats->last_successful_health_check = time(NULL);
- /* The node has become reachable again. Reset
- * the quarantine state
+ /*
+ * The node has become reachable again. Reset the quarantine
+ * state
*/
send_failback_request(*node_id, false, REQ_DETAIL_UPDATE | REQ_DETAIL_WATCHDOG);
}
discard_persistent_connection(*node_id);
/*
- Update health check duration only if health check was not skipped
- since the duration could be very small (probably 0) if health
- check is skipped.
+ * Update health check duration only if health check was not
+ * skipped since the duration could be very small (probably 0) if
+ * health check is skipped.
*/
if (!skipped)
stats->min_health_check_duration = diff_t;
}
- memcpy(&mystat, (void *)stats, sizeof(mystat));
+ memcpy(&mystat, (void *) stats, sizeof(mystat));
sleep(pool_config->health_check_params[*node_id].health_check_period);
}
{
BackendInfo *bkinfo;
int retry_cnt;
- static time_t auto_failback_interval = 0; /* resume time of auto_failback */
+ static time_t auto_failback_interval = 0; /* resume time of
+ * auto_failback */
bool check_failback = false;
time_t now;
- char *dbname;
+ char *dbname;
bkinfo = pool_get_node_info(node);
/*
- * If the node is already in down status or unused, do nothing.
- * except when the node state is down because of quarantine operation
- * since we want to detect when the node comes back to life again to
- * remove it from the quarantine state
+ * If the node is already in down status or unused, do nothing. except
+ * when the node state is down because of quarantine operation since we
+ * want to detect when the node comes back to life again to remove it from
+ * the quarantine state
*/
if (bkinfo->backend_status == CON_UNUSED ||
(bkinfo->backend_status == CON_DOWN && bkinfo->quarantine == false))
if (pool_config->auto_failback && auto_failback_interval < now &&
STREAM && !strcmp(bkinfo->replication_state, "streaming") && !Req_info->switching)
{
- ereport(DEBUG1,
- (errmsg("health check DB node: %d (status:%d) for auto_failback", node, bkinfo->backend_status)));
- check_failback = true;
+ ereport(DEBUG1,
+ (errmsg("health check DB node: %d (status:%d) for auto_failback", node, bkinfo->backend_status)));
+ check_failback = true;
}
else
return false;
if (retry_cnt != pool_config->health_check_params[node].health_check_max_retries)
{
- int ret_cnt;
+ int ret_cnt;
retry_cnt++;
ret_cnt = pool_config->health_check_params[node].health_check_max_retries - retry_cnt;
if (check_failback && !Req_info->switching && slot)
{
- ereport(LOG,
+ ereport(LOG,
(errmsg("request auto failback, node id:%d", node)));
- /* get current time to use auto_failback_interval */
- now = time(NULL);
- auto_failback_interval = now + pool_config->auto_failback_interval;
+ /* get current time to use auto_failback_interval */
+ now = time(NULL);
+ auto_failback_interval = now + pool_config->auto_failback_interval;
- send_failback_request(node, true, REQ_DETAIL_CONFIRMED);
+ send_failback_request(node, true, REQ_DETAIL_CONFIRMED);
}
}
size_t
health_check_stats_shared_memory_size(void)
{
- size_t size;
+ size_t size;
- size = MAXALIGN(sizeof(POOL_HEALTH_CHECK_STATISTICS) * MAX_NUM_BACKENDS);
+ size = MAXALIGN(sizeof(POOL_HEALTH_CHECK_STATISTICS) * MAX_NUM_BACKENDS);
elog(DEBUG1, "health_check_stats_shared_memory_size: requested size: %lu", size);
return size;
}
void
health_check_stats_init(POOL_HEALTH_CHECK_STATISTICS *addr)
{
- int i;
+ int i;
health_check_stats = addr;
memset((void *) health_check_stats, 0, health_check_stats_shared_memory_size());
- for (i = 0 ;i < MAX_NUM_BACKENDS; i++)
+ for (i = 0; i < MAX_NUM_BACKENDS; i++)
{
health_check_stats[i].min_health_check_duration = INT_MAX;
}
}
else
strlcpy(pool_passwd, pool_config->pool_passwd,
- sizeof(pool_passwd));
+ sizeof(pool_passwd));
pool_init_pool_passwd(pool_passwd, POOL_PASSWD_R);
}
for (;;)
{
- int cnt = 5; /* sending signal retry interval */
+ int cnt = 5; /* sending signal retry interval */
if (kill(pid, stop_sig) == -1)
{
fprintf(stderr, ".");
sleep(1);
cnt--;
- /* If pgpool did not stop within 5 seconds, break the loop and try
- * to send the signal again */
+
+ /*
+ * If pgpool did not stop within 5 seconds, break the loop and try
+ * to send the signal again
+ */
if (cnt <= 0)
break;
}
#define DEVNULL "/dev/null"
typedef int64 pg_time_t;
+
/*
* We read() into a temp buffer twice as big as a chunk, so that any fragment
* left after processing can be moved down to the front and we'll still have
*/
-bool redirection_done = false;
+bool redirection_done = false;
/*
* Private state
/*
* Check if the log directory or filename pattern changed in
- * pgpool.conf. If so, force rotation to make sure we're
- * writing the logfiles in the right place.
+ * pgpool.conf. If so, force rotation to make sure we're writing
+ * the logfiles in the right place.
*/
if (strcmp(pool_config->log_directory, currentLogDir) != 0)
{
* next_rotation_time.
*
* Also note that we need to beware of overflow in calculation of the
- * timeout: with large settings of pool_config->log_rotation_age, next_rotation_time
- * could be more than INT_MAX msec in the future. In that case we'll
- * wait no more than INT_MAX msec, and try again.
+ * timeout: with large settings of pool_config->log_rotation_age,
+ * next_rotation_time could be more than INT_MAX msec in the future.
+ * In that case we'll wait no more than INT_MAX msec, and try again.
*/
timeout.tv_sec = 0;
/* Reset usec everytime before calling sellect */
/*
* Sleep until there's something to do
*/
-
+
FD_ZERO(&rfds);
FD_SET(syslogPipe[0], &rfds);
- rc = select(syslogPipe[0] + 1, &rfds, NULL, NULL, timeout.tv_sec?&timeout:NULL);
+ rc = select(syslogPipe[0] + 1, &rfds, NULL, NULL, timeout.tv_sec ? &timeout : NULL);
if (rc == 1)
{
int bytesRead;
/*
* The initial logfile is created right in the postmaster, to verify that
- * the pool_config->log_directory is writable. We save the reference time so that the
- * syslogger child process can recompute this file name.
+ * the pool_config->log_directory is writable. We save the reference time
+ * so that the syslogger child process can recompute this file name.
*
* It might look a bit strange to re-do this during a syslogger restart,
* but we must do so since the postmaster closed syslogFile after the
mode_t oumask;
/*
- * Note we do not let pool_config->log_file_mode disable IWUSR, since we certainly want
- * to be able to write the files ourselves.
+ * Note we do not let pool_config->log_file_mode disable IWUSR, since we
+ * certainly want to be able to write the files ourselves.
*/
oumask = umask((mode_t) ((~(pool_config->log_file_mode | S_IWUSR)) & (S_IRWXU | S_IRWXG | S_IRWXO)));
fh = fopen(filename, mode);
/*
* Decide whether to overwrite or append. We can overwrite if (a)
- * pool_config->log_truncate_on_rotation is set, (b) the rotation was triggered by
- * elapsed time and not something else, and (c) the computed file name is
- * different from what we were previously logging into.
+ * pool_config->log_truncate_on_rotation is set, (b) the rotation was
+ * triggered by elapsed time and not something else, and (c) the computed
+ * file name is different from what we were previously logging into.
*
* Note: last_file_name should never be NULL here, but if it is, append.
*/
/*
* ENFILE/EMFILE are not too surprising on a busy system; just
* keep using the old file till we manage to get a new one.
- * Otherwise, assume something's wrong with pool_config->log_directory and stop
- * trying to create files.
+ * Otherwise, assume something's wrong with
+ * pool_config->log_directory and stop trying to create files.
*/
if (errno != ENFILE && errno != EMFILE)
{
/*
* ENFILE/EMFILE are not too surprising on a busy system; just
* keep using the old file till we manage to get a new one.
- * Otherwise, assume something's wrong with pool_config->log_directory and stop
- * trying to create files.
+ * Otherwise, assume something's wrong with
+ * pool_config->log_directory and stop trying to create files.
*/
if (errno != ENFILE && errno != EMFILE)
{
/* treat pool_config->log_filename as a strftime pattern */
strftime(filename + len, MAXPGPATH - len, pool_config->log_filename,
- localtime(&timestamp));
+ localtime(&timestamp));
if (suffix != NULL)
{
set_next_rotation_time(void)
{
pg_time_t now;
- struct tm *tm;
+ struct tm *tm;
int rotinterval;
/* nothing to do if time-based rotation is disabled */
* required */
SIG_WATCHDOG_QUORUM_CHANGED, /* notify main about cluster quorum change
* of watchdog cluster */
- SIG_INFORM_QUARANTINE_NODES, /* notify main about send degenerate requests
- * for all quarantine nodes */
+ SIG_INFORM_QUARANTINE_NODES, /* notify main about send degenerate
+ * requests for all quarantine nodes */
MAX_INTERRUPTS /* Must be last! */
-} User1SignalReason;
+} User1SignalReason;
typedef struct User1SignalSlot
{
sig_atomic_t signalFlags[MAX_INTERRUPTS];
-} User1SignalSlot;
+} User1SignalSlot;
#ifdef NOT_USED
/*
typedef struct
{
bool all_backend_down; /* true if all backends are down */
- bool search_primary; /* true if we need to seach primary node */
- bool need_to_restart_children; /* true if we need to restart child process */
- bool need_to_restart_pcp; /* true if we need to restart pc process */
+ bool search_primary; /* true if we need to search primary node */
+ bool need_to_restart_children; /* true if we need to restart
+ * child process */
+ bool need_to_restart_pcp; /* true if we need to restart pcp
+ * process */
bool partial_restart; /* true if partial restart is needed */
- bool sync_required; /* true if watchdog synchronization is necessary */
+ bool sync_required; /* true if watchdog synchronization is
+ * necessary */
POOL_REQUEST_KIND reqkind;
int node_id_set[MAX_NUM_BACKENDS];
/*
* An array to hold down nodes information. Each array member corresponds
* to node id. If nodes[i] is 1, the node i is down.
- */
+ */
int nodes[MAX_NUM_BACKENDS];
} FAILOVER_CONTEXT;
static RETSIGTYPE wakeup_handler(int sig);
static void initialize_shared_mem_objects(bool clear_memcache_oidmaps);
-static int trigger_failover_command(int node, const char *command_line,
- int old_main_node, int new_main_node, int old_primary);
+static int trigger_failover_command(int node, const char *command_line,
+ int old_main_node, int new_main_node, int old_primary);
static int find_primary_node(void);
static int find_primary_node_repeatedly(void);
static void terminate_all_childrens(int sig);
static char *process_name_from_pid(pid_t pid);
static void sync_backend_from_watchdog(void);
static void update_backend_quarantine_status(void);
-static int get_server_version(POOL_CONNECTION_POOL_SLOT * *slots, int node_id);
+static int get_server_version(POOL_CONNECTION_POOL_SLOT **slots, int node_id);
static void get_info_from_conninfo(char *conninfo, char *host, int hostlen, char *port, int portlen);
/*
* Subroutines of failover()
*/
-static int handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id);
-static int handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id);
+static int handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id);
+static int handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id);
static void kill_failover_children(FAILOVER_CONTEXT *failover_context, int node_id);
static void exec_failover_command(FAILOVER_CONTEXT *failover_context, int new_main_node_id, int promote_node_id);
-static int determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id);
-static int exec_follow_primary_command(FAILOVER_CONTEXT *failover_context, int node_id, int new_primary_node_id);
+static int determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id);
+static int exec_follow_primary_command(FAILOVER_CONTEXT *failover_context, int node_id, int new_primary_node_id);
static void save_node_info(FAILOVER_CONTEXT *failover_context, int new_primary_node_id, int new_main_node_id);
static void exec_child_restart(FAILOVER_CONTEXT *failover_context, int node_id);
static void exec_notice_pcp_child(FAILOVER_CONTEXT *failover_context);
static void check_requests(void);
static void print_signal_member(sigset_t *sig);
static void service_child_processes(void);
-static int select_victim_processes(int *process_info_idxs, int count);
+static int select_victim_processes(int *process_info_idxs, int count);
static struct sockaddr_un *un_addrs; /* unix domain socket path */
static struct sockaddr_un *pcp_un_addrs; /* unix domain socket path for PCP */
ProcessInfo *process_info = NULL; /* Per child info table on shmem */
-volatile User1SignalSlot *user1SignalSlot = NULL; /* User 1 signal slot on
- * shmem */
-int current_child_process_count;
+volatile User1SignalSlot *user1SignalSlot = NULL; /* User 1 signal slot on
+ * shmem */
+int current_child_process_count;
/*
* To track health check process ids
*/
ConnectionInfo *con_info;
-static int *fds = NULL; /* listening file descriptors (UNIX socket,
+static int *fds = NULL; /* listening file descriptors (UNIX socket,
* inet domain sockets) */
-static int *pcp_fds = NULL; /* listening file descriptors for pcp (UNIX socket,
- * inet domain sockets) */
+static int *pcp_fds = NULL; /* listening file descriptors for pcp (UNIX
+ * socket, inet domain sockets) */
extern char *pcp_conf_file; /* path for pcp.conf */
extern char *conf_file;
extern char *hba_file;
-static volatile sig_atomic_t exiting = 0; /* non 0 if I'm exiting */
-static volatile sig_atomic_t switching = 0; /* non 0 if I'm failing over or degenerating */
+static volatile sig_atomic_t exiting = 0; /* non 0 if I'm exiting */
+static volatile sig_atomic_t switching = 0; /* non 0 if I'm failing over or
+ * degenerating */
POOL_REQUEST_INFO *Req_info; /* request info area in shared memory */
volatile sig_atomic_t *InRecovery; /* non 0 if recovery is started */
* Dummy variable to suppress compiler warnings by discarding return values
* from write(2) in signal handlers
*/
-static int dummy_status;
+static int dummy_status;
/*
* Snapshot Isolation manage area
int num_inet_fds = 0;
int num_unix_fds = 0;
int num_pcp_fds = 0;
- int *unix_fds;
- int *inet_fds;
- int *pcp_unix_fds;
- int *pcp_inet_fds;
+ int *unix_fds;
+ int *inet_fds;
+ int *pcp_unix_fds;
+ int *pcp_inet_fds;
int i;
char unix_domain_socket_path[UNIXSOCK_PATH_BUFLEN + 1024];
if (strlen(unix_domain_socket_path) >= UNIXSOCK_PATH_BUFLEN)
{
ereport(WARNING,
- (errmsg("Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
- unix_domain_socket_path,
- (int) (UNIXSOCK_PATH_BUFLEN - 1))));
+ (errmsg("Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
+ unix_domain_socket_path,
+ (int) (UNIXSOCK_PATH_BUFLEN - 1))));
continue;
}
un_addrs = realloc(un_addrs, sizeof(struct sockaddr_un) * (num_unix_fds + 1));
if (un_addrs == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
snprintf(un_addrs[i].sun_path, sizeof(un_addrs[i].sun_path), "%s", unix_domain_socket_path);
num_unix_fds++;
if (num_unix_fds == 0)
{
ereport(FATAL,
- (errmsg("could not create any Unix-domain sockets")));
+ (errmsg("could not create any Unix-domain sockets")));
}
/* set unix domain socket path for pgpool PCP communication */
{
memset(unix_domain_socket_path, 0, sizeof(unix_domain_socket_path));
snprintf(unix_domain_socket_path, sizeof(unix_domain_socket_path), "%s/.s.PGSQL.%d",
- pool_config->pcp_socket_dir[i],
- pool_config->pcp_port);
+ pool_config->pcp_socket_dir[i],
+ pool_config->pcp_port);
if (strlen(unix_domain_socket_path) >= UNIXSOCK_PATH_BUFLEN)
{
ereport(WARNING,
- (errmsg("PCP Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
- unix_domain_socket_path,
- (int) (UNIXSOCK_PATH_BUFLEN - 1))));
+ (errmsg("PCP Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
+ unix_domain_socket_path,
+ (int) (UNIXSOCK_PATH_BUFLEN - 1))));
continue;
}
pcp_un_addrs = realloc(pcp_un_addrs, sizeof(struct sockaddr_un) * (num_pcp_fds + 1));
if (pcp_un_addrs == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
snprintf(pcp_un_addrs[i].sun_path, sizeof(pcp_un_addrs[i].sun_path), "%s", unix_domain_socket_path);
num_pcp_fds++;
if (num_pcp_fds == 0)
{
ereport(FATAL,
- (errmsg("could not create any PCP Unix-domain sockets")));
+ (errmsg("could not create any PCP Unix-domain sockets")));
}
/* set up signal handlers */
/* start the log collector if enabled */
pgpool_logger_pid = SysLogger_Start();
- /*
- * If using syslogger, close the read side of the pipe. We don't bother
- * tracking this in fd.c, either.
- */
+
+ /*
+ * If using syslogger, close the read side of the pipe. We don't bother
+ * tracking this in fd.c, either.
+ */
if (syslogPipe[0] >= 0)
close(syslogPipe[0]);
syslogPipe[0] = -1;
wakeup_request = 0;
/*
- * Watchdog process fires SIGUSR2 once in stable state
- * In addition, when watchdog fails to start with FATAL, the process
- * exits and SIGCHLD is fired, so we can also expect SIGCHLD from
- * watchdog process. Finally, we also need to look for the SIGUSR1
- * signal for the failover requests from other watchdog nodes. In
- * case a request arrives at the same time when the watchdog has just
- * been initialized.
+ * Watchdog process fires SIGUSR2 once in stable state In addition,
+ * when watchdog fails to start with FATAL, the process exits and
+ * SIGCHLD is fired, so we can also expect SIGCHLD from watchdog
+ * process. Finally, we also need to look for the SIGUSR1 signal for
+ * the failover requests from other watchdog nodes. In case a request
+ * arrives at the same time when the watchdog has just been
+ * initialized.
*
- * So we need to wait until watchdog is in stable state so only
- * wait for SIGUSR1, SIGCHLD, and signals those are necessary to make
- * sure we respond to user requests of shutdown if it arrives while we
- * are in waiting state.
+ * So we need to wait until watchdog is in stable state so only wait
+ * for SIGUSR1, SIGCHLD, and signals those are necessary to make sure
+ * we respond to user requests of shutdown if it arrives while we are
+ * in waiting state.
*
* Note that SIGUSR1 does not need to be in the wait signal list,
* although it's signal handler is already installed, but even if the
ereport(LOG,
(errmsg("watchdog process is initialized"),
- errdetail("watchdog messaging data version: %s",WD_MESSAGE_DATA_VERSION)));
+ errdetail("watchdog messaging data version: %s", WD_MESSAGE_DATA_VERSION)));
/*
* initialize the lifecheck process
if (sigusr1_request)
{
- do {
+ do
+ {
sigusr1_request = 0;
sigusr1_interrupt_processor();
} while (sigusr1_request == 1);
fds = malloc(sizeof(int) * (num_unix_fds + 1));
if (fds == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
unix_fds = create_unix_domain_sockets_by_list(un_addrs,
pool_config->unix_socket_group,
fds = realloc(fds, sizeof(int) * (num_inet_fds + num_unix_fds + 1));
if (fds == NULL)
ereport(FATAL,
- (errmsg("failed to expand memory for fds")));
+ (errmsg("failed to expand memory for fds")));
memcpy(&fds[num_unix_fds], inet_fds, sizeof(int) * num_inet_fds);
fds[num_unix_fds + num_inet_fds] = -1;
/* For query cache concurrency control */
if (pool_config->memory_cache_enabled)
{
- char path[1024];
- int lfd;
+ char path[1024];
+ int lfd;
snprintf(path, sizeof(path), "%s/QUERY_CACHE_LOCK_FILE", pool_config->logdir);
lfd = open(path, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR);
else
current_child_process_count = pool_config->num_init_children;
ereport(DEBUG1,
- (errmsg("Spawning %d child processes",current_child_process_count)));
+ (errmsg("Spawning %d child processes", current_child_process_count)));
for (i = 0; i < current_child_process_count; i++)
{
process_info[i].start_time = time(NULL);
pcp_fds = realloc(pcp_fds, sizeof(int) * (num_inet_fds + num_pcp_fds + 1));
if (pcp_fds == NULL)
ereport(FATAL,
- (errmsg("failed to expand memory for pcp_fds")));
+ (errmsg("failed to expand memory for pcp_fds")));
memcpy(&pcp_fds[num_pcp_fds], pcp_inet_fds, sizeof(int) * num_inet_fds);
pcp_fds[num_inet_fds + num_pcp_fds] = -1;
#ifdef NOT_USED
CHECK_REQUEST;
#endif
+
/*
* check for child signals to ensure child startup before reporting
* successful start.
if (*group != '\0')
{
- char *endptr;
- gid_t gid;
+ char *endptr;
+ gid_t gid;
unsigned long val;
- /* check group*/
+ /* check group */
val = strtoul(group, &endptr, 10);
if (*endptr == '\0')
{
else
{
struct group *gr;
+
gr = getgrnam(group);
- if(!gr)
+ if (!gr)
{
ereport(FATAL,
(errmsg("unix_socket_group \"%s\" does not exist", group)));
int i;
int killed_count = 0;
int terminated_count = 0;
+
/*
* This is supposed to be called from main process
*/
}
}
- for (i = 0 ; i < MAX_NUM_BACKENDS; i++)
+ for (i = 0; i < MAX_NUM_BACKENDS; i++)
{
if (health_check_pids[i] != 0)
{
*/
if (follow_pid > 0)
{
- ereport(LOG,
+ ereport(LOG,
(errmsg("terminating all child processes of follow child")));
kill(follow_pid, sig);
switch (sig)
{
/*
* Do not use VALID_BACKEND macro in raw mode. VALID_BACKEND return
- * true only if the argument is main node id. In other words,
- * standby nodes are false. So need to check backend status with
+ * true only if the argument is main node id. In other words, standby
+ * nodes are false. So need to check backend status with
* VALID_BACKEND_RAW.
*/
if (RAW_MODE)
if (write(pipe_fds[1], "\0", 1) < 0)
ereport(WARNING,
(errmsg("SIGUSR1 handler: write to pipe failed"),
- errdetail("%m")));
+ errdetail("%m")));
#endif
POOL_SETMASK(&UnBlockSig);
(errmsg("we have joined the watchdog cluster as STANDBY node"),
errdetail("syncing the backend states from the LEADER watchdog node")));
sync_backend_from_watchdog();
+
/*
* we also want to release the follow_primary lock if it was held
- * by the remote node.
- * because the change of watchdog coordinator would lead to forever stuck
- * in the the locked state
+ * by the remote node. because the change of watchdog coordinator
+ * would lead to forever stuck in the locked state
*/
pool_release_follow_primary_lock(true);
}
(errmsg("failover handler"),
errdetail("starting to select new main node")));
- /* If this is promoting specified node, new_main_node
- * should be replaced by the requested node. The requested
- * node should be REAL_PRIMARY_NODE_ID.
+ /*
+ * If this is promoting specified node, new_main_node should be
+ * replaced by the requested node. The requested node should be
+ * REAL_PRIMARY_NODE_ID.
*/
if (failover_context.request_details & REQ_DETAIL_PROMOTE)
{
else /* NODE_DOWN_REQUEST &&
* NODE_QUARANTINE_REQUEST */
{
-
+
if (handle_failover_request(&failover_context, node_id) < 0)
continue;
}
exec_failover_command(&failover_context, new_main_node, promote_node);
/*
- * Determine new primary node id. Possibly call find_primary_node_repeatedly().
+ * Determine new primary node id. Possibly call
+ * find_primary_node_repeatedly().
*/
new_primary = determine_new_primary_node(&failover_context, node_id);
-
+
/*
- * If follow_primary_command is provided and in streaming
- * replication mode, we start degenerating all backends as they are
- * not replicated anymore.
+ * If follow_primary_command is provided and in streaming replication
+ * mode, we start degenerating all backends as they are not replicated
+ * anymore.
*/
i = exec_follow_primary_command(&failover_context, node_id, new_primary);
new_main_node = i;
/*
- * Now new primary node and new main node are established.
- * Save them into shared memory. Also update status changed time.
+ * Now new primary node and new main node are established. Save them
+ * into shared memory. Also update status changed time.
*/
save_node_info(&failover_context, new_primary, new_main_node);
}
/*
- * We are almost done.
- * Unlock flags.
+ * We are almost done. Unlock flags.
*/
pool_semaphore_lock(REQUEST_INFO_SEM);
switching = 0;
if (pipe_fds[1] && write(pipe_fds[1], "\0", 1) < 0)
ereport(WARNING,
(errmsg("reap_handler: write to pipe failed"),
- errdetail("%m")));
+ errdetail("%m")));
#endif
POOL_SETMASK(&UnBlockSig);
int cnt = 0;
int i;
- for (i=0;i < pool_config->num_init_children;i++)
+ for (i = 0; i < pool_config->num_init_children; i++)
{
if (process_info[i].pid != 0)
cnt++;
int old_main_node, int new_main_node, int old_primary)
{
int r = 0;
- StringInfoData exec_cmd_data;
- StringInfo exec_cmd = &exec_cmd_data;
+ StringInfoData exec_cmd_data;
+ StringInfo exec_cmd = &exec_cmd_data;
BackendInfo *info;
BackendInfo *newmain;
BackendInfo *oldprimary;
static POOL_NODE_STATUS pool_node_status[MAX_NUM_BACKENDS];
POOL_NODE_STATUS *
-verify_backend_node_status(POOL_CONNECTION_POOL_SLOT * *slots)
+verify_backend_node_status(POOL_CONNECTION_POOL_SLOT **slots)
{
POOL_SELECT_RESULT *res;
int num_primaries = 0;
/* verify host and port */
if (((*backend_info->backend_hostname == '/' && *host == '\0') ||
- /*
- * It is possible that backend_hostname is Unix
- * domain socket but wal_receiver connects via
- * TCP/IP localhost.
- */
+
+ /*
+ * It is possible that backend_hostname is Unix domain
+ * socket but wal_receiver connects via TCP/IP
+ * localhost.
+ */
(*backend_info->backend_hostname == '/' && !strcmp("localhost", host)) ||
- !strcmp(backend_info->backend_hostname, host)) &&
+ !strcmp(backend_info->backend_hostname, host)) &&
backend_info->backend_port == atoi(port))
{
/* the standby connects to the primary */
{
ereport(LOG,
(errmsg("failed to find primary node"),
- errdetail("find_primary_node_repeatedly: expired after %d seconds",
- pool_config->search_primary_node_timeout)));
+ errdetail("find_primary_node_repeatedly: expired after %d seconds",
+ pool_config->search_primary_node_timeout)));
break;
}
}
#endif
SetProcessGlobalVariables(PT_FOLLOWCHILD);
+
/*
- * when the watchdog is enabled, we would come here
- * only on the coordinator node.
- * so before acquiring the local lock, Lock all the
- * standby nodes so that they should stop false primary
- * detection until we are finished with the follow primary
- * command.
+ * when the watchdog is enabled, we would come here only on the
+ * coordinator node. so before acquiring the local lock, Lock all the
+ * standby nodes so that they should stop false primary detection
+ * until we are finished with the follow primary command.
*/
wd_lock_standby(WD_FOLLOW_PRIMARY_LOCK);
pool_acquire_follow_primary_lock(true, false);
}
Req_info->follow_primary_ongoing = false;
pool_release_follow_primary_lock(false);
- /* inform standby watchdog nodes to release the lock as well*/
+ /* inform standby watchdog nodes to release the lock as well */
wd_unlock_standby(WD_FOLLOW_PRIMARY_LOCK);
exit(0);
}
static void
initialize_shared_mem_objects(bool clear_memcache_oidmaps)
{
- BackendDesc* backend_desc;
- Size size;
- int i;
+ BackendDesc *backend_desc;
+ Size size;
+ int i;
/*
* Calculate the size of required shared memory and try to allocate
* everything in single memory segment
*/
- size = 256;/* let us have some extra space */
+ size = 256; /* let us have some extra space */
size += MAXALIGN(sizeof(BackendDesc));
elog(DEBUG1, "BackendDesc: %zu bytes requested for shared memory", MAXALIGN(sizeof(BackendDesc)));
size += MAXALIGN(pool_coninfo_size());
size += MAXALIGN(pool_config->num_init_children * (sizeof(ProcessInfo)));
elog(DEBUG1, "ProcessInfo: num_init_children (%d) * sizeof(ProcessInfo) (%zu) = %zu bytes requested for shared memory",
- pool_config->num_init_children, sizeof(ProcessInfo), pool_config->num_init_children* sizeof(ProcessInfo));
+ pool_config->num_init_children, sizeof(ProcessInfo), pool_config->num_init_children * sizeof(ProcessInfo));
size += MAXALIGN(sizeof(User1SignalSlot));
elog(DEBUG1, "UserSignalSlot: %zu bytes requested for shared memory", MAXALIGN(sizeof(User1SignalSlot)));
size += MAXALIGN(sizeof(POOL_REQUEST_INFO));
elog(DEBUG1, "POOL_REQUEST_INFO: %zu bytes requested for shared memory", MAXALIGN(sizeof(POOL_REQUEST_INFO)));
- size += MAXALIGN(sizeof(int)); /* for InRecovery */
+ size += MAXALIGN(sizeof(int)); /* for InRecovery */
size += MAXALIGN(stat_shared_memory_size());
elog(DEBUG1, "stat_shared_memory_size: %zu bytes requested for shared memory", MAXALIGN(stat_shared_memory_size()));
size += MAXALIGN(health_check_stats_shared_memory_size());
/* Move the backend descriptors to shared memory */
backend_desc = pool_shared_memory_segment_get_chunk(sizeof(BackendDesc));
- memcpy(backend_desc, pool_config->backend_desc,sizeof(BackendDesc));
+ memcpy(backend_desc, pool_config->backend_desc, sizeof(BackendDesc));
pfree(pool_config->backend_desc);
pool_config->backend_desc = backend_desc;
- /* get the shared memory from main segment*/
- con_info = (ConnectionInfo *)pool_shared_memory_segment_get_chunk(pool_coninfo_size());
+ /* get the shared memory from main segment */
+ con_info = (ConnectionInfo *) pool_shared_memory_segment_get_chunk(pool_coninfo_size());
- process_info = (ProcessInfo *)pool_shared_memory_segment_get_chunk(pool_config->num_init_children * (sizeof(ProcessInfo)));
+ process_info = (ProcessInfo *) pool_shared_memory_segment_get_chunk(pool_config->num_init_children * (sizeof(ProcessInfo)));
for (i = 0; i < pool_config->num_init_children; i++)
{
process_info[i].connection_info = pool_coninfo(i, 0, 0);
process_info[i].pid = 0;
}
- user1SignalSlot = (User1SignalSlot *)pool_shared_memory_segment_get_chunk(sizeof(User1SignalSlot));
+ user1SignalSlot = (User1SignalSlot *) pool_shared_memory_segment_get_chunk(sizeof(User1SignalSlot));
- Req_info = (POOL_REQUEST_INFO *)pool_shared_memory_segment_get_chunk(sizeof(POOL_REQUEST_INFO));
+ Req_info = (POOL_REQUEST_INFO *) pool_shared_memory_segment_get_chunk(sizeof(POOL_REQUEST_INFO));
- InRecovery = (int *)pool_shared_memory_segment_get_chunk(sizeof(int));
+ InRecovery = (int *) pool_shared_memory_segment_get_chunk(sizeof(int));
/* Initialize statistics area */
stat_set_stat_area(pool_shared_memory_segment_get_chunk(stat_shared_memory_size()));
health_check_stats_init(pool_shared_memory_segment_get_chunk(health_check_stats_shared_memory_size()));
/* Initialize Snapshot Isolation manage area */
- si_manage_info = (SI_ManageInfo*)pool_shared_memory_segment_get_chunk(sizeof(SI_ManageInfo));
+ si_manage_info = (SI_ManageInfo *) pool_shared_memory_segment_get_chunk(sizeof(SI_ManageInfo));
si_manage_info->snapshot_waiting_children =
- (pid_t*)pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
+ (pid_t *) pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
si_manage_info->commit_waiting_children =
- (pid_t*)pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
+ (pid_t *) pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
/*
* Initialize backend status area. From now on, VALID_BACKEND macro can be
}
}
+
/*
* Read the status file
*/
*/
if (follow_pid > 0)
{
- ereport(LOG,
+ ereport(LOG,
(errmsg("terminating all child processes of follow child")));
kill(follow_pid, SIGTERM);
kill(-follow_pid, SIGTERM);
/*
* Update primary node id info on the shared memory area if it's different
- * from the one on leader watchdog node. This should be done only in streaming
- * or logical replication mode.
+ * from the one on leader watchdog node. This should be done only in
+ * streaming or logical replication mode.
*/
if (SL_MODE && Req_info->primary_node_id != backendStatus->primary_node_id)
{
ereport(LOG,
(errmsg("primary node:%d on leader watchdog node \"%s\" is different from local primary node:%d",
backendStatus->primary_node_id, backendStatus->nodeName, Req_info->primary_node_id)));
+
/*
* leader node returns primary_node_id = -1 when the primary node is
* in quarantine state on the leader. So we will not update our
backendStatus->primary_node_id == -1 && BACKEND_INFO(Req_info->primary_node_id).backend_status != CON_DOWN)
{
ereport(LOG,
- (errmsg("primary node:%d on leader watchdog node \"%s\" seems to be quarantined",
- Req_info->primary_node_id, backendStatus->nodeName),
- errdetail("keeping the current primary")));
+ (errmsg("primary node:%d on leader watchdog node \"%s\" seems to be quarantined",
+ Req_info->primary_node_id, backendStatus->nodeName),
+ errdetail("keeping the current primary")));
}
else
{
* version number is in the static memory area.
*/
static int
-get_server_version(POOL_CONNECTION_POOL_SLOT * *slots, int node_id)
+get_server_version(POOL_CONNECTION_POOL_SLOT **slots, int node_id)
{
static int server_versions[MAX_NUM_BACKENDS];
pool_acquire_follow_primary_lock(bool block, bool remote_request)
{
pool_sigset_t oldmask;
- volatile int follow_primary_count;
+ volatile int follow_primary_count;
for (;;)
{
{
if (Req_info->follow_primary_lock_held_remotely)
{
- /* The lock was already held by remote node and we only
+ /*
+ * The lock was already held by remote node and we only
* support one remote lock
*/
ereport(LOG,
else if (Req_info->follow_primary_count)
{
/*
- * we have received the release lock request from remote
- * but the lock is not held by remote node.
- * Just ignore the request
+ * we have received the release lock request from remote but the
+ * lock is not held by remote node. Just ignore the request
*/
ereport(DEBUG1,
(errmsg("pool_release_follow_primary_lock is not relasing the lock since it was not held by remote node")));
}
+
/*
- * Silently ignore, if we received the release request from remote while no lock was held.
- * Also clear the pending lock request, As we only support single remote lock
+ * Silently ignore, if we received the release request from remote
+ * while no lock was held. Also clear the pending lock request, As we
+ * only support single remote lock
*/
Req_info->follow_primary_lock_pending = false;
}
- else /*local request */
+ else /* local request */
{
/*
- * if we have a pending lock request from watchdog
- * do not remove the actual lock, Just clear the pending flag
+ * if we have a pending lock request from watchdog do not remove the
+ * actual lock, Just clear the pending flag
*/
if (Req_info->follow_primary_lock_pending)
{
if (Req_info->follow_primary_lock_held_remotely)
{
/*
- * Ideally this should not happen.
- * yet if for some reason our local node is trying to release a lock
- * that is held by remote node. Just produce a LOG message and release
- * the lock
+ * Ideally this should not happen. yet if for some reason our
+ * local node is trying to release a lock that is held by
+ * remote node. Just produce a LOG message and release the
+ * lock
*/
ereport(LOG,
(errmsg("pool_release_follow_primary_lock is relasing the remote lock by local request")));
{
if (node_id < 0 || node_id >= MAX_NUM_BACKENDS ||
(failover_context->reqkind == NODE_UP_REQUEST && !(RAW_MODE &&
- BACKEND_INFO(node_id).backend_status == CON_DOWN) && VALID_BACKEND(node_id)) ||
+ BACKEND_INFO(node_id).backend_status == CON_DOWN) && VALID_BACKEND(node_id)) ||
(failover_context->reqkind == NODE_DOWN_REQUEST && !VALID_BACKEND(node_id)))
{
if (node_id < 0 || node_id >= MAX_NUM_BACKENDS)
BACKEND_INFO(node_id).quarantine = false;
/*
- * do not search for primary node when handling the quarantine
- * nodes
+ * do not search for primary node when handling the quarantine nodes
*/
failover_context->search_primary = false;
/*
- * recalculate the main node id after setting the backend
- * status of quarantined node, this will bring us to the old
- * main_node_id that was before the quarantine state
+ * recalculate the main node id after setting the backend status of
+ * quarantined node, this will bring us to the old main_node_id that
+ * was before the quarantine state
*/
Req_info->main_node_id = get_next_main_node();
if (Req_info->primary_node_id == -1 &&
BACKEND_INFO(node_id).role == ROLE_PRIMARY)
{
/*
- * if the failback request is for the quarantined node and
- * that node had a primary role before it was quarantined,
- * restore the primary node status for that node. this is
- * important for the failover script to get the proper
- * value of old primary
+ * if the failback request is for the quarantined node and that
+ * node had a primary role before it was quarantined, restore the
+ * primary node status for that node. this is important for the
+ * failover script to get the proper value of old primary
*/
ereport(LOG,
(errmsg("failover: failing back the quarantine node that was primary before it was quarantined"),
Req_info->primary_node_id = node_id;
/*
- * since we changed the primary node so restart of all
- * children is required
+ * since we changed the primary node so restart of all children is
+ * required
*/
failover_context->need_to_restart_children = true;
failover_context->partial_restart = false;
else
{
/*
- * The request is a proper failback request and not because of
- * the update status of quarantined node
+ * The request is a proper failback request and not because of the
+ * update status of quarantined node
*/
(void) write_status_file();
static int
handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int cnt = 0; /* number of down node ids */
- int i;
+ int cnt = 0; /* number of down node ids */
+ int i;
for (i = 0; i < failover_context->node_count; i++)
{
if (failover_context->node_id_set[i] != -1 && (BACKEND_INFO(failover_context->node_id_set[i]).quarantine == true ||
- ((RAW_MODE && VALID_BACKEND_RAW(failover_context->node_id_set[i])) ||
- VALID_BACKEND(failover_context->node_id_set[i]))))
+ ((RAW_MODE && VALID_BACKEND_RAW(failover_context->node_id_set[i])) ||
+ VALID_BACKEND(failover_context->node_id_set[i]))))
{
ereport(LOG,
(errmsg("=== Starting %s. shutdown host %s(%d) ===",
BACKEND_INFO(failover_context->node_id_set[i]).backend_hostname,
BACKEND_INFO(failover_context->node_id_set[i]).backend_port)));
- BACKEND_INFO(failover_context->node_id_set[i]).backend_status = CON_DOWN; /* set down status */
+ BACKEND_INFO(failover_context->node_id_set[i]).backend_status = CON_DOWN; /* set down status */
pool_set_backend_status_changed_time(failover_context->node_id_set[i]);
if (failover_context->reqkind == NODE_QUARANTINE_REQUEST)
{
else
{
/*
- * if the degeneration request is for the quarantined
- * node and that node had a primary role before it was
- * quarantined, Restore the primary node status for
- * that node before degenerating it. This is important
- * for the failover script to get the proper value of
- * old primary
+ * if the degeneration request is for the quarantined node and
+ * that node had a primary role before it was quarantined,
+ * Restore the primary node status for that node before
+ * degenerating it. This is important for the failover script
+ * to get the proper value of old primary
*/
if (Req_info->primary_node_id == -1 &&
BACKEND_INFO(failover_context->node_id_set[i]).quarantine == true &&
static void
kill_failover_children(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int i, j, k;
+ int i,
+ j,
+ k;
+
/*
* On 2011/5/2 Tatsuo Ishii says: if mode is streaming replication and
- * request is NODE_UP_REQUEST (failback case) we don't need to restart
- * all children. Existing session will not use newly attached node,
- * but load balanced node is not changed until this session ends, so
- * it's harmless anyway.
+ * request is NODE_UP_REQUEST (failback case) we don't need to restart all
+ * children. Existing session will not use newly attached node, but load
+ * balanced node is not changed until this session ends, so it's harmless
+ * anyway.
*/
/*
- * On 2015/9/21 Tatsuo Ishii says: this judgment is not sufficient if
- * all backends were down. Child process has local status in which all
+ * On 2015/9/21 Tatsuo Ishii says: this judgment is not sufficient if all
+ * backends were down. Child process has local status in which all
* backends are down. In this case even if new connection arrives from
- * frontend, the child will not accept it because the local status
- * shows all backends are down. For this purpose we refer to
- * "all_backend_down" variable, which was set before updating backend
- * status.
+ * frontend, the child will not accept it because the local status shows
+ * all backends are down. For this purpose we refer to "all_backend_down"
+ * variable, which was set before updating backend status.
*
* See bug 248 for more details.
*/
/*
- * We also need to think about a case when the former primary node did
- * not exist. In the case we need to restart all children as
- * well. For example when previous primary node id is 0 and then it
- * went down, restarted, re-attached without promotion. Then existing
- * child process loses connection slot to node 0 and keeps on using it
- * when node 0 comes back. This could result in segfault later on in
- * the child process because there's no connection to node id 0.
+ * We also need to think about a case when the former primary node did not
+ * exist. In the case we need to restart all children as well. For
+ * example when previous primary node id is 0 and then it went down,
+ * restarted, re-attached without promotion. Then existing child process
+ * loses connection slot to node 0 and keeps on using it when node 0 comes
+ * back. This could result in segfault later on in the child process
+ * because there's no connection to node id 0.
*
- * Actually we need to think about when ALWAYS_PRIMARY flag is set
- * *but* DISALLOW_TO_FAILOVER flag is not set case. In the case after
- * primary failover Req_info->primary_node_id is set, but connection
- * to the primary node does not exist. So we should do full restart if
- * requested node id is the former primary node.
+ * Actually we need to think about when ALWAYS_PRIMARY flag is set *but*
+ * DISALLOW_TO_FAILOVER flag is not set case. In the case after primary
+ * failover Req_info->primary_node_id is set, but connection to the
+ * primary node does not exist. So we should do full restart if requested
+ * node id is the former primary node.
*
* See bug 672 for more details.
*/
static void
exec_failover_command(FAILOVER_CONTEXT *failover_context, int new_main_node_id, int promote_node_id)
{
- int i;
+ int i;
if (failover_context->reqkind == NODE_DOWN_REQUEST)
{
{
if (failover_context->nodes[i])
{
- /* If this is promoting specified node, new_main_node
- * should be replaced by the requested node. The requested
- * node should be REAL_PRIMARY_NODE_ID.
+ /*
+ * If this is promoting specified node, new_main_node should
+ * be replaced by the requested node. The requested node
+ * should be REAL_PRIMARY_NODE_ID.
*/
if (failover_context->request_details & REQ_DETAIL_PROMOTE)
{
static int
determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int new_primary;
+ int new_primary;
if (failover_context->reqkind == PROMOTE_NODE_REQUEST && VALID_BACKEND(node_id))
{
else if (failover_context->reqkind == NODE_QUARANTINE_REQUEST)
{
/*
- * If the quarantine node was the primary node, set the new primary
- * to -1 (invalid).
+ * If the quarantine node was the primary node, set the new primary to
+ * -1 (invalid).
*/
if (Req_info->primary_node_id == node_id)
{
/*
- * set the role of the node, This will help us restore the
- * primary node id when the node will come out from quarantine
- * state
+ * set the role of the node, This will help us restore the primary
+ * node id when the node will come out from quarantine state
*/
BACKEND_INFO(node_id).role = ROLE_PRIMARY;
new_primary = -1;
}
/*
- * If the down node was a standby node in streaming replication mode,
- * we can avoid calling find_primary_node_repeatedly() and recognize
- * the former primary as the new primary node, which will reduce the
- * time to process standby down.
- * This does not apply to the case when no primary node existed
- * (Req_info->primary_node_id < 0). In this case
+ * If the down node was a standby node in streaming replication mode, we
+ * can avoid calling find_primary_node_repeatedly() and recognize the
+ * former primary as the new primary node, which will reduce the time to
+ * process standby down. This does not apply to the case when no primary
+ * node existed (Req_info->primary_node_id < 0). In this case
* find_primary_node_repeatedly() should be called.
*/
else if (SL_MODE &&
static int
exec_follow_primary_command(FAILOVER_CONTEXT *failover_context, int node_id, int new_primary_node_id)
{
- int follow_cnt = 0;
- int new_main_node_id = -1;
- int i;
+ int follow_cnt = 0;
+ int new_main_node_id = -1;
+ int i;
if (!STREAM)
return -1;
failover_context->reqkind == PROMOTE_NODE_REQUEST)
{
/*
- * follow primary command is executed in following cases:
- * - failover against the current primary
- * - no primary exists and new primary is created by failover
- * - promote node request
+ * follow primary command is executed in following cases: - failover
+ * against the current primary - no primary exists and new primary is
+ * created by failover - promote node request
*/
if (((failover_context->reqkind == NODE_DOWN_REQUEST) &&
Req_info->primary_node_id >= 0 &&
static void
exec_child_restart(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int i, j, k;
+ int i,
+ j,
+ k;
if (failover_context->need_to_restart_children)
{
for (i = 0; i < pool_config->num_init_children; i++)
{
/*
- * Try to kill pgpool child because previous kill signal may
- * not be received by pgpool child. This could happen if
- * multiple PostgreSQL are going down (or even starting
- * pgpool, without starting PostgreSQL can trigger this).
- * Child calls degenerate_backend() and it tries to acquire
- * semaphore to write a failover request. In this case the
- * signal mask is set as well, thus signals are never
- * received.
+ * Try to kill pgpool child because previous kill signal may not
+ * be received by pgpool child. This could happen if multiple
+ * PostgreSQL are going down (or even starting pgpool, without
+ * starting PostgreSQL can trigger this). Child calls
+ * degenerate_backend() and it tries to acquire semaphore to write
+ * a failover request. In this case the signal mask is set as
+ * well, thus signals are never received.
*/
bool restart = false;
else
{
/*
- * Set restart request to each child. Children will exit(1)
- * whenever they are convenient.
+ * Set restart request to each child. Children will exit(1) whenever
+ * they are convenient.
*/
for (i = 0; i < pool_config->num_init_children; i++)
{
{
#ifdef NOT_USED
/*
- * Temporary black magic. Without this regression 055 does not
- * finish
+ * Temporary black magic. Without this regression 055 does not finish
*/
fprintf(stderr, "=== %s done. shutdown host %s(%d) ===",
(failover_context->reqkind == NODE_DOWN_REQUEST) ? "Failover" : "Quarantine",
(errmsg("fork a new PCP child pid %d in failover()", pcp_pid)));
}
}
+
/*
* -------------------------------------------------------------------------
* Subroutines for failover() end
create_unix_domain_sockets_by_list(struct sockaddr_un *un_addrs,
char *group, int permissions, int n_sockets)
{
- int i;
- int *sockets = NULL;
+ int i;
+ int *sockets = NULL;
if (un_addrs == NULL)
return NULL;
sockets = malloc(sizeof(int) * n_sockets);
if (sockets == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
for (i = 0; i < n_sockets; i++)
{
static int *
create_inet_domain_sockets_by_list(char **listen_addresses, int n_listen_addresses, int port, int *n_sockets)
{
- int *sockets = NULL;
- int i;
+ int *sockets = NULL;
+ int i;
*n_sockets = 0;
{
int *inet_fds,
*walk;
- int n = 0; /* number of fds returned from create_inet_domain_sockets(). */
+ int n = 0; /* number of fds returned from
+ * create_inet_domain_sockets(). */
ereport(LOG,
(errmsg("listen address[%d]: %s", i, listen_addresses[i])));
* Check and execute pending requests set by signal interrupts.
*/
static
-void check_requests(void)
+void
+check_requests(void)
{
sigset_t sig;
*/
if (sigusr1_request)
{
- do {
+ do
+ {
sigusr1_request = 0;
sigusr1_interrupt_processor();
} while (sigusr1_request == 1);
print_signal_member(&sig);
/*
- * Unblock signals so that SIGQUIT/SIGTERRM/SIGINT can be accepted.
- * They are all shutdown requests.
+ * Unblock signals so that SIGQUIT/SIGTERRM/SIGINT can be accepted. They
+ * are all shutdown requests.
*/
POOL_SETMASK(&UnBlockSig);
}
static
-void print_signal_member(sigset_t *sig)
+void
+print_signal_member(sigset_t *sig)
{
if (sigismember(sig, SIGQUIT))
ereport(LOG,
static void
service_child_processes(void)
{
- int connected_children = Req_info->conn_counter;
- int idle_children = current_child_process_count - connected_children;
- static int high_load_counter = 0;
+ int connected_children = Req_info->conn_counter;
+ int idle_children = current_child_process_count - connected_children;
+ static int high_load_counter = 0;
+
ereport(DEBUG2,
- (errmsg("current_children_count = %d idle_children = %d connected_children = %d high_load_counter = %d",
- current_child_process_count, idle_children, connected_children, high_load_counter)));
+ (errmsg("current_children_count = %d idle_children = %d connected_children = %d high_load_counter = %d",
+ current_child_process_count, idle_children, connected_children, high_load_counter)));
if (idle_children > pool_config->max_spare_children)
{
- int ki;
- int victim_count;
- int kill_process_info_idxs[MAX_ONE_SHOT_KILLS];
- int kill_count = idle_children - pool_config->max_spare_children;
- int cycle_skip_count_before_scale_down;
- int cycle_skip_between_scale_down;
- int one_shot_kill_count;
+ int ki;
+ int victim_count;
+ int kill_process_info_idxs[MAX_ONE_SHOT_KILLS];
+ int kill_count = idle_children - pool_config->max_spare_children;
+ int cycle_skip_count_before_scale_down;
+ int cycle_skip_between_scale_down;
+ int one_shot_kill_count;
switch (pool_config->process_management_strategy)
{
- case PM_STRATEGY_AGGRESSIVE:
- cycle_skip_count_before_scale_down = 25; /* roughly 50 seconds */
- cycle_skip_between_scale_down = 2;
- one_shot_kill_count = MAX_ONE_SHOT_KILLS;
- break;
+ case PM_STRATEGY_AGGRESSIVE:
+ cycle_skip_count_before_scale_down = 25; /* roughly 50 seconds */
+ cycle_skip_between_scale_down = 2;
+ one_shot_kill_count = MAX_ONE_SHOT_KILLS;
+ break;
- case PM_STRATEGY_LAZY:
- cycle_skip_count_before_scale_down = 150; /* roughly 300 seconds */
- cycle_skip_between_scale_down = 10;
- one_shot_kill_count = 3;
- break;
+ case PM_STRATEGY_LAZY:
+ cycle_skip_count_before_scale_down = 150; /* roughly 300 seconds */
+ cycle_skip_between_scale_down = 10;
+ one_shot_kill_count = 3;
+ break;
- case PM_STRATEGY_GENTLE:
- cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
- cycle_skip_between_scale_down = 5;
- one_shot_kill_count = 3;
- break;
+ case PM_STRATEGY_GENTLE:
+ cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
+ cycle_skip_between_scale_down = 5;
+ one_shot_kill_count = 3;
+ break;
- default:
- /* should never come here, but if we do use gentle counts*/
- cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
- cycle_skip_between_scale_down = 5;
- one_shot_kill_count = 3;
- break;
+ default:
+ /* should never come here, but if we do use gentle counts */
+ cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
+ cycle_skip_between_scale_down = 5;
+ one_shot_kill_count = 3;
+ break;
}
/* Do not scale down too quickly */
if (++high_load_counter < cycle_skip_count_before_scale_down || high_load_counter % cycle_skip_between_scale_down)
return;
- memset(kill_process_info_idxs, -1 ,sizeof(kill_process_info_idxs));
+ memset(kill_process_info_idxs, -1, sizeof(kill_process_info_idxs));
if (kill_count > one_shot_kill_count)
kill_count = one_shot_kill_count;
for (ki = 0; ki < victim_count; ki++)
{
- int index = kill_process_info_idxs[ki];
- if (index >=0)
+ int index = kill_process_info_idxs[ki];
+
+ if (index >= 0)
{
if (process_info[index].pid && process_info[index].status == WAIT_FOR_CONNECT)
{
ereport(DEBUG1,
- (errmsg("asking child process with pid:%d to kill itself to satisfy max_spare_children",
- process_info[index].pid),
- errdetail("child process has %d pooled connections",process_info[index].pooled_connections)));
+ (errmsg("asking child process with pid:%d to kill itself to satisfy max_spare_children",
+ process_info[index].pid),
+ errdetail("child process has %d pooled connections", process_info[index].pooled_connections)));
process_info[index].exit_if_idle = true;
kill(process_info[index].pid, SIGUSR2);
}
{
/* Reset the high load counter */
high_load_counter = 0;
- /*See if we need to spawn new children */
+ /* See if we need to spawn new children */
if (idle_children < pool_config->min_spare_children)
{
- int i;
- int spawned = 0;
- int new_spawn_no = pool_config->min_spare_children - idle_children;
+ int i;
+ int spawned = 0;
+ int new_spawn_no = pool_config->min_spare_children - idle_children;
+
/* Add 25% of max_spare_children */
new_spawn_no += pool_config->max_spare_children / 4;
if (new_spawn_no + current_child_process_count > pool_config->num_init_children)
{
ereport(DEBUG5,
- (errmsg("we have hit the ceiling, spawning %d child(ren)",
- pool_config->num_init_children - current_child_process_count)));
+ (errmsg("we have hit the ceiling, spawning %d child(ren)",
+ pool_config->num_init_children - current_child_process_count)));
new_spawn_no = pool_config->num_init_children - current_child_process_count;
}
if (new_spawn_no <= 0)
static int
select_victim_processes(int *process_info_idxs, int count)
{
- int i, ki;
- bool found_enough = false;
- int selected_count = 0;
+ int i,
+ ki;
+ bool found_enough = false;
+ int selected_count = 0;
- if (count <= 0)
- return 0;
+ if (count <= 0)
+ return 0;
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->num_init_children; i++)
+ {
+ /* Only the child process in waiting for connect can be terminated */
+ if (process_info[i].pid && process_info[i].status == WAIT_FOR_CONNECT)
{
- /* Only the child process in waiting for connect can be terminated */
- if (process_info[i].pid && process_info[i].status == WAIT_FOR_CONNECT)
+ if (selected_count < count)
{
- if (selected_count < count)
- {
- process_info_idxs[selected_count++] = i;
- }
- else
+ process_info_idxs[selected_count++] = i;
+ }
+ else
+ {
+ found_enough = true;
+
+ /*
+ * we don't bother selecting the child having least pooled
+ * connection with aggressive strategy
+ */
+ if (pool_config->process_management_strategy != PM_STRATEGY_AGGRESSIVE)
{
- found_enough = true;
- /* we don't bother selecting the child having least pooled connection with
- * aggressive strategy
- */
- if (pool_config->process_management_strategy != PM_STRATEGY_AGGRESSIVE)
+ for (ki = 0; ki < count; ki++)
{
- for (ki = 0; ki < count; ki++)
+ int old_index = process_info_idxs[ki];
+
+ if (old_index < 0 || process_info[old_index].pooled_connections > process_info[i].pooled_connections)
{
- int old_index = process_info_idxs[ki];
- if (old_index < 0 || process_info[old_index].pooled_connections > process_info[i].pooled_connections)
- {
- process_info_idxs[ki] = i;
- found_enough = false;
- break;
- }
- if (process_info[old_index].pooled_connections)
- found_enough = false;
+ process_info_idxs[ki] = i;
+ found_enough = false;
+ break;
}
+ if (process_info[old_index].pooled_connections)
+ found_enough = false;
}
}
}
- if (found_enough)
- break;
}
+ if (found_enough)
+ break;
+ }
return selected_count;
}
*
* Global variables. Should be eventually removed.
*/
-#include <unistd.h> /*For getpid*/
+#include <unistd.h> /* For getpid */
#include "pool.h"
#include "utils/elog.h"
pid_t mypid; /* pgpool parent process id */
-pid_t myProcPid; /* process pid */
+pid_t myProcPid; /* process pid */
ProcessType processType;
ProcessState processState;
-bool reset_query_error; /* true if error returned from backend while processing reset queries */
+bool reset_query_error; /* true if error returned from backend while
+ * processing reset queries */
/*
* Application name
*/
-static char *process_application_name = "main";
+static char *process_application_name = "main";
/*
* Fixed application names. ordered by ProcessType.
*/
-char *application_names[] = {"main",
- "child",
- "sr_check_worker",
- "heart_beat_sender",
- "heart_beat_receiver",
- "watchdog",
- "life_check",
- "follow_child",
- "watchdog_utility",
- "pcp_main",
- "pcp_child",
- "health_check",
- "logger"
+char *application_names[] = {"main",
+ "child",
+ "sr_check_worker",
+ "heart_beat_sender",
+ "heart_beat_receiver",
+ "watchdog",
+ "life_check",
+ "follow_child",
+ "watchdog_utility",
+ "pcp_main",
+ "pcp_child",
+ "health_check",
+ "logger"
};
char *
void
set_application_name_with_suffix(ProcessType ptype, int suffix)
{
- static char appname_buf[POOLCONFIG_MAXNAMELEN +1];
+ static char appname_buf[POOLCONFIG_MAXNAMELEN + 1];
+
snprintf(appname_buf, POOLCONFIG_MAXNAMELEN, "%s%d", get_application_name_for_process(ptype), suffix);
set_application_name_with_string(appname_buf);
}
return process_application_name;
}
-void SetProcessGlobalVariables(ProcessType pType)
+void
+SetProcessGlobalVariables(ProcessType pType)
{
processType = pType;
myProcPid = getpid();
* sends the signal to pgpool-II main process to terminate Pgpool-II
* process.
*/
-bool terminate_pgpool(char mode, bool error)
+bool
+terminate_pgpool(char mode, bool error)
{
pid_t ppid = getppid();
}
else
{
- ereport(error?ERROR:WARNING,
+ ereport(error ? ERROR : WARNING,
(errmsg("error while processing shutdown request"),
errdetail("invalid shutdown mode \"%c\"", mode)));
return false;
* _copyPlannedStmt
*/
static PlannedStmt *
-_copyPlannedStmt(const PlannedStmt *from)
+_copyPlannedStmt(const PlannedStmt * from)
{
PlannedStmt *newnode = makeNode(PlannedStmt);
* all the copy functions for classes which inherit from Plan.
*/
static void
-CopyPlanFields(const Plan *from, Plan *newnode)
+CopyPlanFields(const Plan * from, Plan * newnode)
{
COPY_SCALAR_FIELD(startup_cost);
COPY_SCALAR_FIELD(total_cost);
* _copyPlan
*/
static Plan *
-_copyPlan(const Plan *from)
+_copyPlan(const Plan * from)
{
Plan *newnode = makeNode(Plan);
* _copyResult
*/
static Result *
-_copyResult(const Result *from)
+_copyResult(const Result * from)
{
Result *newnode = makeNode(Result);
* _copyProjectSet
*/
static ProjectSet *
-_copyProjectSet(const ProjectSet *from)
+_copyProjectSet(const ProjectSet * from)
{
ProjectSet *newnode = makeNode(ProjectSet);
* _copyModifyTable
*/
static ModifyTable *
-_copyModifyTable(const ModifyTable *from)
+_copyModifyTable(const ModifyTable * from)
{
ModifyTable *newnode = makeNode(ModifyTable);
* _copyAppend
*/
static Append *
-_copyAppend(const Append *from)
+_copyAppend(const Append * from)
{
Append *newnode = makeNode(Append);
* _copyMergeAppend
*/
static MergeAppend *
-_copyMergeAppend(const MergeAppend *from)
+_copyMergeAppend(const MergeAppend * from)
{
MergeAppend *newnode = makeNode(MergeAppend);
* _copyRecursiveUnion
*/
static RecursiveUnion *
-_copyRecursiveUnion(const RecursiveUnion *from)
+_copyRecursiveUnion(const RecursiveUnion * from)
{
RecursiveUnion *newnode = makeNode(RecursiveUnion);
* _copyBitmapAnd
*/
static BitmapAnd *
-_copyBitmapAnd(const BitmapAnd *from)
+_copyBitmapAnd(const BitmapAnd * from)
{
BitmapAnd *newnode = makeNode(BitmapAnd);
* _copyBitmapOr
*/
static BitmapOr *
-_copyBitmapOr(const BitmapOr *from)
+_copyBitmapOr(const BitmapOr * from)
{
BitmapOr *newnode = makeNode(BitmapOr);
* _copyGather
*/
static Gather *
-_copyGather(const Gather *from)
+_copyGather(const Gather * from)
{
Gather *newnode = makeNode(Gather);
* all the copy functions for classes which inherit from Scan.
*/
static void
-CopyScanFields(const Scan *from, Scan *newnode)
+CopyScanFields(const Scan * from, Scan * newnode)
{
CopyPlanFields((const Plan *) from, (Plan *) newnode);
* _copyScan
*/
static Scan *
-_copyScan(const Scan *from)
+_copyScan(const Scan * from)
{
Scan *newnode = makeNode(Scan);
* _copySeqScan
*/
static SeqScan *
-_copySeqScan(const SeqScan *from)
+_copySeqScan(const SeqScan * from)
{
SeqScan *newnode = makeNode(SeqScan);
* _copySampleScan
*/
static SampleScan *
-_copySampleScan(const SampleScan *from)
+_copySampleScan(const SampleScan * from)
{
SampleScan *newnode = makeNode(SampleScan);
* _copyIndexScan
*/
static IndexScan *
-_copyIndexScan(const IndexScan *from)
+_copyIndexScan(const IndexScan * from)
{
IndexScan *newnode = makeNode(IndexScan);
* _copyIndexOnlyScan
*/
static IndexOnlyScan *
-_copyIndexOnlyScan(const IndexOnlyScan *from)
+_copyIndexOnlyScan(const IndexOnlyScan * from)
{
IndexOnlyScan *newnode = makeNode(IndexOnlyScan);
* _copyBitmapIndexScan
*/
static BitmapIndexScan *
-_copyBitmapIndexScan(const BitmapIndexScan *from)
+_copyBitmapIndexScan(const BitmapIndexScan * from)
{
BitmapIndexScan *newnode = makeNode(BitmapIndexScan);
* _copyBitmapHeapScan
*/
static BitmapHeapScan *
-_copyBitmapHeapScan(const BitmapHeapScan *from)
+_copyBitmapHeapScan(const BitmapHeapScan * from)
{
BitmapHeapScan *newnode = makeNode(BitmapHeapScan);
* _copyTidScan
*/
static TidScan *
-_copyTidScan(const TidScan *from)
+_copyTidScan(const TidScan * from)
{
TidScan *newnode = makeNode(TidScan);
* _copyTidRangeScan
*/
static TidRangeScan *
-_copyTidRangeScan(const TidRangeScan *from)
+_copyTidRangeScan(const TidRangeScan * from)
{
TidRangeScan *newnode = makeNode(TidRangeScan);
* _copySubqueryScan
*/
static SubqueryScan *
-_copySubqueryScan(const SubqueryScan *from)
+_copySubqueryScan(const SubqueryScan * from)
{
SubqueryScan *newnode = makeNode(SubqueryScan);
* _copyFunctionScan
*/
static FunctionScan *
-_copyFunctionScan(const FunctionScan *from)
+_copyFunctionScan(const FunctionScan * from)
{
FunctionScan *newnode = makeNode(FunctionScan);
* _copyTableFuncScan
*/
static TableFuncScan *
-_copyTableFuncScan(const TableFuncScan *from)
+_copyTableFuncScan(const TableFuncScan * from)
{
TableFuncScan *newnode = makeNode(TableFuncScan);
* _copyValuesScan
*/
static ValuesScan *
-_copyValuesScan(const ValuesScan *from)
+_copyValuesScan(const ValuesScan * from)
{
ValuesScan *newnode = makeNode(ValuesScan);
* _copyCteScan
*/
static CteScan *
-_copyCteScan(const CteScan *from)
+_copyCteScan(const CteScan * from)
{
CteScan *newnode = makeNode(CteScan);
* _copyNamedTuplestoreScan
*/
static NamedTuplestoreScan *
-_copyNamedTuplestoreScan(const NamedTuplestoreScan *from)
+_copyNamedTuplestoreScan(const NamedTuplestoreScan * from)
{
NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan);
* _copyWorkTableScan
*/
static WorkTableScan *
-_copyWorkTableScan(const WorkTableScan *from)
+_copyWorkTableScan(const WorkTableScan * from)
{
WorkTableScan *newnode = makeNode(WorkTableScan);
* _copyForeignScan
*/
static ForeignScan *
-_copyForeignScan(const ForeignScan *from)
+_copyForeignScan(const ForeignScan * from)
{
ForeignScan *newnode = makeNode(ForeignScan);
* _copyCustomScan
*/
static CustomScan *
-_copyCustomScan(const CustomScan *from)
+_copyCustomScan(const CustomScan * from)
{
CustomScan *newnode = makeNode(CustomScan);
* all the copy functions for classes which inherit from Join.
*/
static void
-CopyJoinFields(const Join *from, Join *newnode)
+CopyJoinFields(const Join * from, Join * newnode)
{
CopyPlanFields((const Plan *) from, (Plan *) newnode);
* _copyGatherMerge
*/
static GatherMerge *
-_copyGatherMerge(const GatherMerge *from)
+_copyGatherMerge(const GatherMerge * from)
{
GatherMerge *newnode = makeNode(GatherMerge);
COPY_POINTER_FIELD(sortOperators, from->numCols * sizeof(Oid));
COPY_POINTER_FIELD(collations, from->numCols * sizeof(Oid));
COPY_POINTER_FIELD(nullsFirst, from->numCols * sizeof(bool));
- COPY_BITMAPSET_FIELD(initParam);
+ COPY_BITMAPSET_FIELD(initParam);
return newnode;
}
* _copyJoin
*/
static Join *
-_copyJoin(const Join *from)
+_copyJoin(const Join * from)
{
Join *newnode = makeNode(Join);
* _copyNestLoop
*/
static NestLoop *
-_copyNestLoop(const NestLoop *from)
+_copyNestLoop(const NestLoop * from)
{
NestLoop *newnode = makeNode(NestLoop);
* _copyMergeJoin
*/
static MergeJoin *
-_copyMergeJoin(const MergeJoin *from)
+_copyMergeJoin(const MergeJoin * from)
{
MergeJoin *newnode = makeNode(MergeJoin);
int numCols;
* _copyHashJoin
*/
static HashJoin *
-_copyHashJoin(const HashJoin *from)
+_copyHashJoin(const HashJoin * from)
{
HashJoin *newnode = makeNode(HashJoin);
* _copyMaterial
*/
static Material *
-_copyMaterial(const Material *from)
+_copyMaterial(const Material * from)
{
Material *newnode = makeNode(Material);
* _copyMemoize
*/
static Memoize *
-_copyMemoize(const Memoize *from)
+_copyMemoize(const Memoize * from)
{
Memoize *newnode = makeNode(Memoize);
* all the copy functions for classes which inherit from Sort.
*/
static void
-CopySortFields(const Sort *from, Sort *newnode)
+CopySortFields(const Sort * from, Sort * newnode)
{
CopyPlanFields((const Plan *) from, (Plan *) newnode);
* _copySort
*/
static Sort *
-_copySort(const Sort *from)
+_copySort(const Sort * from)
{
Sort *newnode = makeNode(Sort);
* _copyIncrementalSort
*/
static IncrementalSort *
-_copyIncrementalSort(const IncrementalSort *from)
+_copyIncrementalSort(const IncrementalSort * from)
{
IncrementalSort *newnode = makeNode(IncrementalSort);
* _copyGroup
*/
static Group *
-_copyGroup(const Group *from)
+_copyGroup(const Group * from)
{
Group *newnode = makeNode(Group);
* _copyAgg
*/
static Agg *
-_copyAgg(const Agg *from)
+_copyAgg(const Agg * from)
{
Agg *newnode = makeNode(Agg);
* _copyWindowAgg
*/
static WindowAgg *
-_copyWindowAgg(const WindowAgg *from)
+_copyWindowAgg(const WindowAgg * from)
{
WindowAgg *newnode = makeNode(WindowAgg);
* _copyUnique
*/
static Unique *
-_copyUnique(const Unique *from)
+_copyUnique(const Unique * from)
{
Unique *newnode = makeNode(Unique);
* _copyHash
*/
static Hash *
-_copyHash(const Hash *from)
+_copyHash(const Hash * from)
{
Hash *newnode = makeNode(Hash);
* _copySetOp
*/
static SetOp *
-_copySetOp(const SetOp *from)
+_copySetOp(const SetOp * from)
{
SetOp *newnode = makeNode(SetOp);
* _copyLockRows
*/
static LockRows *
-_copyLockRows(const LockRows *from)
+_copyLockRows(const LockRows * from)
{
LockRows *newnode = makeNode(LockRows);
* _copyLimit
*/
static Limit *
-_copyLimit(const Limit *from)
+_copyLimit(const Limit * from)
{
Limit *newnode = makeNode(Limit);
* _copyNestLoopParam
*/
static NestLoopParam *
-_copyNestLoopParam(const NestLoopParam *from)
+_copyNestLoopParam(const NestLoopParam * from)
{
NestLoopParam *newnode = makeNode(NestLoopParam);
* _copyPlanRowMark
*/
static PlanRowMark *
-_copyPlanRowMark(const PlanRowMark *from)
+_copyPlanRowMark(const PlanRowMark * from)
{
PlanRowMark *newnode = makeNode(PlanRowMark);
}
static PartitionPruneInfo *
-_copyPartitionPruneInfo(const PartitionPruneInfo *from)
+_copyPartitionPruneInfo(const PartitionPruneInfo * from)
{
PartitionPruneInfo *newnode = makeNode(PartitionPruneInfo);
}
static PartitionedRelPruneInfo *
-_copyPartitionedRelPruneInfo(const PartitionedRelPruneInfo *from)
+_copyPartitionedRelPruneInfo(const PartitionedRelPruneInfo * from)
{
PartitionedRelPruneInfo *newnode = makeNode(PartitionedRelPruneInfo);
* _copyPartitionPruneStepOp
*/
static PartitionPruneStepOp *
-_copyPartitionPruneStepOp(const PartitionPruneStepOp *from)
+_copyPartitionPruneStepOp(const PartitionPruneStepOp * from)
{
PartitionPruneStepOp *newnode = makeNode(PartitionPruneStepOp);
* _copyPartitionPruneStepCombine
*/
static PartitionPruneStepCombine *
-_copyPartitionPruneStepCombine(const PartitionPruneStepCombine *from)
+_copyPartitionPruneStepCombine(const PartitionPruneStepCombine * from)
{
PartitionPruneStepCombine *newnode = makeNode(PartitionPruneStepCombine);
* _copyPlanInvalItem
*/
static PlanInvalItem *
-_copyPlanInvalItem(const PlanInvalItem *from)
+_copyPlanInvalItem(const PlanInvalItem * from)
{
PlanInvalItem *newnode = makeNode(PlanInvalItem);
static JsonExpr *
_copyJsonExpr(const JsonExpr *from)
{
- JsonExpr *newnode = makeNode(JsonExpr);
+ JsonExpr *newnode = makeNode(JsonExpr);
COPY_SCALAR_FIELD(op);
COPY_STRING_FIELD(column_name);
* _copyPathKey
*/
static PathKey *
-_copyPathKey(const PathKey *from)
+_copyPathKey(const PathKey * from)
{
PathKey *newnode = makeNode(PathKey);
}
static GroupByOrdering *
-_copyGroupByOrdering(const GroupByOrdering *from)
+_copyGroupByOrdering(const GroupByOrdering * from)
{
GroupByOrdering *newnode = makeNode(GroupByOrdering);
* _copyRestrictInfo
*/
static RestrictInfo *
-_copyRestrictInfo(const RestrictInfo *from)
+_copyRestrictInfo(const RestrictInfo * from)
{
RestrictInfo *newnode = makeNode(RestrictInfo);
* _copyPlaceHolderVar
*/
static PlaceHolderVar *
-_copyPlaceHolderVar(const PlaceHolderVar *from)
+_copyPlaceHolderVar(const PlaceHolderVar * from)
{
PlaceHolderVar *newnode = makeNode(PlaceHolderVar);
* _copySpecialJoinInfo
*/
static SpecialJoinInfo *
-_copySpecialJoinInfo(const SpecialJoinInfo *from)
+_copySpecialJoinInfo(const SpecialJoinInfo * from)
{
SpecialJoinInfo *newnode = makeNode(SpecialJoinInfo);
* _copyAppendRelInfo
*/
static AppendRelInfo *
-_copyAppendRelInfo(const AppendRelInfo *from)
+_copyAppendRelInfo(const AppendRelInfo * from)
{
AppendRelInfo *newnode = makeNode(AppendRelInfo);
* _copyPlaceHolderInfo
*/
static PlaceHolderInfo *
-_copyPlaceHolderInfo(const PlaceHolderInfo *from)
+_copyPlaceHolderInfo(const PlaceHolderInfo * from)
{
PlaceHolderInfo *newnode = makeNode(PlaceHolderInfo);
static JsonTable *
_copyJsonTable(const JsonTable *from)
{
- JsonTable *newnode = makeNode(JsonTable);
+ JsonTable *newnode = makeNode(JsonTable);
COPY_NODE_FIELD(context_item);
COPY_NODE_FIELD(pathspec);
* ****************************************************************
*/
static ExtensibleNode *
-_copyExtensibleNode(const ExtensibleNode *from)
+_copyExtensibleNode(const ExtensibleNode * from)
{
ExtensibleNode *newnode;
- const ExtensibleNodeMethods *methods;
+ const ExtensibleNodeMethods *methods;
methods = GetExtensibleNodeMethods(from->extnodename, false);
newnode = (ExtensibleNode *) newNode(methods->node_size,
#ifdef NOT_USED_IN_PGPOOL
static ForeignKeyCacheInfo *
-_copyForeignKeyCacheInfo(const ForeignKeyCacheInfo *from)
+_copyForeignKeyCacheInfo(const ForeignKeyCacheInfo * from)
{
ForeignKeyCacheInfo *newnode = makeNode(ForeignKeyCacheInfo);