JBoss Native SVN: r2445 - trunk/mod_cluster/native/mod_proxy_cluster.
by jbossnative-commits@lists.jboss.org
Author: jfrederic.clere(a)jboss.com
Date: 2009-05-25 08:20:31 -0400 (Mon, 25 May 2009)
New Revision: 2445
Modified:
trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c
Log:
Fix for MODCLUSTER-75.
Modified: trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c
===================================================================
--- trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c 2009-05-21 16:32:02 UTC (rev 2444)
+++ trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c 2009-05-25 12:20:31 UTC (rev 2445)
@@ -75,6 +75,12 @@
static apr_thread_mutex_t *lock = NULL;
+static server_rec *main_server = NULL;
+#define CREAT_ALL 0 /* create balancers/workers in all VirtualHost */
+#define CREAT_NONE 1 /* don't create balancers (but add workers) */
+#define CREAT_ROOT 2 /* Only create balancers/workers in the main server */
+static int creat_bal = 2;
+
#define WAITFORREMOVE 10 /* seconds */
#define TIMEINTERVAL apr_time_from_sec(1) /* recalcul the lbstatus based on number of request in the time interval */
@@ -354,16 +360,26 @@
return rv;
}
-/*
+/**
* Add a node to the worker conf
* NOTE: pool is the request pool or any temporary pool. Use conf->pool for any data that live longer.
+ * @param node the pointer to the node structure
+ * @param conf a proxy_server_conf.
+ * @param balancer the balancer to update or NULL to create it.
+ * @param name the name of the balancer.
+ * @param pool a temporary pool.
+ * @param server the server rec for logging purposes.
+ *
*/
-static void add_workers_node(nodeinfo_t *node, proxy_server_conf *conf, apr_pool_t *pool, server_rec *server)
+static void add_workers_node(nodeinfo_t *node, proxy_server_conf *conf, proxy_balancer *balancer,
+ char *name, apr_pool_t *pool, server_rec *server)
{
- char *name = apr_pstrcat(pool, "cluster://", node->mess.balancer, NULL);
- proxy_balancer *balancer = ap_proxy_get_balancer(pool, conf, name);
proxy_worker *worker = NULL;
if (!balancer) {
+ if (creat_bal == CREAT_NONE)
+ return; /* Don't create balancers */
+ if (creat_bal == CREAT_ROOT && server != main_server)
+ return; /* Don't create balancers if not root */
/* Create one */
int sizebal, i;
int *bal;
@@ -372,7 +388,7 @@
balancer = apr_array_push(conf->balancers);
memset(balancer, 0, sizeof(proxy_balancer));
balancer->name = apr_pstrdup(conf->pool, name);
- balancer->lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "cluster_byrequests", "0");
+ balancer->lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
balancer->workers = apr_array_make(conf->pool, 5, sizeof(proxy_worker));
/* XXX Is this a right place to create mutex */
#if APR_HAS_THREADS
@@ -383,15 +399,22 @@
"add_workers_node: Can't create lock for balancer");
}
#endif
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, server,
+ "add_workers_node: Using balancer %s", name);
+ }
+ if (balancer && balancer->workers->nelts == 0) {
/* Logic to copy the shared memory information to the balancer */
+ int sizebal, i;
+ int *bal;
bal = apr_pcalloc(pool, sizeof(int) * balancer_storage->get_max_size_balancer());
sizebal = balancer_storage->get_ids_used_balancer(bal);
for (i=0; i<sizebal; i++) {
balancerinfo_t *balan;
balancer_storage->read_balancer(bal[i], &balan);
- /* Something like cluster://cluster1 and cluster1 */
- if (strcmp(balan->balancer, &balancer->name[10]) == 0) {
+ /* Something like balancer://cluster1 and cluster1 */
+ if (strcmp(balan->balancer, &balancer->name[11]) == 0) {
/* XXX: StickySession, StickySessionRemove not in */
balancer->sticky = apr_psprintf(conf->pool, "%s|%s", balan->StickySessionCookie,
balan->StickySessionPath);
@@ -420,6 +443,31 @@
"add_workers_node done");
}
/*
+ * Adds the balancers and the workers to the VirtualHosts
+ * Note server is only here for logging purposes.
+ */
+static void add_balancers_workers(nodeinfo_t *node, apr_pool_t *pool, server_rec *server)
+{
+ server_rec *s = main_server;
+ char *name = apr_pstrcat(pool, "balancer://", node->mess.balancer, NULL);
+
+ while (s) {
+ void *sconf = s->module_config;
+ proxy_server_conf *conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
+ proxy_balancer *balancer = ap_proxy_get_balancer(pool, conf, name);
+ int i;
+
+ if (!balancer && creat_bal == CREAT_NONE) {
+ s = s->next;
+ continue;
+ }
+ add_workers_node(node, conf, balancer, name, pool, server);
+ if (creat_bal == CREAT_ROOT)
+ break;
+ s = s->next;
+ }
+}
+/*
* Remove a node from the worker conf
*/
static int remove_workers_node(nodeinfo_t *node, proxy_server_conf *conf, apr_pool_t *pool, server_rec *server)
@@ -460,7 +508,7 @@
if (i == 0) {
/* No connection in use: clean the worker */
proxy_balancer *balancer;
- char *name = apr_pstrcat(pool, "cluster://", node->mess.balancer, NULL);
+ char *name = apr_pstrcat(pool, "balancer://", node->mess.balancer, NULL);
/* mark the worker removed in the apr_array of the balancer */
balancer = (proxy_balancer *)conf->balancers->elts;
@@ -494,8 +542,10 @@
}
/*
* Create/Remove workers corresponding to updated nodes.
+ * NOTE: It is called from proxy_cluster_watchdog_func and other locations
+ * It shouldn't call worker_nodes_are_updated() because there may be several VirtualHosts.
*/
-static void update_workers_node(proxy_server_conf *conf, apr_pool_t *pool, server_rec *server)
+static void update_workers_node(proxy_server_conf *conf, apr_pool_t *pool, server_rec *server, int check)
{
int *id, size, i;
apr_time_t last;
@@ -503,7 +553,10 @@
/* Check if we have to do something */
apr_thread_mutex_lock(lock);
- last = node_storage->worker_nodes_need_update(server, pool);
+ if (check)
+ last = node_storage->worker_nodes_need_update(main_server, pool);
+ else
+ last = 1;
/* nodes_need_update will return 1 if last_updated is zero: first time we are called */
if (last == 0) {
@@ -529,12 +582,13 @@
/* The node has changed */
if (ou->mess.remove)
notok = notok + remove_workers_node(ou, conf, pool, server);
- else
- add_workers_node(ou, conf, pool, server);
+ else {
+ char *name = apr_pstrcat(pool, "balancer://", ou->mess.balancer, NULL);
+ proxy_balancer *balancer = ap_proxy_get_balancer(pool, conf, name);
+ add_workers_node(ou, conf, balancer, name, pool, server);
+ }
}
}
- if (! notok)
- node_storage->worker_nodes_are_updated(server);
apr_thread_mutex_unlock(lock);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, server,
@@ -854,8 +908,12 @@
if (node_storage->read_node(nodes[n], &node) != APR_SUCCESS)
continue;
ret = get_balancer_by_node(r, node, conf, NULL);
- if (ret != NULL) {
- return ret;
+ if (ret) {
+ /* Check that it is in our proxy_server_conf */
+ char *name = apr_pstrcat(r->pool, "balancer://", ret, NULL);
+ proxy_balancer *balancer = ap_proxy_get_balancer(r->pool, conf, name);
+ if (balancer)
+ return ret;
}
}
return NULL;
@@ -916,7 +974,7 @@
#endif
/* create workers for new nodes */
- update_workers_node(conf, r->pool, r->server);
+ update_workers_node(conf, r->pool, r->server, 1);
/* First try to see if we have available candidate */
if (domain && strlen(domain)>0)
@@ -1000,6 +1058,17 @@
}
return mycandidate;
}
+/*
+ * Wrapper to mod_balancer "standard" interface.
+ */
+static proxy_worker *find_best_byrequests(proxy_balancer *balancer,
+ request_rec *r)
+{
+ void *sconf = r->server->module_config;
+ proxy_server_conf *conf = (proxy_server_conf *)
+ ap_get_module_config(sconf, &proxy_module);
+ return internal_find_best_byrequests(balancer, conf, r, NULL, 0);
+}
/*
* Do a ping/pong to the node
@@ -1145,30 +1214,43 @@
return status;
}
/*
- * Check that we could connect to the node
+ * Check that we could connect to the node and create corresponding balancers and workers.
* id : worker id
* load : load factor from the cluster manager.
*/
static int proxy_node_isup(request_rec *r, int id, int load)
{
- void *sconf = r->server->module_config;
- proxy_server_conf *conf = (proxy_server_conf *)
- ap_get_module_config(sconf, &proxy_module);
- int i;
+ int i, foundid = -1;
apr_status_t rv;
- proxy_worker *worker;
+ proxy_worker *worker = NULL;
+ server_rec *s = main_server;
+ proxy_server_conf *conf;
+ nodeinfo_t *node;
- /* create the workers (that could be the first time) */
- update_workers_node(conf, r->pool, r->server);
+ if (node_storage->read_node(id, &node) != APR_SUCCESS)
+ return 500;
- /* search for the worker */
- worker = (proxy_worker *)conf->workers->elts;
- for (i = 0; i < conf->workers->nelts; i++) {
- if (worker->id == id)
- break;
- worker++;
+ /* create the balancers and workers (that could be the first time) */
+ add_balancers_workers(node, r->pool, r->server);
+
+ /* search for the worker in the VirtualHosts */
+ while (s) {
+ void *sconf = s->module_config;
+ conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+
+ worker = (proxy_worker *)conf->workers->elts;
+ for (i = 0; i < conf->workers->nelts; i++) {
+ if (worker->id == id) {
+ foundid = id;
+ break;
+ }
+ worker++;
+ }
+ if (foundid == id)
+ break;
+ s = s->next;
}
- if (i == conf->workers->nelts) {
+ if (foundid != id) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy_cluster_isup: Can't find worker for %d", id);
return 500;
@@ -1246,24 +1328,37 @@
static void * APR_THREAD_FUNC proxy_cluster_watchdog_func(apr_thread_t *thd, void *data)
{
apr_pool_t *pool;
- server_rec *s = (server_rec *) data;
- void *sconf = s->module_config;
- proxy_server_conf *conf = (proxy_server_conf *)
- ap_get_module_config(sconf, &proxy_module);
+ server_rec *server = (server_rec *) data;
+ for (;;) {
+ server_rec *s = main_server;
+ void *sconf = s->module_config;
+ proxy_server_conf *conf = (proxy_server_conf *)
+ ap_get_module_config(sconf, &proxy_module);
+ apr_time_t last;
- for (;;) {
apr_sleep(apr_time_make(1, 0));
apr_pool_create(&pool, conf->pool);
- /* Create new workers if the shared memory changes */
- update_workers_node(conf, pool, s);
- /* cleanup removed node in shared memory */
- remove_removed_node(pool, s);
- /* Calculate the lbstatus for each node */
- update_workers_lbstatus(conf, pool, s);
- /* Free sessionid slots */
- if (sessionid_storage)
- remove_timeout_sessionid(conf, pool, s);
+ last = node_storage->worker_nodes_need_update(main_server, pool);
+ while (s) {
+ sconf = s->module_config;
+ conf = (proxy_server_conf *)
+ ap_get_module_config(sconf, &proxy_module);
+
+ /* Create new workers if the shared memory changes */
+ if (last)
+ update_workers_node(conf, pool, s, 0);
+ /* cleanup removed node in shared memory */
+ remove_removed_node(pool, s);
+ /* Calculate the lbstatus for each node */
+ update_workers_lbstatus(conf, pool, s);
+ /* Free sessionid slots */
+ if (sessionid_storage)
+ remove_timeout_sessionid(conf, pool, s);
+ s = s->next;
+ }
apr_pool_destroy(pool);
+ if (last)
+ node_storage->worker_nodes_are_updated(main_server);
}
apr_thread_exit(thd, 0);
return NULL;
@@ -1278,6 +1373,8 @@
apr_status_t rv;
apr_thread_t *wdt;
+ main_server = s;
+
rv = apr_thread_mutex_create(&lock, APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, s,
@@ -1428,14 +1525,14 @@
balancer = get_route_balancer(r, conf);
if (!balancer) {
/* May be the balancer has not been created (XXX: use shared memory to find the balancer ...) */
- update_workers_node(conf, r->pool, r->server);
+ update_workers_node(conf, r->pool, r->server, 1);
balancer = get_route_balancer(r, conf);
}
if (!balancer)
balancer = get_context_host_balancer(r);
if (balancer) {
- r->filename = apr_pstrcat(r->pool, "proxy:cluster://", balancer, r->unparsed_uri, NULL);
+ r->filename = apr_pstrcat(r->pool, "proxy:balancer://", balancer, r->unparsed_uri, NULL);
r->handler = "proxy-server";
r->proxyreq = PROXYREQ_REVERSE;
#if HAVE_CLUSTER_EX_DEBUG
@@ -1459,7 +1556,7 @@
ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_DEBUG, 0, r->server,
"proxy_cluster_canon url: %s", url);
#endif
- if (strncasecmp(url, "cluster:", 9) == 0) {
+ if (strncasecmp(url, "balancer:", 9) == 0) {
return OK;
}
return DECLINED;
@@ -1794,7 +1891,7 @@
!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url))) {
apr_thread_mutex_unlock(lock);
/* May the node has not be created yet */
- update_workers_node(conf, r->pool, r->server);
+ update_workers_node(conf, r->pool, r->server, 1);
if (!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url))) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: CLUSTER no balancer for %s", *url);
@@ -1937,6 +2034,8 @@
#if HAVE_CLUSTER_EX_DEBUG
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy_cluster_post_request for (%s)", balancer->name);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy_cluster_post_request for (%s) %s", balancer->name, balancer->sticky);
#endif
if (worker && worker->s->busy)
@@ -1946,7 +2045,7 @@
/* Add information about sessions corresponding to a node */
sticky = apr_table_get(r->notes, "session-sticky");
- if (sticky == NULL) {
+ if (sticky == NULL && balancer->sticky) {
char *path, *stick;
stick = apr_pstrdup(r->pool, balancer->sticky);
if ((path = strchr(stick, '|'))) {
@@ -1954,6 +2053,12 @@
}
sticky = (const char *) stick;
}
+ if (sticky == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy_cluster_post_request for (%s) %s", balancer->name, balancer->sticky);
+ return OK;
+ }
+
cookie = get_cookie_param(r, sticky, 0);
sessionid = apr_table_get(r->notes, "session-id");
route = apr_table_get(r->notes, "session-route");
@@ -1982,6 +2087,14 @@
return OK;
}
+/* the lbmethods (note it the only one in mod_cluster for the moment) */
+static const proxy_balancer_method byrequests =
+{
+ "byrequests",
+ &find_best_byrequests,
+ NULL
+};
+
/*
* Register the hooks on our module.
*/
@@ -2003,7 +2116,10 @@
proxy_hook_pre_request(proxy_cluster_pre_request, NULL, NULL, APR_HOOK_FIRST);
proxy_hook_post_request(proxy_cluster_post_request, NULL, NULL, APR_HOOK_FIRST);
- ap_register_provider(p, "proxy_cluster" , "balancer", "0", &balancerhandler);
+ /* Register a provider for the "ping/pong" logic */
+ ap_register_provider(p, "proxy_cluster", "balancer", "0", &balancerhandler);
+ /* Register a provider for the loadbalancer (for things like ProxyPass /titi balancer://mycluster/myapp) */
+ ap_register_provider(p, PROXY_LBMETHOD, "byrequests", "0", &byrequests);
}
/* XXX: not needed
@@ -2018,12 +2134,35 @@
return NULL;
}
+static const char *cmd_proxy_cluster_creatbal(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ int val = atoi(arg);
+ if (val<0 || val>2) {
+ return "CreateBalancers must be one of: 0, 1 or 2";
+ } else {
+ creat_bal = val;
+ }
+ return NULL;
+}
+
+static const command_rec proxy_cluster_cmds[] =
+{
+ AP_INIT_TAKE1(
+ "CreateBalancers",
+ cmd_proxy_cluster_creatbal,
+ NULL,
+ OR_ALL,
+ "CreateBalancers - Defined VirtualHosts where the balancers are created 0: All, 1: None, 2: Main (Default: 2 Main)"
+ ),
+ {NULL}
+};
+
module AP_MODULE_DECLARE_DATA proxy_cluster_module = {
STANDARD20_MODULE_STUFF,
NULL, /* per-directory config creator */
NULL, /* dir config merger */
create_proxy_cluster_server_config, /* server config creator */
NULL, /* server config merger */
- NULL, /* command table */
+ proxy_cluster_cmds, /* command table */
proxy_cluster_hooks /* register hooks */
};