Author: jfrederic.clere(a)jboss.com
Date: 2008-03-20 12:13:24 -0400 (Thu, 20 Mar 2008)
New Revision: 1450
Modified:
sandbox/httpd/src/native/common/node.c
sandbox/httpd/src/native/common/node.h
sandbox/httpd/src/native/mod_manager/mod_manager.c
sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c
Log:
Add logic to support check of host and context.
Modified: sandbox/httpd/src/native/common/node.c
===================================================================
--- sandbox/httpd/src/native/common/node.c 2008-03-19 15:41:20 UTC (rev 1449)
+++ sandbox/httpd/src/native/common/node.c 2008-03-20 16:13:24 UTC (rev 1450)
@@ -96,7 +96,7 @@
}
return APR_NOTFOUND;
}
-apr_status_t insert_update_node(mem_t *s, nodeinfo_t *node)
+apr_status_t insert_update_node(mem_t *s, nodeinfo_t *node, int *id)
{
apr_status_t rv;
nodeinfo_t *ou;
@@ -105,6 +105,7 @@
node->mess.id = 0;
rv = s->storage->ap_slotmem_do(s->slotmem, insert_update, &node,
s->p);
if (node->mess.id != 0 && rv == APR_SUCCESS) {
+ *id = node->mess.id;
return APR_SUCCESS; /* updated */
}
@@ -115,10 +116,11 @@
}
memcpy(ou, node, sizeof(nodeinfo_t));
ou->mess.id = ident;
+ *id = ident;
ou->updatetime = apr_time_sec(apr_time_now());
/* set of offset to the proxy_worker_stat */
- ou->status = sizeof(nodemess_t) + sizeof(unsigned long) + sizeof(ou->balancer) + sizeof(int);
+ ou->offset = sizeof(nodemess_t) + sizeof(unsigned long) + sizeof(ou->balancer) + sizeof(int);
return APR_SUCCESS;
}
Modified: sandbox/httpd/src/native/common/node.h
===================================================================
--- sandbox/httpd/src/native/common/node.h 2008-03-19 15:41:20 UTC (rev 1449)
+++ sandbox/httpd/src/native/common/node.h 2008-03-20 16:13:24 UTC (rev 1450)
@@ -65,7 +65,7 @@
char strtime[8]; /* date when send by the node */
/* part updated in httpd */
- int id; /* id in table and worker it */
+ int id; /* id in table and worker id */
};
typedef struct nodemess nodemess_t;
@@ -78,7 +78,7 @@
/* filled by httpd */
unsigned long updatetime; /* time of last received message */
char balancer[40]; /* name of the balancer */
- int status; /* offset to the proxy_worker_stat structure */
+ int offset; /* offset to the proxy_worker_stat structure */
char stat[SIZEOFSCORE]; /* to store the status */
};
typedef struct nodeinfo nodeinfo_t;
@@ -91,7 +91,7 @@
* @return APR_SUCCESS if all went well
*
*/
-APR_DECLARE(apr_status_t) insert_update_node(mem_t *s, nodeinfo_t *node);
+APR_DECLARE(apr_status_t) insert_update_node(mem_t *s, nodeinfo_t *node, int *id);
/**
* read a node record from the shared table
Modified: sandbox/httpd/src/native/mod_manager/mod_manager.c
===================================================================
--- sandbox/httpd/src/native/mod_manager/mod_manager.c 2008-03-19 15:41:20 UTC (rev 1449)
+++ sandbox/httpd/src/native/mod_manager/mod_manager.c 2008-03-20 16:13:24 UTC (rev 1450)
@@ -79,13 +79,58 @@
{
return(get_max_size_node(nodestatsmem));
}
-static const struct node_storage_method storage =
+static const struct node_storage_method node_storage =
{
loc_read_node,
loc_get_ids_used_node,
loc_get_max_size_node
};
+/*
+ * routines for the context_storage_method
+ */
+static apr_status_t loc_read_context(int ids, contextinfo_t **context)
+{
+ return (get_context(contextstatsmem, context, ids));
+}
+static int loc_get_ids_used_context(int *ids)
+{
+ return(get_ids_used_context(contextstatsmem, ids));
+}
+static int loc_get_max_size_context()
+{
+ return(get_max_size_context(contextstatsmem));
+}
+static const struct context_storage_method context_storage =
+{
+ loc_read_context,
+ loc_get_ids_used_context,
+ loc_get_max_size_context
+};
+
+/*
+ * routines for the host_storage_method
+ */
+static apr_status_t loc_read_host(int ids, hostinfo_t **host)
+{
+ return (get_host(hoststatsmem, host, ids));
+}
+static int loc_get_ids_used_host(int *ids)
+{
+ return(get_ids_used_host(hoststatsmem, ids));
+}
+static int loc_get_max_size_host()
+{
+ return(get_max_size_host(hoststatsmem));
+}
+static const struct host_storage_method host_storage =
+{
+ loc_read_host,
+ loc_get_ids_used_host,
+ loc_get_max_size_host
+};
+
+/* helper for the handling of the Alias: host1,... Context: context1,... */
struct cluster_host {
char *host;
char *context;
@@ -333,7 +378,6 @@
}
phost->context = ptr[i+1];
}
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, "manager_handler %s %s", ptr[i], ptr[i+1]);
i++;
i++;
}
@@ -342,9 +386,9 @@
strcpy(nodeinfo.balancer,mconf->balancername);
/* Insert or update node description */
- if (insert_update_node(nodestatsmem, &nodeinfo) != APR_SUCCESS)
+ int id;
+ if (insert_update_node(nodestatsmem, &nodeinfo, &id) != APR_SUCCESS)
return 500;
- int id = nodeinfo.mess.id;
/* Insert the Alias and corresponding Context */
phost = vhost;
@@ -373,7 +417,7 @@
for (i=0; i<size; i++) {
nodeinfo_t *ou;
get_node(nodestatsmem, &ou, id[i]);
- ap_rprintf(r, "node: %d %s %s %s %s %s\n", id[i], ou->mess.JVMRoute, ou->mess.Domain,
+ ap_rprintf(r, "node: [%d:%d] %s %s %s %s %s\n", id[i], ou->mess.id, ou->mess.JVMRoute, ou->mess.Domain,
ou->mess.Host, ou->mess.Port, ou->mess.Type);
}
@@ -456,7 +500,6 @@
ap_get_brigade(r->input_filters, input_brigade, AP_MODE_READBYTES, APR_BLOCK_READ,
sizeof(buff));
apr_brigade_flatten(input_brigade, buff, &bufsiz);
buff[bufsiz] = '\0';
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, "%s %s manager_handler buff %s", r->method, r->uri, buff);
decodeenc(buff);
if (strcasecmp(r->method, "CONFIG") == 0)
return(process_config(r, buff));
@@ -567,8 +610,10 @@
/* Process the request from the ModClusterService */
ap_hook_handler(manager_handler, NULL, NULL, APR_HOOK_MIDDLE);
- /* Register nodes table provider */
- ap_register_provider(p, "manager" , "shared", "0", &storage);
+ /* Register nodes/hosts/contexts table provider */
+ ap_register_provider(p, "manager" , "shared", "0", &node_storage);
+ ap_register_provider(p, "manager" , "shared", "1", &host_storage);
+ ap_register_provider(p, "manager" , "shared", "2", &context_storage);
}
/*
Modified: sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c
===================================================================
--- sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c 2008-03-19 15:41:20 UTC (rev 1449)
+++ sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c 2008-03-20 16:13:24 UTC (rev 1450)
@@ -1,5 +1,5 @@
/*
- * ALOHA - Apache Httpd Native Java Library
+ * mod_cluster
*
* Copyright(c) 2007 Red Hat Middleware, LLC,
* and individual contributors as indicated by the @authors tag.
@@ -39,10 +39,14 @@
#include "mod_proxy.h"
#include "node.h"
+#include "host.h"
+#include "context.h"
static proxy_balancer_method *lbrprovider = NULL;
static proxy_balancer_method *lbtprovider = NULL;
-static struct node_storage_method *storage = NULL;
+static struct node_storage_method *node_storage = NULL;
+static struct host_storage_method *host_storage = NULL;
+static struct context_storage_method *context_storage = NULL;
/*
* Create/Get the worker before using it
@@ -75,8 +79,9 @@
proxy_worker = (struct proxy_worker *) *worker;
/* Get the shared memory for this worker */
ptr = (char *) node;
- ptr = ptr + node->status;
+ ptr = ptr + node->offset;
proxy_worker->s = (proxy_worker_stat *) ptr;
+ proxy_worker->id = node->mess.id;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"Created: worker for %s", url);
proxy_worker->s->status = PROXY_WORKER_INITIALIZED;
@@ -101,8 +106,8 @@
/* read the ident of the nodes */
- ids = apr_pcalloc(r->pool, sizeof(int) * storage->get_max_size_node());
- num = storage->get_ids_used_node(ids);
+ ids = apr_pcalloc(r->pool, sizeof(int) * node_storage->get_max_size_node());
+ num = node_storage->get_ids_used_node(ids);
/* XXX: How to skip the balancer that aren't controled by mod_manager */
if (conf->workers->nelts<num) {
/* There are more workers in shared area than in the local tables */
@@ -125,7 +130,7 @@
if (ids[j]) {
/* read the node and create the worker */
nodeinfo_t *node;
- storage->read_node(ids[j], &node);
+ node_storage->read_node(ids[j], &node);
proxy_balancer *balancer = ap_proxy_get_balancer(r->pool, conf,
apr_pstrcat(r->pool, "balancer://", node->balancer,
NULL));
if (balancer) {
@@ -137,37 +142,150 @@
}
}
}
-}
+}
+/*
+ * Check that the worker will handle the host/context.
+ * The id of the worker is used to find the (slot) node in the shared
+ * memory.
+ */
+static int iscontext_host_ok(request_rec *r, proxy_balancer *balancer,
+ proxy_worker *worker)
+{
+ nodeinfo_t *node;
+
+ node_storage->read_node(worker->id, &node);
+
+ /*
+ * check the hosts and contexts
+ * A node may have several virtual hosts and
+ * each virtual hosts may have several context
+ */
+ int sizevhost = host_storage->get_max_size_host();
+ int *vhosts = apr_palloc(r->pool, sizeof(int)*sizevhost);
+ sizevhost = host_storage->get_ids_used_host(vhosts);
+ int i;
+ for (i=0; i<sizevhost; i++) {
+ hostinfo_t *vhost;
+ host_storage->read_host(vhosts[i], &vhost);
+ if (vhost->node == node->mess.id) {
+ /* XXX Check the virtual host */
+
+ /* Check the contexts */
+ int sizecontext = context_storage->get_max_size_context();
+ int *contexts = apr_palloc(r->pool, sizeof(int)*sizecontext);
+ sizecontext = context_storage->get_ids_used_context(contexts);
+ int j;
+ for (j=0; j<sizecontext; j++) {
+ contextinfo_t *context;
+ context_storage->read_context(contexts[j], &context);
+ if (context->vhost != vhost->vhost)
+ continue;
+
+ /* check for /context[/] in the URL */
+ int len = strlen(context->context);
+ if (strncmp(r->uri, context->context, len) == 0) {
+ if (r->uri[len] == '\0' || r->uri[len] == '/') {
+ /* XXX: Check status */
+ return 1;
+ }
+ }
+ }
+
+ }
+ }
+ return 0;
+}
+
+/*
+ * The ModClusterService from the cluster fills the lbfactor values.
+ * Our logic is a bit different from the mod_balancer one. We check the
+ * context and host to prevent routing to an application being redeployed or
+ * stopped in one node but not in others.
+ */
static proxy_worker *find_best_bytraffic(proxy_balancer *balancer,
request_rec *r)
{
+ int i;
+ apr_off_t mytraffic = 0;
+ apr_off_t curmin = 0;
+ proxy_worker *worker;
proxy_worker *mycandidate = NULL;
+ int cur_lbset = 0;
+ int max_lbset = 0;
+ int checking_standby;
+ int checked_standby;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: Entering bytraffic for BALANCER (%s)",
+ balancer->name);
+
/* create workers for new nodes */
create_workers_node(r);
- /* call mod_proxy_balancer method */
- mycandidate = lbtprovider->finder(balancer,r);
+ /* First try to see if we have available candidate */
+ do {
+ checking_standby = checked_standby = 0;
+ while (!mycandidate && !checked_standby) {
+ worker = (proxy_worker *)balancer->workers->elts;
+ for (i = 0; i < balancer->workers->nelts; i++, worker++) {
+ if (!checking_standby) { /* first time through */
+ if (worker->s->lbset > max_lbset)
+ max_lbset = worker->s->lbset;
+ }
+ if (worker->s->lbset > cur_lbset)
+ continue;
+ if ( (checking_standby ? !PROXY_WORKER_IS_STANDBY(worker) : PROXY_WORKER_IS_STANDBY(worker)) )
+ continue;
+ /* If the worker is in error state run
+ * retry on that worker. It will be marked as
+ * operational if the retry timeout is elapsed.
+ * The worker might still be unusable, but we try
+ * anyway.
+ */
+ if (!PROXY_WORKER_IS_USABLE(worker))
+ ap_proxy_retry_worker("BALANCER", worker, r->server);
+ /* Take into calculation only the workers that are
+ * not in error state or not disabled.
+ */
+ if (PROXY_WORKER_IS_USABLE(worker) && iscontext_host_ok(r, balancer, worker)) {
+ mytraffic = (worker->s->transferred/worker->s->lbfactor) +
+ (worker->s->read/worker->s->lbfactor);
+ if (!mycandidate || mytraffic < curmin) {
+ mycandidate = worker;
+ curmin = mytraffic;
+ }
+ }
+ }
+ checked_standby = checking_standby++;
+ }
+ cur_lbset++;
+ } while (cur_lbset <= max_lbset && !mycandidate);
+
return mycandidate;
}
-static int proxy_cluster_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
- apr_pool_t *ptemp)
-{
- lbtprovider = ap_lookup_provider(PROXY_LBMETHOD, "bytraffic", "0");
-
- return OK;
-}
-
static int proxy_cluster_post_config(apr_pool_t *p, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
{
- storage = ap_lookup_provider("manager" , "shared", "0");
- if (storage == NULL) {
+ node_storage = ap_lookup_provider("manager" , "shared", "0");
+ if (node_storage == NULL) {
ap_log_error(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, s,
- "proxy_cluster_post_config: Can't find mod_manager");
+ "proxy_cluster_post_config: Can't find mod_manager for nodes");
return !OK;
}
+ host_storage = ap_lookup_provider("manager" , "shared", "1");
+ if (host_storage == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, s,
+ "proxy_cluster_post_config: Can't find mod_manager for hosts");
+ return !OK;
+ }
+ context_storage = ap_lookup_provider("manager" , "shared", "2");
+ if (context_storage == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, s,
+ "proxy_cluster_post_config: Can't find mod_manager for contexts");
+ return !OK;
+ }
return OK;
}
@@ -181,11 +299,9 @@
static void proxy_cluster_hooks(apr_pool_t *p)
{
- ap_hook_pre_config(proxy_cluster_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_post_config(proxy_cluster_post_config, NULL, NULL, APR_HOOK_MIDDLE);
/* create the provider for the proxy logic */
- ap_register_provider(p, PROXY_LBMETHOD, "cluster_byrequests", "0", &byrequests);
ap_register_provider(p, PROXY_LBMETHOD, "cluster_bytraffic", "0", &bytraffic);
}
static void *create_proxy_cluster_dir_config(apr_pool_t *p, char *dir)