Author: jfrederic.clere(a)jboss.com
Date: 2008-10-07 10:23:14 -0400 (Tue, 07 Oct 2008)
New Revision: 1941
Modified:
trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c
Log:
Also use the information between the timestamps to share the load.
Modified: trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c
===================================================================
--- trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c 2008-10-07 09:36:13 UTC
(rev 1940)
+++ trunk/mod_cluster/native/mod_proxy_cluster/mod_proxy_cluster.c 2008-10-07 14:23:14 UTC
(rev 1941)
@@ -614,15 +614,12 @@
*/
-static char *get_balancer_by_node(request_rec *r, int nodeid, proxy_server_conf *conf,
proxy_balancer *balance)
+static char *get_balancer_by_node(request_rec *r, nodeinfo_t *node, proxy_server_conf
*conf, proxy_balancer *balance)
{
- nodeinfo_t *node;
int i;
int sizevhost;
int *vhosts;
- node_storage->read_node(nodeid, &node);
-
/*
* check the hosts and contexts
* A node may have several virtual hosts and
@@ -703,7 +700,9 @@
int *nodes = apr_palloc(r->pool, sizeof(int)*sizenode);
sizenode = node_storage->get_ids_used_node(nodes);
for (n=0; n<sizenode; n++) {
- char *ret = get_balancer_by_node(r, nodes[n], conf, NULL);
+ nodeinfo_t *node;
+ node_storage->read_node(nodes[n], &node);
+ char *ret = get_balancer_by_node(r, node, conf, NULL);
if (ret != NULL) {
return ret;
}
@@ -716,7 +715,7 @@
* memory.
* (See get_context_host_balancer too).
*/
-static int iscontext_host_ok(request_rec *r, proxy_balancer *balancer, int node)
+static int iscontext_host_ok(request_rec *r, proxy_balancer *balancer, nodeinfo_t *node)
{
char *balancername = get_balancer_by_node(r, node, NULL, balancer);
if (balancername != NULL) {
@@ -728,16 +727,14 @@
/*
* Check that the worker corresponds to a node that belongs to the same domain according
to the JVMRoute.
*/
-static int isnode_domain_ok(request_rec *r, proxy_worker *worker,
+static int isnode_domain_ok(request_rec *r, nodeinfo_t *node,
const char *domain)
{
- nodeinfo_t *ou;
- node_storage->read_node(worker->id, &ou);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "isnode_domain_ok: domain %s:%s", domain,
ou->mess.Domain);
+ "isnode_domain_ok: domain %s:%s", domain,
node->mess.Domain);
if (domain == NULL)
return 1; /* OK no domain in the corresponding to the SESSIONID */
- if (strcmp(ou->mess.Domain, domain) == 0)
+ if (strcmp(node->mess.Domain, domain) == 0)
return 1; /* OK */
return 0;
}
@@ -776,6 +773,7 @@
while (!mycandidate && !checked_standby) {
worker = (proxy_worker *)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++, worker++) {
+ nodeinfo_t *node;
if (worker->id == 0)
continue; /* marked removed */
@@ -796,10 +794,11 @@
* not in error state or not disabled.
* and that can map the context.
*/
- if (PROXY_WORKER_IS_USABLE(worker) && iscontext_host_ok(r, balancer,
worker->id)) {
+ node_storage->read_node(worker->id, &node);
+ if (PROXY_WORKER_IS_USABLE(worker) && iscontext_host_ok(r, balancer,
node)) {
if (!checked_domain) {
/* First try only nodes in the domain */
- if (!isnode_domain_ok(r, worker, domain)) {
+ if (!isnode_domain_ok(r, node, domain)) {
continue;
}
}
@@ -807,10 +806,19 @@
mycandidate = worker;
break; /* Done */
} else {
- if (!mycandidate
- || worker->s->busy < mycandidate->s->busy
- || (worker->s->busy == mycandidate->s->busy
&& worker->s->lbstatus < mycandidate->s->lbstatus)) {
+ if (!mycandidate)
mycandidate = worker;
+ else {
+ nodeinfo_t *node1;
+ int lbstatus, lbstatus1;
+
+ node_storage->read_node(mycandidate->id, &node1);
+ lbstatus1 = ((mycandidate->s->elected -
node1->mess.oldelected) * 1000)/mycandidate->s->lbfactor;
+ lbstatus = ((worker->s->elected -
node->mess.oldelected) * 1000)/worker->s->lbfactor;
+ lbstatus1 = lbstatus1 + mycandidate->s->lbstatus;
+ lbstatus = lbstatus + worker->s->lbstatus;
+ if (lbstatus1> lbstatus)
+ mycandidate = worker;
}
}
}
@@ -1387,7 +1395,6 @@
if (candidate) {
proxy_cluster_helper *helper;
- candidate->s->elected++; /* mark it in use */
helper = (proxy_cluster_helper *) candidate->opaque;
helper->count_active++;
}