Author: jfrederic.clere(a)jboss.com
Date: 2008-05-09 08:04:46 -0400 (Fri, 09 May 2008)
New Revision: 1595
Modified:
sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c
Log:
Add the proxy_hook_pre_request() and corresponding cluster scheme logic
(copied from mod_proxy_balancer.c).
Modified: sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c
===================================================================
--- sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c 2008-05-07 07:49:47 UTC
(rev 1594)
+++ sandbox/httpd/src/native/mod_proxy_cluster/mod_proxy_cluster.c 2008-05-09 12:04:46 UTC
(rev 1595)
@@ -213,7 +213,7 @@
"create_workers_node can't read id %d",
ids[j]);
continue;
}
- char *name = apr_pstrcat(pool, "balancer://",
node->balancer, NULL);
+ char *name = apr_pstrcat(pool, "cluster://", node->balancer,
NULL);
proxy_balancer *balancer = ap_proxy_get_balancer(pool, conf, name);
if (!balancer) {
/* Create one */
@@ -615,7 +615,7 @@
ap_get_module_config(sconf, &proxy_module);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "proxy: Entering bytraffic for BALANCER (%s)",
+ "proxy: Entering bytraffic for CLUSTER (%s)",
balancer->name);
/* create workers for new nodes */
@@ -1111,7 +1111,7 @@
char *balancer = get_context_host_balancer(r);
if (balancer) {
- r->filename = apr_pstrcat(r->pool, "proxy:balancer://",
balancer, r->uri, NULL);
+ r->filename = apr_pstrcat(r->pool, "proxy:cluster://", balancer,
r->uri, NULL);
r->handler = "proxy-server";
r->proxyreq = PROXYREQ_REVERSE;
ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, r->server,
@@ -1124,6 +1124,377 @@
}
/*
+ * canonise the url
+ */
+/*
+ * Canonicalise the url for this module (proxy_hook_canon_handler).
+ * The rest of this commit registers the balancers under the
+ * "cluster://" scheme (see create_workers_node / proxy_cluster_trans),
+ * so match that scheme here instead of "balancer:" — otherwise this
+ * handler declines and nothing canonicalises cluster:// urls.
+ */
+static int proxy_cluster_canon(request_rec *r, char *url)
+{
+    if (strncasecmp(url, "cluster:", 8) == 0) {
+        /* Was a bare "OK;" statement (no effect) and the function fell
+         * off the end without returning a value: undefined behavior. */
+        return OK; /* XXX: need more (host/port/path canonicalisation) */
+    }
+    return DECLINED;
+}
+
+/* Find the worker of the balancer that has the given 'route' defined.
+ * Regular workers are searched first, then hot-standby workers.  A
+ * matching worker in error state gets a retry; if it is still unusable
+ * its configured redirect worker is tried as a fallback.
+ * Returns NULL when no usable worker owns the route.
+ */
+static proxy_worker *find_route_worker(proxy_balancer *balancer,
+ const char *route, request_rec *r)
+{
+ int i;
+ int checking_standby;
+ int checked_standby;
+
+ proxy_worker *worker;
+
+ checking_standby = checked_standby = 0;
+ /* Pass 1 (checking_standby == 0): non-standby workers only;
+  * pass 2 (checking_standby == 1): standby workers only. */
+ while (!checked_standby) {
+ worker = (proxy_worker *)balancer->workers->elts;
+ for (i = 0; i < balancer->workers->nelts; i++, worker++) {
+ /* Skip workers that do not belong to the current pass. */
+ if ( (checking_standby ? !PROXY_WORKER_IS_STANDBY(worker) :
 PROXY_WORKER_IS_STANDBY(worker)) )
+ continue;
+ if (*(worker->s->route) && strcmp(worker->s->route,
route) == 0) {
+ if (worker && PROXY_WORKER_IS_USABLE(worker)) {
+ return worker;
+ } else {
+ /*
+ * If the worker is in error state run
+ * retry on that worker. It will be marked as
+ * operational if the retry timeout is elapsed.
+ * The worker might still be unusable, but we try
+ * anyway.
+ */
+ ap_proxy_retry_worker("BALANCER", worker, r->server);
+ if (PROXY_WORKER_IS_USABLE(worker)) {
+ return worker;
+ } else {
+ /*
+ * We have a worker that is unusable.
+ * It can be in error or disabled, but in case
+ * it has a redirection set use that redirection worker.
+ * This enables to safely remove the member from the
+ * balancer. Of course you will need some kind of
+ * session replication between those two remote.
+ */
+ if (*worker->s->redirect) {
+ proxy_worker *rworker = NULL;
+ /* Recursive lookup: treat the redirect value as a
+  * route and find the worker owning it. */
+ rworker = find_route_worker(balancer,
worker->s->redirect, r);
+ /* Check if the redirect worker is usable */
+ if (rworker && !PROXY_WORKER_IS_USABLE(rworker)) {
+ /*
+ * If the worker is in error state run
+ * retry on that worker. It will be marked as
+ * operational if the retry timeout is elapsed.
+ * The worker might still be unusable, but we try
+ * anyway.
+ */
+ ap_proxy_retry_worker("BALANCER", rworker,
r->server);
+ }
+ if (rworker && PROXY_WORKER_IS_USABLE(rworker))
+ return rworker;
+ }
+ }
+ }
+ }
+ }
+ /* First iteration: flip to the standby pass; second iteration:
+  * checked_standby becomes 1 and the loop terminates. */
+ checked_standby = checking_standby++;
+ }
+ return NULL;
+}
+/*
+ * Extract the sticky-session route from the request — from a path
+ * parameter in *url, or failing that from a cookie — and map it to the
+ * worker owning that route via find_route_worker().
+ * On return *route and *sticky_used are filled in (or left NULL).
+ * Returns the matched worker, or NULL when the balancer has no sticky
+ * session configured or no route could be resolved.
+ */
+static proxy_worker *find_session_route(proxy_balancer *balancer,
+ request_rec *r,
+ char **route,
+ char **sticky_used,
+ char **url)
+{
+ proxy_worker *worker = NULL;
+ char *sticky, *sticky_path, *path;
+
+ if (!balancer->sticky)
+ return NULL;
+ /* balancer->sticky may be "cookie|path-param"; split on '|'. */
+ sticky = sticky_path = apr_pstrdup(r->pool, balancer->sticky);
+ if ((path = strchr(sticky, '|'))) {
+ *path++ = '\0';
+ sticky_path = path;
+ }
+
+ /* Try to find the sticky route inside url */
+ *sticky_used = sticky_path;
+ *route = get_path_param(r->pool, *url, sticky_path);
+ if (!*route) {
+ /* Not in the url: fall back to the session cookie. */
+ *route = get_cookie_param(r, sticky);
+ *sticky_used = sticky;
+ }
+ /* NOTE(review): *route may still be NULL here when passed to %s —
+  * presumably apr's formatter tolerates that; verify. */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CLUSTER: Found value %s for "
+ "stickysession %s", *route, balancer->sticky);
+ /*
+ * If we found a value for stickysession, find the first '.' within.
+ * Everything after '.' (if present) is our route.
+ */
+ if ((*route) && ((*route = strchr(*route, '.')) != NULL ))
+ (*route)++;
+ if ((*route) && (**route)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CLUSTER: Found route %s", *route);
+ /* We have a route in path or in cookie
+ * Find the worker that has this route defined.
+ */
+ worker = find_route_worker(balancer, *route, r);
+ if (worker && strcmp(*route, worker->s->route)) {
+ /*
+ * Notice that the route of the worker chosen is different from
+ * the route supplied by the client.
+ */
+ apr_table_setn(r->subprocess_env, "BALANCER_ROUTE_CHANGED",
"1");
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CLUSTER: Route changed from %s to %s",
+ *route, worker->s->route);
+ }
+ return worker;
+ }
+ else
+ return NULL;
+}
+
+/*
+ * Elect the best usable worker of the balancer via the configured
+ * lbmethod's finder, under the balancer thread lock.
+ * Returns NULL when every worker is in error state or disabled; in that
+ * case, if balancer->timeout is set, the lookup is retried (recursively)
+ * in small sleep steps for up to that long before giving up.
+ */
+static proxy_worker *find_best_worker(proxy_balancer *balancer,
+                                      request_rec *r)
+{
+    proxy_worker *candidate = NULL;
+    apr_status_t rv;
+
+    if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+                     "proxy: CLUSTER: (%s). Lock failed for find_best_worker()",
+                     balancer->name);
+        return NULL;
+    }
+
+    candidate = (*balancer->lbmethod->finder)(balancer, r);
+
+    if (candidate)
+        candidate->s->elected++;
+
+    /* (Removed leftover commented-out debug code that unlocked and
+     * returned NULL here.) */
+    if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+                     "proxy: CLUSTER: (%s). Unlock failed for find_best_worker()",
+                     balancer->name);
+    }
+
+    if (candidate == NULL) {
+        /* All the workers are in error state or disabled.
+         * If the balancer has a timeout sleep for a while
+         * and try again to find the worker. The chances are
+         * that some other thread will release a connection.
+         * By default the timeout is not set, and the server
+         * returns SERVER_BUSY.
+         */
+#if APR_HAS_THREADS
+        if (balancer->timeout) {
+            /* XXX: This can perhaps be built using some
+             * smarter mechanism, like thread_cond.
+             * But since the statuses can come from
+             * different children, use the provided algo.
+             */
+            apr_interval_time_t timeout = balancer->timeout;
+            apr_interval_time_t step, tval = 0;
+            /* Set the timeout to 0 so that we don't
+             * end in infinite loop
+             */
+            balancer->timeout = 0;
+            step = timeout / 100;
+            while (tval < timeout) {
+                apr_sleep(step);
+                /* Try again */
+                if ((candidate = find_best_worker(balancer, r)))
+                    break;
+                tval += step;
+            }
+            /* restore the timeout */
+            balancer->timeout = timeout;
+        }
+#endif
+    }
+    return candidate;
+}
+
+/*
+ * Rewrite *url so that the balancer's fictional "scheme://name" prefix
+ * is replaced by the elected worker's name (scheme://host[:port]),
+ * keeping everything from the first '/' after the scheme.
+ * Returns OK, or HTTP_BAD_REQUEST via ap_proxyerror() when no worker
+ * was elected.
+ * NOTE(review): when the url has no '/' after "://", path stays NULL and
+ * apr_pstrcat() stops at the NULL argument, so *url becomes just
+ * worker->name (no trailing path) — confirm this is intended.
+ */
+static int rewrite_url(request_rec *r, proxy_worker *worker,
+ char **url)
+{
+ const char *scheme = strstr(*url, "://");
+ const char *path = NULL;
+
+ if (scheme)
+ path = ap_strchr_c(scheme + 3, '/');
+
+ /* we break the URL into host, port, uri */
+ if (!worker) {
+ return ap_proxyerror(r, HTTP_BAD_REQUEST, apr_pstrcat(r->pool,
+ "missing worker. URI cannot be parsed: ", *url,
+ NULL));
+ }
+
+ *url = apr_pstrcat(r->pool, worker->name, path, NULL);
+
+ return OK;
+}
+
+/*
+ * proxy_hook_pre_request() implementation: find the balancer for *url,
+ * elect the worker that will process this request (sticky route first,
+ * then the lbmethod's best worker), export balancer/worker/session info
+ * into subprocess_env, and rewrite *url to point at the elected worker.
+ */
+static int proxy_cluster_pre_request(proxy_worker **worker,
+ proxy_balancer **balancer,
+ request_rec *r,
+ proxy_server_conf *conf, char **url)
+{
+ int access_status;
+ proxy_worker *runtime;
+ char *route = NULL;
+ char *sticky = NULL;
+ apr_status_t rv;
+
+ *worker = NULL;
+ /* Step 1: check if the url is for us
+ * The url we can handle starts with 'cluster://'
+ * (this module registers its balancers under that scheme).
+ * If balancer is already provided skip the search
+ * for balancer, because this is failover attempt.
+ */
+ if (!*balancer &&
+ !(*balancer = ap_proxy_get_balancer(r->pool, conf, *url)))
+ return DECLINED;
+
+ /* Step 2: find the session route */
+
+ runtime = find_session_route(*balancer, r, &route, &sticky, url);
+ /* Lock the LoadBalancer
+ * XXX: perhaps we need the process lock here
+ */
+ if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "proxy: CLUSTER: (%s). Lock failed for pre_request",
+ (*balancer)->name);
+ return DECLINED;
+ }
+ if (runtime) {
+ int i, total_factor = 0;
+ proxy_worker *workers;
+ /* We have a sticky load balancer
+ * Update the workers status
+ * so that even session routes are
+ * taken into account.
+ */
+ workers = (proxy_worker *)(*balancer)->workers->elts;
+ for (i = 0; i < (*balancer)->workers->nelts; i++) {
+ /* Take into calculation only the workers that are
+ * not in error state or not disabled.
+ *
+ * TODO: Abstract the below, since this is dependent
+ * on the LB implementation
+ */
+ if (PROXY_WORKER_IS_USABLE(workers)) {
+ workers->s->lbstatus += workers->s->lbfactor;
+ total_factor += workers->s->lbfactor;
+ }
+ workers++;
+ }
+ /* Charge the elected worker so the lb scores stay balanced. */
+ runtime->s->lbstatus -= total_factor;
+ runtime->s->elected++;
+
+ *worker = runtime;
+ }
+ else if (route && (*balancer)->sticky_force) {
+ int i, member_of = 0;
+ proxy_worker *workers;
+ /*
+ * We have a route provided that doesn't match the
+ * balancer name. See if the provided route is a
+ * member of the same balancer in which case return 503
+ */
+ workers = (proxy_worker *)(*balancer)->workers->elts;
+ for (i = 0; i < (*balancer)->workers->nelts; i++) {
+ if (*(workers->s->route) && strcmp(workers->s->route,
route) == 0) {
+ member_of = 1;
+ break;
+ }
+ workers++;
+ }
+ if (member_of) {
+ /* sticky_force: refuse to fail over to another route. */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "proxy: CLUSTER: (%s). All workers are in error state for
route (%s)",
+ (*balancer)->name, route);
+ if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "proxy: CLUSTER: (%s). Unlock failed for
pre_request",
+ (*balancer)->name);
+ }
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+ }
+
+ if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "proxy: CLUSTER: (%s). Unlock failed for pre_request",
+ (*balancer)->name);
+ }
+ /* No sticky worker elected: fall back to the lbmethod's choice. */
+ if (!*worker) {
+ runtime = find_best_worker(*balancer, r);
+ if (!runtime) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "proxy: CLUSTER: (%s). All workers are in error
state",
+ (*balancer)->name);
+
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+ if ((*balancer)->sticky && runtime) {
+ /*
+ * This balancer has sticky sessions and the client either has not
+ * supplied any routing information or all workers for this route
+ * including possible redirect and hotstandby workers are in error
+ * state, but we have found another working worker for this
+ * balancer where we can send the request. Thus notice that we have
+ * changed the route to the backend.
+ */
+ apr_table_setn(r->subprocess_env, "BALANCER_ROUTE_CHANGED",
"1");
+ }
+ *worker = runtime;
+ }
+
+ /* Add balancer/worker info to env. */
+ apr_table_setn(r->subprocess_env,
+ "BALANCER_NAME", (*balancer)->name);
+ apr_table_setn(r->subprocess_env,
+ "BALANCER_WORKER_NAME", (*worker)->name);
+ apr_table_setn(r->subprocess_env,
+ "BALANCER_WORKER_ROUTE", (*worker)->s->route);
+
+ /* Rewrite the url from 'cluster://url'
+ * to the 'worker_scheme://worker_hostname[:worker_port]/url'
+ * This replaces the balancers fictional name with the
+ * real hostname of the elected worker.
+ */
+ access_status = rewrite_url(r, *worker, url);
+ /* Add the session route to request notes if present */
+ if (route) {
+ apr_table_setn(r->notes, "session-sticky", sticky);
+ apr_table_setn(r->notes, "session-route", route);
+
+ /* Add session info to env. */
+ apr_table_setn(r->subprocess_env,
+ "BALANCER_SESSION_STICKY", sticky);
+ apr_table_setn(r->subprocess_env,
+ "BALANCER_SESSION_ROUTE", route);
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "proxy: CLUSTER (%s) worker (%s) rewritten to %s",
+ (*balancer)->name, (*worker)->name, *url);
+
+ return access_status;
+}
+
+/*
* Register the hooks on our module.
*/
static void proxy_cluster_hooks(apr_pool_t *p)
@@ -1141,6 +1512,10 @@
/* check the url and give the mapping to mod_proxy */
ap_hook_translate_name(proxy_cluster_trans, aszPre, aszSucc, APR_HOOK_FIRST);
+
+ proxy_hook_canon_handler(proxy_cluster_canon, NULL, NULL, APR_HOOK_FIRST);
+
+ proxy_hook_pre_request(proxy_cluster_pre_request, NULL, NULL, APR_HOOK_FIRST);
}
static void *create_proxy_cluster_dir_config(apr_pool_t *p, char *dir)