[jboss-cvs] JBossAS SVN: r84207 - in trunk: cluster/src/main/org/jboss/ha/framework/server/lock and 5 other directories.
jboss-cvs-commits at lists.jboss.org
jboss-cvs-commits at lists.jboss.org
Sat Feb 14 08:47:54 EST 2009
Author: bstansberry at jboss.com
Date: 2009-02-14 08:47:54 -0500 (Sat, 14 Feb 2009)
New Revision: 84207
Added:
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/AbstractClusterLockManager.java
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ClusterLockManager.java
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ExclusiveClusterLockManager.java
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LocalLockHandler.java
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LockState.java
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ReadWriteClusterLockManager.java
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/RemoteLockResponse.java
trunk/cluster/src/main/org/jboss/ha/framework/server/lock/TimeoutException.java
trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ExclusiveClusteredLockManagerUnitTestCase.java
trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ReadWriteClusteredLockManagerUnitTestCase.java
trunk/testsuite/src/main/org/jboss/test/cluster/lock/
trunk/testsuite/src/main/org/jboss/test/cluster/lock/ClusteredLockManagerTestBase.java
Modified:
trunk/component-matrix/pom.xml
trunk/testsuite/.classpath
trunk/testsuite/build.xml
Log:
[JBAS-5552] Cluster locking support
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/AbstractClusterLockManager.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/AbstractClusterLockManager.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/AbstractClusterLockManager.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,567 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.ha.framework.server.lock;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.Vector;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+import org.jboss.ha.framework.interfaces.HAPartition;
+import org.jboss.ha.framework.interfaces.HAPartition.HAMembershipListener;
+import org.jboss.ha.framework.server.lock.LockState.State;
+import org.jboss.logging.Logger;
+
+/**
+ * Base class for cluster-wide lock implementations.
+ *
+ * @author Brian Stansberry
+ */
+public abstract class AbstractClusterLockManager implements HAMembershipListener, ClusterLockManager
+{
+ public static final Class<?>[] REMOTE_LOCK_TYPES = new Class[]{Serializable.class, ClusterNode.class, long.class};
+ public static final Class<?>[] RELEASE_REMOTE_LOCK_TYPES = new Class[]{Serializable.class, ClusterNode.class};
+
+ /**
+ * Object the HAPartition can invoke on. This class is static as
+ * an aid in unit testing.
+ */
+ public static class RpcTarget
+ {
+ private final AbstractClusterLockManager mgr;
+
+ private RpcTarget(AbstractClusterLockManager mgr)
+ {
+ this.mgr = mgr;
+ }
+
+ public RemoteLockResponse remoteLock(Serializable categoryName, ClusterNode caller, long timeout)
+ {
+ return mgr.remoteLock(categoryName, caller, timeout);
+ }
+
+ public void releaseRemoteLock(Serializable categoryName, ClusterNode caller)
+ {
+ mgr.releaseRemoteLock(categoryName, caller);
+ }
+ }
+
+ protected final Logger log = Logger.getLogger(getClass());
+
+ private final ConcurrentMap<Serializable, LockState> lockStates = new ConcurrentHashMap<Serializable, LockState>();
+ private final ConcurrentMap<ClusterNode, Set<LockState>> lockStatesByOwner = new ConcurrentHashMap<ClusterNode, Set<LockState>>();
+ private ClusterNode me;
+ private final String serviceHAName;
+ private final HAPartition partition;
+ private final LocalLockHandler localHandler;
+ private final List<ClusterNode> members = new CopyOnWriteArrayList<ClusterNode>();
+// private final boolean supportLocalOnly;
+ private RpcTarget rpcTarget;
+
+ public AbstractClusterLockManager(String serviceHAName,
+ HAPartition partition,
+ LocalLockHandler handler)
+ {
+ if (serviceHAName == null)
+ {
+ throw new IllegalArgumentException("serviceHAName is null");
+ }
+ if (partition == null)
+ {
+ throw new IllegalArgumentException("partition is null");
+ }
+ if (handler == null)
+ {
+ throw new IllegalArgumentException("localHandler is null");
+ }
+
+ this.partition = partition;
+ this.localHandler = handler;
+ this.serviceHAName = serviceHAName;
+ }
+
+ // -------------------------------------------------------------- Properties
+
+ public HAPartition getPartition()
+ {
+ return partition;
+ }
+
+ public String getServiceHAName()
+ {
+ return serviceHAName;
+ }
+
+ public LocalLockHandler getLocalHandler()
+ {
+ return localHandler;
+ }
+
+ // ------------------------------------------------------ ClusterLockManager
+
+ public boolean lock(Serializable lockId, long timeout)
+ {
+ if (this.rpcTarget == null)
+ {
+ throw new IllegalStateException("Must call start() before first call to lock()");
+ }
+ LockState category = getCategory(lockId, true);
+
+ long left = timeout > 0 ? timeout : Long.MAX_VALUE;
+ long start = System.currentTimeMillis();
+ while (left > 0)
+ {
+ // Another node we lost to who should take precedence
+ // over ourself in competition for the lock
+ ClusterNode superiorCompetitor = null;
+
+ // Only continue if category is unlocked
+ if (category.state.compareAndSet(LockState.State.UNLOCKED, LockState.State.REMOTE_LOCKING))
+ {
+ // Category state is now REMOTE_LOCKING, so other nodes will fail
+ // in attempts to acquire on this node unless the caller is "superior"
+
+ boolean success = false;
+ try
+ {
+ // Get the lock on all other nodes in the cluster
+
+ @SuppressWarnings("unchecked")
+ ArrayList rsps = partition.callMethodOnCluster(getServiceHAName(),
+ "remoteLock", new Object[]{lockId, me, new Long(left)},
+ REMOTE_LOCK_TYPES, true);
+
+ boolean remoteLocked = true;
+ if (rsps != null)
+ {
+ for (Object rsp : rsps)
+ {
+ if ((rsp instanceof RemoteLockResponse) == false)
+ {
+ remoteLocked = false;
+ }
+ else if (((RemoteLockResponse) rsp).flag != RemoteLockResponse.Flag.OK)
+ {
+ remoteLocked = false;
+ if (superiorCompetitor == null)
+ {
+ superiorCompetitor = getSuperiorCompetitor(((RemoteLockResponse) rsp).holder);
+ }
+ }
+ }
+ }
+ else if (members.size() ==0 || (members.size() == 1 && members.contains(me)))
+ {
+ // no peers
+ remoteLocked = true;
+ }
+
+ if (remoteLocked)
+ {
+ // Bail if someone else locked our node while we were locking
+ // others.
+ if (category.state.compareAndSet(LockState.State.REMOTE_LOCKING, LockState.State.LOCAL_LOCKING))
+ {
+ // Now we are in LOCAL_LOCKING phase which will cause
+ // us to reject incoming remote requests
+
+ // Now see if we can lock our own node
+ long localTimeout = left - (System.currentTimeMillis() - start);
+ if (getLock(lockId, category, me, localTimeout).flag == RemoteLockResponse.Flag.OK)
+ {
+ success = true;
+ return true;
+ }
+ // else either 1) there was a race with a remote caller or
+ // 2) some other activity locally is preventing our
+ // acquisition of the local lock. Either way back off
+ // and then retry
+ }
+
+ // Find out if we couldn't acquire because someone with
+ // precedence has the lock
+ superiorCompetitor = getSuperiorCompetitor(category.getHolder());
+ }
+ }
+ catch (RuntimeException e)
+ {
+ throw e;
+ }
+ catch (Exception e)
+ {
+ throw new RuntimeException(e);
+ }
+ finally
+ {
+ if (!success)
+ {
+ cleanup(lockId, category);
+ }
+ }
+ }
+
+ // We failed for some reason. Pause to let things clear before trying again.
+ // If we've detected we are competing with someone else who is
+ // "superior" pause longer to let them proceed first.
+ long backoff = computeBackoff(timeout, start, left, superiorCompetitor == null);
+ if (backoff > 0)
+ {
+ try
+ {
+ Thread.sleep(backoff);
+ }
+ catch (InterruptedException e)
+ {
+ Thread.currentThread().interrupt();
+ break;
+ }
+ }
+
+ if (category.state.get() == LockState.State.INVALID)
+ {
+ // Someone invalidated our category; get a new one
+ category = getCategory(lockId, true);
+ }
+
+ long now = System.currentTimeMillis();
+ left -= (now - start);
+ start = now;
+ }
+
+ return false;
+ }
+
+ public String getPartitionName()
+ {
+ return partition.getPartitionName();
+ }
+
+ public ClusterNode getLocalClusterNode()
+ {
+ return this.me;
+ }
+
+ public List<ClusterNode> getCurrentView()
+ {
+ return new ArrayList<ClusterNode>(members);
+ }
+
+ public void start() throws Exception
+ {
+ this.me = this.partition.getClusterNode();
+ this.localHandler.setLocalNode(this.me);
+
+ this.rpcTarget = new RpcTarget(this);
+ this.partition.registerRPCHandler(this.serviceHAName, this.rpcTarget);
+ this.partition.registerMembershipListener(this);
+
+ @SuppressWarnings("unchecked")
+ Vector allMembers = this.partition.getCurrentView();
+ membershipChanged(new Vector<ClusterNode>(), allMembers, allMembers);
+ }
+
+ public void stop() throws Exception
+ {
+ if (this.rpcTarget != null)
+ {
+ this.partition.unregisterRPCHandler(this.serviceHAName, this.rpcTarget);
+ this.rpcTarget = null;
+ this.partition.unregisterMembershipListener(this);
+ Vector<ClusterNode> dead = new Vector<ClusterNode>(members);
+ Vector<ClusterNode> empty = new Vector<ClusterNode>();
+ membershipChanged(dead, empty, empty);
+ this.me = null;
+ }
+ }
+
+ // ---------------------------------------------------- HAMembershipListener
+
+ @SuppressWarnings("unchecked")
+ public synchronized void membershipChanged(Vector deadMembers, Vector newMembers, Vector allMembers)
+ {
+ this.members.clear();
+ this.members.addAll(allMembers);
+
+ Set<ClusterNode> toClean = lockStatesByOwner.keySet();
+ toClean.removeAll(this.members);
+ for (ClusterNode deadMember : toClean)
+ {
+ Set<LockState> deadMemberLocks = lockStatesByOwner.remove(deadMember);
+ if (deadMemberLocks != null)
+ {
+ synchronized (deadMemberLocks)
+ {
+ // We're going to iterate and make a call that removes from set,
+ // so iterate over a copy to avoid ConcurrentModificationException
+ Set<LockState> copy = new HashSet<LockState>(deadMemberLocks);
+ for (LockState lockState : copy)
+ {
+ releaseRemoteLock(lockState.lockId, (ClusterNode) deadMember);
+ }
+ }
+ }
+
+ }
+ }
+
+ // --------------------------------------------------------------- Protected
+
+ protected abstract RemoteLockResponse handleLockSuccess(Serializable categoryName, LockState category, ClusterNode caller);
+
+ protected abstract LockState getCategory(Serializable categoryName);
+
+ protected abstract RemoteLockResponse yieldLock(Serializable categoryName, LockState category, ClusterNode caller, long timeout);
+
+ protected void recordLockHolder(LockState category, ClusterNode caller)
+ {
+ if (category.holder != null)
+ {
+ Set<LockState> memberLocks = getLocksHeldByMember(category.holder);
+ synchronized (memberLocks)
+ {
+ memberLocks.remove(category);
+ }
+ }
+
+ if (me.equals(caller) == false)
+ {
+ Set<LockState> memberLocks = getLocksHeldByMember(caller);
+ synchronized (memberLocks)
+ {
+ memberLocks.add(category);
+ }
+ }
+
+ category.lock(caller);
+ }
+
+ protected LockState getCategory(Serializable categoryName, boolean create)
+ {
+ LockState category = lockStates.get(categoryName);
+ if (category == null && create)
+ {
+ category = new LockState(categoryName);
+ LockState existing = lockStates.putIfAbsent(categoryName, category);
+ if (existing != null)
+ {
+ category = existing;
+ }
+ }
+ return category;
+ }
+
+ protected void removeLockState(Serializable id, LockState category)
+ {
+ lockStates.remove(id, category);
+ }
+
+ // ----------------------------------------------------------------- Private
+
+ /**
+ * Called by a remote node via RpcTarget.
+ */
+ private RemoteLockResponse remoteLock(Serializable categoryName, ClusterNode caller, long timeout)
+ {
+ RemoteLockResponse response = null;
+ LockState category = getCategory(categoryName);
+ if (category == null)
+ {
+ // unknown == OK
+ return new RemoteLockResponse(RemoteLockResponse.Flag.OK);
+ }
+
+ switch (category.state.get())
+ {
+ case UNLOCKED:
+ // Remote callers can race for the local lock
+ response = getLock(categoryName, category, caller, timeout);
+ break;
+ case REMOTE_LOCKING:
+ if (me.equals(caller))
+ {
+ log.warn("Received remoteLock call from self");
+ response = new RemoteLockResponse(RemoteLockResponse.Flag.OK);
+ }
+ else if (getSuperiorCompetitor(caller) == null)
+ {
+ // I want the lock and I take precedence
+ response = new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, me);
+ }
+ else
+ {
+ // I want the lock but caller takes precedence; let
+ // them acquire local lock and I'll fail
+ response = getLock(categoryName, category, caller, timeout);
+ }
+ break;
+ case LOCAL_LOCKING:
+ // I've gotten OK responses from everyone and am about
+ // to acquire local lock, so reject caller
+ response = new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, me);
+ break;
+ case LOCKED:
+ // See if I have the lock and will give it up
+ response = yieldLock(categoryName, category, caller, timeout);
+ break;
+ case INVALID:
+ // We've given up the lock since we got the category and the
+ // thread that caused that is trying to discard the unneeded
+ // category.
+ // Call in again and see what our current state is
+ response = remoteLock(categoryName, caller, timeout);
+ break;
+ }
+
+ return response;
+ }
+
+ /**
+ * Called by a remote node via RpcTarget.
+ */
+ private void releaseRemoteLock(Serializable categoryName, ClusterNode caller)
+ {
+ LockState category = getCategory(categoryName, false);
+ if (category != null && category.state.get() == State.LOCKED)
+ {
+ if (caller.equals(localHandler.getLockHolder(categoryName)))
+ {
+ // Throw away the category as a cleanup exercise
+ category.invalidate();
+ localHandler.releaseLock(categoryName, caller);
+ Set<LockState> memberLocks = getLocksHeldByMember(caller);
+ synchronized (memberLocks)
+ {
+ memberLocks.remove(category);
+ }
+ removeLockState(categoryName, category);
+ }
+ }
+
+ }
+
+ /** See if <code>caller</code> comes before us in the members list */
+ private ClusterNode getSuperiorCompetitor(ClusterNode caller)
+ {
+ if (caller == null)
+ return null;
+
+ for (ClusterNode node : members)
+ {
+ if (me.equals(node))
+ {
+ break;
+ }
+ else if (caller.equals(node))
+ {
+ return caller;
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Always call this with a lock on the Category.
+ */
+ protected RemoteLockResponse getLock(Serializable categoryName, LockState category,
+ ClusterNode caller, long timeout)
+ {
+ RemoteLockResponse response;
+ try
+ {
+ localHandler.tryLock(categoryName, caller, timeout);
+ }
+ catch (InterruptedException e)
+ {
+ Thread.currentThread().interrupt();
+ log.error("Caught InterruptedException; Failing request by " + caller + " to lock " + categoryName);
+ return new RemoteLockResponse(RemoteLockResponse.Flag.FAIL, localHandler.getLockHolder(categoryName));
+ }
+ catch (TimeoutException t)
+ {
+ return new RemoteLockResponse(RemoteLockResponse.Flag.FAIL, t.getOwner());
+ }
+
+ response = handleLockSuccess(categoryName, category, caller);
+ return response;
+ }
+
+ private Set<LockState> getLocksHeldByMember(ClusterNode member)
+ {
+ Set<LockState> memberCategories = lockStatesByOwner.get(member);
+ if (memberCategories == null)
+ {
+ memberCategories = new HashSet<LockState>();
+ Set<LockState> existing = lockStatesByOwner.putIfAbsent(member, memberCategories);
+ if (existing != null)
+ {
+ memberCategories = existing;
+ }
+ }
+ return memberCategories;
+ }
+
+ /** Back out of a failed attempt by the local node to lock */
+ private void cleanup(Serializable categoryName, LockState category)
+ {
+ try
+ {
+ partition.callMethodOnCluster(getServiceHAName(), "releaseRemoteLock", new Object[]{categoryName, me}, RELEASE_REMOTE_LOCK_TYPES, true);
+ }
+ catch (RuntimeException e)
+ {
+ throw e;
+ }
+ catch (Exception e)
+ {
+ throw new RuntimeException("Failed releasing remote lock", e);
+ }
+ finally
+ {
+ if (category.state.compareAndSet(LockState.State.REMOTE_LOCKING, LockState.State.UNLOCKED) == false)
+ {
+ category.state.compareAndSet(LockState.State.LOCAL_LOCKING, LockState.State.UNLOCKED);
+ }
+ }
+ }
+
+ private static long computeBackoff(long initialTimeout, long start, long left, boolean superiorCompetitor)
+ {
+ long remain = left - (System.currentTimeMillis() - start);
+ // Don't spam the cluster
+ if (remain < Math.min(initialTimeout / 5, 15))
+ {
+ return remain;
+ }
+
+ long max = superiorCompetitor ? 100 : 250;
+ long min = remain / 3;
+ return Math.min(max, min);
+ }
+}
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/AbstractClusterLockManager.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ClusterLockManager.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ClusterLockManager.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ClusterLockManager.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,75 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.ha.framework.server.lock;
+
+import java.io.Serializable;
+
+/**
+ *
+ * @author Brian Stansberry
+ *
+ */
+public interface ClusterLockManager
+{
+ /**
+ * Acquire across the cluster the lock for the given category within the
+ * specified number of milliseconds
+ *
+ * @param lockId the id of the lock
+ * @param timeout max number of milliseconds to wait to acquire the lock
+ *
+ * @return <code>true</code> if the lock was acquired, <code>false</code>
+ * otherwise
+ */
+ boolean lock(Serializable lockId, long timeout);
+
+ /**
+ * Release across the cluster the lock for the given category.
+ *
+ * @param lockId the id of the lock
+ */
+ void unlock(Serializable lockId);
+
+ /**
+ * Gets the name of the cluster partition.
+ *
+ * @return the partition name. Will not return <code>null</code>.
+ */
+ String getPartitionName();
+
+ /**
+ * Bring this object into a state where it is ready to handle
+ * normal requests.
+ *
+ * @throws Exception
+ */
+ void start() throws Exception;
+
+ /**
+ * Perform any cleanup work. After this method is invoked normal lock/unlock
+ * requests should not be accepted.
+ *
+ * @throws Exception
+ */
+ void stop() throws Exception;
+
+}
\ No newline at end of file
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ClusterLockManager.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ExclusiveClusterLockManager.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ExclusiveClusterLockManager.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ExclusiveClusterLockManager.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,105 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.ha.framework.server.lock;
+
+import java.io.Serializable;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+import org.jboss.ha.framework.interfaces.HAPartition;
+
+/**
+ * TODO consider dividing this into two classes, one for the "supportsLocalOnly"
+ * usage and one for other usage.
+ *
+ * @author Brian Stansberry
+ */
+public class ExclusiveClusterLockManager extends AbstractClusterLockManager
+{
+ public ExclusiveClusterLockManager(String serviceHAName,
+ HAPartition partition,
+ LocalLockHandler handler)
+ {
+ super(serviceHAName, partition, handler);
+ }
+
+ // ------------------------------------------------------ ClusterLockManager
+
+ public void unlock(Serializable lockId)
+ {
+ ClusterNode myself = getLocalClusterNode();
+ if (myself == null)
+ {
+ throw new IllegalStateException("Must call start() before first call to unlock()");
+ }
+
+ LockState category = getCategory(lockId, false);
+
+ if (category != null && myself.equals(category.getHolder()))
+ {
+ category.invalidate();
+ getLocalHandler().releaseLock(lockId, myself);
+ removeLockState(lockId, category);
+ }
+ }
+
+ // --------------------------------------------------------------- Protected
+
+ @Override
+ protected LockState getCategory(Serializable categoryName)
+ {
+ return getCategory(categoryName, false);
+ }
+
+ @Override
+ protected RemoteLockResponse yieldLock(Serializable categoryName, LockState category, ClusterNode caller, long timeout)
+ {
+ if (getLocalClusterNode().equals(category.getHolder()))
+ {
+ return getLock(categoryName, category, caller, timeout);
+ }
+ else
+ {
+ return new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, category.getHolder());
+ }
+ }
+
+ @Override
+ protected RemoteLockResponse handleLockSuccess(Serializable categoryName, LockState category, ClusterNode caller)
+ {
+ if (getLocalClusterNode().equals(caller))
+ {
+ recordLockHolder(category, caller);
+ }
+ else
+ {
+ // Caller succeeded, but since this node doesn't hold the
+ // lock we don't want to hold the category in our map any longer
+ category.invalidate();
+ removeLockState(categoryName, category);
+ }
+ return new RemoteLockResponse(RemoteLockResponse.Flag.OK);
+ }
+
+
+ // ----------------------------------------------------------------- Private
+}
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ExclusiveClusterLockManager.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LocalLockHandler.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LocalLockHandler.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LocalLockHandler.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,80 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.ha.framework.server.lock;
+
+import java.io.Serializable;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+
+/**
+ * @author Brian Stansberry
+ *
+ */
+public interface LocalLockHandler
+{
+ /**
+ * Gets the node that holds the given lock on this node, or <code>null</code>
+ * if no node holds the lock on this node.
+ *
+ * @param lockName
+ * @return
+ */
+ ClusterNode getLockHolder(Serializable lockName);
+
+ /**
+ * Try to acquire the local lock within the given timeout. If this
+ * method returns successfully, the caller has acquired the lock.
+ *
+ * @param lockName the name of the lock.
+ * @param caller the node making the request
+ * @param timeout number of ms the caller will accept waiting before
+ * the lock acquisition should be considered a failure.
+ * A value less than one means wait as long as necessary.
+ *
+ * @return a flag indicating whether this LocalLockHandler is internally
+ * tracking the fact that <code>caller</code> holds the lock.
+ * If <code>false</code> is returned, the calling code can assume
+ * that the next call to this method with the same <code>lockName</code>
+ * but <strong>any</strong> <code>caller</code> value would succeed.
+ *
+ * @throws TimeoutException if the lock could not be acquired within the
+ * specified timeout
+ * @throws InterruptedException
+ */
+ boolean tryLock(Serializable lockName, ClusterNode caller, long timeout)
+ throws TimeoutException, InterruptedException;
+
+ /**
+ * Release the lock.
+ *
+ * @param lockName
+ * @param caller
+ * @return <code>true</code> if the caller held the lock, <code>false</code>
+ * otherwise.
+ */
+ boolean releaseLock(Serializable lockName, ClusterNode caller);
+
+ ClusterNode getLocalNode(ClusterNode localNode);
+
+ void setLocalNode(ClusterNode localNode);
+}
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LocalLockHandler.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LockState.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LockState.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LockState.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,86 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.ha.framework.server.lock;
+
+import java.io.Serializable;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+
+public class LockState
+{
+ /** Lock status of a category */
+ public enum State
+ {
+ /** No one has the lock and local node is not attempting to acquire */
+ UNLOCKED,
+ /** Local node is attempting to acquire lock across cluster */
+ REMOTE_LOCKING,
+ /** Local node has lock across cluster, now attempting locally */
+ LOCAL_LOCKING,
+ /** The lock is held locally */
+ LOCKED,
+ /**
+ * This object has been removed from the categories map and
+ * should be discarded
+ */
+ INVALID
+ }
+ final Serializable lockId;
+ final AtomicReference<LockState.State> state = new AtomicReference<LockState.State>(State.UNLOCKED);
+ ClusterNode holder;
+
+ LockState(Serializable lockId)
+ {
+ if (lockId == null)
+ {
+ throw new IllegalArgumentException("lockId is null");
+ }
+ this.lockId = lockId;
+ }
+
+ public synchronized ClusterNode getHolder()
+ {
+ return holder;
+ }
+
+ public synchronized void invalidate()
+ {
+ this.state.set(State.INVALID);
+ this.holder = null;
+ }
+
+ public synchronized void lock(ClusterNode holder)
+ {
+ this.state.set(State.LOCKED);
+ this.holder = holder;
+ }
+
+ public synchronized void release()
+ {
+ if (this.state.compareAndSet(State.LOCKED, State.UNLOCKED))
+ {
+ this.holder = null;
+ }
+ }
+}
\ No newline at end of file
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/LockState.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ReadWriteClusterLockManager.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ReadWriteClusterLockManager.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ReadWriteClusterLockManager.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,103 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.ha.framework.server.lock;
+
+import java.io.Serializable;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+import org.jboss.ha.framework.interfaces.HAPartition;
+
+/**
+ *
+ *
+ * @author Brian Stansberry
+ */
+public class ReadWriteClusterLockManager extends AbstractClusterLockManager
+{
+
+ public ReadWriteClusterLockManager(String serviceHAName,
+ HAPartition partition,
+ LocalLockHandler handler)
+ {
+ super(serviceHAName, partition, handler);
+ }
+
+ // ------------------------------------------------------ ClusterLockManager
+
+ public void unlock(Serializable lockId)
+ {
+ ClusterNode myself = getLocalClusterNode();
+ if (myself == null)
+ {
+ throw new IllegalStateException("Must call start() before first call to unlock()");
+ }
+
+ LockState category = getCategory(lockId, false);
+
+ if (category != null && myself.equals(category.getHolder()))
+ {
+ getLocalHandler().releaseLock(lockId, myself);
+ category.release();
+
+ try
+ {
+ getPartition().callMethodOnCluster(getServiceHAName(), "releaseRemoteLock",
+ new Object[]{lockId, myself},
+ RELEASE_REMOTE_LOCK_TYPES, true);
+ }
+ catch (RuntimeException e)
+ {
+ throw e;
+ }
+ catch (Exception e)
+ {
+ throw new RuntimeException("Failed releasing remote lock", e);
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------ Public
+
+ // --------------------------------------------------------------- Protected
+
+ @Override
+ protected LockState getCategory(Serializable categoryName)
+ {
+ return getCategory(categoryName, true);
+ }
+
+ @Override
+ protected RemoteLockResponse yieldLock(Serializable categoryName, LockState category, ClusterNode caller, long timeout)
+ {
+ return new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, category.getHolder());
+ }
+
+ @Override
+ protected RemoteLockResponse handleLockSuccess(Serializable categoryName, LockState category, ClusterNode caller)
+ {
+ recordLockHolder(category, caller);
+ return new RemoteLockResponse(RemoteLockResponse.Flag.OK);
+ }
+
+ // ----------------------------------------------------------------- Private
+}
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/ReadWriteClusterLockManager.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/RemoteLockResponse.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/RemoteLockResponse.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/RemoteLockResponse.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,42 @@
+package org.jboss.ha.framework.server.lock;
+
+import java.io.Serializable;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+
+/**
+ * Return value for a "remoteLock" call. This class is public as an
+ * aid in unit testing.
+ */
+public class RemoteLockResponse implements Serializable
+{
+ public enum Flag
+ {
+ /** Lock acquired on responding node */
+ OK,
+ /** Attempt to acquire local lock failed */
+ FAIL,
+ /**
+ * Request rejected either because lock is held or
+ * local node is attempting to acquire lock.
+ */
+ REJECT
+ }
+
+ /** The serialVersionUID */
+ private static final long serialVersionUID = -8878607946010425555L;
+
+ public final RemoteLockResponse.Flag flag;
+ public final ClusterNode holder;
+
+ public RemoteLockResponse(RemoteLockResponse.Flag flag)
+ {
+ this(flag, null);
+ }
+
+ public RemoteLockResponse(RemoteLockResponse.Flag flag, ClusterNode holder)
+ {
+ this.flag = flag;
+ this.holder = holder;
+ }
+}
\ No newline at end of file
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/RemoteLockResponse.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/TimeoutException.java
===================================================================
--- trunk/cluster/src/main/org/jboss/ha/framework/server/lock/TimeoutException.java (rev 0)
+++ trunk/cluster/src/main/org/jboss/ha/framework/server/lock/TimeoutException.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,49 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.ha.framework.server.lock;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+
+/**
+ * Thrown to indicate failure to acquire a lock within a specified timeout.
+ *
+ * @author Brian Stansberry
+ */
+public class TimeoutException extends Exception
+{
+ /** The serialVersionUID */
+ private static final long serialVersionUID = 7786468949269669386L;
+
+ private ClusterNode owner;
+
+ public TimeoutException(ClusterNode owner)
+ {
+ super("Unabled to acquire lock as it is held by " + (owner == null ? "unknown" : owner));
+ this.owner = owner;
+ }
+
+ public ClusterNode getOwner()
+ {
+ return owner;
+ }
+}
Property changes on: trunk/cluster/src/main/org/jboss/ha/framework/server/lock/TimeoutException.java
___________________________________________________________________
Name: svn:keywords
+
Modified: trunk/component-matrix/pom.xml
===================================================================
--- trunk/component-matrix/pom.xml 2009-02-14 11:35:21 UTC (rev 84206)
+++ trunk/component-matrix/pom.xml 2009-02-14 13:47:54 UTC (rev 84207)
@@ -411,18 +411,12 @@
</dependency>
<dependency>
- <groupId>easymock</groupId>
+ <groupId>org.easymock</groupId>
<artifactId>easymock</artifactId>
- <version>1.1</version>
+ <version>2.4</version>
</dependency>
<dependency>
- <groupId>easymock</groupId>
- <artifactId>easymockclassextension</artifactId>
- <version>1.1</version>
- </dependency>
-
- <dependency>
<groupId>gnu-getopt</groupId>
<artifactId>getopt</artifactId>
<version>1.0.12-brew</version>
Modified: trunk/testsuite/.classpath
===================================================================
--- trunk/testsuite/.classpath 2009-02-14 11:35:21 UTC (rev 84206)
+++ trunk/testsuite/.classpath 2009-02-14 13:47:54 UTC (rev 84207)
@@ -50,5 +50,6 @@
<classpathentry kind="lib" path="output/resources/security"/>
<classpathentry kind="lib" path="/thirdparty/org/jboss/ws/lib/jbossws-spi.jar" sourcepath="/thirdparty/org/jboss/ws/lib/jbossws-spi-sources.jar"/>
<classpathentry kind="lib" path="/thirdparty/org/jboss/ws/lib/jbossws-common.jar" sourcepath="/thirdparty/org/jboss/ws/lib/jbossws-common-src.zip"/>
- <classpathentry kind="output" path="output/eclipse-classes"/>
+ <classpathentry kind="lib" path="/thirdparty/org/easymock/lib/easymock.jar" sourcepath="/thirdparty/org/easymock/lib/easymock-sources.jar"/>
+ <classpathentry kind="output" path="output/eclipse-classes"/>
</classpath>
Modified: trunk/testsuite/build.xml
===================================================================
--- trunk/testsuite/build.xml 2009-02-14 11:35:21 UTC (rev 84206)
+++ trunk/testsuite/build.xml 2009-02-14 13:47:54 UTC (rev 84207)
@@ -131,6 +131,7 @@
<path refid="jgroups.jgroups.classpath"/>
<path refid="joesnmp.joesnmp.classpath"/>
<path refid="junit.junit.classpath"/>
+ <path refid="org.easymock.classpath"/>
<path refid="javassist.classpath"/>
<path refid="juddi.juddi.classpath"/>
<path refid="nekohtml.nekohtml.classpath"/>
Added: trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ExclusiveClusteredLockManagerUnitTestCase.java
===================================================================
--- trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ExclusiveClusteredLockManagerUnitTestCase.java (rev 0)
+++ trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ExclusiveClusteredLockManagerUnitTestCase.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,190 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.test.cluster.defaultcfg.test;
+
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.resetToStrict;
+import static org.easymock.EasyMock.verify;
+
+import java.util.List;
+import java.util.Vector;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+import org.jboss.ha.framework.interfaces.HAPartition;
+import org.jboss.ha.framework.server.lock.ExclusiveClusterLockManager;
+import org.jboss.ha.framework.server.lock.LocalLockHandler;
+import org.jboss.ha.framework.server.lock.RemoteLockResponse;
+import org.jboss.ha.framework.server.lock.AbstractClusterLockManager.RpcTarget;
+import org.jboss.test.cluster.lock.ClusteredLockManagerTestBase;
+
/**
 * Unit test of ExclusiveClusterLockManager.
 * <p>
 * The LocalLockHandler is an EasyMock mock; each test phase follows the
 * resetToStrict / (expectations) / replay / exercise / verify pattern.
 * A resetToStrict + replay with no recorded expectations asserts that the
 * handler is NOT invoked during that phase.
 *
 * @author Brian Stansberry
 *
 */
public class ExclusiveClusteredLockManagerUnitTestCase extends ClusteredLockManagerTestBase<ExclusiveClusterLockManager>
{
   /**
    * Create a new ClusteredLockManagerImplUnitTestCase.
    *
    * @param name test name, passed through to JUnit
    */
   public ExclusiveClusteredLockManagerUnitTestCase(String name)
   {
      super(name);
   }

   /** Factory hook for the base class: builds the exclusive-flavor testee. */
   @Override
   protected ExclusiveClusterLockManager createClusteredLockManager(String serviceHAName,
         HAPartition partition, LocalLockHandler handler)
   {
      return new ExclusiveClusterLockManager(serviceHAName, partition, handler);
   }

   /**
    * A remote caller can acquire the lock, and can acquire it again;
    * the local lock handler is never consulted in either case.
    */
   public void testBasicRemoteLock() throws Exception
   {
      TesteeSet<ExclusiveClusterLockManager> testeeSet = getTesteeSet(node1, 1, 2);
      ExclusiveClusterLockManager testee = testeeSet.impl;
      LocalLockHandler handler = testee.getLocalHandler();
      RpcTarget target = testeeSet.target;

      ClusterNode caller = testee.getCurrentView().get(0);
      assertFalse(node1.equals(caller));

      // No expectations: any handler call would fail the strict mock
      resetToStrict(handler);
      replay(handler);

      RemoteLockResponse rsp = target.remoteLock("test", caller, 1000);

      assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
      assertNull(rsp.holder);

      verify(handler);

      // Do it again; should still work
      resetToStrict(handler);
      replay(handler);

      rsp = target.remoteLock("test", caller, 1000);

      assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
      assertNull(rsp.holder);

      verify(handler);
   }

   /**
    * Two different remote callers both get OK: with supportLockOnly==false
    * a request is only rejected when the local node itself holds the lock.
    */
   public void testContestedRemoteLock() throws Exception
   {
      TesteeSet<ExclusiveClusterLockManager> testeeSet = getTesteeSet(node1, 1, 3);
      ExclusiveClusterLockManager testee = testeeSet.impl;
      LocalLockHandler handler = testee.getLocalHandler();
      RpcTarget target = testeeSet.target;

      ClusterNode caller1 = testee.getCurrentView().get(0);
      assertFalse(node1.equals(caller1));

      ClusterNode caller2 = testee.getCurrentView().get(2);
      assertFalse(node1.equals(caller2));

      resetToStrict(handler);
      replay(handler);

      RemoteLockResponse rsp = target.remoteLock("test", caller1, 1000);

      assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
      assertNull(rsp.holder);

      verify(handler);

      // A call from a different caller should still work as
      // w/ supportLockOnly==false we only reject if WE hold the lock
      resetToStrict(handler);
      replay(handler);

      rsp = target.remoteLock("test", caller2, 1000);

      assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
      assertNull(rsp.holder);

      verify(handler);

   }

   /**
    * Test that if a member holds a lock but is then removed from the
    * view, another remote member can obtain the lock.
    *
    * @throws Exception
    */
   public void testDeadMemberCleanupAllowsRemoteLock() throws Exception
   {
      TesteeSet<ExclusiveClusterLockManager> testeeSet = getTesteeSet(node1, 1, 3);
      ExclusiveClusterLockManager testee = testeeSet.impl;
      LocalLockHandler handler = testee.getLocalHandler();
      RpcTarget target = testeeSet.target;

      List<ClusterNode> members = testee.getCurrentView();
      ClusterNode caller1 = members.get(0);
      assertFalse(node1.equals(caller1));

      ClusterNode caller2 = members.get(2);
      assertFalse(node1.equals(caller2));

      resetToStrict(handler);
      replay(handler);

      // caller1 takes the lock
      RemoteLockResponse rsp = target.remoteLock("test", caller1, 1000);

      assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
      assertNull(rsp.holder);

      verify(handler);

      // Change the view: caller1 is now dead
      Vector<ClusterNode> dead = new Vector<ClusterNode>();
      dead.add(caller1);

      Vector<ClusterNode> all = new Vector<ClusterNode>(members);
      all.remove(caller1);

      // Cleanup should not touch the local handler (no expectations)
      resetToStrict(handler);
      replay(handler);

      testee.membershipChanged(dead, new Vector<ClusterNode>(), all);

      verify(handler);

      // A call from a different caller should work
      resetToStrict(handler);
      replay(handler);

      rsp = target.remoteLock("test", caller2, 1000);

      assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
      assertNull(rsp.holder);

      verify(handler);
   }

}
Property changes on: trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ExclusiveClusteredLockManagerUnitTestCase.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ReadWriteClusteredLockManagerUnitTestCase.java
===================================================================
--- trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ReadWriteClusteredLockManagerUnitTestCase.java (rev 0)
+++ trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ReadWriteClusteredLockManagerUnitTestCase.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,404 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.test.cluster.defaultcfg.test;
+
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.aryEq;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.makeThreadSafe;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.resetToNice;
+import static org.easymock.EasyMock.resetToStrict;
+import static org.easymock.EasyMock.verify;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Vector;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.jboss.ha.framework.interfaces.ClusterNode;
+import org.jboss.ha.framework.interfaces.HAPartition;
+import org.jboss.ha.framework.server.lock.AbstractClusterLockManager;
+import org.jboss.ha.framework.server.lock.LocalLockHandler;
+import org.jboss.ha.framework.server.lock.ReadWriteClusterLockManager;
+import org.jboss.ha.framework.server.lock.RemoteLockResponse;
+import org.jboss.ha.framework.server.lock.TimeoutException;
+import org.jboss.ha.framework.server.lock.AbstractClusterLockManager.RpcTarget;
+import org.jboss.test.cluster.lock.ClusteredLockManagerTestBase;
+
+/**
+ * Unit test of ClusteredLockManagerImpl
+ *
+ * @author Brian Stansberry
+ *
+ */
+public class ReadWriteClusteredLockManagerUnitTestCase extends ClusteredLockManagerTestBase<ReadWriteClusterLockManager>
+{
+ /**
+ * Create a new ClusteredLockManagerImplUnitTestCase.
+ *
+ * @param name
+ */
+ public ReadWriteClusteredLockManagerUnitTestCase(String name)
+ {
+ super(name);
+ }
+
+ @Override
+ protected ReadWriteClusterLockManager createClusteredLockManager(String serviceHAName,
+ HAPartition partition, LocalLockHandler handler)
+ {
+ return new ReadWriteClusterLockManager(serviceHAName, partition, handler);
+ }
+
+ public void testBasicRemoteLock() throws Exception
+ {
+ TesteeSet<ReadWriteClusterLockManager> testeeSet = getTesteeSet(node1, 1, 2);
+ ReadWriteClusterLockManager testee = testeeSet.impl;
+ LocalLockHandler handler = testee.getLocalHandler();
+ RpcTarget target = testeeSet.target;
+
+ ClusterNode caller = testee.getCurrentView().get(0);
+ assertFalse(node1.equals(caller));
+
+ resetToStrict(handler);
+ expect(handler.tryLock("test", caller, 1000)).andReturn(true);
+ replay(handler);
+
+ RemoteLockResponse rsp = target.remoteLock("test", caller, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
+ assertNull(rsp.holder);
+
+ verify(handler);
+
+ // Do it again; should fail as another thread from caller already
+ // acquired the lock
+ resetToStrict(handler); // fail if we call the local handler
+ replay(handler);
+
+ rsp = target.remoteLock("test", caller, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.REJECT, rsp.flag);
+ assertEquals(caller, rsp.holder);
+
+ verify(handler);
+ }
+
+ public void testContestedRemoteLock() throws Exception
+ {
+ TesteeSet<ReadWriteClusterLockManager> testeeSet = getTesteeSet(node1, 1, 3);
+ ReadWriteClusterLockManager testee = testeeSet.impl;
+ LocalLockHandler handler = testee.getLocalHandler();
+ RpcTarget target = testeeSet.target;
+
+ ClusterNode caller1 = testee.getCurrentView().get(0);
+ assertFalse(node1.equals(caller1));
+
+ ClusterNode caller2 = testee.getCurrentView().get(2);
+ assertFalse(node1.equals(caller2));
+
+ resetToStrict(handler);
+ expect(handler.tryLock("test", caller1, 1000)).andReturn(true);
+ replay(handler);
+
+ RemoteLockResponse rsp = target.remoteLock("test", caller1, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
+ assertNull(rsp.holder);
+
+ verify(handler);
+
+ // A call from a different caller should be rejected without need
+ // to go to the LocalLockHandler
+ resetToStrict(handler);
+ replay(handler);
+
+ rsp = target.remoteLock("test", caller2, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.REJECT, rsp.flag);
+ assertEquals(caller1, rsp.holder);
+
+ verify(handler);
+ }
+
+ public void testConcurrentRemoteLock() throws Exception
+ {
+ TesteeSet<ReadWriteClusterLockManager> testeeSet = getTesteeSet(node1, 1, 3);
+ ReadWriteClusterLockManager testee = testeeSet.impl;
+ LocalLockHandler handler = testee.getLocalHandler();
+ final RpcTarget target = testeeSet.target;
+
+ ClusterNode caller1 = testee.getCurrentView().get(0);
+ assertFalse(node1.equals(caller1));
+
+ ClusterNode caller2 = testee.getCurrentView().get(2);
+ assertFalse(node1.equals(caller2));
+
+
+ resetToStrict(handler);
+ makeThreadSafe(handler, true);
+ // When caller 1 invokes, block before giving response
+ CountDownLatch answerStartLatch = new CountDownLatch(1);
+ CountDownLatch answerDoneLatch = new CountDownLatch(1);
+ BlockingAnswer<Boolean> caller1Answer = new BlockingAnswer<Boolean>(Boolean.TRUE, answerStartLatch, null, answerDoneLatch);
+ BlockingAnswer<Boolean> caller2Answer = new BlockingAnswer<Boolean>(new TimeoutException(caller1), answerDoneLatch, 0, null, null);
+ expect(handler.tryLock("test", caller1, 1000)).andAnswer(caller1Answer);
+ expect(handler.tryLock("test", caller2, 1000)).andAnswer(caller2Answer);
+ replay(handler);
+
+ CountDownLatch startLatch1 = new CountDownLatch(1);
+ CountDownLatch startLatch2 = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(2);
+
+ RemoteLockCaller winner = new RemoteLockCaller(target, caller1, startLatch1, null, finishedLatch);
+ RemoteLockCaller loser = new RemoteLockCaller(target, caller2, startLatch2, null, finishedLatch);
+
+ Thread t1 = new Thread(winner);
+ t1.setDaemon(true);
+ Thread t2 = new Thread(loser);
+ t2.setDaemon(true);
+
+ try
+ {
+ t1.start();
+ assertTrue(startLatch1.await(1, TimeUnit.SECONDS));
+ // t1 should now be blocking in caller1Answer
+
+ t2.start();
+ assertTrue(startLatch2.await(1, TimeUnit.SECONDS));
+ // t2 should now be blocking due to t1
+
+ // release t1
+ answerStartLatch.countDown();
+
+ // wait for both to complete
+ assertTrue(finishedLatch.await(1, TimeUnit.SECONDS));
+
+ verify(handler);
+
+ rethrow("winner had an exception", winner.getException());
+ rethrow("loser had an exception", loser.getException());
+
+ RemoteLockResponse rsp = winner.getResult();
+ assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
+ assertNull(rsp.holder);
+
+ rsp = loser.getResult();
+ assertEquals(RemoteLockResponse.Flag.FAIL, rsp.flag);
+ assertEquals(caller1, rsp.holder);
+ }
+ finally
+ {
+ if (t1.isAlive())
+ t1.interrupt();
+ if (t2.isAlive())
+ t2.interrupt();
+ }
+ }
+
+ public void testRemoteLockFailsAgainstLocalLock() throws Exception
+ {
+ TesteeSet<ReadWriteClusterLockManager> testeeSet = getTesteeSet(node1, 1, 2);
+ ReadWriteClusterLockManager testee = testeeSet.impl;
+ LocalLockHandler handler = testee.getLocalHandler();
+ RpcTarget target = testeeSet.target;
+
+ ClusterNode caller1 = testee.getCurrentView().get(0);
+ assertFalse(node1.equals(caller1));
+
+ resetToStrict(handler);
+ // We throw TimeoutException to indicate "node1" holds the lock
+ expect(handler.tryLock("test", caller1, 1000)).andThrow(new TimeoutException(node1));
+ replay(handler);
+
+ RemoteLockResponse rsp = target.remoteLock("test", caller1, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.FAIL, rsp.flag);
+ assertEquals(node1, rsp.holder);
+
+ verify(handler);
+
+ // A second attempt should succeed if the local lock is released
+
+ resetToStrict(handler);
+ // We return normally to indicate success
+ expect(handler.tryLock("test", caller1, 1000)).andReturn(true);
+ replay(handler);
+
+ rsp = target.remoteLock("test", caller1, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
+ assertNull(rsp.holder);
+
+ verify(handler);
+ }
+
+ public void testBasicClusterLockFailsAgainstLocalLock() throws Exception
+ {
+ basicClusterLockFailsAgainstLocalLockTest(2);
+ }
+
+ public void testStandaloneClusterLockFailsAgainstLocalLock() throws Exception
+ {
+ basicClusterLockFailsAgainstLocalLockTest(2);
+ }
+
+ private void basicClusterLockFailsAgainstLocalLockTest(int viewSize) throws Exception
+ {
+ int viewPos = viewSize == 1 ? 0 : 1;
+ TesteeSet<ReadWriteClusterLockManager> testeeSet = getTesteeSet(node1, viewPos, viewSize);
+ ReadWriteClusterLockManager testee = testeeSet.impl;
+ HAPartition partition = testee.getPartition();
+ LocalLockHandler handler = testee.getLocalHandler();
+
+ resetToNice(partition);
+ resetToStrict(handler);
+
+ ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
+ for (int i = 0; i < viewSize - 1; i++)
+ {
+ rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
+ }
+
+ expect(partition.callMethodOnCluster(eq("test"),
+ eq("remoteLock"),
+ eqLockParams(node1, 2000000),
+ aryEq(AbstractClusterLockManager.REMOTE_LOCK_TYPES),
+ eq(true))).andReturn(rspList).atLeastOnce();
+
+ expect(handler.tryLock(eq("test"), eq(node1), anyLong())).andThrow(new TimeoutException(node1)).atLeastOnce();
+
+
+ expect(partition.callMethodOnCluster(eq("test"),
+ eq("releaseRemoteLock"),
+ aryEq(new Object[]{"test", node1}),
+ aryEq(AbstractClusterLockManager.RELEASE_REMOTE_LOCK_TYPES),
+ eq(true))).andReturn(new ArrayList<Object>()).atLeastOnce();
+ replay(partition);
+ replay(handler);
+
+ assertFalse(testee.lock("test", 10));
+
+ verify(partition);
+ verify(handler);
+ }
+
+ /**
+ * Test that if a member holds a lock but is then removed from the
+ * view, another remote member can obtain the lock.
+ *
+ * @throws Exception
+ */
+ public void testDeadMemberCleanupAllowsRemoteLock() throws Exception
+ {
+ TesteeSet<ReadWriteClusterLockManager> testeeSet = getTesteeSet(node1, 1, 3);
+ ReadWriteClusterLockManager testee = testeeSet.impl;
+ LocalLockHandler handler = testee.getLocalHandler();
+ RpcTarget target = testeeSet.target;
+
+ List<ClusterNode> members = testee.getCurrentView();
+ ClusterNode caller1 = members.get(0);
+ assertFalse(node1.equals(caller1));
+
+ ClusterNode caller2 = members.get(2);
+ assertFalse(node1.equals(caller2));
+
+ resetToStrict(handler);
+ expect(handler.tryLock("test", caller1, 1000)).andReturn(true);
+ replay(handler);
+
+ RemoteLockResponse rsp = target.remoteLock("test", caller1, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
+ assertNull(rsp.holder);
+
+ verify(handler);
+
+ // Change the view
+ Vector<ClusterNode> dead = new Vector<ClusterNode>();
+ dead.add(caller1);
+
+ Vector<ClusterNode> all = new Vector<ClusterNode>(members);
+ all.remove(caller1);
+
+ resetToStrict(handler);
+ expect(handler.getLockHolder("test")).andReturn(caller1);
+ expect(handler.releaseLock("test", caller1)).andReturn(true);
+ replay(handler);
+
+ testee.membershipChanged(dead, new Vector<ClusterNode>(), all);
+
+ verify(handler);
+
+ // A call from a different caller should work
+ resetToStrict(handler);
+ expect(handler.tryLock("test", caller2, 1000)).andReturn(true);
+ replay(handler);
+
+ rsp = target.remoteLock("test", caller2, 1000);
+
+ assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
+ assertNull(rsp.holder);
+
+ verify(handler);
+ }
+
+ /**
+ * Remote node acquires a lock; different remote node tries to release which is ignored.
+ *
+ * @throws Exception
+ */
+ public void testSpuriousLockReleaseIgnored2() throws Exception
+ {
+ TesteeSet<ReadWriteClusterLockManager> testeeSet = getTesteeSet(node1, 1, 3);
+ ReadWriteClusterLockManager testee = testeeSet.impl;
+ HAPartition partition = testee.getPartition();
+ LocalLockHandler handler = testee.getLocalHandler();
+ RpcTarget target = testeeSet.target;
+
+ ClusterNode caller1 = testee.getCurrentView().get(0);
+ ClusterNode caller2 = testee.getCurrentView().get(2);
+
+ resetToStrict(partition);
+ resetToStrict(handler);
+
+ expect(handler.tryLock(eq("test"), eq(caller1), anyLong())).andReturn(true);
+
+ expect(handler.getLockHolder("test")).andReturn(caller1);
+
+ replay(partition);
+ replay(handler);
+
+ RemoteLockResponse rsp = target.remoteLock("test", caller1, 1);
+ assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);
+
+ target.releaseRemoteLock("test", caller2);
+
+ verify(partition);
+ verify(handler);
+ }
+
+}
Property changes on: trunk/testsuite/src/main/org/jboss/test/cluster/defaultcfg/test/ReadWriteClusteredLockManagerUnitTestCase.java
___________________________________________________________________
Name: svn:keywords
+
Added: trunk/testsuite/src/main/org/jboss/test/cluster/lock/ClusteredLockManagerTestBase.java
===================================================================
--- trunk/testsuite/src/main/org/jboss/test/cluster/lock/ClusteredLockManagerTestBase.java (rev 0)
+++ trunk/testsuite/src/main/org/jboss/test/cluster/lock/ClusteredLockManagerTestBase.java 2009-02-14 13:47:54 UTC (rev 84207)
@@ -0,0 +1,927 @@
+package org.jboss.test.cluster.lock;
+
+import static org.easymock.EasyMock.and;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.aryEq;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.isA;
+import static org.easymock.EasyMock.makeThreadSafe;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.resetToNice;
+import static org.easymock.EasyMock.resetToStrict;
+import static org.easymock.EasyMock.same;
+import static org.easymock.EasyMock.verify;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Vector;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.TestCase;
+
+import org.easymock.Capture;
+import org.easymock.EasyMock;
+import org.easymock.IAnswer;
+import org.easymock.IArgumentMatcher;
+import org.jboss.ha.framework.interfaces.ClusterNode;
+import org.jboss.ha.framework.interfaces.HAPartition;
+import org.jboss.ha.framework.server.ClusterNodeImpl;
+import org.jboss.ha.framework.server.lock.AbstractClusterLockManager;
+import org.jboss.ha.framework.server.lock.ExclusiveClusterLockManager;
+import org.jboss.ha.framework.server.lock.LocalLockHandler;
+import org.jboss.ha.framework.server.lock.RemoteLockResponse;
+import org.jboss.ha.framework.server.lock.AbstractClusterLockManager.RpcTarget;
+import org.jgroups.stack.IpAddress;
+
+public abstract class ClusteredLockManagerTestBase<T extends AbstractClusterLockManager> extends TestCase
+{
+
+ protected ClusterNode node1;
+ protected ClusterNode node2;
+ protected ClusterNode node3;
+
+ public static Object[] eqLockParams(ClusterNode node, long timeout)
+ {
+ EasyMock.reportMatcher(new LockParamsMatcher(node, timeout));
+ return null;
+ }
+
+ protected static class LockParamsMatcher implements IArgumentMatcher
+ {
+ private final ClusterNode node;
+ private final long timeout;
+
+ LockParamsMatcher(ClusterNode node, long timeout)
+ {
+ this.node = node;
+ this.timeout = timeout;
+ }
+
+ public void appendTo(StringBuffer buffer)
+ {
+ buffer.append("eqRemoteLockParams({\"test\",");
+ buffer.append(node);
+ buffer.append(',');
+ buffer.append(timeout);
+ buffer.append("})");
+ }
+
+ public boolean matches(Object arg)
+ {
+ if (arg instanceof Object[])
+ {
+ Object[] args = (Object[]) arg;
+ if (args.length == 3)
+ {
+ if ("test".equals(args[0]) && node.equals(args[1])
+ && args[2] instanceof Long)
+ {
+ long l = ((Long) args[2]).longValue();
+ return l >= 0 && l <= timeout;
+ }
+ }
+ }
+ return false;
+ }
+
+ }
+
+ public ClusteredLockManagerTestBase()
+ {
+ super();
+ }
+
+ public ClusteredLockManagerTestBase(String name)
+ {
+ super(name);
+ }
+
+ protected void setUp() throws Exception
+ {
+ super.setUp();
+ node1 = new ClusterNodeImpl(new IpAddress("localhost", 1));
+ node2 = new ClusterNodeImpl(new IpAddress("localhost", 2));
+ node3 = new ClusterNodeImpl(new IpAddress("localhost", 3));
+ }
+
+ protected void tearDown() throws Exception
+ {
+ super.tearDown();
+ }
+
+ public void testConstructor() throws Exception
+ {
+ HAPartition haPartition = createNiceMock(HAPartition.class);
+ LocalLockHandler handler = createNiceMock(LocalLockHandler.class);
+
+ try
+ {
+ new ExclusiveClusterLockManager(null, haPartition, handler);
+ fail("Null serviceHAName should prevent construction");
+ }
+ catch (IllegalArgumentException good) {}
+
+ try
+ {
+ new ExclusiveClusterLockManager("test", null, handler);
+ fail("Null HAPartition should prevent construction");
+ }
+ catch (IllegalArgumentException good) {}
+
+ try
+ {
+ new ExclusiveClusterLockManager("test", haPartition, null);
+ fail("Null LocalLockHandler should prevent construction");
+ }
+ catch (IllegalArgumentException good) {}
+
+ expect(haPartition.getClusterNode()).andReturn(node1);
+ expect(haPartition.getPartitionName()).andReturn("TestPartition");
+
+ replay(haPartition);
+ replay(handler);
+
+ ExclusiveClusterLockManager testee = new ExclusiveClusterLockManager("test", haPartition, handler);
+
+ assertEquals("test", testee.getServiceHAName());
+ assertEquals("TestPartition", testee.getPartitionName());
+ }
+
+ public void testStart() throws Exception
+ {
+ HAPartition haPartition = createNiceMock(HAPartition.class);
+ LocalLockHandler handler = createNiceMock(LocalLockHandler.class);
+ expect(haPartition.getClusterNode()).andReturn(node1);
+ expect(haPartition.getPartitionName()).andReturn("TestPartition");
+
+ replay(haPartition);
+ replay(handler);
+
+ ExclusiveClusterLockManager testee = new ExclusiveClusterLockManager("test", haPartition, handler);
+
+ try
+ {
+ testee.lock("id", 1000);
+ fail("Call to lock() should fail if not started");
+ }
+ catch (IllegalStateException good) {}
+
+ try
+ {
+ testee.unlock("id");
+ fail("Call to unlock() should fail if not started");
+ }
+ catch (IllegalStateException good) {}
+
+ reset(haPartition);
+
+ assertEquals("Current view is empty when unstarted", 0, testee.getCurrentView().size());
+
+ haPartition.registerRPCHandler(eq("test"), isA(ExclusiveClusterLockManager.RpcTarget.class));
+ haPartition.registerMembershipListener(testee);
+ Vector<ClusterNode> view = new Vector<ClusterNode>();
+ view.add(node1);
+ expect(haPartition.getCurrentView()).andReturn(view);
+ replay(haPartition);
+
+ testee.start();
+
+ verify(haPartition);
+
+ assertEquals("Current view is correct", 1, testee.getCurrentView().size());
+ assertTrue(testee.getCurrentView().contains(node1));
+
+ }
+
+ public void testStop() throws Exception
+ {
+ TesteeSet<T> testeeSet = getTesteeSet(node1, 0, 1);
+ T testee = testeeSet.impl;
+ HAPartition haPartition = testee.getPartition();
+
+ reset(haPartition);
+
+ haPartition.unregisterMembershipListener(testee);
+ haPartition.unregisterRPCHandler(eq("test"), same(testeeSet.target));
+
+ replay(haPartition);
+
+ testee.stop();
+
+ verify(haPartition);
+
+ assertEquals("Current view is empty when stopped", 0, testee.getCurrentView().size());
+
+ try
+ {
+ testee.lock("id", 1000);
+ fail("Call to lock() should fail if stopped");
+ }
+ catch (IllegalStateException good) {}
+
+ try
+ {
+ testee.unlock("id");
+ fail("Call to unlock() should fail if stopped");
+ }
+ catch (IllegalStateException good) {}
+
+ }
+
+ public void testGetMembers() throws Exception
+ {
+ TesteeSet<T> testeeSet = getTesteeSet(node1, 1, 2);
+ T testee = testeeSet.impl;
+
+ List<ClusterNode> members = testee.getCurrentView();
+ assertEquals(2, members.size());
+ assertEquals(node1, members.get(1));
+
+ ClusterNode dead = members.get(0);
+ assertFalse(node1.equals(dead));
+
+ Vector<ClusterNode> newView = getView(node1, 0, 3);
+ newView.remove(dead);
+
+ Vector<ClusterNode> addedMembers = new Vector<ClusterNode>(newView);
+ addedMembers.removeAll(members);
+
+ Vector<ClusterNode> deadMembers = new Vector<ClusterNode>();
+ deadMembers.add(dead);
+
+ testee.membershipChanged(deadMembers, addedMembers, newView);
+
+ members = testee.getCurrentView();
+ assertEquals(2, members.size());
+ assertEquals(node1, members.get(0));
+ assertFalse(node1.equals(members.get(1)));
+ assertFalse(members.contains(dead));
+
+ }
+
+ /**
+ * Simple test of acquiring a cluster-wide lock in a two node cluster
+ * where local-only locks are supported.
+ *
+ * @throws Exception
+ */
+ public void testBasicClusterLock() throws Exception
+ {
+ basicClusterLockTest(2);
+ }
+
+ /**
+ * Simple test of acquiring a cluster-wide lock in a cluster where the
+ * caller is the only member and where local-only locks are supported.
+ *
+ * @throws Exception
+ */
+ public void testStandaloneClusterLock() throws Exception
+ {
+ basicClusterLockTest(1);
+ }
+
+ private void basicClusterLockTest(int viewSize) throws Exception
+ {
+ int viewPos = viewSize == 1 ? 0 : 1;
+ TesteeSet<T> testeeSet = getTesteeSet(node1, viewPos, viewSize);
+ AbstractClusterLockManager testee = testeeSet.impl;
+ HAPartition partition = testee.getPartition();
+ LocalLockHandler handler = testee.getLocalHandler();
+
+ resetToStrict(partition);
+ resetToStrict(handler);
+
+ ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
+ for (int i = 0; i < viewSize - 1; i++)
+ {
+ rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
+ }
+
+ expect(partition.callMethodOnCluster(eq("test"),
+ eq("remoteLock"),
+ eqLockParams(node1, 200000),
+ aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
+ eq(true))).andReturn(rspList);
+
+ expect(handler.tryLock(eq("test"), eq(node1), anyLong())).andReturn(true);
+
+ replay(partition);
+ replay(handler);
+
+ assertTrue(testee.lock("test", 200000));
+
+ verify(partition);
+ verify(handler);
+
+ }
+
+ public void testRemoteRejectionFromSuperiorCaller() throws Exception
+ {
+ TesteeSet<T> testeeSet = getTesteeSet(node1, 1, 3);
+ AbstractClusterLockManager testee = testeeSet.impl;
+ HAPartition partition = testee.getPartition();
+ LocalLockHandler handler = testee.getLocalHandler();
+
+ resetToNice(partition);
+ resetToStrict(handler);
+
+ ClusterNode superior = testee.getCurrentView().get(0);
+
+ ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
+ rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
+ rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, superior));
+
+
+ expect(partition.callMethodOnCluster(eq("test"),
+ eq("remoteLock"),
+ eqLockParams(node1, 200000),
+ aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
+ eq(true))).andReturn(rspList).atLeastOnce();
+
+ replay(partition);
+ replay(handler);
+
+ assertFalse(testee.lock("test", 50));
+
+ verify(partition);
+ verify(handler);
+
+ }
+
/**
 * Verifies that a REJECT from an inferior (precedence-losing) remote node
 * causes the local node to release and retry, and that the retry succeeds
 * once all members answer OK.
 */
public void testRemoteRejectionFromInferiorCaller() throws Exception
{
   TesteeSet<T> testeeSet = getTesteeSet(node1, 1, 3);
   AbstractClusterLockManager testee = testeeSet.impl;
   HAPartition partition = testee.getPartition();
   LocalLockHandler handler = testee.getLocalHandler();

   // Strict mocks: the RPC sequence below must occur in exactly this order
   resetToStrict(partition);
   resetToStrict(handler);

   ClusterNode inferior = testee.getCurrentView().get(2);

   // First remoteLock round: one member rejects on behalf of the inferior node
   ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, inferior));


   expect(partition.callMethodOnCluster(eq("test"),
         eq("remoteLock"),
         eqLockParams(node1, 200000),
         aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
         eq(true))).andReturn(rspList);


   // The rejected attempt triggers a cluster-wide release. The same rspList
   // object is deliberately reused as the return value; its contents are not
   // inspected by the release path.
   expect(partition.callMethodOnCluster(eq("test"),
         eq("releaseRemoteLock"),
         aryEq(new Object[]{"test", node1}),
         aryEq(AbstractClusterLockManager.RELEASE_REMOTE_LOCK_TYPES),
         eq(true))).andReturn(rspList);

   // Second remoteLock round: all members now answer OK
   rspList = new ArrayList<RemoteLockResponse>();
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));

   expect(partition.callMethodOnCluster(eq("test"),
         eq("remoteLock"),
         eqLockParams(node1, 200000),
         aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
         eq(true))).andReturn(rspList);

   expect(handler.tryLock(eq("test"), eq(node1), anyLong())).andReturn(true).atLeastOnce();

   replay(partition);
   replay(handler);

   // Despite the initial rejection, the retry wins within the 50ms budget
   assertTrue(testee.lock("test", 50));

   verify(partition);
   verify(handler);

}
+
+ public void testLocalLockingStateRejectsSuperiorRemoteCaller() throws Exception
+ {
+ TesteeSet<T> testeeSet = getTesteeSet(node1, 1, 3);
+ T testee = testeeSet.impl;
+ HAPartition partition = testee.getPartition();
+ LocalLockHandler handler = testee.getLocalHandler();
+ final RpcTarget target = testeeSet.target;
+
+ ClusterNode superiorCaller = testee.getCurrentView().get(0);
+ assertFalse(node1.equals(superiorCaller));
+
+ resetToStrict(partition);
+ makeThreadSafe(partition, true);
+ resetToStrict(handler);
+ makeThreadSafe(handler, true);
+
+ ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
+ rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
+ rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
+
+ expect(partition.callMethodOnCluster(eq("test"),
+ eq("remoteLock"),
+ eqLockParams(node1, 200000),
+ aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
+ eq(true))).andReturn(rspList);
+
+ // When caller 1 invokes, block before giving response
+ CountDownLatch answerAwaitLatch = new CountDownLatch(1);
+ CountDownLatch answerStartLatch = new CountDownLatch(1);
+ CountDownLatch answerDoneLatch = new CountDownLatch(1);
+ BlockingAnswer<Boolean> caller1Answer = new BlockingAnswer<Boolean>(Boolean.TRUE, answerAwaitLatch, answerStartLatch, answerDoneLatch);
+ expect(handler.tryLock(eq("test"), eq(node1), anyLong())).andAnswer(caller1Answer);
+
+ replay(partition);
+ replay(handler);
+
+ LocalLockCaller winner = new LocalLockCaller(testee, null, null, null);
+
+ Thread t1 = new Thread(winner);
+ t1.setDaemon(true);
+
+ try
+ {
+ t1.start();
+ assertTrue(answerStartLatch.await(500, TimeUnit.SECONDS));
+ // t1 should now be blocking in caller1Answer
+
+ RemoteLockResponse rsp = target.remoteLock("test", superiorCaller, 1);
+ assertEquals(RemoteLockResponse.Flag.REJECT, rsp.flag);
+ assertEquals(node1, rsp.holder);
+
+ // release t1
+ answerAwaitLatch.countDown();
+
+ // wait for t1 to complete
+ assertTrue(answerDoneLatch.await(2, TimeUnit.SECONDS));
+
+ verify(handler);
+
+ rethrow("winner had an exception", winner.getException());
+
+ Boolean locked = winner.getResult();
+ assertEquals(Boolean.TRUE, locked);
+ }
+ finally
+ {
+ if (t1.isAlive())
+ t1.interrupt();
+ }
+ }
+
/**
 * Verifies that while the local node is blocked inside its own cluster-wide
 * remoteLock RPC, an incoming remoteLock from a superior remote caller is
 * granted (OK) and acquires the local lock, and the local attempt then
 * fails and releases cluster-wide.
 */
public void testRemoteLockingStateAllowsSuperiorRemoteCaller() throws Exception
{
   TesteeSet<T> testeeSet = getTesteeSet(node1, 1, 3);
   T testee = testeeSet.impl;
   HAPartition partition = testee.getPartition();
   LocalLockHandler handler = testee.getLocalHandler();
   final RpcTarget target = testeeSet.target;

   ClusterNode superiorCaller = testee.getCurrentView().get(0);
   assertFalse(node1.equals(superiorCaller));

   resetToNice(partition); // nice as we may loop retrying and failing
   makeThreadSafe(partition, true);
   resetToStrict(handler);
   makeThreadSafe(handler, true);

   // When caller 1 invokes, block before giving response
   CountDownLatch answerAwaitLatch = new CountDownLatch(1);
   CountDownLatch answerStartLatch = new CountDownLatch(1);

   // The RPC reports a rejection on behalf of the superior caller
   ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, superiorCaller));

   BlockingAnswer<ArrayList<RemoteLockResponse>> caller1Answer = new BlockingAnswer<ArrayList<RemoteLockResponse>>(rspList, answerAwaitLatch, answerStartLatch, null);

   expect(partition.callMethodOnCluster(eq("test"),
         eq("remoteLock"),
         eqLockParams(node1, 200000),
         aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
         eq(true))).andAnswer(caller1Answer).atLeastOnce();

   // The superior caller's competing request must reach the local handler
   expect(handler.tryLock(eq("test"), eq(superiorCaller), anyLong())).andReturn(true);

   // The failed local attempt must be released cluster-wide (possibly repeatedly)
   expect(partition.callMethodOnCluster(eq("test"),
         eq("releaseRemoteLock"),
         aryEq(new Object[]{"test", node1}),
         aryEq(AbstractClusterLockManager.RELEASE_REMOTE_LOCK_TYPES),
         eq(true))).andReturn(new ArrayList<Object>()).atLeastOnce();

   replay(partition);
   replay(handler);

   CountDownLatch finishedLatch = new CountDownLatch(1);
   // NOTE(review): named 'loser' because the local node is expected to lose
   LocalLockCaller loser = new LocalLockCaller(testee, null, null, finishedLatch);

   Thread t1 = new Thread(loser);
   t1.setDaemon(true);

   try
   {
      t1.start();
      assertTrue(answerStartLatch.await(1, TimeUnit.SECONDS));
      // t1 should now be blocking in caller1Answer

      // Superior remote caller must be granted the lock while t1 is blocked
      RemoteLockResponse rsp = target.remoteLock("test", superiorCaller, 1);
      assertEquals(RemoteLockResponse.Flag.OK, rsp.flag);

      // release t1
      answerAwaitLatch.countDown();

      // wait for t1 to complete
      assertTrue(finishedLatch.await(1, TimeUnit.SECONDS));

      verify(handler);

      rethrow("winner had an exception", loser.getException());

      // The local attempt must have failed
      Boolean locked = loser.getResult();
      assertEquals(Boolean.FALSE, locked);
   }
   finally
   {
      if (t1.isAlive())
         t1.interrupt();
   }
}
+
/**
 * Verifies that while the local node is blocked inside its own cluster-wide
 * remoteLock RPC, an incoming remoteLock from an inferior remote caller is
 * rejected, and the local node then releases, retries and wins.
 */
public void testRemoteLockingStateRejectsInferiorRemoteCaller() throws Exception
{
   TesteeSet<T> testeeSet = getTesteeSet(node1, 1, 3);
   T testee = testeeSet.impl;
   HAPartition partition = testee.getPartition();
   LocalLockHandler handler = testee.getLocalHandler();
   final RpcTarget target = testeeSet.target;

   ClusterNode inferiorCaller = testee.getCurrentView().get(2);
   assertFalse(node1.equals(inferiorCaller));

   // Strict mocks: the RPC sequence below must occur in exactly this order
   resetToStrict(partition);
   makeThreadSafe(partition, true);
   resetToStrict(handler);
   makeThreadSafe(handler, true);

   // When caller 1 invokes, block before giving response
   CountDownLatch answerAwaitLatch = new CountDownLatch(1);
   CountDownLatch answerStartLatch = new CountDownLatch(1);

   // First round: one member rejects on behalf of the inferior caller
   ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.REJECT, inferiorCaller));

   BlockingAnswer<ArrayList<RemoteLockResponse>> caller1Answer = new BlockingAnswer<ArrayList<RemoteLockResponse>>(rspList, answerAwaitLatch, answerStartLatch, null);

   expect(partition.callMethodOnCluster(eq("test"),
         eq("remoteLock"),
         eqLockParams(node1, 200000),
         aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
         eq(true))).andAnswer(caller1Answer);

   // The rejected first attempt triggers a cluster-wide release
   expect(partition.callMethodOnCluster(eq("test"),
         eq("releaseRemoteLock"),
         aryEq(new Object[]{"test", node1}),
         aryEq(AbstractClusterLockManager.RELEASE_REMOTE_LOCK_TYPES),
         eq(true))).andReturn(new ArrayList<Object>());

   // Second round: everyone answers OK and the retry succeeds
   rspList = new ArrayList<RemoteLockResponse>();
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
   rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));

   expect(partition.callMethodOnCluster(eq("test"),
         eq("remoteLock"),
         eqLockParams(node1, 200000),
         aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
         eq(true))).andReturn(rspList);

   expect(handler.tryLock(eq("test"), eq(node1), anyLong())).andReturn(true);

   replay(partition);
   replay(handler);

   CountDownLatch finishedLatch = new CountDownLatch(1);
   // NOTE(review): 'loser' is a misnomer here -- this caller ultimately wins
   LocalLockCaller loser = new LocalLockCaller(testee, null, null, finishedLatch);

   Thread t1 = new Thread(loser);
   t1.setDaemon(true);

   try
   {
      t1.start();
      assertTrue(answerStartLatch.await(1, TimeUnit.SECONDS));
      // t1 should now be blocking in caller1Answer

      // The inferior competitor must be rejected while t1 is mid-acquisition
      RemoteLockResponse rsp = target.remoteLock("test", inferiorCaller, 1);
      assertEquals(RemoteLockResponse.Flag.REJECT, rsp.flag);
      assertEquals(node1, rsp.holder);

      // release t1
      answerAwaitLatch.countDown();

      // wait for t1 to complete
      assertTrue(finishedLatch.await(1, TimeUnit.SECONDS));

      verify(handler);

      rethrow("winner had an exception", loser.getException());

      // The local attempt must have succeeded after the retry
      Boolean locked = loser.getResult();
      assertEquals(Boolean.TRUE, locked);
   }
   finally
   {
      if (t1.isAlive())
         t1.interrupt();
   }
}
+
+ /**
+ * Local node acquires a lock; remote node tries to releasem which is ignored.
+ *
+ * @throws Exception
+ */
+ public void testSpuriousRemoteLockReleaseIgnored() throws Exception
+ {
+ TesteeSet<T> testeeSet = getTesteeSet(node1, 1, 2);
+ AbstractClusterLockManager testee = testeeSet.impl;
+ HAPartition partition = testee.getPartition();
+ LocalLockHandler handler = testee.getLocalHandler();
+
+ ClusterNode other = testee.getCurrentView().get(0);
+ resetToStrict(partition);
+ resetToStrict(handler);
+
+ ArrayList<RemoteLockResponse> rspList = new ArrayList<RemoteLockResponse>();
+ rspList.add(new RemoteLockResponse(RemoteLockResponse.Flag.OK));
+
+
+ expect(partition.callMethodOnCluster(eq("test"),
+ eq("remoteLock"),
+ eqLockParams(node1, 200000),
+ aryEq(ExclusiveClusterLockManager.REMOTE_LOCK_TYPES),
+ eq(true))).andReturn(rspList);
+
+ expect(handler.tryLock(eq("test"), eq(node1), anyLong())).andReturn(true);
+
+ expect(handler.getLockHolder("test")).andReturn(node1);
+
+ replay(partition);
+ replay(handler);
+
+ assertTrue(testee.lock("test", 200000));
+ testeeSet.target.releaseRemoteLock("test", other);
+
+ verify(partition);
+ verify(handler);
+ }
+
+ protected TesteeSet<T> getTesteeSet(ClusterNode node, int viewPos, int viewSize) throws Exception
+ {
+ HAPartition haPartition = createNiceMock(HAPartition.class);
+ LocalLockHandler handler = createNiceMock(LocalLockHandler.class);
+ expect(haPartition.getClusterNode()).andReturn(node);
+ expect(haPartition.getPartitionName()).andReturn("TestPartition");
+
+ Capture<ExclusiveClusterLockManager.RpcTarget> c = new Capture<ExclusiveClusterLockManager.RpcTarget>();
+ haPartition.registerRPCHandler(eq("test"), and(isA(ExclusiveClusterLockManager.RpcTarget.class), capture(c)));
+ Vector<ClusterNode> view = getView(node, viewPos, viewSize);
+ expect(haPartition.getCurrentView()).andReturn(view);
+
+ replay(haPartition);
+ replay(handler);
+
+ T testee = createClusteredLockManager("test", haPartition, handler);
+
+ testee.start();
+
+ reset(haPartition);
+ reset(handler);
+
+ return new TesteeSet<T>(testee, c.getValue());
+ }
+
+ protected abstract T createClusteredLockManager(String serviceHAName, HAPartition partition, LocalLockHandler handler);
+
+ private Vector<ClusterNode> getView(ClusterNode member, int viewPos, int numMembers)
+ {
+ Vector<ClusterNode> all = new Vector<ClusterNode>(Arrays.asList(new ClusterNode[]{node1, node2, node3}));
+ all.remove(member);
+ while (all.size() > numMembers - 1) // -1 'cause we'll add one in a sec
+ {
+ all.remove(all.size() - 1);
+ }
+ all.add(viewPos, member);
+
+ return all;
+ }
+
+ protected static void rethrow(String msg, Throwable t) throws Exception
+ {
+ if (t != null)
+ {
+ if (t instanceof AssertionError)
+ {
+ AssertionError rethrow = new AssertionError(msg);
+ rethrow.initCause(t);
+ throw rethrow;
+ }
+
+ throw new RuntimeException(msg, t);
+ }
+ }
+
/**
 * Pairs a lock manager under test with the RpcTarget it registered with
 * the (mock) HAPartition during start().
 */
protected class TesteeSet<C extends AbstractClusterLockManager>
{
   public final C impl;
   public final RpcTarget target;

   TesteeSet(C impl, RpcTarget target)
   {
      this.impl = impl;
      this.target = target;
   }
}
+
+ /**
+ * Allows EasyMock to block before returning.
+ *
+ * @author Brian Stansberry
+ *
+ * @param <T>
+ */
+ protected class BlockingAnswer<C> implements IAnswer<C>
+ {
+ private final C answer;
+ private final Exception toThrow;
+ private final CountDownLatch startLatch;
+ private final CountDownLatch awaitlatch;
+ private final CountDownLatch endLatch;
+ private final long timeout;
+
+ public BlockingAnswer(C answer, CountDownLatch awaitLatch, CountDownLatch startLatch, CountDownLatch endLatch)
+ {
+ this(answer, awaitLatch, 0, startLatch, endLatch);
+ }
+
+ public BlockingAnswer(C answer, CountDownLatch awaitLatch, long timeout, CountDownLatch startLatch, CountDownLatch endLatch)
+ {
+ this.awaitlatch = awaitLatch;
+ this.startLatch = startLatch;
+ this.endLatch = endLatch;
+ this.timeout = timeout;
+ this.answer = answer;
+ this.toThrow = null;
+ }
+
+ public BlockingAnswer(Exception toThrow, CountDownLatch awaitLatch, long timeout, CountDownLatch startLatch, CountDownLatch endLatch)
+ {
+ this.awaitlatch = awaitLatch;
+ this.startLatch = startLatch;
+ this.endLatch = endLatch;
+ this.timeout = timeout;
+ this.answer = null;
+ this.toThrow = toThrow;
+ }
+
+ public C answer() throws Throwable
+ {
+ if (startLatch != null)
+ {
+ startLatch.countDown();
+ }
+
+ try
+ {
+ if (timeout > 0)
+ {
+ awaitlatch.await(timeout, TimeUnit.MILLISECONDS);
+ }
+ else
+ {
+ awaitlatch.await();
+ }
+
+ if (toThrow != null)
+ {
+ throw toThrow;
+ }
+
+ return answer;
+ }
+ finally
+ {
+ if (endLatch != null)
+ {
+ endLatch.countDown();
+ }
+ }
+ }
+ }
+
+ protected abstract class AbstractCaller<C> implements Runnable
+ {
+ private final CountDownLatch startLatch;
+ private final CountDownLatch proceedLatch;
+ private final CountDownLatch finishLatch;
+ private C result;
+ private Throwable exception;
+
+ AbstractCaller(CountDownLatch startLatch,
+ CountDownLatch proceedLatch, CountDownLatch finishLatch)
+ {
+ this.startLatch = startLatch;
+ this.proceedLatch = proceedLatch;
+ this.finishLatch = finishLatch;
+ }
+
+ public void run()
+ {
+ try
+ {
+ if (startLatch != null)
+ {
+ startLatch.countDown();
+ }
+
+ if (proceedLatch != null)
+ {
+ proceedLatch.await();
+ }
+
+ result = execute();
+ }
+ catch (Throwable t)
+ {
+ exception = t;
+ }
+ finally
+ {
+ if (finishLatch != null)
+ {
+ finishLatch.countDown();
+ }
+
+ }
+ }
+
+ protected abstract C execute();
+
+ public C getResult()
+ {
+ return result;
+ }
+
+ public Throwable getException()
+ {
+ return exception;
+ }
+
+ }
+
+ protected class RemoteLockCaller extends AbstractCaller<RemoteLockResponse>
+ {
+ private final RpcTarget target;
+ private final ClusterNode caller;
+
+ public RemoteLockCaller(RpcTarget target, ClusterNode caller, CountDownLatch startLatch,
+ CountDownLatch proceedLatch, CountDownLatch finishLatch)
+ {
+ super(startLatch, proceedLatch, finishLatch);
+ this.target = target;
+ this.caller = caller;
+ }
+
+ protected RemoteLockResponse execute()
+ {
+ return target.remoteLock("test", caller, 1000);
+ }
+ }
+
+ protected class LocalLockCaller extends AbstractCaller<Boolean>
+ {
+ private final AbstractClusterLockManager target;
+
+ public LocalLockCaller(AbstractClusterLockManager target, CountDownLatch startLatch,
+ CountDownLatch proceedLatch, CountDownLatch finishLatch)
+ {
+ super(startLatch, proceedLatch, finishLatch);
+ this.target = target;
+ }
+
+ protected Boolean execute()
+ {
+ return Boolean.valueOf(target.lock("test", 20));
+ }
+ }
+
+}
\ No newline at end of file
Property changes on: trunk/testsuite/src/main/org/jboss/test/cluster/lock/ClusteredLockManagerTestBase.java
___________________________________________________________________
Name: svn:keywords
+
More information about the jboss-cvs-commits
mailing list