[jboss-cvs] JBossAS SVN: r59995 - in trunk: tools/etc/buildmagic and 1 other directory.
jboss-cvs-commits at lists.jboss.org
Thu Jan 25 08:41:32 EST 2007
Author: jerrygauth
Date: 2007-01-25 08:41:31 -0500 (Thu, 25 Jan 2007)
New Revision: 59995
Modified:
trunk/testsuite/src/main/org/jboss/test/cluster/test/DRMTestCase.java
trunk/tools/etc/buildmagic/modules.ent
Log:
JBAS-2470 - rework DRMTestCase.testIsMasterReplica to use two in-memory ClusterPartition instances communicating through a GossipRouter instead of a MockHAPartition, and add the cluster resources directory to the buildmagic classpath.
Modified: trunk/testsuite/src/main/org/jboss/test/cluster/test/DRMTestCase.java
===================================================================
--- trunk/testsuite/src/main/org/jboss/test/cluster/test/DRMTestCase.java 2007-01-25 13:15:08 UTC (rev 59994)
+++ trunk/testsuite/src/main/org/jboss/test/cluster/test/DRMTestCase.java 2007-01-25 13:41:31 UTC (rev 59995)
@@ -25,7 +25,6 @@
import java.rmi.server.UnicastRemoteObject;
import java.security.SecureRandom;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.Vector;
import java.util.List;
@@ -39,19 +38,28 @@
import junit.framework.Test;
-import org.jboss.test.JBossClusteredTestCase;
-import org.jboss.test.cluster.drm.IReplicants;
-import org.jboss.test.cluster.drm.MockHAPartition;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.config.RuntimeConfig;
+import org.jboss.cache.jmx.CacheJmxWrapper;
import org.jboss.ha.framework.interfaces.ClusterNode;
import org.jboss.ha.framework.interfaces.DistributedReplicantManager;
import org.jboss.ha.framework.interfaces.DistributedReplicantManager.ReplicantListener;
+import org.jboss.ha.framework.server.ClusterPartition;
+import org.jboss.ha.framework.server.ClusterPartitionConfig;
import org.jboss.ha.framework.server.DistributedReplicantManagerImpl;
+import org.jboss.ha.framework.server.DistributedStateImpl;
import org.jboss.jmx.adaptor.rmi.RMIAdaptor;
import org.jboss.jmx.adaptor.rmi.RMIAdaptorExt;
import org.jboss.jmx.adaptor.rmi.RMINotificationListener;
import org.jboss.logging.Logger;
+import org.jboss.test.JBossClusteredTestCase;
+import org.jboss.test.cluster.drm.IReplicants;
+import org.jboss.test.cluster.drm.MockHAPartition;
+import org.jgroups.jmx.JChannelFactory;
+import org.jgroups.stack.GossipRouter;
import org.jgroups.stack.IpAddress;
+
import EDU.oswego.cs.dl.util.concurrent.Semaphore;
/** Tests of the DistributedReplicantManagerImpl
@@ -61,7 +69,10 @@
* @version $Revision$
*/
public class DRMTestCase extends JBossClusteredTestCase
-{
+{
+ private static final String SERVICEA = "serviceA";
+ private static final String SERVICEB = "serviceB";
+
static class TestListener extends UnicastRemoteObject
implements RMINotificationListener
{
@@ -514,15 +525,19 @@
ObjectName clusterService = new ObjectName("jboss:service=DefaultPartition");
Vector view0 = (Vector) server0.getAttribute(clusterService, "CurrentView");
log.info("server0: CurrentView, "+view0);
+ log.debug("+++ testStateReplication 1");
ObjectName drmService = new ObjectName("jboss.test:service=DRMTestCase");
IReplicants drm0 = (IReplicants)
MBeanServerInvocationHandler.newProxyInstance(server0, drmService,
IReplicants.class, true);
+ log.debug("+++ testStateReplication 2");
log.info(MBeanServerInvocationHandler.class.getProtectionDomain());
TestListener listener = new TestListener(log);
server0.addNotificationListener(drmService, listener, null, null);
log.info("server0 addNotificationListener");
+ log.debug("+++ testStateReplication 3");
String address = (String) drm0.lookupLocalReplicant();
+ log.debug("+++ testStateReplication 4");
log.info("server0: lookupLocalReplicant: "+address);
assertTrue("server0: address("+address+") == server0("+servers[0]+")",
address.equals(servers[0]));
@@ -589,7 +604,9 @@
/**
* Tests the functionality of isMasterReplica(), also testing merge
- * handling.
+ * handling. This test creates and manipulates two HAPartition instances in memory,
+ * so it doesn't require a server deployment. The partitions communicate through a
+ * GossipRouter, which is stopped to simulate a network split and restarted to force a merge.
*
* TODO move this test out of the testsuite and into the cluster module
* itself, since it doesn't rely on the container.
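[The split/merge choreography introduced below reduces to the following sketch. This is not part of the commit: the port and bind address are taken from the hunk that follows, and the waits mirror the ones the test uses, since view changes and merges happen asynchronously.]

import org.jgroups.stack.GossipRouter;

// Sketch only, not part of the commit. Port and bind address mirror the
// "tunnel" stack the test selects; the generous sleeps match the test's own.
public class SplitMergeSketch
{
   public static void main(String[] args) throws Exception
   {
      GossipRouter router = new GossipRouter(12001, "127.0.0.1");
      router.start();            // TUNNEL-based members can now reach each other
      // ... start two ClusterPartition instances, assert a two-node view ...

      router.stop();             // routing lost: each member drops to a singleton view (split)
      Thread.sleep(20000);       // wait for failure detection to install the new views

      router.start();            // routing restored: the group re-merges
      Thread.sleep(30000);       // the test observes merges taking well over 10 seconds
   }
}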
@@ -598,120 +615,190 @@
*/
public void testIsMasterReplica() throws Exception
{
+ GossipRouter router = null;
log.debug("+++ testIsMasterReplica()");
-
- MBeanServer mbeanServer =
- MBeanServerFactory.createMBeanServer("mockPartition");
- try {
- ClusterNode localAddress = new ClusterNode(new IpAddress("127.0.0.1", 12345));
- MockHAPartition partition = new MockHAPartition(localAddress);
-
- DistributedReplicantManagerImpl drm =
- new DistributedReplicantManagerImpl(partition);
- drm.create();
+ try
+ {
+ String partitionName = "DRMTestCasePartition";
+ String muxFile = "jgroups-multiplexer.sar/META-INF/multiplexer-stacks.xml";
+ String stackName = "tunnel";
- // Create a fake view for the MockHAPartition
+ log.info("DRMTestCase.testIsMasterReplica() - starting GossipRouter");
+ // router characteristics here must match the definition in the stack configuration
+ router = new GossipRouter(12001, "127.0.0.1");
+ router.start();
+ Thread.sleep(10000);
- Vector remoteAddresses = new Vector();
- for (int i = 1; i < 5; i++)
- remoteAddresses.add(new ClusterNode(new IpAddress("127.0.0.1", 12340 + i)));
+ Configuration cacheConfig1 = new Configuration();
+ cacheConfig1.setMultiplexerStack(stackName);
+ RuntimeConfig cacheRuntimeConfig1 = new RuntimeConfig();
+ JChannelFactory factory1 = new JChannelFactory();
+ factory1.setMultiplexerConfig(muxFile);
+ cacheRuntimeConfig1.setMuxChannelFactory(factory1);
+ cacheConfig1.setRuntimeConfig(cacheRuntimeConfig1);
+ CacheJmxWrapper wrapper1 = new CacheJmxWrapper();
+ wrapper1.setConfiguration(cacheConfig1);
+ wrapper1.create();
+ wrapper1.start();
- Vector allNodes = new Vector(remoteAddresses);
- allNodes.add(localAddress);
- partition.setCurrentViewClusterNodes(allNodes);
+ DistributedStateImpl ds1 = new DistributedStateImpl();
+ ds1.setClusteredCache(wrapper1.getCache());
- // Pass fake state to DRMImpl
-
- HashMap replicants = new HashMap();
- ArrayList remoteResponses = new ArrayList();
- for (int i = 0; i < remoteAddresses.size(); i++)
- {
- ClusterNode node = (ClusterNode) remoteAddresses.elementAt(i);
- Integer replicant = new Integer(i + 1);
- replicants.put(node.getName(), replicant);
- HashMap localReplicant = new HashMap();
- localReplicant.put("Mock", replicant);
- remoteResponses.add(new Object[] {node.getName(), localReplicant});
- }
- HashMap services = new HashMap();
- services.put("Mock", replicants);
+ ClusterPartitionConfig config1 = new ClusterPartitionConfig();
+ config1.setPartitionName(partitionName);
+ config1.setNodeUniqueId("DRMTestCaseNode1");
+ config1.setClusteredCache(wrapper1.getCache());
+ config1.setDistributedState(ds1);
+ config1.setStateTransferTimeout(30000);
+ config1.setMethodCallTimeout(60000);
+ ClusterPartition partition1 = new ClusterPartition(config1);
+ partition1.setBindIntoJndi(false);
+ partition1.create();
+ partition1.start();
+ Thread.sleep(10000);
- int hash = 0;
- for (int i = 1; i < 5; i++)
- hash += (new Integer(i)).hashCode();
+ Configuration cacheConfig2 = new Configuration();
+ cacheConfig2.setMultiplexerStack(stackName);
+ RuntimeConfig cacheRuntimeConfig2 = new RuntimeConfig();
+ JChannelFactory factory2 = new JChannelFactory();
+ factory2.setMultiplexerConfig(muxFile);
+ cacheRuntimeConfig2.setMuxChannelFactory(factory2);
+ cacheConfig2.setRuntimeConfig(cacheRuntimeConfig2);
+ CacheJmxWrapper wrapper2 = new CacheJmxWrapper();
+ wrapper2.setConfiguration(cacheConfig2);
+ wrapper2.create();
+ wrapper2.start();
- HashMap intraviewIds = new HashMap();
- intraviewIds.put("Mock", new Integer(hash));
-
- partition.setRemoteReplicants(remoteResponses);
+ DistributedStateImpl ds2 = new DistributedStateImpl();
+ ds2.setClusteredCache(wrapper2.getCache());
- drm.setCurrentState(new Object[] {services, intraviewIds });
+ ClusterPartitionConfig config2 = new ClusterPartitionConfig();
+ config2.setPartitionName(partitionName);
+ config2.setNodeUniqueId("DRMTestCaseNode2");
+ config2.setClusteredCache(wrapper2.getCache());
+ config2.setDistributedState(ds2);
+ config2.setStateTransferTimeout(30000);
+ config2.setMethodCallTimeout(60000);
+ ClusterPartition partition2 = new ClusterPartition(config2);
+ partition2.setBindIntoJndi(false);
+ partition2.create();
+ partition2.start();
+ Thread.sleep(10000);
+
+ DistributedReplicantManager drm1 = partition1.getDistributedReplicantManager();
+ DistributedReplicantManager drm2 = partition2.getDistributedReplicantManager();
- drm.start();
+ // confirm that each partition contains two nodes
+ assertEquals("Partition1 should contain two nodes; ", 2, partition1.getCurrentView().size());
+ assertEquals("Partition2 should contain two nodes; ", 2, partition2.getCurrentView().size());
- // add a local replicant
+ drm1.add(SERVICEA, "valueA1");
+ drm2.add(SERVICEA, "valueA2");
+ drm2.add(SERVICEB, "valueB2");
- drm.add("Mock", new Integer(5));
+ // test that only one node is the master replica for serviceA
+ assertTrue("ServiceA must have a master replica",
+ drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
+ assertTrue("ServiceA must have a single master replica",
+ drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
+
+ // ServiceB should only be a master replica on partition2
+ assertFalse("ServiceB should not be a master replica on partition1",
+ drm1.isMasterReplica(SERVICEB));
+ assertTrue("ServiceB must have a master replica on partition2",
+ drm2.isMasterReplica(SERVICEB));
- // test that this node is not the master replica
+ // confirm that each partition contains correct DRM replicants for services A and B
+ assertEquals("Partition1 should contain two DRM replicants for serviceA; ",
+ 2, drm1.lookupReplicants(SERVICEA).size());
+ assertEquals("Partition2 should contain two DRM replicants for serviceA; ",
+ 2, drm2.lookupReplicants(SERVICEA).size());
+ assertEquals("Partition1 should contain one DRM replicant for serviceB; ",
+ 1, drm1.lookupReplicants(SERVICEB).size());
+ assertEquals("Partition2 should contain one DRM replicant for serviceB; ",
+ 1, drm2.lookupReplicants(SERVICEB).size());
+
+ // simulate a split of the partition
+ log.info("DRMTestCase.testIsMasterReplica() - stopping GossipRouter");
+ router.stop();
+ sleepThread(20000);
- assertFalse("Local node is not master after startup",
- drm.isMasterReplica("Mock"));
-
- // simulate a split where this node is the coord
+ // confirm that each partition contains one node
+ assertEquals("Partition1 should contain one node after split; ",
+ 1, partition1.getCurrentView().size());
+ assertEquals("Partition2 should contain one node after split; ",
+ 1, partition2.getCurrentView().size());
+
+ // confirm that each node is a master replica for serviceA after the split
+ assertTrue("ServiceA should be a master replica on partition1 after split",
+ drm1.isMasterReplica(SERVICEA));
+ assertTrue("ServiceA should be a master replica on partition2 after split",
+ drm2.isMasterReplica(SERVICEA));
- Vector localOnly = new Vector();
- localOnly.add(localAddress);
+ // ServiceB should still only be a master replica on partition2 after split
+ assertFalse("ServiceB should not be a master replica on partition1 after split",
+ drm1.isMasterReplica(SERVICEB));
+ assertTrue("ServiceB must have a master replica on partition2 after split",
+ drm2.isMasterReplica(SERVICEB));
- partition.setCurrentViewClusterNodes(localOnly);
- partition.setRemoteReplicants(new ArrayList());
+ // Remove ServiceA replicant from partition1
+ drm1.remove(SERVICEA);
- drm.membershipChanged(remoteAddresses, new Vector(), localOnly);
+ // test that this node is not the master replica
+ assertFalse("partition1 is not master replica after dropping ServiceA replicant",
+ drm1.isMasterReplica(SERVICEA));
- // test that this node is the master replica
+ // Restore the local replicant
+ drm1.add(SERVICEA, "valueA1a");
- assertTrue("Local node is master after split", drm.isMasterReplica("Mock"));
-
- // Remove our local replicant
-
- drm.remove("Mock");
-
- // test that this node is not the master replica
-
- assertFalse("Local node is not master after dropping replicant",
- drm.isMasterReplica("Mock"));
-
- // Restore the local replicant
-
- drm.add("Mock", new Integer(5));
-
// simulate a merge
+ log.info("DRMTestCase.testIsMasterReplica() - restarting GossipRouter");
+ router.start();
+ // it seems to take more than 10 seconds for the merge to take effect
+ sleepThread(30000);
- Vector mergeGroups = new Vector();
- mergeGroups.add(remoteAddresses);
- mergeGroups.add(localOnly);
+ assertTrue(router.isStarted());
+
+ // confirm that each partition contains two nodes again
+ assertEquals("Partition1 should contain two nodes after merge; ",
+ 2, partition1.getCurrentView().size());
+ assertEquals("Partition2 should contain two nodes after merge; ",
+ 2, partition2.getCurrentView().size());
- partition.setCurrentViewClusterNodes(allNodes);
- partition.setRemoteReplicants(remoteResponses);
+ // test that only one node is the master replica for serviceA after merge
+ assertTrue("ServiceA must have a master replica after merge",
+ drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
+ assertTrue("ServiceA must have a single master replica after merge",
+ drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
+
+ // ServiceB should only be a master replica on partition2 after merge
+ assertFalse("ServiceB should not be a master replica on partition1 after merge",
+ drm1.isMasterReplica(SERVICEB));
+ assertTrue("ServiceB must have a master replica on partition2 after merge",
+ drm2.isMasterReplica(SERVICEB));
- drm.membershipChangedDuringMerge(new Vector(), remoteAddresses,
- allNodes, mergeGroups);
+ // confirm that each partition contains correct DRM replicants for services A and B after merge
+ assertEquals("Partition1 should contain two DRM replicants for serviceA after merge; ",
+ 2, drm1.lookupReplicants(SERVICEA).size());
+ assertEquals("Partition2 should contain two DRM replicants for serviceA after merge; ",
+ 2, drm2.lookupReplicants(SERVICEA).size());
+ assertEquals("Partition1 should contain one DRM replicant for serviceB after merge; ",
+ 1, drm1.lookupReplicants(SERVICEB).size());
+ assertEquals("Partition2 should contain one DRM replicant for serviceB after merge; ",
+ 1, drm2.lookupReplicants(SERVICEB).size());
- // Merge processing is done asynchronously, so pause a bit
- sleepThread(100);
-
- // test that this node is not the master replica
-
- assertFalse("Local node is not master after merge",
- drm.isMasterReplica("Mock"));
+ partition1.stop();
+ partition2.stop();
}
- finally {
- MBeanServerFactory.releaseMBeanServer(mbeanServer);
+ finally
+ {
+ log.info("DRMTestCase.testIsMasterReplica() - cleaning up resources");
+ if (router != null)
+ router.stop();
}
}
-
/**
* Tests that one thread blocking in DRM.notifyKeyListeners() does not
* prevent other threads registering/unregistering listeners. JBAS-2539
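[Aside: the two partition setups in the testIsMasterReplica hunk above are line-for-line duplicates. Read as a hypothetical helper, the wiring is as follows; the method and parameter names are invented, but every call appears verbatim in the diff.]

import org.jboss.cache.config.Configuration;
import org.jboss.cache.config.RuntimeConfig;
import org.jboss.cache.jmx.CacheJmxWrapper;
import org.jboss.ha.framework.server.ClusterPartition;
import org.jboss.ha.framework.server.ClusterPartitionConfig;
import org.jboss.ha.framework.server.DistributedStateImpl;
import org.jgroups.jmx.JChannelFactory;

public class PartitionWiringSketch
{
   // Hypothetical helper, not part of the commit: every call below appears
   // twice in the hunk above; only the names here are invented.
   static ClusterPartition createPartition(String partitionName, String nodeId,
         String muxFile, String stackName) throws Exception
   {
      // cache configured to share a multiplexed JGroups channel
      Configuration cacheConfig = new Configuration();
      cacheConfig.setMultiplexerStack(stackName);

      JChannelFactory factory = new JChannelFactory();
      factory.setMultiplexerConfig(muxFile);

      RuntimeConfig runtimeConfig = new RuntimeConfig();
      runtimeConfig.setMuxChannelFactory(factory);
      cacheConfig.setRuntimeConfig(runtimeConfig);

      CacheJmxWrapper wrapper = new CacheJmxWrapper();
      wrapper.setConfiguration(cacheConfig);
      wrapper.create();
      wrapper.start();

      // distributed state backed by the clustered cache
      DistributedStateImpl ds = new DistributedStateImpl();
      ds.setClusteredCache(wrapper.getCache());

      ClusterPartitionConfig config = new ClusterPartitionConfig();
      config.setPartitionName(partitionName);
      config.setNodeUniqueId(nodeId);
      config.setClusteredCache(wrapper.getCache());
      config.setDistributedState(ds);
      config.setStateTransferTimeout(30000);
      config.setMethodCallTimeout(60000);

      ClusterPartition partition = new ClusterPartition(config);
      partition.setBindIntoJndi(false);  // standalone test: skip JNDI binding
      partition.create();
      partition.start();
      return partition;
   }
}

With such a helper the test body would reduce to two createPartition(...) calls plus the router start/stop/start sequence.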
Modified: trunk/tools/etc/buildmagic/modules.ent
===================================================================
--- trunk/tools/etc/buildmagic/modules.ent 2007-01-25 13:15:08 UTC (rev 59994)
+++ trunk/tools/etc/buildmagic/modules.ent 2007-01-25 13:41:31 UTC (rev 59995)
@@ -43,10 +43,12 @@
<!-- Cluster -->
<property name="jboss.cluster.root" value="${project.root}/cluster/output"/>
<property name="jboss.cluster.lib" value="${jboss.cluster.root}/lib"/>
+<property name="jboss.cluster.resources" value="${jboss.cluster.root}/resources"/>
<path id="jboss.cluster.classpath">
<pathelement path="${jboss.cluster.lib}/jbossha.jar"/>
<pathelement path="${jboss.cluster.lib}/jbossha-httpsession.sar"/>
<pathelement path="${jboss.cluster.lib}/jbossha-singleton.jar"/>
+ <pathelement path="${jboss.cluster.resources}"/>
</path>
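[The new jboss.cluster.resources path entry presumably exists so that resources under cluster/output/resources, including the jgroups-multiplexer.sar/META-INF/multiplexer-stacks.xml file the test passes to JChannelFactory.setMultiplexerConfig(), resolve from the testsuite classpath. A hypothetical sanity check, not part of the commit:]

public class StackFileCheck
{
   public static void main(String[] args)
   {
      // Hypothetical check: with ${jboss.cluster.resources} on the classpath,
      // the stack file handed to JChannelFactory.setMultiplexerConfig()
      // resolves as a classpath resource.
      java.net.URL stacks = Thread.currentThread().getContextClassLoader()
            .getResource("jgroups-multiplexer.sar/META-INF/multiplexer-stacks.xml");
      if (stacks == null)
         throw new IllegalStateException("multiplexer-stacks.xml not visible on classpath");
      System.out.println("multiplexer stacks found at " + stacks);
   }
}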