JBoss Cache SVN: r4995 - core/branches/1.4.X/tests/functional/org/jboss/cache.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-01-04 14:23:10 -0500 (Fri, 04 Jan 2008)
New Revision: 4995
Added:
core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeOptimisticTest.java
core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeTest.java
Log:
Added tests for JBCACHE-1256
Added: core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeOptimisticTest.java
===================================================================
--- core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeOptimisticTest.java (rev 0)
+++ core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeOptimisticTest.java 2008-01-04 19:23:10 UTC (rev 4995)
@@ -0,0 +1,9 @@
+package org.jboss.cache;
+
+public class RemoveNodeOptimisticTest extends RemoveNodeTest
+{
+ public RemoveNodeOptimisticTest()
+ {
+ optimistic = true;
+ }
+}
Added: core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeTest.java
===================================================================
--- core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeTest.java (rev 0)
+++ core/branches/1.4.X/tests/functional/org/jboss/cache/RemoveNodeTest.java 2008-01-04 19:23:10 UTC (rev 4995)
@@ -0,0 +1,54 @@
+package org.jboss.cache;
+
+import junit.framework.TestCase;
+
+import javax.transaction.TransactionManager;
+
+/**
+ * Tests removing deep, nonexistent nodes
+ *
+ * @author Manik Surtani (<a href="mailto:manik@jboss.org">manik(a)jboss.org</a>)
+ */
+public class RemoveNodeTest extends TestCase
+{
+ protected boolean optimistic = false;
+ protected TreeCache cache;
+
+ protected void setUp() throws Exception
+ {
+ cache = new TreeCache();
+ if (optimistic) cache.setNodeLockingScheme("OPTIMISTIC");
+ cache.setTransactionManagerLookupClass(DummyTransactionManagerLookup.class.getName());
+ cache.start();
+ }
+
+ protected void tearDown()
+ {
+ cache.stop();
+ }
+
+ public void testPhantomStructuralNodesOnRemove() throws Exception
+ {
+ assert cache.peek(Fqn.fromString("/a/b/c")) == null;
+ cache.remove("/a/b/c");
+ assert cache.peek(Fqn.fromString("/a/b/c")) == null;
+ assert cache.peek(Fqn.fromString("/a/b")) == null;
+ assert cache.peek(Fqn.fromString("/a")) == null;
+
+ System.out.println("Cache: " + cache.printDetails());
+ }
+
+ public void testPhantomStructuralNodesOnRemoveTransactional() throws Exception
+ {
+ TransactionManager tm = cache.getTransactionManager();
+ assert cache.peek(Fqn.fromString("/a/b/c")) == null;
+ tm.begin();
+ cache.remove("/a/b/c");
+ tm.commit();
+ assert cache.peek(Fqn.fromString("/a/b/c")) == null;
+ assert cache.peek(Fqn.fromString("/a/b")) == null;
+ assert cache.peek(Fqn.fromString("/a")) == null;
+
+ System.out.println("Cache: " + cache.printDetails());
+ }
+}
16 years, 12 months
JBoss Cache SVN: r4994 - cache-bench-fwk/trunk/src/org/cachebench/smartfrog.
by jbosscache-commits@lists.jboss.org
Author: mircea.markus
Date: 2008-01-04 14:22:10 -0500 (Fri, 04 Jan 2008)
New Revision: 4994
Modified:
cache-bench-fwk/trunk/src/org/cachebench/smartfrog/CacheBenchmarkPrim.java
Log:
the Prim was enhanced to log the output of the run script
Modified: cache-bench-fwk/trunk/src/org/cachebench/smartfrog/CacheBenchmarkPrim.java
===================================================================
--- cache-bench-fwk/trunk/src/org/cachebench/smartfrog/CacheBenchmarkPrim.java 2008-01-04 19:03:15 UTC (rev 4993)
+++ cache-bench-fwk/trunk/src/org/cachebench/smartfrog/CacheBenchmarkPrim.java 2008-01-04 19:22:10 UTC (rev 4994)
@@ -53,7 +53,7 @@
public synchronized void sfStart() throws SmartFrogException, RemoteException
{
super.sfStart();
- log.trace("parsing the configuration...");
+ log.trace("Entered sfStart...");
try
{
String command = scriptToExec + " " + nodeIndex + " " + cacheDistribution + " -DclusterSize=" + clusterSize;
@@ -77,6 +77,11 @@
log.error("Unexpected error:" + e.getMessage(), e);
throw new RemoteException("Unexpected error",e);
}
+ log.trace("Terminating the tests...");
+ TerminationRecord terminationRecord = new TerminationRecord(TerminationRecord.NORMAL, "terminated the benchmark " +
+ getDescription(), null);
+ sfTerminate(terminationRecord);
+ log.debug("Test terminated successfully " + getDescription());
}
private File getFwkHomeDir()
@@ -97,4 +102,9 @@
super.sfTerminateWith(terminationRecord);
log.trace("sfTerminateWith called with value:" + terminationRecord);
}
+
+ public String getDescription()
+ {
+ return "( clusterSize:" + clusterSize + ", nodeIndex:" + this.nodeIndex + " )";
+ }
}
16 years, 12 months
JBoss Cache SVN: r4993 - in core/trunk/src: main/java/org/jboss/cache/invocation and 2 other directories.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-01-04 14:03:15 -0500 (Fri, 04 Jan 2008)
New Revision: 4993
Modified:
core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java
core/trunk/src/main/java/org/jboss/cache/invocation/RemoteCacheInvocationDelegate.java
core/trunk/src/test/java/org/jboss/cache/api/CacheAPIOptimisticTest.java
core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java
core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java
Log:
JBCACHE-1256 - PessimisticLockInterceptor does not clean up all temporary nodes created during a removeNode() call
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java 2008-01-04 18:52:20 UTC (rev 4992)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java 2008-01-04 19:03:15 UTC (rev 4993)
@@ -118,7 +118,7 @@
}
else
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, true, false, false, true);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, true, false, false, true, null, false);
}
return nextInterceptor(ctx);
}
@@ -130,7 +130,7 @@
protected Object handleLockMethod(InvocationContext ctx, Fqn fqn, NodeLock.LockType lockType, boolean recursive) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, lockType, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, lockType, false, false, false, false, null, false);
if (recursive)
{
//acquireLocksOnChildren(cache.peek(fqn, false), lockType, ctx);
@@ -204,7 +204,7 @@
if (trace) log.trace("Attempting to get WL on node to be moved [" + from + "]");
if (from != null && !(configuration.getIsolationLevel() == IsolationLevel.NONE))
{
- lock(ctx, from, NodeLock.LockType.WRITE, false, timeout, true, false);
+ lock(ctx, from, NodeLock.LockType.WRITE, false, timeout, true, false, null, false);
if (ctx.getGlobalTransaction() != null)
{
cache.getTransactionTable().get(ctx.getGlobalTransaction()).addRemovedNode(from);
@@ -215,7 +215,7 @@
{
//now for an RL for the new parent.
if (trace) log.trace("Attempting to get RL on new parent [" + to + "]");
- lock(ctx, to, NodeLock.LockType.READ, false, timeout, false, false);
+ lock(ctx, to, NodeLock.LockType.READ, false, timeout, false, false, null, false);
acquireLocksOnChildren(peekNode(ctx, to, false, true, false), NodeLock.LockType.READ, ctx);
}
Object retValue = nextInterceptor(ctx);
@@ -230,16 +230,27 @@
protected Object handleRemoveNodeMethod(InvocationContext ctx, GlobalTransaction tx, Fqn fqn, boolean createUndoOps) throws Throwable
{
- boolean created = acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, true, false, true, false);
+ // need to make a note of ALL nodes created here!!
+ List<Fqn> createdNodes = new LinkedList<Fqn>();
+ // we need to mark new nodes created as deleted since they are only created to form a path to the node being removed, to
+ // create a lock.
+ boolean created = acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, true, false, true, false, createdNodes, true);
if (ctx.getGlobalTransaction() != null)
{
- cache.getTransactionTable().get(ctx.getGlobalTransaction()).addRemovedNode(fqn);
+ TransactionEntry entry = tx_table.get(ctx.getGlobalTransaction());
+ entry.addRemovedNode(fqn);
+ for (Fqn f : createdNodes) entry.addRemovedNode(f);
}
- acquireLocksOnChildren(rootNode.getChildDirect(fqn), NodeLock.LockType.WRITE, ctx);
+ acquireLocksOnChildren(peekNode(ctx, fqn, false, false, false), NodeLock.LockType.WRITE, ctx);
Object retVal = nextInterceptor(ctx);
+
+ // and make sure we remove all nodes we've created for the sake of later removal.
if (ctx.getGlobalTransaction() == null)
{
+
+ for (Fqn f : createdNodes) cacheImpl.realRemove(f, true);
cacheImpl.realRemove(fqn, true);
+
NodeSPI n = peekNode(ctx, fqn, false, true, false);
if (n != null)
{
@@ -252,7 +263,7 @@
protected Object handlePutForExternalReadMethod(InvocationContext ctx, GlobalTransaction tx, Fqn fqn, Object key, Object value) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, true, true, false, true);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, true, true, false, true, null, false);
return nextInterceptor(ctx);
}
@@ -263,61 +274,61 @@
protected Object handleRemoveDataMethod(InvocationContext ctx, GlobalTransaction tx, Fqn fqn, boolean createUndoOps) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handleAddChildMethod(InvocationContext ctx, GlobalTransaction tx, Fqn parentFqn, Object childName, Node cn, boolean createUndoOps) throws Throwable
{
- acquireLocksWithTimeout(ctx, parentFqn, NodeLock.LockType.READ, false, false, false, false);
+ acquireLocksWithTimeout(ctx, parentFqn, NodeLock.LockType.READ, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handleEvictMethod(InvocationContext ctx, Fqn fqn) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, false, true, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.WRITE, false, true, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handleGetKeyValueMethod(InvocationContext ctx, Fqn fqn, Object key, boolean sendNodeEvent) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handleGetNodeMethod(InvocationContext ctx, Fqn fqn) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handleGetKeysMethod(InvocationContext ctx, Fqn fqn) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handleGetChildrenNamesMethod(InvocationContext ctx, Fqn fqn) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handlePrintMethod(InvocationContext ctx, Fqn fqn) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
protected Object handleReleaseAllLocksMethod(InvocationContext ctx, Fqn fqn) throws Throwable
{
- acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false);
+ acquireLocksWithTimeout(ctx, fqn, NodeLock.LockType.READ, false, false, false, false, null, false);
return nextInterceptor(ctx);
}
private boolean acquireLocksWithTimeout(InvocationContext ctx, Fqn fqn, NodeLock.LockType lockType,
boolean createIfNotExists, boolean zeroLockTimeout,
- boolean acquireLockOnParent, boolean reverseRemoveCheck)
+ boolean acquireLockOnParent, boolean reverseRemoveCheck, List<Fqn> createdNodes, boolean markNewNodesAsDeleted)
throws InterruptedException
{
if (fqn == null || configuration.getIsolationLevel() == IsolationLevel.NONE) return false;
@@ -334,7 +345,7 @@
{
throw new TimeoutException("Unable to acquire lock on Fqn " + fqn + " after " + timeout + " millis");
}
- created = lock(ctx, fqn, lockType, createIfNotExists, timeout, acquireLockOnParent, reverseRemoveCheck);
+ created = lock(ctx, fqn, lockType, createIfNotExists, timeout, acquireLockOnParent, reverseRemoveCheck, createdNodes, markNewNodesAsDeleted);
firstTry = false;
}
while (createIfNotExists && peekNode(ctx, fqn, false, true, false) == null);// keep trying until we have the lock (fixes concurrent remove())
@@ -347,12 +358,14 @@
* 2) acquireWriteLockOnParent is true. If so AND {@link org.jboss.cache.Node#isLockForChildInsertRemove()} then a read
* lock will be aquired for the parent of the node.
*
- * @param createIfNotExists if true, then missing nodes will be cretaed on the fly. If false, method returns if we
- * reach a node that does not exists
- * @param reverseRemoveCheck see {@link #manageReverseRemove(org.jboss.cache.transaction.GlobalTransaction, org.jboss.cache.NodeSPI, boolean)}
+ * @param createIfNotExists if true, then missing nodes will be created on the fly. If false, method returns if we
+ * reach a node that does not exist
+ * @param reverseRemoveCheck see {@link #manageReverseRemove(org.jboss.cache.transaction.GlobalTransaction, org.jboss.cache.NodeSPI, boolean)}
+ * @param createdNodes a list to which any nodes created can register their Fqns so that calling code is aware of which nodes have been newly created.
+ * @param markNewNodesAsDeleted
*/
private boolean lock(InvocationContext ctx, Fqn fqn, NodeLock.LockType lockType, boolean createIfNotExists, long timeout,
- boolean acquireWriteLockOnParent, boolean reverseRemoveCheck)
+ boolean acquireWriteLockOnParent, boolean reverseRemoveCheck, List<Fqn> createdNodes, boolean markNewNodesAsDeleted)
throws TimeoutException, LockingException, InterruptedException
{
Thread currentThread = Thread.currentThread();
@@ -380,6 +393,8 @@
currentNode = parent.addChildDirect(new Fqn(childName));
created = true;
if (trace) log.trace("Child node was null, so created child node " + childName);
+ if (createdNodes != null) createdNodes.add(currentNode.getFqn());
+ if (markNewNodesAsDeleted) currentNode.markAsDeleted(true);
}
else
{
@@ -478,7 +493,7 @@
}
/**
- * Used by {@link #lock(org.jboss.cache.InvocationContext, org.jboss.cache.Fqn, org.jboss.cache.lock.NodeLock.LockType, boolean, long, boolean, boolean)}.
+ * Used by lock()
+ * Determines whether an arbitrary node from the supplied fqn needs a write lock.
*/
private boolean writeLockNeeded(InvocationContext ctx, NodeLock.LockType lockType, int currentNodeIndex, boolean acquireWriteLockOnParent, boolean createIfNotExists, Fqn targetFqn, NodeSPI currentNode)
Modified: core/trunk/src/main/java/org/jboss/cache/invocation/RemoteCacheInvocationDelegate.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/invocation/RemoteCacheInvocationDelegate.java 2008-01-04 18:52:20 UTC (rev 4992)
+++ core/trunk/src/main/java/org/jboss/cache/invocation/RemoteCacheInvocationDelegate.java 2008-01-04 19:03:15 UTC (rev 4993)
@@ -73,12 +73,14 @@
{
if (log.isTraceEnabled())
log.trace("DataGravitationCleanup: Removing primary (" + primary + ") and backup (" + backup + ")");
- //primaryDataCleanup = MethodCallFactory.create(MethodDeclarations.removeNodeMethodLocal, primary, false);
+
getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
- removeNode(primary);
- //backupDataCleanup = MethodCallFactory.create(MethodDeclarations.removeNodeMethodLocal, backup, false);
- getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
- removeNode(backup);
+ if (!removeNode(primary))
+ {
+ // only attempt to clean up the backup if the primary did not exist - a waste of a call otherwise.
+ getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ removeNode(backup);
+ }
}
else
{
Modified: core/trunk/src/test/java/org/jboss/cache/api/CacheAPIOptimisticTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/CacheAPIOptimisticTest.java 2008-01-04 18:52:20 UTC (rev 4992)
+++ core/trunk/src/test/java/org/jboss/cache/api/CacheAPIOptimisticTest.java 2008-01-04 19:03:15 UTC (rev 4993)
@@ -1,9 +1,12 @@
package org.jboss.cache.api;
+import org.testng.annotations.Test;
+
/**
* Optimistically locked version of {@link org.jboss.cache.api.CacheAPITest}
*/
+@Test(groups = "functional")
public class CacheAPIOptimisticTest extends CacheAPITest
{
public CacheAPIOptimisticTest()
Modified: core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java 2008-01-04 18:52:20 UTC (rev 4992)
+++ core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java 2008-01-04 19:03:15 UTC (rev 4993)
@@ -2,6 +2,7 @@
import org.jboss.cache.Cache;
import org.jboss.cache.CacheFactory;
+import org.jboss.cache.CacheSPI;
import org.jboss.cache.DefaultCacheFactory;
import org.jboss.cache.Fqn;
import org.jboss.cache.Node;
@@ -12,11 +13,13 @@
import org.jboss.cache.notifications.annotation.NodeCreated;
import org.jboss.cache.notifications.event.Event;
import org.jboss.cache.transaction.GenericTransactionManagerLookup;
+import org.jboss.cache.util.CachePrinter;
import static org.testng.AssertJUnit.*;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
+import javax.transaction.TransactionManager;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -28,7 +31,7 @@
* @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
*/
-@Test(groups = {"functional"})
+@Test(groups = "functional")
public class CacheAPITest
{
private Cache<String, String> cache;
@@ -185,6 +188,8 @@
assertFalse(cache.getRoot().hasChild(fqn));
assertEquals(false, cache.removeNode(fqn));
+ System.out.println("Cache: " + CachePrinter.printCacheDetails(cache));
+
// Check that it's removed if it has a child
Fqn<String> child = Fqn.fromString("/test/fqn/child");
cache.getRoot().addChild(child);
@@ -350,6 +355,29 @@
assertTrue(cache.getRoot().getChildren().isEmpty());
}
+ public void testPhantomStructuralNodesOnRemove()
+ {
+ CacheSPI spi = (CacheSPI) cache;
+ assert spi.peek(Fqn.fromString("/a/b/c"), true, true) == null;
+ assert !spi.removeNode("/a/b/c");
+ assert spi.peek(Fqn.fromString("/a/b/c"), true, true) == null;
+ assert spi.peek(Fqn.fromString("/a/b"), true, true) == null;
+ assert spi.peek(Fqn.fromString("/a"), true, true) == null;
+ }
+
+ public void testPhantomStructuralNodesOnRemoveTransactional() throws Exception
+ {
+ CacheSPI spi = (CacheSPI) cache;
+ TransactionManager tm = spi.getTransactionManager();
+ assert spi.peek(Fqn.fromString("/a/b/c"), true, true) == null;
+ tm.begin();
+ assert !spi.removeNode("/a/b/c");
+ tm.commit();
+ assert spi.peek(Fqn.fromString("/a/b/c"), true, true) == null;
+ assert spi.peek(Fqn.fromString("/a/b"), true, true) == null;
+ assert spi.peek(Fqn.fromString("/a"), true, true) == null;
+ }
+
@CacheListener
public class Listener
{
Modified: core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java 2008-01-04 18:52:20 UTC (rev 4992)
+++ core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java 2008-01-04 19:03:15 UTC (rev 4993)
@@ -15,10 +15,30 @@
Fqn fqn = Fqn.fromString("/a/b/c");
Object key = "key", value = "value";
- public void testStaleRegionOnDataOwner() throws Exception
+ public void testStaleRegionOnDataOwnerPessimistic() throws Exception
{
- caches = createCaches(1, 2, false, true, false);
+ testDataOwner(false);
+ }
+ public void testStaleRegionOnDataOwnerOptimistic() throws Exception
+ {
+ testDataOwner(true);
+ }
+
+ public void testStaleRegionOnBuddyPessimistic() throws Exception
+ {
+ testBuddy(false);
+ }
+
+ public void testStaleRegionOnBuddyOptimistic() throws Exception
+ {
+ testBuddy(true);
+ }
+
+ private void testDataOwner(boolean optimistic) throws Exception
+ {
+ caches = createCaches(1, 2, false, true, optimistic);
+
// add some stuff on the primary
CacheSPI dataOwner = caches.get(0);
CacheSPI buddy = caches.get(1);
@@ -52,4 +72,54 @@
assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) != null : "Should have backup node for buddy";
assert dataOwner.peek(BuddyManager.getBackupFqn(buddy.getLocalAddress(), fqn), false) != null : "Should have backup data";
}
+
+ private void testBuddy(boolean optimistic) throws Exception
+ {
+ caches = createCaches(1, 3, false, true, optimistic);
+
+ // add some stuff on the primary
+ CacheSPI dataOwner = caches.get(0);
+ CacheSPI buddy = caches.get(1);
+ CacheSPI thirdInstance = caches.get(2);
+
+ assertIsBuddy(dataOwner, buddy, true);
+ assertIsBuddy(buddy, thirdInstance, true);
+ assertIsBuddy(thirdInstance, dataOwner, true);
+
+ dataOwner.put(fqn, key, value);
+
+ System.out.println("dataOwner: " + CachePrinter.printCacheLockingInfo(dataOwner));
+ System.out.println("buddy: " + CachePrinter.printCacheLockingInfo(buddy));
+ System.out.println("thirdInstance: " + CachePrinter.printCacheLockingInfo(thirdInstance));
+
+ assert dataOwner.peek(fqn, false) != null : "Should have data";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(thirdInstance.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(dataOwner.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) == null : "Should NOT have backup node for 2nd instance!";
+
+ assert buddy.peek(fqn, false) == null : "Should not have data";
+ assert buddy.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+ assert buddy.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(dataOwner.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert buddy.peek(BuddyManager.getBackupFqn(dataOwner.getLocalAddress(), fqn), false) != null : "Should have backup data";
+
+ // now do a gravitate call.
+ assert thirdInstance.get(fqn, key).equals(value) : "Data should have gravitated!";
+
+ System.out.println("dataOwner: " + CachePrinter.printCacheLockingInfo(dataOwner));
+ System.out.println("buddy: " + CachePrinter.printCacheLockingInfo(buddy));
+ System.out.println("thirdInstance: " + CachePrinter.printCacheLockingInfo(thirdInstance));
+
+ assert thirdInstance.peek(fqn, false) != null : "Should have data";
+ assert thirdInstance.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert thirdInstance.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(thirdInstance.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+
+ assert dataOwner.peek(fqn, false) == null : "Should not have data";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(dataOwner.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(thirdInstance.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert dataOwner.peek(BuddyManager.getBackupFqn(thirdInstance.getLocalAddress(), fqn), false) != null : "Should have backup data";
+ assert buddy.peek(fqn, false) == null : "Should not have data";
+ assert buddy.peek(fqn.getParent(), false) == null : "Should not have any part of the data";
+ assert buddy.peek(BuddyManager.getBackupFqn(dataOwner.getLocalAddress(), fqn), false) == null : "Should NOT have backup data";
+
+ }
}
16 years, 12 months
JBoss Cache SVN: r4992 - cache-bench-fwk/trunk.
by jbosscache-commits@lists.jboss.org
Author: mircea.markus
Date: 2008-01-04 13:52:20 -0500 (Fri, 04 Jan 2008)
New Revision: 4992
Modified:
cache-bench-fwk/trunk/build.xml
Log:
the Prim was enhanced to log the output of the run script
Modified: cache-bench-fwk/trunk/build.xml
===================================================================
--- cache-bench-fwk/trunk/build.xml 2008-01-04 18:51:19 UTC (rev 4991)
+++ cache-bench-fwk/trunk/build.xml 2008-01-04 18:52:20 UTC (rev 4992)
@@ -514,7 +514,7 @@
<mkdir dir="${basedir}/smartfrog/sfClasses"/>
<jar destfile="${basedir}/smartfrog/sfClasses/cacheBenchmark-sf.jar">
<fileset dir="${framework.output.dir}" includes="**/smartfrog/**"/>
- <fileset dir="${basedir}/smartfrog" excludes="**/sfClasses/**"/>
+ <!--<fileset dir="${basedir}/smartfrog" excludes="**/sfClasses/**"/>-->
</jar>
</target>
</project>
16 years, 12 months
JBoss Cache SVN: r4991 - cache-bench-fwk/trunk/smartfrog.
by jbosscache-commits@lists.jboss.org
Author: mircea.markus
Date: 2008-01-04 13:51:19 -0500 (Fri, 04 Jan 2008)
New Revision: 4991
Modified:
cache-bench-fwk/trunk/smartfrog/main.sf
Log:
changed config
Modified: cache-bench-fwk/trunk/smartfrog/main.sf
===================================================================
--- cache-bench-fwk/trunk/smartfrog/main.sf 2008-01-04 17:54:32 UTC (rev 4990)
+++ cache-bench-fwk/trunk/smartfrog/main.sf 2008-01-04 18:51:19 UTC (rev 4991)
@@ -1,13 +1,64 @@
-#include "org/smartfrog/components.sf"
-#include "cacheBenchComponent.sf"
-
-sfConfig extends Compound {
-
- node1 extends BaseCacheBenchPrim{
- sfProcessHost "localhost";
- nodeIndex 1;
- scriptToExec "runNode.bat"
- }
-
-}
-
+#include "org/smartfrog/components.sf"
+
+
+BaseCacheBenchPrim extends Prim {
+ sfClass "org.cachebench.smartfrog.CacheBenchmarkPrim";
+
+ scriptToExec "./runNode.sh"
+
+ //FQN of the directory where the framework was checked out (normally the parent of the dir that contains this file)
+ //this should be edited
+ cacheBenchmarkHome "/qa/home/mmarkus/code/cache-bench-fwk";
+
+ //should be the name of a subdirectory of 'cache-products' directory.
+ cacheDistribution "jbosscache-2.0.0";
+
+ //defines on how many nodes the benchmark will run
+ clusterSize 1;
+
+ //might take a value from 0..max_nr_of_nodes, representing the index of the current node in the cluster
+ nodeIndex TBD;
+}
+
+sfConfig extends Compound {
+
+ node1 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster01";
+ nodeIndex 0;
+ }
+
+ node2 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster02";
+ nodeIndex 1;
+ }
+
+ node3 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster03";
+ nodeIndex 2;
+ }
+
+ node4 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster04";
+ nodeIndex 3;
+ }
+
+ node5 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster05";
+ nodeIndex 4;
+ }
+
+ node6 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster06";
+ nodeIndex 5;
+ }
+
+ node7 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster07";
+ nodeIndex 6;
+ }
+
+ node8 extends BaseCacheBenchPrim{
+ sfProcessHost "cluster08";
+ nodeIndex 7;
+ }
+}
16 years, 12 months
JBoss Cache SVN: r4990 - in core/branches/1.4.X/src/org/jboss/cache: marshall and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-01-04 12:54:32 -0500 (Fri, 04 Jan 2008)
New Revision: 4990
Modified:
core/branches/1.4.X/src/org/jboss/cache/Node.java
core/branches/1.4.X/src/org/jboss/cache/marshall/JBCMethodCall.java
Log:
Optionally more verbosity in toString() if trace is enabled.
Modified: core/branches/1.4.X/src/org/jboss/cache/Node.java
===================================================================
--- core/branches/1.4.X/src/org/jboss/cache/Node.java 2008-01-04 17:49:20 UTC (rev 4989)
+++ core/branches/1.4.X/src/org/jboss/cache/Node.java 2008-01-04 17:54:32 UTC (rev 4990)
@@ -445,31 +445,40 @@
synchronized (this)
{
if (data != null)
- sb.append("\ndata=[");
- Set keys = data.keySet();
- int i=0;
- for (Iterator it = keys.iterator(); it.hasNext();)
+ {
+ if (trace)
{
- i++;
- sb.append(it.next());
+ sb.append("\ndata=").append(data.keySet());
+ }
+ else
+ {
+ sb.append("\ndata=[");
+ Set keys = data.keySet();
+ int i=0;
+ for (Iterator it = keys.iterator(); it.hasNext();)
+ {
+ i++;
+ sb.append(it.next());
- if (i == 5)
- {
- int more = keys.size() - 5;
- if (more > 1)
+ if (i == 5)
{
- sb.append(", and ");
- sb.append(more);
- sb.append(" more");
- break;
+ int more = keys.size() - 5;
+ if (more > 1)
+ {
+ sb.append(", and ");
+ sb.append(more);
+ sb.append(" more");
+ break;
+ }
}
+ else
+ {
+ sb.append(", ");
+ }
}
- else
- {
- sb.append(", ");
- }
}
- sb.append("]");
+ sb.append("]");
+ }
}
if (lock_ != null)
{
Modified: core/branches/1.4.X/src/org/jboss/cache/marshall/JBCMethodCall.java
===================================================================
--- core/branches/1.4.X/src/org/jboss/cache/marshall/JBCMethodCall.java 2008-01-04 17:49:20 UTC (rev 4989)
+++ core/branches/1.4.X/src/org/jboss/cache/marshall/JBCMethodCall.java 2008-01-04 17:54:32 UTC (rev 4990)
@@ -7,6 +7,7 @@
package org.jboss.cache.marshall;
import org.jgroups.blocks.MethodCall;
+import org.apache.commons.logging.LogFactory;
import java.lang.reflect.Method;
@@ -22,6 +23,7 @@
{
private int methodId;
private static final long serialVersionUID = -4826713878871338199L;
+ private static boolean trace = LogFactory.getLog(JBCMethodCall.class).isTraceEnabled();
public JBCMethodCall()
{
@@ -61,16 +63,28 @@
public String toString()
{
StringBuffer ret = new StringBuffer();
- boolean first = true;
ret.append(method_name);
ret.append("; id:");
ret.append(methodId);
ret.append("; Args: (");
if (args != null && args.length > 0)
{
- ret.append(" arg[0] = ");
- ret.append(args[0]);
- if (args.length > 1) ret.append(" ...");
+ if (trace)
+ {
+ boolean first = true;
+ for (int i=0; i<args.length; i++)
+ {
+ if (first) first = false;
+ else ret.append(", ");
+ ret.append(args[i]);
+ }
+ }
+ else
+ {
+ ret.append(" arg[0] = ");
+ ret.append(args[0]);
+ if (args.length > 1) ret.append(" ...");
+ }
}
ret.append(')');
return ret.toString();
17 years
JBoss Cache SVN: r4989 - in core/trunk/src/main/java/org/jboss/cache: marshall and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-01-04 12:49:20 -0500 (Fri, 04 Jan 2008)
New Revision: 4989
Modified:
core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java
core/trunk/src/main/java/org/jboss/cache/marshall/MethodCall.java
Log:
Optional toString() verbosity if trace is enabled.
Modified: core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java 2008-01-04 17:32:54 UTC (rev 4988)
+++ core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java 2008-01-04 17:49:20 UTC (rev 4989)
@@ -42,6 +42,7 @@
* Debug log.
*/
protected static Log log = LogFactory.getLog(UnversionedNode.class);
+ protected static boolean trace = log.isTraceEnabled();
/**
* True if all children have been loaded. This is set when CacheImpl.getChildrenNames() is called.
@@ -298,7 +299,7 @@
// notify if we actually created a new child
if (newChild == child)
{
- if (log.isTraceEnabled())
+ if (trace)
{
log.trace("created child: fqn=" + child_fqn);
}
@@ -348,17 +349,59 @@
{
synchronized (data)
{
- sb.append(" data=[");
- Set keys = data.keySet();
+ if (trace)
+ {
+ sb.append(" data=").append(data.keySet());
+ }
+ else
+ {
+ sb.append(" data=[");
+ Set keys = data.keySet();
+ int i = 0;
+ for (Object o : keys)
+ {
+ i++;
+ sb.append(o);
+
+ if (i == 5)
+ {
+ int more = keys.size() - 5;
+ if (more > 1)
+ {
+ sb.append(", and ");
+ sb.append(more);
+ sb.append(" more");
+ break;
+ }
+ }
+ else
+ {
+ sb.append(", ");
+ }
+ }
+ sb.append("]");
+ }
+ }
+ }
+ if (children != null && !children.isEmpty())
+ {
+ if (trace)
+ {
+ sb.append(" children=").append(getChildrenNamesDirect());
+ }
+ else
+ {
+ sb.append(" children=[");
+ Set names = getChildrenNamesDirect();
int i = 0;
- for (Object o : keys)
+ for (Object o : names)
{
i++;
sb.append(o);
if (i == 5)
{
- int more = keys.size() - 5;
+ int more = names.size() - 5;
if (more > 1)
{
sb.append(", and ");
@@ -375,35 +418,6 @@
sb.append("]");
}
}
- if (children != null && !children.isEmpty())
- {
- sb.append(" children=[");
- Set names = getChildrenNamesDirect();
- int i = 0;
- for (Object o : names)
- {
- i++;
- sb.append(o);
-
- if (i == 5)
- {
- int more = names.size() - 5;
- if (more > 1)
- {
- sb.append(", and ");
- sb.append(more);
- sb.append(" more");
- break;
- }
- }
- else
- {
- sb.append(", ");
- }
- }
- sb.append("]");
-
- }
if (lock_ != null)
{
if (isReadLocked())
@@ -728,7 +742,7 @@
public void setFqn(Fqn fqn)
{
- if (log.isTraceEnabled())
+ if (trace)
{
log.trace(getFqn() + " set FQN " + fqn);
}
@@ -864,7 +878,7 @@
public void setValid(boolean valid, boolean recursive)
{
this.valid = valid;
- if (log.isTraceEnabled()) log.trace("Marking node " + getFqn() + " as " + (valid ? "" : "in") + "valid");
+ if (trace) log.trace("Marking node " + getFqn() + " as " + (valid ? "" : "in") + "valid");
if (recursive)
{
for (Node child : children().values())
Modified: core/trunk/src/main/java/org/jboss/cache/marshall/MethodCall.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/marshall/MethodCall.java 2008-01-04 17:32:54 UTC (rev 4988)
+++ core/trunk/src/main/java/org/jboss/cache/marshall/MethodCall.java 2008-01-04 17:49:20 UTC (rev 4989)
@@ -6,6 +6,8 @@
*/
package org.jboss.cache.marshall;
+import org.apache.commons.logging.LogFactory;
+
import java.lang.reflect.Method;
/**
@@ -25,6 +27,7 @@
*/
public class MethodCall extends org.jgroups.blocks.MethodCall
{
+ private static boolean trace = LogFactory.getLog(MethodCall.class).isTraceEnabled();
/**
* It's unclear why this class would be serialized.
*/
@@ -84,7 +87,6 @@
public String toString()
{
StringBuffer ret = new StringBuffer();
- boolean first = true;
ret.append("MethodName: ");
ret.append(method_name);
ret.append("; MethodIdInteger: ");
@@ -92,10 +94,25 @@
ret.append("; Args: (");
if (args != null && args.length > 0)
{
- ret.append(" arg[0] = ");
- ret.append(args[0]);
- if (args.length > 1) ret.append(" ...");
+ if (trace)
+ {
+ boolean first = true;
+ for (Object arg : args)
+ {
+ if (first) first = false;
+ else ret.append(", ");
+
+ ret.append(arg);
+ }
+ }
+ else
+ {
+ ret.append(" arg[0] = ");
+ ret.append(args[0]);
+ if (args.length > 1) ret.append(" ...");
+ }
}
+
ret.append(')');
return ret.toString();
}
17 years
JBoss Cache SVN: r4988 - core/trunk/src/test/java/org/jboss/cache/buddyreplication.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-01-04 12:32:54 -0500 (Fri, 04 Jan 2008)
New Revision: 4988
Added:
core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java
Log:
Added test to ensure proper cleanup after gravitation
Added: core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/buddyreplication/GravitationCleanupTest.java 2008-01-04 17:32:54 UTC (rev 4988)
@@ -0,0 +1,55 @@
+package org.jboss.cache.buddyreplication;
+
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.util.CachePrinter;
+import org.testng.annotations.Test;
+
+/**
+ * @author Manik Surtani (<a href="mailto:manik@jboss.org">manik(a)jboss.org</a>)
+ * @since 2.1.0
+ */
+@Test(groups = "functional")
+public class GravitationCleanupTest extends BuddyReplicationTestsBase
+{
+ Fqn fqn = Fqn.fromString("/a/b/c");
+ Object key = "key", value = "value";
+
+ public void testStaleRegionOnDataOwner() throws Exception
+ {
+ caches = createCaches(1, 2, false, true, false);
+
+ // add some stuff on the primary
+ CacheSPI dataOwner = caches.get(0);
+ CacheSPI buddy = caches.get(1);
+
+ dataOwner.put(fqn, key, value);
+
+ System.out.println("dataOwner: " + CachePrinter.printCacheLockingInfo(dataOwner));
+ System.out.println("buddy: " + CachePrinter.printCacheLockingInfo(buddy));
+
+ assert dataOwner.peek(fqn, false) != null : "Should have data";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(dataOwner.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+
+ assert buddy.peek(fqn, false) == null : "Should not have data";
+ assert buddy.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+ assert buddy.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(dataOwner.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert buddy.peek(BuddyManager.getBackupFqn(dataOwner.getLocalAddress(), fqn), false) != null : "Should have backup data";
+
+ // now do a gravitate call.
+ assert buddy.get(fqn, key).equals(value) : "Data should have gravitated!";
+
+ System.out.println("dataOwner: " + CachePrinter.printCacheLockingInfo(dataOwner));
+ System.out.println("buddy: " + CachePrinter.printCacheLockingInfo(buddy));
+
+ assert buddy.peek(fqn, false) != null : "Should have data";
+ assert buddy.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(dataOwner.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert buddy.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+
+ assert dataOwner.peek(fqn, false) == null : "Should not have data";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(dataOwner.getLocalAddress())), false) == null : "Should NOT have backup node for self!";
+ assert dataOwner.peek(new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy.getLocalAddress())), false) != null : "Should have backup node for buddy";
+ assert dataOwner.peek(BuddyManager.getBackupFqn(buddy.getLocalAddress(), fqn), false) != null : "Should have backup data";
+ }
+}
17 years
JBoss Cache SVN: r4987 - core/trunk/src/main/java/org/jboss/cache/interceptors.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-01-04 12:18:33 -0500 (Fri, 04 Jan 2008)
New Revision: 4987
Modified:
core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java
Log:
JBCACHE-1251 - Re-adding invalidated node fails with invalidation + pessimistic locking
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java 2008-01-04 16:55:01 UTC (rev 4986)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/PessimisticLockInterceptor.java 2008-01-04 17:18:33 UTC (rev 4987)
@@ -388,6 +388,10 @@
return false;
}
}
+ else
+ {
+ if (!currentNode.isValid() && createIfNotExists) currentNode.setValid(true, false);
+ }
NodeLock.LockType lockTypeRequired = NodeLock.LockType.READ;
if (created || writeLockNeeded(ctx, lockType, currentIndex, acquireWriteLockOnParent, createIfNotExists, fqn, currentNode))
{
17 years