JBoss Cache SVN: r4666 - in core/trunk/src: main/java/org/jboss/cache/interceptors and 3 other directories.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2007-10-22 21:40:26 -0400 (Mon, 22 Oct 2007)
New Revision: 4666
Modified:
core/trunk/src/main/java/org/jboss/cache/CacheImpl.java
core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java
core/trunk/src/test/java/org/jboss/cache/api/pfer/PutForExternalReadTestBase.java
core/trunk/src/test/java/org/jboss/cache/factories/UnitTestCacheConfigurationFactory.java
core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java
core/trunk/src/test/java/org/jboss/cache/invalidation/VersionInconsistencyTest.java
Log:
More fixes, and patched a broken test
Modified: core/trunk/src/main/java/org/jboss/cache/CacheImpl.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/CacheImpl.java 2007-10-22 23:57:59 UTC (rev 4665)
+++ core/trunk/src/main/java/org/jboss/cache/CacheImpl.java 2007-10-23 01:40:26 UTC (rev 4666)
@@ -2760,15 +2760,31 @@
NodeSPI nodeSPI = peek(fqn, false, true);
if (nodeSPI == null)
{
+ log.trace("Node doesn't exist; creating a tombstone");
// create the node we need.
Map<K, V> m = Collections.emptyMap();
InvocationContext ic = getInvocationContext();
boolean origCacheModeLocal = ic.getOptionOverrides().isCacheModeLocal();
ic.getOptionOverrides().setCacheModeLocal(true);
- put(fqn, m);
- ic.getOptionOverrides().setCacheModeLocal(origCacheModeLocal);
+ // if we are in a tx this call should happen outside of any tx
+ try
+ {
+ Transaction suspended = null;
+ if (getTransactionManager() != null)
+ {
+ suspended = getTransactionManager().suspend();
+ }
+ put(fqn, m);
+ if (suspended != null) getTransactionManager().resume(suspended);
+ ic.getOptionOverrides().setCacheModeLocal(origCacheModeLocal);
+ }
+ catch (Exception e)
+ {
+ log.error("Unable to create tombstone!", e);
+ }
nodeSPI = (NodeSPI) root.getChild(fqn);
}
+ log.trace("Retrieved node. Setting version to " + versionToInvalidate + " and marking as invalid");
nodeSPI.setVersion(versionToInvalidate);
// mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
// be aware that it is no longer valid.
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java 2007-10-22 23:57:59 UTC (rev 4665)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java 2007-10-23 01:40:26 UTC (rev 4666)
@@ -63,7 +63,7 @@
{
MethodCall m = ctx.getMethodCall();
Option optionOverride = ctx.getOptionOverrides();
- if (optionOverride != null && optionOverride.isCacheModeLocal() && ctx.getTransaction() == null)
+ if (optionOverride != null && optionOverride.isCacheModeLocal() && (ctx.getTransaction() == null || MethodDeclarations.isTransactionLifecycleMethod(m.getMethodId())))
{
// skip replication!!
return super.invoke(ctx);
@@ -77,7 +77,7 @@
// now see if this is a CRUD method:
if (MethodDeclarations.isCrudMethod(m.getMethodId()))
{
- if (m.getMethodId() != MethodDeclarations.putForExternalReadMethodLocal_id)
+ if (m.getMethodId() != MethodDeclarations.putForExternalReadMethodLocal_id && m.getMethodId() != MethodDeclarations.putForExternalReadVersionedMethodLocal_id)
{
if (log.isDebugEnabled()) log.debug("Is a CRUD method");
Set<Fqn> fqns = new HashSet<Fqn>();
Modified: core/trunk/src/test/java/org/jboss/cache/api/pfer/PutForExternalReadTestBase.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/pfer/PutForExternalReadTestBase.java 2007-10-22 23:57:59 UTC (rev 4665)
+++ core/trunk/src/test/java/org/jboss/cache/api/pfer/PutForExternalReadTestBase.java 2007-10-23 01:40:26 UTC (rev 4666)
@@ -1,27 +1,7 @@
package org.jboss.cache.api.pfer;
-import static org.easymock.EasyMock.anyBoolean;
-import static org.easymock.EasyMock.anyInt;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.testng.AssertJUnit.assertEquals;
-import static org.testng.AssertJUnit.assertFalse;
-import static org.testng.AssertJUnit.assertNotNull;
-import static org.testng.AssertJUnit.assertNull;
-import static org.testng.AssertJUnit.assertTrue;
-import static org.testng.AssertJUnit.fail;
-
-import java.lang.reflect.Method;
-import java.util.List;
-
-import javax.transaction.SystemException;
-import javax.transaction.Transaction;
-import javax.transaction.TransactionManager;
-
import org.easymock.EasyMock;
+import static org.easymock.EasyMock.*;
import org.jboss.cache.Cache;
import org.jboss.cache.CacheFactory;
import org.jboss.cache.CacheSPI;
@@ -39,10 +19,17 @@
import org.jboss.cache.transaction.GlobalTransaction;
import org.jboss.cache.transaction.OptimisticTransactionEntry;
import org.jgroups.Address;
+import static org.testng.AssertJUnit.*;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
+import javax.transaction.SystemException;
+import javax.transaction.Transaction;
+import javax.transaction.TransactionManager;
+import java.lang.reflect.Method;
+import java.util.List;
+
@Test(groups = {"functional", "jgroups", "transaction"})
public abstract class PutForExternalReadTestBase
{
@@ -66,6 +53,8 @@
cache1 = cf.createCache(UnitTestCacheConfigurationFactory.createConfiguration(cacheMode), false);
cache1.getConfiguration().setTransactionManagerLookupClass("org.jboss.cache.transaction.DummyTransactionManagerLookup");
cache1.getConfiguration().setNodeLockingScheme(optimistic ? Configuration.NodeLockingScheme.OPTIMISTIC : Configuration.NodeLockingScheme.PESSIMISTIC);
+ cache1.getConfiguration().setSyncCommitPhase(optimistic);
+ cache1.getConfiguration().setSyncRollbackPhase(optimistic);
cache1.start();
tm1 = cache1.getConfiguration().getRuntimeConfig().getTransactionManager();
@@ -73,6 +62,8 @@
cache2 = cf.createCache(UnitTestCacheConfigurationFactory.createConfiguration(cacheMode), false);
cache2.getConfiguration().setTransactionManagerLookupClass("org.jboss.cache.transaction.DummyTransactionManagerLookup");
cache2.getConfiguration().setNodeLockingScheme(optimistic ? Configuration.NodeLockingScheme.OPTIMISTIC : Configuration.NodeLockingScheme.PESSIMISTIC);
+ cache2.getConfiguration().setSyncCommitPhase(optimistic);
+ cache2.getConfiguration().setSyncRollbackPhase(optimistic);
cache2.start();
tm2 = cache2.getConfiguration().getRuntimeConfig().getTransactionManager();
@@ -215,10 +206,15 @@
// inject a mock RPC manager so that we can test whether calls made are sync or async.
cache1.getConfiguration().getRuntimeConfig().setRPCManager(rpcManager);
- // specify what we expect called on the mock Rpc Manager. For params we don't care about, just use ANYTHING.
- // setting the mock object to expect the "sync" param to be false.
- expect(rpcManager.getReplicationQueue()).andReturn(null);
- expect(rpcManager.callRemoteMethods(anyAddresses(), (Method)anyObject(), (Object[]) anyObject(), eq(false), anyBoolean(), anyInt())).andReturn(null);
+ // invalidations will not trigger any rpc calls for PFER
+ if (!isUsingInvalidation())
+ {
+ // specify what we expect called on the mock Rpc Manager. For params we don't care about, just use ANYTHING.
+ // setting the mock object to expect the "sync" param to be false.
+ expect(rpcManager.getReplicationQueue()).andReturn(null);
+ expect(rpcManager.callRemoteMethods(anyAddresses(), (Method)anyObject(), (Object[]) anyObject(), eq(false), anyBoolean(), anyInt())).andReturn(null);
+ }
+
replay(rpcManager);
// now try a simple replication. Since the RPCManager is a mock object it will not actually replicate anything.
@@ -291,9 +287,9 @@
{
}
- if (optimistic)
+ if (optimistic && !isUsingInvalidation())
{
- // proves that the put did, in fact, barf.
+ // proves that the put did, in fact, barf. Doesn't work for invalidations since the inability to invalidate will not cause a rollback.
assertNull(cache1.get(fqn, key));
}
else
@@ -302,7 +298,8 @@
try
{
cache1.removeNode(fqn);
- fail("Should have barfed");
+ // as above, the inability to invalidate will not cause an exception
+ if (!isUsingInvalidation()) fail("Should have barfed");
}
catch (RuntimeException re)
{
Modified: core/trunk/src/test/java/org/jboss/cache/factories/UnitTestCacheConfigurationFactory.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/factories/UnitTestCacheConfigurationFactory.java 2007-10-22 23:57:59 UTC (rev 4665)
+++ core/trunk/src/test/java/org/jboss/cache/factories/UnitTestCacheConfigurationFactory.java 2007-10-23 01:40:26 UTC (rev 4666)
@@ -6,17 +6,17 @@
*/
package org.jboss.cache.factories;
-import java.io.InputStream;
-
import org.jboss.cache.config.CacheLoaderConfig;
import org.jboss.cache.config.Configuration;
+import org.jboss.cache.config.Configuration.CacheMode;
import org.jboss.cache.config.ConfigurationException;
-import org.jboss.cache.config.Configuration.CacheMode;
import org.jboss.cache.transaction.TransactionSetup;
import org.jboss.cache.xml.XmlHelper;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
+import java.io.InputStream;
+
/**
* Cache configuration factory used by unit tests.
*/
@@ -94,7 +94,7 @@
public Configuration parseFile(String filename, CacheMode mode)
{
- return parseStream(getAsInputStreamFromClassLoader(DEFAULT_CONFIGURATION_FILE), mode);
+ return parseStream(getAsInputStreamFromClassLoader(filename == null ? DEFAULT_CONFIGURATION_FILE : filename), mode);
}
public Configuration parseStream(InputStream stream, CacheMode mode)
@@ -143,6 +143,9 @@
}
}
}
+
+ // either way, set mode in the config!!
+ c.setCacheMode(mode);
return c;
}
}
Modified: core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java 2007-10-22 23:57:59 UTC (rev 4665)
+++ core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java 2007-10-23 01:40:26 UTC (rev 4666)
@@ -29,7 +29,9 @@
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import java.util.ArrayList;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;
/**
* Tests the async interceptor
@@ -40,15 +42,18 @@
public class InvalidationInterceptorTest
{
private static Log log = LogFactory.getLog(InvalidationInterceptorTest.class);
- CacheImpl<Object, Object> cache1, cache2;
+ private CacheImpl<Object, Object> cache1, cache2;
+ private Set<CacheImpl> toClean = new HashSet<CacheImpl>();
@AfterMethod
public void tearDown()
{
- TestingUtil.killCaches(cache1);
- TestingUtil.killCaches(cache2);
+ TestingUtil.killCaches(cache1, cache2);
+ for (CacheImpl c : toClean) TestingUtil.killCaches(c);
+ toClean.clear();
}
+
public void testPessimisticNonTransactional() throws Exception
{
cache1 = createCache(false);
@@ -211,8 +216,8 @@
cache2.put(fqn, "key", "value");
Assert.assertEquals("value", cache2.get(fqn, "key"));
Node n = cache1.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "But should not contain any data";
+ assertHasBeenInvalidated(n, "Should have been invalidated");
+ assertHasBeenInvalidated(cache1.peek(fqn, true, true), "Should have been invalidated");
// start a tx that cache1 will have to send out an evict ...
TransactionManager mgr1 = cache1.getTransactionManager();
@@ -480,8 +485,8 @@
// test that this has NOT replicated, but rather has been invalidated:
Assert.assertEquals("value", cache1.get(fqn, "key"));
Node n2 = cache2.get(fqn);
- assert n2 != null : "Should NOT be null; we need to have version info on all instances.";
- assert n2.get("key") == null : "Data should not have replicated!";
+ assertHasBeenInvalidated(n2, "Should have been invalidated");
+ assertHasBeenInvalidated(cache2.peek(fqn, true, true), "Should have been invalidated");
// now make sure cache2 is in sync with cache1:
cache2.put(fqn, "key", "value");
@@ -668,16 +673,7 @@
caches.get(0).put(fqn, "key", "value");
assertEquals("expecting value", "value", caches.get(0).get(fqn, "key"));
Node n = caches.get(1).get(fqn);
- if (optimistic)
- {
- assert n != null : "Should NOT be null";
- assert n.getKeys().isEmpty() : "but should be empty";
- }
- else
- {
- // only opt locking requires a stub node created on invalidation to hold the data version
- assert n == null : "Should be null!";
- }
+ assertHasBeenInvalidated(n, "Should have been invalidated");
// now put in caches.get(1), should fire an eviction
caches.get(1).put(fqn, "key", "value2");
@@ -694,6 +690,7 @@
cache.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_SYNC);
if (optimistic) cache.getConfiguration().setNodeLockingScheme("OPTIMISTIC");
cache.getConfiguration().setTransactionManagerLookupClass("org.jboss.cache.transaction.DummyTransactionManagerLookup");
+ toClean.add(cache);
return cache;
}
@@ -701,6 +698,7 @@
{
CacheImpl<Object, Object> cache = createUnstartedCache(optimistic);
cache.start();
+ toClean.add(cache);
return cache;
}
@@ -715,6 +713,7 @@
caches.get(0).start();
caches.get(1).start();
+ toClean.addAll(caches);
return caches;
}
Modified: core/trunk/src/test/java/org/jboss/cache/invalidation/VersionInconsistencyTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/invalidation/VersionInconsistencyTest.java 2007-10-22 23:57:59 UTC (rev 4665)
+++ core/trunk/src/test/java/org/jboss/cache/invalidation/VersionInconsistencyTest.java 2007-10-23 01:40:26 UTC (rev 4666)
@@ -1,20 +1,20 @@
package org.jboss.cache.invalidation;
-import org.testng.annotations.Test;
-import org.testng.annotations.BeforeTest;
-import org.testng.annotations.AfterTest;
import org.jboss.cache.Cache;
+import org.jboss.cache.CacheImpl;
import org.jboss.cache.DefaultCacheFactory;
import org.jboss.cache.Fqn;
-import org.jboss.cache.Node;
import org.jboss.cache.NodeSPI;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.misc.TestingUtil;
import org.jboss.cache.optimistic.DefaultDataVersion;
-import org.jboss.cache.misc.TestingUtil;
import org.jboss.cache.transaction.DummyTransactionManagerLookup;
-import org.jboss.cache.config.Configuration;
+import org.testng.annotations.AfterTest;
+import org.testng.annotations.BeforeTest;
+import org.testng.annotations.Test;
+import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
-import javax.transaction.Transaction;
/**
* This test simulates the problem described in JBCACHE-1155
@@ -91,7 +91,7 @@
assert val.equals("v-newer");
// test node versions
- NodeSPI n = (NodeSPI) cache1.getRoot().getChild(node);
+ NodeSPI n = (NodeSPI) ((CacheImpl) cache1).peek(node, true, true);
assert ((DefaultDataVersion) n.getVersion()).getRawVersion() == 1 : "Version should be 1";
}
}
17 years, 2 months
JBoss Cache SVN: r4665 - core/trunk/src/main/java/org/jboss/cache/interceptors.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2007-10-22 19:57:59 -0400 (Mon, 22 Oct 2007)
New Revision: 4665
Modified:
core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java
Log:
Fixed issue with removeNode return value as well as making parent nodes valid
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java 2007-10-22 23:57:26 UTC (rev 4664)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java 2007-10-22 23:57:59 UTC (rev 4665)
@@ -350,7 +350,7 @@
// post-notify
if (notify) notifier.notifyNodeRemoved(workspaceNode.getFqn(), false, null, ctx);
- return true;
+ return workspaceNode.getNode().isValid();
}
private Object removeKeyAndNotify(Object removeKey, TransactionWorkspace workspace, WorkspaceNode workspaceNode, InvocationContext ctx)
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java 2007-10-22 23:57:26 UTC (rev 4664)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java 2007-10-22 23:57:59 UTC (rev 4665)
@@ -123,7 +123,7 @@
if (underlyingNode != null && !underlyingNode.isValid())
{
// we have a tombstone
- if (!workspaceNode.isCreated()) throw new DataVersioningException("Underlying node doesn't exist but a tombstone does; workspace node should be marked as created!");
+ if (!workspaceNode.isCreated() && !workspaceNode.isDeleted()) throw new DataVersioningException("Underlying node doesn't exist but a tombstone does; workspace node should be marked as created!");
if (underlyingNode.getVersion().newerThan(workspaceNode.getVersion()))
{
// we have an out of date node here
@@ -249,7 +249,9 @@
Map mergedData = workspaceNode.getMergedData();
underlyingNode.clearDataDirect();
underlyingNode.putAllDirect(mergedData);
- underlyingNode.setValid(true, false);
+
+ // mark node and any parents as valid - if available. Versioning parents are tough though - leave as old versions?
+ validateNodeAndParents(underlyingNode);
updateVersion = true;
}
@@ -262,6 +264,12 @@
}
+ private void validateNodeAndParents(NodeSPI node)
+ {
+ node.setValid(true, false);
+ if (!node.getFqn().isRoot()) validateNodeAndParents(node.getParent());
+ }
+
private void performVersionUpdate(NodeSPI underlyingNode, WorkspaceNode workspaceNode)
{
if (workspaceNode.isVersioningImplicit())
17 years, 2 months
JBoss Cache SVN: r4664 - in core/trunk/src/test/java/org/jboss/cache: misc and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2007-10-22 19:57:26 -0400 (Mon, 22 Oct 2007)
New Revision: 4664
Modified:
core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java
core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java
Log:
Refactored test
Modified: core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java 2007-10-22 23:08:18 UTC (rev 4663)
+++ core/trunk/src/test/java/org/jboss/cache/invalidation/InvalidationInterceptorTest.java 2007-10-22 23:57:26 UTC (rev 4664)
@@ -9,7 +9,6 @@
import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.jboss.cache.Cache;
import org.jboss.cache.CacheImpl;
import org.jboss.cache.CacheSPI;
import org.jboss.cache.DefaultCacheFactory;
@@ -21,12 +20,8 @@
import org.jboss.cache.factories.XmlConfigurationParser;
import org.jboss.cache.misc.TestingUtil;
import org.jboss.cache.xml.XmlHelper;
-import static org.testng.AssertJUnit.assertEquals;
-import static org.testng.AssertJUnit.assertFalse;
-import static org.testng.AssertJUnit.assertNotNull;
-import static org.testng.AssertJUnit.assertNull;
-import static org.testng.AssertJUnit.assertTrue;
-
+import static org.testng.AssertJUnit.*;
+import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
import org.w3c.dom.Element;
@@ -45,11 +40,19 @@
public class InvalidationInterceptorTest
{
private static Log log = LogFactory.getLog(InvalidationInterceptorTest.class);
+ CacheImpl<Object, Object> cache1, cache2;
+ @AfterMethod
+ public void tearDown()
+ {
+ TestingUtil.killCaches(cache1);
+ TestingUtil.killCaches(cache2);
+ }
+
public void testPessimisticNonTransactional() throws Exception
{
- CacheImpl<Object, Object> cache1 = createCache(false);
- CacheImpl<Object, Object> cache2 = createCache(false);
+ cache1 = createCache(false);
+ cache2 = createCache(false);
Fqn fqn = Fqn.fromString("/a/b");
cache1.put(fqn, "key", "value");
@@ -65,28 +68,20 @@
// since the node already exists even PL will not remove it - but will invalidate it's data
Node n = cache1.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "Should not contain any data";
+ assertHasBeenInvalidated(n, "Should have been invalidated");
Assert.assertEquals("value", cache2.get(fqn, "key"));
// now test the invalidation:
cache1.put(fqn, "key2", "value2");
Assert.assertEquals("value2", cache1.get(fqn, "key2"));
n = cache2.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "Should not contain any data";
-
- // clean up.
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
+ assertHasBeenInvalidated(n, "Should have been invalidated");
}
public void testUnnecessaryEvictions() throws Exception
{
- CacheImpl<Object, Object> cache1 = createCache(false);
- CacheImpl<Object, Object> cache2 = createCache(false);
+ cache1 = createCache(false);
+ cache2 = createCache(false);
Fqn fqn1 = Fqn.fromString("/a/b/c");
Fqn fqn2 = Fqn.fromString("/a/b/d");
@@ -107,19 +102,13 @@
assertEquals("world", cache2.get(fqn2, "hello"));
assertNull(cache1.get(fqn1, "hello"));
assertNull(cache1.get(fqn2, "hello"));
-
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
-
}
public void testPessimisticNonTransactionalAsync() throws Exception
{
- CacheImpl<Object, Object> cache1 = createUnstartedCache(false);
- CacheImpl<Object, Object> cache2 = createUnstartedCache(false);
+ cache1 = createUnstartedCache(false);
+ cache2 = createUnstartedCache(false);
cache1.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_ASYNC);
cache2.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_ASYNC);
cache1.start();
@@ -140,8 +129,7 @@
// since the node already exists even PL will not remove it - but will invalidate it's data
Node n = cache1.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "Should not contain any data";
+ assertHasBeenInvalidated(n, "Should have been invalidated");
Assert.assertEquals("value", cache2.get(fqn, "key"));
// now test the invalidation:
@@ -151,21 +139,14 @@
// since the node already exists even PL will not remove it - but will invalidate it's data
n = cache2.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "Should not contain any data";
-
- // clean up.
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
+ assertHasBeenInvalidated(n, "Should have been invalidated");
}
public void testPessimisticTransactional() throws Exception
{
- CacheImpl<Object, Object> cache1 = createCache(false);
- CacheImpl<Object, Object> cache2 = createCache(false);
+ cache1 = createCache(false);
+ cache2 = createCache(false);
Fqn fqn = Fqn.fromString("/a/b");
cache1.put(fqn, "key", "value");
@@ -188,8 +169,7 @@
// since the node already exists even PL will not remove it - but will invalidate it's data
Node n = cache1.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "Should not contain any data";
+ assertHasBeenInvalidated(n, "Should have been invalidated");
Assert.assertEquals("value", cache2.get(fqn, "key"));
// now test the invalidation again
@@ -204,8 +184,7 @@
Assert.assertEquals("value2", cache1.get(fqn, "key2"));
// since the node already exists even PL will not remove it - but will invalidate it's data
n = cache2.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "Should not contain any data";
+ assertHasBeenInvalidated(n, "Should have been invalidated");
// test a rollback
txm = cache2.getTransactionManager();
@@ -218,22 +197,14 @@
Assert.assertEquals("value2", cache1.get(fqn, "key2"));
n = cache2.get(fqn);
- assert n != null : "Should not be null";
- assert n.getKeys().isEmpty() : "Should not contain any data";
-
- // clean up.
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
-
+ assertHasBeenInvalidated(n, "Should have been invalidated");
}
public void testOptSyncUnableToEvict() throws Exception
{
- CacheImpl<Object, Object> cache1 = createCache(true);
- CacheImpl<Object, Object> cache2 = createCache(true);
+ cache1 = createCache(true);
+ cache2 = createCache(true);
Fqn fqn = Fqn.fromString("/a/b");
@@ -275,17 +246,12 @@
{
Assert.assertTrue("Ought to have failed!", true);
}
- // clean up.
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
}
public void testPessTxSyncUnableToEvict() throws Exception
{
- CacheImpl<Object, Object> cache1 = createCache(false);
- CacheImpl<Object, Object> cache2 = createCache(false);
+ cache1 = createCache(false);
+ cache2 = createCache(false);
Fqn fqn = Fqn.fromString("/a/b");
@@ -325,17 +291,12 @@
{
Assert.assertTrue("Ought to have succeeded!", false);
}
- // clean up.
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
}
public void testPessTxAsyncUnableToEvict() throws Exception
{
- CacheImpl<Object, Object> cache1 = createUnstartedCache(false);
- CacheImpl<Object, Object> cache2 = createUnstartedCache(false);
+ cache1 = createUnstartedCache(false);
+ cache2 = createUnstartedCache(false);
cache1.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_ASYNC);
cache2.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_ASYNC);
cache1.start();
@@ -379,11 +340,6 @@
{
Assert.assertTrue("Ought to have succeeded!", false);
}
- // clean up.
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
}
public void testPessimisticNodeRemoval() throws Exception
@@ -399,76 +355,55 @@
@SuppressWarnings("unchecked")
private void nodeRemovalTest(boolean optimistic) throws Exception
{
- CacheImpl<Object, Object> cache1 = null;
- CacheImpl<Object, Object> cache2 = null;
- try
- {
- cache1 = createCache(optimistic);
- cache2 = createCache(optimistic);
-
- Node root1 = cache1.getRoot();
- Node root2 = cache2.getRoot();
-
- // this fqn is relative, but since it is from the root it may as well be absolute
- Fqn<String> fqn = Fqn.fromString("/test/fqn");
- cache1.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
- cache1.put(fqn, "key", "value");
- assertEquals("value", cache1.get(fqn, "key"));
- cache2.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
- cache2.put(fqn, "key", "value");
- assertEquals("value", cache2.get(fqn, "key"));
+ cache1 = createCache(optimistic);
+ cache2 = createCache(optimistic);
+
+ Node root1 = cache1.getRoot();
+ Node root2 = cache2.getRoot();
+
+ // this fqn is relative, but since it is from the root it may as well be absolute
+ Fqn<String> fqn = Fqn.fromString("/test/fqn");
+ cache1.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ cache1.put(fqn, "key", "value");
+ assertEquals("value", cache1.get(fqn, "key"));
+ cache2.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ cache2.put(fqn, "key", "value");
+ assertEquals("value", cache2.get(fqn, "key"));
+
+ assertEquals(true, cache1.removeNode(fqn));
+ assertFalse(root1.hasChild(fqn));
+ Node remoteNode = root2.getChild(fqn);
+ checkRemoteNodeIsRemoved(remoteNode);
+ assertEquals(false, cache1.removeNode(fqn));
+
+ Fqn<String> child = Fqn.fromString("/test/fqn/child");
+ cache1.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ cache1.put(child, "key", "value");
+ assertEquals("value", cache1.get(child, "key"));
+ cache2.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ cache2.put(child, "key", "value");
+ assertEquals("value", cache2.get(child, "key"));
+
+ assertEquals(true, cache1.removeNode(fqn));
+ assertFalse(root1.hasChild(fqn));
+ remoteNode = root2.getChild(fqn);
+ checkRemoteNodeIsRemoved(remoteNode);
+ assertEquals(false, cache1.removeNode(fqn));
+ }
- assertEquals(true, cache1.removeNode(fqn));
- assertFalse(root1.hasChild(fqn));
- Node remoteNode = root2.getChild(fqn);
- checkRemoteNodeIsRemoved(remoteNode);
- assertEquals(false, cache1.removeNode(fqn));
-
- Fqn<String> child = Fqn.fromString("/test/fqn/child");
- cache1.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
- cache1.put(child, "key", "value");
- assertEquals("value", cache1.get(child, "key"));
- cache2.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
- cache2.put(child, "key", "value");
- assertEquals("value", cache2.get(child, "key"));
-
- assertEquals(true, cache1.removeNode(fqn));
- assertFalse(root1.hasChild(fqn));
- remoteNode = root2.getChild(fqn);
- checkRemoteNodeIsRemoved(remoteNode);
- assertEquals(false, cache1.removeNode(fqn));
- }
- finally
+ private void checkRemoteNodeIsRemoved(Node<Object, Object> remoteNode)
+ {
+ assertHasBeenInvalidated(remoteNode, "Should have been removed");
+ // Recursively check any children
+ if (remoteNode != null)
{
- if (cache1 != null)
+ for (Node<Object, Object> child : remoteNode.getChildren())
{
- cache1.stop();
- cache1.destroy();
+ checkRemoteNodeIsRemoved(child);
}
- if (cache2 != null)
- {
- cache2.stop();
- cache2.destroy();
- }
-
}
-
}
- private void checkRemoteNodeIsRemoved(Node<Object, Object> remoteNode) {
-
- assertNotNull("remoteNode " + remoteNode.getFqn() +" is not null", remoteNode);
- // FIXME A simple isValid() check should suffice,
- // but that's not implemented
- //assertFalse("remoteNode is not valid", remoteNode.isValid());
- assertEquals("remoteNode " + remoteNode.getFqn() +" has no keys", 0, remoteNode.getKeys().size());
- // Recursively check any children
- for (Node<Object, Object> child : remoteNode.getChildren())
- {
- checkRemoteNodeIsRemoved(child);
- }
- }
-
public void testPessimisticNodeResurrection() throws Exception {
nodeResurrectionTest(false);
}
@@ -479,66 +414,46 @@
private void nodeResurrectionTest(boolean optimistic) throws Exception
{
- CacheImpl<Object, Object> cache1 = null;
- CacheImpl<Object, Object> cache2 = null;
- try
- {
- cache1 = createCache(optimistic);
- cache2 = createCache(optimistic);
-
- // this fqn is relative, but since it is from the root it may as well be absolute
- Fqn<String> fqn = Fqn.fromString("/test/fqn1");
- cache1.put(fqn, "key", "value");
- assertEquals("value", cache1.get(fqn, "key"));
- assertEquals(null, cache2.get(fqn, "key"));
- // Change the value in order to increment the version if Optimistic is used
- cache1.put(fqn, "key", "newValue");
- assertEquals("newValue", cache1.get(fqn, "key"));
- assertEquals(null, cache2.get(fqn, "key"));
-
- assertEquals(true, cache1.removeNode(fqn));
- assertEquals(null, cache1.get(fqn, "key"));
- assertEquals(null, cache2.get(fqn, "key"));
-
- // Restore locally
- cache1.put(fqn, "key", "value");
- assertEquals("value", cache1.get(fqn, "key"));
- assertEquals(null, cache2.get(fqn, "key"));
-
- // Repeat, but now restore the node on the remote cache
- fqn = Fqn.fromString("/test/fqn2");
- cache1.put(fqn, "key", "value");
- assertEquals("value", cache1.get(fqn, "key"));
- assertEquals(null, cache2.get(fqn, "key"));
- // Change the value in order to increment the version if Optimistic is used
- cache1.put(fqn, "key", "newValue");
- assertEquals("newValue", cache1.get(fqn, "key"));
- assertEquals(null, cache2.get(fqn, "key"));
-
- assertEquals(true, cache1.removeNode(fqn));
- assertEquals(null, cache1.get(fqn, "key"));
- assertEquals(null, cache2.get(fqn, "key"));
-
- // Restore on remote cache
- cache2.put(fqn, "key", "value");
- assertEquals("value", cache2.get(fqn, "key"));
- assertEquals(null, cache1.get(fqn, "key"));
- }
- finally
- {
- if (cache1 != null)
- {
- cache1.stop();
- cache1.destroy();
- }
- if (cache2 != null)
- {
- cache2.stop();
- cache2.destroy();
- }
-
- }
-
+ cache1 = createCache(optimistic);
+ cache2 = createCache(optimistic);
+
+ // this fqn is relative, but since it is from the root it may as well be absolute
+ Fqn<String> fqn = Fqn.fromString("/test/fqn1");
+ cache1.put(fqn, "key", "value");
+ assertEquals("value", cache1.get(fqn, "key"));
+ assertEquals(null, cache2.get(fqn, "key"));
+ // Change the value in order to increment the version if Optimistic is used
+ cache1.put(fqn, "key", "newValue");
+ assertEquals("newValue", cache1.get(fqn, "key"));
+ assertEquals(null, cache2.get(fqn, "key"));
+
+ assertEquals(true, cache1.removeNode(fqn));
+ assertEquals(null, cache1.get(fqn, "key"));
+ assertEquals(null, cache2.get(fqn, "key"));
+
+ // Restore locally
+ cache1.put(fqn, "key", "value");
+ assertEquals("value", cache1.get(fqn, "key"));
+ assertEquals(null, cache2.get(fqn, "key"));
+
+ // Repeat, but now restore the node on the remote cache
+ fqn = Fqn.fromString("/test/fqn2");
+ cache1.put(fqn, "key", "value");
+ assertEquals("value", cache1.get(fqn, "key"));
+ assertEquals(null, cache2.get(fqn, "key"));
+ // Change the value in order to increment the version if Optimistic is used
+ cache1.put(fqn, "key", "newValue");
+ assertEquals("newValue", cache1.get(fqn, "key"));
+ assertEquals(null, cache2.get(fqn, "key"));
+
+ assertEquals(true, cache1.removeNode(fqn));
+ assertEquals(null, cache1.get(fqn, "key"));
+ assertEquals(null, cache2.get(fqn, "key"));
+
+ // Restore on remote cache
+ cache2.put(fqn, "key", "value");
+ assertEquals("value", cache2.get(fqn, "key"));
+ assertEquals(null, cache1.get(fqn, "key"));
}
private void dumpVersionInfo(CacheSPI c1, CacheSPI c2, Fqn fqn)
@@ -554,8 +469,8 @@
public void testOptimistic() throws Exception
{
- CacheImpl<Object, Object> cache1 = createCache(true);
- CacheImpl<Object, Object> cache2 = createCache(true);
+ cache1 = createCache(true);
+ cache2 = createCache(true);
Fqn fqn = Fqn.fromString("/a/b");
cache1.put(fqn, "key", "value");
@@ -574,8 +489,8 @@
dumpVersionInfo(cache1, cache2, fqn);
Node n1 = cache1.get(fqn);
- assert n1 != null : "Should NOT be null; we need to have version info on all instances.";
- assert n1.get("key") == null : "Data should not have replicated!";
+ assertHasBeenInvalidated(n1, "Should have been invalidated");
+ assertHasBeenInvalidated(cache1.peek(fqn, true, true), "Should have been invalidated");
Assert.assertEquals("value", cache2.get(fqn, "key"));
@@ -586,9 +501,8 @@
Assert.assertEquals("value2", cache1.get(fqn, "key2"));
n2 = cache2.get(fqn);
- assert n2 != null : "Should NOT be null; we need to have version info on all instances.";
- assert n2.get("key") == null : "Data should have invalidated!";
- assert n2.get("key2") == null : "Data should have invalidated!";
+ assertHasBeenInvalidated(n2, "Should have been invalidated");
+ assertHasBeenInvalidated(cache2.peek(fqn, false, false), "Should have been invalidated");
// with tx's
TransactionManager txm = cache2.getTransactionManager();
@@ -600,8 +514,8 @@
txm.commit();
n1 = cache1.get(fqn);
- assert n1 != null : "Should NOT be null; we need to have version info on all instances.";
- assert n1.get("key") == null : "Data should be null!";
+ assertHasBeenInvalidated(n1, "Should have been invalidated");
+ assertHasBeenInvalidated(cache1.peek(fqn, false, false), "Should have been invalidated");
Assert.assertEquals("value", cache2.get(fqn, "key"));
// now test the invalidation again
@@ -615,8 +529,8 @@
Assert.assertEquals("value2", cache1.get(fqn, "key2"));
n2 = cache2.get(fqn);
- assert n2 != null : "Should NOT be null; we need to have version info on all instances.";
- assert n2.get("key2") == null : "Data should have invalidated!";
+ assertHasBeenInvalidated(n2, "Should have been invalidated");
+ assertHasBeenInvalidated(cache2.peek(fqn, false, false), "Should have been invalidated");
// test a rollback
txm = cache2.getTransactionManager();
@@ -629,20 +543,15 @@
Assert.assertEquals("value2", cache1.get(fqn, "key2"));
n2 = cache2.get(fqn);
- assert n2 != null : "Should NOT be null; we need to have version info on all instances.";
- assert n2.get("key2") == null : "Should not have committed!";
-
- // clean up.
- cache1.stop();
- cache2.stop();
- cache1 = null;
- cache2 = null;
-
+ assertHasBeenInvalidated(n2, "Should have been invalidated");
+ assertHasBeenInvalidated(cache2.peek(fqn, false, false), "Should have been invalidated");
}
public void testPessimisticNonTransactionalWithCacheLoader() throws Exception
{
List<CacheImpl<Object, Object>> caches = createCachesWithSharedCL(false);
+ cache1 = caches.get(0);
+ cache2 = caches.get(1);
Fqn fqn = Fqn.fromString("/a/b");
caches.get(0).put(fqn, "key", "value");
@@ -661,19 +570,13 @@
Assert.assertEquals("value2", caches.get(1).get(fqn, "key2"));
Assert.assertEquals("value", caches.get(0).get(fqn, "key"));
Assert.assertEquals("value", caches.get(1).get(fqn, "key"));
-
- // clean up.
- caches.get(0).remove(fqn);
- caches.get(1).remove(fqn);
- caches.get(0).stop();
- caches.get(1).stop();
- caches.set(0, null);
- caches.set(1, null);
}
public void testPessimisticTransactionalWithCacheLoader() throws Exception
{
List<CacheImpl<Object,Object>> caches = createCachesWithSharedCL(false);
+ cache1 = caches.get(0);
+ cache2 = caches.get(1);
Fqn fqn = Fqn.fromString("/a/b");
TransactionManager mgr = caches.get(0).getTransactionManager();
@@ -694,19 +597,13 @@
Assert.assertEquals("value", caches.get(0).get(fqn, "key"));
Assert.assertNull("Should be null", caches.get(0).get(fqn, "key2"));
Assert.assertNull("Should be null", caches.get(1).get(fqn, "key2"));
-
- // clean up.
- caches.get(0).remove(fqn);
- caches.get(1).remove(fqn);
- caches.get(0).stop();
- caches.get(1).stop();
- caches.set(0, null);
- caches.set(1, null);
}
public void testOptimisticWithCacheLoader() throws Exception
{
List<CacheImpl<Object, Object>> caches = createCachesWithSharedCL(true);
+ cache1 = caches.get(0);
+ cache2 = caches.get(1);
Fqn fqn = Fqn.fromString("/a/b");
TransactionManager mgr = caches.get(0).getTransactionManager();
@@ -729,14 +626,6 @@
Assert.assertEquals("value", caches.get(0).get(fqn, "key"));
Assert.assertNull("Should be null", caches.get(0).get(fqn, "key2"));
Assert.assertNull("Should be null", caches.get(1).get(fqn, "key2"));
-
- // clean up.
- caches.get(0).remove(fqn);
- caches.get(1).remove(fqn);
- caches.get(0).stop();
- caches.get(1).stop();
- caches.set(0, null);
- caches.set(1, null);
}
public void testInvalidationWithRegionBasedMarshalling() throws Exception
@@ -754,6 +643,8 @@
List<CacheImpl<Object,Object>> caches = new ArrayList<CacheImpl<Object, Object>>();
caches.add(createUnstartedCache(false));
caches.add(createUnstartedCache(false));
+ cache1 = caches.get(0);
+ cache2 = caches.get(1);
caches.get(0).getConfiguration().setUseRegionBasedMarshalling(true);
caches.get(1).getConfiguration().setUseRegionBasedMarshalling(true);
@@ -792,14 +683,7 @@
caches.get(1).put(fqn, "key", "value2");
assertEquals("expecting value2", "value2", caches.get(1).get(fqn, "key"));
n = caches.get(0).get(fqn);
- assert n != null : "Should NOT be null";
- assert n.getKeys().isEmpty() : "but should be empty";
-
- // clean up.
- caches.get(0).remove(fqn);
- caches.get(1).remove(fqn);
- caches.get(0).stop();
- caches.get(1).stop();
+ assertHasBeenInvalidated(n, "Should have been invalidated");
}
protected CacheImpl<Object, Object> createUnstartedCache(boolean optimistic) throws Exception
@@ -852,4 +736,17 @@
Element element = XmlHelper.stringToElement(xml);
return XmlConfigurationParser.parseCacheLoaderConfig(element);
}
+
+ protected void assertHasBeenInvalidated(Node n, String message)
+ {
+ // depending on how n was retrieved!
+ if (n == null)
+ {
+ assert true : message;
+ }
+ else
+ {
+ assert !n.isValid() : message;
+ }
+ }
}
Modified: core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java 2007-10-22 23:08:18 UTC (rev 4663)
+++ core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java 2007-10-22 23:57:26 UTC (rev 4664)
@@ -411,6 +411,7 @@
}
}
+ ci.stop();
ci.destroy();
}
}
17 years, 2 months
JBoss Cache SVN: r4663 - in core/trunk/src: main/java/org/jboss/cache/eviction and 6 other directories.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2007-10-22 19:08:18 -0400 (Mon, 22 Oct 2007)
New Revision: 4663
Added:
core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java
core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java
core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java
Modified:
core/trunk/src/main/java/org/jboss/cache/CacheImpl.java
core/trunk/src/main/java/org/jboss/cache/CacheSPI.java
core/trunk/src/main/java/org/jboss/cache/Node.java
core/trunk/src/main/java/org/jboss/cache/NodeSPI.java
core/trunk/src/main/java/org/jboss/cache/Region.java
core/trunk/src/main/java/org/jboss/cache/RegionImpl.java
core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java
core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java
core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java
core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java
core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java
core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java
Log:
JBCACHE-1188 - implemented Node.isValid() and tombstones for invalidations using optimistic locking
Modified: core/trunk/src/main/java/org/jboss/cache/CacheImpl.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/CacheImpl.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/CacheImpl.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -1389,13 +1389,13 @@
return n != null;
}
- /**
- * Gets node without attempt to load it from CacheLoader if not present
- *
- * @param fqn
- */
public NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes)
{
+ return peek(fqn, includeDeletedNodes, false);
+ }
+
+ public NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes, boolean includeInvalidNodes)
+ {
if (fqn == null || fqn.size() == 0) return root;
NodeSPI<K, V> n = root;
int fqnSize = fqn.size();
@@ -1411,6 +1411,10 @@
{
return null;
}
+ else if (!includeInvalidNodes && !n.isValid())
+ {
+ return null;
+ }
}
return n;
}
@@ -2401,7 +2405,7 @@
}
// Find the node. This will add the temporarily created parent nodes to the TX's node list if tx != null)
- n = findNode(fqn, version);
+ n = findNode(fqn, version, true);
if (n == null)
{
if (log.isTraceEnabled())
@@ -2430,11 +2434,11 @@
if (eviction || configuration.isNodeLockingOptimistic())
{
// if there is no parent node and the fqn is root, found == true otherwise found == false.
- found = parent_node == null ? fqn.isRoot() : parent_node.removeChildDirect(n.getFqn().getLastElement());
+ found = n.isValid() && parent_node == null ? fqn.isRoot() : parent_node.removeChildDirect(n.getFqn().getLastElement());
}
else
{
- found = !n.isDeleted();
+ found = n.isValid() && !n.isDeleted();
n.markAsDeleted(true);
}
@@ -2448,7 +2452,7 @@
// create a compensating method call (reverting the effect of
// this modification) and put it into the TX's undo list.
- if (tx != null && create_undo_ops && !eviction)
+ if (tx != null && create_undo_ops && !eviction && found)
{
undo_op = MethodCallFactory.create(MethodDeclarations.addChildMethodLocal, tx, parent_node.getFqn(), n.getFqn().getLastElement(), n, false);
@@ -2661,8 +2665,9 @@
*/
public boolean _evict(Fqn fqn) throws CacheException
{
- if (!exists(fqn))
- return true;// node does not exist. Maybe it has been recursively removed.
+ if (peek(fqn, false, true) == null) return true;
+ // node does not exist. Maybe it has been recursively removed.
+
// use remove method now if there is a child node. Otherwise, it is removed
boolean create_undo_ops = false;
boolean sendNodeEvent = false;
@@ -2724,20 +2729,25 @@
* <p/>
* Finally, the data version of the in-memory node is updated to the version being evicted to prevent versions
* going out of sync.
- *
- * @param fqn
- * @param versionToInvalidate
*/
public void invalidate(Fqn fqn, DataVersion versionToInvalidate)
{
- Node node = get(fqn); // force interceptor chain, load if necessary from cache loader.
+ Node<K, V> node = get(fqn); // force interceptor chain, load if necessary from cache loader.
if (node != null)
{
- _removeData(null, fqn, false, false, true, versionToInvalidate);
+ // mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
+ // be aware that it is no longer valid.
+ ((NodeSPI) node).setValid(false, true);
+
+ if (configuration.isNodeLockingOptimistic())
+ _removeData(null, fqn, false, false, true, versionToInvalidate);
+ else
+ _evict(fqn);
+
if (versionToInvalidate != null)
{
- NodeSPI n = peek(fqn, false);
+ NodeSPI n = peek(fqn, false, true);
n.setVersion(versionToInvalidate);
}
}
@@ -2745,15 +2755,24 @@
{
// if pessimistic locking, just return.
if (!configuration.isNodeLockingOptimistic()) return;
- // create the node we need.
- Map<K, V> m = Collections.emptyMap();
- InvocationContext ic = getInvocationContext();
- boolean origCacheModeLocal = ic.getOptionOverrides().isCacheModeLocal();
- ic.getOptionOverrides().setCacheModeLocal(true);
- put(fqn, m);
- ic.getOptionOverrides().setCacheModeLocal(origCacheModeLocal);
- NodeSPI nodeSPI = (NodeSPI) root.getChild(fqn);
+
+ // check if a tombstone already exists
+ NodeSPI nodeSPI = peek(fqn, false, true);
+ if (nodeSPI == null)
+ {
+ // create the node we need.
+ Map<K, V> m = Collections.emptyMap();
+ InvocationContext ic = getInvocationContext();
+ boolean origCacheModeLocal = ic.getOptionOverrides().isCacheModeLocal();
+ ic.getOptionOverrides().setCacheModeLocal(true);
+ put(fqn, m);
+ ic.getOptionOverrides().setCacheModeLocal(origCacheModeLocal);
+ nodeSPI = (NodeSPI) root.getChild(fqn);
+ }
nodeSPI.setVersion(versionToInvalidate);
+ // mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
+ // be aware that it is no longer valid.
+ nodeSPI.setValid(false, true);
}
}
@@ -2803,6 +2822,11 @@
childNode.markAsDeleted(false, true);
+ // tricky stuff here - this does look kludgy since we're recursively re-validating nodes
+ // potentially mistakenly revalidating tombstones, but this method would only be called
+ // when using pess locking and tombstones don't exist with PL, so this is OK.
+ childNode.setValid(true, true);
+
if (gtx != null && undoOps)
{
// 1. put undo-op in TX' undo-operations list (needed to rollback TX)
@@ -4056,6 +4080,10 @@
return false;
}
+ // mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
+ // be aware that it is no longer valid.
+ n.setValid(false, true);
+
if (log.isTraceEnabled()) log.trace("Performing a real remove for node " + f + ", marked for removal.");
if (skipMarkerCheck || n.isDeleted())
{
@@ -4080,15 +4108,20 @@
}
/**
- * Finds a node given a fully qualified name and DataVersion.
+ * Finds a node given a fully qualified name and DataVersion. Does not include invalid nodes.
*/
- private NodeSPI<K, V> findNode(Fqn fqn, DataVersion version) throws CacheException
+ private NodeSPI<K, V> findNode(Fqn fqn, DataVersion version)
{
+ return findNode(fqn, version, false);
+ }
+
+ private NodeSPI<K, V> findNode(Fqn fqn, DataVersion version, boolean includeInvalidNodes)
+ {
if (fqn == null) return null;
- NodeSPI<K, V> toReturn = peek(fqn, false);
+ NodeSPI<K, V> toReturn = peek(fqn, false, includeInvalidNodes);
- if (version != null && configuration.isNodeLockingOptimistic())
+ if (toReturn != null && version != null && configuration.isNodeLockingOptimistic())
{
// we need to check the version of the data node...
DataVersion nodeVersion = toReturn.getVersion();
Modified: core/trunk/src/main/java/org/jboss/cache/CacheSPI.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/CacheSPI.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/CacheSPI.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -152,7 +152,7 @@
Notifier getNotifier();
/**
- * Returns a node without accessing the interceptor chain.
+ * Returns a node without accessing the interceptor chain. Does not return any nodes marked as invalid.
*
* @param fqn the Fqn to look up.
* @param includeDeletedNodes if you intend to see nodes marked as deleted within the current tx, set this to true
@@ -161,6 +161,16 @@
NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes);
/**
+ * Returns a node without accessing the interceptor chain, optionally returning nodes that are marked as invalid ({@link org.jboss.cache.Node#isValid()} == false).
+ *
+ * @param fqn the Fqn to look up.
+ * @param includeDeletedNodes if you intend to see nodes marked as deleted within the current tx, set this to true
+ * @param includeInvalidNodes if true, nodes marked as being invalid are also returned.
+ * @return a node if one exists or null
+ */
+ NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes, boolean includeInvalidNodes);
+
+ /**
* Used with buddy replication's data gravitation interceptor. If marshalling is necessary, ensure that the cache is
* configured to use {@link org.jboss.cache.config.Configuration#useRegionBasedMarshalling} and the {@link org.jboss.cache.Region}
* pertaining to the Fqn passed in is activated, and has an appropriate ClassLoader.
Modified: core/trunk/src/main/java/org/jboss/cache/Node.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/Node.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/Node.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -270,8 +270,9 @@
boolean hasChild(Object o);
/**
- * Tests if a node reference is still valid. A node reference may become invalid if it has been evicted, for example,
- * in which case it should be looked up again from the cache.
+ * Tests if a node reference is still valid. A node reference may become invalid if it has been removed, invalidated
+ * or moved, either locally or remotely. If a node is invalid, it should be fetched again from the cache or a valid
+ * parent node. Operations on invalid nodes will throw a {@link org.jboss.cache.NodeNotValidException}.
*
* @return true if the node is valid.
*/
Added: core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java (rev 0)
+++ core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,30 @@
+package org.jboss.cache;
+
+/**
+ * Thrown whenever operations are attempted on a node that is no longer valid. See {@link org.jboss.cache.Node#isValid()}
+ * for details.
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+public class NodeNotValidException extends CacheException
+{
+ public NodeNotValidException()
+ {
+ }
+
+ public NodeNotValidException(Throwable cause)
+ {
+ super(cause);
+ }
+
+ public NodeNotValidException(String msg)
+ {
+ super(msg);
+ }
+
+ public NodeNotValidException(String msg, Throwable cause)
+ {
+ super(msg, cause);
+ }
+}
Modified: core/trunk/src/main/java/org/jboss/cache/NodeSPI.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/NodeSPI.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/NodeSPI.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -459,4 +459,13 @@
* @since 2.1.0
*/
void setInternalState(Map state);
+
+ /**
+ * Sets the validity of a node. By default, all nodes are valid unless they are deleted, invalidated or moved, either
+ * locally or remotely. To be used in conjunction with {@link #isValid()}.
+ * @param valid if true, the node is marked as valid; if false, the node is invalid.
+ * @param recursive if true, the validity flag passed in is applied to all children as well.
+ * @since 2.1.0
+ */
+ void setValid(boolean valid, boolean recursive);
}
Modified: core/trunk/src/main/java/org/jboss/cache/Region.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/Region.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/Region.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -6,6 +6,7 @@
*/
package org.jboss.cache;
+import org.jboss.cache.config.Configuration;
import org.jboss.cache.config.EvictionPolicyConfig;
import org.jboss.cache.config.EvictionRegionConfig;
import org.jboss.cache.eviction.EvictedEventNode;
@@ -50,6 +51,12 @@
void registerContextClassLoader(ClassLoader classLoader);
/**
+ * @return the cache-wide configuration
+ * @since 2.1.0
+ */
+ Configuration getCacheConfiguration();
+
+ /**
* Unregisters a registered {@link ClassLoader}s for this region.
*/
void unregisterContextClassLoader();
Modified: core/trunk/src/main/java/org/jboss/cache/RegionImpl.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/RegionImpl.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/RegionImpl.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -8,6 +8,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.config.Configuration;
import org.jboss.cache.config.EvictionPolicyConfig;
import org.jboss.cache.config.EvictionRegionConfig;
import org.jboss.cache.eviction.EvictedEventNode;
@@ -58,6 +59,11 @@
createQueue();
}
+ public Configuration getCacheConfiguration()
+ {
+ return regionManager.getCache().getConfiguration();
+ }
+
public void registerContextClassLoader(ClassLoader classLoader)
{
this.classLoader = classLoader;
Modified: core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -69,6 +69,7 @@
private final Map<K, V> data = new HashMap<K, V>();
private boolean lockForChildInsertRemove;
+ private boolean valid = true;
/**
* Constructs a new node with an FQN of Root.
@@ -163,6 +164,7 @@
public V get(K key)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.get(getFqn(), key);
}
@@ -190,6 +192,7 @@
public Map<K, V> getData()
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
if (cache == null) return Collections.emptyMap();
return cache.getData(getFqn());
@@ -211,6 +214,7 @@
public V put(K key, V value)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.put(getFqn(), key, value);
}
@@ -283,6 +287,7 @@
public V remove(K key)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.remove(getFqn(), key);
}
@@ -305,6 +310,8 @@
{
StringBuffer sb = new StringBuffer();
sb.append(getClass().getSimpleName());
+ if (!valid) sb.append(" (INVALID!) ");
+
if (deleted)
{
sb.append(" (deleted) [ ").append(fqn);
@@ -341,6 +348,7 @@
public Node<K, V> addChild(Fqn f)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
Fqn nf = new Fqn(getFqn(), f);
cache.put(nf, null);
return getChild(f);
@@ -375,6 +383,7 @@
public void clearData()
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
cache.removeData(getFqn());
}
@@ -385,6 +394,7 @@
public Node<K, V> getChild(Fqn fqn)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.get(new Fqn(getFqn(), fqn));
}
@@ -409,6 +419,7 @@
public Set<Object> getChildrenNames()
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.getChildrenNames(getFqn());
}
@@ -419,6 +430,7 @@
public Set<K> getKeys()
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
Set<K> keys = cache.getKeys(getFqn());
return keys == null ? Collections.<K>emptySet() : Collections.<K>unmodifiableSet(keys);
}
@@ -434,16 +446,19 @@
public boolean hasChild(Fqn f)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return getChild(f) != null;
}
public boolean hasChild(Object o)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return getChild(o) != null;
}
public V putIfAbsent(K k, V v)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
// make sure this is atomic. Not hugely performant at the moment (should use the locking interceptors) but for now ...
synchronized (this)
{
@@ -456,6 +471,7 @@
public V replace(K key, V value)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
// make sure this is atomic. Not hugely performant at the moment (should use the locking interceptors) but for now ...
synchronized (this)
{
@@ -470,6 +486,7 @@
public boolean replace(K key, V oldValue, V newValue)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
// make sure this is atomic. Not hugely performant at the moment (should use the locking interceptors) but for now ...
synchronized (this)
{
@@ -485,16 +502,19 @@
public boolean removeChild(Fqn fqn)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.removeNode(new Fqn(getFqn(), fqn));
}
public int dataSize()
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.getKeys(getFqn()).size();
}
public boolean removeChild(Object childName)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
return cache.removeNode(new Fqn(getFqn(), childName));
}
@@ -529,11 +549,13 @@
public void putAll(Map<K, V> data)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
cache.put(fqn, data);
}
public void replaceAll(Map<K, V> data)
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
cache.put(fqn, data, true);
}
@@ -648,6 +670,7 @@
public Set<Node<K, V>> getChildren()
{
+ if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid. Perhaps it has been moved or removed.");
if (cache == null) return Collections.emptySet();
Set<Node<K, V>> children = new HashSet<Node<K, V>>();
for (Object c : cache.getChildrenNames(getFqn()))
@@ -738,10 +761,22 @@
public boolean isValid()
{
- // TODO; implement this property, to detect if it has been evicted, removed by another thread, etc. Method added for now as a dummy so it exists in the API
- return true;
+ return valid;
}
+ public void setValid(boolean valid, boolean recursive)
+ {
+ this.valid = valid;
+ if (log.isTraceEnabled()) log.trace("Marking node " + getFqn() + " as " + (valid ? "" : "in") + "valid");
+ if (recursive)
+ {
+ for (Node child: children().values())
+ {
+ ((UnversionedNode) child).setValid(valid, recursive);
+ }
+ }
+ }
+
public boolean isLockForChildInsertRemove()
{
return lockForChildInsertRemove;
Modified: core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -10,6 +10,7 @@
import org.apache.commons.logging.LogFactory;
import org.jboss.cache.Fqn;
import org.jboss.cache.Region;
+import org.jboss.cache.config.Configuration;
import org.jboss.cache.lock.TimeoutException;
import java.util.concurrent.BlockingQueue;
@@ -44,6 +45,8 @@
*/
protected EvictionQueue evictionQueue;
+ protected boolean allowTombstones = false;
+
/**
* This method will create an EvictionQueue implementation and prepare it for use.
*
@@ -74,6 +77,10 @@
this.region = region;
evictionQueue = setupEvictionQueue(region);
log.debug("initialized: " + this);
+ // hacky temp solution till we have an ioc fwk to inject configuration elements as needed
+ Configuration.CacheMode cm = region.getCacheConfiguration().getCacheMode();
+ allowTombstones = region.getCacheConfiguration().isNodeLockingOptimistic() &&
+ (cm == Configuration.CacheMode.INVALIDATION_ASYNC || cm == Configuration.CacheMode.INVALIDATION_SYNC);
}
/**
@@ -340,7 +347,17 @@
NodeEntry ne = evictionQueue.getNodeEntry(fqn);
if (ne != null)
{
- evictionQueue.removeNodeEntry(ne);
+ if (allowTombstones)
+ {
+ // don't remove from the queue - deleting a node results in a tombstone, which means the node
+ // still needs to be considered for eviction!
+ return;
+ }
+ else
+ {
+ // a removeNode operation will simply remove the node. Nothing to worry about.
+ evictionQueue.removeNodeEntry(ne);
+ }
}
else
{
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -5,6 +5,7 @@
import org.jboss.cache.Fqn;
import org.jboss.cache.InvocationContext;
import org.jboss.cache.NodeSPI;
+import static org.jboss.cache.config.Configuration.CacheMode;
import org.jboss.cache.loader.CacheLoader;
import org.jboss.cache.lock.NodeLock;
import org.jboss.cache.marshall.MethodCall;
@@ -34,6 +35,7 @@
private TransactionTable txTable = null;
protected boolean isActivation = false;
protected CacheLoader loader;
+ protected boolean usingOptimisticInvalidation = false;
/**
* True if CacheStoreInterceptor is in place.
@@ -47,6 +49,9 @@
super.setCache(cache);
txTable = cache.getTransactionTable();
this.loader = cache.getCacheLoaderManager().getCacheLoader();
+ CacheMode mode = cache.getConfiguration().getCacheMode();
+ usingOptimisticInvalidation = cache.getConfiguration().isNodeLockingOptimistic() &&
+ ((mode == CacheMode.INVALIDATION_ASYNC) || (mode == CacheMode.INVALIDATION_SYNC));
}
/**
@@ -117,6 +122,10 @@
acquireLock = true;
break;
case MethodDeclarations.getNodeMethodLocal_id:
+ bypassLoadingData = !usingOptimisticInvalidation;
+ fqn = (Fqn) args[0];
+ acquireLock = true;
+ break;
case MethodDeclarations.getChildrenNamesMethodLocal_id:
bypassLoadingData = true;
case MethodDeclarations.releaseAllLocksMethodLocal_id:
@@ -134,6 +143,12 @@
// clean up nodesCreated map
cleanupNodesCreated(entry);
break;
+ case MethodDeclarations.removeNodeMethodLocal_id:
+ if (cache.getConfiguration().isNodeLockingOptimistic())
+ {
+ fqn = (Fqn) args[1];
+ }
+ break;
default:
if (!useCacheStore)
{
@@ -168,7 +183,7 @@
private void loadIfNeeded(InvocationContext ctx, Fqn fqn, Object key, boolean allKeys, boolean initNode, boolean acquireLock, MethodCall m, TransactionEntry entry, boolean recursive, boolean isMove, boolean bypassLoadingData) throws Throwable
{
- NodeSPI n = cache.peek(fqn, true);
+ NodeSPI n = cache.peek(fqn, true, true);
boolean mustLoad = mustLoad(n, key, allKeys);
if (log.isTraceEnabled())
@@ -299,6 +314,15 @@
log.trace("must load, node null");
return true;
}
+
+ // check this first!!!
+ if (!n.isValid() && cache.getConfiguration().isNodeLockingOptimistic())
+ {
+ // attempt to load again; this only happens if we have tombstones lying around, or we are using invalidation.
+ log.trace("loading again from cache loader since in-memory node is marked as invalid");
+ return true;
+ }
+
// JBCACHE-1172 Skip single-key optimization if request needs all keys
if (!allKeys)
{
@@ -429,6 +453,9 @@
// n.clearDataDirect();
n.setInternalState(nodeData);
+ // set this node as valid?
+ if (usingOptimisticInvalidation) n.setValid(true, false);
+
cache.getNotifier().notifyNodeLoaded(fqn, false, nodeData, ctx);
if (isActivation)
{
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -242,9 +242,13 @@
{
for (Fqn f : affectedFqns)
{
- NodeSPI n = cache.peek(f, false);
- Map internalState = n.getInternalState(true);
- loader.put(f, internalState);
+ // NOT going to store tombstones!!
+ NodeSPI n = cache.peek(f, false, false);
+ if (n != null)
+ {
+ Map internalState = n.getInternalState(true);
+ loader.put(f, internalState);
+ }
}
}
}
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -14,9 +14,9 @@
import org.jboss.cache.marshall.MethodCallFactory;
import org.jboss.cache.marshall.MethodDeclarations;
import org.jboss.cache.optimistic.DataVersion;
+import org.jboss.cache.optimistic.DefaultDataVersion;
import org.jboss.cache.optimistic.TransactionWorkspace;
import org.jboss.cache.optimistic.WorkspaceNode;
-import org.jboss.cache.optimistic.DefaultDataVersion;
import org.jboss.cache.transaction.GlobalTransaction;
import org.jboss.cache.transaction.OptimisticTransactionEntry;
import org.jboss.cache.transaction.TransactionEntry;
@@ -112,22 +112,24 @@
{
case MethodDeclarations.prepareMethod_id:
- log.debug("Entering InvalidationInterceptor's prepare phase");
- // fetch the modifications before the transaction is committed (and thus removed from the txTable)
- gtx = ctx.getGlobalTransaction();
- entry = txTable.get(gtx);
- if (entry == null) throw new IllegalStateException("cannot find transaction entry for " + gtx);
- modifications = new LinkedList<MethodCall>(entry.getModifications());
+ if (!optimistic)
+ {
+ log.debug("Entering InvalidationInterceptor's prepare phase");
+ // fetch the modifications before the transaction is committed (and thus removed from the txTable)
+ gtx = ctx.getGlobalTransaction();
+ entry = txTable.get(gtx);
+ if (entry == null) throw new IllegalStateException("cannot find transaction entry for " + gtx);
+ modifications = new LinkedList<MethodCall>(entry.getModifications());
- if (modifications.size() > 0)
- {
- broadcastInvalidate(modifications, gtx, tx, ctx);
+ if (modifications.size() > 0)
+ {
+ broadcastInvalidate(modifications, gtx, tx, ctx);
+ }
+ else
+ {
+ log.debug("Nothing to invalidate - no modifications in the transaction.");
+ }
}
- else
- {
- log.debug("Nothing to invalidate - no modifications in the transaction.");
- }
-
break;
case MethodDeclarations.optimisticPrepareMethod_id:
// here we just record the modifications but actually do the invalidate in commit.
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -10,7 +10,6 @@
import org.jboss.cache.CacheSPI;
import org.jboss.cache.Fqn;
import org.jboss.cache.InvocationContext;
-import org.jboss.cache.Node;
import org.jboss.cache.NodeFactory;
import org.jboss.cache.NodeSPI;
import org.jboss.cache.marshall.MethodCall;
@@ -77,7 +76,7 @@
fqns.add(newParent);
// peek into Node and get a hold of all child fqns as these need to be in the workspace.
- NodeSPI node = cache.peek(nodeFqn, true);
+ NodeSPI node = cache.peek(nodeFqn, true, true);
greedyGetFqns(fqns, node, newParent);
@@ -222,6 +221,10 @@
log.trace("Child node " + currentNode.getFqn() + " doesn't exist in workspace or has been deleted. Adding to workspace in gtx " + gtx);
workspaceNode = nodeFactory.createWorkspaceNode(currentNode, workspace);
+
+ // if the underlying node is a tombstone then mark the workspace node as newly created
+ if (!currentNode.isValid()) workspaceNode.markAsCreated();
+
if (isTargetFqn && !workspace.isVersioningImplicit())
{
workspaceNode.setVersion(version);
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -63,12 +63,12 @@
GlobalTransaction gtx = getGlobalTransaction(ctx);
TransactionWorkspace workspace = getTransactionWorkspace(gtx);
Fqn fqn = getFqn(args, m.getMethodId());
- WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, true);
+ WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, true, true);
// in the case of a data gravitation cleanup, if the primary Fqn does not exist the backup one may.
if (workspaceNode == null && m.getMethodId() == MethodDeclarations.dataGravitationCleanupMethod_id)
{
- workspaceNode = fetchWorkspaceNode(getBackupFqn(args), workspace, true);
+ workspaceNode = fetchWorkspaceNode(getBackupFqn(args), workspace, true, true);
}
if (workspaceNode != null)
@@ -232,7 +232,7 @@
return;
}
- WorkspaceNode oldParent = fetchWorkspaceNode(nodeFqn.getParent(), ws, false);
+ WorkspaceNode oldParent = fetchWorkspaceNode(nodeFqn.getParent(), ws, false, true);
if (oldParent == null) throw new NodeNotExistsException("Node " + nodeFqn.getParent() + " does not exist!");
if (parentFqn.equals(oldParent.getFqn()))
@@ -241,7 +241,7 @@
return;
}
// retrieve parent
- WorkspaceNode parent = fetchWorkspaceNode(parentFqn, ws, false);
+ WorkspaceNode parent = fetchWorkspaceNode(parentFqn, ws, false, true);
if (parent == null) throw new NodeNotExistsException("Node " + parentFqn + " does not exist!");
Object nodeName = nodeFqn.getLastElement();
@@ -275,13 +275,13 @@
private void recursiveMoveNode(WorkspaceNode node, Fqn newBase, TransactionWorkspace ws)
{
Fqn newFqn = new Fqn(newBase, node.getFqn().getLastElement());
- WorkspaceNode movedNode = fetchWorkspaceNode(newFqn, ws, true);
+ WorkspaceNode movedNode = fetchWorkspaceNode(newFqn, ws, true, true);
movedNode.putAll(node.getData());
// process children
for (Object n : node.getChildrenNames())
{
- WorkspaceNode child = fetchWorkspaceNode(new Fqn(node.getFqn(), n), ws, false);
+ WorkspaceNode child = fetchWorkspaceNode(new Fqn(node.getFqn(), n), ws, false, true);
if (child != null) recursiveMoveNode(child, newFqn, ws);
}
}
@@ -323,7 +323,7 @@
if (workspaceNode == null) return false;
Fqn parentFqn = workspaceNode.getFqn().getParent();
- WorkspaceNode parentNode = fetchWorkspaceNode(parentFqn, workspace, true);
+ WorkspaceNode parentNode = fetchWorkspaceNode(parentFqn, workspace, true, true);
if (parentNode == null) throw new NodeNotExistsException("Unable to find parent node with fqn " + parentFqn);
// pre-notify
@@ -390,7 +390,7 @@
{
Fqn fqn = (Fqn) args[0];
Object key = args[1];
- WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+ WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
if (workspaceNode == null)
{
@@ -412,7 +412,7 @@
{
Fqn fqn = (Fqn) args[0];
- WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+ WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
if (workspaceNode == null)
{
@@ -437,7 +437,7 @@
{
Fqn fqn = (Fqn) args[0];
- WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+ WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
if (workspaceNode == null)
{
@@ -458,7 +458,7 @@
{
Fqn fqn = (Fqn) args[0];
- WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+ WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
if (workspaceNode == null)
{
@@ -479,7 +479,7 @@
{
Fqn fqn = (Fqn) args[0];
- WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+ WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
if (workspaceNode == null)
{
@@ -513,15 +513,16 @@
* @param fqn Fqn of the node to retrieve
* @param workspace transaction workspace to look in
* @param undeleteIfNecessary if the node is in the workspace but marked as deleted, this method will undelete it
+ * @param includeInvalidNodes
* @return a node, if found, or null if not.
*/
- private WorkspaceNode fetchWorkspaceNode(Fqn fqn, TransactionWorkspace workspace, boolean undeleteIfNecessary)
+ private WorkspaceNode fetchWorkspaceNode(Fqn fqn, TransactionWorkspace workspace, boolean undeleteIfNecessary, boolean includeInvalidNodes)
{
WorkspaceNode workspaceNode = workspace.getNode(fqn);
// if we do not have the node then we need to add it to the workspace
if (workspaceNode == null)
{
- NodeSPI node = cache.peek(fqn, true);
+ NodeSPI node = cache.peek(fqn, true, includeInvalidNodes);
if (node == null) return null;
// create new workspace node based on the node from the underlying data structure
@@ -539,7 +540,7 @@
{
workspaceNode.markAsDeleted(false);
// re-add to parent
- WorkspaceNode parent = fetchWorkspaceNode(fqn.getParent(), workspace, true);
+ WorkspaceNode parent = fetchWorkspaceNode(fqn.getParent(), workspace, true, includeInvalidNodes);
parent.addChild(workspaceNode);
}
else
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -7,9 +7,11 @@
package org.jboss.cache.interceptors;
import org.jboss.cache.CacheException;
+import org.jboss.cache.CacheSPI;
import org.jboss.cache.Fqn;
import org.jboss.cache.InvocationContext;
import org.jboss.cache.NodeSPI;
+import static org.jboss.cache.config.Configuration.CacheMode;
import org.jboss.cache.marshall.MethodCall;
import org.jboss.cache.marshall.MethodDeclarations;
import org.jboss.cache.optimistic.DataVersioningException;
@@ -44,6 +46,15 @@
*/
public class OptimisticValidatorInterceptor extends OptimisticInterceptor
{
+ private boolean useTombstones;
+
+ public void setCache(CacheSPI cache)
+ {
+ super.setCache(cache);
+ CacheMode mode = cache.getConfiguration().getCacheMode();
+ useTombstones = (mode == CacheMode.INVALIDATION_ASYNC) || (mode == CacheMode.INVALIDATION_SYNC);
+ }
+
public Object invoke(InvocationContext ctx) throws Throwable
{
MethodCall m = ctx.getMethodCall();
@@ -93,7 +104,7 @@
if (trace) log.trace("Validating version for node [" + fqn + "]");
NodeSPI underlyingNode;
- underlyingNode = cache.peek(fqn, true);
+ underlyingNode = cache.peek(fqn, true, true);
// if this is a newly created node then we expect the underlying node to be null.
// also, if the node has been deleted in the WS and the underlying node is null, this *may* be ok ... will test again later when comparing versions
@@ -104,11 +115,22 @@
}
// needs to have been created AND modified - we allow concurrent creation if no data is put into the node
- if (underlyingNode != null && workspaceNode.isCreated() && workspaceNode.isModified())
+ if (underlyingNode != null && underlyingNode.isValid() && workspaceNode.isCreated() && workspaceNode.isModified())
{
throw new DataVersioningException("Transaction attempted to create " + fqn + " anew. It has already been created since this transaction started, by another (possibly remote) transaction. We have a concurrent creation event.");
}
+ if (underlyingNode != null && !underlyingNode.isValid())
+ {
+ // we have a tombstone
+ if (!workspaceNode.isCreated()) throw new DataVersioningException("Underlying node doesn't exist but a tombstone does; workspace node should be marked as created!");
+ if (underlyingNode.getVersion().newerThan(workspaceNode.getVersion()))
+ {
+ // we have an out of date node here
+ throw new DataVersioningException("Version mismatch for node " + fqn + ": underlying node with version " + workspaceNode.getNode().getVersion() + " is newer than workspace node, with version " + workspaceNode.getVersion());
+ }
+ }
+
if (!workspaceNode.isCreated() && (workspaceNode.isDeleted() || workspaceNode.isModified()))
{
// if the real node is null, throw a DVE
@@ -170,13 +192,22 @@
}
else
{
- NodeSPI parent = underlyingNode.getParent();
- if (parent == null)
+ // mark it as invalid so any direct references are marked as such
+ underlyingNode.setValid(false, true);
+ // we need to update versions here, too
+ performVersionUpdate(underlyingNode, workspaceNode);
+
+ if (!useTombstones)
{
- throw new CacheException("Underlying node " + underlyingNode + " has no parent");
+ // don't retain the tombstone
+ NodeSPI parent = underlyingNode.getParent();
+ if (parent == null)
+ {
+ throw new CacheException("Underlying node " + underlyingNode + " has no parent");
+ }
+
+ parent.removeChildDirect(underlyingNode.getFqn().getLastElement());
}
-
- parent.removeChildDirect(underlyingNode.getFqn().getLastElement());
}
}
else
@@ -197,7 +228,14 @@
for (Fqn child : deltas.get(1))
{
- underlyingNode.removeChildDirect(child.getLastElement());
+ // mark it as invalid so any direct references are marked as such
+ underlyingNode.getChildDirect(child.getLastElement()).setValid(false, true);
+
+ if (!useTombstones)
+ {
+ // don't retain the tombstone
+ underlyingNode.removeChildDirect(child.getLastElement());
+ }
}
updateVersion = underlyingNode.isLockForChildInsertRemove();
@@ -211,35 +249,36 @@
Map mergedData = workspaceNode.getMergedData();
underlyingNode.clearDataDirect();
underlyingNode.putAllDirect(mergedData);
+ underlyingNode.setValid(true, false);
updateVersion = true;
}
if (updateVersion)
{
- if (workspaceNode.isVersioningImplicit())
- {
- if (trace) log.trace("Versioning is implicit; incrementing.");
- underlyingNode.setVersion(((DefaultDataVersion) workspaceNode.getVersion()).increment());
- }
- else
- {
- if (trace) log.trace("Versioning is explicit; not attempting an increment.");
- underlyingNode.setVersion(workspaceNode.getVersion());
- }
-
- if (trace)
- log.trace("Setting version of node " + underlyingNode.getFqn() + " from " + workspaceNode.getVersion() + " to " + underlyingNode.getVersion());
+ performVersionUpdate(underlyingNode, workspaceNode);
}
- else
- {
- if (trace)
- log.trace("Version update on " + workspaceNode.getFqn() + " not necessary since the node is not dirty or LockParentForChildInsertRemove is set to false");
- }
}
}
}
+ private void performVersionUpdate(NodeSPI underlyingNode, WorkspaceNode workspaceNode)
+ {
+ if (workspaceNode.isVersioningImplicit())
+ {
+ if (trace) log.trace("Versioning is implicit; incrementing.");
+ underlyingNode.setVersion(((DefaultDataVersion) workspaceNode.getVersion()).increment());
+ }
+ else
+ {
+ if (trace) log.trace("Versioning is explicit; not attempting an increment.");
+ underlyingNode.setVersion(workspaceNode.getVersion());
+ }
+
+ if (trace)
+ log.trace("Setting version of node " + underlyingNode.getFqn() + " from " + workspaceNode.getVersion() + " to " + underlyingNode.getVersion());
+ }
+
private void rollBack(GlobalTransaction gtx)
{
TransactionWorkspace workspace;
Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,99 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.NodeSPI;
+import org.jboss.cache.optimistic.DefaultDataVersion;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class InvalidatedOptNodeValidityTest extends InvalidatedPessNodeValidityTest
+{
+ public InvalidatedOptNodeValidityTest()
+ {
+ optimistic = true;
+ }
+
+ public void testTombstoneRevival()
+ {
+ modifier.put(parent, K, V);
+ modifier.removeNode(parent);
+
+ NodeSPI observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+ assert observerNode == null : "Should be removed";
+
+ // now try a put on a with a newer data version; should work
+ modifier.getInvocationContext().getOptionOverrides().setDataVersion(new DefaultDataVersion(10));
+ modifier.put(parent, K, V);
+
+ NodeSPI modifierNode = (NodeSPI) modifier.getRoot().getChild(parent);
+ assert modifierNode != null : "Should not be null";
+ assert modifierNode.isValid() : "No longer a tombstone";
+ assert ((DefaultDataVersion) modifierNode.getVersion()).getRawVersion() == 10 : "Version should be updated";
+
+ observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+ assert observerNode != null : "Should not be null";
+ assert observerNode.isValid() : "No longer a tombstone";
+ assert ((DefaultDataVersion) observerNode.getVersion()).getRawVersion() == 10 : "Version should be updated";
+ }
+
+ public void testTombstoneVersioningFailure() throws Exception
+ {
+ CacheImpl modifierImpl = (CacheImpl) modifier;
+ CacheImpl observerImpl = (CacheImpl) observer;
+
+ modifier.put(parent, K, V);
+
+ // test that this exists in the (shared) loader
+ assert loader.get(parent) != null;
+ assert loader.get(parent).size() > 0;
+
+ modifier.removeNode(parent);
+
+ // assert that tombstones exist on both instances
+ assert modifierImpl.peek(parent, true, true) != null;
+ assert observerImpl.peek(parent, true, true) != null;
+ assert modifierImpl.peek(parent, false, false) == null;
+ assert observerImpl.peek(parent, false, false) == null;
+
+ // make sure this does not exist in the loader; since it HAS been removed
+ assert loader.get(parent) == null;
+
+ NodeSPI observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+ assert observerNode == null : "Should be removed";
+
+ // now try a put on a with a newer data version; should work
+ modifier.getInvocationContext().getOptionOverrides().setDataVersion(new DefaultDataVersion(1));
+ try
+ {
+ modifier.put(parent, K, V);
+ assert false : "Should have barfed!";
+ }
+ catch (RuntimeException expected)
+ {
+
+ }
+
+ NodeSPI modifierNode = (NodeSPI) modifier.getRoot().getChild(parent);
+ assert modifierNode == null : "Should be null";
+
+ observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+ assert observerNode == null : "Should be null";
+
+ NodeSPI modifierTombstone = modifierImpl.peek(parent, true, true);
+ NodeSPI observerTombstone = observerImpl.peek(parent, true, true);
+
+ assert modifierTombstone != null : "Tombstone should still exist";
+ assert observerTombstone != null : "Tombstone should still exist";
+
+ assert !modifierTombstone.isValid() : "Should not be valid";
+ assert !observerTombstone.isValid() : "Should not be valid";
+
+ assert ((DefaultDataVersion) modifierTombstone.getVersion()).getRawVersion() == 2 : "Should retain versioning";
+ assert ((DefaultDataVersion) observerTombstone.getVersion()).getRawVersion() == 2 : "Should retain versioning";
+ }
+}
\ No newline at end of file
Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,65 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheFactory;
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.config.CacheLoaderConfig;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.loader.DummyInMemoryCacheLoader;
+import org.jboss.cache.loader.DummySharedInMemoryCacheLoader;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class InvalidatedPessNodeValidityTest extends NodeValidityTestBase
+{
+ protected DummyInMemoryCacheLoader loader;
+
+ public InvalidatedPessNodeValidityTest()
+ {
+ invalidation = true;
+ }
+
+ protected Cache<String, String> createObserver()
+ {
+ return newCache();
+ }
+
+ protected Cache<String, String> createModifier()
+ {
+ return newCache();
+ }
+
+ @AfterMethod
+ public void emptyCacheLoader()
+ {
+ if (loader != null) loader.wipe();
+ }
+
+ protected Cache<String, String> newCache()
+ {
+ CacheFactory<String, String> f = DefaultCacheFactory.getInstance();
+ Cache<String, String> cache = f.createCache(false);
+ cache.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_SYNC);
+ optimisticConfiguration(cache.getConfiguration());
+
+ // need a cache loader as a shared data source between the 2 instances
+ CacheLoaderConfig.IndividualCacheLoaderConfig iclc = new CacheLoaderConfig.IndividualCacheLoaderConfig();
+ iclc.setClassName(DummySharedInMemoryCacheLoader.class.getName());
+ CacheLoaderConfig clc = new CacheLoaderConfig();
+ clc.addIndividualCacheLoaderConfig(iclc);
+ cache.getConfiguration().setCacheLoaderConfig(clc);
+
+ cache.start();
+
+ loader = (DummyInMemoryCacheLoader) ((CacheImpl) cache).getCacheLoader();
+
+ return cache;
+ }
+}
Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,17 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class LocalOptNodeValidityTest extends LocalPessNodeValidityTest
+{
+ public LocalOptNodeValidityTest()
+ {
+ optimistic = true;
+ }
+}
\ No newline at end of file
Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,50 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheFactory;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.misc.TestingUtil;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class LocalPessNodeValidityTest extends NodeValidityTestBase
+{
+ private Cache<String, String> cache;
+
+ public LocalPessNodeValidityTest()
+ {
+ clustered = false;
+ }
+
+ @AfterMethod
+ public void tearDown()
+ {
+ super.tearDown();
+ TestingUtil.killCaches(cache);
+ cache = null;
+ }
+
+ protected Cache<String, String> createObserver()
+ {
+ return createModifier();
+ }
+
+ protected Cache<String, String> createModifier()
+ {
+ if (cache == null)
+ {
+ CacheFactory<String, String> f = DefaultCacheFactory.getInstance();
+ cache = f.createCache(false);
+ optimisticConfiguration(cache.getConfiguration());
+ cache.start();
+ return cache;
+ }
+ return cache;
+ }
+}
Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,366 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+import org.jboss.cache.NodeNotValidException;
+import org.jboss.cache.NodeSPI;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.misc.TestingUtil;
+import org.jboss.cache.optimistic.DefaultDataVersion;
+import org.jboss.cache.transaction.DummyTransactionManagerLookup;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import java.util.Collections;
+
+/**
+ * exercises the isValid() api call on node.
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public abstract class NodeValidityTestBase
+{
+ protected boolean optimistic;
+
+ // needed to attach a blockUntilViewsReceived in setup
+ protected boolean clustered = true;
+
+ // needed to test tombstones
+ protected boolean invalidation = false;
+
+ protected Cache<String, String> observer;
+ protected Cache<String, String> modifier;
+ protected Fqn parent = Fqn.fromString("/parent");
+ protected Fqn child = Fqn.fromString("/parent/child");
+ protected String K="k", V="v";
+
+ protected abstract Cache<String, String> createObserver();
+ protected abstract Cache<String, String> createModifier();
+
+ protected void optimisticConfiguration(Configuration c)
+ {
+ if (optimistic)
+ {
+ c.setNodeLockingScheme(Configuration.NodeLockingScheme.OPTIMISTIC);
+ c.setTransactionManagerLookupClass(DummyTransactionManagerLookup.class.getName());
+ c.setSyncCommitPhase(true);
+ c.setSyncRollbackPhase(true);
+ }
+ }
+
+ @BeforeMethod
+ public void setUp()
+ {
+ observer = createObserver();
+ modifier = createModifier();
+ if (clustered) TestingUtil.blockUntilViewsReceived(60000, observer, modifier);
+ }
+
+ @AfterMethod
+ public void tearDown()
+ {
+ TestingUtil.killCaches(observer, modifier);
+ observer = null;
+ modifier = null;
+ }
+
+ public void testRemoval()
+ {
+// observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ observer.put(parent, K, V);
+
+ Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+ assert obsNode.get(K).equals(V) : "Data should be in the node.";
+ assert obsNode.isValid() : "Node should be valid";
+
+ modifier.removeNode(parent);
+
+ assert !obsNode.isValid() : "Should no longer be valid";
+ }
+
+ public void testRemovalWithChildren()
+ {
+// observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ observer.put(child, K, V);
+
+ Node<String, String> obsParentNode = observer.getRoot().getChild(parent);
+ Node<String, String> obsChildNode = observer.getRoot().getChild(child);
+
+ assert obsChildNode.get(K).equals(V) : "Data should be in the node.";
+ assert obsChildNode.isValid() : "Node should be valid";
+ assert obsParentNode.isValid() : "Node should be valid";
+
+ modifier.removeNode(parent);
+
+ assert !obsParentNode.isValid() : "Should no longer be valid";
+ assert !obsChildNode.isValid() : "Should no longer be valid";
+ }
+
+ public void testMove()
+ {
+ Fqn newParent = Fqn.fromString("/newParent/parent");
+
+ //observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ observer.put(parent, K, V);
+
+ Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+ assert obsNode.get(K).equals(V) : "Data should be in the node.";
+ assert obsNode.isValid() : "Node should be valid";
+
+ // new parent needs to exist first.
+ modifier.getRoot().addChild(newParent);
+ modifier.move(parent, newParent.getParent());
+
+ // the old node is only marked as invalid if we use opt locking
+ // with pess locking we directly move the node reference so the old ref is still valid, EVEN if the move happens
+ // remotely.
+ if (optimistic) assert !obsNode.isValid() : "Should no longer be valid";
+
+ assert observer.getRoot().getChild(newParent).isValid() : "Should be valid";
+ }
+
+ public void testMoveWithChildren()
+ {
+ Fqn newParent = Fqn.fromString("/newParent/parent");
+ Fqn newChild = Fqn.fromString("/newParent/parent/child");
+
+// observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+ observer.put(child, K, V);
+
+ Node<String, String> obsParentNode = observer.getRoot().getChild(parent);
+ Node<String, String> obsChildNode = observer.getRoot().getChild(child);
+
+ assert obsChildNode.get(K).equals(V) : "Data should be in the node.";
+ assert obsChildNode.isValid() : "Node should be valid";
+ assert obsParentNode.isValid() : "Node should be valid";
+
+ // new parent needs to exist first.
+ modifier.getRoot().addChild(newParent);
+ modifier.move(parent, newParent.getParent());
+
+ // the old node is only marked as invalid if we use opt locking
+ // with pess locking we directly move the node reference so the old ref is still valid.
+ if (optimistic)
+ {
+ assert !obsParentNode.isValid() : "Should no longer be valid";
+ assert !obsChildNode.isValid() : "Should no longer be valid";
+ }
+
+ assert observer.getRoot().getChild(newParent).isValid() : "Should be valid";
+ assert observer.getRoot().getChild(newChild).isValid() : "Should be valid";
+ }
+
+ public void testEvict()
+ {
+ // eviction should NOT affect validity
+ observer.put(parent, K, V);
+ Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+ assert obsNode.get(K).equals(V) : "Data should be in the node.";
+ assert obsNode.isValid() : "Node should be valid";
+
+ // eviction needs to happen on the same cache being watched
+ observer.evict(parent, false);
+
+ assert obsNode.isValid() : "Node should be valid";
+ }
+
+ public void testOperationsOnInvalidNode()
+ {
+ observer.put(parent, K, V);
+ Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+ assert obsNode.get(K).equals(V) : "Data should be in the node.";
+ assert obsNode.isValid() : "Node should be valid";
+
+ modifier.removeNode(parent);
+
+ assert !obsNode.isValid() : "Node should not be valid";
+
+ // all operations on the cached node should throw a NodeNotValidException
+
+ try
+ {
+ obsNode.get(K);
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.put(K, "v2");
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.remove(K);
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.clearData();
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.putAll(Collections.singletonMap(K, "v2"));
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.getKeys();
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.hasChild("Something");
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.removeChild("Something");
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.addChild(child);
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+
+ try
+ {
+ obsNode.getChildrenNames();
+ assert false : "Should fail";
+ }
+ catch (NodeNotValidException good)
+ {
+ // do nothing
+ }
+ }
+
+ public void testExistenceOfTombstones()
+ {
+ CacheImpl modifierImpl = (CacheImpl) modifier;
+ CacheImpl observerImpl = (CacheImpl) observer;
+
+ modifier.put(parent, K, V);
+ modifier.removeNode(parent);
+
+ if (optimistic && invalidation)
+ {
+ // if we are using optimistic invalidation then we should see tombstones. NOT otherwise.
+ NodeSPI modifierTombstone = modifierImpl.peek(parent, true, true);
+ NodeSPI observerTombstone = observerImpl.peek(parent, true, true);
+
+ assert modifierTombstone != null : "Modifier tombstone should not be null";
+ assert observerTombstone != null : "Observer tombstone should not be null";
+
+ assert !modifierTombstone.isValid() : "Should not be valid";
+ assert !observerTombstone.isValid() : "Should not be valid";
+
+ assert ((DefaultDataVersion) modifierTombstone.getVersion()).getRawVersion() == 2 : "Tombstone should be versioned";
+ assert ((DefaultDataVersion) observerTombstone.getVersion()).getRawVersion() == 2 : "Tombstone should be versioned";
+
+ }
+ else
+ {
+ // if we are using pess locking there should be NO tombstones, regardless of replication/invalidation!
+ assert modifierImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+ assert observerImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+ }
+ }
+
+ public void testExistenceOfTombstonesWithChildren()
+ {
+ CacheImpl modifierImpl = (CacheImpl) modifier;
+ CacheImpl observerImpl = (CacheImpl) observer;
+
+ modifier.put(child, K, V);
+ modifier.removeNode(parent);
+
+ if (optimistic && invalidation)
+ {
+ // if we are using optimistic invalidation then we should see tombstones. NOT otherwise.
+ NodeSPI modifierParentTombstone = modifierImpl.peek(parent, true, true);
+ NodeSPI observerParentTombstone = observerImpl.peek(parent, true, true);
+ NodeSPI modifierChildTombstone = modifierImpl.peek(child, true, true);
+ NodeSPI observerChildTombstone = observerImpl.peek(child, true, true);
+
+ assert modifierParentTombstone != null : "Modifier parent tombstone should not be null";
+ assert observerParentTombstone != null : "Observer parent tombstone should not be null";
+ assert modifierChildTombstone != null : "Modifier child tombstone should not be null";
+ assert observerChildTombstone != null : "Observer child tombstone should not be null";
+
+ assert !modifierParentTombstone.isValid() : "Should not be valid";
+ assert !observerParentTombstone.isValid() : "Should not be valid";
+ assert !modifierChildTombstone.isValid() : "Should not be valid";
+ assert !observerChildTombstone.isValid() : "Should not be valid";
+
+ assert ((DefaultDataVersion) modifierParentTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+ assert ((DefaultDataVersion) observerParentTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+
+ // note that versions on children cannot be incremented/updated since the remove operation was
+ // performed on the parent.
+ assert ((DefaultDataVersion) modifierChildTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+ assert ((DefaultDataVersion) observerChildTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+
+ }
+ else
+ {
+ // if we are using pess locking there should be NO tombstones, regardless of replication/invalidation!
+ assert modifierImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+ assert observerImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+ assert modifierImpl.peek(child, true, true) == null : "Tombstone should not exist";
+ assert observerImpl.peek(child, true, true) == null : "Tombstone should not exist";
+ }
+ }
+}
Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,17 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class ReplicatedOptNodeValidityTest extends ReplicatedPessNodeValidityTest
+{
+ public ReplicatedOptNodeValidityTest()
+ {
+ optimistic = true;
+ }
+}
\ No newline at end of file
Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,36 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheFactory;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.config.Configuration;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class ReplicatedPessNodeValidityTest extends NodeValidityTestBase
+{
+ protected Cache<String, String> createObserver()
+ {
+ return newCache();
+ }
+
+ protected Cache<String, String> createModifier()
+ {
+ return newCache();
+ }
+
+ protected Cache<String, String> newCache()
+ {
+ CacheFactory<String, String> f = DefaultCacheFactory.getInstance();
+ Cache<String, String> cache = f.createCache(false);
+ cache.getConfiguration().setCacheMode(Configuration.CacheMode.REPL_SYNC);
+ optimisticConfiguration(cache.getConfiguration());
+ cache.start();
+ return cache;
+ }
+}
Added: core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,131 @@
+package org.jboss.cache.invalidation;
+
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.config.EvictionConfig;
+import org.jboss.cache.config.EvictionRegionConfig;
+import org.jboss.cache.eviction.FIFOConfiguration;
+import org.jboss.cache.misc.TestingUtil;
+import org.jboss.cache.transaction.DummyTransactionManagerLookup;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Make sure tombstones are evicted
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test (groups = {"functional"})
+public class TombstoneEvictionTest
+{
+ private CacheImpl c1, c2;
+ private Fqn fqn = Fqn.fromString("/data/test");
+ private Fqn dummy = Fqn.fromString("/data/dummy");
+ private long evictionWaitTime = 2100;
+
+ @BeforeMethod
+ public void setUp() throws Exception
+ {
+ c1 = (CacheImpl) DefaultCacheFactory.getInstance().createCache(false);
+ c2 = (CacheImpl) DefaultCacheFactory.getInstance().createCache(false);
+
+ // the FIFO policy cfg
+ FIFOConfiguration cfg = new FIFOConfiguration();
+ cfg.setMaxNodes(1);
+ cfg.setMinTimeToLiveSeconds(0);
+
+ // the region configuration
+ EvictionRegionConfig regionCfg = new EvictionRegionConfig();
+ regionCfg.setRegionFqn(dummy.getParent());
+ regionCfg.setRegionName(dummy.getParent().toString());
+ regionCfg.setEvictionPolicyConfig(cfg);
+
+ // set regions in a list
+ List<EvictionRegionConfig> evictionRegionConfigs = Collections.singletonList(regionCfg);
+
+
+ EvictionConfig ec = new EvictionConfig();
+ ec.setWakeupIntervalSeconds(1);
+ ec.setEvictionRegionConfigs(evictionRegionConfigs);
+
+ c1.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_SYNC);
+ c1.getConfiguration().setNodeLockingScheme(Configuration.NodeLockingScheme.OPTIMISTIC);
+ c1.getConfiguration().setTransactionManagerLookupClass(DummyTransactionManagerLookup.class.getName());
+ c1.getConfiguration().setEvictionConfig(ec);
+
+ c2.setConfiguration(c1.getConfiguration().clone());
+
+ c1.start();
+ c2.start();
+
+ TestingUtil.blockUntilViewsReceived(60000, c1, c2);
+ }
+
+ @AfterMethod
+ public void tearDown()
+ {
+ TestingUtil.killCaches(c1, c2);
+ }
+
+ public void testControl()
+ {
+ c1.put(fqn, "k", "v");
+ c1.put(dummy, "k", "v");
+
+ assert c1.peek(fqn, false, true) != null : "Node should exist";
+ assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+ TestingUtil.sleepThread(evictionWaitTime);
+
+ assert c1.peek(fqn, false, true) == null : "Should have evicted";
+ assert c1.peek(dummy, false, true) != null : "Node should exist";
+ }
+
+ public void testWithInvalidationMarkers()
+ {
+ c1.put(fqn, "k", "v");
+ c1.put(dummy, "k", "v");
+
+ assert c1.peek(fqn, false, true) != null : "Node should exist";
+ assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+ assert c2.peek(fqn, false, true) != null : "Node should exist";
+ assert c2.peek(dummy, false, true) != null : "Node should exist";
+
+ TestingUtil.sleepThread(evictionWaitTime);
+
+ assert c1.peek(fqn, false, true) == null : "Should have evicted";
+ assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+ assert c2.peek(fqn, false, true) == null : "Should have evicted";
+ assert c2.peek(dummy, false, true) != null : "Node should exist";
+ }
+
+ public void testWithTombstones()
+ {
+ c1.put(fqn, "k", "v");
+ c1.removeNode(fqn);
+ c1.put(dummy, "k", "v");
+
+ assert c1.peek(fqn, false, true) != null : "Node should exist";
+ assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+ assert c2.peek(fqn, false, true) != null : "Node should exist";
+ assert c2.peek(dummy, false, true) != null : "Node should exist";
+
+ TestingUtil.sleepThread(evictionWaitTime);
+
+ assert c1.peek(fqn, false, true) == null : "Should have evicted";
+ assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+ assert c2.peek(fqn, false, true) == null : "Should have evicted";
+ assert c2.peek(dummy, false, true) != null : "Node should exist";
+ }
+}
Modified: core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -7,16 +7,18 @@
package org.jboss.cache.misc;
-import java.io.File;
-import java.util.List;
-import java.util.Random;
-
import org.jboss.cache.Cache;
import org.jboss.cache.CacheImpl;
import org.jboss.cache.CacheSPI;
+import org.jboss.cache.CacheStatus;
+import org.jboss.cache.Fqn;
import org.jboss.cache.interceptors.Interceptor;
import org.jboss.cache.util.CachePrinter;
+import java.io.File;
+import java.util.List;
+import java.util.Random;
+
/**
* Utilities for unit testing JBossCache.
*
@@ -374,4 +376,43 @@
//System.out.println("File " + f.toURI() + " deleted = " + f.delete());
f.delete();
}
+
+ /**
+ * Kills a cache - stops it, clears any data in any cache loaders, and rolls back any associated txs
+ */
+ public static void killCaches(Cache... caches)
+ {
+ for (Cache c: caches)
+ {
+ if (c != null && c.getCacheStatus() == CacheStatus.STARTED)
+ {
+ CacheImpl ci = (CacheImpl) c;
+ if (ci.getTransactionManager() != null)
+ {
+ try
+ {
+ ci.getTransactionManager().rollback();
+ }
+ catch (Exception e)
+ {
+ // don't care
+ }
+ }
+
+ if (ci.getCacheLoader() != null)
+ {
+ try
+ {
+ ci.getCacheLoader().remove(Fqn.ROOT);
+ }
+ catch (Exception e)
+ {
+ // don't care
+ }
+ }
+
+ ci.destroy();
+ }
+ }
+ }
}
Modified: core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -135,67 +135,6 @@
assert n.getData().size() == 1;
}
- public void testStateTransferDefaultVersionAfterRemoval() throws Exception
- {
- Fqn f = Fqn.fromString("/one/two/three");
- cache.put(f, "k", "v");
- cache.put(f, "k1", "v1");
- cache.removeNode(f);
-
- NodeSPI n = (NodeSPI) cache.getRoot().getChild(f);
- DataVersion dv = n.getVersion();
-
- assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
- assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Should have accurate data version";
-
- // now restart cache instance
- cache.stop();
- cache.start();
-
- assert cache.get(f, "k")== null : "Should be removed";
-
- n = (NodeSPI) cache.getRoot().getChild(f);
-
- dv = n.getVersion();
-
- assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
- assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Version should have persisted";
- }
-
- public void testStateTransferCustomVersionAfterRemoval() throws Exception
- {
- Fqn f = Fqn.fromString("/one/two/three");
- cache.getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('A'));
- cache.put(f, "k", "v");
- cache.getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('B'));
- cache.put(f, "k1", "v1");
- cache.getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('C'));
- cache.removeNode(f);
-
- NodeSPI n = (NodeSPI) cache.getRoot().getChild(f);
- DataVersion dv = n.getVersion();
-
- assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
- assert ((CharVersion) dv).version == 'C' : "Should have accurate data version";
-
- // now restart cache instance
- cache.stop();
- cache.start();
-
- assert cache.get(f, "k")== null : "Should be removed";
-
- n = (NodeSPI) cache.getRoot().getChild(f);
-
- dv = n.getVersion();
-
- assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
- assert ((CharVersion) dv).version == 'C' : "Version should have persisted";
- }
-
public static class CharVersion implements DataVersion
{
private char version = 'A';
Modified: core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java 2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java 2007-10-22 23:08:18 UTC (rev 4663)
@@ -193,69 +193,6 @@
assert n.getData().size() == 1;
}
- public void testStateTransferDefaultVersionAfterRemoval() throws Exception
- {
- Fqn f = Fqn.fromString("/one/two/three");
- caches.get(0).put(f, "k", "v");
- caches.get(0).put(f, "k1", "v1");
- caches.get(0).removeNode(f);
-
- NodeSPI n = (NodeSPI) caches.get(0).getRoot().getChild(f);
- DataVersion dv = n.getVersion();
-
- assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
- assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Should have accurate data version";
-
- // now start next cache instance
- caches.get(1).start();
-
- TestingUtil.blockUntilViewsReceived(10000, caches.get(0), caches.get(1));
-
- assert caches.get(1).get(f, "k")== null : "Should be removed";
-
- n = (NodeSPI) caches.get(1).getRoot().getChild(f);
-
- dv = n.getVersion();
-
- assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
- assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Version should have transferred";
- }
-
- public void testStateTransferCustomVersionAfterRemoval() throws Exception
- {
- Fqn f = Fqn.fromString("/one/two/three");
- caches.get(0).getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('A'));
- caches.get(0).put(f, "k", "v");
- caches.get(0).getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('B'));
- caches.get(0).put(f, "k1", "v1");
- caches.get(0).getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('C'));
- caches.get(0).removeNode(f);
-
- NodeSPI n = (NodeSPI) caches.get(0).getRoot().getChild(f);
- DataVersion dv = n.getVersion();
-
- assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
- assert ((CharVersion) dv).version == 'C' : "Should have accurate data version";
-
- // now start next cache instance
- caches.get(1).start();
-
- TestingUtil.blockUntilViewsReceived(10000, caches.get(0), caches.get(1));
-
- assert caches.get(1).get(f, "k")== null : "Should be removed";
-
- n = (NodeSPI) caches.get(1).getRoot().getChild(f);
-
- dv = n.getVersion();
-
- assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
- assert ((CharVersion) dv).version == 'C' : "Version should have transferred";
- }
-
public static class CharVersion implements DataVersion
{
private char version = 'A';
17 years, 2 months
JBoss Cache SVN: r4662 - core/trunk/src/examples/resources.
by jbosscache-commits@lists.jboss.org
Author: mircea.markus
Date: 2007-10-22 15:13:05 -0400 (Mon, 22 Oct 2007)
New Revision: 4662
Removed:
core/trunk/src/examples/resources/build.bat
core/trunk/src/examples/resources/build.sh
Log:
removing files as they are no longer needed
Deleted: core/trunk/src/examples/resources/build.bat
===================================================================
--- core/trunk/src/examples/resources/build.bat 2007-10-22 12:00:07 UTC (rev 4661)
+++ core/trunk/src/examples/resources/build.bat 2007-10-22 19:13:05 UTC (rev 4662)
@@ -1,3 +0,0 @@
-@echo off
-set ANT_HOME=.\ant-dist
-%ANT_HOME%\bin\ant %1 %2 %3 %4 %5
Deleted: core/trunk/src/examples/resources/build.sh
===================================================================
--- core/trunk/src/examples/resources/build.sh 2007-10-22 12:00:07 UTC (rev 4661)
+++ core/trunk/src/examples/resources/build.sh 2007-10-22 19:13:05 UTC (rev 4662)
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-CACHE_HOME=`dirname $0`
-ANT_HOME=$CACHE_HOME/ant-dist
-
-$ANT_HOME/bin/ant "$@"
-
17 years, 2 months
JBoss Cache SVN: r4661 - core/trunk/src/test/resources.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2007-10-22 08:00:07 -0400 (Mon, 22 Oct 2007)
New Revision: 4661
Modified:
core/trunk/src/test/resources/log4j.xml
Log:
Do not put local debug log levels in svn
Modified: core/trunk/src/test/resources/log4j.xml
===================================================================
--- core/trunk/src/test/resources/log4j.xml 2007-10-22 11:46:54 UTC (rev 4660)
+++ core/trunk/src/test/resources/log4j.xml 2007-10-22 12:00:07 UTC (rev 4661)
@@ -63,15 +63,15 @@
<!-- ================ -->
<category name="org.jboss.cache">
- <priority value="DEBUG"/>
+ <priority value="WARN"/>
</category>
<category name="org.jboss.tm">
- <priority value="DEBUG"/>
+ <priority value="WARN"/>
</category>
<category name="org.jgroups">
- <priority value="DEBUG"/>
+ <priority value="WARN"/>
</category>
<!-- ======================= -->
@@ -80,7 +80,7 @@
<root>
<!--<appender-ref ref="CONSOLE"/>-->
- <appender-ref ref="CONSOLE"/>
+ <appender-ref ref="FILE"/>
</root>
</log4j:configuration>
17 years, 2 months