[jbosscache-commits] JBoss Cache SVN: r4663 - in core/trunk/src: main/java/org/jboss/cache/eviction and 6 other directories.

jbosscache-commits at lists.jboss.org jbosscache-commits at lists.jboss.org
Mon Oct 22 19:08:18 EDT 2007


Author: manik.surtani at jboss.com
Date: 2007-10-22 19:08:18 -0400 (Mon, 22 Oct 2007)
New Revision: 4663

Added:
   core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java
   core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java
   core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java
Modified:
   core/trunk/src/main/java/org/jboss/cache/CacheImpl.java
   core/trunk/src/main/java/org/jboss/cache/CacheSPI.java
   core/trunk/src/main/java/org/jboss/cache/Node.java
   core/trunk/src/main/java/org/jboss/cache/NodeSPI.java
   core/trunk/src/main/java/org/jboss/cache/Region.java
   core/trunk/src/main/java/org/jboss/cache/RegionImpl.java
   core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java
   core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java
   core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java
   core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java
   core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java
   core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java
   core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java
   core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java
   core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java
   core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java
   core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java
Log:
JBCACHE-1188 - implemented Node.isValid() and tombstones for invalidations using optimistic locking

Modified: core/trunk/src/main/java/org/jboss/cache/CacheImpl.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/CacheImpl.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/CacheImpl.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -1389,13 +1389,13 @@
       return n != null;
    }
 
-   /**
-    * Gets node without attempt to load it from CacheLoader if not present
-    *
-    * @param fqn
-    */
    public NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes)
    {
+      return peek(fqn, includeDeletedNodes, false);
+   }
+
+   public NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes, boolean includeInvalidNodes)
+   {
       if (fqn == null || fqn.size() == 0) return root;
       NodeSPI<K, V> n = root;
       int fqnSize = fqn.size();
@@ -1411,6 +1411,10 @@
          {
             return null;
          }
+         else if (!includeInvalidNodes && !n.isValid())
+         {
+            return null;
+         }
       }
       return n;
    }
@@ -2401,7 +2405,7 @@
       }
 
       // Find the node. This will add the temporarily created parent nodes to the TX's node list if tx != null)
-      n = findNode(fqn, version);
+      n = findNode(fqn, version, true);
       if (n == null)
       {
          if (log.isTraceEnabled())
@@ -2430,11 +2434,11 @@
       if (eviction || configuration.isNodeLockingOptimistic())
       {
          // if there is no parent node and the fqn is root, found == true otherwise found == false.
-         found = parent_node == null ? fqn.isRoot() : parent_node.removeChildDirect(n.getFqn().getLastElement());
+         found = n.isValid() && parent_node == null ? fqn.isRoot() : parent_node.removeChildDirect(n.getFqn().getLastElement());
       }
       else
       {
-         found = !n.isDeleted();
+         found = n.isValid() && !n.isDeleted();
          n.markAsDeleted(true);
       }
 
@@ -2448,7 +2452,7 @@
 
       // create a compensating method call (reverting the effect of
       // this modification) and put it into the TX's undo list.
-      if (tx != null && create_undo_ops && !eviction)
+      if (tx != null && create_undo_ops && !eviction && found)
       {
          undo_op = MethodCallFactory.create(MethodDeclarations.addChildMethodLocal, tx, parent_node.getFqn(), n.getFqn().getLastElement(), n, false);
 
@@ -2661,8 +2665,9 @@
     */
    public boolean _evict(Fqn fqn) throws CacheException
    {
-      if (!exists(fqn))
-         return true;// node does not exist. Maybe it has been recursively removed.
+      if (peek(fqn, false, true) == null) return true;
+      // node does not exist. Maybe it has been recursively removed.
+
       // use remove method now if there is a child node. Otherwise, it is removed
       boolean create_undo_ops = false;
       boolean sendNodeEvent = false;
@@ -2724,20 +2729,25 @@
     * <p/>
     * Finally, the data version of the in-memory node is updated to the version being evicted to prevent versions
     * going out of sync.
-    *
-    * @param fqn
-    * @param versionToInvalidate
     */
    public void invalidate(Fqn fqn, DataVersion versionToInvalidate)
    {
-      Node node = get(fqn); // force interceptor chain, load if necessary from cache loader.
+      Node<K, V> node = get(fqn); // force interceptor chain, load if necessary from cache loader.
 
       if (node != null)
       {
-         _removeData(null, fqn, false, false, true, versionToInvalidate);
+         // mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
+         // be aware that it is no longer valid.
+         ((NodeSPI) node).setValid(false, true);
+
+         if (configuration.isNodeLockingOptimistic())
+            _removeData(null, fqn, false, false, true, versionToInvalidate);
+         else
+            _evict(fqn);
+         
          if (versionToInvalidate != null)
          {
-            NodeSPI n = peek(fqn, false);
+            NodeSPI n = peek(fqn, false, true);
             n.setVersion(versionToInvalidate);
          }
       }
@@ -2745,15 +2755,24 @@
       {
          // if pessimistic locking, just return.
          if (!configuration.isNodeLockingOptimistic()) return;
-         // create the node we need.
-         Map<K, V> m = Collections.emptyMap();
-         InvocationContext ic = getInvocationContext();
-         boolean origCacheModeLocal = ic.getOptionOverrides().isCacheModeLocal();
-         ic.getOptionOverrides().setCacheModeLocal(true);
-         put(fqn, m);
-         ic.getOptionOverrides().setCacheModeLocal(origCacheModeLocal);
-         NodeSPI nodeSPI = (NodeSPI) root.getChild(fqn);
+
+         // check if a tombstone already exists
+         NodeSPI nodeSPI = peek(fqn, false, true);
+         if (nodeSPI == null)
+         {
+            // create the node we need.
+            Map<K, V> m = Collections.emptyMap();
+            InvocationContext ic = getInvocationContext();
+            boolean origCacheModeLocal = ic.getOptionOverrides().isCacheModeLocal();
+            ic.getOptionOverrides().setCacheModeLocal(true);
+            put(fqn, m);
+            ic.getOptionOverrides().setCacheModeLocal(origCacheModeLocal);
+            nodeSPI = (NodeSPI) root.getChild(fqn);
+         }
          nodeSPI.setVersion(versionToInvalidate);
+         // mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
+         // be aware that it is no longer valid.
+         nodeSPI.setValid(false, true);
       }
    }
 
@@ -2803,6 +2822,11 @@
 
       childNode.markAsDeleted(false, true);
 
+      // tricky stuff here - this does look kludgy since we're recursively re-validating nodes
+      // potentially mistakenly revalidating tombstones, but this method would only be called
+      // when using pess locking and tombstones don't exist with PL, so this is OK.            
+      childNode.setValid(true, true);
+
       if (gtx != null && undoOps)
       {
          // 1. put undo-op in TX' undo-operations list (needed to rollback TX)
@@ -4056,6 +4080,10 @@
          return false;
       }
 
+      // mark the node to be removed (and all children) as invalid so anyone holding a direct reference to it will
+      // be aware that it is no longer valid.
+      n.setValid(false, true);      
+
       if (log.isTraceEnabled()) log.trace("Performing a real remove for node " + f + ", marked for removal.");
       if (skipMarkerCheck || n.isDeleted())
       {
@@ -4080,15 +4108,20 @@
    }
 
    /**
-    * Finds a node given a fully qualified name and DataVersion.
+    * Finds a node given a fully qualified name and DataVersion.  Does not include invalid nodes.
     */
-   private NodeSPI<K, V> findNode(Fqn fqn, DataVersion version) throws CacheException
+   private NodeSPI<K, V> findNode(Fqn fqn, DataVersion version)
    {
+      return findNode(fqn, version, false);
+   }
+
+   private NodeSPI<K, V> findNode(Fqn fqn, DataVersion version, boolean includeInvalidNodes)
+   {
       if (fqn == null) return null;
 
-      NodeSPI<K, V> toReturn = peek(fqn, false);
+      NodeSPI<K, V> toReturn = peek(fqn, false, includeInvalidNodes);
 
-      if (version != null && configuration.isNodeLockingOptimistic())
+      if (toReturn != null && version != null && configuration.isNodeLockingOptimistic())
       {
          // we need to check the version of the data node...
          DataVersion nodeVersion = toReturn.getVersion();

Modified: core/trunk/src/main/java/org/jboss/cache/CacheSPI.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/CacheSPI.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/CacheSPI.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -152,7 +152,7 @@
    Notifier getNotifier();
 
    /**
-    * Returns a node without accessing the interceptor chain.
+    * Returns a node without accessing the interceptor chain.  Does not return any nodes marked as invalid.
     *
     * @param fqn                 the Fqn to look up.
     * @param includeDeletedNodes if you intend to see nodes marked as deleted within the current tx, set this to true
@@ -161,6 +161,16 @@
    NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes);
 
    /**
+    * Returns a node without accessing the interceptor chain, optionally returning nodes that are marked as invalid ({@link org.jboss.cache.Node#isValid()} == false).
+    *
+    * @param fqn                 the Fqn to look up.
+    * @param includeDeletedNodes if you intend to see nodes marked as deleted within the current tx, set this to true
+    * @param includeInvalidNodes if true, nodes marked as being invalid are also returned.
+    * @return a node if one exists or null
+    */
+   NodeSPI<K, V> peek(Fqn<?> fqn, boolean includeDeletedNodes, boolean includeInvalidNodes);
+
+   /**
     * Used with buddy replication's data gravitation interceptor.  If marshalling is necessary, ensure that the cache is
     * configured to use {@link org.jboss.cache.config.Configuration#useRegionBasedMarshalling} and the {@link org.jboss.cache.Region}
     * pertaining to the Fqn passed in is activated, and has an appropriate ClassLoader.

Modified: core/trunk/src/main/java/org/jboss/cache/Node.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/Node.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/Node.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -270,8 +270,9 @@
    boolean hasChild(Object o);
 
    /**
-    * Tests if a node reference is still valid.  A node reference may become invalid if it has been evicted, for example,
-    * in which case it should be looked up again from the cache.
+    * Tests if a node reference is still valid.  A node reference may become invalid if it has been removed, invalidated
+    * or moved, either locally or remotely.  If a node is invalid, it should be fetched again from the cache or a valid
+    * parent node.  Operations on invalid nodes will throw a {@link org.jboss.cache.NodeNotValidException}. 
     *
     * @return true if the node is valid.
     */

Added: core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java	                        (rev 0)
+++ core/trunk/src/main/java/org/jboss/cache/NodeNotValidException.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,30 @@
+package org.jboss.cache;
+
+/**
+ * Thrown whenever operations are attempted on a node that is no longer valid.  See {@link org.jboss.cache.Node#isValid()}
+ * for details.
+ *
+ * @author <a href="mailto:manik at jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+public class NodeNotValidException extends CacheException
+{
+   public NodeNotValidException()
+   {
+   }
+
+   public NodeNotValidException(Throwable cause)
+   {
+      super(cause);
+   }
+
+   public NodeNotValidException(String msg)
+   {
+      super(msg);
+   }
+
+   public NodeNotValidException(String msg, Throwable cause)
+   {
+      super(msg, cause);
+   }
+}

Modified: core/trunk/src/main/java/org/jboss/cache/NodeSPI.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/NodeSPI.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/NodeSPI.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -459,4 +459,13 @@
     * @since 2.1.0
     */
    void setInternalState(Map state);
+
+   /**
+    * Sets the validity of a node.  By default, all nodes are valid unless they are deleted, invalidated or moved, either
+    * locally or remotely.  To be used in conjunction with {@link #isValid()}.
+    * @param valid if true, the node is marked as valid; if false, the node is invalid.
+    * @param recursive if true, the validity flag passed in is applied to all children as well.
+    * @since 2.1.0
+    */
+   void setValid(boolean valid, boolean recursive);
 }

Modified: core/trunk/src/main/java/org/jboss/cache/Region.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/Region.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/Region.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -6,6 +6,7 @@
  */
 package org.jboss.cache;
 
+import org.jboss.cache.config.Configuration;
 import org.jboss.cache.config.EvictionPolicyConfig;
 import org.jboss.cache.config.EvictionRegionConfig;
 import org.jboss.cache.eviction.EvictedEventNode;
@@ -50,6 +51,12 @@
    void registerContextClassLoader(ClassLoader classLoader);
 
    /**
+    * @return the cache-wide configuration
+    * @since 2.1.0
+    */
+   Configuration getCacheConfiguration();
+
+   /**
     * Unregisters a registered {@link ClassLoader}s for this region.
     */
    void unregisterContextClassLoader();

Modified: core/trunk/src/main/java/org/jboss/cache/RegionImpl.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/RegionImpl.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/RegionImpl.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -8,6 +8,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.config.Configuration;
 import org.jboss.cache.config.EvictionPolicyConfig;
 import org.jboss.cache.config.EvictionRegionConfig;
 import org.jboss.cache.eviction.EvictedEventNode;
@@ -58,6 +59,11 @@
       createQueue();
    }
 
+   public Configuration getCacheConfiguration()
+   {
+      return regionManager.getCache().getConfiguration();
+   }
+
    public void registerContextClassLoader(ClassLoader classLoader)
    {
       this.classLoader = classLoader;

Modified: core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/UnversionedNode.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -69,6 +69,7 @@
    private final Map<K, V> data = new HashMap<K, V>();
 
    private boolean lockForChildInsertRemove;
+   private boolean valid = true;
 
    /**
     * Constructs a new node with an FQN of Root.
@@ -163,6 +164,7 @@
 
    public V get(K key)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.get(getFqn(), key);
    }
 
@@ -190,6 +192,7 @@
 
    public Map<K, V> getData()
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       if (cache == null) return Collections.emptyMap();
       return cache.getData(getFqn());
 
@@ -211,6 +214,7 @@
 
    public V put(K key, V value)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.put(getFqn(), key, value);
    }
 
@@ -283,6 +287,7 @@
 
    public V remove(K key)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.remove(getFqn(), key);
    }
 
@@ -305,6 +310,8 @@
    {
       StringBuffer sb = new StringBuffer();
       sb.append(getClass().getSimpleName());
+      if (!valid) sb.append(" (INVALID!) ");
+
       if (deleted)
       {
          sb.append(" (deleted) [ ").append(fqn);
@@ -341,6 +348,7 @@
 
    public Node<K, V> addChild(Fqn f)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       Fqn nf = new Fqn(getFqn(), f);
       cache.put(nf, null);
       return getChild(f);
@@ -375,6 +383,7 @@
 
    public void clearData()
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       cache.removeData(getFqn());
    }
 
@@ -385,6 +394,7 @@
 
    public Node<K, V> getChild(Fqn fqn)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.get(new Fqn(getFqn(), fqn));
    }
 
@@ -409,6 +419,7 @@
 
    public Set<Object> getChildrenNames()
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.getChildrenNames(getFqn());
    }
 
@@ -419,6 +430,7 @@
 
    public Set<K> getKeys()
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       Set<K> keys = cache.getKeys(getFqn());
       return keys == null ? Collections.<K>emptySet() : Collections.<K>unmodifiableSet(keys);
    }
@@ -434,16 +446,19 @@
 
    public boolean hasChild(Fqn f)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return getChild(f) != null;
    }
 
    public boolean hasChild(Object o)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return getChild(o) != null;
    }
 
    public V putIfAbsent(K k, V v)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       // make sure this is atomic.  Not hugely performant at the moment (should use the locking interceptors) but for now ...
       synchronized (this)
       {
@@ -456,6 +471,7 @@
 
    public V replace(K key, V value)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       // make sure this is atomic.  Not hugely performant at the moment (should use the locking interceptors) but for now ...
       synchronized (this)
       {
@@ -470,6 +486,7 @@
 
    public boolean replace(K key, V oldValue, V newValue)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       // make sure this is atomic.  Not hugely performant at the moment (should use the locking interceptors) but for now ...
       synchronized (this)
       {
@@ -485,16 +502,19 @@
 
    public boolean removeChild(Fqn fqn)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.removeNode(new Fqn(getFqn(), fqn));
    }
 
    public int dataSize()
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.getKeys(getFqn()).size();
    }
 
    public boolean removeChild(Object childName)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       return cache.removeNode(new Fqn(getFqn(), childName));
    }
 
@@ -529,11 +549,13 @@
 
    public void putAll(Map<K, V> data)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       cache.put(fqn, data);
    }
 
    public void replaceAll(Map<K, V> data)
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       cache.put(fqn, data, true);
    }
 
@@ -648,6 +670,7 @@
 
    public Set<Node<K, V>> getChildren()
    {
+      if (!valid) throw new NodeNotValidException("Node " + getFqn() + " is not valid.  Perhaps it has been moved or removed.");
       if (cache == null) return Collections.emptySet();
       Set<Node<K, V>> children = new HashSet<Node<K, V>>();
       for (Object c : cache.getChildrenNames(getFqn()))
@@ -738,10 +761,22 @@
 
    public boolean isValid()
    {
-      // TODO; implement this property, to detect if it has been evicted, removed by another thread, etc.  Method added for now as a dummy so it exists in the API
-      return true;
+      return valid;
    }
 
+   public void setValid(boolean valid, boolean recursive)
+   {
+      this.valid = valid;
+      if (log.isTraceEnabled()) log.trace("Marking node " + getFqn() + " as " + (valid ? "" : "in") + "valid");
+      if (recursive)
+      {
+         for (Node child: children().values())
+         {
+            ((UnversionedNode) child).setValid(valid, recursive);
+         }
+      }
+   }
+
    public boolean isLockForChildInsertRemove()
    {
       return lockForChildInsertRemove;

Modified: core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/eviction/BaseEvictionAlgorithm.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -10,6 +10,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.jboss.cache.Fqn;
 import org.jboss.cache.Region;
+import org.jboss.cache.config.Configuration;
 import org.jboss.cache.lock.TimeoutException;
 
 import java.util.concurrent.BlockingQueue;
@@ -44,6 +45,8 @@
     */
    protected EvictionQueue evictionQueue;
 
+   protected boolean allowTombstones = false;
+
    /**
     * This method will create an EvictionQueue implementation and prepare it for use.
     *
@@ -74,6 +77,10 @@
       this.region = region;
       evictionQueue = setupEvictionQueue(region);
       log.debug("initialized: " + this);
+      // hacky temp solution till we have an ioc fwk to inject configuration elements as needed
+      Configuration.CacheMode cm = region.getCacheConfiguration().getCacheMode();
+      allowTombstones = region.getCacheConfiguration().isNodeLockingOptimistic() &&
+                        (cm == Configuration.CacheMode.INVALIDATION_ASYNC || cm == Configuration.CacheMode.INVALIDATION_SYNC);
    }
 
    /**
@@ -340,7 +347,17 @@
       NodeEntry ne = evictionQueue.getNodeEntry(fqn);
       if (ne != null)
       {
-         evictionQueue.removeNodeEntry(ne);
+         if (allowTombstones)
+         {
+            // don't remove from the queue - deleting a node results in a tombstone which means the nodes
+            // still need to be considered for eviction!
+            return;
+         }
+         else
+         {
+            // a removeNode operation will simply remove the node.  Nothing to worry about.
+            evictionQueue.removeNodeEntry(ne);
+         }
       }
       else
       {

Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/CacheLoaderInterceptor.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -5,6 +5,7 @@
 import org.jboss.cache.Fqn;
 import org.jboss.cache.InvocationContext;
 import org.jboss.cache.NodeSPI;
+import static org.jboss.cache.config.Configuration.CacheMode;
 import org.jboss.cache.loader.CacheLoader;
 import org.jboss.cache.lock.NodeLock;
 import org.jboss.cache.marshall.MethodCall;
@@ -34,6 +35,7 @@
    private TransactionTable txTable = null;
    protected boolean isActivation = false;
    protected CacheLoader loader;
+   protected boolean usingOptimisticInvalidation = false;
 
    /**
     * True if CacheStoreInterceptor is in place.
@@ -47,6 +49,9 @@
       super.setCache(cache);
       txTable = cache.getTransactionTable();
       this.loader = cache.getCacheLoaderManager().getCacheLoader();
+      CacheMode mode = cache.getConfiguration().getCacheMode();
+      usingOptimisticInvalidation = cache.getConfiguration().isNodeLockingOptimistic() &&
+                                    ((mode == CacheMode.INVALIDATION_ASYNC) || (mode == CacheMode.INVALIDATION_SYNC));
    }
 
    /**
@@ -117,6 +122,10 @@
             acquireLock = true;
             break;
          case MethodDeclarations.getNodeMethodLocal_id:
+            bypassLoadingData = !usingOptimisticInvalidation;
+            fqn = (Fqn) args[0];
+            acquireLock = true;
+            break;            
          case MethodDeclarations.getChildrenNamesMethodLocal_id:
             bypassLoadingData = true;
          case MethodDeclarations.releaseAllLocksMethodLocal_id:
@@ -134,6 +143,12 @@
             // clean up nodesCreated map
             cleanupNodesCreated(entry);
             break;
+         case MethodDeclarations.removeNodeMethodLocal_id:
+            if (cache.getConfiguration().isNodeLockingOptimistic())
+            {
+               fqn = (Fqn) args[1];               
+            }
+            break;
          default:
             if (!useCacheStore)
             {
@@ -168,7 +183,7 @@
 
    private void loadIfNeeded(InvocationContext ctx, Fqn fqn, Object key, boolean allKeys, boolean initNode, boolean acquireLock, MethodCall m, TransactionEntry entry, boolean recursive, boolean isMove, boolean bypassLoadingData) throws Throwable
    {
-      NodeSPI n = cache.peek(fqn, true);
+      NodeSPI n = cache.peek(fqn, true, true);
 
       boolean mustLoad = mustLoad(n, key, allKeys);
       if (log.isTraceEnabled())
@@ -299,6 +314,15 @@
          log.trace("must load, node null");
          return true;
       }
+
+      // check this first!!!
+      if (!n.isValid() && cache.getConfiguration().isNodeLockingOptimistic())
+      {
+         // attempt to load again; this only happens if we have tombstones lying around, or we are using invalidation.
+         log.trace("loading again from cache loader since in-memory node is marked as invalid");
+         return true;
+      }
+
       // JBCACHE-1172 Skip single-key optimization if request needs all keys
       if (!allKeys)
       {
@@ -429,6 +453,9 @@
 //         n.clearDataDirect();
          n.setInternalState(nodeData);
 
+         // set this node as valid?
+         if (usingOptimisticInvalidation) n.setValid(true, false);
+
          cache.getNotifier().notifyNodeLoaded(fqn, false, nodeData, ctx);
          if (isActivation)
          {

Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/CacheStoreInterceptor.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -242,9 +242,13 @@
       {
          for (Fqn f : affectedFqns)
          {
-            NodeSPI n = cache.peek(f, false);
-            Map internalState = n.getInternalState(true);
-            loader.put(f, internalState);            
+            // NOT going to store tombstones!!
+            NodeSPI n = cache.peek(f, false, false);
+            if (n != null)
+            {
+               Map internalState = n.getInternalState(true);
+               loader.put(f, internalState);
+            }
          }
       }
    }

Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/InvalidationInterceptor.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -14,9 +14,9 @@
 import org.jboss.cache.marshall.MethodCallFactory;
 import org.jboss.cache.marshall.MethodDeclarations;
 import org.jboss.cache.optimistic.DataVersion;
+import org.jboss.cache.optimistic.DefaultDataVersion;
 import org.jboss.cache.optimistic.TransactionWorkspace;
 import org.jboss.cache.optimistic.WorkspaceNode;
-import org.jboss.cache.optimistic.DefaultDataVersion;
 import org.jboss.cache.transaction.GlobalTransaction;
 import org.jboss.cache.transaction.OptimisticTransactionEntry;
 import org.jboss.cache.transaction.TransactionEntry;
@@ -112,22 +112,24 @@
             {
 
                case MethodDeclarations.prepareMethod_id:
-                  log.debug("Entering InvalidationInterceptor's prepare phase");
-                  // fetch the modifications before the transaction is committed (and thus removed from the txTable)
-                  gtx = ctx.getGlobalTransaction();
-                  entry = txTable.get(gtx);
-                  if (entry == null) throw new IllegalStateException("cannot find transaction entry for " + gtx);
-                  modifications = new LinkedList<MethodCall>(entry.getModifications());
+                  if (!optimistic)
+                  {
+                     log.debug("Entering InvalidationInterceptor's prepare phase");
+                     // fetch the modifications before the transaction is committed (and thus removed from the txTable)
+                     gtx = ctx.getGlobalTransaction();
+                     entry = txTable.get(gtx);
+                     if (entry == null) throw new IllegalStateException("cannot find transaction entry for " + gtx);
+                     modifications = new LinkedList<MethodCall>(entry.getModifications());
 
-                  if (modifications.size() > 0)
-                  {
-                     broadcastInvalidate(modifications, gtx, tx, ctx);
+                     if (modifications.size() > 0)
+                     {
+                        broadcastInvalidate(modifications, gtx, tx, ctx);
+                     }
+                     else
+                     {
+                        log.debug("Nothing to invalidate - no modifications in the transaction.");
+                     }
                   }
-                  else
-                  {
-                     log.debug("Nothing to invalidate - no modifications in the transaction.");
-                  }
-
                   break;
                case MethodDeclarations.optimisticPrepareMethod_id:
                   // here we just record the modifications but actually do the invalidate in commit.

Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticCreateIfNotExistsInterceptor.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -10,7 +10,6 @@
 import org.jboss.cache.CacheSPI;
 import org.jboss.cache.Fqn;
 import org.jboss.cache.InvocationContext;
-import org.jboss.cache.Node;
 import org.jboss.cache.NodeFactory;
 import org.jboss.cache.NodeSPI;
 import org.jboss.cache.marshall.MethodCall;
@@ -77,7 +76,7 @@
       fqns.add(newParent);
 
       //  peek into Node and get a hold of all child fqns as these need to be in the workspace.
-      NodeSPI node = cache.peek(nodeFqn, true);
+      NodeSPI node = cache.peek(nodeFqn, true, true);
       greedyGetFqns(fqns, node, newParent);
 
 
@@ -222,6 +221,10 @@
                      log.trace("Child node " + currentNode.getFqn() + " doesn't exist in workspace or has been deleted.  Adding to workspace in gtx " + gtx);
 
                   workspaceNode = nodeFactory.createWorkspaceNode(currentNode, workspace);
+
+                  // if the underlying node is a tombstone then mark the workspace node as newly created
+                  if (!currentNode.isValid()) workspaceNode.markAsCreated();
+
                   if (isTargetFqn && !workspace.isVersioningImplicit())
                   {
                      workspaceNode.setVersion(version);

Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticNodeInterceptor.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -63,12 +63,12 @@
          GlobalTransaction gtx = getGlobalTransaction(ctx);
          TransactionWorkspace workspace = getTransactionWorkspace(gtx);
          Fqn fqn = getFqn(args, m.getMethodId());
-         WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, true);
+         WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, true, true);
 
          // in the case of a data gravitation cleanup, if the primary Fqn does not exist the backup one may.
          if (workspaceNode == null && m.getMethodId() == MethodDeclarations.dataGravitationCleanupMethod_id)
          {
-            workspaceNode = fetchWorkspaceNode(getBackupFqn(args), workspace, true);
+            workspaceNode = fetchWorkspaceNode(getBackupFqn(args), workspace, true, true);
          }
 
          if (workspaceNode != null)
@@ -232,7 +232,7 @@
          return;
       }
 
-      WorkspaceNode oldParent = fetchWorkspaceNode(nodeFqn.getParent(), ws, false);
+      WorkspaceNode oldParent = fetchWorkspaceNode(nodeFqn.getParent(), ws, false, true);
       if (oldParent == null) throw new NodeNotExistsException("Node " + nodeFqn.getParent() + " does not exist!");
       
       if (parentFqn.equals(oldParent.getFqn()))
@@ -241,7 +241,7 @@
          return;
       }
       // retrieve parent
-      WorkspaceNode parent = fetchWorkspaceNode(parentFqn, ws, false);
+      WorkspaceNode parent = fetchWorkspaceNode(parentFqn, ws, false, true);
       if (parent == null) throw new NodeNotExistsException("Node " + parentFqn + " does not exist!");
 
       Object nodeName = nodeFqn.getLastElement();
@@ -275,13 +275,13 @@
    private void recursiveMoveNode(WorkspaceNode node, Fqn newBase, TransactionWorkspace ws)
    {
       Fqn newFqn = new Fqn(newBase, node.getFqn().getLastElement());
-      WorkspaceNode movedNode = fetchWorkspaceNode(newFqn, ws, true);
+      WorkspaceNode movedNode = fetchWorkspaceNode(newFqn, ws, true, true);
       movedNode.putAll(node.getData());
 
       // process children
       for (Object n : node.getChildrenNames())
       {
-         WorkspaceNode child = fetchWorkspaceNode(new Fqn(node.getFqn(), n), ws, false);
+         WorkspaceNode child = fetchWorkspaceNode(new Fqn(node.getFqn(), n), ws, false, true);
          if (child != null) recursiveMoveNode(child, newFqn, ws);
       }
    }
@@ -323,7 +323,7 @@
       if (workspaceNode == null) return false;
 
       Fqn parentFqn = workspaceNode.getFqn().getParent();
-      WorkspaceNode parentNode = fetchWorkspaceNode(parentFqn, workspace, true);
+      WorkspaceNode parentNode = fetchWorkspaceNode(parentFqn, workspace, true, true);
       if (parentNode == null) throw new NodeNotExistsException("Unable to find parent node with fqn " + parentFqn);
 
       // pre-notify
@@ -390,7 +390,7 @@
    {
       Fqn fqn = (Fqn) args[0];
       Object key = args[1];
-      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
 
       if (workspaceNode == null)
       {
@@ -412,7 +412,7 @@
    {
       Fqn fqn = (Fqn) args[0];
 
-      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
 
       if (workspaceNode == null)
       {
@@ -437,7 +437,7 @@
    {
       Fqn fqn = (Fqn) args[0];
 
-      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
 
       if (workspaceNode == null)
       {
@@ -458,7 +458,7 @@
    {
       Fqn fqn = (Fqn) args[0];
 
-      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
 
       if (workspaceNode == null)
       {
@@ -479,7 +479,7 @@
    {
       Fqn fqn = (Fqn) args[0];
 
-      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false);
+      WorkspaceNode workspaceNode = fetchWorkspaceNode(fqn, workspace, false, false);
 
       if (workspaceNode == null)
       {
@@ -513,15 +513,16 @@
     * @param fqn                 Fqn of the node to retrieve
     * @param workspace           transaction workspace to look in
     * @param undeleteIfNecessary if the node is in the workspace but marked as deleted, this meth
+    * @param includeInvalidNodes
     * @return a node, if found, or null if not.
     */
-   private WorkspaceNode fetchWorkspaceNode(Fqn fqn, TransactionWorkspace workspace, boolean undeleteIfNecessary)
+   private WorkspaceNode fetchWorkspaceNode(Fqn fqn, TransactionWorkspace workspace, boolean undeleteIfNecessary, boolean includeInvalidNodes)
    {
       WorkspaceNode workspaceNode = workspace.getNode(fqn);
       // if we do not have the node then we need to add it to the workspace
       if (workspaceNode == null)
       {
-         NodeSPI node = cache.peek(fqn, true);
+         NodeSPI node = cache.peek(fqn, true, includeInvalidNodes);
          if (node == null) return null;
 
          // create new workspace node based on the node from the underlying data structure
@@ -539,7 +540,7 @@
          {
             workspaceNode.markAsDeleted(false);
             // re-add to parent
-            WorkspaceNode parent = fetchWorkspaceNode(fqn.getParent(), workspace, true);
+            WorkspaceNode parent = fetchWorkspaceNode(fqn.getParent(), workspace, true, includeInvalidNodes);
             parent.addChild(workspaceNode);
          }
          else

Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticValidatorInterceptor.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -7,9 +7,11 @@
 package org.jboss.cache.interceptors;
 
 import org.jboss.cache.CacheException;
+import org.jboss.cache.CacheSPI;
 import org.jboss.cache.Fqn;
 import org.jboss.cache.InvocationContext;
 import org.jboss.cache.NodeSPI;
+import static org.jboss.cache.config.Configuration.CacheMode;
 import org.jboss.cache.marshall.MethodCall;
 import org.jboss.cache.marshall.MethodDeclarations;
 import org.jboss.cache.optimistic.DataVersioningException;
@@ -44,6 +46,15 @@
  */
 public class OptimisticValidatorInterceptor extends OptimisticInterceptor
 {
+   private boolean useTombstones;
+
+   public void setCache(CacheSPI cache)
+   {
+      super.setCache(cache);
+      CacheMode mode = cache.getConfiguration().getCacheMode();
+      useTombstones = (mode == CacheMode.INVALIDATION_ASYNC) || (mode == CacheMode.INVALIDATION_SYNC);
+   }
+
    public Object invoke(InvocationContext ctx) throws Throwable
    {
       MethodCall m = ctx.getMethodCall();
@@ -93,7 +104,7 @@
             if (trace) log.trace("Validating version for node [" + fqn + "]");
 
             NodeSPI underlyingNode;
-            underlyingNode = cache.peek(fqn, true);
+            underlyingNode = cache.peek(fqn, true, true);
 
             // if this is a newly created node then we expect the underlying node to be null.
             // also, if the node has been deleted in the WS and the underlying node is null, this *may* be ok ... will test again later when comparing versions
@@ -104,11 +115,22 @@
             }
 
             // needs to have been created AND modified - we allow concurrent creation if no data is put into the node
-            if (underlyingNode != null && workspaceNode.isCreated() && workspaceNode.isModified())
+            if (underlyingNode != null && underlyingNode.isValid() && workspaceNode.isCreated() && workspaceNode.isModified())
             {
                throw new DataVersioningException("Transaction attempted to create " + fqn + " anew.  It has already been created since this transaction started, by another (possibly remote) transaction.  We have a concurrent creation event.");
             }
 
+            if (underlyingNode != null && !underlyingNode.isValid())
+            {
+               // we have a tombstone
+               if (!workspaceNode.isCreated()) throw new DataVersioningException("Underlying node doesn't exist but a tombstone does; workspace node should be marked as created!");
+               if (underlyingNode.getVersion().newerThan(workspaceNode.getVersion()))
+               {
+                  // we have an out of date node here
+                  throw new DataVersioningException("Version mismatch for node " + fqn + ": underlying node with version " + workspaceNode.getNode().getVersion() + " is newer than workspace node, with version " + workspaceNode.getVersion());
+               }
+            }
+
             if (!workspaceNode.isCreated() && (workspaceNode.isDeleted() || workspaceNode.isModified()))
             {
                // if the real node is null, throw a DVE
@@ -170,13 +192,22 @@
             }
             else
             {
-               NodeSPI parent = underlyingNode.getParent();
-               if (parent == null)
+               // mark it as invalid so any direct references are marked as such
+               underlyingNode.setValid(false, true);
+               // we need to update versions here, too
+               performVersionUpdate(underlyingNode, workspaceNode);
+               
+               if (!useTombstones)
                {
-                  throw new CacheException("Underlying node " + underlyingNode + " has no parent");
+                  // don't retain the tombstone
+                  NodeSPI parent = underlyingNode.getParent();
+                  if (parent == null)
+                  {
+                     throw new CacheException("Underlying node " + underlyingNode + " has no parent");
+                  }
+
+                  parent.removeChildDirect(underlyingNode.getFqn().getLastElement());                  
                }
-
-               parent.removeChildDirect(underlyingNode.getFqn().getLastElement());
             }
          }
          else
@@ -197,7 +228,14 @@
 
                for (Fqn child : deltas.get(1))
                {
-                  underlyingNode.removeChildDirect(child.getLastElement());
+                  // mark it as invalid so any direct references are marked as such
+                  underlyingNode.getChildDirect(child.getLastElement()).setValid(false, true);
+
+                  if (!useTombstones)
+                  {
+                     // don't retain the tombstone
+                     underlyingNode.removeChildDirect(child.getLastElement());
+                  }
                }
 
                updateVersion = underlyingNode.isLockForChildInsertRemove();
@@ -211,35 +249,36 @@
                Map mergedData = workspaceNode.getMergedData();
                underlyingNode.clearDataDirect();
                underlyingNode.putAllDirect(mergedData);
+               underlyingNode.setValid(true, false);
                updateVersion = true;
             }
 
             if (updateVersion)
             {
-               if (workspaceNode.isVersioningImplicit())
-               {
-                  if (trace) log.trace("Versioning is implicit; incrementing.");
-                  underlyingNode.setVersion(((DefaultDataVersion) workspaceNode.getVersion()).increment());
-               }
-               else
-               {
-                  if (trace) log.trace("Versioning is explicit; not attempting an increment.");
-                  underlyingNode.setVersion(workspaceNode.getVersion());
-               }
-
-               if (trace)
-                  log.trace("Setting version of node " + underlyingNode.getFqn() + " from " + workspaceNode.getVersion() + " to " + underlyingNode.getVersion());
+               performVersionUpdate(underlyingNode, workspaceNode);
             }
-            else
-            {
-               if (trace)
-                  log.trace("Version update on " + workspaceNode.getFqn() + " not necessary since the node is not dirty or LockParentForChildInsertRemove is set to false");
-            }
          }
       }
 
    }
 
+   private void performVersionUpdate(NodeSPI underlyingNode, WorkspaceNode workspaceNode)
+   {
+      if (workspaceNode.isVersioningImplicit())
+      {
+         if (trace) log.trace("Versioning is implicit; incrementing.");
+         underlyingNode.setVersion(((DefaultDataVersion) workspaceNode.getVersion()).increment());
+      }
+      else
+      {
+         if (trace) log.trace("Versioning is explicit; not attempting an increment.");
+         underlyingNode.setVersion(workspaceNode.getVersion());
+      }
+
+      if (trace)
+         log.trace("Setting version of node " + underlyingNode.getFqn() + " from " + workspaceNode.getVersion() + " to " + underlyingNode.getVersion());
+   }
+
    private void rollBack(GlobalTransaction gtx)
    {
       TransactionWorkspace workspace;

Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedOptNodeValidityTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,99 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.NodeSPI;
+import org.jboss.cache.optimistic.DefaultDataVersion;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik at jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+ at Test(groups = {"functional"})
+public class InvalidatedOptNodeValidityTest extends InvalidatedPessNodeValidityTest
+{
+   public InvalidatedOptNodeValidityTest()
+   {
+      optimistic = true;
+   }
+
+   public void testTombstoneRevival()
+   {
+      modifier.put(parent, K, V);
+      modifier.removeNode(parent);
+
+      NodeSPI observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+      assert observerNode == null : "Should be removed";
+
+      // now try a put on a with a newer data version; should work
+      modifier.getInvocationContext().getOptionOverrides().setDataVersion(new DefaultDataVersion(10));
+      modifier.put(parent, K, V);
+
+      NodeSPI modifierNode = (NodeSPI) modifier.getRoot().getChild(parent);
+      assert modifierNode != null : "Should not be null";
+      assert modifierNode.isValid() : "No longer a tombstone";
+      assert ((DefaultDataVersion) modifierNode.getVersion()).getRawVersion() == 10 : "Version should be updated";
+
+      observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+      assert observerNode != null : "Should not be null";
+      assert observerNode.isValid() : "No longer a tombstone";
+      assert ((DefaultDataVersion) observerNode.getVersion()).getRawVersion() == 10 : "Version should be updated";
+   }
+
+   public void testTombstoneVersioningFailure() throws Exception
+   {
+      CacheImpl modifierImpl = (CacheImpl) modifier;
+      CacheImpl observerImpl = (CacheImpl) observer;
+
+      modifier.put(parent, K, V);
+
+      // test that this exists in the (shared) loader
+      assert loader.get(parent) != null;
+      assert loader.get(parent).size() > 0;
+
+      modifier.removeNode(parent);
+
+      // assert that tombstones exist on both instances
+      assert modifierImpl.peek(parent, true, true) != null;
+      assert observerImpl.peek(parent, true, true) != null;
+      assert modifierImpl.peek(parent, false, false) == null;
+      assert observerImpl.peek(parent, false, false) == null;
+
+      // make sure this does not exist in the loader; since it HAS been removed
+      assert loader.get(parent) == null;
+
+      NodeSPI observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+      assert observerNode == null : "Should be removed";
+
+      // now try a put with an outdated data version; should fail with a DataVersioningException
+      modifier.getInvocationContext().getOptionOverrides().setDataVersion(new DefaultDataVersion(1));
+      try
+      {
+         modifier.put(parent, K, V);
+         assert false : "Should have barfed!";
+      }
+      catch (RuntimeException expected)
+      {
+
+      }
+
+      NodeSPI modifierNode = (NodeSPI) modifier.getRoot().getChild(parent);
+      assert modifierNode == null : "Should be null";
+
+      observerNode = (NodeSPI) observer.getRoot().getChild(parent);
+      assert observerNode == null : "Should be null";
+
+      NodeSPI modifierTombstone = modifierImpl.peek(parent, true, true);
+      NodeSPI observerTombstone = observerImpl.peek(parent, true, true);
+
+      assert modifierTombstone != null : "Tombstone should still exist";
+      assert observerTombstone != null : "Tombstone should still exist";
+
+      assert !modifierTombstone.isValid() : "Should not be valid";
+      assert !observerTombstone.isValid() : "Should not be valid";
+
+      assert ((DefaultDataVersion) modifierTombstone.getVersion()).getRawVersion() == 2 : "Should retain versioning";
+      assert ((DefaultDataVersion) observerTombstone.getVersion()).getRawVersion() == 2 : "Should retain versioning";
+   }
+}
\ No newline at end of file

Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/InvalidatedPessNodeValidityTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,65 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheFactory;
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.config.CacheLoaderConfig;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.loader.DummyInMemoryCacheLoader;
+import org.jboss.cache.loader.DummySharedInMemoryCacheLoader;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik at jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+ at Test(groups = {"functional"})
+public class InvalidatedPessNodeValidityTest extends NodeValidityTestBase
+{
+   protected DummyInMemoryCacheLoader loader;
+
+   public InvalidatedPessNodeValidityTest()
+   {
+      invalidation = true;
+   }
+
+   protected Cache<String, String> createObserver()
+   {
+      return newCache();
+   }
+
+   protected Cache<String, String> createModifier()
+   {
+      return newCache();
+   }
+
+   @AfterMethod
+   public void emptyCacheLoader()
+   {
+      if (loader != null) loader.wipe();
+   }
+
+   protected Cache<String, String> newCache()
+   {
+      CacheFactory<String, String> f = DefaultCacheFactory.getInstance();
+      Cache<String, String> cache = f.createCache(false);
+      cache.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_SYNC);
+      optimisticConfiguration(cache.getConfiguration());
+
+      // need a cache loader as a shared data source between the 2 instances
+      CacheLoaderConfig.IndividualCacheLoaderConfig iclc = new CacheLoaderConfig.IndividualCacheLoaderConfig();
+      iclc.setClassName(DummySharedInMemoryCacheLoader.class.getName());
+      CacheLoaderConfig clc = new CacheLoaderConfig();
+      clc.addIndividualCacheLoaderConfig(iclc);
+      cache.getConfiguration().setCacheLoaderConfig(clc);
+
+      cache.start();
+
+      loader = (DummyInMemoryCacheLoader) ((CacheImpl) cache).getCacheLoader();
+
+      return cache;
+   }
+}

Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalOptNodeValidityTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,17 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik at jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+ at Test(groups = {"functional"})
+public class LocalOptNodeValidityTest extends LocalPessNodeValidityTest
+{
+   public LocalOptNodeValidityTest()
+   {
+      optimistic = true;      
+   }
+}
\ No newline at end of file

Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/LocalPessNodeValidityTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,50 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheFactory;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.misc.TestingUtil;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik at jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+ at Test(groups = {"functional"})
+public class LocalPessNodeValidityTest extends NodeValidityTestBase
+{
+   private Cache<String, String> cache;
+
+   public LocalPessNodeValidityTest()
+   {
+      clustered = false;
+   }
+
+   @AfterMethod
+   public void tearDown()
+   {
+      super.tearDown();
+      TestingUtil.killCaches(cache);
+      cache = null;
+   }
+
+   protected Cache<String, String> createObserver()
+   {
+      return createModifier();
+   }
+
+   protected Cache<String, String> createModifier()
+   {
+      if (cache == null)
+      {
+         CacheFactory<String, String> f = DefaultCacheFactory.getInstance();
+         cache = f.createCache(false);
+         optimisticConfiguration(cache.getConfiguration());
+         cache.start();
+         return cache;
+      }
+      return cache;
+   }
+}

Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/NodeValidityTestBase.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,366 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+import org.jboss.cache.NodeNotValidException;
+import org.jboss.cache.NodeSPI;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.misc.TestingUtil;
+import org.jboss.cache.optimistic.DefaultDataVersion;
+import org.jboss.cache.transaction.DummyTransactionManagerLookup;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import java.util.Collections;
+
+/**
+ * exercises the isValid() api call on node.
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public abstract class NodeValidityTestBase
+{
+   protected boolean optimistic;
+
+   // needed to attach a blockUntilViewsReceived in setup
+   protected boolean clustered = true;
+
+   // needed to test tombstones
+   protected boolean invalidation = false;
+
+   protected Cache<String, String> observer;
+   protected Cache<String, String> modifier;
+   protected Fqn parent = Fqn.fromString("/parent");
+   protected Fqn child = Fqn.fromString("/parent/child");
+   protected String K="k", V="v";
+
+   protected abstract Cache<String, String> createObserver();
+   protected abstract Cache<String, String> createModifier();
+
+   protected void optimisticConfiguration(Configuration c)
+   {
+      if (optimistic)
+      {
+         c.setNodeLockingScheme(Configuration.NodeLockingScheme.OPTIMISTIC);
+         c.setTransactionManagerLookupClass(DummyTransactionManagerLookup.class.getName());
+         c.setSyncCommitPhase(true);
+         c.setSyncRollbackPhase(true);
+      }
+   }
+
+   @BeforeMethod
+   public void setUp()
+   {
+      observer = createObserver();
+      modifier = createModifier();
+      if (clustered) TestingUtil.blockUntilViewsReceived(60000, observer, modifier);
+   }
+
+   @AfterMethod
+   public void tearDown()
+   {
+      TestingUtil.killCaches(observer, modifier);
+      observer = null;
+      modifier = null;
+   }
+
+   public void testRemoval()
+   {
+//      observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+      observer.put(parent, K, V);
+
+      Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+      assert obsNode.get(K).equals(V) : "Data should be in the node.";
+      assert obsNode.isValid() : "Node should be valid";
+
+      modifier.removeNode(parent);
+
+      assert !obsNode.isValid() : "Should no longer be valid";
+   }
+
+   public void testRemovalWithChildren()
+   {
+//      observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+      observer.put(child, K, V);
+
+      Node<String, String> obsParentNode = observer.getRoot().getChild(parent);
+      Node<String, String> obsChildNode = observer.getRoot().getChild(child);
+
+      assert obsChildNode.get(K).equals(V) : "Data should be in the node.";
+      assert obsChildNode.isValid() : "Node should be valid";
+      assert obsParentNode.isValid() : "Node should be valid";
+
+      modifier.removeNode(parent);
+
+      assert !obsParentNode.isValid() : "Should no longer be valid";
+      assert !obsChildNode.isValid() : "Should no longer be valid";
+   }
+
+   public void testMove()
+   {
+      Fqn newParent = Fqn.fromString("/newParent/parent");
+
+      //observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+      observer.put(parent, K, V);
+
+      Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+      assert obsNode.get(K).equals(V) : "Data should be in the node.";
+      assert obsNode.isValid() : "Node should be valid";
+
+      // new parent needs to exist first.
+      modifier.getRoot().addChild(newParent);      
+      modifier.move(parent, newParent.getParent());
+
+      // the old node is only marked as invalid if we use opt locking
+      // with pess locking we directly move the node reference so the old ref is still valid, EVEN if the move happens
+      // remotely.
+      if (optimistic) assert !obsNode.isValid() : "Should no longer be valid";
+
+      assert observer.getRoot().getChild(newParent).isValid() : "Should be valid";
+   }
+
+   public void testMoveWithChildren()
+   {
+      Fqn newParent = Fqn.fromString("/newParent/parent");
+      Fqn newChild = Fqn.fromString("/newParent/parent/child");
+
+//      observer.getInvocationContext().getOptionOverrides().setCacheModeLocal(true);
+      observer.put(child, K, V);
+
+      Node<String, String> obsParentNode = observer.getRoot().getChild(parent);
+      Node<String, String> obsChildNode = observer.getRoot().getChild(child);
+
+      assert obsChildNode.get(K).equals(V) : "Data should be in the node.";
+      assert obsChildNode.isValid() : "Node should be valid";
+      assert obsParentNode.isValid() : "Node should be valid";
+
+      // new parent needs to exist first.
+      modifier.getRoot().addChild(newParent);
+      modifier.move(parent, newParent.getParent());
+
+      // the old node is only marked as invalid if we use opt locking
+      // with pess locking we directly move the node reference so the old ref is still valid.
+      if (optimistic)
+      {
+         assert !obsParentNode.isValid() : "Should no longer be valid";
+         assert !obsChildNode.isValid() : "Should no longer be valid";
+      }
+
+      assert observer.getRoot().getChild(newParent).isValid() : "Should be valid";
+      assert observer.getRoot().getChild(newChild).isValid() : "Should be valid";
+   }
+
+   public void testEvict()
+   {
+      // eviction should NOT affect validity
+      observer.put(parent, K, V);
+      Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+      assert obsNode.get(K).equals(V) : "Data should be in the node.";
+      assert obsNode.isValid() : "Node should be valid";
+
+      // eviction needs to happen on the same cache being watched
+      observer.evict(parent, false);
+
+      assert obsNode.isValid() : "Node should be valid";
+   }
+
+   public void testOperationsOnInvalidNode()
+   {
+      observer.put(parent, K, V);
+      Node<String, String> obsNode = observer.getRoot().getChild(parent);
+
+      assert obsNode.get(K).equals(V) : "Data should be in the node.";
+      assert obsNode.isValid() : "Node should be valid";
+
+      modifier.removeNode(parent);
+
+      assert !obsNode.isValid() : "Node should not be valid";
+
+      // all operations on the cached node should throw a NodeNotValidException
+
+      try
+      {
+         obsNode.get(K);
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.put(K, "v2");
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.remove(K);
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.clearData();
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.putAll(Collections.singletonMap(K, "v2"));
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.getKeys();
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.hasChild("Something");
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.removeChild("Something");
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.addChild(child);
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+
+      try
+      {
+         obsNode.getChildrenNames();
+         assert false : "Should fail";
+      }
+      catch (NodeNotValidException good)
+      {
+         // do nothing
+      }
+   }
+
+   public void testExistenceOfTombstones()
+   {
+      CacheImpl modifierImpl = (CacheImpl) modifier;
+      CacheImpl observerImpl = (CacheImpl) observer;
+
+      modifier.put(parent, K, V);
+      modifier.removeNode(parent);
+
+      if (optimistic && invalidation)
+      {
+         // if we are using optimistic invalidation then we should see tombstones.  NOT otherwise.
+         NodeSPI modifierTombstone = modifierImpl.peek(parent, true, true);
+         NodeSPI observerTombstone = observerImpl.peek(parent, true, true);
+
+         assert modifierTombstone != null : "Modifier tombstone should not be null";
+         assert observerTombstone != null : "Observer tombstone should not be null";
+
+         assert !modifierTombstone.isValid() : "Should not be valid";
+         assert !observerTombstone.isValid() : "Should not be valid";
+
+         assert ((DefaultDataVersion) modifierTombstone.getVersion()).getRawVersion() == 2 : "Tombstone should be versioned";
+         assert ((DefaultDataVersion) observerTombstone.getVersion()).getRawVersion() == 2 : "Tombstone should be versioned";
+
+      }
+      else
+      {
+         // if we are using pess locking there should be NO tombstones, regardless of replication/invalidation!
+         assert modifierImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+         assert observerImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+      }
+   }
+
+   public void testExistenceOfTombstonesWithChildren()
+   {
+      CacheImpl modifierImpl = (CacheImpl) modifier;
+      CacheImpl observerImpl = (CacheImpl) observer;
+
+      modifier.put(child, K, V);
+      modifier.removeNode(parent);
+
+      if (optimistic && invalidation)
+      {
+         // if we are using optimistic invalidation then we should see tombstones.  NOT otherwise.
+         NodeSPI modifierParentTombstone = modifierImpl.peek(parent, true, true);
+         NodeSPI observerParentTombstone = observerImpl.peek(parent, true, true);
+         NodeSPI modifierChildTombstone = modifierImpl.peek(child, true, true);
+         NodeSPI observerChildTombstone = observerImpl.peek(child, true, true);
+
+         assert modifierParentTombstone != null : "Modifier parent tombstone should not be null";
+         assert observerParentTombstone != null : "Observer parent tombstone should not be null";
+         assert modifierChildTombstone != null : "Modifier child tombstone should not be null";
+         assert observerChildTombstone != null : "Observer child tombstone should not be null";
+
+         assert !modifierParentTombstone.isValid() : "Should not be valid";
+         assert !observerParentTombstone.isValid() : "Should not be valid";
+         assert !modifierChildTombstone.isValid() : "Should not be valid";
+         assert !observerChildTombstone.isValid() : "Should not be valid";
+
+         assert ((DefaultDataVersion) modifierParentTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+         assert ((DefaultDataVersion) observerParentTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+
+         // note that versions on children cannot be incremented/updated since the remove operation was
+         // performed on the parent.
+         assert ((DefaultDataVersion) modifierChildTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+         assert ((DefaultDataVersion) observerChildTombstone.getVersion()).getRawVersion() == 1 : "Tombstone should be versioned";
+
+      }
+      else
+      {
+         // if we are using pess locking there should be NO tombstones, regardless of replication/invalidation!
+         assert modifierImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+         assert observerImpl.peek(parent, true, true) == null : "Tombstone should not exist";
+         assert modifierImpl.peek(child, true, true) == null : "Tombstone should not exist";
+         assert observerImpl.peek(child, true, true) == null : "Tombstone should not exist";
+      }
+   }
+}

Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedOptNodeValidityTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,17 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class ReplicatedOptNodeValidityTest extends ReplicatedPessNodeValidityTest
+{
+   public ReplicatedOptNodeValidityTest()
+   {
+      optimistic = true;
+   }
+}
\ No newline at end of file

Added: core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/nodevalidity/ReplicatedPessNodeValidityTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,36 @@
+package org.jboss.cache.api.nodevalidity;
+
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheFactory;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.config.Configuration;
+import org.testng.annotations.Test;
+
+/**
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class ReplicatedPessNodeValidityTest extends NodeValidityTestBase
+{
+   protected Cache<String, String> createObserver()
+   {
+      return newCache();
+   }
+
+   protected Cache<String, String> createModifier()
+   {
+      return newCache();
+   }
+
+   protected Cache<String, String> newCache()
+   {
+      CacheFactory<String, String> f = DefaultCacheFactory.getInstance();
+      Cache<String, String> cache = f.createCache(false);
+      cache.getConfiguration().setCacheMode(Configuration.CacheMode.REPL_SYNC);
+      optimisticConfiguration(cache.getConfiguration());
+      cache.start();
+      return cache;
+   }
+}

Added: core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java	                        (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/invalidation/TombstoneEvictionTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -0,0 +1,131 @@
+package org.jboss.cache.invalidation;
+
+import org.jboss.cache.CacheImpl;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.config.EvictionConfig;
+import org.jboss.cache.config.EvictionRegionConfig;
+import org.jboss.cache.eviction.FIFOConfiguration;
+import org.jboss.cache.misc.TestingUtil;
+import org.jboss.cache.transaction.DummyTransactionManagerLookup;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Make sure tombstones are evicted
+ *
+ * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
+ * @since 2.1.0
+ */
+@Test(groups = {"functional"})
+public class TombstoneEvictionTest
+{
+   private CacheImpl c1, c2;
+   private Fqn fqn = Fqn.fromString("/data/test");
+   private Fqn dummy = Fqn.fromString("/data/dummy");
+   private long evictionWaitTime = 2100;
+
+   @BeforeMethod
+   public void setUp() throws Exception
+   {
+      c1 = (CacheImpl) DefaultCacheFactory.getInstance().createCache(false);
+      c2 = (CacheImpl) DefaultCacheFactory.getInstance().createCache(false);
+
+      // the FIFO policy cfg
+      FIFOConfiguration cfg = new FIFOConfiguration();
+      cfg.setMaxNodes(1);
+      cfg.setMinTimeToLiveSeconds(0);
+
+      // the region configuration
+      EvictionRegionConfig regionCfg = new EvictionRegionConfig();
+      regionCfg.setRegionFqn(dummy.getParent());
+      regionCfg.setRegionName(dummy.getParent().toString());
+      regionCfg.setEvictionPolicyConfig(cfg);
+
+      // set regions in a list
+      List<EvictionRegionConfig> evictionRegionConfigs = Collections.singletonList(regionCfg);
+
+
+      EvictionConfig ec = new EvictionConfig();
+      ec.setWakeupIntervalSeconds(1);
+      ec.setEvictionRegionConfigs(evictionRegionConfigs);
+
+      c1.getConfiguration().setCacheMode(Configuration.CacheMode.INVALIDATION_SYNC);
+      c1.getConfiguration().setNodeLockingScheme(Configuration.NodeLockingScheme.OPTIMISTIC);
+      c1.getConfiguration().setTransactionManagerLookupClass(DummyTransactionManagerLookup.class.getName());
+      c1.getConfiguration().setEvictionConfig(ec);
+
+      c2.setConfiguration(c1.getConfiguration().clone());
+
+      c1.start();
+      c2.start();
+
+      TestingUtil.blockUntilViewsReceived(60000, c1, c2);
+   }
+
+   @AfterMethod
+   public void tearDown()
+   {
+      TestingUtil.killCaches(c1, c2);
+   }
+
+   public void testControl()
+   {
+      c1.put(fqn, "k", "v");
+      c1.put(dummy, "k", "v");
+
+      assert c1.peek(fqn, false, true) != null : "Node should exist";
+      assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+      TestingUtil.sleepThread(evictionWaitTime);
+
+      assert c1.peek(fqn, false, true) == null : "Should have evicted";
+      assert c1.peek(dummy, false, true) != null : "Node should exist";
+   }
+
+   public void testWithInvalidationMarkers()
+   {
+      c1.put(fqn, "k", "v");
+      c1.put(dummy, "k", "v");
+
+      assert c1.peek(fqn, false, true) != null : "Node should exist";
+      assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+      assert c2.peek(fqn, false, true) != null : "Node should exist";
+      assert c2.peek(dummy, false, true) != null : "Node should exist";
+
+      TestingUtil.sleepThread(evictionWaitTime);
+
+      assert c1.peek(fqn, false, true) == null : "Should have evicted";
+      assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+      assert c2.peek(fqn, false, true) == null : "Should have evicted";
+      assert c2.peek(dummy, false, true) != null : "Node should exist";
+   }
+   
+   public void testWithTombstones()
+   {
+      c1.put(fqn, "k", "v");
+      c1.removeNode(fqn);
+      c1.put(dummy, "k", "v");
+
+      assert c1.peek(fqn, false, true) != null : "Node should exist";
+      assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+      assert c2.peek(fqn, false, true) != null : "Node should exist";
+      assert c2.peek(dummy, false, true) != null : "Node should exist";
+
+      TestingUtil.sleepThread(evictionWaitTime);
+
+      assert c1.peek(fqn, false, true) == null : "Should have evicted";
+      assert c1.peek(dummy, false, true) != null : "Node should exist";
+
+      assert c2.peek(fqn, false, true) == null : "Should have evicted";
+      assert c2.peek(dummy, false, true) != null : "Node should exist";
+   }
+}

Modified: core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/test/java/org/jboss/cache/misc/TestingUtil.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -7,16 +7,18 @@
 
 package org.jboss.cache.misc;
 
-import java.io.File;
-import java.util.List;
-import java.util.Random;
-
 import org.jboss.cache.Cache;
 import org.jboss.cache.CacheImpl;
 import org.jboss.cache.CacheSPI;
+import org.jboss.cache.CacheStatus;
+import org.jboss.cache.Fqn;
 import org.jboss.cache.interceptors.Interceptor;
 import org.jboss.cache.util.CachePrinter;
 
+import java.io.File;
+import java.util.List;
+import java.util.Random;
+
 /**
  * Utilities for unit testing JBossCache.
  *
@@ -374,4 +376,43 @@
       //System.out.println("File " + f.toURI() + " deleted = " + f.delete());
       f.delete();
    }
+
+   /**
+    * Kills a cache - stops it, clears any data in any cache loaders, and rolls back any associated txs
+    */
+   public static void killCaches(Cache... caches)
+   {
+      for (Cache c: caches)
+      {
+         if (c != null && c.getCacheStatus() == CacheStatus.STARTED)
+         {
+            CacheImpl ci = (CacheImpl) c;
+            if (ci.getTransactionManager() != null)
+            {
+               try
+               {
+                  ci.getTransactionManager().rollback();
+               }
+               catch (Exception e)
+               {
+                  // don't care
+               }
+            }
+
+            if (ci.getCacheLoader() != null)
+            {
+               try
+               {
+                  ci.getCacheLoader().remove(Fqn.ROOT);
+               }
+               catch (Exception e)
+               {
+                  // don't care
+               }
+            }
+
+            ci.destroy();
+         }
+      }
+   }
 }

Modified: core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionPersistenceTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -135,67 +135,6 @@
       assert n.getData().size() == 1;
    }
 
-   public void testStateTransferDefaultVersionAfterRemoval() throws Exception
-   {
-      Fqn f = Fqn.fromString("/one/two/three");
-      cache.put(f, "k", "v");
-      cache.put(f, "k1", "v1");
-      cache.removeNode(f);
-
-      NodeSPI n = (NodeSPI) cache.getRoot().getChild(f);
-      DataVersion dv = n.getVersion();
-
-      assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
-      assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Should have accurate data version";
-
-      // now restart cache instance
-      cache.stop();
-      cache.start();
-
-      assert cache.get(f, "k")== null : "Should be removed";
-
-      n = (NodeSPI) cache.getRoot().getChild(f);
-
-      dv = n.getVersion();
-
-      assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
-      assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Version should have persisted";
-   }
-
-   public void testStateTransferCustomVersionAfterRemoval() throws Exception
-   {
-      Fqn f = Fqn.fromString("/one/two/three");
-      cache.getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('A'));
-      cache.put(f, "k", "v");
-      cache.getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('B'));
-      cache.put(f, "k1", "v1");
-      cache.getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('C'));
-      cache.removeNode(f);
-
-      NodeSPI n = (NodeSPI) cache.getRoot().getChild(f);
-      DataVersion dv = n.getVersion();
-
-      assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
-      assert ((CharVersion) dv).version == 'C' : "Should have accurate data version";
-
-      // now restart cache instance
-      cache.stop();
-      cache.start();
-
-      assert cache.get(f, "k")== null : "Should be removed";
-
-      n = (NodeSPI) cache.getRoot().getChild(f);
-
-      dv = n.getVersion();
-
-      assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
-      assert ((CharVersion) dv).version == 'C' : "Version should have persisted";
-   }
-
    public static class CharVersion implements DataVersion
    {
       private char version = 'A';

Modified: core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java	2007-10-22 19:13:05 UTC (rev 4662)
+++ core/trunk/src/test/java/org/jboss/cache/optimistic/DataVersionTransferTest.java	2007-10-22 23:08:18 UTC (rev 4663)
@@ -193,69 +193,6 @@
       assert n.getData().size() == 1;
    }
 
-   public void testStateTransferDefaultVersionAfterRemoval() throws Exception
-   {
-      Fqn f = Fqn.fromString("/one/two/three");
-      caches.get(0).put(f, "k", "v");
-      caches.get(0).put(f, "k1", "v1");
-      caches.get(0).removeNode(f);
-
-      NodeSPI n = (NodeSPI) caches.get(0).getRoot().getChild(f);
-      DataVersion dv = n.getVersion();
-
-      assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
-      assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Should have accurate data version";
-
-      // now start next cache instance
-      caches.get(1).start();
-
-      TestingUtil.blockUntilViewsReceived(10000, caches.get(0), caches.get(1));
-
-      assert caches.get(1).get(f, "k")== null : "Should be removed";
-
-      n = (NodeSPI) caches.get(1).getRoot().getChild(f);
-
-      dv = n.getVersion();
-
-      assert dv instanceof DefaultDataVersion : "Should be an instance of DefaultDataVersion";
-
-      assert ((DefaultDataVersion) dv).getRawVersion() == 3 : "Version should have transferred";
-   }
-
-   public void testStateTransferCustomVersionAfterRemoval() throws Exception
-   {
-      Fqn f = Fqn.fromString("/one/two/three");
-      caches.get(0).getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('A'));
-      caches.get(0).put(f, "k", "v");
-      caches.get(0).getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('B'));
-      caches.get(0).put(f, "k1", "v1");
-      caches.get(0).getInvocationContext().getOptionOverrides().setDataVersion(new CharVersion('C'));
-      caches.get(0).removeNode(f);
-
-      NodeSPI n = (NodeSPI) caches.get(0).getRoot().getChild(f);
-      DataVersion dv = n.getVersion();
-
-      assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
-      assert ((CharVersion) dv).version == 'C' : "Should have accurate data version";
-
-      // now start next cache instance
-      caches.get(1).start();
-
-      TestingUtil.blockUntilViewsReceived(10000, caches.get(0), caches.get(1));
-
-      assert caches.get(1).get(f, "k")== null : "Should be removed";
-
-      n = (NodeSPI) caches.get(1).getRoot().getChild(f);
-
-      dv = n.getVersion();
-
-      assert dv instanceof CharVersion : "Should be an instance of CharVersion";
-
-      assert ((CharVersion) dv).version == 'C' : "Version should have transferred";
-   }
-
    public static class CharVersion implements DataVersion
    {
       private char version = 'A';




More information about the jbosscache-commits mailing list