[infinispan-commits] Infinispan SVN: r116 - in trunk/core/src: main/java/org/infinispan/factories and 1 other directories.

infinispan-commits at lists.jboss.org infinispan-commits at lists.jboss.org
Wed Apr 15 04:04:37 EDT 2009


Author: manik.surtani at jboss.com
Date: 2009-04-15 04:04:36 -0400 (Wed, 15 Apr 2009)
New Revision: 116

Added:
   trunk/core/src/main/java/org/infinispan/container/NewFIFOContainer.java
   trunk/core/src/main/java/org/infinispan/container/NewLRUContainer.java
   trunk/core/src/main/java/org/infinispan/container/SpinLockBasedFIFODataContainer.java
   trunk/core/src/main/java/org/infinispan/container/SpinLockBasedLRUDataContainer.java
   trunk/core/src/test/java/org/infinispan/container/NewFIFODataContainerTest.java
   trunk/core/src/test/java/org/infinispan/container/NewLRUDataContainerTest.java
   trunk/core/src/test/java/org/infinispan/container/SpinLockBasedFIFODataContainerTest.java
   trunk/core/src/test/java/org/infinispan/container/SpinLockBasedLRUDataContainerTest.java
Removed:
   trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
   trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
   trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java
   trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java
Modified:
   trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java
Log:
[ISPN-9] (Eviction redesign) refactored containers

Deleted: trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java	2009-04-14 18:15:11 UTC (rev 115)
+++ trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -1,597 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.InternalEntryFactory;
-
-import java.util.AbstractSet;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * A data container that exposes an iterator that is ordered based on order of entry into the container, with the oldest
- * entries first.
- * <p/>
- * This data container that maintains a concurrent hashtable of entries, and also maintains linking between the elements
- * for ordered iterators.
- * <p/>
- * This uses concepts from {@link java.util.concurrent.ConcurrentHashMap} in that it maintains a table of lockable
- * Segments, each of which is a specialized Hashtable, but HashEntries are also linked to each other such that they can
- * be navigated, like a {@link java.util.LinkedHashMap}.  To ensure thread safety of links between entries, we follow
- * auxiliary node ideas expressed in John D. Valois' paper, <a href="http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.9506"><i>Lock-Free
- * Linked Lists Using Compare-and-Swap</i></a>.
- * <p/>
- * The locks maintained on linkable entries are implemented using {@link org.infinispan.container.FIFODataContainer.SpinLock}s,
- * and due to the nature of these spin locks, they should only be held for a minimal amount of time.
- *
- * @author Manik Surtani
- * @since 4.0
- */
-@ThreadSafe
-public class FIFODataContainer implements DataContainer {
-   /**
-    * The maximum capacity, used if a higher value is implicitly specified by either of the constructors with arguments.
-    * MUST be a power of two <= 1<<30 to ensure that entries are indexable using ints.
-    */
-   static final int MAXIMUM_CAPACITY = 1 << 30;
-
-   final LinkedEntry dummyEntry = new LinkedEntry(); // a dummy linked entry
-
-   // -- these fields are all very similar to JDK's ConcurrentHashMap
-
-   /**
-    * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose the segment.
-    */
-   final int segmentMask;
-
-   /**
-    * Shift value for indexing within segments.
-    */
-   final int segmentShift;
-
-   /**
-    * The segments, each of which is a specialized hash table
-    */
-   final Segment[] segments;
-
-   Set<Object> keySet;
-
-   public FIFODataContainer() {
-      float loadFactor = 0.75f;
-      int initialCapacity = 16;
-      int concurrencyLevel = 16;
-
-      // Find power-of-two sizes best matching arguments
-      int sshift = 0;
-      int ssize = 1;
-      while (ssize < concurrencyLevel) {
-         ++sshift;
-         ssize <<= 1;
-      }
-      segmentShift = 32 - sshift;
-      segmentMask = ssize - 1;
-      this.segments = Segment.newArray(ssize);
-
-      if (initialCapacity > MAXIMUM_CAPACITY)
-         initialCapacity = MAXIMUM_CAPACITY;
-      int c = initialCapacity / ssize;
-      if (c * ssize < initialCapacity)
-         ++c;
-      int cap = 1;
-      while (cap < c)
-         cap <<= 1;
-
-      for (int i = 0; i < this.segments.length; ++i) this.segments[i] = new Segment(cap, loadFactor);
-      initLinks();
-   }
-
-   // ---------- Public API methods
-
-   public InternalCacheEntry get(Object k) {
-      int h = hash(k.hashCode());
-      Segment s = segmentFor(h);
-      LinkedEntry le = s.get(k, h);
-      InternalCacheEntry ice = le == null ? null : le.entry;
-      if (ice != null) {
-         if (ice.isExpired()) {
-            remove(k);
-            ice = null;
-         } else {
-            ice.touch();
-         }
-      }
-
-      return ice;
-   }
-
-   public void put(Object k, Object v, long lifespan, long maxIdle) {
-      // do a normal put first.
-      int h = hash(k.hashCode());
-      Segment s = segmentFor(h);
-      s.lock();
-      LinkedEntry le = null;
-      Aux before = null, after = null;
-      boolean newEntry = false;
-      try {
-         le = s.get(k, h);
-         InternalCacheEntry ice = le == null ? null : le.entry;
-         if (ice == null) {
-            newEntry = true;
-            ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
-            // only update linking if this is a new entry
-            le = new LinkedEntry();
-            le.lock();
-            after = new Aux();
-            after.lock();
-            le.next = after;
-            after.next = dummyEntry;
-         } else {
-            ice.setValue(v);
-            ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
-         }
-
-         le.entry = ice;
-         s.locklessPut(k, h, le);
-
-         if (newEntry) {
-            dummyEntry.lock();
-            (before = dummyEntry.prev).lock();
-            before.next = le;
-            le.prev = before;
-            dummyEntry.prev = after;
-         }
-      } finally {
-         if (newEntry) {
-            if (le != null) {
-               before.unlock();
-               dummyEntry.unlock();
-               after.unlock();
-               le.unlock();
-            }
-         }
-         s.unlock();
-      }
-   }
-
-   public boolean containsKey(Object k) {
-      int h = hash(k.hashCode());
-      Segment s = segmentFor(h);
-      LinkedEntry le = s.get(k, h);
-      InternalCacheEntry ice = le == null ? null : le.entry;
-      if (ice != null) {
-         if (ice.isExpired()) {
-            remove(k);
-            ice = null;
-         }
-      }
-
-      return ice != null;
-   }
-
-   public InternalCacheEntry remove(Object k) {
-      int h = hash(k.hashCode());
-      Segment s = segmentFor(h);
-      s.lock();
-      InternalCacheEntry ice = null;
-      LinkedEntry le = null;
-      boolean linksLocked = false;
-      LinkedEntry nextEntry = null;
-      Aux before = null, after = null;
-      try {
-         le = s.locklessRemove(k, h);
-         if (le != null) {
-            ice = le.entry;
-            linksLocked = true;
-            // need to unlink
-            le.lock();
-            (before = le.prev).lock();
-            (after = le.next).lock();
-            nextEntry = after.next;
-            before.next = after.next;
-            before.next.prev = before;
-         }
-      } finally {
-         if (linksLocked) {
-            before.unlock();
-            after.unlock();
-            le.unlock();
-         }
-         s.unlock();
-      }
-
-      if (ice == null || ice.isExpired())
-         return null;
-      else
-         return ice;
-   }
-
-   public int size() {
-      // approximate sizing is good enough
-      int sz = 0;
-      final Segment[] segs = segments;
-      for (Segment s : segs) sz += s.count;
-      return sz;
-   }
-
-   public void clear() {
-      // This is expensive...
-      // lock all segments
-      for (Segment s : segments) s.lock();
-      try {
-         for (Segment s : segments) s.locklessClear();
-         initLinks();
-      } finally {
-         for (Segment s : segments) s.unlock();
-      }
-   }
-
-   public Set<Object> keySet() {
-      if (keySet == null) keySet = new KeySet();
-      return keySet;
-   }
-
-   public void purgeExpired() {
-      for (InternalCacheEntry ice : this) {
-         if (ice.isExpired()) remove(ice.getKey());
-      }
-   }
-
-   public Iterator<InternalCacheEntry> iterator() {
-      return new ValueIterator();
-   }
-
-   // --------------- Internals
-
-   /**
-    * Initializes links to an empty container
-    */
-   protected final void initLinks() {
-      Aux tail = new Aux();
-      try {
-         tail.lock();
-         dummyEntry.prev = tail;
-         dummyEntry.next = tail;
-         tail.next = dummyEntry;
-      } finally {
-         tail.unlock();
-         dummyEntry.unlock();
-      }
-   }
-
-   /**
-    * Similar to ConcurrentHashMap's hash() function: applies a supplemental hash function to a given hashCode, which
-    * defends against poor quality hash functions.  This is critical because ConcurrentHashMap uses power-of-two length
-    * hash tables, that otherwise encounter collisions for hashCodes that do not differ in lower or upper bits.
-    */
-   final int hash(int h) {
-      // Spread bits to regularize both segment and index locations,
-      // using variant of single-word Wang/Jenkins hash.
-      h += (h << 15) ^ 0xffffcd7d;
-      h ^= (h >>> 10);
-      h += (h << 3);
-      h ^= (h >>> 6);
-      h += (h << 2) + (h << 14);
-      return h ^ (h >>> 16);
-   }
-
-   /**
-    * Returns the segment that should be used for key with given hash
-    *
-    * @param hash the hash code for the key
-    * @return the segment
-    */
-   final Segment segmentFor(int hash) {
-      return segments[(hash >>> segmentShift) & segmentMask];
-   }
-
-   /**
-    * ConcurrentHashMap list entry. Note that this is never exported out as a user-visible Map.Entry.
-    * <p/>
-    * Because the value field is volatile, not final, it is legal wrt the Java Memory Model for an unsynchronized reader
-    * to see null instead of initial value when read via a data race.  Although a reordering leading to this is not
-    * likely to ever actually occur, the Segment.readValueUnderLock method is used as a backup in case a null
-    * (pre-initialized) value is ever seen in an unsynchronized access method.
-    */
-   static final class HashEntry {
-      final Object key;
-      final int hash;
-      volatile LinkedEntry value;
-      final HashEntry next;
-
-      HashEntry(Object key, int hash, HashEntry next, LinkedEntry value) {
-         this.key = key;
-         this.hash = hash;
-         this.next = next;
-         this.value = value;
-      }
-   }
-
-
-   /**
-    * Very similar to a Segment in a ConcurrentHashMap
-    */
-   static final class Segment extends ReentrantLock {
-      /**
-       * The number of elements in this segment's region.
-       */
-      transient volatile int count;
-
-      /**
-       * The table is rehashed when its size exceeds this threshold. (The value of this field is always
-       * <tt>(int)(capacity * loadFactor)</tt>.)
-       */
-      transient int threshold;
-
-      /**
-       * The per-segment table.
-       */
-      transient volatile HashEntry[] table;
-
-      /**
-       * The load factor for the hash table.  Even though this value is same for all segments, it is replicated to avoid
-       * needing links to outer object.
-       *
-       * @serial
-       */
-      final float loadFactor;
-
-      Segment(int initialCapacity, float lf) {
-         loadFactor = lf;
-         setTable(new HashEntry[initialCapacity]);
-      }
-
-      static final Segment[] newArray(int i) {
-         return new Segment[i];
-      }
-
-      /**
-       * Sets table to new HashEntry array. Call only while holding lock or in constructor.
-       */
-      final void setTable(HashEntry[] newTable) {
-         threshold = (int) (newTable.length * loadFactor);
-         table = newTable;
-      }
-
-      /**
-       * Returns properly cast first entry of bin for given hash.
-       */
-      final HashEntry getFirst(int hash) {
-         HashEntry[] tab = table;
-         return tab[hash & (tab.length - 1)];
-      }
-
-      /**
-       * Reads value field of an entry under lock. Called if value field ever appears to be null. This is possible only
-       * if a compiler happens to reorder a HashEntry initialization with its table assignment, which is legal under
-       * memory model but is not known to ever occur.
-       */
-      final LinkedEntry readValueUnderLock(HashEntry e) {
-         lock();
-         try {
-            return e.value;
-         } finally {
-            unlock();
-         }
-      }
-
-      /* Specialized implementations of map methods */
-
-      final LinkedEntry get(Object key, int hash) {
-         if (count != 0) { // read-volatile
-            HashEntry e = getFirst(hash);
-            while (e != null) {
-               if (e.hash == hash && key.equals(e.key)) {
-                  LinkedEntry v = e.value;
-                  if (v != null)
-                     return v;
-                  return readValueUnderLock(e); // recheck
-               }
-               e = e.next;
-            }
-         }
-         return null;
-      }
-
-      /**
-       * This put is lockless.  Make sure you call segment.lock() first.
-       */
-      final LinkedEntry locklessPut(Object key, int hash, LinkedEntry value) {
-         int c = count;
-         if (c++ > threshold) // ensure capacity
-            rehash();
-         HashEntry[] tab = table;
-         int index = hash & (tab.length - 1);
-         HashEntry first = tab[index];
-         HashEntry e = first;
-         while (e != null && (e.hash != hash || !key.equals(e.key)))
-            e = e.next;
-
-         LinkedEntry oldValue;
-         if (e != null) {
-            oldValue = e.value;
-            e.value = value;
-         } else {
-            oldValue = null;
-            tab[index] = new HashEntry(key, hash, first, value);
-            count = c; // write-volatile
-         }
-         return oldValue;
-      }
-
-      final void rehash() {
-         HashEntry[] oldTable = table;
-         int oldCapacity = oldTable.length;
-         if (oldCapacity >= MAXIMUM_CAPACITY)
-            return;
-
-         /*
-         * Reclassify nodes in each list to new Map.  Because we are
-         * using power-of-two expansion, the elements from each bin
-         * must either stay at same index, or move with a power of two
-         * offset. We eliminate unnecessary node creation by catching
-         * cases where old nodes can be reused because their next
-         * fields won't change. Statistically, at the default
-         * threshold, only about one-sixth of them need cloning when
-         * a table doubles. The nodes they replace will be garbage
-         * collectable as soon as they are no longer referenced by any
-         * reader thread that may be in the midst of traversing table
-         * right now.
-         */
-
-         HashEntry[] newTable = new HashEntry[oldCapacity << 1];
-         threshold = (int) (newTable.length * loadFactor);
-         int sizeMask = newTable.length - 1;
-         for (int i = 0; i < oldCapacity; i++) {
-            // We need to guarantee that any existing reads of old Map can
-            //  proceed. So we cannot yet null out each bin.
-            HashEntry e = oldTable[i];
-
-            if (e != null) {
-               HashEntry next = e.next;
-               int idx = e.hash & sizeMask;
-
-               //  Single node on list
-               if (next == null)
-                  newTable[idx] = e;
-
-               else {
-                  // Reuse trailing consecutive sequence at same slot
-                  HashEntry lastRun = e;
-                  int lastIdx = idx;
-                  for (HashEntry last = next;
-                       last != null;
-                       last = last.next) {
-                     int k = last.hash & sizeMask;
-                     if (k != lastIdx) {
-                        lastIdx = k;
-                        lastRun = last;
-                     }
-                  }
-                  newTable[lastIdx] = lastRun;
-
-                  // Clone all remaining nodes
-                  for (HashEntry p = e; p != lastRun; p = p.next) {
-                     int k = p.hash & sizeMask;
-                     HashEntry n = newTable[k];
-                     newTable[k] = new HashEntry(p.key, p.hash, n, p.value);
-                  }
-               }
-            }
-         }
-         table = newTable;
-      }
-
-      /**
-       * This is a lockless remove.  Make sure you acquire locks using segment.lock() first.
-       */
-      final LinkedEntry locklessRemove(Object key, int hash) {
-         int c = count - 1;
-         HashEntry[] tab = table;
-         int index = hash & (tab.length - 1);
-         HashEntry first = tab[index];
-         HashEntry e = first;
-         while (e != null && (e.hash != hash || !key.equals(e.key)))
-            e = e.next;
-
-         LinkedEntry oldValue = null;
-         if (e != null) {
-            oldValue = e.value;
-            // All entries following removed node can stay
-            // in list, but all preceding ones need to be
-            // cloned.
-            HashEntry newFirst = e.next;
-            for (HashEntry p = first; p != e; p = p.next)
-               newFirst = new HashEntry(p.key, p.hash,
-                                        newFirst, p.value);
-            tab[index] = newFirst;
-            count = c; // write-volatile
-
-         }
-         return oldValue;
-      }
-
-      /**
-       * This is a lockless clear.  Ensure you acquire locks on the segment first using segment.lock().
-       */
-      final void locklessClear() {
-         if (count != 0) {
-            HashEntry[] tab = table;
-            for (int i = 0; i < tab.length; i++)
-               tab[i] = null;
-            count = 0; // write-volatile
-         }
-      }
-   }
-
-   protected final class KeySet extends AbstractSet<Object> {
-
-      public Iterator<Object> iterator() {
-         return new KeyIterator();
-      }
-
-      public int size() {
-         return FIFODataContainer.this.size();
-      }
-   }
-
-   protected abstract class LinkedIterator {
-      Aux nextAux = dummyEntry.next;
-
-      public boolean hasNext() {
-         return nextAux.next != dummyEntry;
-      }
-
-      public void remove() {
-         throw new UnsupportedOperationException();
-      }
-   }
-
-   protected final class ValueIterator extends LinkedIterator implements Iterator<InternalCacheEntry> {
-      public InternalCacheEntry next() {
-         LinkedEntry le = nextAux.next;
-         if (le == dummyEntry) return null;
-         nextAux = le.next;
-         return le.entry;
-      }
-   }
-
-   protected final class KeyIterator extends LinkedIterator implements Iterator<Object> {
-      public Object next() {
-         LinkedEntry le = nextAux.next;
-         if (le == dummyEntry) return null;
-         nextAux = le.next;
-         return le.entry.getKey();
-      }
-   }
-
-   protected static abstract class SpinLock {
-      final AtomicBoolean l = new AtomicBoolean(false);
-
-      final void lock() {
-         while (!l.compareAndSet(false, true)) {
-            // spin, spin, spin!
-         }
-      }
-
-      final boolean tryLock() {
-         return l.compareAndSet(false, true);
-      }
-
-      final void unlock() {
-         l.set(false);
-      }
-   }
-
-   protected final static class Aux extends SpinLock {
-      volatile LinkedEntry next;
-   }
-
-   protected final static class LinkedEntry extends SpinLock {
-      volatile Aux prev;
-      volatile Aux next;
-      volatile InternalCacheEntry entry;
-   }
-}
-

Deleted: trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java	2009-04-14 18:15:11 UTC (rev 115)
+++ trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -1,84 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-
-/**
- * A data container that exposes an iterator that is ordered based on least recently used (visited) entries first.
- * <p/>
- * This builds on the {@link org.infinispan.container.FIFODataContainer} by calling {@link
- * org.infinispan.container.LRUDataContainer#updateLinks(org.infinispan.container.FIFODataContainer.LinkedEntry)} even for
- * {@link #get(Object)} invocations to make sure ordering is intact, as per LRU.
- * <p/>
- *
- * @author Manik Surtani
- * @since 4.0
- */
-@ThreadSafe
-public class LRUDataContainer extends FIFODataContainer {
-
-   @Override
-   public InternalCacheEntry get(Object k) {
-      int h = hash(k.hashCode());
-      Segment s = segmentFor(h);
-      LinkedEntry le = s.get(k, h);
-      InternalCacheEntry ice = le == null ? null : le.entry;
-      if (ice != null) {
-         if (ice.isExpired()) {
-            remove(k);
-            ice = null;
-         } else {
-            ice.touch();
-            updateLinks(le);
-         }
-      }
-
-      return ice;
-   }
-
-   // TODO make sure even a put() on an existing entry updates links  
-
-   /**
-    * Updates links on this entry, moving it to the end of the linked list
-    *
-    * @param l linked entry to update
-    */
-   protected final void updateLinks(LinkedEntry l) {
-      if (l.next != dummyEntry.prev) {
-
-         // if we cannot lock l it means it is being updated by another process, either removing it or updating it anyway
-         // so we can skip updating links in that case.
-         if (l.tryLock()) {
-            try {
-               Aux before = l.prev;
-               before.lock();
-               Aux after = l.next;
-               after.lock();
-
-               LinkedEntry nextEntry = after.next;
-               nextEntry.lock();
-               dummyEntry.lock();
-               Aux last = dummyEntry.prev;
-               last.lock();
-
-               try {
-                  last.next = l;
-                  l.prev = last;
-                  after.next = dummyEntry;
-                  dummyEntry.prev = after;
-                  nextEntry.prev = before;
-                  before.next = nextEntry;
-               } finally {
-                  last.unlock();
-                  dummyEntry.unlock();
-                  nextEntry.unlock();
-                  after.unlock();
-                  before.unlock();
-               }
-            } finally {
-               l.unlock();
-            }
-         }
-      }
-   }
-}

Copied: trunk/core/src/main/java/org/infinispan/container/NewFIFOContainer.java (from rev 115, trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java)
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/NewFIFOContainer.java	                        (rev 0)
+++ trunk/core/src/main/java/org/infinispan/container/NewFIFOContainer.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,580 @@
+package org.infinispan.container;
+
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.InternalEntryFactory;
+
+import java.util.AbstractSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * // TODO: Manik: Document this
+ *
+ * @author Manik Surtani
+ * @since 4.0
+ */
+public class NewFIFOContainer implements DataContainer {
+
+   /**
+    * The maximum capacity, used if a higher value is implicitly specified by either of the constructors with arguments.
+    * MUST be a power of two <= 1<<30 to ensure that entries are indexable using ints.
+    */
+   static final int MAXIMUM_CAPACITY = 1 << 30;
+
+   // -- these fields are all very similar to JDK's ConcurrentHashMap
+
+   /**
+    * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose the segment.
+    */
+   final int segmentMask;
+
+   /**
+    * Shift value for indexing within segments.
+    */
+   final int segmentShift;
+
+   /**
+    * The segments, each of which is a specialized hash table
+    */
+   final Segment[] segments;
+
+   Set<Object> keySet;
+
+   final LinkedEntry head = new LinkedEntry(), tail = new LinkedEntry();
+
+   public NewFIFOContainer() {
+      float loadFactor = 0.75f;
+      int initialCapacity = 16;
+      int concurrencyLevel = 16;
+
+      // Find power-of-two sizes best matching arguments
+      int sshift = 0;
+      int ssize = 1;
+      while (ssize < concurrencyLevel) {
+         ++sshift;
+         ssize <<= 1;
+      }
+      segmentShift = 32 - sshift;
+      segmentMask = ssize - 1;
+      this.segments = Segment.newArray(ssize);
+
+      if (initialCapacity > MAXIMUM_CAPACITY)
+         initialCapacity = MAXIMUM_CAPACITY;
+      int c = initialCapacity / ssize;
+      if (c * ssize < initialCapacity)
+         ++c;
+      int cap = 1;
+      while (cap < c)
+         cap <<= 1;
+
+      for (int i = 0; i < this.segments.length; ++i) this.segments[i] = new Segment(cap, loadFactor);
+      initLinks();
+   }
+
+   // links and link management
+
+   static final class LinkedEntry {
+      volatile InternalCacheEntry e;
+      volatile LinkedEntry n, p;
+
+      private static final AtomicReferenceFieldUpdater<LinkedEntry, InternalCacheEntry> E_UPDATER = AtomicReferenceFieldUpdater.newUpdater(LinkedEntry.class, InternalCacheEntry.class, "e");
+      private static final AtomicReferenceFieldUpdater<LinkedEntry, LinkedEntry> N_UPDATER = AtomicReferenceFieldUpdater.newUpdater(LinkedEntry.class, LinkedEntry.class, "n");
+      private static final AtomicReferenceFieldUpdater<LinkedEntry, LinkedEntry> P_UPDATER = AtomicReferenceFieldUpdater.newUpdater(LinkedEntry.class, LinkedEntry.class, "p");
+
+      final boolean casValue(InternalCacheEntry expected, InternalCacheEntry newValue) {
+         return E_UPDATER.compareAndSet(this, expected, newValue);
+      }
+
+      final boolean casNext(LinkedEntry expected, LinkedEntry newValue) {
+         return N_UPDATER.compareAndSet(this, expected, newValue);
+      }
+
+      final boolean casPrev(LinkedEntry expected, LinkedEntry newValue) {
+         return P_UPDATER.compareAndSet(this, expected, newValue);
+      }
+
+      final void mark() {
+         e = null;
+      }
+
+      final boolean isMarked() {
+         return e == null; // an impossible value unless deleted
+      }
+   }
+
+   /**
+    * Initializes links to an empty container
+    */
+   protected final void initLinks() {
+      head.n = tail;
+      head.p = tail;
+      tail.n = head;
+      tail.p = head;
+   }
+
+   protected final void unlink(LinkedEntry le) {
+      le.p.casNext(le, le.n);
+      le.n.casPrev(le, le.p);
+   }
+
+   protected final void linkAtEnd(LinkedEntry le) {
+      le.n = tail;
+      do {
+         le.p = tail.p;
+      } while (!le.p.casNext(tail, le));
+      tail.p = le;
+   }
+
+   /**
+    * Similar to ConcurrentHashMap's hash() function: applies a supplemental hash function to a given hashCode, which
+    * defends against poor quality hash functions.  This is critical because ConcurrentHashMap uses power-of-two length
+    * hash tables, that otherwise encounter collisions for hashCodes that do not differ in lower or upper bits.
+    */
+   final int hash(int h) {
+      // Spread bits to regularize both segment and index locations,
+      // using variant of single-word Wang/Jenkins hash.
+      h += (h << 15) ^ 0xffffcd7d;
+      h ^= (h >>> 10);
+      h += (h << 3);
+      h ^= (h >>> 6);
+      h += (h << 2) + (h << 14);
+      return h ^ (h >>> 16);
+   }
+
+   /**
+    * Returns the segment that should be used for key with given hash
+    *
+    * @param hash the hash code for the key
+    * @return the segment
+    */
+   final Segment segmentFor(int hash) {
+      return segments[(hash >>> segmentShift) & segmentMask];
+   }
+
+   /**
+    * ConcurrentHashMap list entry. Note that this is never exported out as a user-visible Map.Entry.
+    * <p/>
+    * Because the value field is volatile, not final, it is legal wrt the Java Memory Model for an unsynchronized reader
+    * to see null instead of initial value when read via a data race.  Although a reordering leading to this is not
+    * likely to ever actually occur, the Segment.readValueUnderLock method is used as a backup in case a null
+    * (pre-initialized) value is ever seen in an unsynchronized access method.
+    */
+   static final class HashEntry {
+      final Object key;
+      final int hash;
+      volatile LinkedEntry value;
+      final HashEntry next;
+
+      HashEntry(Object key, int hash, HashEntry next, LinkedEntry value) {
+         this.key = key;
+         this.hash = hash;
+         this.next = next;
+         this.value = value;
+      }
+   }
+
+   /**
+    * Very similar to a Segment in a ConcurrentHashMap
+    */
+   static final class Segment extends ReentrantLock {
+      /**
+       * The number of elements in this segment's region.
+       */
+      transient volatile int count;
+
+      /**
+       * The table is rehashed when its size exceeds this threshold. (The value of this field is always
+       * <tt>(int)(capacity * loadFactor)</tt>.)
+       */
+      transient int threshold;
+
+      /**
+       * The per-segment table.
+       */
+      transient volatile HashEntry[] table;
+
+      /**
+       * The load factor for the hash table.  Even though this value is same for all segments, it is replicated to avoid
+       * needing links to outer object.
+       *
+       * @serial
+       */
+      final float loadFactor;
+
+      Segment(int initialCapacity, float lf) {
+         loadFactor = lf;
+         setTable(new HashEntry[initialCapacity]);
+      }
+
+      static final Segment[] newArray(int i) {
+         return new Segment[i];
+      }
+
+      /**
+       * Sets table to new HashEntry array. Call only while holding lock or in constructor.
+       */
+      final void setTable(HashEntry[] newTable) {
+         threshold = (int) (newTable.length * loadFactor);
+         table = newTable;
+      }
+
+      /**
+       * Returns properly cast first entry of bin for given hash.
+       */
+      final HashEntry getFirst(int hash) {
+         HashEntry[] tab = table;
+         return tab[hash & (tab.length - 1)];
+      }
+
+      /**
+       * Reads value field of an entry under lock. Called if value field ever appears to be null. This is possible only
+       * if a compiler happens to reorder a HashEntry initialization with its table assignment, which is legal under
+       * memory model but is not known to ever occur.
+       */
+      final LinkedEntry readValueUnderLock(HashEntry e) {
+         lock();
+         try {
+            return e.value;
+         } finally {
+            unlock();
+         }
+      }
+
+      /* Specialized implementations of map methods */
+
+      final LinkedEntry get(Object key, int hash) {
+         if (count != 0) { // read-volatile
+            HashEntry e = getFirst(hash);
+            while (e != null) {
+               if (e.hash == hash && key.equals(e.key)) {
+                  LinkedEntry v = e.value;
+                  if (v != null)
+                     return v;
+                  return readValueUnderLock(e); // recheck
+               }
+               e = e.next;
+            }
+         }
+         return null;
+      }
+
+      /**
+       * This put is lockless.  Make sure you call segment.lock() first.
+       */
+      final LinkedEntry locklessPut(Object key, int hash, LinkedEntry value) {
+         int c = count;
+         if (c++ > threshold) // ensure capacity
+            rehash();
+         HashEntry[] tab = table;
+         int index = hash & (tab.length - 1);
+         HashEntry first = tab[index];
+         HashEntry e = first;
+         while (e != null && (e.hash != hash || !key.equals(e.key)))
+            e = e.next;
+
+         LinkedEntry oldValue;
+         if (e != null) {
+            oldValue = e.value;
+            e.value = value;
+         } else {
+            oldValue = null;
+            tab[index] = new HashEntry(key, hash, first, value);
+            count = c; // write-volatile
+         }
+         return oldValue;
+      }
+
+      final void rehash() {
+         HashEntry[] oldTable = table;
+         int oldCapacity = oldTable.length;
+         if (oldCapacity >= MAXIMUM_CAPACITY)
+            return;
+
+         /*
+         * Reclassify nodes in each list to new Map.  Because we are
+         * using power-of-two expansion, the elements from each bin
+         * must either stay at same index, or move with a power of two
+         * offset. We eliminate unnecessary node creation by catching
+         * cases where old nodes can be reused because their next
+         * fields won't change. Statistically, at the default
+         * threshold, only about one-sixth of them need cloning when
+         * a table doubles. The nodes they replace will be garbage
+         * collectable as soon as they are no longer referenced by any
+         * reader thread that may be in the midst of traversing table
+         * right now.
+         */
+
+         HashEntry[] newTable = new HashEntry[oldCapacity << 1];
+         threshold = (int) (newTable.length * loadFactor);
+         int sizeMask = newTable.length - 1;
+         for (int i = 0; i < oldCapacity; i++) {
+            // We need to guarantee that any existing reads of old Map can
+            //  proceed. So we cannot yet null out each bin.
+            HashEntry e = oldTable[i];
+
+            if (e != null) {
+               HashEntry next = e.next;
+               int idx = e.hash & sizeMask;
+
+               //  Single node on list
+               if (next == null)
+                  newTable[idx] = e;
+
+               else {
+                  // Reuse trailing consecutive sequence at same slot
+                  HashEntry lastRun = e;
+                  int lastIdx = idx;
+                  for (HashEntry last = next;
+                       last != null;
+                       last = last.next) {
+                     int k = last.hash & sizeMask;
+                     if (k != lastIdx) {
+                        lastIdx = k;
+                        lastRun = last;
+                     }
+                  }
+                  newTable[lastIdx] = lastRun;
+
+                  // Clone all remaining nodes
+                  for (HashEntry p = e; p != lastRun; p = p.next) {
+                     int k = p.hash & sizeMask;
+                     HashEntry n = newTable[k];
+                     newTable[k] = new HashEntry(p.key, p.hash, n, p.value);
+                  }
+               }
+            }
+         }
+         table = newTable;
+      }
+
+      /**
+       * This is a lockless remove.  Make sure you acquire locks using segment.lock() first.
+       */
+      final LinkedEntry locklessRemove(Object key, int hash) {
+         int c = count - 1;
+         HashEntry[] tab = table;
+         int index = hash & (tab.length - 1);
+         HashEntry first = tab[index];
+         HashEntry e = first;
+         while (e != null && (e.hash != hash || !key.equals(e.key)))
+            e = e.next;
+
+         LinkedEntry oldValue = null;
+         if (e != null) {
+            oldValue = e.value;
+            // All entries following removed node can stay
+            // in list, but all preceding ones need to be
+            // cloned.
+            HashEntry newFirst = e.next;
+            for (HashEntry p = first; p != e; p = p.next)
+               newFirst = new HashEntry(p.key, p.hash,
+                                        newFirst, p.value);
+            tab[index] = newFirst;
+            count = c; // write-volatile
+
+         }
+         return oldValue;
+      }
+
+      /**
+       * This is a lockless clear.  Ensure you acquire locks on the segment first using segment.lock().
+       */
+      final void locklessClear() {
+         if (count != 0) {
+            HashEntry[] tab = table;
+            for (int i = 0; i < tab.length; i++)
+               tab[i] = null;
+            count = 0; // write-volatile
+         }
+      }
+   }
+
+
+   /** Set view over the container's keys; backed by {@link KeyIterator}. */
+   protected final class KeySet extends AbstractSet<Object> {
+
+      /** Returns an iterator over the keys in the order maintained by the linked list. */
+      @Override
+      public Iterator<Object> iterator() {
+         return new KeyIterator();
+      }
+
+      /** Delegates to the container's (approximate) size. */
+      @Override
+      public int size() {
+         return NewFIFOContainer.this.size();
+      }
+   }
+
+   /**
+    * Base iterator that walks the linked entries, skipping nodes marked as removed.
+    */
+   protected abstract class LinkedIterator {
+      // Cursor into the linked list; starts at the head sentinel.
+      LinkedEntry current = head;
+
+      /**
+       * Advances the cursor to the next candidate entry and reports whether one exists.
+       * <p/>
+       * NOTE(review): this implementation MUTATES the cursor, so hasNext() is not
+       * idempotent -- calling it twice without an intervening next() skips an entry.
+       * It is only safe under strict hasNext()/next() alternation (e.g. for-each loops).
+       */
+      public boolean hasNext() {
+         current = current.n;
+         while (current.isMarked()) {
+            // reaching a sentinel means the list is exhausted (head/tail appear to be
+            // permanently marked -- TODO confirm against the sentinel setup code)
+            if (current == tail || current == head) return false;
+            current = current.n;
+         }
+         return true;
+      }
+
+      /** Removal through the iterator is not supported. */
+      public void remove() {
+         throw new UnsupportedOperationException();
+      }
+   }
+
+   protected final class ValueIterator extends LinkedIterator implements Iterator<InternalCacheEntry> {
+      /**
+       * Returns the entry at the cursor positioned by the preceding hasNext() call,
+       * lazily unlinking any entries that were marked removed in the meantime.
+       * <p/>
+       * NOTE(review): on hitting a sentinel this throws IndexOutOfBoundsException;
+       * the java.util.Iterator contract calls for NoSuchElementException here.
+       */
+      public InternalCacheEntry next() {
+         while (current.isMarked()) {
+            LinkedEntry n = current.n;
+            unlink(current);
+            current = n;
+            if (n == head || n == tail) throw new IndexOutOfBoundsException("Reached head or tail pointer!");
+         }
+
+         return current.e;
+      }
+   }
+
+   protected final class KeyIterator extends LinkedIterator implements Iterator<Object> {
+      /**
+       * Returns the key of the entry at the cursor positioned by the preceding
+       * hasNext() call, lazily unlinking entries marked removed in the meantime.
+       * <p/>
+       * NOTE(review): on hitting a sentinel this throws IndexOutOfBoundsException;
+       * the java.util.Iterator contract calls for NoSuchElementException here.
+       */
+      public Object next() {
+         while (current.isMarked()) {
+            LinkedEntry n = current.n;
+            unlink(current);
+            current = n;
+            if (n == head || n == tail) throw new IndexOutOfBoundsException("Reached head or tail pointer!");
+         }
+
+         return current.e.getKey();
+      }
+   }
+
+
+   // ----------- PUBLIC API ---------------
+
+   /**
+    * Retrieves the entry for the given key, finishing the lazy unlink of a
+    * concurrently removed node and evicting the entry if it has expired.
+    *
+    * @param k key to look up (must be non-null; hashCode() is invoked on it)
+    * @return the live entry, or null if absent or expired
+    */
+   public InternalCacheEntry get(Object k) {
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      LinkedEntry le = s.get(k, h);
+      InternalCacheEntry ice = null;
+      if (le != null) {
+         ice = le.e;
+         // entry was concurrently removed; complete its unlinking here
+         if (le.isMarked()) unlink(le);
+      }
+      if (ice != null) {
+         if (ice.isExpired()) {
+            // expired: evict eagerly and report a miss
+            remove(k);
+            ice = null;
+         } else {
+            ice.touch();  // presumably refreshes idle time -- see InternalCacheEntry
+         }
+      }
+      return ice;
+   }
+
+   /**
+    * Inserts or updates the entry for the given key.  The whole operation runs
+    * under the segment lock; brand-new entries are additionally linked at the
+    * end of the list, preserving FIFO iteration order.
+    *
+    * @param k        key
+    * @param v        value
+    * @param lifespan maximum lifetime of the entry (units per InternalEntryFactory)
+    * @param maxIdle  maximum idle time before the entry is considered expired
+    */
+   public void put(Object k, Object v, long lifespan, long maxIdle) {
+      // do a normal put first.
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      s.lock();
+      LinkedEntry le;
+      boolean newEntry = false;
+      try {
+         le = s.get(k, h);
+         InternalCacheEntry ice = le == null ? null : le.e;
+         if (ice == null) {
+            newEntry = true;
+            ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
+            // only update linking if this is a new entry
+            le = new LinkedEntry();
+         } else {
+            // in-place update: the existing linked node keeps its list position
+            ice.setValue(v);
+            ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
+         }
+
+         // need to do this anyway since the ICE impl may have changed
+         le.e = ice;
+         s.locklessPut(k, h, le);
+
+         if (newEntry) {
+            linkAtEnd(le);
+         }
+      } finally {
+         s.unlock();
+      }
+   }
+
+   /**
+    * Reports whether a live (present and unexpired) mapping exists for the key.
+    * An expired hit is evicted eagerly and reported as absent.
+    */
+   public boolean containsKey(Object k) {
+      int h = hash(k.hashCode());
+      LinkedEntry le = segmentFor(h).get(k, h);
+      if (le == null) return false;
+      InternalCacheEntry ice = le.e;
+      // entry was concurrently removed; complete its unlinking here
+      if (le.isMarked()) unlink(le);
+      if (ice == null) return false;
+      if (ice.isExpired()) {
+         remove(k);
+         return false;
+      }
+      return true;
+   }
+
+   /**
+    * Removes and returns the entry for the given key, or null if it was absent
+    * or already expired.  The hashtable removal runs under the segment lock;
+    * the linked node is marked and unlinked before the lock is released.
+    */
+   public InternalCacheEntry remove(Object k) {
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      s.lock();
+      InternalCacheEntry ice = null;
+      LinkedEntry le;
+      try {
+         le = s.locklessRemove(k, h);
+         if (le != null) {
+            ice = le.e;
+            le.mark();  // flag the node so concurrent readers/iterators skip it
+            unlink(le);
+         }
+      } finally {
+         s.unlock();
+      }
+
+      // an expired entry is reported as a miss even though it was just removed
+      if (ice == null || ice.isExpired())
+         return null;
+      else
+         return ice;
+   }
+
+   /**
+    * Sums the per-segment counts without locking; the result is an approximate,
+    * point-in-time value, which is acceptable for this container.
+    */
+   public int size() {
+      int total = 0;
+      for (Segment segment : segments) total += segment.count;
+      return total;
+   }
+
+   /**
+    * Empties the container.  Expensive: acquires every segment lock, wipes each
+    * segment's table, then resets the linked-list anchors before unlocking.
+    */
+   public void clear() {
+      // This is expensive...
+      // lock all segments
+      for (Segment s : segments) s.lock();
+      try {
+         for (Segment s : segments) s.locklessClear();
+         initLinks();
+      } finally {
+         for (Segment s : segments) s.unlock();
+      }
+   }
+
+   /**
+    * Returns the lazily created key-set view backed by this container.
+    * NOTE(review): the lazy init is unsynchronized, so racing first callers may
+    * each create (and return) a distinct KeySet; harmless since the view holds
+    * no state, but confirm this is intended.
+    */
+   public Set<Object> keySet() {
+      if (keySet == null) keySet = new KeySet();
+      return keySet;
+   }
+
+   public void purgeExpired() {
+      for (InternalCacheEntry ice : this) {
+         if (ice.isExpired()) remove(ice.getKey());
+      }
+   }
+
+   /** Returns an iterator over live entries in link order (FIFO insertion order). */
+   public Iterator<InternalCacheEntry> iterator() {
+      return new ValueIterator();
+   }
+}

Copied: trunk/core/src/main/java/org/infinispan/container/NewLRUContainer.java (from rev 115, trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java)
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/NewLRUContainer.java	                        (rev 0)
+++ trunk/core/src/main/java/org/infinispan/container/NewLRUContainer.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,75 @@
+package org.infinispan.container;
+
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.InternalEntryFactory;
+
+/**
+ * A data container whose iteration order is least-recently-used first: it extends {@link NewFIFOContainer} and relinks entries to the tail on each access.
+ *
+ * @author Manik Surtani
+ * @since 4.0
+ */
+public class NewLRUContainer extends NewFIFOContainer {
+
+   /**
+    * As {@link NewFIFOContainer#get(Object)}, but additionally moves the entry
+    * to the end of the linked list so iteration order reflects recency of use.
+    */
+   @Override
+   public InternalCacheEntry get(Object k) {
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      LinkedEntry le = s.get(k, h);
+      InternalCacheEntry ice = null;
+      if (le != null) {
+         ice = le.e;
+         // entry was concurrently removed; complete its unlinking here
+         if (le.isMarked()) unlink(le);
+      }
+      if (ice != null) {
+         if (ice.isExpired()) {
+            remove(k);
+            ice = null;
+         } else {
+            ice.touch();
+            updateLinks(le);  // LRU: a visit makes the entry most-recently-used
+         }
+      }
+      return ice;
+   }
+
+   /**
+    * As {@link NewFIFOContainer#put(Object, Object, long, long)}, but an update
+    * of an existing key also relinks the entry to the tail (MRU position).
+    */
+   @Override
+   public void put(Object k, Object v, long lifespan, long maxIdle) {
+      // do a normal put first.
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      s.lock();
+      LinkedEntry le;
+      boolean newEntry = false;
+      try {
+         le = s.get(k, h);
+         InternalCacheEntry ice = le == null ? null : le.e;
+         if (ice == null) {
+            newEntry = true;
+            ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
+            le = new LinkedEntry();
+         } else {
+            ice.setValue(v);
+            ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
+         }
+
+         // need to do this anyway since the ICE impl may have changed
+         le.e = ice;
+         s.locklessPut(k, h, le);
+
+         if (newEntry) {
+            linkAtEnd(le);
+         } else {
+            updateLinks(le);
+         }
+
+      } finally {
+         s.unlock();
+      }
+   }
+
+   /** Moves the given entry to the end of the linked list (MRU position). */
+   protected final void updateLinks(LinkedEntry le) {
+      unlink(le);
+      linkAtEnd(le);
+   }
+}
\ No newline at end of file

Copied: trunk/core/src/main/java/org/infinispan/container/SpinLockBasedFIFODataContainer.java (from rev 115, trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java)
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/SpinLockBasedFIFODataContainer.java	                        (rev 0)
+++ trunk/core/src/main/java/org/infinispan/container/SpinLockBasedFIFODataContainer.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,597 @@
+package org.infinispan.container;
+
+import net.jcip.annotations.ThreadSafe;
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.InternalEntryFactory;
+
+import java.util.AbstractSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A data container that exposes an iterator that is ordered based on order of entry into the container, with the oldest
+ * entries first.
+ * <p/>
+ * This data container that maintains a concurrent hashtable of entries, and also maintains linking between the elements
+ * for ordered iterators.
+ * <p/>
+ * This uses concepts from {@link java.util.concurrent.ConcurrentHashMap} in that it maintains a table of lockable
+ * Segments, each of which is a specialized Hashtable, but HashEntries are also linked to each other such that they can
+ * be navigated, like a {@link java.util.LinkedHashMap}.  To ensure thread safety of links between entries, we follow
+ * auxiliary node ideas expressed in John D. Valois' paper, <a href="http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.9506"><i>Lock-Free
+ * Linked Lists Using Compare-and-Swap</i></a>.
+ * <p/>
+ * The locks maintained on linkable entries are implemented using {@link org.infinispan.container.SpinLockBasedFIFODataContainer.SpinLock}s,
+ * and due to the nature of these spin locks, they should only be held for a minimal amount of time.
+ *
+ * @author Manik Surtani
+ * @since 4.0
+ */
+ at ThreadSafe
+public class SpinLockBasedFIFODataContainer implements DataContainer {
+   /**
+    * The maximum capacity, used if a higher value is implicitly specified by either of the constructors with arguments.
+    * MUST be a power of two <= 1<<30 to ensure that entries are indexable using ints.
+    */
+   static final int MAXIMUM_CAPACITY = 1 << 30;
+
+   final LinkedEntry dummyEntry = new LinkedEntry(); // a dummy linked entry
+
+   // -- these fields are all very similar to JDK's ConcurrentHashMap
+
+   /**
+    * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose the segment.
+    */
+   final int segmentMask;
+
+   /**
+    * Shift value for indexing within segments.
+    */
+   final int segmentShift;
+
+   /**
+    * The segments, each of which is a specialized hash table
+    */
+   final Segment[] segments;
+
+   Set<Object> keySet;
+
+   /**
+    * Builds the container with fixed defaults (initial capacity 16, load factor
+    * 0.75, concurrency level 16), sizing the segment array the same way
+    * java.util.concurrent.ConcurrentHashMap does.
+    */
+   public SpinLockBasedFIFODataContainer() {
+      float loadFactor = 0.75f;
+      int initialCapacity = 16;
+      int concurrencyLevel = 16;
+
+      // Find power-of-two sizes best matching arguments
+      int sshift = 0;
+      int ssize = 1;
+      while (ssize < concurrencyLevel) {
+         ++sshift;
+         ssize <<= 1;
+      }
+      segmentShift = 32 - sshift;
+      segmentMask = ssize - 1;
+      this.segments = Segment.newArray(ssize);
+
+      if (initialCapacity > MAXIMUM_CAPACITY)
+         initialCapacity = MAXIMUM_CAPACITY;
+      int c = initialCapacity / ssize;
+      if (c * ssize < initialCapacity)
+         ++c;
+      int cap = 1;
+      while (cap < c)
+         cap <<= 1;
+
+      // each segment gets the smallest power-of-two capacity that holds its share
+      for (int i = 0; i < this.segments.length; ++i) this.segments[i] = new Segment(cap, loadFactor);
+      initLinks();
+   }
+
+   // ---------- Public API methods
+
+   /**
+    * Looks up the entry for the given key, applying expiry semantics: an expired
+    * hit is evicted eagerly and reported as a miss; a live hit is touched.
+    */
+   public InternalCacheEntry get(Object k) {
+      int h = hash(k.hashCode());
+      LinkedEntry le = segmentFor(h).get(k, h);
+      if (le == null) return null;
+      InternalCacheEntry ice = le.entry;
+      if (ice == null) return null;
+      if (ice.isExpired()) {
+         remove(k);
+         return null;
+      }
+      ice.touch();
+      return ice;
+   }
+
+   /**
+    * Inserts or updates the entry for the given key under the segment lock.  A new
+    * entry and its trailing auxiliary node are created pre-locked, then spliced in
+    * before the dummy sentinel; all spin locks are released in the finally block.
+    * <p/>
+    * NOTE(review): if an exception were thrown between creating the new entry and
+    * the linking step (e.g. from locklessPut), 'before' would still be null while
+    * newEntry and le are set, and the finally block would NPE on before.unlock().
+    * Confirm that path is unreachable, or guard each unlock individually.
+    */
+   public void put(Object k, Object v, long lifespan, long maxIdle) {
+      // do a normal put first.
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      s.lock();
+      LinkedEntry le = null;
+      Aux before = null, after = null;
+      boolean newEntry = false;
+      try {
+         le = s.get(k, h);
+         InternalCacheEntry ice = le == null ? null : le.entry;
+         if (ice == null) {
+            newEntry = true;
+            ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
+            // only update linking if this is a new entry
+            le = new LinkedEntry();
+            le.lock();
+            after = new Aux();
+            after.lock();
+            le.next = after;
+            after.next = dummyEntry;
+         } else {
+            // in-place update: existing node keeps its position in the list
+            ice.setValue(v);
+            ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
+         }
+
+         le.entry = ice;
+         s.locklessPut(k, h, le);
+
+         if (newEntry) {
+            // splice the new (still locked) node in just before the sentinel
+            dummyEntry.lock();
+            (before = dummyEntry.prev).lock();
+            before.next = le;
+            le.prev = before;
+            dummyEntry.prev = after;
+         }
+      } finally {
+         if (newEntry) {
+            if (le != null) {
+               before.unlock();
+               dummyEntry.unlock();
+               after.unlock();
+               le.unlock();
+            }
+         }
+         s.unlock();
+      }
+   }
+
+   /**
+    * Reports whether a live (present and unexpired) mapping exists for the key.
+    * An expired hit is evicted eagerly and reported as absent.
+    */
+   public boolean containsKey(Object k) {
+      int h = hash(k.hashCode());
+      LinkedEntry le = segmentFor(h).get(k, h);
+      if (le == null) return false;
+      InternalCacheEntry ice = le.entry;
+      if (ice == null) return false;
+      if (ice.isExpired()) {
+         remove(k);
+         return false;
+      }
+      return true;
+   }
+
+   /**
+    * Removes and returns the entry for the given key (null if absent or expired).
+    * Unlinks the node by locking it together with its surrounding auxiliary nodes,
+    * then bridging 'before' directly to the successor.
+    * <p/>
+    * NOTE(review): the local 'nextEntry' is assigned but never used afterwards --
+    * dead code, or a missing lock on the successor entry?  Worth confirming.
+    */
+   public InternalCacheEntry remove(Object k) {
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      s.lock();
+      InternalCacheEntry ice = null;
+      LinkedEntry le = null;
+      boolean linksLocked = false;
+      LinkedEntry nextEntry = null;
+      Aux before = null, after = null;
+      try {
+         le = s.locklessRemove(k, h);
+         if (le != null) {
+            ice = le.entry;
+            linksLocked = true;
+            // need to unlink
+            le.lock();
+            (before = le.prev).lock();
+            (after = le.next).lock();
+            nextEntry = after.next;
+            before.next = after.next;
+            before.next.prev = before;
+         }
+      } finally {
+         if (linksLocked) {
+            before.unlock();
+            after.unlock();
+            le.unlock();
+         }
+         s.unlock();
+      }
+
+      // an expired entry is reported as a miss even though it was just removed
+      if (ice == null || ice.isExpired())
+         return null;
+      else
+         return ice;
+   }
+
+   /**
+    * Sums the per-segment counts without locking; the result is an approximate,
+    * point-in-time value, which is acceptable for this container.
+    */
+   public int size() {
+      int total = 0;
+      for (Segment segment : segments) total += segment.count;
+      return total;
+   }
+
+   /**
+    * Empties the container.  Expensive: acquires every segment lock, wipes each
+    * segment's table, then resets the linked-list anchors before unlocking.
+    */
+   public void clear() {
+      // This is expensive...
+      // lock all segments
+      for (Segment s : segments) s.lock();
+      try {
+         for (Segment s : segments) s.locklessClear();
+         initLinks();
+      } finally {
+         for (Segment s : segments) s.unlock();
+      }
+   }
+
+   /**
+    * Returns the lazily created key-set view backed by this container.
+    * NOTE(review): the lazy init is unsynchronized, so racing first callers may
+    * each create (and return) a distinct KeySet; harmless since the view holds
+    * no state, but confirm this is intended.
+    */
+   public Set<Object> keySet() {
+      if (keySet == null) keySet = new KeySet();
+      return keySet;
+   }
+
+   public void purgeExpired() {
+      for (InternalCacheEntry ice : this) {
+         if (ice.isExpired()) remove(ice.getKey());
+      }
+   }
+
+   /** Returns an iterator over entries in link order (FIFO insertion order). */
+   public Iterator<InternalCacheEntry> iterator() {
+      return new ValueIterator();
+   }
+
+   // --------------- Internals
+
+   /**
+    * Initializes links to an empty container: a single auxiliary 'tail' node is
+    * wired between dummyEntry's prev and next, forming the empty list.
+    * <p/>
+    * NOTE(review): the finally block calls dummyEntry.unlock() although this
+    * method never locks dummyEntry; with these spin locks that force-clears the
+    * flag and could release a lock held by another thread -- confirm intended.
+    */
+   protected final void initLinks() {
+      Aux tail = new Aux();
+      try {
+         tail.lock();
+         dummyEntry.prev = tail;
+         dummyEntry.next = tail;
+         tail.next = dummyEntry;
+      } finally {
+         tail.unlock();
+         dummyEntry.unlock();
+      }
+   }
+
+   /**
+    * Similar to ConcurrentHashMap's hash() function: applies a supplemental hash function to a given hashCode, which
+    * defends against poor quality hash functions.  This is critical because ConcurrentHashMap uses power-of-two length
+    * hash tables, that otherwise encounter collisions for hashCodes that do not differ in lower or upper bits.
+    */
+   final int hash(int h) {
+      // Spread bits to regularize both segment and index locations,
+      // using variant of single-word Wang/Jenkins hash.
+      h += (h << 15) ^ 0xffffcd7d;
+      h ^= (h >>> 10);
+      h += (h << 3);
+      h ^= (h >>> 6);
+      h += (h << 2) + (h << 14);
+      return h ^ (h >>> 16);
+   }
+
+   /**
+    * Returns the segment that should be used for key with given hash.
+    *
+    * @param hash the (already spread) hash code for the key
+    * @return the segment responsible for that hash
+    */
+   final Segment segmentFor(int hash) {
+      // high-order bits select the segment; the mask keeps the index in range
+      return segments[(hash >>> segmentShift) & segmentMask];
+   }
+
+   /**
+    * ConcurrentHashMap list entry. Note that this is never exported out as a user-visible Map.Entry.
+    * <p/>
+    * Because the value field is volatile, not final, it is legal wrt the Java Memory Model for an unsynchronized reader
+    * to see null instead of initial value when read via a data race.  Although a reordering leading to this is not
+    * likely to ever actually occur, the Segment.readValueUnderLock method is used as a backup in case a null
+    * (pre-initialized) value is ever seen in an unsynchronized access method.
+    */
+   static final class HashEntry {
+      final Object key;
+      final int hash;
+      // volatile so lock-free readers see the latest mapping; may transiently read
+      // null under a data race (see Segment.readValueUnderLock)
+      volatile LinkedEntry value;
+      // bucket chain link; immutable, so removal clones preceding nodes instead
+      final HashEntry next;
+
+      HashEntry(Object key, int hash, HashEntry next, LinkedEntry value) {
+         this.key = key;
+         this.hash = hash;
+         this.next = next;
+         this.value = value;
+      }
+   }
+
+
+   /**
+    * Very similar to a Segment in a ConcurrentHashMap
+    */
+   static final class Segment extends ReentrantLock {
+      // NOTE(review): extends ReentrantLock (which is Serializable) without declaring
+      // a serialVersionUID; harmless unless segments are ever serialized.
+
+      /**
+       * The number of elements in this segment's region.
+       */
+      transient volatile int count;
+
+      /**
+       * The table is rehashed when its size exceeds this threshold. (The value of this field is always
+       * <tt>(int)(capacity * loadFactor)</tt>.)
+       */
+      transient int threshold;
+
+      /**
+       * The per-segment table.
+       */
+      transient volatile HashEntry[] table;
+
+      /**
+       * The load factor for the hash table.  Even though this value is same for all segments, it is replicated to avoid
+       * needing links to outer object.
+       *
+       * @serial
+       */
+      final float loadFactor;
+
+      Segment(int initialCapacity, float lf) {
+         loadFactor = lf;
+         setTable(new HashEntry[initialCapacity]);
+      }
+
+      static final Segment[] newArray(int i) {
+         return new Segment[i];
+      }
+
+      /**
+       * Sets table to new HashEntry array. Call only while holding lock or in constructor.
+       */
+      final void setTable(HashEntry[] newTable) {
+         threshold = (int) (newTable.length * loadFactor);
+         table = newTable;
+      }
+
+      /**
+       * Returns properly casted first entry of bin for given hash.
+       */
+      final HashEntry getFirst(int hash) {
+         HashEntry[] tab = table;
+         return tab[hash & (tab.length - 1)];
+      }
+
+      /**
+       * Reads value field of an entry under lock. Called if value field ever appears to be null. This is possible only
+       * if a compiler happens to reorder a HashEntry initialization with its table assignment, which is legal under
+       * memory model but is not known to ever occur.
+       */
+      final LinkedEntry readValueUnderLock(HashEntry e) {
+         lock();
+         try {
+            return e.value;
+         } finally {
+            unlock();
+         }
+      }
+
+      /* Specialized implementations of map methods */
+
+      /**
+       * Lock-free lookup: returns the LinkedEntry mapped to the given key, or null if absent.
+       */
+      final LinkedEntry get(Object key, int hash) {
+         if (count != 0) { // read-volatile
+            HashEntry e = getFirst(hash);
+            while (e != null) {
+               if (e.hash == hash && key.equals(e.key)) {
+                  LinkedEntry v = e.value;
+                  if (v != null)
+                     return v;
+                  return readValueUnderLock(e); // recheck
+               }
+               e = e.next;
+            }
+         }
+         return null;
+      }
+
+      /**
+       * This put is lockless.  Make sure you call segment.lock() first.
+       */
+      final LinkedEntry locklessPut(Object key, int hash, LinkedEntry value) {
+         int c = count;
+         if (c++ > threshold) // ensure capacity
+            rehash();
+         HashEntry[] tab = table;
+         int index = hash & (tab.length - 1);
+         HashEntry first = tab[index];
+         HashEntry e = first;
+         while (e != null && (e.hash != hash || !key.equals(e.key)))
+            e = e.next;
+
+         LinkedEntry oldValue;
+         if (e != null) {
+            // key already present: overwrite in place; count is unchanged
+            oldValue = e.value;
+            e.value = value;
+         } else {
+            // new mapping: prepend to the bucket chain, then publish via count
+            oldValue = null;
+            tab[index] = new HashEntry(key, hash, first, value);
+            count = c; // write-volatile
+         }
+         return oldValue;
+      }
+
+      /**
+       * Doubles the table size and redistributes the bucket chains.
+       * Call only while holding the segment lock.
+       */
+      final void rehash() {
+         HashEntry[] oldTable = table;
+         int oldCapacity = oldTable.length;
+         if (oldCapacity >= MAXIMUM_CAPACITY)
+            return;
+
+         /*
+         * Reclassify nodes in each list to new Map.  Because we are
+         * using power-of-two expansion, the elements from each bin
+         * must either stay at same index, or move with a power of two
+         * offset. We eliminate unnecessary node creation by catching
+         * cases where old nodes can be reused because their next
+         * fields won't change. Statistically, at the default
+         * threshold, only about one-sixth of them need cloning when
+         * a table doubles. The nodes they replace will be garbage
+         * collectable as soon as they are no longer referenced by any
+         * reader thread that may be in the midst of traversing table
+         * right now.
+         */
+
+         HashEntry[] newTable = new HashEntry[oldCapacity << 1];
+         threshold = (int) (newTable.length * loadFactor);
+         int sizeMask = newTable.length - 1;
+         for (int i = 0; i < oldCapacity; i++) {
+            // We need to guarantee that any existing reads of old Map can
+            //  proceed. So we cannot yet null out each bin.
+            HashEntry e = oldTable[i];
+
+            if (e != null) {
+               HashEntry next = e.next;
+               int idx = e.hash & sizeMask;
+
+               //  Single node on list
+               if (next == null)
+                  newTable[idx] = e;
+
+               else {
+                  // Reuse trailing consecutive sequence at same slot
+                  HashEntry lastRun = e;
+                  int lastIdx = idx;
+                  for (HashEntry last = next;
+                       last != null;
+                       last = last.next) {
+                     int k = last.hash & sizeMask;
+                     if (k != lastIdx) {
+                        lastIdx = k;
+                        lastRun = last;
+                     }
+                  }
+                  newTable[lastIdx] = lastRun;
+
+                  // Clone all remaining nodes
+                  for (HashEntry p = e; p != lastRun; p = p.next) {
+                     int k = p.hash & sizeMask;
+                     HashEntry n = newTable[k];
+                     newTable[k] = new HashEntry(p.key, p.hash, n, p.value);
+                  }
+               }
+            }
+         }
+         table = newTable;
+      }
+
+      /**
+       * This is a lockless remove.  Make sure you acquire locks using segment.lock() first.
+       */
+      final LinkedEntry locklessRemove(Object key, int hash) {
+         int c = count - 1;
+         HashEntry[] tab = table;
+         int index = hash & (tab.length - 1);
+         HashEntry first = tab[index];
+         HashEntry e = first;
+         while (e != null && (e.hash != hash || !key.equals(e.key)))
+            e = e.next;
+
+         LinkedEntry oldValue = null;
+         if (e != null) {
+            oldValue = e.value;
+            // All entries following removed node can stay
+            // in list, but all preceding ones need to be
+            // cloned.
+            HashEntry newFirst = e.next;
+            for (HashEntry p = first; p != e; p = p.next)
+               newFirst = new HashEntry(p.key, p.hash,
+                                        newFirst, p.value);
+            tab[index] = newFirst;
+            count = c; // write-volatile
+
+         }
+         return oldValue;
+      }
+
+      /**
+       * This is a lockless clear.  Ensure you acquire locks on the segment first using segment.lock().
+       */
+      final void locklessClear() {
+         if (count != 0) {
+            HashEntry[] tab = table;
+            for (int i = 0; i < tab.length; i++)
+               tab[i] = null;
+            count = 0; // write-volatile
+         }
+      }
+   }
+
+   /** Set view over the container's keys; backed by {@link KeyIterator}. */
+   protected final class KeySet extends AbstractSet<Object> {
+
+      /** Returns an iterator over the keys in the order maintained by the linked list. */
+      @Override
+      public Iterator<Object> iterator() {
+         return new KeyIterator();
+      }
+
+      /** Delegates to the container's (approximate) size. */
+      @Override
+      public int size() {
+         return SpinLockBasedFIFODataContainer.this.size();
+      }
+   }
+
+   /**
+    * Base iterator that walks the auxiliary-node chain starting at the dummy sentinel.
+    */
+   protected abstract class LinkedIterator {
+      // Cursor: the auxiliary node whose 'next' is the entry to return next.
+      Aux nextAux = dummyEntry.next;
+
+      /** True while the next linked node is not the dummy sentinel (list not exhausted). */
+      public boolean hasNext() {
+         return nextAux.next != dummyEntry;
+      }
+
+      /** Removal through the iterator is not supported. */
+      public void remove() {
+         throw new UnsupportedOperationException();
+      }
+   }
+
+   protected final class ValueIterator extends LinkedIterator implements Iterator<InternalCacheEntry> {
+      /**
+       * Returns the next entry in link order.
+       * NOTE(review): returns null (rather than throwing NoSuchElementException)
+       * when exhausted, which violates the java.util.Iterator contract.
+       */
+      public InternalCacheEntry next() {
+         LinkedEntry le = nextAux.next;
+         if (le == dummyEntry) return null;
+         nextAux = le.next;
+         return le.entry;
+      }
+   }
+
+   protected final class KeyIterator extends LinkedIterator implements Iterator<Object> {
+      /**
+       * Returns the next entry's key in link order.
+       * NOTE(review): returns null (rather than throwing NoSuchElementException)
+       * when exhausted, which violates the java.util.Iterator contract.
+       */
+      public Object next() {
+         LinkedEntry le = nextAux.next;
+         if (le == dummyEntry) return null;
+         nextAux = le.next;
+         return le.entry.getKey();
+      }
+   }
+
+   /**
+    * Minimal CAS-based busy-wait lock.  Non-reentrant and unfair; hold times must
+    * be extremely short since waiters burn CPU while spinning.
+    * <p/>
+    * NOTE(review): unlock() blindly sets the flag to false, so a call without a
+    * matching lock() can "release" a lock held by another thread -- confirm all
+    * call sites pair lock/unlock correctly (see initLinks()).
+    */
+   protected static abstract class SpinLock {
+      // true while the lock is held
+      final AtomicBoolean l = new AtomicBoolean(false);
+
+      final void lock() {
+         while (!l.compareAndSet(false, true)) {
+            // spin, spin, spin!
+         }
+      }
+
+      /** Single CAS attempt; true if the lock was acquired. */
+      final boolean tryLock() {
+         return l.compareAndSet(false, true);
+      }
+
+      final void unlock() {
+         l.set(false);
+      }
+   }
+
+   /** Auxiliary node placed between linked entries, per Valois' lock-free list scheme. */
+   protected final static class Aux extends SpinLock {
+      volatile LinkedEntry next;
+   }
+
+   /** A linked node holding the cached entry plus its surrounding auxiliary nodes. */
+   protected final static class LinkedEntry extends SpinLock {
+      volatile Aux prev;
+      volatile Aux next;
+      volatile InternalCacheEntry entry;
+   }
+}
+


Property changes on: trunk/core/src/main/java/org/infinispan/container/SpinLockBasedFIFODataContainer.java
___________________________________________________________________
Name: svn:keywords
   + Id Revision
Name: svn:eol-style
   + LF

Copied: trunk/core/src/main/java/org/infinispan/container/SpinLockBasedLRUDataContainer.java (from rev 115, trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java)
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/SpinLockBasedLRUDataContainer.java	                        (rev 0)
+++ trunk/core/src/main/java/org/infinispan/container/SpinLockBasedLRUDataContainer.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,84 @@
+package org.infinispan.container;
+
+import net.jcip.annotations.ThreadSafe;
+import org.infinispan.container.entries.InternalCacheEntry;
+
+/**
+ * A data container that exposes an iterator that is ordered based on least recently used (visited) entries first.
+ * <p/>
+ * This builds on the {@link SpinLockBasedFIFODataContainer} by calling {@link
+ * SpinLockBasedLRUDataContainer#updateLinks(org.infinispan.container.SpinLockBasedFIFODataContainer.LinkedEntry)} even for
+ * {@link #get(Object)} invocations to make sure ordering is intact, as per LRU.
+ * <p/>
+ *
+ * @author Manik Surtani
+ * @since 4.0
+ */
+@ThreadSafe
+public class SpinLockBasedLRUDataContainer extends SpinLockBasedFIFODataContainer {
+
+   @Override
+   public InternalCacheEntry get(Object k) {
+      int h = hash(k.hashCode());
+      Segment s = segmentFor(h);
+      LinkedEntry le = s.get(k, h);
+      InternalCacheEntry ice = le == null ? null : le.entry;
+      if (ice != null) {
+         if (ice.isExpired()) {
+            remove(k);
+            ice = null;
+         } else {
+            ice.touch();
+            updateLinks(le);
+         }
+      }
+
+      return ice;
+   }
+
+   // TODO make sure even a put() on an existing entry updates links  
+
+   /**
+    * Updates links on this entry, moving it to the end of the linked list
+    *
+    * @param l linked entry to update
+    */
+   protected final void updateLinks(LinkedEntry l) {
+      if (l.next != dummyEntry.prev) {
+
+         // if we cannot lock l it means it is being updated by another process, either removing it or updating it anyway
+         // so we can skip updating links in that case.
+         if (l.tryLock()) {
+            try {
+               Aux before = l.prev;
+               before.lock();
+               Aux after = l.next;
+               after.lock();
+
+               LinkedEntry nextEntry = after.next;
+               nextEntry.lock();
+               dummyEntry.lock();
+               Aux last = dummyEntry.prev;
+               last.lock();
+
+               try {
+                  last.next = l;
+                  l.prev = last;
+                  after.next = dummyEntry;
+                  dummyEntry.prev = after;
+                  nextEntry.prev = before;
+                  before.next = nextEntry;
+               } finally {
+                  last.unlock();
+                  dummyEntry.unlock();
+                  nextEntry.unlock();
+                  after.unlock();
+                  before.unlock();
+               }
+            } finally {
+               l.unlock();
+            }
+         }
+      }
+   }
+}


Property changes on: trunk/core/src/main/java/org/infinispan/container/SpinLockBasedLRUDataContainer.java
___________________________________________________________________
Name: svn:keywords
   + Id Revision
Name: svn:eol-style
   + LF

Modified: trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java	2009-04-14 18:15:11 UTC (rev 115)
+++ trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -23,8 +23,8 @@
 
 import org.infinispan.container.DataContainer;
 import org.infinispan.container.SimpleDataContainer;
-import org.infinispan.container.FIFODataContainer;
-import org.infinispan.container.LRUDataContainer;
+import org.infinispan.container.SpinLockBasedFIFODataContainer;
+import org.infinispan.container.SpinLockBasedLRUDataContainer;
 import org.infinispan.factories.annotations.DefaultFactoryFor;
 import org.infinispan.config.ConfigurationException;
 
@@ -43,9 +43,9 @@
          case NONE:
             return (T) new SimpleDataContainer();
          case FIFO:
-            return (T) new FIFODataContainer();
+            return (T) new SpinLockBasedFIFODataContainer();
          case LRU:
-            return (T) new LRUDataContainer();
+            return (T) new SpinLockBasedLRUDataContainer();
          default:
             throw new ConfigurationException("Unknown eviction strategy " + configuration.getEvictionStrategy());
       }

Deleted: trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java	2009-04-14 18:15:11 UTC (rev 115)
+++ trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -1,211 +0,0 @@
-package org.infinispan.container;
-
-import org.infinispan.container.entries.ImmortalCacheEntry;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.MortalCacheEntry;
-import org.infinispan.container.entries.TransientCacheEntry;
-import org.infinispan.container.entries.TransientMortalCacheEntry;
-import org.testng.annotations.Test;
-
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-
-@Test(groups = "unit", testName = "container.FIFODataContainerTest")
-public class FIFODataContainerTest extends SimpleDataContainerTest {
-
-   @Override
-   protected DataContainer createContainer() {
-      return new FIFODataContainer();
-   }
-
-   public void testOrdering() {
-      long lifespan = 600000;
-      long idle = 600000;
-      for (int i = 0; i < 10; i++) dc.put("k" + i, "value", -1, -1);
-      for (int i = 10; i < 20; i++) dc.put("k" + i, "value", lifespan, -1);
-      for (int i = 20; i < 30; i++) dc.put("k" + i, "value", -1, idle);
-      for (int i = 30; i < 40; i++) dc.put("k" + i, "value", lifespan, idle);
-
-      // random visits
-      Random r = new Random();
-      for (int i = 0; i < 100; i++) dc.get("k" + r.nextInt(40));
-
-      // ensure order is maintained.
-      int i = 0;
-      for (InternalCacheEntry ice : dc) {
-         assert ice.getKey().equals("k" + i);
-         if (i < 10) assert ice instanceof ImmortalCacheEntry;
-         if (i >= 10 && i < 20) assert ice instanceof MortalCacheEntry;
-         if (i >= 20 && i < 30) assert ice instanceof TransientCacheEntry;
-         if (i >= 30 && i < 40) assert ice instanceof TransientMortalCacheEntry;
-         i++;
-      }
-   }
-
-   private void setInitialEntry() {
-      FIFODataContainer ldc = (FIFODataContainer) dc;
-      dc.put("k", "v", -1, -1);
-
-      assert dc.size() == 1;
-
-      FIFODataContainer.Aux last = ldc.dummyEntry.prev;
-      FIFODataContainer.Aux next = ldc.dummyEntry.next;
-      FIFODataContainer.LinkedEntry le = next.next;
-      FIFODataContainer.Aux last2 = le.next;
-
-      assert last == last2;
-      assert last != next;
-      assert le != ldc.dummyEntry;
-      assert le.prev == next;
-      assert le.next == last;
-      assert le.entry != null;
-      assert le.entry.getKey().equals("k");
-      assert le.entry.getValue().equals("v");
-   }
-
-   public void testInsertingLinks() {
-      FIFODataContainer ldc = (FIFODataContainer) dc;
-      assert dc.size() == 0;
-      assert ldc.dummyEntry.prev == ldc.dummyEntry.next;
-      assert ldc.dummyEntry.entry == null;
-
-      setInitialEntry();
-
-      // add one more
-      dc.put("k2", "v2", -1, -1);
-
-      assert dc.size() == 2;
-
-      FIFODataContainer.Aux last = ldc.dummyEntry.prev;
-      FIFODataContainer.Aux next = ldc.dummyEntry.next;
-      FIFODataContainer.LinkedEntry le1 = next.next;
-      FIFODataContainer.Aux next2 = le1.next;
-      FIFODataContainer.LinkedEntry le2 = next2.next;
-      FIFODataContainer.Aux last2 = le2.next;
-
-      assert last == last2;
-      assert last != next;
-      assert last != next2;
-      assert next != next2;
-      assert le1 != ldc.dummyEntry;
-      assert le2 != ldc.dummyEntry;
-      assert le1 != le2;
-
-      assert le1.prev == next;
-      assert le1.next == next2;
-      assert le2.prev == next2;
-      assert le2.next == last;
-
-      assert le1.entry != null;
-      assert le1.entry.getKey().equals("k");
-      assert le1.entry.getValue().equals("v");
-
-      assert le2.entry != null;
-      assert le2.entry.getKey().equals("k2");
-      assert le2.entry.getValue().equals("v2");
-   }
-
-   public void testRemovingLinks() {
-      FIFODataContainer aldc = (FIFODataContainer) dc;
-      assert dc.size() == 0;
-      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
-      assert aldc.dummyEntry.entry == null;
-
-      setInitialEntry();
-
-      dc.remove("k");
-
-      assert dc.size() == 0;
-      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
-      assert aldc.dummyEntry.entry == null;
-   }
-
-   public void testClear() {
-      FIFODataContainer aldc = (FIFODataContainer) dc;
-      assert dc.size() == 0;
-      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
-      assert aldc.dummyEntry.entry == null;
-
-      setInitialEntry();
-
-      dc.clear();
-
-      assert dc.size() == 0;
-      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
-      assert aldc.dummyEntry.entry == null;
-   }
-
-   public void testMultithreadAccess() throws InterruptedException {
-      assert dc.size() == 0;
-      int NUM_THREADS = 5;
-      long testDuration = 2000; // millis
-
-      Random r = new Random();
-      CountDownLatch startLatch = new CountDownLatch(1);
-
-      Worker[] workers = new Worker[NUM_THREADS];
-      for (int i = 0; i < NUM_THREADS; i++) workers[i] = new Worker("Worker-" + i, r, startLatch);
-      for (Worker w : workers) w.start();
-
-      startLatch.countDown();
-
-      Thread.sleep(testDuration); // generate some noise
-
-      for (Worker w : workers) w.running = false;
-      for (Worker w : workers) w.join();
-
-      assertNoStaleSpinLocks((FIFODataContainer) dc);
-   }
-
-   protected void assertNoStaleSpinLocks(FIFODataContainer fdc) {
-      FIFODataContainer.SpinLock first = fdc.dummyEntry;
-      FIFODataContainer.SpinLock next = fdc.dummyEntry;
-
-      do {
-         assert !next.l.get() : "Should NOT be locked!";
-         if (next instanceof FIFODataContainer.Aux)
-            next = ((FIFODataContainer.Aux) next).next;
-         else
-            next = ((FIFODataContainer.LinkedEntry) next).next;
-
-      } while (first != next);
-   }
-
-   protected final class Worker extends Thread {
-      CountDownLatch startLatch;
-      Random r;
-      volatile boolean running = true;
-
-      public Worker(String name, Random r, CountDownLatch startLatch) {
-         super(name);
-         this.r = r;
-         this.startLatch = startLatch;
-      }
-
-      @Override
-      public void run() {
-         try {
-            startLatch.await();
-         } catch (InterruptedException ignored) {
-         }
-
-         while (running) {
-            try {
-               sleep(r.nextInt(5) * 10);
-            } catch (InterruptedException ignored) {
-            }
-            switch (r.nextInt(3)) {
-               case 0:
-                  dc.put("key" + r.nextInt(100), "value", -1, -1);
-                  break;
-               case 1:
-                  dc.remove("key" + r.nextInt(100));
-                  break;
-               case 2:
-                  dc.get("key" + r.nextInt(100));
-                  break;
-            }
-         }
-      }
-   }
-}

Deleted: trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java	2009-04-14 18:15:11 UTC (rev 115)
+++ trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -1,47 +0,0 @@
-package org.infinispan.container;
-
-import org.infinispan.container.entries.ImmortalCacheEntry;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.MortalCacheEntry;
-import org.infinispan.container.entries.TransientCacheEntry;
-import org.infinispan.container.entries.TransientMortalCacheEntry;
-import org.testng.annotations.Test;
-
-@Test(groups = "unit", testName = "container.LRUDataContainerTest")
-public class LRUDataContainerTest extends FIFODataContainerTest {
-   @Override
-   protected DataContainer createContainer() {
-      return new LRUDataContainer();
-   }
-
-   @Override
-   public void testOrdering() {
-      long lifespan = 600000;
-      long idle = 600000;
-      for (int i = 0; i < 10; i++) dc.put(i, "value", -1, -1);
-      for (int i = 10; i < 20; i++) dc.put(i, "value", lifespan, -1);
-      for (int i = 20; i < 30; i++) dc.put(i, "value", -1, idle);
-      for (int i = 30; i < 40; i++) dc.put(i, "value", lifespan, idle);
-
-      // Visit all ODD numbered elements
-      for (int i = 0; i < 40; i++) {
-         if (i % 2 == 1) dc.get(i);
-      }
-
-      // ensure order is maintained.  The first 20 elements should be EVEN.
-      int i = 0;
-      for (InternalCacheEntry ice : dc) {
-         Integer key = (Integer) ice.getKey();
-         if (i < 20)
-            assert key % 2 == 0;
-         else
-            assert key % 2 == 1;
-
-         if (key < 10) assert ice instanceof ImmortalCacheEntry;
-         if (key >= 10 && key < 20) assert ice instanceof MortalCacheEntry;
-         if (key >= 20 && key < 30) assert ice instanceof TransientCacheEntry;
-         if (key >= 30 && key < 40) assert ice instanceof TransientMortalCacheEntry;
-         i++;
-      }
-   }
-}

Copied: trunk/core/src/test/java/org/infinispan/container/NewFIFODataContainerTest.java (from rev 111, trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java)
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/NewFIFODataContainerTest.java	                        (rev 0)
+++ trunk/core/src/test/java/org/infinispan/container/NewFIFODataContainerTest.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,201 @@
+package org.infinispan.container;
+
+import org.infinispan.container.entries.ImmortalCacheEntry;
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.MortalCacheEntry;
+import org.infinispan.container.entries.TransientCacheEntry;
+import org.infinispan.container.entries.TransientMortalCacheEntry;
+import org.testng.annotations.Test;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
+@Test(groups = "unit", testName = "container.NewFIFODataContainerTest")
+public class NewFIFODataContainerTest extends SimpleDataContainerTest {
+
+   @Override
+   protected DataContainer createContainer() {
+      return new NewFIFOContainer();
+   }
+
+   public void testOrdering() {
+      long lifespan = 600000;
+      long idle = 600000;
+      for (int i = 0; i < 10; i++) dc.put("k" + i, "value", -1, -1);
+      for (int i = 10; i < 20; i++) dc.put("k" + i, "value", lifespan, -1);
+      for (int i = 20; i < 30; i++) dc.put("k" + i, "value", -1, idle);
+      for (int i = 30; i < 40; i++) dc.put("k" + i, "value", lifespan, idle);
+
+      // random visits
+      Random r = new Random();
+      for (int i = 0; i < 100; i++) dc.get("k" + r.nextInt(40));
+
+      // ensure order is maintained.
+      int i = 0;
+      for (InternalCacheEntry ice : dc) {
+         assert ice.getKey().equals("k" + i);
+         if (i < 10) assert ice instanceof ImmortalCacheEntry;
+         if (i >= 10 && i < 20) assert ice instanceof MortalCacheEntry;
+         if (i >= 20 && i < 30) assert ice instanceof TransientCacheEntry;
+         if (i >= 30 && i < 40) assert ice instanceof TransientMortalCacheEntry;
+         i++;
+      }
+   }
+
+   private void setInitialEntry() {
+      NewFIFOContainer ldc = (NewFIFOContainer) dc;
+      dc.put("k", "v", -1, -1);
+
+      assert dc.size() == 1;
+
+      NewFIFOContainer.LinkedEntry tail = ldc.tail;
+      NewFIFOContainer.LinkedEntry head = ldc.head;
+      NewFIFOContainer.LinkedEntry e = ldc.head.n;
+
+      assert head.n == e;
+      assert head.p == tail;
+      assert tail.n == head;
+      assert tail.p == e;
+      assert e.n == tail;
+      assert e.p == head;
+      assert !e.isMarked();
+   }
+
+   public void testInsertingLinks() {
+      NewFIFOContainer ldc = (NewFIFOContainer) dc;
+      assert dc.size() == 0;
+      assert ldc.head.n == ldc.tail;
+      assert ldc.tail.n == ldc.head;
+      assert ldc.head.p == ldc.tail;
+      assert ldc.tail.p == ldc.head;
+
+      setInitialEntry();
+
+      // add one more
+      dc.put("k2", "v2", -1, -1);
+
+      assert dc.size() == 2;
+
+      NewFIFOContainer.LinkedEntry tail = ldc.tail;
+      NewFIFOContainer.LinkedEntry head = ldc.head;
+      NewFIFOContainer.LinkedEntry le1 = head.n;
+      NewFIFOContainer.LinkedEntry le2 = le1.n;
+
+      assert tail == le2.n;
+      assert tail != le1.n;
+      assert le1 != ldc.head;
+      assert le2 != ldc.head;
+      assert le1 != ldc.tail;
+      assert le2 != ldc.tail;
+      assert le1 != le2;
+
+      assert le1.p == head;
+      assert le1.n == le2;
+      assert le2.p == le1;
+      assert le2.n == tail;
+
+      assert le1.e != null;
+      assert le1.e.getKey().equals("k");
+      assert le1.e.getValue().equals("v");
+
+      assert le2.e != null;
+      assert le2.e.getKey().equals("k2");
+      assert le2.e.getValue().equals("v2");
+   }
+
+   public void testRemovingLinks() {
+      NewFIFOContainer aldc = (NewFIFOContainer) dc;
+      assert dc.size() == 0;
+      assert aldc.head.n == aldc.tail;
+      assert aldc.tail.n == aldc.head;
+      assert aldc.head.p == aldc.tail;
+      assert aldc.tail.p == aldc.head;
+
+      setInitialEntry();
+
+      dc.remove("k");
+
+      assert dc.size() == 0;
+      assert aldc.head.n == aldc.tail;
+      assert aldc.tail.n == aldc.head;
+      assert aldc.head.p == aldc.tail;
+      assert aldc.tail.p == aldc.head;
+   }
+
+   public void testClear() {
+      NewFIFOContainer aldc = (NewFIFOContainer) dc;
+      assert dc.size() == 0;
+      assert aldc.head.n == aldc.tail;
+      assert aldc.tail.n == aldc.head;
+      assert aldc.head.p == aldc.tail;
+      assert aldc.tail.p == aldc.head;
+
+      setInitialEntry();
+
+      dc.clear();
+
+      assert dc.size() == 0;
+      assert aldc.head.n == aldc.tail;
+      assert aldc.tail.n == aldc.head;
+      assert aldc.head.p == aldc.tail;
+      assert aldc.tail.p == aldc.head;
+   }
+
+   public void testMultithreadAccess() throws InterruptedException {
+      assert dc.size() == 0;
+      int NUM_THREADS = 5;
+      long testDuration = 2000; // millis
+
+      Random r = new Random();
+      CountDownLatch startLatch = new CountDownLatch(1);
+
+      Worker[] workers = new Worker[NUM_THREADS];
+      for (int i = 0; i < NUM_THREADS; i++) workers[i] = new Worker("Worker-" + i, r, startLatch);
+      for (Worker w : workers) w.start();
+
+      startLatch.countDown();
+
+      Thread.sleep(testDuration); // generate some noise
+
+      for (Worker w : workers) w.running = false;
+      for (Worker w : workers) w.join();
+   }
+
+   protected final class Worker extends Thread {
+      CountDownLatch startLatch;
+      Random r;
+      volatile boolean running = true;
+
+      public Worker(String name, Random r, CountDownLatch startLatch) {
+         super(name);
+         this.r = r;
+         this.startLatch = startLatch;
+      }
+
+      @Override
+      public void run() {
+         try {
+            startLatch.await();
+         } catch (InterruptedException ignored) {
+         }
+
+         while (running) {
+            try {
+               sleep(r.nextInt(5) * 10);
+            } catch (InterruptedException ignored) {
+            }
+            switch (r.nextInt(3)) {
+               case 0:
+                  dc.put("key" + r.nextInt(100), "value", -1, -1);
+                  break;
+               case 1:
+                  dc.remove("key" + r.nextInt(100));
+                  break;
+               case 2:
+                  dc.get("key" + r.nextInt(100));
+                  break;
+            }
+         }
+      }
+   }
+}
\ No newline at end of file

Copied: trunk/core/src/test/java/org/infinispan/container/NewLRUDataContainerTest.java (from rev 111, trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java)
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/NewLRUDataContainerTest.java	                        (rev 0)
+++ trunk/core/src/test/java/org/infinispan/container/NewLRUDataContainerTest.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,47 @@
+package org.infinispan.container;
+
+import org.infinispan.container.entries.ImmortalCacheEntry;
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.MortalCacheEntry;
+import org.infinispan.container.entries.TransientCacheEntry;
+import org.infinispan.container.entries.TransientMortalCacheEntry;
+import org.testng.annotations.Test;
+
+@Test(groups = "unit", testName = "container.NewLRUDataContainerTest")
+public class NewLRUDataContainerTest extends NewFIFODataContainerTest {
+   @Override
+   protected DataContainer createContainer() {
+      return new NewLRUContainer();
+   }
+
+   @Override
+   public void testOrdering() {
+      long lifespan = 600000;
+      long idle = 600000;
+      for (int i = 0; i < 10; i++) dc.put(i, "value", -1, -1);
+      for (int i = 10; i < 20; i++) dc.put(i, "value", lifespan, -1);
+      for (int i = 20; i < 30; i++) dc.put(i, "value", -1, idle);
+      for (int i = 30; i < 40; i++) dc.put(i, "value", lifespan, idle);
+
+      // Visit all ODD numbered elements
+      for (int i = 0; i < 40; i++) {
+         if (i % 2 == 1) dc.get(i);
+      }
+
+      // ensure order is maintained.  The first 20 elements should be EVEN.
+      int i = 0;
+      for (InternalCacheEntry ice : dc) {
+         Integer key = (Integer) ice.getKey();
+         if (i < 20)
+            assert key % 2 == 0;
+         else
+            assert key % 2 == 1;
+
+         if (key < 10) assert ice instanceof ImmortalCacheEntry;
+         if (key >= 10 && key < 20) assert ice instanceof MortalCacheEntry;
+         if (key >= 20 && key < 30) assert ice instanceof TransientCacheEntry;
+         if (key >= 30 && key < 40) assert ice instanceof TransientMortalCacheEntry;
+         i++;
+      }
+   }
+}
\ No newline at end of file

Copied: trunk/core/src/test/java/org/infinispan/container/SpinLockBasedFIFODataContainerTest.java (from rev 115, trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java)
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/SpinLockBasedFIFODataContainerTest.java	                        (rev 0)
+++ trunk/core/src/test/java/org/infinispan/container/SpinLockBasedFIFODataContainerTest.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,211 @@
+package org.infinispan.container;
+
+import org.infinispan.container.entries.ImmortalCacheEntry;
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.MortalCacheEntry;
+import org.infinispan.container.entries.TransientCacheEntry;
+import org.infinispan.container.entries.TransientMortalCacheEntry;
+import org.testng.annotations.Test;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
+@Test(groups = "unit", testName = "container.SpinLockBasedFIFODataContainerTest")
+public class SpinLockBasedFIFODataContainerTest extends SimpleDataContainerTest {
+
+   @Override
+   protected DataContainer createContainer() {
+      return new SpinLockBasedFIFODataContainer();
+   }
+
+   public void testOrdering() {
+      long lifespan = 600000;
+      long idle = 600000;
+      for (int i = 0; i < 10; i++) dc.put("k" + i, "value", -1, -1);
+      for (int i = 10; i < 20; i++) dc.put("k" + i, "value", lifespan, -1);
+      for (int i = 20; i < 30; i++) dc.put("k" + i, "value", -1, idle);
+      for (int i = 30; i < 40; i++) dc.put("k" + i, "value", lifespan, idle);
+
+      // random visits
+      Random r = new Random();
+      for (int i = 0; i < 100; i++) dc.get("k" + r.nextInt(40));
+
+      // ensure order is maintained.
+      int i = 0;
+      for (InternalCacheEntry ice : dc) {
+         assert ice.getKey().equals("k" + i);
+         if (i < 10) assert ice instanceof ImmortalCacheEntry;
+         if (i >= 10 && i < 20) assert ice instanceof MortalCacheEntry;
+         if (i >= 20 && i < 30) assert ice instanceof TransientCacheEntry;
+         if (i >= 30 && i < 40) assert ice instanceof TransientMortalCacheEntry;
+         i++;
+      }
+   }
+
+   private void setInitialEntry() {
+      SpinLockBasedFIFODataContainer ldc = (SpinLockBasedFIFODataContainer) dc;
+      dc.put("k", "v", -1, -1);
+
+      assert dc.size() == 1;
+
+      SpinLockBasedFIFODataContainer.Aux last = ldc.dummyEntry.prev;
+      SpinLockBasedFIFODataContainer.Aux next = ldc.dummyEntry.next;
+      SpinLockBasedFIFODataContainer.LinkedEntry le = next.next;
+      SpinLockBasedFIFODataContainer.Aux last2 = le.next;
+
+      assert last == last2;
+      assert last != next;
+      assert le != ldc.dummyEntry;
+      assert le.prev == next;
+      assert le.next == last;
+      assert le.entry != null;
+      assert le.entry.getKey().equals("k");
+      assert le.entry.getValue().equals("v");
+   }
+
+   public void testInsertingLinks() {
+      SpinLockBasedFIFODataContainer ldc = (SpinLockBasedFIFODataContainer) dc;
+      assert dc.size() == 0;
+      assert ldc.dummyEntry.prev == ldc.dummyEntry.next;
+      assert ldc.dummyEntry.entry == null;
+
+      setInitialEntry();
+
+      // add one more
+      dc.put("k2", "v2", -1, -1);
+
+      assert dc.size() == 2;
+
+      SpinLockBasedFIFODataContainer.Aux last = ldc.dummyEntry.prev;
+      SpinLockBasedFIFODataContainer.Aux next = ldc.dummyEntry.next;
+      SpinLockBasedFIFODataContainer.LinkedEntry le1 = next.next;
+      SpinLockBasedFIFODataContainer.Aux next2 = le1.next;
+      SpinLockBasedFIFODataContainer.LinkedEntry le2 = next2.next;
+      SpinLockBasedFIFODataContainer.Aux last2 = le2.next;
+
+      assert last == last2;
+      assert last != next;
+      assert last != next2;
+      assert next != next2;
+      assert le1 != ldc.dummyEntry;
+      assert le2 != ldc.dummyEntry;
+      assert le1 != le2;
+
+      assert le1.prev == next;
+      assert le1.next == next2;
+      assert le2.prev == next2;
+      assert le2.next == last;
+
+      assert le1.entry != null;
+      assert le1.entry.getKey().equals("k");
+      assert le1.entry.getValue().equals("v");
+
+      assert le2.entry != null;
+      assert le2.entry.getKey().equals("k2");
+      assert le2.entry.getValue().equals("v2");
+   }
+
+   public void testRemovingLinks() {
+      SpinLockBasedFIFODataContainer aldc = (SpinLockBasedFIFODataContainer) dc;
+      assert dc.size() == 0;
+      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
+      assert aldc.dummyEntry.entry == null;
+
+      setInitialEntry();
+
+      dc.remove("k");
+
+      assert dc.size() == 0;
+      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
+      assert aldc.dummyEntry.entry == null;
+   }
+
+   public void testClear() {
+      SpinLockBasedFIFODataContainer aldc = (SpinLockBasedFIFODataContainer) dc;
+      assert dc.size() == 0;
+      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
+      assert aldc.dummyEntry.entry == null;
+
+      setInitialEntry();
+
+      dc.clear();
+
+      assert dc.size() == 0;
+      assert aldc.dummyEntry.prev == aldc.dummyEntry.next;
+      assert aldc.dummyEntry.entry == null;
+   }
+
+   public void testMultithreadAccess() throws InterruptedException {
+      assert dc.size() == 0;
+      int NUM_THREADS = 5;
+      long testDuration = 2000; // millis
+
+      Random r = new Random();
+      CountDownLatch startLatch = new CountDownLatch(1);
+
+      Worker[] workers = new Worker[NUM_THREADS];
+      for (int i = 0; i < NUM_THREADS; i++) workers[i] = new Worker("Worker-" + i, r, startLatch);
+      for (Worker w : workers) w.start();
+
+      startLatch.countDown();
+
+      Thread.sleep(testDuration); // generate some noise
+
+      for (Worker w : workers) w.running = false;
+      for (Worker w : workers) w.join();
+
+      assertNoStaleSpinLocks((SpinLockBasedFIFODataContainer) dc);
+   }
+
+   protected void assertNoStaleSpinLocks(SpinLockBasedFIFODataContainer fdc) {
+      SpinLockBasedFIFODataContainer.SpinLock first = fdc.dummyEntry;
+      SpinLockBasedFIFODataContainer.SpinLock next = fdc.dummyEntry;
+
+      do {
+         assert !next.l.get() : "Should NOT be locked!";
+         if (next instanceof SpinLockBasedFIFODataContainer.Aux)
+            next = ((SpinLockBasedFIFODataContainer.Aux) next).next;
+         else
+            next = ((SpinLockBasedFIFODataContainer.LinkedEntry) next).next;
+
+      } while (first != next);
+   }
+
+   protected final class Worker extends Thread {
+      CountDownLatch startLatch;
+      Random r;
+      volatile boolean running = true;
+
+      public Worker(String name, Random r, CountDownLatch startLatch) {
+         super(name);
+         this.r = r;
+         this.startLatch = startLatch;
+      }
+
+      @Override
+      public void run() {
+         try {
+            startLatch.await();
+         } catch (InterruptedException ignored) {
+         }
+
+         while (running) {
+            try {
+               sleep(r.nextInt(5) * 10);
+            } catch (InterruptedException ignored) {
+            }
+            switch (r.nextInt(3)) {
+               case 0:
+                  dc.put("key" + r.nextInt(100), "value", -1, -1);
+                  break;
+               case 1:
+                  dc.remove("key" + r.nextInt(100));
+                  break;
+               case 2:
+                  dc.get("key" + r.nextInt(100));
+                  break;
+            }
+         }
+      }
+   }
+}


Property changes on: trunk/core/src/test/java/org/infinispan/container/SpinLockBasedFIFODataContainerTest.java
___________________________________________________________________
Name: svn:keywords
   + Id Revision
Name: svn:eol-style
   + LF

Copied: trunk/core/src/test/java/org/infinispan/container/SpinLockBasedLRUDataContainerTest.java (from rev 115, trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java)
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/SpinLockBasedLRUDataContainerTest.java	                        (rev 0)
+++ trunk/core/src/test/java/org/infinispan/container/SpinLockBasedLRUDataContainerTest.java	2009-04-15 08:04:36 UTC (rev 116)
@@ -0,0 +1,47 @@
+package org.infinispan.container;
+
+import org.infinispan.container.entries.ImmortalCacheEntry;
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.MortalCacheEntry;
+import org.infinispan.container.entries.TransientCacheEntry;
+import org.infinispan.container.entries.TransientMortalCacheEntry;
+import org.testng.annotations.Test;
+
+@Test(groups = "unit", testName = "container.SpinLockBasedLRUDataContainerTest")
+public class SpinLockBasedLRUDataContainerTest extends SpinLockBasedFIFODataContainerTest {
+   /**
+    * Reuses the whole FIFO test suite, but runs it against the LRU container.
+    */
+   @Override
+   protected DataContainer createContainer() {
+      return new SpinLockBasedLRUDataContainer();
+   }
+
+   /**
+    * LRU-specific ordering check: after visiting the odd-numbered keys with get(),
+    * iteration must yield the unvisited (even) keys first, then the visited (odd)
+    * keys — i.e. least-recently-used entries come first. Also verifies each entry
+    * was materialised as the type matching its lifespan/max-idle combination.
+    */
+   @Override
+   public void testOrdering() {
+      long lifespan = 600000;
+      long idle = 600000;
+      // four groups of ten keys, one per lifespan/idle combination
+      for (int i = 0; i < 10; i++) dc.put(i, "value", -1, -1);          // immortal
+      for (int i = 10; i < 20; i++) dc.put(i, "value", lifespan, -1);   // mortal
+      for (int i = 20; i < 30; i++) dc.put(i, "value", -1, idle);       // transient
+      for (int i = 30; i < 40; i++) dc.put(i, "value", lifespan, idle); // transient + mortal
+
+      // Visit all ODD numbered elements
+      for (int i = 0; i < 40; i++) {
+         if (i % 2 == 1) dc.get(i);
+      }
+
+      // ensure order is maintained.  The first 20 elements should be EVEN.
+      int i = 0;
+      for (InternalCacheEntry ice : dc) {
+         Integer key = (Integer) ice.getKey();
+         if (i < 20)
+            assert key % 2 == 0;
+         else
+            assert key % 2 == 1;
+
+         // entry type must match the lifespan/idle combination used at insertion
+         if (key < 10) assert ice instanceof ImmortalCacheEntry;
+         if (key >= 10 && key < 20) assert ice instanceof MortalCacheEntry;
+         if (key >= 20 && key < 30) assert ice instanceof TransientCacheEntry;
+         if (key >= 30 && key < 40) assert ice instanceof TransientMortalCacheEntry;
+         i++;
+      }
+   }
+}


Property changes on: trunk/core/src/test/java/org/infinispan/container/SpinLockBasedLRUDataContainerTest.java
___________________________________________________________________
Name: svn:keywords
   + Id Revision
Name: svn:eol-style
   + LF




More information about the infinispan-commits mailing list