[infinispan-commits] Infinispan SVN: r1619 - in trunk/core/src: main/java/org/infinispan/container and 10 other directories.

infinispan-commits@lists.jboss.org
Thu Mar 25 06:49:04 EDT 2010


Author: vblagojevic@jboss.com
Date: 2010-03-25 06:49:02 -0400 (Thu, 25 Mar 2010)
New Revision: 1619

Added:
   trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java
   trunk/core/src/main/java/org/infinispan/eviction/EvictionThreadPolicy.java
   trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java
Removed:
   trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java
   trunk/core/src/main/java/org/infinispan/util/concurrent/BufferedConcurrentHashMap.java
Modified:
   trunk/core/src/main/java/org/infinispan/config/Configuration.java
   trunk/core/src/main/java/org/infinispan/container/DataContainer.java
   trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
   trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java
   trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
   trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java
   trunk/core/src/main/java/org/infinispan/eviction/EvictionManagerImpl.java
   trunk/core/src/main/java/org/infinispan/eviction/EvictionStrategy.java
   trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java
   trunk/core/src/main/resources/config-samples/all.xml
   trunk/core/src/test/java/org/infinispan/config/parsing/XmlFileParsingTest.java
   trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java
   trunk/core/src/test/java/org/infinispan/distribution/DistSyncTxFuncTest.java
   trunk/core/src/test/java/org/infinispan/eviction/BaseEvictionFunctionalTest.java
   trunk/core/src/test/java/org/infinispan/eviction/MarshalledValuesEvictionTest.java
   trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java
   trunk/core/src/test/java/org/infinispan/stress/MapStressTest.java
   trunk/core/src/test/resources/configs/named-cache-test.xml
Log:
[ISPN-299] - Implement an LIRS eviction policy
Add eviction thread policy (new property of eviction element)
Redesign EvictionManagerImpl
Deprecate other containers
Update tests

Modified: trunk/core/src/main/java/org/infinispan/config/Configuration.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/config/Configuration.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/config/Configuration.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -23,6 +23,7 @@
 
 import org.infinispan.distribution.DefaultConsistentHash;
 import org.infinispan.eviction.EvictionStrategy;
+import org.infinispan.eviction.EvictionThreadPolicy;
 import org.infinispan.factories.ComponentRegistry;
 import org.infinispan.factories.annotations.Inject;
 import org.infinispan.factories.annotations.SurvivesRestarts;
@@ -339,7 +340,25 @@
          this.eviction.setStrategy(EvictionStrategy.NONE);
       }
    }
-
+   
+   public EvictionThreadPolicy getEvictionThreadPolicy() {
+      return eviction.threadPolicy;
+   }
+   
+   public void setEvictionThreadPolicy(EvictionThreadPolicy policy) {
+      this.eviction.setThreadPolicy(policy);
+   }
+   
+   public void setEvictionThreadPolicy(String policy) {
+      try {
+         this.eviction.threadPolicy = EvictionThreadPolicy.valueOf(uc(policy));
+      } catch (IllegalArgumentException e) {
+         // valueOf() throws for an unknown name instead of returning null
+         log.warn("Unknown eviction thread policy '" + policy + "'!  Using EvictionThreadPolicy.DEFAULT");
+         this.eviction.setThreadPolicy(EvictionThreadPolicy.DEFAULT);
+      }
+   }
+  
    public int getEvictionMaxEntries() {
       return eviction.maxEntries;
    }
@@ -893,7 +910,7 @@
       /** @configRef desc="Concurrency level for lock containers. Adjust this value according to the number of concurrent 
       *             threads interacting with Infinispan.  Similar to the concurrencyLevel tuning parameter seen in
        *             the JDK's ConcurrentHashMap."*/
-      protected Integer concurrencyLevel = 512;
+      protected Integer concurrencyLevel = 32;
 
       @XmlAttribute
       public void setLockAcquisitionTimeout(Long lockAcquisitionTimeout) {
@@ -1277,6 +1294,9 @@
 
       /** @configRef desc="Maximum number of entries in a cache instance.  -1 means no limit." */
       protected Integer maxEntries=-1;
+      
+      /** @configRef desc="Threading policy for eviction." */
+      protected EvictionThreadPolicy threadPolicy=EvictionThreadPolicy.DEFAULT;
 
       @XmlAttribute
       public void setWakeUpInterval(Long wakeUpInterval) {
@@ -1293,6 +1313,12 @@
          testImmutability("strategy");
          this.strategy = strategy;
       }
+      
+      @XmlAttribute
+      public void setThreadPolicy(EvictionThreadPolicy threadPolicy) {
+         testImmutability("threadPolicy");
+         this.threadPolicy = threadPolicy;
+      }
 
       @XmlAttribute
       public void setMaxEntries(Integer maxEntries) {
@@ -1309,6 +1335,7 @@
 
          if (maxEntries != null ? !maxEntries.equals(that.maxEntries) : that.maxEntries != null) return false;
          if (strategy != that.strategy) return false;
+         if (threadPolicy != that.threadPolicy) return false;
          if (wakeUpInterval != null ? !wakeUpInterval.equals(that.wakeUpInterval) : that.wakeUpInterval != null)
             return false;
 
@@ -1319,6 +1346,7 @@
       public int hashCode() {
          int result = wakeUpInterval != null ? wakeUpInterval.hashCode() : 0;
          result = 31 * result + (strategy != null ? strategy.hashCode() : 0);
+         result = 31 * result + (threadPolicy != null ? threadPolicy.hashCode() : 0);
          result = 31 * result + (maxEntries != null ? maxEntries.hashCode() : 0);
          return result;
       }
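
The new mutators can be driven programmatically as well as from XML. A minimal sketch, assuming
the pre-existing setEvictionStrategy(String) and setEvictionMaxEntries(int) mutators on
Configuration (this commit only adds the thread-policy setters):

   Configuration cfg = new Configuration();
   cfg.setEvictionStrategy("LIRS");            // new strategy, see EvictionStrategy below
   cfg.setEvictionThreadPolicy("PIGGYBACK");   // new property of the eviction element
   cfg.setEvictionMaxEntries(1000);            // bound enforced by the data container

An unrecognised thread-policy name logs a warning and falls back to EvictionThreadPolicy.DEFAULT.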

Modified: trunk/core/src/main/java/org/infinispan/container/DataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/DataContainer.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/container/DataContainer.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -21,19 +21,20 @@
  */
 package org.infinispan.container;
 
+import java.util.Collection;
+import java.util.Set;
+
 import org.infinispan.container.entries.InternalCacheEntry;
 import org.infinispan.factories.annotations.Stop;
 import org.infinispan.factories.scopes.Scope;
 import org.infinispan.factories.scopes.Scopes;
 
-import java.util.Collection;
-import java.util.Set;
-
 /**
  * The main internal data structure which stores entries
  *
  * @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
  * @author Galder Zamarreño
+ * @author Vladimir Blagojevic
  * @since 4.0
  */
 @Scope(Scopes.NAMED_CACHE)
@@ -126,4 +127,13 @@
     * Purges entries that have passed their expiry time
     */
    void purgeExpired();
+   
+   
+   /**
+    * Returns a set of eviction candidates. Containers not supporting eviction should return an
+    * empty set.
+    * 
+    * @return a set of entries that should be evicted from this container.
+    */
+   Set<InternalCacheEntry> getEvictionCandidates();
 }
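
The redesigned EvictionManagerImpl further down is the consumer of this method: rather than
scanning the whole container once it grows too large, it evicts exactly the candidates the
container reports. A condensed sketch of that interaction, assuming injected cache and
dataContainer fields and FAIL_SILENTLY as statically imported in EvictionManagerImpl:

   Set<InternalCacheEntry> candidates = dataContainer.getEvictionCandidates();
   AdvancedCache<Object, Object> ac = cache.getAdvancedCache();
   for (InternalCacheEntry ice : candidates) {
      // FAIL_SILENTLY keeps a failed eviction from propagating to the caller
      ac.withFlags(FAIL_SILENTLY).evict(ice.getKey());
   }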

Copied: trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java (from rev 1610, trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java)
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java	                        (rev 0)
+++ trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -0,0 +1,405 @@
+package org.infinispan.container;
+
+import java.util.AbstractCollection;
+import java.util.AbstractSet;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import net.jcip.annotations.ThreadSafe;
+
+import org.infinispan.Cache;
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.container.entries.InternalEntryFactory;
+import org.infinispan.eviction.EvictionStrategy;
+import org.infinispan.eviction.EvictionThreadPolicy;
+import org.infinispan.factories.annotations.Inject;
+import org.infinispan.util.Immutables;
+import org.infinispan.util.concurrent.BoundedConcurrentHashMap;
+import org.infinispan.util.concurrent.BoundedConcurrentHashMap.Eviction;
+import org.infinispan.util.concurrent.BoundedConcurrentHashMap.EvictionListener;
+
+/**
+ * Default data container, implemented using two ConcurrentHashMaps, one for mortal and one for
+ * immortal entries.
+ * <p/>
+ * The unbounded variant does not order entries and hence does not support eviction; the bounded
+ * variant keeps immortal entries in a {@link BoundedConcurrentHashMap} that selects eviction
+ * candidates.
+ * <p/>
+ * This implementation offers O(1) performance for all operations.
+ *
+ * @author Manik Surtani
+ * @author Galder Zamarreño
+ * @author Vladimir Blagojevic
+ * @since 4.0
+ */
+@ThreadSafe
+public class DefaultDataContainer implements DataContainer {
+   final ConcurrentMap<Object, InternalCacheEntry> immortalEntries;
+   final ConcurrentMap<Object, InternalCacheEntry> mortalEntries;
+   final AtomicInteger numEntries = new AtomicInteger(0);
+   final InternalEntryFactory entryFactory;
+   final DefaultEvictionListener evictionListener; 
+   protected Cache<Object, Object> cache;
+
+
+   protected DefaultDataContainer(int concurrencyLevel) {
+      this(concurrencyLevel, false, false);
+   }
+
+   protected DefaultDataContainer(int concurrencyLevel, boolean recordCreation, boolean recordLastUsed) {
+      immortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(128, 0.75f, concurrencyLevel);
+      mortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(64, 0.75f, concurrencyLevel);
+      entryFactory = new InternalEntryFactory(recordCreation, recordLastUsed);
+      evictionListener = null;
+   }
+   
+   protected DefaultDataContainer(int concurrencyLevel, int maxEntries, EvictionStrategy strategy, EvictionThreadPolicy policy,
+            boolean recordCreation, boolean recordLastUsed) {
+      
+      // translate eviction policy and strategy
+      switch (policy) {
+         case DEFAULT:
+            evictionListener = new DefaultEvictionListener();
+            break;
+         case PIGGYBACK:
+            evictionListener = new PiggybackEvictionListener();
+            break;
+         default:
+            throw new IllegalArgumentException("No such eviction thread policy " + policy);
+      }
+      
+      Eviction eviction;
+      switch (strategy) {
+         case FIFO:
+         case LRU:
+            eviction = Eviction.LRU;            
+            break;
+         case LIRS:
+            eviction = Eviction.LIRS;            
+            break;
+         default:
+            throw new IllegalArgumentException("No such eviction strategy " + strategy);
+      }
+      immortalEntries = new BoundedConcurrentHashMap<Object, InternalCacheEntry>(maxEntries,concurrencyLevel, eviction, evictionListener);
+      mortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(64, 0.75f, concurrencyLevel);
+      entryFactory = new InternalEntryFactory(recordCreation, recordLastUsed);
+   }
+   
+   @Inject
+   public void initialize(Cache<Object, Object> cache) {      
+      this.cache = cache;    
+   }
+   
+   public static DataContainer boundedDataContainer(int concurrencyLevel, int maxEntries, EvictionStrategy strategy, EvictionThreadPolicy policy) {
+      return new DefaultDataContainer(concurrencyLevel, maxEntries, strategy,policy, false, false) {
+
+         @Override
+         public int size() {
+            return immortalEntries.size() + mortalEntries.size();
+         }
+
+         @Override
+         public Set<InternalCacheEntry> getEvictionCandidates() {
+            return evictionListener.getEvicted();
+         }
+      };
+   }
+   
+   public static DataContainer unBoundedDataContainer(int concurrencyLevel) {
+      return new DefaultDataContainer(concurrencyLevel) ;
+   }
+   
+   @Override
+   public Set<InternalCacheEntry> getEvictionCandidates() {
+      return Collections.emptySet();
+   }
+
+   public InternalCacheEntry peek(Object key) {
+      InternalCacheEntry e = immortalEntries.get(key);
+      if (e == null) e = mortalEntries.get(key);
+      return e;
+   }
+
+   public InternalCacheEntry get(Object k) {
+      InternalCacheEntry e = peek(k);
+      if (e != null) {
+         if (e.isExpired()) {
+            mortalEntries.remove(k);
+            numEntries.getAndDecrement();
+            e = null;
+         } else {
+            e.touch();
+         }
+      }
+      return e;
+   }
+
+   protected void successfulPut(InternalCacheEntry ice, boolean newEntry) {
+      // no-op
+   }
+
+   public void put(Object k, Object v, long lifespan, long maxIdle) {
+      InternalCacheEntry e = immortalEntries.get(k);
+      if (e != null) {
+         e.setValue(v);
+         e = entryFactory.update(e, lifespan, maxIdle);
+
+         if (e.canExpire()) {
+            immortalEntries.remove(k);
+            mortalEntries.put(k, e);
+         }
+         successfulPut(e, false);
+      } else {
+         e = mortalEntries.get(k);
+         if (e != null) {
+            e.setValue(v);
+            InternalCacheEntry original = e;
+            e = entryFactory.update(e, lifespan, maxIdle);
+
+            if (!e.canExpire()) {
+               mortalEntries.remove(k);
+               immortalEntries.put(k, e);
+            } else if (e != original) {
+               // the entry has changed type, but still can expire!
+               mortalEntries.put(k, e);
+            }
+            successfulPut(e, false);
+         } else {
+            // this is a brand-new entry
+            numEntries.getAndIncrement();
+            e = entryFactory.createNewEntry(k, v, lifespan, maxIdle);
+            if (e.canExpire())
+               mortalEntries.put(k, e);
+            else
+               immortalEntries.put(k, e);
+            successfulPut(e, true);
+         }
+      }
+   }
+
+   public boolean containsKey(Object k) {
+      InternalCacheEntry ice = peek(k);
+      if (ice != null && ice.isExpired()) {
+         mortalEntries.remove(k);
+         numEntries.getAndDecrement();
+         ice = null;
+      }
+      return ice != null;
+   }
+
+   public InternalCacheEntry remove(Object k) {
+      InternalCacheEntry e = immortalEntries.remove(k);
+      if (e == null) e = mortalEntries.remove(k);
+      if (e != null) numEntries.getAndDecrement();
+
+      return e == null || e.isExpired() ? null : e;
+   }
+
+   public int size() {
+      return numEntries.get();
+   }
+
+   public void clear() {
+      immortalEntries.clear();
+      mortalEntries.clear();
+      numEntries.set(0);
+   }
+
+   public Set<Object> keySet() {
+      return new KeySet();
+   }
+
+   public Collection<Object> values() {
+      return new Values();
+   }
+
+   public Set<InternalCacheEntry> entrySet() {
+      return new EntrySet();
+   }
+
+   public void purgeExpired() {
+      for (Iterator<InternalCacheEntry> entries = mortalEntries.values().iterator(); entries.hasNext();) {
+         InternalCacheEntry e = entries.next();
+         if (e.isExpired()) {
+            entries.remove();
+            numEntries.getAndDecrement();
+         }
+      }
+   }
+
+   public Iterator<InternalCacheEntry> iterator() {
+      return new EntryIterator(immortalEntries.values().iterator(), mortalEntries.values().iterator());
+   }
+   
+   private class DefaultEvictionListener implements EvictionListener<Object, InternalCacheEntry>{
+      final List <InternalCacheEntry> evicted = Collections.synchronizedList(new LinkedList<InternalCacheEntry>());
+
+      @Override
+      public void evicted(Object key, InternalCacheEntry value) {
+         evicted.add(value);
+      }   
+      
+      public Set<InternalCacheEntry> getEvicted() {
+         synchronized (evicted) {
+            return new HashSet<InternalCacheEntry>(evicted);
+         } 
+      }
+   }
+   
+   private class PiggybackEvictionListener extends  DefaultEvictionListener{
+      
+      @Override
+      public void evicted(Object key, InternalCacheEntry value) {
+         cache.getAdvancedCache().evict(key);
+      }  
+      
+      public Set<InternalCacheEntry> getEvicted() {
+         return Collections.emptySet();
+      }
+   }
+
+   private class KeySet extends AbstractSet<Object> {
+      final Set<Object> immortalKeys;
+      final Set<Object> mortalKeys;
+
+      public KeySet() {
+         immortalKeys = immortalEntries.keySet();
+         mortalKeys = mortalEntries.keySet();
+      }
+
+      public Iterator<Object> iterator() {
+         return new KeyIterator(immortalKeys.iterator(), mortalKeys.iterator());
+      }
+
+      public void clear() {
+         throw new UnsupportedOperationException();
+      }
+
+      public boolean contains(Object o) {
+         return immortalKeys.contains(o) || mortalKeys.contains(o);
+      }
+
+      public boolean remove(Object o) {
+         throw new UnsupportedOperationException();
+      }
+
+      public int size() {
+         return immortalKeys.size() + mortalKeys.size();
+      }
+   }
+
+   private static class KeyIterator implements Iterator<Object> {
+      Iterator<Iterator<Object>> metaIterator;
+      Iterator<Object> currentIterator;
+
+      private KeyIterator(Iterator<Object> immortalIterator, Iterator<Object> mortalIterator) {
+         metaIterator = Arrays.asList(immortalIterator, mortalIterator).iterator();
+         if (metaIterator.hasNext()) currentIterator = metaIterator.next();
+      }
+
+      public boolean hasNext() {
+         boolean hasNext = currentIterator.hasNext();
+         while (!hasNext && metaIterator.hasNext()) {
+            currentIterator = metaIterator.next();
+            hasNext = currentIterator.hasNext();
+         }
+         return hasNext;
+      }
+
+      @SuppressWarnings("unchecked")
+      public Object next() {
+         return currentIterator.next();
+      }
+
+      public void remove() {
+         throw new UnsupportedOperationException();
+      }
+   }
+
+   private class EntrySet extends AbstractSet<InternalCacheEntry> {
+      public Iterator<InternalCacheEntry> iterator() {
+         return new ImmutableEntryIterator(immortalEntries.values().iterator(), mortalEntries.values().iterator());
+      }
+
+      @Override
+      public int size() {
+         return immortalEntries.size() + mortalEntries.size();
+      }
+   }
+
+   private static class MortalInmortalIterator {
+      Iterator<Iterator<InternalCacheEntry>> metaIterator;
+      Iterator<InternalCacheEntry> currentIterator;
+
+      private MortalInmortalIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
+         metaIterator = Arrays.asList(immortalIterator, mortalIterator).iterator();
+         if (metaIterator.hasNext()) currentIterator = metaIterator.next();
+      }
+
+      public boolean hasNext() {
+         boolean hasNext = currentIterator.hasNext();
+         while (!hasNext && metaIterator.hasNext()) {
+            currentIterator = metaIterator.next();
+            hasNext = currentIterator.hasNext();
+         }
+         return hasNext;
+      }
+
+      public void remove() {
+         throw new UnsupportedOperationException();
+      }
+   }
+
+   private class EntryIterator extends MortalInmortalIterator implements Iterator<InternalCacheEntry> {
+      private EntryIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
+         super(immortalIterator, mortalIterator);
+      }
+
+      @SuppressWarnings("unchecked")
+      public InternalCacheEntry next() {
+         return currentIterator.next();
+      }
+   }
+
+   private class ImmutableEntryIterator extends MortalInmortalIterator implements Iterator<InternalCacheEntry> {
+      private ImmutableEntryIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
+         super(immortalIterator, mortalIterator);
+      }
+
+      public InternalCacheEntry next() {
+         return Immutables.immutableInternalCacheEntry(currentIterator.next());
+      }
+   }
+
+   private class Values extends AbstractCollection<Object> {
+      @Override
+      public Iterator<Object> iterator() {
+         return new ValueIterator(immortalEntries.values().iterator(), mortalEntries.values().iterator());
+      }
+
+      @Override
+      public int size() {
+         return immortalEntries.size() + mortalEntries.size();
+      }
+   }
+
+   private class ValueIterator extends MortalInmortalIterator implements Iterator<Object> {
+      private ValueIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
+         super(immortalIterator, mortalIterator);
+      }
+
+      public Object next() {
+         return currentIterator.next().getValue();
+      }
+   }
+}
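
The two static factories above are the intended entry points; a usage sketch, with the numeric
arguments purely illustrative:

   // unbounded: two plain ConcurrentHashMaps, no eviction support
   DataContainer unbounded = DefaultDataContainer.unBoundedDataContainer(32);

   // bounded: immortal entries live in a BoundedConcurrentHashMap capped at maxEntries
   DataContainer bounded = DefaultDataContainer.boundedDataContainer(
         32, 1000, EvictionStrategy.LIRS, EvictionThreadPolicy.DEFAULT);

With DEFAULT the map's listener merely records evicted entries for the eviction thread to drain
via getEvictionCandidates(); with PIGGYBACK the writing thread that overflowed the map performs
the eviction itself through cache.getAdvancedCache().evict(key).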

Modified: trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -8,6 +8,7 @@
 import java.util.AbstractCollection;
 import java.util.AbstractSet;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
@@ -38,6 +39,7 @@
  * @since 4.0
  */
 @ThreadSafe
+@Deprecated
 public class FIFODataContainer implements DataContainer {
 
    InternalEntryFactory entryFactory = new InternalEntryFactory(false, false);
@@ -860,4 +862,9 @@
    public Iterator<InternalCacheEntry> iterator() {
       return new EntryIterator();
    }
+
+   @Override
+   public Set<InternalCacheEntry> getEvictionCandidates() {
+      return Collections.emptySet();
+   }
 }

Modified: trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -9,7 +9,7 @@
 import java.util.Iterator;
 
 /**
- * Based on the same techniques outlined in the {@link SimpleDataContainer}, this implementation always forces the
+ * Based on the same techniques outlined in the {@link DefaultDataContainer}, this implementation always forces the
  * collection of creation timestamps for entries.  This means that {@link org.infinispan.container.entries.ImmortalCacheEntry}
  * and {@link org.infinispan.container.entries.TransientCacheEntry} are never used, since only {@link org.infinispan.container.entries.MortalCacheEntry}
  * and {@link org.infinispan.container.entries.TransientMortalCacheEntry} instances capture timestamps.
@@ -26,8 +26,9 @@
  * @author Manik Surtani
  * @since 4.0
  */
+@Deprecated
 @ThreadSafe
-public class FIFOSimpleDataContainer extends SimpleDataContainer {
+public class FIFOSimpleDataContainer extends DefaultDataContainer {
    // This is to facilitate faster sorting.  DO we really care about millisecond accuracy when ordering the collection?
    final static int DEFAULT_TIMESTAMP_GRANULARITY = 1000;
 

Modified: trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -20,6 +20,7 @@
  * @since 4.0
  */
 @ThreadSafe
+@Deprecated
 public class LRUDataContainer extends FIFODataContainer {
 
    public LRUDataContainer(int concurrencyLevel) {

Modified: trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -6,7 +6,7 @@
 import java.util.Comparator;
 
 /**
- * Based on the same techniques outlined in the {@link SimpleDataContainer}, this implementation always forces the
+ * Based on the same techniques outlined in the {@link DefaultDataContainer}, this implementation always forces the
  * collection of last used timestamps for entries.  This means that {@link org.infinispan.container.entries.ImmortalCacheEntry}
  * and {@link org.infinispan.container.entries.MortalCacheEntry} are never used, since only {@link org.infinispan.container.entries.TransientCacheEntry}
  * and {@link org.infinispan.container.entries.TransientMortalCacheEntry} instances capture timestamps.
@@ -24,6 +24,7 @@
  * @since 4.0
  */
 @ThreadSafe
+@Deprecated
 public class LRUSimpleDataContainer extends FIFOSimpleDataContainer {
    public LRUSimpleDataContainer(int concurrencyLevel) {
       super(concurrencyLevel, false, true, new LRUComparator(DEFAULT_TIMESTAMP_GRANULARITY));

Deleted: trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -1,299 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.InternalEntryFactory;
-import org.infinispan.util.Immutables;
-
-import java.util.AbstractCollection;
-import java.util.AbstractSet;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Simple data container that does not order entries for eviction, implemented using two ConcurrentHashMaps, one for
- * mortal and one for immortal entries.
- * <p/>
- * This container does not support eviction, in that entries are unsorted.
- * <p/>
- * This implementation offers O(1) performance for all operations.
- *
- * @author Manik Surtani
- * @author Galder Zamarreño
- * @since 4.0
- */
-@ThreadSafe
-public class SimpleDataContainer implements DataContainer {
-   final ConcurrentMap<Object, InternalCacheEntry> immortalEntries;
-   final ConcurrentMap<Object, InternalCacheEntry> mortalEntries;
-   final AtomicInteger numEntries = new AtomicInteger(0);
-   final InternalEntryFactory entryFactory;
-
-
-   public SimpleDataContainer(int concurrencyLevel) {
-      this(concurrencyLevel, false, false);
-   }
-
-   SimpleDataContainer(int concurrencyLevel, boolean recordCreation, boolean recordLastUsed) {
-      immortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(128, 0.75f, concurrencyLevel);
-      mortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(64, 0.75f, concurrencyLevel);
-      entryFactory = new InternalEntryFactory(recordCreation, recordLastUsed);
-   }
-
-   public InternalCacheEntry peek(Object key) {
-      InternalCacheEntry e = immortalEntries.get(key);
-      if (e == null) e = mortalEntries.get(key);
-      return e;
-   }
-
-   public InternalCacheEntry get(Object k) {
-      InternalCacheEntry e = peek(k);
-      if (e != null) {
-         if (e.isExpired()) {
-            mortalEntries.remove(k);
-            numEntries.getAndDecrement();
-            e = null;
-         } else {
-            e.touch();
-         }
-      }
-      return e;
-   }
-
-   protected void successfulPut(InternalCacheEntry ice, boolean newEntry) {
-      // no-op
-   }
-
-   public void put(Object k, Object v, long lifespan, long maxIdle) {
-      InternalCacheEntry e = immortalEntries.get(k);
-      if (e != null) {
-         e.setValue(v);
-         e = entryFactory.update(e, lifespan, maxIdle);
-
-         if (e.canExpire()) {
-            immortalEntries.remove(k);
-            mortalEntries.put(k, e);
-         }
-         successfulPut(e, false);
-      } else {
-         e = mortalEntries.get(k);
-         if (e != null) {
-            e.setValue(v);
-            InternalCacheEntry original = e;
-            e = entryFactory.update(e, lifespan, maxIdle);
-
-            if (!e.canExpire()) {
-               mortalEntries.remove(k);
-               immortalEntries.put(k, e);
-            } else if (e != original) {
-               // the entry has changed type, but still can expire!
-               mortalEntries.put(k, e);
-            }
-            successfulPut(e, false);
-         } else {
-            // this is a brand-new entry
-            numEntries.getAndIncrement();
-            e = entryFactory.createNewEntry(k, v, lifespan, maxIdle);
-            if (e.canExpire())
-               mortalEntries.put(k, e);
-            else
-               immortalEntries.put(k, e);
-            successfulPut(e, true);
-         }
-      }
-   }
-
-   public boolean containsKey(Object k) {
-      InternalCacheEntry ice = peek(k);
-      if (ice != null && ice.isExpired()) {
-         mortalEntries.remove(k);
-         numEntries.getAndDecrement();
-         ice = null;
-      }
-      return ice != null;
-   }
-
-   public InternalCacheEntry remove(Object k) {
-      InternalCacheEntry e = immortalEntries.remove(k);
-      if (e == null) e = mortalEntries.remove(k);
-      if (e != null) numEntries.getAndDecrement();
-
-      return e == null || e.isExpired() ? null : e;
-   }
-
-   public int size() {
-      return numEntries.get();
-   }
-
-   public void clear() {
-      immortalEntries.clear();
-      mortalEntries.clear();
-      numEntries.set(0);
-   }
-
-   public Set<Object> keySet() {
-      return new KeySet();
-   }
-
-   public Collection<Object> values() {
-      return new Values();
-   }
-
-   public Set<InternalCacheEntry> entrySet() {
-      return new EntrySet();
-   }
-
-   public void purgeExpired() {
-      for (Iterator<InternalCacheEntry> entries = mortalEntries.values().iterator(); entries.hasNext();) {
-         InternalCacheEntry e = entries.next();
-         if (e.isExpired()) {
-            entries.remove();
-            numEntries.getAndDecrement();
-         }
-      }
-   }
-
-   public Iterator<InternalCacheEntry> iterator() {
-      return new EntryIterator(immortalEntries.values().iterator(), mortalEntries.values().iterator());
-   }
-
-   private class KeySet extends AbstractSet<Object> {
-      final Set<Object> immortalKeys;
-      final Set<Object> mortalKeys;
-
-      public KeySet() {
-         immortalKeys = immortalEntries.keySet();
-         mortalKeys = mortalEntries.keySet();
-      }
-
-      public Iterator<Object> iterator() {
-         return new KeyIterator(immortalKeys.iterator(), mortalKeys.iterator());
-      }
-
-      public void clear() {
-         throw new UnsupportedOperationException();
-      }
-
-      public boolean contains(Object o) {
-         return immortalKeys.contains(o) || mortalKeys.contains(o);
-      }
-
-      public boolean remove(Object o) {
-         throw new UnsupportedOperationException();
-      }
-
-      public int size() {
-         return immortalKeys.size() + mortalKeys.size();
-      }
-   }
-
-   private static class KeyIterator implements Iterator<Object> {
-      Iterator<Iterator<Object>> metaIterator;
-      Iterator<Object> currentIterator;
-
-      private KeyIterator(Iterator<Object> immortalIterator, Iterator<Object> mortalIterator) {
-         metaIterator = Arrays.asList(immortalIterator, mortalIterator).iterator();
-         if (metaIterator.hasNext()) currentIterator = metaIterator.next();
-      }
-
-      public boolean hasNext() {
-         boolean hasNext = currentIterator.hasNext();
-         while (!hasNext && metaIterator.hasNext()) {
-            currentIterator = metaIterator.next();
-            hasNext = currentIterator.hasNext();
-         }
-         return hasNext;
-      }
-
-      @SuppressWarnings("unchecked")
-      public Object next() {
-         return currentIterator.next();
-      }
-
-      public void remove() {
-         throw new UnsupportedOperationException();
-      }
-   }
-
-   private class EntrySet extends AbstractSet<InternalCacheEntry> {
-      public Iterator<InternalCacheEntry> iterator() {
-         return new ImmutableEntryIterator(immortalEntries.values().iterator(), mortalEntries.values().iterator());
-      }
-
-      @Override
-      public int size() {
-         return immortalEntries.size() + mortalEntries.size();
-      }
-   }
-
-   private static class MortalInmortalIterator {
-      Iterator<Iterator<InternalCacheEntry>> metaIterator;
-      Iterator<InternalCacheEntry> currentIterator;
-
-      private MortalInmortalIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
-         metaIterator = Arrays.asList(immortalIterator, mortalIterator).iterator();
-         if (metaIterator.hasNext()) currentIterator = metaIterator.next();
-      }
-
-      public boolean hasNext() {
-         boolean hasNext = currentIterator.hasNext();
-         while (!hasNext && metaIterator.hasNext()) {
-            currentIterator = metaIterator.next();
-            hasNext = currentIterator.hasNext();
-         }
-         return hasNext;
-      }
-
-      public void remove() {
-         throw new UnsupportedOperationException();
-      }
-   }
-
-   private class EntryIterator extends MortalInmortalIterator implements Iterator<InternalCacheEntry> {
-      private EntryIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
-         super(immortalIterator, mortalIterator);
-      }
-
-      @SuppressWarnings("unchecked")
-      public InternalCacheEntry next() {
-         return currentIterator.next();
-      }
-   }
-
-   private class ImmutableEntryIterator extends MortalInmortalIterator implements Iterator<InternalCacheEntry> {
-      private ImmutableEntryIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
-         super(immortalIterator, mortalIterator);
-      }
-
-      public InternalCacheEntry next() {
-         return Immutables.immutableInternalCacheEntry(currentIterator.next());
-      }
-   }
-
-   private class Values extends AbstractCollection<Object> {
-      @Override
-      public Iterator<Object> iterator() {
-         return new ValueIterator(immortalEntries.values().iterator(), mortalEntries.values().iterator());
-      }
-
-      @Override
-      public int size() {
-         return immortalEntries.size() + mortalEntries.size();
-      }
-   }
-
-   private class ValueIterator extends MortalInmortalIterator implements Iterator<Object> {
-      private ValueIterator(Iterator<InternalCacheEntry> immortalIterator, Iterator<InternalCacheEntry> mortalIterator) {
-         super(immortalIterator, mortalIterator);
-      }
-
-      public Object next() {
-         return currentIterator.next().getValue();
-      }
-   }
-}

Modified: trunk/core/src/main/java/org/infinispan/eviction/EvictionManagerImpl.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/eviction/EvictionManagerImpl.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/eviction/EvictionManagerImpl.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -17,6 +17,7 @@
 import org.infinispan.util.logging.Log;
 import org.infinispan.util.logging.LogFactory;
 
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
@@ -28,7 +29,7 @@
 public class EvictionManagerImpl implements EvictionManager {
    private static final Log log = LogFactory.getLog(EvictionManagerImpl.class);
    private static final boolean trace = log.isTraceEnabled();
-   ScheduledFuture evictionTask;
+   ScheduledFuture<?> evictionTask;
 
    // components to be injected
    ScheduledExecutorService executor;
@@ -108,28 +109,23 @@
       }
 
       // finally iterate through data container if too big
-      int dcsz = dataContainer.size();
-      if (dcsz > maxEntries) {
+      Set<InternalCacheEntry> evictionCandidates = dataContainer.getEvictionCandidates();
+      if (!evictionCandidates.isEmpty()) {
          AdvancedCache<Object, Object> ac = cache.getAdvancedCache();
          if (trace) {
-            log.trace("Data container is larger than maxEntries, size is {0}.  Evicting...", dcsz);
+            log.trace("Evicting data container entries");
             start = System.currentTimeMillis();
-         }
-         for (InternalCacheEntry ice : dataContainer) {
-            Object k = ice.getKey();
-            try {
-               dcsz = dataContainer.size();
-               if (dcsz > maxEntries) {
+         } 
+         for (InternalCacheEntry entry : evictionCandidates) {
+            Object k = entry.getKey();
+            try {                
                   if (trace) log.trace("Attempting to evict key [{0}]", k);
                   ac.withFlags(FAIL_SILENTLY).evict(k);
-               } else {
-                  if (trace) log.trace("Evicted enough entries");
-                  break;
                }
-            } catch (Exception e) {
+            catch (Exception e) {
                log.warn("Caught exception when iterating through data container.  Current entry is under key [{0}]", e, k);
             }
-         }
+         }               
          if (trace)
             log.trace("Eviction process completed in {0}", Util.prettyPrintTime(System.currentTimeMillis() - start));
       } else {

Modified: trunk/core/src/main/java/org/infinispan/eviction/EvictionStrategy.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/eviction/EvictionStrategy.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/eviction/EvictionStrategy.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -10,7 +10,8 @@
    NONE,
    UNORDERED,
    FIFO,
-   LRU;
+   LRU,
+   LIRS;
    
    public boolean isEnabled() {
       return this != NONE;

Added: trunk/core/src/main/java/org/infinispan/eviction/EvictionThreadPolicy.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/eviction/EvictionThreadPolicy.java	                        (rev 0)
+++ trunk/core/src/main/java/org/infinispan/eviction/EvictionThreadPolicy.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -0,0 +1,12 @@
+package org.infinispan.eviction;
+
+/**
+ * Supported eviction thread policies.
+ *
+ * @author Vladimir Blagojevic
+ * @since 4.0
+ */
+public enum EvictionThreadPolicy {
+   PIGGYBACK,
+   DEFAULT;  
+}
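
The two values differ only in which thread pays for eviction, as wired up in DefaultDataContainer
above. A sketch of selecting one, reusing the hypothetical cfg object from the Configuration
sketch earlier:

   // DEFAULT: evicted entries are queued and drained later by the eviction thread
   cfg.setEvictionThreadPolicy(EvictionThreadPolicy.DEFAULT);

   // PIGGYBACK: the user thread whose write overflowed the map evicts inline,
   // trading some write latency for immediate enforcement of the bound
   cfg.setEvictionThreadPolicy(EvictionThreadPolicy.PIGGYBACK);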

Modified: trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -23,32 +23,40 @@
 
 import org.infinispan.config.ConfigurationException;
 import org.infinispan.container.DataContainer;
-import org.infinispan.container.FIFOSimpleDataContainer;
-import org.infinispan.container.LRUSimpleDataContainer;
-import org.infinispan.container.SimpleDataContainer;
+import org.infinispan.container.DefaultDataContainer;
+import org.infinispan.eviction.EvictionStrategy;
+import org.infinispan.eviction.EvictionThreadPolicy;
 import org.infinispan.factories.annotations.DefaultFactoryFor;
 
 /**
  * Constructs the data container
- *
+ * 
  * @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
+ * @author Vladimir Blagojevic
  * @since 4.0
  */
 @DefaultFactoryFor(classes = DataContainer.class)
-public class DataContainerFactory extends AbstractNamedCacheComponentFactory implements AutoInstantiableFactory {
+public class DataContainerFactory extends AbstractNamedCacheComponentFactory implements
+         AutoInstantiableFactory {
 
    @SuppressWarnings("unchecked")
    public <T> T construct(Class<T> componentType) {
-      switch (configuration.getEvictionStrategy()) {
+      EvictionStrategy st = configuration.getEvictionStrategy();
+      int level = configuration.getConcurrencyLevel();
+     
+      switch (st) {
          case NONE:
          case UNORDERED:
-            return (T) new SimpleDataContainer(configuration.getConcurrencyLevel());
+            return (T) DefaultDataContainer.unBoundedDataContainer(level);
+         case LRU:
          case FIFO:
-            return (T) new FIFOSimpleDataContainer(configuration.getConcurrencyLevel());
-         case LRU:
-            return (T) new LRUSimpleDataContainer(configuration.getConcurrencyLevel());
+         case LIRS:
+            int maxEntries = configuration.getEvictionMaxEntries();
+            EvictionThreadPolicy policy = configuration.getEvictionThreadPolicy();
+            return (T) DefaultDataContainer.boundedDataContainer(level, maxEntries, st, policy);
          default:
-            throw new ConfigurationException("Unknown eviction strategy " + configuration.getEvictionStrategy());
+            throw new ConfigurationException("Unknown eviction strategy "
+                     + configuration.getEvictionStrategy());
       }
    }
 }
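
The new map, added below, is also usable on its own. A sketch assuming the
(capacity, concurrencyLevel, Eviction, EvictionListener) constructor that
DefaultDataContainer.boundedDataContainer() invokes above; Eviction and EvictionListener are
nested types of BoundedConcurrentHashMap:

   EvictionListener<String, String> listener = new EvictionListener<String, String>() {
      public void evicted(String key, String value) {
         System.out.println("evicted: " + key);   // illustrative only
      }
   };
   ConcurrentMap<String, String> map = new BoundedConcurrentHashMap<String, String>(
         512, 16, Eviction.LIRS, listener);       // capacity 512, concurrency level 16, LIRS

Entries pushed out by the LIRS (or LRU) policy are reported through the listener rather than
dropped silently.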

Added: trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java	                        (rev 0)
+++ trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -0,0 +1,1860 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ * 
+ * Modified for https://jira.jboss.org/jira/browse/ISPN-299
+ * Includes ideas described in http://portal.acm.org/citation.cfm?id=1547428
+ * 
+ */
+
+package org.infinispan.util.concurrent;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.*;
+import java.util.*;
+import java.io.Serializable;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+
+/**
+ * A hash table supporting full concurrency of retrievals and
+ * adjustable expected concurrency for updates. This class obeys the
+ * same functional specification as {@link java.util.Hashtable}, and
+ * includes versions of methods corresponding to each method of
+ * <tt>Hashtable</tt>. However, even though all operations are
+ * thread-safe, retrieval operations do <em>not</em> entail locking,
+ * and there is <em>not</em> any support for locking the entire table
+ * in a way that prevents all access.  This class is fully
+ * interoperable with <tt>Hashtable</tt> in programs that rely on its
+ * thread safety but not on its synchronization details.
+ *
+ * <p> Retrieval operations (including <tt>get</tt>) generally do not
+ * block, so may overlap with update operations (including
+ * <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results
+ * of the most recently <em>completed</em> update operations holding
+ * upon their onset.  For aggregate operations such as <tt>putAll</tt>
+ * and <tt>clear</tt>, concurrent retrievals may reflect insertion or
+ * removal of only some entries.  Similarly, Iterators and
+ * Enumerations return elements reflecting the state of the hash table
+ * at some point at or since the creation of the iterator/enumeration.
+ * They do <em>not</em> throw {@link ConcurrentModificationException}.
+ * However, iterators are designed to be used by only one thread at a time.
+ *
+ * <p> The allowed concurrency among update operations is guided by
+ * the optional <tt>concurrencyLevel</tt> constructor argument
+ * (default <tt>16</tt>), which is used as a hint for internal sizing.  The
+ * table is internally partitioned to try to permit the indicated
+ * number of concurrent updates without contention. Because placement
+ * in hash tables is essentially random, the actual concurrency will
+ * vary.  Ideally, you should choose a value to accommodate as many
+ * threads as will ever concurrently modify the table. Using a
+ * significantly higher value than you need can waste space and time,
+ * and a significantly lower value can lead to thread contention. But
+ * overestimates and underestimates within an order of magnitude do
+ * not usually have much noticeable impact. A value of one is
+ * appropriate when it is known that only one thread will modify and
+ * all others will only read. Also, resizing this or any other kind of
+ * hash table is a relatively slow operation, so, when possible, it is
+ * a good idea to provide estimates of expected table sizes in
+ * constructors.
+ *
+ * <p>This class and its views and iterators implement all of the
+ * <em>optional</em> methods of the {@link Map} and {@link Iterator}
+ * interfaces.
+ *
+ * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
+ * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.5
+ * @author Doug Lea
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ */
+public class BoundedConcurrentHashMap<K, V> extends AbstractMap<K, V>
+        implements ConcurrentMap<K, V>, Serializable {
+    private static final long serialVersionUID = 7249069246763182397L;
+
+    /*
+     * The basic strategy is to subdivide the table among Segments,
+     * each of which itself is a concurrently readable hash table.
+     */
+
+    /* ---------------- Constants -------------- */
+
+    /**
+     * The default maximum capacity for this table,
+     * used when not otherwise specified in a constructor.
+     */
+    static final int DEFAULT_MAXIMUM_CAPACITY = 512;
+
+    /**
+     * The default load factor for this table, used when not
+     * otherwise specified in a constructor.
+     */
+    static final float DEFAULT_LOAD_FACTOR = 0.75f;
+
+    /**
+     * The default concurrency level for this table, used when not
+     * otherwise specified in a constructor.
+     */
+    static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+
+    /**
+     * The maximum capacity, used if a higher value is implicitly
+     * specified by either of the constructors with arguments.  MUST
+     * be a power of two <= 1<<30 to ensure that entries are indexable
+     * using ints.
+     */
+    static final int MAXIMUM_CAPACITY = 1 << 30;
+
+    /**
+     * The maximum number of segments to allow; used to bound
+     * constructor arguments.
+     */
+    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
+
+    /**
+     * Number of unsynchronized retries in size and containsValue
+     * methods before resorting to locking. This is used to avoid
+     * unbounded retries if tables undergo continuous modification
+     * which would make it impossible to obtain an accurate result.
+     */
+    static final int RETRIES_BEFORE_LOCK = 2;
+
+    /* ---------------- Fields -------------- */
+
+    /**
+     * Mask value for indexing into segments. The upper bits of a
+     * key's hash code are used to choose the segment.
+     */
+    final int segmentMask;
+
+    /**
+     * Shift value for indexing within segments.
+     */
+    final int segmentShift;
+
+    /**
+     * The segments, each of which is a specialized hash table
+     */
+    final Segment<K,V>[] segments;
+
+    transient Set<K> keySet;
+    transient Set<Map.Entry<K,V>> entrySet;
+    transient Collection<V> values;
+
+    /* ---------------- Small Utilities -------------- */
+
+    /**
+     * Applies a supplemental hash function to a given hashCode, which
+     * defends against poor quality hash functions.  This is critical
+     * because ConcurrentHashMap uses power-of-two length hash tables,
+     * that otherwise encounter collisions for hashCodes that do not
+     * differ in lower or upper bits.
+     */
+    private static int hash(int h) {
+        // Spread bits to regularize both segment and index locations,
+        // using variant of single-word Wang/Jenkins hash.
+        h += (h <<  15) ^ 0xffffcd7d;
+        h ^= (h >>> 10);
+        h += (h <<   3);
+        h ^= (h >>>  6);
+        h += (h <<   2) + (h << 14);
+        return h ^ (h >>> 16);
+    }
+
+    /**
+     * Returns the segment that should be used for key with given hash
+     * @param hash the hash code for the key
+     * @return the segment
+     */
+    final Segment<K,V> segmentFor(int hash) {
+        return segments[(hash >>> segmentShift) & segmentMask];
+    }
+
+    /* ---------------- Inner Classes -------------- */
+
+    /**
+     * ConcurrentHashMap list entry. Note that this is never exported
+     * out as a user-visible Map.Entry.
+     *
+     * Because the value field is volatile, not final, it is legal wrt
+     * the Java Memory Model for an unsynchronized reader to see null
+     * instead of initial value when read via a data race.  Although a
+     * reordering leading to this is not likely to ever actually
+     * occur, the Segment.readValueUnderLock method is used as a
+     * backup in case a null (pre-initialized) value is ever seen in
+     * an unsynchronized access method.
+     */
+    static final class HashEntry<K, V> {
+       final K key;
+       final int hash;
+       volatile V value;
+       final HashEntry<K, V> next;
+       volatile Recency state;
+
+       HashEntry(K key, int hash, HashEntry<K, V> next, V value) {
+           this.key = key;
+           this.hash = hash;
+           this.next = next;
+           this.value = value;
+           this.state = Recency.HIR_RESIDENT;
+       }
+
+       public int hashCode() {
+           int result = 17;
+           result = (result * 31) + hash;
+           result = (result * 31) + key.hashCode();
+           return result;
+       }
+
+       public boolean equals(Object o) {
+           // HashEntry is internal class, never leaks out of CHM, hence slight optimization
+           if (this == o)
+               return true;
+           if (o == null)
+               return false;
+           HashEntry<?, ?> other = (HashEntry<?, ?>) o;
+           return hash == other.hash && key.equals(other.key);
+       }
+
+       public void transitionToLIRResident() {           
+           state = Recency.LIR_RESIDENT;
+       }
+
+       public void transitionHIRResidentToHIRNonResident() {            
+           state = Recency.HIR_NONRESIDENT;
+       }
+       
+       public void transitionLIRResidentToHIRResident() {            
+           state = Recency.HIR_RESIDENT;
+       }
+
+       public Recency recency() {
+           return state;
+       }
+
+       @SuppressWarnings("unchecked")
+       static <K, V> HashEntry<K, V>[] newArray(int i) {
+           return new HashEntry[i];
+       }
+   }
+    
+    private enum Recency {
+       HIR_RESIDENT, LIR_RESIDENT, HIR_NONRESIDENT
+   }
+
+   public enum Eviction {
+       NONE {
+           @Override
+           public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+               return new NullEvictionPolicy<K, V>();
+           }
+       },
+       LRU {
+
+           @Override
+           public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+               return new LRU<K, V>(s,capacity,lf,capacity*10,lf);
+           }
+       },
+       LIRS {
+           @Override
+           public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+               return new LIRS<K,V>(s,capacity,lf,capacity*10,lf);
+           }
+       };
+
+       abstract <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf);
+   }
+   
+   public interface EvictionListener<K, V> {
+       void evicted(K key, V value);
+   }
+   
+   static class NullEvictionListener<K,V> implements EvictionListener<K, V>{
+       @Override
+       public void evicted(K key, V value) {            
+       }        
+   }
+
+   public interface EvictionPolicy<K, V> {
+
+       public final static int MAX_BATCH_SIZE = 64;
+
+       /**
+        * Invokes eviction policy algorithm and returns set of evicted entries.
+        * 
+        * <p>
+        * Set cannot be null but could possibly be an empty set.
+        * 
+        * @return set of evicted entries.
+        */
+       Set<HashEntry<K, V>> execute();
+
+       /**
+        * Invoked to notify EvictionPolicy implementation that there has been an attempt to access
+        * an entry in Segment, however that entry was not present in Segment.
+        * 
+        * @param e
+        *            accessed entry in Segment
+        */
+       void onEntryMiss(HashEntry<K, V> e);
+
+       /**
+        * Invoked to notify EvictionPolicy implementation that an entry in Segment has been
+        * accessed. Returns true if batching threshold has been reached, false otherwise.
+        * <p>
+        * Note that this method is potentially invoked without holding a lock on Segment.
+        * 
+        * @return true if batching threshold has been reached, false otherwise.
+        * 
+        * @param e
+        *            accessed entry in Segment
+        */
+       boolean onEntryHit(HashEntry<K, V> e);
+
+       /**
+        * Invoked to notify EvictionPolicy implementation that an entry e has been removed from
+        * Segment.
+        * 
+        * @param e
+        *            removed entry in Segment
+        */
+       void onEntryRemove(HashEntry<K, V> e);
+
+       /**
+        * Invoked to notify EvictionPolicy implementation that all Segment entries have been
+        * cleared.
+        * 
+        */
+       void clear();
+
+       /**
+        * Returns type of eviction algorithm (strategy).
+        * 
+        * @return type of eviction algorithm
+        */
+       Eviction strategy();
+
+       /**
+        * Returns true if batching threshold has expired, false otherwise.
+        * <p>
+        * Note that this method is potentially invoked without holding a lock on Segment.
+        * 
+        * @return true if batching threshold has expired, false otherwise.
+        */
+       boolean thresholdExpired();
+   }
+
+   static class NullEvictionPolicy<K, V> implements EvictionPolicy<K, V> {
+
+       @Override
+       public void clear() {
+       }
+
+       @Override
+       public Set<HashEntry<K, V>> execute() {
+           return Collections.emptySet();
+       }
+
+       @Override
+       public boolean onEntryHit(HashEntry<K, V> e) {
+           return false;
+       }
+
+       @Override
+       public void onEntryMiss(HashEntry<K, V> e) {
+       }
+
+       @Override
+       public void onEntryRemove(HashEntry<K, V> e) {
+       }
+
+       @Override
+       public boolean thresholdExpired() {
+           return false;
+       }
+
+       @Override
+       public Eviction strategy() {
+           return Eviction.NONE;
+       }
+   }
+
+   static final class LRU<K, V> implements EvictionPolicy<K, V> {
+       private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
+       private final Segment<K,V> segment;
+       private final LinkedList<HashEntry<K, V>> lruQueue;
+       private final int maxBatchQueueSize;
+       private final int trimDownSize;
+       private final float batchThresholdFactor;
+
+       public LRU(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
+           this.segment = s;
+           this.trimDownSize = (int) (capacity * lf);
+           this.maxBatchQueueSize = Math.min(maxBatchSize, MAX_BATCH_SIZE);
+           this.batchThresholdFactor = batchThresholdFactor;
+           this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
+           this.lruQueue = new LinkedList<HashEntry<K, V>>();
+       }
+
+       @Override
+       public Set<HashEntry<K, V>> execute() {
+           Set<HashEntry<K, V>> evicted = Collections.emptySet();
+           if (isOverflow()) {
+               evicted = new HashSet<HashEntry<K, V>>();
+           }
+           try {
+               for (HashEntry<K, V> e : accessQueue) {
+                   if (lruQueue.remove(e)) {
+                       lruQueue.addFirst(e);
+                   }
+               }
+               while (isOverflow()) {
+                   // the tail of lruQueue holds the least recently used entry;
+                   // Segment.remove() calls back into onEntryRemove(), which
+                   // pulls it out of lruQueue and lets the loop terminate
+                   HashEntry<K, V> lru = lruQueue.getLast();
+                   segment.remove(lru.key, lru.hash, null);
+                   evicted.add(lru);
+               }
+           } finally {
+               accessQueue.clear();
+           }
+           return evicted;
+       }
+
+       private boolean isOverflow() {
+           return lruQueue.size() > trimDownSize;
+       }
+
+       @Override
+       public void onEntryMiss(HashEntry<K, V> e) {
+           lruQueue.addFirst(e);
+       }
+
+       /*
+        * Invoked without holding a lock on Segment
+        */
+       @Override
+       public boolean onEntryHit(HashEntry<K, V> e) {
+           accessQueue.add(e);
+           return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
+       }
+
+       /*
+        * Invoked without holding a lock on Segment
+        */
+       @Override
+       public boolean thresholdExpired() {
+           return accessQueue.size() >= maxBatchQueueSize;
+       }
+
+       @Override
+       public void onEntryRemove(HashEntry<K, V> e) {
+           lruQueue.remove(e);
+           // we could have multiple instances of e in accessQueue; remove them all
+           while (accessQueue.remove(e));
+       }
+
+       @Override
+       public void clear() {
+           lruQueue.clear();
+           accessQueue.clear();
+       }
+
+       @Override
+       public Eviction strategy() {
+           return Eviction.LRU;
+       }
+   }
+
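+   /**
+    * Eviction policy based on the LIRS (Low Inter-reference Recency Set)
+    * algorithm referenced by ISPN-299. Capacity is split into a LIR (hot)
+    * set of roughly 90% and an HIR (cold) set of the remainder, floored at
+    * MIN_HIR_SIZE. The LinkedHashMap "stack", keyed by entry hash code,
+    * plays the role of the LIRS stack S, and the LinkedList "queue" holds
+    * resident HIR entries (LIRS queue Q). An HIR entry hit while still on
+    * the stack is promoted to LIR, demoting the bottommost LIR entry. As in
+    * LRU above, accesses are batched in accessQueue and replayed by
+    * execute() under the segment lock.
+    */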
+   static final class LIRS<K, V> implements EvictionPolicy<K, V> {
+       private static final int MIN_HIR_SIZE = 2;
+       private final Segment<K,V> segment;
+       private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
+       private final LinkedHashMap<Integer, HashEntry<K, V>> stack;
+       private final LinkedList<HashEntry<K, V>> queue;
+       private final int maxBatchQueueSize;
+       private final int lirSizeLimit;
+       private final int hirSizeLimit;
+       private int currentLIRSize;
+       private final float batchThresholdFactor;
+
+       public LIRS(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
+           this.segment = s;
+           int tmpLirSize = (int) (capacity * 0.9);
+           int tmpHirSizeLimit = capacity - tmpLirSize;
+           if (tmpHirSizeLimit < MIN_HIR_SIZE) {
+               hirSizeLimit = MIN_HIR_SIZE;
+               lirSizeLimit = capacity - hirSizeLimit;
+           } else {
+               hirSizeLimit = tmpHirSizeLimit;
+               lirSizeLimit = tmpLirSize;
+           }
+           this.maxBatchQueueSize = Math.min(maxBatchSize, MAX_BATCH_SIZE);
+           this.batchThresholdFactor = batchThresholdFactor;
+           this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
+           this.stack = new LinkedHashMap<Integer, HashEntry<K, V>>();
+           this.queue = new LinkedList<HashEntry<K, V>>();
+       }
+
+       @Override
+       public Set<HashEntry<K, V>> execute() {
+           Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
+           try {
+               for (HashEntry<K, V> e : accessQueue) {
+                   if (present(e)) {
+                       if (e.recency() == Recency.LIR_RESIDENT) {
+                           handleLIRHit(e, evicted);
+                       } else if (e.recency() == Recency.HIR_RESIDENT) {
+                           handleHIRHit(e, evicted);
+                       }
+                   }
+               }
+               removeFromSegment(evicted);
+           } finally {
+               accessQueue.clear();
+           }
+           return evicted;
+       }
+
+       private void handleHIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
+           boolean inStack = stack.containsKey(e.hashCode());
+           if (inStack)
+               stack.remove(e.hashCode());
+
+           // first, (re)insert e at the top of the stack
+           stack.put(e.hashCode(), e);
+
+           if (inStack) {
+               // HIR entry hit while still on the stack: promote it to LIR
+               queue.remove(e);
+               e.transitionToLIRResident();
+               switchBottommostLIRtoHIRAndPrune(evicted);
+           } else {
+               // otherwise it remains HIR resident; move it to the tail of the queue
+               queue.remove(e);
+               queue.addLast(e);
+           }
+       }
+
+       private void handleLIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
+           stack.remove(e.hashCode());
+           stack.put(e.hashCode(), e);
+           for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
+               HashEntry<K, V> next = i.next();
+               if (next.recency() == Recency.LIR_RESIDENT) {
+                   break;
+               } else {
+                   i.remove();
+                   evicted.add(next);
+               }
+           }
+       }
+
+       private boolean present(HashEntry<K, V> e) {
+           return stack.containsKey(e.hashCode()) || queue.contains(e);
+       }
+
+       @Override
+       public void onEntryMiss(HashEntry<K, V> e) {
+           // LIR set not yet full: new entries start out as LIR residents
+           if (currentLIRSize + 1 < lirSizeLimit) {
+               currentLIRSize++;
+               e.transitionToLIRResident();
+               stack.put(e.hashCode(), e);
+           } else {
+               if (queue.size() < hirSizeLimit) {                    
+                   queue.addLast(e);
+               } else {
+                   boolean inStack = stack.containsKey(e.hashCode());
+                   HashEntry<K, V> first = queue.removeFirst();                    
+                   first.transitionHIRResidentToHIRNonResident();
+
+                   stack.put(e.hashCode(), e);
+
+                   if (inStack) {
+                       e.transitionToLIRResident();
+                       Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
+                       switchBottommostLIRtoHIRAndPrune(evicted);
+                       removeFromSegment(evicted);
+                   } else {                        
+                       queue.addLast(e);
+                   }
+
+                   // evict from segment
+                   segment.remove(first.key, first.hash, null);
+               }
+           }
+       }
+
+       private void removeFromSegment(Set<HashEntry<K, V>> evicted) {
+           for (HashEntry<K, V> e : evicted) {
+               segment.remove(e.key, e.hash, null);
+           }
+       }
+
+       private void switchBottommostLIRtoHIRAndPrune(Set<HashEntry<K, V>> evicted) {
+           boolean seenFirstLIR = false;
+           for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
+               HashEntry<K, V> next = i.next();
+               if (next.recency() == Recency.LIR_RESIDENT) {
+                   if (!seenFirstLIR) {
+                       seenFirstLIR = true;
+                       i.remove();
+                       next.transitionLIRResidentToHIRResident();                       
+                       queue.addLast(next);
+                   } else {
+                       break;
+                   }
+               } else {
+                   i.remove();
+                   evicted.add(next);
+               }
+           }
+       }
+
+       /*
+        * Invoked without holding a lock on Segment
+        */
+       @Override
+       public boolean onEntryHit(HashEntry<K, V> e) {
+           accessQueue.add(e);
+           return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
+       }
+
+       /*
+        * Invoked without holding a lock on Segment
+        */
+       @Override
+       public boolean thresholdExpired() {
+           return accessQueue.size() >= maxBatchQueueSize;
+       }
+
+       @Override
+       public void onEntryRemove(HashEntry<K, V> e) {
+           HashEntry<K, V> removed = stack.remove(e.hashCode());
+           if (removed != null && removed.recency() == Recency.LIR_RESIDENT) {
+               currentLIRSize--;
+           }
+           queue.remove(e);
+           // we could have multiple instances of e in accessQueue; remove them all
+           while (accessQueue.remove(e));
+       }
+
+       @Override
+       public void clear() {
+           stack.clear();
+           // also drop resident HIR entries; the queue would otherwise retain
+           // stale references after the segment has been cleared
+           queue.clear();
+           accessQueue.clear();
+       }
+
+       @Override
+       public Eviction strategy() {
+           return Eviction.LIRS;
+       }
+   }
+
+    /**
+     * Segments are specialized versions of hash tables.  This
+     * subclasses from ReentrantLock opportunistically, just to
+     * simplify some locking and avoid separate construction.
+     */
+    static final class Segment<K,V> extends ReentrantLock implements Serializable {
+        /*
+         * Segments maintain a table of entry lists that are ALWAYS
+         * kept in a consistent state, so can be read without locking.
+         * Next fields of nodes are immutable (final).  All list
+         * additions are performed at the front of each bin. This
+         * makes it easy to check changes, and also fast to traverse.
+         * When nodes would otherwise be changed, new nodes are
+         * created to replace them. This works well for hash tables
+         * since the bin lists tend to be short. (The average length
+         * is less than two for the default load factor threshold.)
+         *
+         * Read operations can thus proceed without locking, but rely
+         * on selected uses of volatiles to ensure that completed
+         * write operations performed by other threads are
+         * noticed. For most purposes, the "count" field, tracking the
+         * number of elements, serves as that volatile variable
+         * ensuring visibility.  This is convenient because this field
+         * needs to be read in many read operations anyway:
+         *
+         *   - All (unsynchronized) read operations must first read the
+         *     "count" field, and should not look at table entries if
+         *     it is 0.
+         *
+         *   - All (synchronized) write operations should write to
+         *     the "count" field after structurally changing any bin.
+         *     The operations must not take any action that could even
+         *     momentarily cause a concurrent read operation to see
+         *     inconsistent data. This is made easier by the nature of
+         *     the read operations in Map. For example, no operation
+         *     can reveal that the table has grown but the threshold
+         *     has not yet been updated, so there are no atomicity
+         *     requirements for this with respect to reads.
+         *
+         * As a guide, all critical volatile reads and writes to the
+         * count field are marked in code comments.
+         */
+
+        private static final long serialVersionUID = 2249069246763182397L;
+
+        /**
+         * The number of elements in this segment's region.
+         */
+        transient volatile int count;
+
+        /**
+         * Number of updates that alter the size of the table. This is
+         * used during bulk-read methods to make sure they see a
+         * consistent snapshot: If modCounts change during a traversal
+         * of segments computing size or checking containsValue, then
+         * we might have an inconsistent view of state so (usually)
+         * must retry.
+         */
+        transient int modCount;
+
+        /**
+         * The table is rehashed when its size exceeds this threshold.
+         * (The value of this field is always <tt>(int)(capacity *
+         * loadFactor)</tt>.)
+         */
+        transient int threshold;
+
+        /**
+         * The per-segment table.
+         */
+        transient volatile HashEntry<K,V>[] table;
+
+        /**
+         * The load factor for the hash table.  Even though this value
+         * is same for all segments, it is replicated to avoid needing
+         * links to outer object.
+         * @serial
+         */
+        final float loadFactor;
+        
+        transient final EvictionPolicy<K, V> eviction;
+
+        transient final EvictionListener<K, V> evictionListener;
+
+        Segment(int cap, float lf, Eviction es, EvictionListener<K, V> listener) {
+            loadFactor = lf;
+            eviction = es.make(this, cap, lf);
+            evictionListener = listener;
+            setTable(HashEntry.<K, V> newArray(cap));
+        }
+
+        @SuppressWarnings("unchecked")
+        static final <K,V> Segment<K,V>[] newArray(int i) {
+            return new Segment[i];
+        }
+
+        /**
+         * Sets table to new HashEntry array.
+         * Call only while holding lock or in constructor.
+         */
+        void setTable(HashEntry<K,V>[] newTable) {
+            threshold = (int)(newTable.length * loadFactor);
+            table = newTable;
+        }
+
+        /**
+         * Returns the properly cast first entry of the bin for the given hash.
+         */
+        HashEntry<K,V> getFirst(int hash) {
+            HashEntry<K,V>[] tab = table;
+            return tab[hash & (tab.length - 1)];
+        }
+
+        /**
+         * Reads value field of an entry under lock. Called if value
+         * field ever appears to be null. This is possible only if a
+         * compiler happens to reorder a HashEntry initialization with
+         * its table assignment, which is legal under memory model
+         * but is not known to ever occur.
+         */
+        V readValueUnderLock(HashEntry<K,V> e) {
+            lock();
+            try {
+                return e.value;
+            } finally {
+                unlock();
+            }
+        }
+
+        /* Specialized implementations of map methods */
+
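+        /**
+         * Lock-free read, as in ConcurrentHashMap, extended to record the
+         * hit with the eviction policy and, when the policy's batching
+         * threshold is reached, to attempt an amortized eviction pass on
+         * the calling thread.
+         */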
+        V get(Object key, int hash) {
+           int c = count;
+           if (c != 0) { // read-volatile
+               V result = null;
+               HashEntry<K, V> e = getFirst(hash);
+               loop: while (e != null) {
+                   if (e.hash == hash && key.equals(e.key)) {
+                       V v = e.value;
+                       if (v != null) {
+                           result = v;
+                           break loop;
+                       } else {
+                           result = readValueUnderLock(e); // recheck
+                           break loop;
+                       }
+                   }
+                   e = e.next;
+               }
+               // a hit
+               if (result != null) {
+                   if (eviction.onEntryHit(e)) {
+                       Set<HashEntry<K, V>> evicted = attemptEviction(false);
+                           // piggyback listener invocation on the caller's thread, outside the lock
+                       if (evicted != null) {
+                           for (HashEntry<K, V> he : evicted) {
+                               evictionListener.evicted(he.key, he.value);
+                           }
+                       }
+                   }
+               }
+               return result;
+           }
+           return null;
+       }
+
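+       /**
+        * Runs the eviction policy's batched pass under the segment lock.
+        * When called from a read path (lockedAlready == false) the lock is
+        * acquired opportunistically via tryLock(), unless the batch queue
+        * has already reached its hard limit (thresholdExpired()), in which
+        * case we block on lock() so the access queue cannot grow without
+        * bound.
+        */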
+       private Set<HashEntry<K, V>> attemptEviction(boolean lockedAlready) {
+           Set<HashEntry<K, V>> evicted = null;
+           boolean obtainedLock = lockedAlready || tryLock();
+           if (!obtainedLock && eviction.thresholdExpired()) {
+               lock();
+               obtainedLock = true;
+           }
+           if (obtainedLock) {
+               try {
+                   evicted = eviction.execute();
+               } finally {
+                   if (!lockedAlready)
+                       unlock();
+               }
+           }
+           return evicted;
+       }
+
+        boolean containsKey(Object key, int hash) {
+            if (count != 0) { // read-volatile
+                HashEntry<K,V> e = getFirst(hash);
+                while (e != null) {
+                    if (e.hash == hash && key.equals(e.key))
+                        return true;
+                    e = e.next;
+                }
+            }
+            return false;
+        }
+
+        boolean containsValue(Object value) {
+            if (count != 0) { // read-volatile
+                HashEntry<K,V>[] tab = table;
+                int len = tab.length;
+                for (int i = 0 ; i < len; i++) {
+                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
+                        V v = e.value;
+                        if (v == null) // recheck
+                            v = readValueUnderLock(e);
+                        if (value.equals(v))
+                            return true;
+                    }
+                }
+            }
+            return false;
+        }
+
+        boolean replace(K key, int hash, V oldValue, V newValue) {
+           lock();
+           Set<HashEntry<K, V>> evicted = null;
+           try {
+               HashEntry<K, V> e = getFirst(hash);
+               while (e != null && (e.hash != hash || !key.equals(e.key)))
+                   e = e.next;
+
+               boolean replaced = false;
+               if (e != null && oldValue.equals(e.value)) {
+                   replaced = true;
+                   e.value = newValue;
+                   if (eviction.onEntryHit(e)) {
+                       evicted = attemptEviction(true);
+                   }
+               }
+               return replaced;
+           } finally {
+               unlock();
+               // piggyback listener invocation on the caller's thread, outside the lock
+               if (evicted != null) {
+                   for (HashEntry<K, V> he : evicted) {
+                       evictionListener.evicted(he.key, he.value);
+                   }
+               }
+           }
+       }
+
+       V replace(K key, int hash, V newValue) {
+           lock();
+           Set<HashEntry<K, V>> evicted = null;
+           try {
+               HashEntry<K, V> e = getFirst(hash);
+               while (e != null && (e.hash != hash || !key.equals(e.key)))
+                   e = e.next;
+
+               V oldValue = null;
+               if (e != null) {
+                   oldValue = e.value;
+                   e.value = newValue;
+                   if (eviction.onEntryHit(e)) {
+                       evicted = attemptEviction(true);
+                   }
+               }
+               return oldValue;
+           } finally {
+               unlock();
+               // piggyback listener invocation on the caller's thread, outside the lock
+               if (evicted != null) {
+                   for (HashEntry<K, V> he : evicted) {
+                       evictionListener.evicted(he.key, he.value);
+                   }                
+               }
+           }
+       }
+
+
+       V put(K key, int hash, V value, boolean onlyIfAbsent) {
+          lock();
+          Set<HashEntry<K, V>> evicted = null;
+          try {
+              int c = count;
+              if (c++ > threshold && eviction.strategy() == Eviction.NONE) // ensure capacity
+                  rehash();
+              HashEntry<K, V>[] tab = table;
+              int index = hash & (tab.length - 1);
+              HashEntry<K, V> first = tab[index];
+              HashEntry<K, V> e = first;
+              while (e != null && (e.hash != hash || !key.equals(e.key)))
+                  e = e.next;
+
+              V oldValue;
+              if (e != null) {
+                  oldValue = e.value;
+                  if (!onlyIfAbsent) {
+                      e.value = value;
+                      eviction.onEntryHit(e);
+                  }
+              } else {
+                  oldValue = null;
+                  ++modCount;
+                  count = c; // write-volatile
+                  if (eviction.strategy() != Eviction.NONE) {
+                      if (c > tab.length) {
+                          // remove entries to lower the count
+                          evicted = eviction.execute();
+                          // re-read first
+                          first = tab[index];
+                      }
+                      // add a new entry
+                      tab[index] = new HashEntry<K, V>(key, hash, first, value);
+                      // notify a miss
+                      eviction.onEntryMiss(tab[index]);
+                  } else {
+                      tab[index] = new HashEntry<K, V>(key, hash, first, value);
+                  }
+              }
+              return oldValue;
+          } finally {
+              unlock();
+              // piggyback listener invocation on the caller's thread, outside the lock
+              if (evicted != null) {
+                  for (HashEntry<K, V> he : evicted) {
+                      evictionListener.evicted(he.key, he.value);
+                  }                
+              }
+          }
+      }
+
+
+        void rehash() {
+            HashEntry<K,V>[] oldTable = table;
+            int oldCapacity = oldTable.length;
+            if (oldCapacity >= MAXIMUM_CAPACITY)
+                return;
+
+            /*
+             * Reclassify nodes in each list to new Map.  Because we are
+             * using power-of-two expansion, the elements from each bin
+             * must either stay at same index, or move with a power of two
+             * offset. We eliminate unnecessary node creation by catching
+             * cases where old nodes can be reused because their next
+             * fields won't change. Statistically, at the default
+             * threshold, only about one-sixth of them need cloning when
+             * a table doubles. The nodes they replace will be garbage
+             * collectable as soon as they are no longer referenced by any
+             * reader thread that may be in the midst of traversing table
+             * right now.
+             */
+
+            HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
+            threshold = (int)(newTable.length * loadFactor);
+            int sizeMask = newTable.length - 1;
+            for (int i = 0; i < oldCapacity ; i++) {
+                // We need to guarantee that any existing reads of old Map can
+                //  proceed. So we cannot yet null out each bin.
+                HashEntry<K,V> e = oldTable[i];
+
+                if (e != null) {
+                    HashEntry<K,V> next = e.next;
+                    int idx = e.hash & sizeMask;
+
+                    //  Single node on list
+                    if (next == null)
+                        newTable[idx] = e;
+
+                    else {
+                        // Reuse trailing consecutive sequence at same slot
+                        HashEntry<K,V> lastRun = e;
+                        int lastIdx = idx;
+                        for (HashEntry<K,V> last = next;
+                             last != null;
+                             last = last.next) {
+                            int k = last.hash & sizeMask;
+                            if (k != lastIdx) {
+                                lastIdx = k;
+                                lastRun = last;
+                            }
+                        }
+                        newTable[lastIdx] = lastRun;
+
+                        // Clone all remaining nodes
+                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
+                            int k = p.hash & sizeMask;
+                            HashEntry<K,V> n = newTable[k];
+                            newTable[k] = new HashEntry<K,V>(p.key, p.hash,
+                                                             n, p.value);
+                        }
+                    }
+                }
+            }
+            table = newTable;
+        }
+
+        /**
+         * Remove; match on key only if value null, else match both.
+         */
+        V remove(Object key, int hash, Object value) {
+           lock();
+           try {
+               int c = count - 1;
+               HashEntry<K, V>[] tab = table;
+               int index = hash & (tab.length - 1);
+               HashEntry<K, V> first = tab[index];
+               HashEntry<K, V> e = first;
+               while (e != null && (e.hash != hash || !key.equals(e.key)))
+                   e = e.next;
+
+               V oldValue = null;
+               if (e != null) {
+                   V v = e.value;
+                   if (value == null || value.equals(v)) {
+                       oldValue = v;
+                       // All entries following removed node can stay
+                       // in list, but all preceding ones need to be
+                       // cloned.
+                       ++modCount;
+
+                       // e was removed
+                       eviction.onEntryRemove(e);
+
+                       HashEntry<K, V> newFirst = e.next;
+                       for (HashEntry<K, V> p = first; p != e; p = p.next) {
+                           // allow p to be GC-ed
+                           eviction.onEntryRemove(p);
+                           newFirst = new HashEntry<K, V>(p.key, p.hash, newFirst, p.value);
+                           // and notify eviction algorithm about new hash entries
+                           eviction.onEntryMiss(newFirst);
+                       }
+
+                       tab[index] = newFirst;
+                       count = c; // write-volatile
+                   }
+               }
+               return oldValue;
+           } finally {
+               unlock();
+           }
+       }
+
+        void clear() {
+           if (count != 0) {
+               lock();
+               try {
+                   HashEntry<K, V>[] tab = table;
+                   for (int i = 0; i < tab.length; i++)
+                       tab[i] = null;
+                   ++modCount;
+                   eviction.clear();
+                   count = 0; // write-volatile
+               } finally {
+                   unlock();
+               }
+           }
+       }
+    }
+
+
+
+    /* ---------------- Public operations -------------- */
+
+
+    /**
+     * Creates a new, empty map with the specified maximum capacity, concurrency
+     * level, eviction strategy and eviction listener.
+     *
+     * @param capacity
+     *            the upper bound on the number of elements in this map
+     *
+     * @param concurrencyLevel
+     *            the estimated number of concurrently updating threads. The implementation performs
+     *            internal sizing to try to accommodate this many threads.
+     *
+     * @param evictionStrategy
+     *            the algorithm used to evict elements from this map
+     *
+     * @param evictionListener
+     *            the eviction listener callback to be notified about evicted elements
+     *
+     * @throws IllegalArgumentException
+     *             if capacity is negative, concurrencyLevel is nonpositive, or
+     *             evictionStrategy or evictionListener is null
+     */
+    public BoundedConcurrentHashMap(int capacity, int concurrencyLevel,
+                    Eviction evictionStrategy, EvictionListener<K, V> evictionListener) {
+        if (capacity < 0 || concurrencyLevel <= 0)
+            throw new IllegalArgumentException();
+        
+        if (evictionStrategy == null || evictionListener == null)
+            throw new IllegalArgumentException();
+
+        if (concurrencyLevel > MAX_SEGMENTS)
+            concurrencyLevel = MAX_SEGMENTS;
+
+        // Find power-of-two sizes best matching arguments
+        int sshift = 0;
+        int ssize = 1;
+        while (ssize < concurrencyLevel) {
+            ++sshift;
+            ssize <<= 1;
+        }
+        segmentShift = 32 - sshift;
+        segmentMask = ssize - 1;
+        this.segments = Segment.newArray(ssize);
+
+        if (capacity > MAXIMUM_CAPACITY)
+            capacity = MAXIMUM_CAPACITY;
+        int c = capacity / ssize;
+        if (c * ssize < capacity)
+            ++c;
+        int cap = 1;
+        while (cap < c)
+            cap <<= 1;
+        
+        for (int i = 0; i < this.segments.length; ++i)
+            this.segments[i] = new Segment<K, V>(cap, DEFAULT_LOAD_FACTOR, evictionStrategy,
+                            evictionListener);
+    }
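+    /*
+     * Illustrative usage (a minimal sketch; the listener body is
+     * hypothetical, everything else is the API declared in this class):
+     *
+     *   BoundedConcurrentHashMap<String, String> cache =
+     *           new BoundedConcurrentHashMap<String, String>(512, 16, Eviction.LIRS,
+     *                   new EvictionListener<String, String>() {
+     *                       public void evicted(String key, String value) {
+     *                           // runs on the caller's thread, outside the segment lock
+     *                       }
+     *                   });
+     */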
+
+    /**
+     * Creates a new, empty map with the specified maximum capacity and concurrency
+     * level, using the LRU eviction policy.
+     *
+     * @param capacity
+     *            the upper bound on the number of elements in this map
+     *
+     * @param concurrencyLevel
+     *            the estimated number of concurrently updating threads. The implementation performs
+     *            internal sizing to try to accommodate this many threads.
+     *
+     * @throws IllegalArgumentException
+     *             if capacity is negative or concurrencyLevel is nonpositive
+     */
+    public BoundedConcurrentHashMap(int capacity, int concurrencyLevel) {
+        this(capacity, concurrencyLevel, Eviction.LRU);
+    }
+    
+    /**
+     * Creates a new, empty map with the specified maximum capacity, concurrency
+     * level and eviction strategy.
+     *
+     * @param capacity
+     *            the upper bound on the number of elements in this map
+     *
+     * @param concurrencyLevel
+     *            the estimated number of concurrently updating threads. The implementation performs
+     *            internal sizing to try to accommodate this many threads.
+     *
+     * @param evictionStrategy
+     *            the algorithm used to evict elements from this map
+     *
+     * @throws IllegalArgumentException
+     *             if capacity is negative or concurrencyLevel is nonpositive
+     */
+    public BoundedConcurrentHashMap(int capacity, int concurrencyLevel, Eviction evictionStrategy) {
+        this(capacity, concurrencyLevel, evictionStrategy, new NullEvictionListener<K, V>());
+    }
+
+    /**
+     * Creates a new, empty map with the specified maximum capacity, the default
+     * concurrency level and the LRU eviction policy.
+     *
+     * @param capacity
+     *            the upper bound on the number of elements in this map
+     *
+     * @throws IllegalArgumentException
+     *             if capacity is negative
+     */
+    public BoundedConcurrentHashMap(int capacity) {
+        this(capacity, DEFAULT_CONCURRENCY_LEVEL);
+    }
+
+    /**
+     * Creates a new, empty map with the default maximum capacity and concurrency level.
+     */
+    public BoundedConcurrentHashMap() {
+        this(DEFAULT_MAXIMUM_CAPACITY, DEFAULT_CONCURRENCY_LEVEL);
+    }
+
+    /**
+     * Returns <tt>true</tt> if this map contains no key-value mappings.
+     *
+     * @return <tt>true</tt> if this map contains no key-value mappings
+     */
+    public boolean isEmpty() {
+        final Segment<K,V>[] segments = this.segments;
+        /*
+         * We keep track of per-segment modCounts to avoid ABA
+         * problems in which an element in one segment was added and
+         * in another removed during traversal, in which case the
+         * table was never actually empty at any point. Note the
+         * similar use of modCounts in the size() and containsValue()
+         * methods, which are the only other methods also susceptible
+         * to ABA problems.
+         */
+        int[] mc = new int[segments.length];
+        int mcsum = 0;
+        for (int i = 0; i < segments.length; ++i) {
+            if (segments[i].count != 0)
+                return false;
+            else
+                mcsum += mc[i] = segments[i].modCount;
+        }
+        // If mcsum happens to be zero, then we know we got a snapshot
+        // before any modifications at all were made.  This is
+        // probably common enough to bother tracking.
+        if (mcsum != 0) {
+            for (int i = 0; i < segments.length; ++i) {
+                if (segments[i].count != 0 ||
+                    mc[i] != segments[i].modCount)
+                    return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Returns the number of key-value mappings in this map.  If the
+     * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
+     * <tt>Integer.MAX_VALUE</tt>.
+     *
+     * @return the number of key-value mappings in this map
+     */
+    public int size() {
+        final Segment<K,V>[] segments = this.segments;
+        long sum = 0;
+        long check = 0;
+        int[] mc = new int[segments.length];
+        // Try a few times to get accurate count. On failure due to
+        // continuous async changes in table, resort to locking.
+        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
+            check = 0;
+            sum = 0;
+            int mcsum = 0;
+            for (int i = 0; i < segments.length; ++i) {
+                sum += segments[i].count;
+                mcsum += mc[i] = segments[i].modCount;
+            }
+            if (mcsum != 0) {
+                for (int i = 0; i < segments.length; ++i) {
+                    check += segments[i].count;
+                    if (mc[i] != segments[i].modCount) {
+                        check = -1; // force retry
+                        break;
+                    }
+                }
+            }
+            if (check == sum)
+                break;
+        }
+        if (check != sum) { // Resort to locking all segments
+            sum = 0;
+            for (int i = 0; i < segments.length; ++i)
+                segments[i].lock();
+            for (int i = 0; i < segments.length; ++i)
+                sum += segments[i].count;
+            for (int i = 0; i < segments.length; ++i)
+                segments[i].unlock();
+        }
+        if (sum > Integer.MAX_VALUE)
+            return Integer.MAX_VALUE;
+        else
+            return (int)sum;
+    }
+
+    /**
+     * Returns the value to which the specified key is mapped,
+     * or {@code null} if this map contains no mapping for the key.
+     *
+     * <p>More formally, if this map contains a mapping from a key
+     * {@code k} to a value {@code v} such that {@code key.equals(k)},
+     * then this method returns {@code v}; otherwise it returns
+     * {@code null}.  (There can be at most one such mapping.)
+     *
+     * @throws NullPointerException if the specified key is null
+     */
+    public V get(Object key) {
+        int hash = hash(key.hashCode());
+        return segmentFor(hash).get(key, hash);
+    }
+
+    /**
+     * Tests if the specified object is a key in this table.
+     *
+     * @param  key   possible key
+     * @return <tt>true</tt> if and only if the specified object
+     *         is a key in this table, as determined by the
+     *         <tt>equals</tt> method; <tt>false</tt> otherwise.
+     * @throws NullPointerException if the specified key is null
+     */
+    public boolean containsKey(Object key) {
+        int hash = hash(key.hashCode());
+        return segmentFor(hash).containsKey(key, hash);
+    }
+
+    /**
+     * Returns <tt>true</tt> if this map maps one or more keys to the
+     * specified value. Note: This method requires a full internal
+     * traversal of the hash table, and so is much slower than
+     * method <tt>containsKey</tt>.
+     *
+     * @param value value whose presence in this map is to be tested
+     * @return <tt>true</tt> if this map maps one or more keys to the
+     *         specified value
+     * @throws NullPointerException if the specified value is null
+     */
+    public boolean containsValue(Object value) {
+        if (value == null)
+            throw new NullPointerException();
+
+        // See explanation of modCount use above
+
+        final Segment<K,V>[] segments = this.segments;
+        int[] mc = new int[segments.length];
+
+        // Try a few times without locking
+        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
+            int sum = 0;
+            int mcsum = 0;
+            for (int i = 0; i < segments.length; ++i) {
+                int c = segments[i].count;
+                mcsum += mc[i] = segments[i].modCount;
+                if (segments[i].containsValue(value))
+                    return true;
+            }
+            boolean cleanSweep = true;
+            if (mcsum != 0) {
+                for (int i = 0; i < segments.length; ++i) {
+                    int c = segments[i].count;
+                    if (mc[i] != segments[i].modCount) {
+                        cleanSweep = false;
+                        break;
+                    }
+                }
+            }
+            if (cleanSweep)
+                return false;
+        }
+        // Resort to locking all segments
+        for (int i = 0; i < segments.length; ++i)
+            segments[i].lock();
+        boolean found = false;
+        try {
+            for (int i = 0; i < segments.length; ++i) {
+                if (segments[i].containsValue(value)) {
+                    found = true;
+                    break;
+                }
+            }
+        } finally {
+            for (int i = 0; i < segments.length; ++i)
+                segments[i].unlock();
+        }
+        return found;
+    }
+
+    /**
+     * Legacy method testing if some key maps into the specified value
+     * in this table.  This method is identical in functionality to
+     * {@link #containsValue}, and exists solely to ensure
+     * full compatibility with class {@link java.util.Hashtable},
+     * which supported this method prior to introduction of the
+     * Java Collections framework.
+     *
+     * @param  value a value to search for
+     * @return <tt>true</tt> if and only if some key maps to the
+     *         <tt>value</tt> argument in this table as
+     *         determined by the <tt>equals</tt> method;
+     *         <tt>false</tt> otherwise
+     * @throws NullPointerException if the specified value is null
+     */
+    public boolean contains(Object value) {
+        return containsValue(value);
+    }
+
+    /**
+     * Maps the specified key to the specified value in this table.
+     * Neither the key nor the value can be null.
+     *
+     * <p> The value can be retrieved by calling the <tt>get</tt> method
+     * with a key that is equal to the original key.
+     *
+     * @param key key with which the specified value is to be associated
+     * @param value value to be associated with the specified key
+     * @return the previous value associated with <tt>key</tt>, or
+     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
+     * @throws NullPointerException if the specified key or value is null
+     */
+    public V put(K key, V value) {
+        if (value == null)
+            throw new NullPointerException();
+        int hash = hash(key.hashCode());
+        return segmentFor(hash).put(key, hash, value, false);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return the previous value associated with the specified key,
+     *         or <tt>null</tt> if there was no mapping for the key
+     * @throws NullPointerException if the specified key or value is null
+     */
+    public V putIfAbsent(K key, V value) {
+        if (value == null)
+            throw new NullPointerException();
+        int hash = hash(key.hashCode());
+        return segmentFor(hash).put(key, hash, value, true);
+    }
+
+    /**
+     * Copies all of the mappings from the specified map to this one.
+     * These mappings replace any mappings that this map had for any of the
+     * keys currently in the specified map.
+     *
+     * @param m mappings to be stored in this map
+     */
+    public void putAll(Map<? extends K, ? extends V> m) {
+        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
+            put(e.getKey(), e.getValue());
+    }
+
+    /**
+     * Removes the key (and its corresponding value) from this map.
+     * This method does nothing if the key is not in the map.
+     *
+     * @param  key the key that needs to be removed
+     * @return the previous value associated with <tt>key</tt>, or
+     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
+     * @throws NullPointerException if the specified key is null
+     */
+    public V remove(Object key) {
+        int hash = hash(key.hashCode());
+        return segmentFor(hash).remove(key, hash, null);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @throws NullPointerException if the specified key is null
+     */
+    public boolean remove(Object key, Object value) {
+        int hash = hash(key.hashCode());
+        if (value == null)
+            return false;
+        return segmentFor(hash).remove(key, hash, value) != null;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @throws NullPointerException if any of the arguments are null
+     */
+    public boolean replace(K key, V oldValue, V newValue) {
+        if (oldValue == null || newValue == null)
+            throw new NullPointerException();
+        int hash = hash(key.hashCode());
+        return segmentFor(hash).replace(key, hash, oldValue, newValue);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return the previous value associated with the specified key,
+     *         or <tt>null</tt> if there was no mapping for the key
+     * @throws NullPointerException if the specified key or value is null
+     */
+    public V replace(K key, V value) {
+        if (value == null)
+            throw new NullPointerException();
+        int hash = hash(key.hashCode());
+        return segmentFor(hash).replace(key, hash, value);
+    }
+
+    /**
+     * Removes all of the mappings from this map.
+     */
+    public void clear() {
+        for (int i = 0; i < segments.length; ++i)
+            segments[i].clear();
+    }
+
+    /**
+     * Returns a {@link Set} view of the keys contained in this map.
+     * The set is backed by the map, so changes to the map are
+     * reflected in the set, and vice-versa.  The set supports element
+     * removal, which removes the corresponding mapping from this map,
+     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+     * operations.  It does not support the <tt>add</tt> or
+     * <tt>addAll</tt> operations.
+     *
+     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+     * that will never throw {@link ConcurrentModificationException},
+     * and guarantees to traverse elements as they existed upon
+     * construction of the iterator, and may (but is not guaranteed to)
+     * reflect any modifications subsequent to construction.
+     */
+    public Set<K> keySet() {
+        Set<K> ks = keySet;
+        return (ks != null) ? ks : (keySet = new KeySet());
+    }
+
+    /**
+     * Returns a {@link Collection} view of the values contained in this map.
+     * The collection is backed by the map, so changes to the map are
+     * reflected in the collection, and vice-versa.  The collection
+     * supports element removal, which removes the corresponding
+     * mapping from this map, via the <tt>Iterator.remove</tt>,
+     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
+     * <tt>retainAll</tt>, and <tt>clear</tt> operations.  It does not
+     * support the <tt>add</tt> or <tt>addAll</tt> operations.
+     *
+     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+     * that will never throw {@link ConcurrentModificationException},
+     * and guarantees to traverse elements as they existed upon
+     * construction of the iterator, and may (but is not guaranteed to)
+     * reflect any modifications subsequent to construction.
+     */
+    public Collection<V> values() {
+        Collection<V> vs = values;
+        return (vs != null) ? vs : (values = new Values());
+    }
+
+    /**
+     * Returns a {@link Set} view of the mappings contained in this map.
+     * The set is backed by the map, so changes to the map are
+     * reflected in the set, and vice-versa.  The set supports element
+     * removal, which removes the corresponding mapping from the map,
+     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+     * operations.  It does not support the <tt>add</tt> or
+     * <tt>addAll</tt> operations.
+     *
+     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+     * that will never throw {@link ConcurrentModificationException},
+     * and guarantees to traverse elements as they existed upon
+     * construction of the iterator, and may (but is not guaranteed to)
+     * reflect any modifications subsequent to construction.
+     */
+    public Set<Map.Entry<K,V>> entrySet() {
+        Set<Map.Entry<K,V>> es = entrySet;
+        return (es != null) ? es : (entrySet = new EntrySet());
+    }
+
+    /**
+     * Returns an enumeration of the keys in this table.
+     *
+     * @return an enumeration of the keys in this table
+     * @see #keySet()
+     */
+    public Enumeration<K> keys() {
+        return new KeyIterator();
+    }
+
+    /**
+     * Returns an enumeration of the values in this table.
+     *
+     * @return an enumeration of the values in this table
+     * @see #values()
+     */
+    public Enumeration<V> elements() {
+        return new ValueIterator();
+    }
+
+    /* ---------------- Iterator Support -------------- */
+
+    abstract class HashIterator {
+        int nextSegmentIndex;
+        int nextTableIndex;
+        HashEntry<K,V>[] currentTable;
+        HashEntry<K, V> nextEntry;
+        HashEntry<K, V> lastReturned;
+
+        HashIterator() {
+            nextSegmentIndex = segments.length - 1;
+            nextTableIndex = -1;
+            advance();
+        }
+
+        public boolean hasMoreElements() { return hasNext(); }
+
+        final void advance() {
+            if (nextEntry != null && (nextEntry = nextEntry.next) != null)
+                return;
+
+            while (nextTableIndex >= 0) {
+                if ( (nextEntry = currentTable[nextTableIndex--]) != null)
+                    return;
+            }
+
+            while (nextSegmentIndex >= 0) {
+                Segment<K,V> seg = segments[nextSegmentIndex--];
+                if (seg.count != 0) {
+                    currentTable = seg.table;
+                    for (int j = currentTable.length - 1; j >= 0; --j) {
+                        if ( (nextEntry = currentTable[j]) != null) {
+                            nextTableIndex = j - 1;
+                            return;
+                        }
+                    }
+                }
+            }
+        }
+
+        public boolean hasNext() { return nextEntry != null; }
+
+        HashEntry<K,V> nextEntry() {
+            if (nextEntry == null)
+                throw new NoSuchElementException();
+            lastReturned = nextEntry;
+            advance();
+            return lastReturned;
+        }
+
+        public void remove() {
+            if (lastReturned == null)
+                throw new IllegalStateException();
+            BoundedConcurrentHashMap.this.remove(lastReturned.key);
+            lastReturned = null;
+        }
+    }
+
+    final class KeyIterator
+        extends HashIterator
+        implements Iterator<K>, Enumeration<K>
+    {
+        public K next()        { return super.nextEntry().key; }
+        public K nextElement() { return super.nextEntry().key; }
+    }
+
+    final class ValueIterator
+        extends HashIterator
+        implements Iterator<V>, Enumeration<V>
+    {
+        public V next()        { return super.nextEntry().value; }
+        public V nextElement() { return super.nextEntry().value; }
+    }
+
+    /**
+     * Custom Entry class used by EntryIterator.next(), that relays
+     * setValue changes to the underlying map.
+     */
+    final class WriteThroughEntry
+        extends AbstractMap.SimpleEntry<K,V>
+    {
+        WriteThroughEntry(K k, V v) {
+            super(k,v);
+        }
+
+        /**
+         * Set our entry's value and write through to the map. The
+         * value to return is somewhat arbitrary here. Since a
+         * WriteThroughEntry does not necessarily track asynchronous
+         * changes, the most recent "previous" value could be
+         * different from what we return (or could even have been
+         * removed in which case the put will re-establish). We do not
+         * and cannot guarantee more.
+         */
+        public V setValue(V value) {
+            if (value == null) throw new NullPointerException();
+            V v = super.setValue(value);
+            BoundedConcurrentHashMap.this.put(getKey(), value);
+            return v;
+        }
+    }
+
+    final class EntryIterator
+        extends HashIterator
+        implements Iterator<Entry<K,V>>
+    {
+        public Map.Entry<K,V> next() {
+            HashEntry<K,V> e = super.nextEntry();
+            return new WriteThroughEntry(e.key, e.value);
+        }
+    }
+
+    final class KeySet extends AbstractSet<K> {
+        public Iterator<K> iterator() {
+            return new KeyIterator();
+        }
+        public int size() {
+            return BoundedConcurrentHashMap.this.size();
+        }
+        public boolean isEmpty() {
+            return BoundedConcurrentHashMap.this.isEmpty();
+        }
+        public boolean contains(Object o) {
+            return BoundedConcurrentHashMap.this.containsKey(o);
+        }
+        public boolean remove(Object o) {
+            return BoundedConcurrentHashMap.this.remove(o) != null;
+        }
+        public void clear() {
+            BoundedConcurrentHashMap.this.clear();
+        }
+    }
+
+    final class Values extends AbstractCollection<V> {
+        public Iterator<V> iterator() {
+            return new ValueIterator();
+        }
+        public int size() {
+            return BoundedConcurrentHashMap.this.size();
+        }
+        public boolean isEmpty() {
+            return BoundedConcurrentHashMap.this.isEmpty();
+        }
+        public boolean contains(Object o) {
+            return BoundedConcurrentHashMap.this.containsValue(o);
+        }
+        public void clear() {
+            BoundedConcurrentHashMap.this.clear();
+        }
+    }
+
+    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
+        public Iterator<Map.Entry<K,V>> iterator() {
+            return new EntryIterator();
+        }
+        public boolean contains(Object o) {
+            if (!(o instanceof Map.Entry))
+                return false;
+            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
+            V v = BoundedConcurrentHashMap.this.get(e.getKey());
+            return v != null && v.equals(e.getValue());
+        }
+        public boolean remove(Object o) {
+            if (!(o instanceof Map.Entry))
+                return false;
+            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
+            return BoundedConcurrentHashMap.this.remove(e.getKey(), e.getValue());
+        }
+        public int size() {
+            return BoundedConcurrentHashMap.this.size();
+        }
+        public boolean isEmpty() {
+            return BoundedConcurrentHashMap.this.isEmpty();
+        }
+        public void clear() {
+            BoundedConcurrentHashMap.this.clear();
+        }
+    }
+
+    /* ---------------- Serialization Support -------------- */
+
+    /**
+     * Save the state of the <tt>BoundedConcurrentHashMap</tt> instance to a
+     * stream (i.e., serialize it).
+     * @param s the stream
+     * @serialData
+     * the key (Object) and value (Object)
+     * for each key-value mapping, followed by a null pair.
+     * The key-value mappings are emitted in no particular order.
+     */
+    private void writeObject(java.io.ObjectOutputStream s) throws IOException  {
+        s.defaultWriteObject();
+
+        for (int k = 0; k < segments.length; ++k) {
+            Segment<K,V> seg = segments[k];
+            seg.lock();
+            try {
+                HashEntry<K,V>[] tab = seg.table;
+                for (int i = 0; i < tab.length; ++i) {
+                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
+                        s.writeObject(e.key);
+                        s.writeObject(e.value);
+                    }
+                }
+            } finally {
+                seg.unlock();
+            }
+        }
+        s.writeObject(null);
+        s.writeObject(null);
+    }
+
+    /**
+     * Reconstitute the <tt>BoundedConcurrentHashMap</tt> instance from a
+     * stream (i.e., deserialize it).
+     * @param s the stream
+     */
+    private void readObject(java.io.ObjectInputStream s)
+        throws IOException, ClassNotFoundException  {
+        s.defaultReadObject();
+
+        // Initialize each segment to be minimally sized, and let grow.
+        for (int i = 0; i < segments.length; ++i) {
+            segments[i].setTable(new HashEntry[1]);
+        }
+
+        // Read the keys and values, and put the mappings in the table
+        for (;;) {
+            K key = (K) s.readObject();
+            V value = (V) s.readObject();
+            if (key == null)
+                break;
+            put(key, value);
+        }
+    }
+}
\ No newline at end of file

Deleted: trunk/core/src/main/java/org/infinispan/util/concurrent/BufferedConcurrentHashMap.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/util/concurrent/BufferedConcurrentHashMap.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/java/org/infinispan/util/concurrent/BufferedConcurrentHashMap.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -1,1842 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
- * 
- * 
- * Modified by Vladimir Blagojevic to include lock amortized eviction. 
- * For more details see http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/papers/TR-09-1.pdf
- * https://jira.jboss.org/jira/browse/ISPN-299 
- * 
- */
-
-package org.infinispan.util.concurrent;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.AbstractCollection;
-import java.util.AbstractMap;
-import java.util.AbstractSet;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.ConcurrentModificationException;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Hashtable;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * A hash table supporting full concurrency of retrievals and adjustable expected concurrency for
- * updates. This class obeys the same functional specification as {@link java.util.Hashtable}, and
- * includes versions of methods corresponding to each method of <tt>Hashtable</tt>. However, even
- * though all operations are thread-safe, retrieval operations do <em>not</em> entail locking, and
- * there is <em>not</em> any support for locking the entire table in a way that prevents all access.
- * This class is fully interoperable with <tt>Hashtable</tt> in programs that rely on its thread
- * safety but not on its synchronization details.
- * 
- * <p>
- * Retrieval operations (including <tt>get</tt>) generally do not block, so may overlap with update
- * operations (including <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results of the
- * most recently <em>completed</em> update operations holding upon their onset. For aggregate
- * operations such as <tt>putAll</tt> and <tt>clear</tt>, concurrent retrievals may reflect
- * insertion or removal of only some entries. Similarly, Iterators and Enumerations return elements
- * reflecting the state of the hash table at some point at or since the creation of the
- * iterator/enumeration. They do <em>not</em> throw {@link ConcurrentModificationException}.
- * However, iterators are designed to be used by only one thread at a time.
- * 
- * <p>
- * The allowed concurrency among update operations is guided by the optional
- * <tt>concurrencyLevel</tt> constructor argument (default <tt>16</tt>), which is used as a hint for
- * internal sizing. The table is internally partitioned to try to permit the indicated number of
- * concurrent updates without contention. Because placement in hash tables is essentially random,
- * the actual concurrency will vary. Ideally, you should choose a value to accommodate as many
- * threads as will ever concurrently modify the table. Using a significantly higher value than you
- * need can waste space and time, and a significantly lower value can lead to thread contention. But
- * overestimates and underestimates within an order of magnitude do not usually have much noticeable
- * impact. A value of one is appropriate when it is known that only one thread will modify and all
- * others will only read. Also, resizing this or any other kind of hash table is a relatively slow
- * operation, so, when possible, it is a good idea to provide estimates of expected table sizes in
- * constructors.
- * 
- * <p>
- * This class and its views and iterators implement all of the <em>optional</em> methods of the
- * {@link Map} and {@link Iterator} interfaces.
- * 
- * <p>
- * Like {@link Hashtable} but unlike {@link HashMap}, this class does <em>not</em> allow
- * <tt>null</tt> to be used as a key or value.
- * 
- * <p>
- * This class is a member of the <a href="{@docRoot}/../technotes/guides/collections/index.html">
- * Java Collections Framework</a>.
- * 
- * @since 1.5
- * @author Doug Lea
- * @param <K>
- *            the type of keys maintained by this map
- * @param <V>
- *            the type of mapped values
- */
-public class BufferedConcurrentHashMap<K, V> extends AbstractMap<K, V> implements
-                ConcurrentMap<K, V>, Serializable {
-    private static final long serialVersionUID = 7249069246763182397L;
-
-    /*
-     * The basic strategy is to subdivide the table among Segments, each of which itself is a
-     * concurrently readable hash table.
-     */
-
-    /* ---------------- Constants -------------- */
-    /**
-     * The default initial capacity for this table, used when not otherwise specified in a
-     * constructor.
-     */
-    static final int DEFAULT_INITIAL_CAPACITY = 16;
-
-    /**
-     * The default load factor for this table, used when not otherwise specified in a constructor.
-     */
-    static final float DEFAULT_LOAD_FACTOR = 0.75f;
-
-    /**
-     * The default concurrency level for this table, used when not otherwise specified in a
-     * constructor.
-     */
-    static final int DEFAULT_CONCURRENCY_LEVEL = 16;
-
-    /**
-     * The maximum capacity, used if a higher value is implicitly specified by either of the
-     * constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are
-     * indexable using ints.
-     */
-    static final int MAXIMUM_CAPACITY = 1 << 30;
-
-    /**
-     * The maximum number of segments to allow; used to bound constructor arguments.
-     */
-    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
-
-    /**
-     * Number of unsynchronized retries in size and containsValue methods before resorting to
-     * locking. This is used to avoid unbounded retries if tables undergo continuous modification
-     * which would make it impossible to obtain an accurate result.
-     */
-    static final int RETRIES_BEFORE_LOCK = 2;
-
-    /* ---------------- Fields -------------- */
-
-    /**
-     * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
-     * the segment.
-     */
-    final int segmentMask;
-
-    /**
-     * Shift value for indexing within segments.
-     */
-    final int segmentShift;
-
-    /**
-     * The segments, each of which is a specialized hash table
-     */
-    final Segment<K, V>[] segments;
-
-    transient Set<K> keySet;
-    transient Set<Map.Entry<K, V>> entrySet;
-    transient Collection<V> values;
-
-    /* ---------------- Small Utilities -------------- */
-
-    /**
-     * Applies a supplemental hash function to a given hashCode, which defends against poor quality
-     * hash functions. This is critical because ConcurrentHashMap uses power-of-two length hash
-     * tables, that otherwise encounter collisions for hashCodes that do not differ in lower or
-     * upper bits.
-     */
-    private static int hash(int h) {
-        // Spread bits to regularize both segment and index locations,
-        // using variant of single-word Wang/Jenkins hash.
-        h += (h << 15) ^ 0xffffcd7d;
-        h ^= (h >>> 10);
-        h += (h << 3);
-        h ^= (h >>> 6);
-        h += (h << 2) + (h << 14);
-        return h ^ (h >>> 16);
-    }
-
-    /**
-     * Returns the segment that should be used for key with given hash
-     * 
-     * @param hash
-     *            the hash code for the key
-     * @return the segment
-     */
-    final Segment<K, V> segmentFor(int hash) {
-        return segments[(hash >>> segmentShift) & segmentMask];
-    }
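The spreader above regularizes both the segment-selection bits (the top bits,
consumed by segmentFor via segmentShift/segmentMask) and the in-segment index
bits (the low bits, consumed when indexing a bin). A small standalone
illustration (illustrative only; the constants follow from the default
concurrencyLevel of 16, which yields segmentShift = 28 and segmentMask = 15):

    public class SegmentIndexDemo {
        // same single-word Wang/Jenkins variant as above
        private static int hash(int h) {
            h += (h << 15) ^ 0xffffcd7d;
            h ^= (h >>> 10);
            h += (h << 3);
            h ^= (h >>> 6);
            h += (h << 2) + (h << 14);
            return h ^ (h >>> 16);
        }

        public static void main(String[] args) {
            int h = hash("someKey".hashCode());
            int segmentIndex = (h >>> 28) & 15; // top 4 bits pick one of 16 segments
            int binIndex = h & (16 - 1);        // low bits index into the segment table
            System.out.println(segmentIndex + " / " + binIndex);
        }
    }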
-
-    /* ---------------- Inner Classes -------------- */
-
-    /**
-     * ConcurrentHashMap list entry. Note that this is never exported out as a user-visible
-     * Map.Entry.
-     * 
-     * Because the value field is volatile, not final, it is legal wrt the Java Memory Model for an
-     * unsynchronized reader to see null instead of initial value when read via a data race.
-     * Although a reordering leading to this is not likely to ever actually occur, the
-     * Segment.readValueUnderLock method is used as a backup in case a null (pre-initialized) value
-     * is ever seen in an unsynchronized access method.
-     */
-    static final class HashEntry<K, V> {
-        final K key;
-        final int hash;
-        volatile V value;
-        final HashEntry<K, V> next;
-        volatile Recency state;
-
-        HashEntry(K key, int hash, HashEntry<K, V> next, V value) {
-            this.key = key;
-            this.hash = hash;
-            this.next = next;
-            this.value = value;
-            this.state = Recency.HIR_RESIDENT;
-        }
-
-        public int hashCode() {
-            int result = 17;
-            result = (result * 31) + hash;
-            result = (result * 31) + key.hashCode();
-            return result;
-        }
-
-        public boolean equals(Object o) {
-            // HashEntry is an internal class that never leaks out of CHM, hence the slight optimization
-            if (this == o)
-                return true;
-            if (o == null)
-                return false;
-            HashEntry<?, ?> other = (HashEntry<?, ?>) o;
-            return hash == other.hash && key.equals(other.key);
-        }
-
-        public void transitionToLIRResident() {           
-            state = Recency.LIR_RESIDENT;
-        }
-
-        public void transitionHIRResidentToHIRNonResident() {            
-            state = Recency.HIR_NONRESIDENT;
-        }
-        
-        public void transitionLIRResidentToHIRResident() {            
-            state = Recency.HIR_RESIDENT;
-        }
-
-        public Recency recency() {
-            return state;
-        }
-
-        @SuppressWarnings("unchecked")
-        static <K, V> HashEntry<K, V>[] newArray(int i) {
-            return new HashEntry[i];
-        }
-    }
-
-    private enum Recency {
-        HIR_RESIDENT, LIR_RESIDENT, HIR_NONRESIDENT
-    }
-
-    public enum Eviction {
-        NONE {
-            @Override
-            public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
-                return new NullEvictionPolicy<K, V>();
-            }
-        },
-        LRU {
-
-            @Override
-            public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
-                return new LRU<K, V>(s,capacity,lf,capacity*10,lf);
-            }
-        },
-        LIRS {
-            @Override
-            public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
-                return new LIRS<K,V>(s,capacity,lf,capacity*10,lf);
-            }
-        };
-
-        abstract <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf);
-    }
-    
-    public interface EvictionListener<K, V> {
-        void evicted(K key, V value);
-    }
-    
-    static class NullEvictionListener<K,V> implements EvictionListener<K, V>{
-        @Override
-        public void evicted(K key, V value) {            
-        }        
-    }
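The listener gives callers a hook into evictions without taking part in the
locking; as the Segment code further below shows, it is invoked on the
caller's thread after the segment lock has been released. A minimal sketch of
a listener (hypothetical class, not part of this commit):

    // Hypothetical listener that counts and logs evictions
    class CountingEvictionListener<K, V>
            implements BufferedConcurrentHashMap.EvictionListener<K, V> {
        private final java.util.concurrent.atomic.AtomicLong evictions =
                new java.util.concurrent.atomic.AtomicLong();

        @Override
        public void evicted(K key, V value) {
            long n = evictions.incrementAndGet();
            System.out.println("evicted #" + n + ": " + key);
        }

        long count() {
            return evictions.get();
        }
    }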
-
-    public interface EvictionPolicy<K, V> {
-
-        public final static int MAX_BATCH_SIZE = 64;
-
-        /**
-         * Invokes eviction policy algorithm and returns set of evicted entries.
-         * 
-         * <p>
-         * Set cannot be null but could possibly be an empty set.
-         * 
-         * @return set of evicted entries.
-         */
-        Set<HashEntry<K, V>> execute();
-
-        /**
-         * Invoked to notify EvictionPolicy implementation that there has been an attempt to access
-         * an entry in Segment, but that entry was not present in Segment.
-         * 
-         * @param e
-         *            accessed entry in Segment
-         */
-        void onEntryMiss(HashEntry<K, V> e);
-
-        /**
-         * Invoked to notify EvictionPolicy implementation that an entry in Segment has been
-         * accessed. Returns true if batching threshold has been reached, false otherwise.
-         * <p>
-         * Note that this method is potentially invoked without holding a lock on Segment.
-         * 
-         * @return true if batching threshold has been reached, false otherwise.
-         * 
-         * @param e
-         *            accessed entry in Segment
-         */
-        boolean onEntryHit(HashEntry<K, V> e);
-
-        /**
-         * Invoked to notify EvictionPolicy implementation that an entry e has been removed from
-         * Segment.
-         * 
-         * @param e
-         *            removed entry in Segment
-         */
-        void onEntryRemove(HashEntry<K, V> e);
-
-        /**
-         * Invoked to notify EvictionPolicy implementation that all Segment entries have been
-         * cleared.
-         * 
-         */
-        void clear();
-
-        /**
-         * Returns type of eviction algorithm (strategy).
-         * 
-         * @return type of eviction algorithm
-         */
-        Eviction strategy();
-
-        /**
-         * Returns true if batching threshold has expired, false otherwise.
-         * <p>
-         * Note that this method is potentially invoked without holding a lock on Segment.
-         * 
-         * @return true if batching threshold has expired, false otherwise.
-         */
-        boolean thresholdExpired();
-    }
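To make the contract concrete, here is a purely illustrative FIFO policy
written against the interface above. It cannot actually be plugged into the
map, since policies are only instantiated through the Eviction enum factory
(hence the placeholder strategy() return), but it shows which callback does
what: onEntryMiss records insertions, onEntryRemove keeps the bookkeeping in
sync, and execute trims back to a hypothetical trimDownSize:

    // Illustrative only: FIFO bookkeeping over the same HashEntry/Segment types
    static final class FIFO<K, V> implements EvictionPolicy<K, V> {
        private final Segment<K, V> segment;
        private final LinkedList<HashEntry<K, V>> fifoQueue =
                new LinkedList<HashEntry<K, V>>();
        private final int trimDownSize;

        FIFO(Segment<K, V> s, int capacity, float lf) {
            this.segment = s;
            this.trimDownSize = (int) (capacity * lf);
        }

        @Override
        public Set<HashEntry<K, V>> execute() {
            Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
            while (fifoQueue.size() > trimDownSize) {
                HashEntry<K, V> oldest = fifoQueue.getLast();
                segment.remove(oldest.key, oldest.hash, null); // triggers onEntryRemove
                evicted.add(oldest);
            }
            return evicted;
        }

        @Override
        public void onEntryMiss(HashEntry<K, V> e) { fifoQueue.addFirst(e); }

        @Override
        public boolean onEntryHit(HashEntry<K, V> e) { return false; } // insertion order only

        @Override
        public void onEntryRemove(HashEntry<K, V> e) { fifoQueue.remove(e); }

        @Override
        public void clear() { fifoQueue.clear(); }

        @Override
        public Eviction strategy() { return Eviction.NONE; } // no dedicated constant exists

        @Override
        public boolean thresholdExpired() { return false; } // nothing is batched
    }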
-
-    static class NullEvictionPolicy<K, V> implements EvictionPolicy<K, V> {
-
-        @Override
-        public void clear() {
-        }
-
-        @Override
-        public Set<HashEntry<K, V>> execute() {
-            return Collections.emptySet();
-        }
-
-        @Override
-        public boolean onEntryHit(HashEntry<K, V> e) {
-            return false;
-        }
-
-        @Override
-        public void onEntryMiss(HashEntry<K, V> e) {
-        }
-
-        @Override
-        public void onEntryRemove(HashEntry<K, V> e) {
-        }
-
-        @Override
-        public boolean thresholdExpired() {
-            return false;
-        }
-
-        @Override
-        public Eviction strategy() {
-            return Eviction.NONE;
-        }
-    }
-
-    static final class LRU<K, V> implements EvictionPolicy<K, V> {
-        private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
-        private final Segment<K,V> segment;
-        private final LinkedList<HashEntry<K, V>> lruQueue;
-        private final int maxBatchQueueSize;
-        private final int trimDownSize;
-        private final float batchThresholdFactor;
-
-        public LRU(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
-            this.segment = s;
-            this.trimDownSize = (int) (capacity * lf);
-            this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
-            this.batchThresholdFactor = batchThresholdFactor;
-            this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
-            this.lruQueue = new LinkedList<HashEntry<K, V>>();
-        }
-
-        @Override
-        public Set<HashEntry<K, V>> execute() {
-            Set<HashEntry<K, V>> evicted = Collections.emptySet();
-            if (isOverflow()) {
-                evicted = new HashSet<HashEntry<K, V>>();
-            }
-            try {
-                for (HashEntry<K, V> e : accessQueue) {
-                    if (lruQueue.remove(e)) {
-                        lruQueue.addFirst(e);
-                    }
-                }
-                while (isOverflow()) {
-                    HashEntry<K, V> first = lruQueue.getLast();
-                    segment.remove(first.key, first.hash, null);
-                    evicted.add(first);
-                }
-            } finally {
-                accessQueue.clear();
-            }
-            return evicted;
-        }
-
-        private boolean isOverflow() {
-            return lruQueue.size() > trimDownSize;
-        }
-
-        @Override
-        public void onEntryMiss(HashEntry<K, V> e) {
-            lruQueue.addFirst(e);
-        }
-
-        /*
-         * Invoked without holding a lock on Segment
-         */
-        @Override
-        public boolean onEntryHit(HashEntry<K, V> e) {
-            accessQueue.add(e);
-            return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
-        }
-
-        /*
-         * Invoked without holding a lock on Segment
-         */
-        @Override
-        public boolean thresholdExpired() {
-            return accessQueue.size() >= maxBatchQueueSize;
-        }
-
-        @Override
-        public void onEntryRemove(HashEntry<K, V> e) {
-            lruQueue.remove(e);
-            // we could have multiple instances of e in accessQueue; remove them all
-            while (accessQueue.remove(e));
-        }
-
-        @Override
-        public void clear() {
-            lruQueue.clear();
-            accessQueue.clear();
-        }
-
-        @Override
-        public Eviction strategy() {
-            return Eviction.LRU;
-        }
-    }
-
-    static final class LIRS<K, V> implements EvictionPolicy<K, V> {
-        private final static int MIN_HIR_SIZE = 2;
-        private final Segment<K,V> segment;
-        private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
-        private final LinkedHashMap<Integer, HashEntry<K, V>> stack;
-        private final LinkedList<HashEntry<K, V>> queue;
-        private final int maxBatchQueueSize;
-        private final int lirSizeLimit;
-        private final int hirSizeLimit;
-        private int currentLIRSize;
-        private final float batchThresholdFactor;
-
-        public LIRS(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
-            this.segment = s;
-            int tmpLirSize = (int) (capacity * 0.9);
-            int tmpHirSizeLimit = capacity - tmpLirSize;
-            if (tmpHirSizeLimit < MIN_HIR_SIZE) {
-                hirSizeLimit = MIN_HIR_SIZE;
-                lirSizeLimit = capacity - hirSizeLimit;
-            } else {
-                hirSizeLimit = tmpHirSizeLimit;
-                lirSizeLimit = tmpLirSize;
-            }
-            this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
-            this.batchThresholdFactor = batchThresholdFactor;
-            this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
-            this.stack = new LinkedHashMap<Integer, HashEntry<K, V>>();
-            this.queue = new LinkedList<HashEntry<K, V>>();
-        }
-
-        @Override
-        public Set<HashEntry<K, V>> execute() {
-            Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
-            try {
-                for (HashEntry<K, V> e : accessQueue) {
-                    if (present(e)) {
-                        if (e.recency() == Recency.LIR_RESIDENT) {
-                            handleLIRHit(e, evicted);
-                        } else if (e.recency() == Recency.HIR_RESIDENT) {
-                            handleHIRHit(e, evicted);
-                        }
-                    }
-                }
-                removeFromSegment(evicted);
-            } finally {
-                accessQueue.clear();
-            }
-            return evicted;
-        }
-
-        private void handleHIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
-            boolean inStack = stack.containsKey(e.hashCode());
-            if (inStack)
-                stack.remove(e.hashCode());
-
-            // first put on top of the stack
-            stack.put(e.hashCode(), e);
-
-            if (inStack) {                
-                queue.remove(e);
-                e.transitionToLIRResident();
-                switchBottomostLIRtoHIRAndPrune(evicted);
-            } else {               
-                queue.remove(e);
-                queue.addLast(e);
-            }
-        }
-
-        private void handleLIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
-            stack.remove(e.hashCode());
-            stack.put(e.hashCode(), e);
-            for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
-                HashEntry<K, V> next = i.next();
-                if (next.recency() == Recency.LIR_RESIDENT) {
-                    break;
-                } else {
-                    i.remove();
-                    evicted.add(next);
-                }
-            }
-        }
-
-        private boolean present(HashEntry<K, V> e) {
-            return stack.containsKey(e.hashCode()) || queue.contains(e);
-        }
-
-        @Override
-        public void onEntryMiss(HashEntry<K, V> e) {
-            // initialization
-            if (currentLIRSize + 1 < lirSizeLimit) {
-                currentLIRSize++;
-                e.transitionToLIRResident();
-                stack.put(e.hashCode(), e);
-            } else {
-                if (queue.size() < hirSizeLimit) {                    
-                    queue.addLast(e);
-                } else {
-                    boolean inStack = stack.containsKey(e.hashCode());
-                    HashEntry<K, V> first = queue.removeFirst();                    
-                    first.transitionHIRResidentToHIRNonResident();
-
-                    stack.put(e.hashCode(), e);
-
-                    if (inStack) {
-                        e.transitionToLIRResident();
-                        Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
-                        switchBottomostLIRtoHIRAndPrune(evicted);
-                        removeFromSegment(evicted);
-                    } else {                        
-                        queue.addLast(e);
-                    }
-
-                    // evict from segment
-                    segment.remove(first.key, first.hash, null);
-                }
-            }
-        }
-
-        private void removeFromSegment(Set<HashEntry<K, V>> evicted) {
-            for (HashEntry<K, V> e : evicted) {
-                segment.remove(e.key, e.hash, null);
-            }
-        }
-
-        private void switchBottomostLIRtoHIRAndPrune(Set<HashEntry<K, V>> evicted) {
-            boolean seenFirstLIR = false;
-            for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
-                HashEntry<K, V> next = i.next();
-                if (next.recency() == Recency.LIR_RESIDENT) {
-                    if (!seenFirstLIR) {
-                        seenFirstLIR = true;
-                        i.remove();
-                        next.transitionLIRResidentToHIRResident();                       
-                        queue.addLast(next);
-                    } else {
-                        break;
-                    }
-                } else {
-                    i.remove();
-                    evicted.add(next);
-                }
-            }
-        }
-
-        /*
-         * Invoked without holding a lock on Segment
-         */
-        @Override
-        public boolean onEntryHit(HashEntry<K, V> e) {
-            accessQueue.add(e);
-            return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
-        }
-
-        /*
-         * Invoked without holding a lock on Segment
-         */
-        @Override
-        public boolean thresholdExpired() {
-            return accessQueue.size() >= maxBatchQueueSize;
-        }
-
-        @Override
-        public void onEntryRemove(HashEntry<K, V> e) {
-            HashEntry<K, V> removed = stack.remove(e.hashCode());
-            if (removed != null && removed.recency() == Recency.LIR_RESIDENT) {
-                currentLIRSize--;
-            }
-            queue.remove(e);
-            // we could have multiple instances of e in accessQueue; remove them all
-            while (accessQueue.remove(e));
-        }
-
-        @Override
-        public void clear() {
-            stack.clear();
-            queue.clear();
-            accessQueue.clear();
-        }
-
-        @Override
-        public Eviction strategy() {
-            return Eviction.LIRS;
-        }
-    }
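For reference, the LIRS constructor above reserves roughly 90% of capacity
for LIR (low inter-reference recency) blocks and the remainder, but never
fewer than MIN_HIR_SIZE, for resident HIR blocks; the stack tracks recency
while the queue holds resident HIR entries. The sizing arithmetic, restated
standalone (illustrative only):

    // Illustrative restatement of the LIRS sizing rule above
    static int[] lirsLimits(int capacity) {
        final int MIN_HIR_SIZE = 2;
        int lir = (int) (capacity * 0.9);
        int hir = capacity - lir;
        if (hir < MIN_HIR_SIZE) {
            hir = MIN_HIR_SIZE;
            lir = capacity - hir;
        }
        return new int[] { lir, hir }; // capacity 512 -> {460, 52}; capacity 10 -> {8, 2}
    }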
-
-    /**
-     * Segments are specialized versions of hash tables. This subclasses from ReentrantLock
-     * opportunistically, just to simplify some locking and avoid separate construction.
-     */
-    static final class Segment<K, V> extends ReentrantLock implements Serializable {
-
-        /*
-         * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so
-         * can be read without locking. Next fields of nodes are immutable (final). All list
-         * additions are performed at the front of each bin. This makes it easy to check changes,
-         * and also fast to traverse. When nodes would otherwise be changed, new nodes are created
-         * to replace them. This works well for hash tables since the bin lists tend to be short.
-         * (The average length is less than two for the default load factor threshold.)
-         * 
-         * Read operations can thus proceed without locking, but rely on selected uses of volatiles
-         * to ensure that completed write operations performed by other threads are noticed. For
-         * most purposes, the "count" field, tracking the number of elements, serves as that
-         * volatile variable ensuring visibility. This is convenient because this field needs to be
-         * read in many read operations anyway:
-         * 
-         * - All (unsynchronized) read operations must first read the "count" field, and should not
-         * look at table entries if it is 0.
-         * 
-         * - All (synchronized) write operations should write to the "count" field after
-         * structurally changing any bin. The operations must not take any action that could even
-         * momentarily cause a concurrent read operation to see inconsistent data. This is made
-         * easier by the nature of the read operations in Map. For example, no operation can reveal
-         * that the table has grown but the threshold has not yet been updated, so there are no
-         * atomicity requirements for this with respect to reads.
-         * 
-         * As a guide, all critical volatile reads and writes to the count field are marked in code
-         * comments.
-         */
-
-        private static final long serialVersionUID = 2249069246763182397L;
-
-        /**
-         * The number of elements in this segment's region.
-         */
-        transient volatile int count;
-
-        /**
-         * Number of updates that alter the size of the table. This is used during bulk-read methods
-         * to make sure they see a consistent snapshot: If modCounts change during a traversal of
-         * segments computing size or checking containsValue, then we might have an inconsistent
-         * view of state so (usually) must retry.
-         */
-        transient int modCount;
-
-        /**
-         * The table is rehashed when its size exceeds this threshold. (The value of this field is
-         * always <tt>(int)(capacity *
-         * loadFactor)</tt>.)
-         */
-        transient int threshold;
-
-        /**
-         * The per-segment table.
-         */
-        transient volatile HashEntry<K, V>[] table;
-
-        transient final EvictionPolicy<K, V> eviction;
-
-        transient final EvictionListener<K, V> evictionListener;
-
-        /**
-         * The load factor for the hash table. Even though this value is same for all segments, it
-         * is replicated to avoid needing links to outer object.
-         * 
-         * @serial
-         */
-        final float loadFactor;
-
-        Segment(int cap, float lf, Eviction es, EvictionListener<K, V> listener) {
-            loadFactor = lf;
-            eviction = es.make(this, cap, lf);
-            evictionListener = listener;
-            setTable(HashEntry.<K, V> newArray(cap));
-        }
-
-        @SuppressWarnings("unchecked")
-        static <K, V> Segment<K, V>[] newArray(int i) {
-            return new Segment[i];
-        }
-
-        /**
-         * Sets table to new HashEntry array. Call only while holding lock or in constructor.
-         */
-        void setTable(HashEntry<K, V>[] newTable) {
-            threshold = (int) (newTable.length * loadFactor);
-            table = newTable;
-        }
-
-        /**
-         * Returns properly casted first entry of bin for given hash.
-         */
-        HashEntry<K, V> getFirst(int hash) {
-            HashEntry<K, V>[] tab = table;
-            return tab[hash & (tab.length - 1)];
-        }
-
-        /**
-         * Reads value field of an entry under lock. Called if value field ever appears to be null.
-         * This is possible only if a compiler happens to reorder a HashEntry initialization with
-         * its table assignment, which is legal under memory model but is not known to ever occur.
-         */
-        V readValueUnderLock(HashEntry<K, V> e) {
-            lock();
-            try {
-                return e.value;
-            } finally {
-                unlock();
-            }
-        }
-
-        V get(Object key, int hash) {
-            int c = count;
-            if (c != 0) { // read-volatile
-                V result = null;
-                HashEntry<K, V> e = getFirst(hash);
-                loop: while (e != null) {
-                    if (e.hash == hash && key.equals(e.key)) {
-                        V v = e.value;
-                        if (v != null) {
-                            result = v;
-                            break loop;
-                        } else {
-                            result = readValueUnderLock(e); // recheck
-                            break loop;
-                        }
-                    }
-                    e = e.next;
-                }
-                // a hit
-                if (result != null) {
-                    if (eviction.onEntryHit(e)) {
-                        Set<HashEntry<K, V>> evicted = attemptEviction(false);
-                        // piggyback listener invocation on caller's thread outside lock
-                        if (evicted != null) {
-                            for (HashEntry<K, V> he : evicted) {
-                                evictionListener.evicted(he.key, he.value);
-                            }
-                        }
-                    }
-                }
-                return result;
-            }
-            return null;
-        }
-
-        private Set<HashEntry<K, V>> attemptEviction(boolean lockedAlready) {
-            Set<HashEntry<K, V>> evicted = null;
-            boolean obtainedLock = !lockedAlready ? tryLock() : true;
-            if (!obtainedLock && eviction.thresholdExpired()) {
-                lock();
-                obtainedLock = true;
-            }
-            if (obtainedLock) {
-                try {
-                    evicted = eviction.execute();
-                } finally {
-                    if (!lockedAlready)
-                        unlock();
-                }
-            }
-            return evicted;
-        }
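The tryLock-then-escalate idiom above is the heart of the lock amortization
described in the header comment: hits are batched in a lock-free access queue,
and only the thread that crosses the batching threshold attempts eviction,
first opportunistically with tryLock() and, only once the hard threshold
(thresholdExpired) is exceeded, with a blocking lock(). A standalone sketch of
the same idiom (hypothetical names and limits, not this class's API):

    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.locks.ReentrantLock;

    // Hypothetical illustration of batched, lock-amortized maintenance
    class AmortizedMaintenance<E> {
        private final ReentrantLock lock = new ReentrantLock();
        private final ConcurrentLinkedQueue<E> batch = new ConcurrentLinkedQueue<E>();
        private static final int SOFT_LIMIT = 48; // try maintenance opportunistically
        private static final int HARD_LIMIT = 64; // force maintenance once overdue

        void recordAccess(E e) {
            batch.add(e);
            if (batch.size() >= SOFT_LIMIT) {
                boolean locked = lock.tryLock();          // opportunistic
                if (!locked && batch.size() >= HARD_LIMIT) {
                    lock.lock();                          // mandatory once overdue
                    locked = true;
                }
                if (locked) {
                    try {
                        drainAndMaintain();
                    } finally {
                        lock.unlock();
                    }
                }
            }
        }

        private void drainAndMaintain() {
            for (E e; (e = batch.poll()) != null; ) {
                // apply deferred bookkeeping for e
            }
        }
    }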
-
-        boolean containsKey(Object key, int hash) {
-            if (count != 0) { // read-volatile
-                HashEntry<K, V> e = getFirst(hash);
-                while (e != null) {
-                    if (e.hash == hash && key.equals(e.key))
-                        return true;
-                    e = e.next;
-                }
-            }
-            return false;
-        }
-
-        boolean containsValue(Object value) {
-            if (count != 0) { // read-volatile
-                HashEntry<K, V>[] tab = table;
-                int len = tab.length;
-                for (int i = 0; i < len; i++) {
-                    for (HashEntry<K, V> e = tab[i]; e != null; e = e.next) {
-                        V v = e.value;
-                        if (v == null) // recheck
-                            v = readValueUnderLock(e);
-                        if (value.equals(v))
-                            return true;
-                    }
-                }
-            }
-            return false;
-        }
-
-        boolean replace(K key, int hash, V oldValue, V newValue) {
-            lock();
-            Set<HashEntry<K, V>> evicted = null;
-            try {
-                HashEntry<K, V> e = getFirst(hash);
-                while (e != null && (e.hash != hash || !key.equals(e.key)))
-                    e = e.next;
-
-                boolean replaced = false;
-                if (e != null && oldValue.equals(e.value)) {
-                    replaced = true;
-                    e.value = newValue;
-                    if (eviction.onEntryHit(e)) {
-                        evicted = attemptEviction(true);
-                    }
-                }
-                return replaced;
-            } finally {
-                unlock();
-                // piggyback listener invocation on caller's thread outside lock
-                if (evicted != null) {
-                    for (HashEntry<K, V> he : evicted) {
-                        evictionListener.evicted(he.key, he.value);
-                    }
-                }
-            }
-        }
-
-        V replace(K key, int hash, V newValue) {
-            lock();
-            Set<HashEntry<K, V>> evicted = null;
-            try {
-                HashEntry<K, V> e = getFirst(hash);
-                while (e != null && (e.hash != hash || !key.equals(e.key)))
-                    e = e.next;
-
-                V oldValue = null;
-                if (e != null) {
-                    oldValue = e.value;
-                    e.value = newValue;
-                    if (eviction.onEntryHit(e)) {
-                        evicted = attemptEviction(true);
-                    }
-                }
-                return oldValue;
-            } finally {
-                unlock();
-                // piggyback listener invocation on caller's thread outside lock
-                if (evicted != null) {
-                    for (HashEntry<K, V> he : evicted) {
-                        evictionListener.evicted(he.key, he.value);
-                    }                
-                }
-            }
-        }
-
-        V put(K key, int hash, V value, boolean onlyIfAbsent) {
-            lock();
-            Set<HashEntry<K, V>> evicted = null;
-            try {
-                int c = count;
-                if (c++ > threshold && eviction.strategy() == Eviction.NONE) // ensure capacity
-                    rehash();
-                HashEntry<K, V>[] tab = table;
-                int index = hash & (tab.length - 1);
-                HashEntry<K, V> first = tab[index];
-                HashEntry<K, V> e = first;
-                while (e != null && (e.hash != hash || !key.equals(e.key)))
-                    e = e.next;
-
-                V oldValue;
-                if (e != null) {
-                    oldValue = e.value;
-                    if (!onlyIfAbsent) {
-                        e.value = value;
-                        eviction.onEntryHit(e);
-                    }
-                } else {
-                    oldValue = null;
-                    ++modCount;
-                    count = c; // write-volatile
-                    if (eviction.strategy() != Eviction.NONE) {
-                        if (c > tab.length) {
-                            // remove entries; lower the count
-                            evicted = eviction.execute();
-                            // re-read first
-                            first = tab[index];
-                        }
-                        // add a new entry
-                        tab[index] = new HashEntry<K, V>(key, hash, first, value);
-                        // notify a miss
-                        eviction.onEntryMiss(tab[index]);
-                    } else {
-                        tab[index] = new HashEntry<K, V>(key, hash, first, value);
-                    }
-                }
-                return oldValue;
-            } finally {
-                unlock();
-                // piggyback listener invocation on caller's thread outside lock
-                if (evicted != null) {
-                    for (HashEntry<K, V> he : evicted) {
-                        evictionListener.evicted(he.key, he.value);
-                    }                
-                }
-            }
-        }
-
-        void rehash() {
-            HashEntry<K, V>[] oldTable = table;
-            int oldCapacity = oldTable.length;
-            if (oldCapacity >= MAXIMUM_CAPACITY)
-                return;
-
-            /*
-             * Reclassify nodes in each list to new Map. Because we are using power-of-two
-             * expansion, the elements from each bin must either stay at same index, or move with a
-             * power of two offset. We eliminate unnecessary node creation by catching cases where
-             * old nodes can be reused because their next fields won't change. Statistically, at the
-             * default threshold, only about one-sixth of them need cloning when a table doubles.
-             * The nodes they replace will be garbage collectable as soon as they are no longer
-             * referenced by any reader thread that may be in the midst of traversing table right
-             * now.
-             */
-
-            HashEntry<K, V>[] newTable = HashEntry.newArray(oldCapacity << 1);
-            threshold = (int) (newTable.length * loadFactor);
-            int sizeMask = newTable.length - 1;
-            for (int i = 0; i < oldCapacity; i++) {
-                // We need to guarantee that any existing reads of old Map can
-                // proceed. So we cannot yet null out each bin.
-                HashEntry<K, V> e = oldTable[i];
-
-                if (e != null) {
-                    HashEntry<K, V> next = e.next;
-                    int idx = e.hash & sizeMask;
-
-                    // Single node on list
-                    if (next == null)
-                        newTable[idx] = e;
-
-                    else {
-                        // Reuse trailing consecutive sequence at same slot
-                        HashEntry<K, V> lastRun = e;
-                        int lastIdx = idx;
-                        for (HashEntry<K, V> last = next; last != null; last = last.next) {
-                            int k = last.hash & sizeMask;
-                            if (k != lastIdx) {
-                                lastIdx = k;
-                                lastRun = last;
-                            }
-                        }
-                        newTable[lastIdx] = lastRun;
-
-                        // Clone all remaining nodes
-                        for (HashEntry<K, V> p = e; p != lastRun; p = p.next) {
-                            int k = p.hash & sizeMask;
-                            HashEntry<K, V> n = newTable[k];
-                            newTable[k] = new HashEntry<K, V>(p.key, p.hash, n, p.value);
-                        }
-                    }
-                }
-            }
-            table = newTable;
-        }
-
-        /**
-         * Remove; match on key only if value null, else match both.
-         */
-        V remove(Object key, int hash, Object value) {
-            lock();
-            try {
-                int c = count - 1;
-                HashEntry<K, V>[] tab = table;
-                int index = hash & (tab.length - 1);
-                HashEntry<K, V> first = tab[index];
-                HashEntry<K, V> e = first;
-                while (e != null && (e.hash != hash || !key.equals(e.key)))
-                    e = e.next;
-
-                V oldValue = null;
-                if (e != null) {
-                    V v = e.value;
-                    if (value == null || value.equals(v)) {
-                        oldValue = v;
-                        // All entries following removed node can stay
-                        // in list, but all preceding ones need to be
-                        // cloned.
-                        ++modCount;
-
-                        // e was removed
-                        eviction.onEntryRemove(e);
-
-                        HashEntry<K, V> newFirst = e.next;
-                        for (HashEntry<K, V> p = first; p != e; p = p.next) {
-                            // allow p to be GC-ed
-                            eviction.onEntryRemove(p);
-                            newFirst = new HashEntry<K, V>(p.key, p.hash, newFirst, p.value);
-                            // and notify eviction algorithm about new hash entries
-                            eviction.onEntryMiss(newFirst);
-                        }
-
-                        tab[index] = newFirst;
-                        count = c; // write-volatile
-                    }
-                }
-                return oldValue;
-            } finally {
-                unlock();
-            }
-        }
-
-        void clear() {
-            if (count != 0) {
-                lock();
-                try {
-                    HashEntry<K, V>[] tab = table;
-                    for (int i = 0; i < tab.length; i++)
-                        tab[i] = null;
-                    ++modCount;
-                    eviction.clear();
-                    count = 0; // write-volatile
-                } finally {
-                    unlock();
-                }
-            }
-        }
-    }
-
-    /* ---------------- Public operations -------------- */
-
-    /**
-     * Creates a new, empty map with the specified initial capacity, load factor and concurrency
-     * level. Note that initialCapacity is in fact the targeted maximum map capacity when an
-     * eviction strategy other than <code>Eviction.NONE</code> is used.
-     * 
-     * @param initialCapacity
-     *            the initial capacity. The implementation performs internal sizing to accommodate
-     *            this many elements. Note that the initial capacity becomes the maximum capacity
-     *            when an eviction strategy other than <code>Eviction.NONE</code> is used.
-     *            
-     * @param loadFactor
-     *            the load factor threshold, used to control resizing. Resizing may be performed
-     *            when the average number of elements per bin exceeds this threshold.
-     * @param concurrencyLevel
-     *            the estimated number of concurrently updating threads. The implementation performs
-     *            internal sizing to try to accommodate this many threads.
-     * 
-     * @param evictionStrategy
-     *            the algorithm used to evict elements from this map
-     * 
-     * @param evictionListener
-     *            the eviction listener callback to be notified about evicted elements
-     * 
-     * @throws IllegalArgumentException
-     *             if the initial capacity is negative or the load factor or concurrencyLevel are
-     *             nonpositive.
-     */
-    public BufferedConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel,
-                    Eviction evictionStrategy, EvictionListener<K, V> evictionListener) {
-        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
-            throw new IllegalArgumentException();
-        
-        if (evictionStrategy == null || evictionListener == null)
-            throw new IllegalArgumentException();
-
-        if (concurrencyLevel > MAX_SEGMENTS)
-            concurrencyLevel = MAX_SEGMENTS;
-
-        // Find power-of-two sizes best matching arguments
-        int sshift = 0;
-        int ssize = 1;
-        while (ssize < concurrencyLevel) {
-            ++sshift;
-            ssize <<= 1;
-        }
-        segmentShift = 32 - sshift;
-        segmentMask = ssize - 1;
-        this.segments = Segment.newArray(ssize);
-
-        if (initialCapacity > MAXIMUM_CAPACITY)
-            initialCapacity = MAXIMUM_CAPACITY;
-        int c = initialCapacity / ssize;
-        if (c * ssize < initialCapacity)
-            ++c;
-        int cap = 1;
-        while (cap < c)
-            cap <<= 1;
-        
-        for (int i = 0; i < this.segments.length; ++i)
-            this.segments[i] = new Segment<K, V>(cap, loadFactor, evictionStrategy,
-                            evictionListener);
-    }
-
-    public BufferedConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
-        this(initialCapacity, loadFactor, concurrencyLevel, Eviction.LRU);
-    }
-    
-    public BufferedConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel, Eviction evictionStrategy) {
-        this(initialCapacity, loadFactor, concurrencyLevel, evictionStrategy, new NullEvictionListener<K, V>());
-    }
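Putting the pieces together, a caller picks an eviction strategy (and
optionally a listener) at construction time; with Eviction.LRU or
Eviction.LIRS the initial capacity doubles as the maximum capacity. A usage
sketch, reusing the hypothetical CountingEvictionListener from the earlier
sketch:

    BufferedConcurrentHashMap<String, byte[]> cache =
            new BufferedConcurrentHashMap<String, byte[]>(
                    1024,   // targeted maximum capacity under eviction
                    0.75f,  // load factor / trim-down threshold factor
                    16,     // concurrency level -> 16 segments
                    BufferedConcurrentHashMap.Eviction.LIRS,
                    new CountingEvictionListener<String, byte[]>());
    cache.put("a", new byte[128]); // inserts beyond capacity trigger batched eviction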
-
-    /**
-     * Creates a new, empty map with the specified initial capacity and load factor and with the
-     * default concurrencyLevel (16).
-     * 
-     * @param initialCapacity
-     *            The implementation performs internal sizing to accommodate this many elements.
-     * @param loadFactor
-     *            the load factor threshold, used to control resizing. Resizing may be performed
-     *            when the average number of elements per bin exceeds this threshold.
-     * @throws IllegalArgumentException
-     *             if the initial capacity of elements is negative or the load factor is nonpositive
-     * 
-     * @since 1.6
-     */
-    public BufferedConcurrentHashMap(int initialCapacity, float loadFactor) {
-        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
-    }
-
-    /**
-     * Creates a new, empty map with the specified initial capacity, and with default load factor
-     * (0.75) and concurrencyLevel (16).
-     * 
-     * @param initialCapacity
-     *            the initial capacity. The implementation performs internal sizing to accommodate
-     *            this many elements.
-     * @throws IllegalArgumentException
-     *             if the initial capacity of elements is negative.
-     */
-    public BufferedConcurrentHashMap(int initialCapacity) {
-        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
-    }
-
-    /**
-     * Creates a new, empty map with a default initial capacity (16), load factor (0.75) and
-     * concurrencyLevel (16).
-     */
-    public BufferedConcurrentHashMap() {
-        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
-    }
-
-    /**
-     * Creates a new map with the same mappings as the given map. The map is created with a capacity
-     * of 1.5 times the number of mappings in the given map or 16 (whichever is greater), and a
-     * default load factor (0.75) and concurrencyLevel (16).
-     * 
-     * @param m
-     *            the map
-     */
-    public BufferedConcurrentHashMap(Map<? extends K, ? extends V> m) {
-        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, DEFAULT_INITIAL_CAPACITY),
-                        DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
-        putAll(m);
-    }
-
-    /**
-     * Returns <tt>true</tt> if this map contains no key-value mappings.
-     * 
-     * @return <tt>true</tt> if this map contains no key-value mappings
-     */
-    public boolean isEmpty() {
-        final Segment<K, V>[] segments = this.segments;
-        /*
-         * We keep track of per-segment modCounts to avoid ABA problems in which an element in one
-         * segment was added and in another removed during traversal, in which case the table was
-         * never actually empty at any point. Note the similar use of modCounts in the size() and
-         * containsValue() methods, which are the only other methods also susceptible to ABA
-         * problems.
-         */
-        int[] mc = new int[segments.length];
-        int mcsum = 0;
-        for (int i = 0; i < segments.length; ++i) {
-            if (segments[i].count != 0)
-                return false;
-            else
-                mcsum += mc[i] = segments[i].modCount;
-        }
-        // If mcsum happens to be zero, then we know we got a snapshot
-        // before any modifications at all were made. This is
-        // probably common enough to bother tracking.
-        if (mcsum != 0) {
-            for (int i = 0; i < segments.length; ++i) {
-                if (segments[i].count != 0 || mc[i] != segments[i].modCount)
-                    return false;
-            }
-        }
-        return true;
-    }
-
-    /**
-     * Returns the number of key-value mappings in this map. If the map contains more than
-     * <tt>Integer.MAX_VALUE</tt> elements, returns <tt>Integer.MAX_VALUE</tt>.
-     * 
-     * @return the number of key-value mappings in this map
-     */
-    public int size() {
-        final Segment<K, V>[] segments = this.segments;
-        long sum = 0;
-        long check = 0;
-        int[] mc = new int[segments.length];
-        // Try a few times to get accurate count. On failure due to
-        // continuous async changes in table, resort to locking.
-        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
-            check = 0;
-            sum = 0;
-            int mcsum = 0;
-            for (int i = 0; i < segments.length; ++i) {
-                sum += segments[i].count;
-                mcsum += mc[i] = segments[i].modCount;
-            }
-            if (mcsum != 0) {
-                for (int i = 0; i < segments.length; ++i) {
-                    check += segments[i].count;
-                    if (mc[i] != segments[i].modCount) {
-                        check = -1; // force retry
-                        break;
-                    }
-                }
-            }
-            if (check == sum)
-                break;
-        }
-        if (check != sum) { // Resort to locking all segments
-            sum = 0;
-            for (int i = 0; i < segments.length; ++i)
-                segments[i].lock();
-            for (int i = 0; i < segments.length; ++i)
-                sum += segments[i].count;
-            for (int i = 0; i < segments.length; ++i)
-                segments[i].unlock();
-        }
-        if (sum > Integer.MAX_VALUE)
-            return Integer.MAX_VALUE;
-        else
-            return (int) sum;
-    }
-
-    /**
-     * Returns the value to which the specified key is mapped, or {@code null} if this map contains
-     * no mapping for the key.
-     * 
-     * <p>
-     * More formally, if this map contains a mapping from a key {@code k} to a value {@code v} such
-     * that {@code key.equals(k)}, then this method returns {@code v}; otherwise it returns {@code
-     * null}. (There can be at most one such mapping.)
-     * 
-     * @throws NullPointerException
-     *             if the specified key is null
-     */
-    public V get(Object key) {
-        int hash = hash(key.hashCode());
-        return segmentFor(hash).get(key, hash);
-    }
-
-    /**
-     * Tests if the specified object is a key in this table.
-     * 
-     * @param key
-     *            possible key
-     * @return <tt>true</tt> if and only if the specified object is a key in this table, as
-     *         determined by the <tt>equals</tt> method; <tt>false</tt> otherwise.
-     * @throws NullPointerException
-     *             if the specified key is null
-     */
-    public boolean containsKey(Object key) {
-        int hash = hash(key.hashCode());
-        return segmentFor(hash).containsKey(key, hash);
-    }
-
-    /**
-     * Returns <tt>true</tt> if this map maps one or more keys to the specified value. Note: This
-     * method requires a full internal traversal of the hash table, and so is much slower than
-     * method <tt>containsKey</tt>.
-     * 
-     * @param value
-     *            value whose presence in this map is to be tested
-     * @return <tt>true</tt> if this map maps one or more keys to the specified value
-     * @throws NullPointerException
-     *             if the specified value is null
-     */
-    public boolean containsValue(Object value) {
-        if (value == null)
-            throw new NullPointerException();
-
-        // See explanation of modCount use above
-
-        final Segment<K, V>[] segments = this.segments;
-        int[] mc = new int[segments.length];
-
-        // Try a few times without locking
-        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
-            int sum = 0;
-            int mcsum = 0;
-            for (int i = 0; i < segments.length; ++i) {
-                int c = segments[i].count;
-                mcsum += mc[i] = segments[i].modCount;
-                if (segments[i].containsValue(value))
-                    return true;
-            }
-            boolean cleanSweep = true;
-            if (mcsum != 0) {
-                for (int i = 0; i < segments.length; ++i) {
-                    int c = segments[i].count;
-                    if (mc[i] != segments[i].modCount) {
-                        cleanSweep = false;
-                        break;
-                    }
-                }
-            }
-            if (cleanSweep)
-                return false;
-        }
-        // Resort to locking all segments
-        for (int i = 0; i < segments.length; ++i)
-            segments[i].lock();
-        boolean found = false;
-        try {
-            for (int i = 0; i < segments.length; ++i) {
-                if (segments[i].containsValue(value)) {
-                    found = true;
-                    break;
-                }
-            }
-        } finally {
-            for (int i = 0; i < segments.length; ++i)
-                segments[i].unlock();
-        }
-        return found;
-    }
-
-    /**
-     * Legacy method testing if some key maps into the specified value in this table. This method is
-     * identical in functionality to {@link #containsValue}, and exists solely to ensure full
-     * compatibility with class {@link java.util.Hashtable}, which supported this method prior to
-     * introduction of the Java Collections framework.
-     * 
-     * @param value
-     *            a value to search for
-     * @return <tt>true</tt> if and only if some key maps to the <tt>value</tt> argument in this
-     *         table as determined by the <tt>equals</tt> method; <tt>false</tt> otherwise
-     * @throws NullPointerException
-     *             if the specified value is null
-     */
-    public boolean contains(Object value) {
-        return containsValue(value);
-    }
-
-    /**
-     * Maps the specified key to the specified value in this table. Neither the key nor the value
-     * can be null.
-     * 
-     * <p>
-     * The value can be retrieved by calling the <tt>get</tt> method with a key that is equal to the
-     * original key.
-     * 
-     * @param key
-     *            key with which the specified value is to be associated
-     * @param value
-     *            value to be associated with the specified key
-     * @return the previous value associated with <tt>key</tt>, or <tt>null</tt> if there was no
-     *         mapping for <tt>key</tt>
-     * @throws NullPointerException
-     *             if the specified key or value is null
-     */
-    public V put(K key, V value) {
-        if (value == null)
-            throw new NullPointerException();
-        int hash = hash(key.hashCode());
-        return segmentFor(hash).put(key, hash, value, false);
-    }
-
-    /**
-     * {@inheritDoc}
-     * 
-     * @return the previous value associated with the specified key, or <tt>null</tt> if there was
-     *         no mapping for the key
-     * @throws NullPointerException
-     *             if the specified key or value is null
-     */
-    public V putIfAbsent(K key, V value) {
-        if (value == null)
-            throw new NullPointerException();
-        int hash = hash(key.hashCode());
-        return segmentFor(hash).put(key, hash, value, true);
-    }
-
-    /**
-     * Copies all of the mappings from the specified map to this one. These mappings replace any
-     * mappings that this map had for any of the keys currently in the specified map.
-     * 
-     * @param m
-     *            mappings to be stored in this map
-     */
-    public void putAll(Map<? extends K, ? extends V> m) {
-        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
-            put(e.getKey(), e.getValue());
-    }
-
-    /**
-     * Removes the key (and its corresponding value) from this map. This method does nothing if the
-     * key is not in the map.
-     * 
-     * @param key
-     *            the key that needs to be removed
-     * @return the previous value associated with <tt>key</tt>, or <tt>null</tt> if there was no
-     *         mapping for <tt>key</tt>
-     * @throws NullPointerException
-     *             if the specified key is null
-     */
-    public V remove(Object key) {
-        int hash = hash(key.hashCode());
-        return segmentFor(hash).remove(key, hash, null);
-    }
-
-    /**
-     * {@inheritDoc}
-     * 
-     * @throws NullPointerException
-     *             if the specified key is null
-     */
-    public boolean remove(Object key, Object value) {
-        int hash = hash(key.hashCode());
-        if (value == null)
-            return false;
-        return segmentFor(hash).remove(key, hash, value) != null;
-    }
-
-    /**
-     * {@inheritDoc}
-     * 
-     * @throws NullPointerException
-     *             if any of the arguments are null
-     */
-    public boolean replace(K key, V oldValue, V newValue) {
-        if (oldValue == null || newValue == null)
-            throw new NullPointerException();
-        int hash = hash(key.hashCode());
-        return segmentFor(hash).replace(key, hash, oldValue, newValue);
-    }
-
-    /**
-     * {@inheritDoc}
-     * 
-     * @return the previous value associated with the specified key, or <tt>null</tt> if there was
-     *         no mapping for the key
-     * @throws NullPointerException
-     *             if the specified key or value is null
-     */
-    public V replace(K key, V value) {
-        if (value == null)
-            throw new NullPointerException();
-        int hash = hash(key.hashCode());
-        return segmentFor(hash).replace(key, hash, value);
-    }
-
-    /**
-     * Removes all of the mappings from this map.
-     */
-    public void clear() {
-        for (int i = 0; i < segments.length; ++i)
-            segments[i].clear();
-    }
-
-    /**
-     * Returns a {@link Set} view of the keys contained in this map. The set is backed by the map,
-     * so changes to the map are reflected in the set, and vice-versa. The set supports element
-     * removal, which removes the corresponding mapping from this map, via the
-     * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
-     * <tt>clear</tt> operations. It does not support the <tt>add</tt> or <tt>addAll</tt>
-     * operations.
-     * 
-     * <p>
-     * The view's <tt>iterator</tt> is a "weakly consistent" iterator that will never throw
-     * {@link ConcurrentModificationException}, and guarantees to traverse elements as they existed
-     * upon construction of the iterator, and may (but is not guaranteed to) reflect any
-     * modifications subsequent to construction.
-     */
-    public Set<K> keySet() {
-        Set<K> ks = keySet;
-        return (ks != null) ? ks : (keySet = new KeySet());
-    }
-
-    /**
-     * Returns a {@link Collection} view of the values contained in this map. The collection is
-     * backed by the map, so changes to the map are reflected in the collection, and vice-versa. The
-     * collection supports element removal, which removes the corresponding mapping from this map,
-     * via the <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, <tt>removeAll</tt>,
-     * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support the <tt>add</tt> or
-     * <tt>addAll</tt> operations.
-     * 
-     * <p>
-     * The view's <tt>iterator</tt> is a "weakly consistent" iterator that will never throw
-     * {@link ConcurrentModificationException}, and guarantees to traverse elements as they existed
-     * upon construction of the iterator, and may (but is not guaranteed to) reflect any
-     * modifications subsequent to construction.
-     */
-    public Collection<V> values() {
-        Collection<V> vs = values;
-        return (vs != null) ? vs : (values = new Values());
-    }
-
-    /**
-     * Returns a {@link Set} view of the mappings contained in this map. The set is backed by the
-     * map, so changes to the map are reflected in the set, and vice-versa. The set supports element
-     * removal, which removes the corresponding mapping from the map, via the
-     * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
-     * <tt>clear</tt> operations. It does not support the <tt>add</tt> or <tt>addAll</tt>
-     * operations.
-     * 
-     * <p>
-     * The view's <tt>iterator</tt> is a "weakly consistent" iterator that will never throw
-     * {@link ConcurrentModificationException}, and guarantees to traverse elements as they existed
-     * upon construction of the iterator, and may (but is not guaranteed to) reflect any
-     * modifications subsequent to construction.
-     */
-    public Set<Map.Entry<K, V>> entrySet() {
-        Set<Map.Entry<K, V>> es = entrySet;
-        return (es != null) ? es : (entrySet = new EntrySet());
-    }
-
-    /**
-     * Returns an enumeration of the keys in this table.
-     * 
-     * @return an enumeration of the keys in this table
-     * @see #keySet()
-     */
-    public Enumeration<K> keys() {
-        return new KeyIterator();
-    }
-
-    /**
-     * Returns an enumeration of the values in this table.
-     * 
-     * @return an enumeration of the values in this table
-     * @see #values()
-     */
-    public Enumeration<V> elements() {
-        return new ValueIterator();
-    }
-
-    /* ---------------- Iterator Support -------------- */
-
-    abstract class HashIterator {
-        int nextSegmentIndex;
-        int nextTableIndex;
-        HashEntry<K, V>[] currentTable;
-        HashEntry<K, V> nextEntry;
-        HashEntry<K, V> lastReturned;
-
-        HashIterator() {
-            nextSegmentIndex = segments.length - 1;
-            nextTableIndex = -1;
-            advance();
-        }
-
-        public boolean hasMoreElements() {
-            return hasNext();
-        }
-
-        final void advance() {
-            if (nextEntry != null && (nextEntry = nextEntry.next) != null)
-                return;
-
-            while (nextTableIndex >= 0) {
-                if ((nextEntry = currentTable[nextTableIndex--]) != null)
-                    return;
-            }
-
-            while (nextSegmentIndex >= 0) {
-                Segment<K, V> seg = segments[nextSegmentIndex--];
-                if (seg.count != 0) {
-                    currentTable = seg.table;
-                    for (int j = currentTable.length - 1; j >= 0; --j) {
-                        if ((nextEntry = currentTable[j]) != null) {
-                            nextTableIndex = j - 1;
-                            return;
-                        }
-                    }
-                }
-            }
-        }
-
-        public boolean hasNext() {
-            return nextEntry != null;
-        }
-
-        HashEntry<K, V> nextEntry() {
-            if (nextEntry == null)
-                throw new NoSuchElementException();
-            lastReturned = nextEntry;
-            advance();
-            return lastReturned;
-        }
-
-        public void remove() {
-            if (lastReturned == null)
-                throw new IllegalStateException();
-            BufferedConcurrentHashMap.this.remove(lastReturned.key);
-            lastReturned = null;
-        }
-    }
-
-    final class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> {
-        public K next() {
-            return super.nextEntry().key;
-        }
-
-        public K nextElement() {
-            return super.nextEntry().key;
-        }
-    }
-
-    final class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> {
-        public V next() {
-            return super.nextEntry().value;
-        }
-
-        public V nextElement() {
-            return super.nextEntry().value;
-        }
-    }
-
-    /**
-     * Custom Entry class used by EntryIterator.next(), that relays setValue changes to the
-     * underlying map.
-     */
-    final class WriteThroughEntry extends AbstractMap.SimpleEntry<K, V> {
-        private static final long serialVersionUID = -1075078642155041669L;
-
-        WriteThroughEntry(K k, V v) {
-            super(k, v);
-        }
-
-        /**
-         * Set our entry's value and write through to the map. The value to return is somewhat
-         * arbitrary here. Since a WriteThroughEntry does not necessarily track asynchronous
-         * changes, the most recent "previous" value could be different from what we return (or
-         * could even have been removed in which case the put will re-establish). We do not and
-         * cannot guarantee more.
-         */
-        public V setValue(V value) {
-            if (value == null)
-                throw new NullPointerException();
-            V v = super.setValue(value);
-            BufferedConcurrentHashMap.this.put(getKey(), value);
-            return v;
-        }
-    }
-
-    final class EntryIterator extends HashIterator implements Iterator<Entry<K, V>> {
-        public Map.Entry<K, V> next() {
-            HashEntry<K, V> e = super.nextEntry();
-            return new WriteThroughEntry(e.key, e.value);
-        }
-    }
-
-    final class KeySet extends AbstractSet<K> {
-        public Iterator<K> iterator() {
-            return new KeyIterator();
-        }
-
-        public int size() {
-            return BufferedConcurrentHashMap.this.size();
-        }
-
-        public boolean contains(Object o) {
-            return BufferedConcurrentHashMap.this.containsKey(o);
-        }
-
-        public boolean remove(Object o) {
-            return BufferedConcurrentHashMap.this.remove(o) != null;
-        }
-
-        public void clear() {
-            BufferedConcurrentHashMap.this.clear();
-        }
-    }
-
-    final class Values extends AbstractCollection<V> {
-        public Iterator<V> iterator() {
-            return new ValueIterator();
-        }
-
-        public int size() {
-            return BufferedConcurrentHashMap.this.size();
-        }
-
-        public boolean contains(Object o) {
-            return BufferedConcurrentHashMap.this.containsValue(o);
-        }
-
-        public void clear() {
-            BufferedConcurrentHashMap.this.clear();
-        }
-    }
-
-    final class EntrySet extends AbstractSet<Map.Entry<K, V>> {
-        public Iterator<Map.Entry<K, V>> iterator() {
-            return new EntryIterator();
-        }
-
-        public boolean contains(Object o) {
-            if (!(o instanceof Map.Entry))
-                return false;
-            Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
-            V v = BufferedConcurrentHashMap.this.get(e.getKey());
-            return v != null && v.equals(e.getValue());
-        }
-
-        public boolean remove(Object o) {
-            if (!(o instanceof Map.Entry))
-                return false;
-            Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
-            return BufferedConcurrentHashMap.this.remove(e.getKey(), e.getValue());
-        }
-
-        public int size() {
-            return BufferedConcurrentHashMap.this.size();
-        }
-
-        public void clear() {
-            BufferedConcurrentHashMap.this.clear();
-        }
-    }
-
-    /* ---------------- Serialization Support -------------- */
-
-    /**
-     * Save the state of this <tt>BufferedConcurrentHashMap</tt> instance to a stream (i.e., serialize it).
-     * 
-     * @param s
-     *            the stream
-     * @serialData the key (Object) and value (Object) for each key-value mapping, followed by a
-     *             null pair. The key-value mappings are emitted in no particular order.
-     */
-    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
-        s.defaultWriteObject();
-
-        for (int k = 0; k < segments.length; ++k) {
-            Segment<K, V> seg = segments[k];
-            seg.lock();
-            try {
-                HashEntry<K, V>[] tab = seg.table;
-                for (int i = 0; i < tab.length; ++i) {
-                    for (HashEntry<K, V> e = tab[i]; e != null; e = e.next) {
-                        s.writeObject(e.key);
-                        s.writeObject(e.value);
-                    }
-                }
-            } finally {
-                seg.unlock();
-            }
-        }
-        s.writeObject(null);
-        s.writeObject(null);
-    }
-
-    /**
-     * Reconstitute the <tt>BufferedConcurrentHashMap</tt> instance from a stream (i.e., deserialize it).
-     * 
-     * @param s
-     *            the stream
-     */
-    private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
-        s.defaultReadObject();
-
-        // Initialize each segment to be minimally sized, and let grow.
-        for (int i = 0; i < segments.length; ++i) {
-            segments[i].setTable(new HashEntry[1]);
-        }
-
-        // Read the keys and values, and put the mappings in the table
-        for (;;) {
-            K key = (K) s.readObject();
-            V value = (V) s.readObject();
-            if (key == null)
-                break;
-            put(key, value);
-        }
-    }
-}

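For readers following the removal: every mutating method in the deleted BufferedConcurrentHashMap (put, putIfAbsent, remove, replace) uses the same two-step dispatch, spreading key.hashCode() through a supplemental hash function and then delegating to the segment that owns that hash. A minimal sketch of the pattern under stated assumptions: TinySegmentedMap is a hypothetical name, and the bit-spreader below is a placeholder since the removed file's own hash(int) is not shown in this hunk.

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical illustration of segment dispatch; not Infinispan code.
    public class TinySegmentedMap<K, V> {

        private static final class Segment<K, V> {
            private final Map<K, V> map = new HashMap<K, V>();
            synchronized V put(K k, V v)    { return map.put(k, v); }
            synchronized V get(Object k)    { return map.get(k); }
            synchronized V remove(Object k) { return map.remove(k); }
        }

        private final Segment<K, V>[] segments;
        private final int mask;

        @SuppressWarnings("unchecked")
        public TinySegmentedMap(int concurrencyLevel) {
            int n = 1;
            while (n < concurrencyLevel) n <<= 1; // power of two -> cheap masking
            segments = new Segment[n];
            for (int i = 0; i < n; i++) segments[i] = new Segment<K, V>();
            mask = n - 1;
        }

        // Placeholder bit-spreader; the real class rehashes key.hashCode()
        // for the same reason: to defend against weak hashCode functions.
        private static int hash(int h) {
            h ^= (h >>> 20) ^ (h >>> 12);
            return h ^ (h >>> 7) ^ (h >>> 4);
        }

        private Segment<K, V> segmentFor(int hash) {
            return segments[hash & mask];
        }

        public V put(K key, V value) {
            if (key == null || value == null) throw new NullPointerException();
            int hash = hash(key.hashCode());
            return segmentFor(hash).put(key, value);
        }

        public V get(Object key) {
            return segmentFor(hash(key.hashCode())).get(key);
        }

        public V remove(Object key) {
            return segmentFor(hash(key.hashCode())).remove(key);
        }
    }

The replacement BoundedConcurrentHashMap keeps this segment layout and adds per-segment eviction (LRU/LIRS), as exercised by the MapStressTest changes further down.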
Modified: trunk/core/src/main/resources/config-samples/all.xml
===================================================================
--- trunk/core/src/main/resources/config-samples/all.xml	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/main/resources/config-samples/all.xml	2010-03-25 10:49:02 UTC (rev 1619)
@@ -135,7 +135,7 @@
          Eviction configuration.  WakeupInterval defines how often the eviction thread runs, in milliseconds.  0 means
          the eviction thread will never run.  A separate executor is used for eviction in each cache.
       -->
-      <eviction wakeUpInterval="500" maxEntries="5000" strategy="FIFO" />
+      <eviction wakeUpInterval="500" maxEntries="5000" strategy="FIFO" threadPolicy="PIGGYBACK"/>
       <expiration lifespan="60000" maxIdle="1000"/>
    </namedCache>
 

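The new threadPolicy attribute has a programmatic counterpart. A minimal sketch, assuming Configuration exposes a setEvictionThreadPolicy(...) setter to match the getEvictionThreadPolicy() getter asserted in XmlFileParsingTest below; the other three setters appear verbatim in the test changes in this commit.

    import org.infinispan.config.Configuration;
    import org.infinispan.eviction.EvictionStrategy;
    import org.infinispan.eviction.EvictionThreadPolicy;

    public class EvictionConfigSketch {
        // Mirrors the <eviction .../> element above in code.
        static Configuration fifoPiggybackConfig() {
            Configuration cfg = new Configuration();
            cfg.setEvictionStrategy(EvictionStrategy.FIFO);  // strategy attribute
            cfg.setEvictionWakeUpInterval(500);              // wakeUpInterval, in ms
            cfg.setEvictionMaxEntries(5000);                 // maxEntries
            cfg.setEvictionThreadPolicy(EvictionThreadPolicy.PIGGYBACK); // assumed setter
            return cfg;
        }
    }

PIGGYBACK runs eviction on the caller's thread rather than a dedicated executor, which is why the wakeUpInterval semantics documented in the XML comment still apply unchanged.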
Modified: trunk/core/src/test/java/org/infinispan/config/parsing/XmlFileParsingTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/config/parsing/XmlFileParsingTest.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/java/org/infinispan/config/parsing/XmlFileParsingTest.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -9,6 +9,7 @@
 import org.infinispan.config.InfinispanConfiguration;
 import org.infinispan.distribution.DefaultConsistentHash;
 import org.infinispan.eviction.EvictionStrategy;
+import org.infinispan.eviction.EvictionThreadPolicy;
 import org.infinispan.loaders.file.FileCacheStoreConfig;
 import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
 import org.infinispan.test.AbstractInfinispanTest;
@@ -190,6 +191,7 @@
       assert c.getEvictionStrategy().equals(EvictionStrategy.FIFO);
       assert c.getExpirationLifespan() == 60000;
       assert c.getExpirationMaxIdle() == 1000;
+      assert c.getEvictionThreadPolicy() == EvictionThreadPolicy.PIGGYBACK;
 
       c = namedCaches.get("withDeadlockDetection");
       assert c.isEnableDeadlockDetection();

Modified: trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -30,7 +30,7 @@
    }
 
    protected DataContainer createContainer() {
-      return new SimpleDataContainer(16);
+      return new DefaultDataContainer(16);
    }
 
    public void testExpiredData() throws InterruptedException {

Modified: trunk/core/src/test/java/org/infinispan/distribution/DistSyncTxFuncTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/distribution/DistSyncTxFuncTest.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/java/org/infinispan/distribution/DistSyncTxFuncTest.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -8,7 +8,7 @@
 
 import javax.transaction.TransactionManager;
 
-@Test(groups = "functional", testName = "distribution.DistSyncTxFuncTest")
+@Test(groups = "functional", enabled=false, testName = "distribution.DistSyncTxFuncTest")
 public class DistSyncTxFuncTest extends BaseDistFunctionalTest {
    
    public DistSyncTxFuncTest() {

Modified: trunk/core/src/test/java/org/infinispan/eviction/BaseEvictionFunctionalTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/eviction/BaseEvictionFunctionalTest.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/java/org/infinispan/eviction/BaseEvictionFunctionalTest.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -13,6 +13,8 @@
 @Test(groups = "functional", testName = "eviction.BaseEvictionFunctionalTest")
 public abstract class BaseEvictionFunctionalTest extends SingleCacheManagerTest {
    
+   private static final int CACHE_SIZE = 128;
+   
    protected BaseEvictionFunctionalTest() {
       cleanup = CleanupPhase.AFTER_METHOD;
    }
@@ -23,7 +25,7 @@
       Configuration cfg = new Configuration();
       cfg.setEvictionStrategy(getEvictionStrategy());
       cfg.setEvictionWakeUpInterval(100);
-      cfg.setEvictionMaxEntries(1); // 1 max entries
+      cfg.setEvictionMaxEntries(CACHE_SIZE); // CACHE_SIZE (128) max entries
       cfg.setUseLockStriping(false); // to minimise chances of deadlock in the unit test
       CacheManager cm = TestCacheManagerFactory.createCacheManager(cfg);
       cache = cm.getCache();
@@ -46,14 +48,14 @@
       for (Writer writer : w) writer.running = false;
       for (Writer writer : w) writer.join();
 
-      // wait for the cache size to drop to 1, up to a specified amount of time.
-      long giveUpTime = System.currentTimeMillis() + (1000 * 60); // 1 min?
+      // wait for the cache size to drop to CACHE_SIZE, up to a specified amount of time.
+      long giveUpTime = System.currentTimeMillis() + (1000 * 10); // 10 sec
-      while (cache.getAdvancedCache().getDataContainer().size() > 1 && System.currentTimeMillis() < giveUpTime) {
+      while (cache.getAdvancedCache().getDataContainer().size() > CACHE_SIZE && System.currentTimeMillis() < giveUpTime) {
-//         System.out.println("Cache size is " + cache.size() + " and time diff is " + (giveUpTime - System.currentTimeMillis()));
+         //System.out.println("Cache size is " + cache.size() + " and time diff is " + (giveUpTime - System.currentTimeMillis()));
          Thread.sleep(100);
       }
 
-      assert cache.getAdvancedCache().getDataContainer().size() == 1 : "Expected 1, was " + cache.size(); // this is what we expect the cache to be pruned to      
+      assert cache.getAdvancedCache().getDataContainer().size() <= CACHE_SIZE : "Expected at most " + CACHE_SIZE + ", was " + cache.size(); // this is what we expect the cache to be pruned to
    }
 
    private class Writer extends Thread {

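The poll-then-assert idiom above recurs in MarshalledValuesEvictionTest below; a sketch of a shared helper capturing it. waitForSizeAtMost is an illustrative name, not part of this commit; the getAdvancedCache().getDataContainer().size() chain is taken directly from the tests.

    import org.infinispan.Cache;

    public class EvictionTestSupport {
        // Polls until the data container has been pruned to at most 'max'
        // entries or the timeout elapses; eviction runs asynchronously, so
        // the size cannot be asserted immediately after the writes finish.
        static void waitForSizeAtMost(Cache<?, ?> cache, int max, long timeoutMillis)
              throws InterruptedException {
            long giveUpTime = System.currentTimeMillis() + timeoutMillis;
            while (cache.getAdvancedCache().getDataContainer().size() > max
                  && System.currentTimeMillis() < giveUpTime) {
                Thread.sleep(100);
            }
            int size = cache.getAdvancedCache().getDataContainer().size();
            assert size <= max : "Expected at most " + max + ", was " + size;
        }
    }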
Modified: trunk/core/src/test/java/org/infinispan/eviction/MarshalledValuesEvictionTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/eviction/MarshalledValuesEvictionTest.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/java/org/infinispan/eviction/MarshalledValuesEvictionTest.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -35,17 +35,21 @@
 import org.infinispan.test.SingleCacheManagerTest;
 import org.infinispan.test.TestingUtil;
 import org.infinispan.test.fwk.TestCacheManagerFactory;
+import org.jgroups.util.Util;
 import org.testng.annotations.Test;
 
 @Test(groups = "functional", testName = "eviction.MarshalledValuesEvictionTest")
 public class MarshalledValuesEvictionTest extends SingleCacheManagerTest {
+   
+   private static final int CACHE_SIZE = 128;
 
+
    @Override
    protected CacheManager createCacheManager() throws Exception {
       Configuration cfg = new Configuration();
       cfg.setEvictionStrategy(EvictionStrategy.FIFO);
       cfg.setEvictionWakeUpInterval(100);
-      cfg.setEvictionMaxEntries(1); // 1 max entries
+      cfg.setEvictionMaxEntries(CACHE_SIZE); // CACHE_SIZE max entries
       cfg.setUseLockStriping(false); // to minimise chances of deadlock in the unit test
       cfg.setUseLazyDeserialization(true);
       CacheManager cm = TestCacheManagerFactory.createCacheManager(cfg);
@@ -57,43 +61,46 @@
    }
    
    public void testEvictCustomKeyValue() {
-      MarshalledValueTest.Pojo p1 = new MarshalledValueTest.Pojo();
-      p1.i = 64;
-      MarshalledValueTest.Pojo p2 = new MarshalledValueTest.Pojo();
-      p2.i = 24;
-      MarshalledValueTest.Pojo p3 = new MarshalledValueTest.Pojo();
-      p3.i = 97;
-      MarshalledValueTest.Pojo p4 = new MarshalledValueTest.Pojo();
-      p4.i = 35;
+      for (int i = 0; i < CACHE_SIZE * 2; i++) {
+         MarshalledValueTest.Pojo p1 = new MarshalledValueTest.Pojo();
+         p1.i = (int) Util.random(2000);
+         MarshalledValueTest.Pojo p2 = new MarshalledValueTest.Pojo();
+         p2.i = 24;
+         cache.put(p1, p2);
+      }
 
-      cache.put(p1, p2);
-      cache.put(p3, p4);
-
-      // wait for the cache size to drop to 1, up to a specified amount of time.
-      long giveupTime = System.currentTimeMillis() + (1000 * 60); // 1 mins?
-      while (cache.getAdvancedCache().getDataContainer().size() > 1 && System.currentTimeMillis() < giveupTime) {
+      // wait for the cache size to drop to CACHE_SIZE, up to a specified amount of time.
+      long giveupTime = System.currentTimeMillis() + (1000 * 10); // 10 sec
+      while (cache.getAdvancedCache().getDataContainer().size() > CACHE_SIZE && System.currentTimeMillis() < giveupTime) {
          TestingUtil.sleepThread(100);
       }
       
+      assert cache.getAdvancedCache().getDataContainer().size() <= CACHE_SIZE : "Expected at most " + CACHE_SIZE + ", was " + cache.size();
+
+      // let the eviction manager kick in
+      Util.sleep(3000);
       MockMarshalledValueInterceptor interceptor = (MockMarshalledValueInterceptor) TestingUtil.findInterceptor(cache, MarshalledValueInterceptor.class);
       assert !interceptor.marshalledValueCreated;
    }
 
    public void testEvictPrimitiveKeyCustomValue() {
-      MarshalledValueTest.Pojo p1 = new MarshalledValueTest.Pojo();
-      p1.i = 51;
-      MarshalledValueTest.Pojo p2 = new MarshalledValueTest.Pojo();
-      p2.i = 78;
+      for (int i = 0; i < CACHE_SIZE * 2; i++) {
+         MarshalledValueTest.Pojo p1 = new MarshalledValueTest.Pojo();
+         p1.i = (int) Util.random(2000);
+         MarshalledValueTest.Pojo p2 = new MarshalledValueTest.Pojo();
+         p2.i = 24;
+         cache.put(p1, p2);
+      }
 
-      cache.put("key-isoprene", p1);
-      cache.put("key-hexastyle", p2);
-
-      // wait for the cache size to drop to 1, up to a specified amount of time.
-      long giveupTime = System.currentTimeMillis() + (1000 * 60); // 1 mins?
-      while (cache.getAdvancedCache().getDataContainer().size() > 1 && System.currentTimeMillis() < giveupTime) {
+      // wait for the cache size to drop to CACHE_SIZE, up to a specified amount of time.
+      long giveupTime = System.currentTimeMillis() + (1000 * 10); // 10 sec
+      while (cache.getAdvancedCache().getDataContainer().size() > CACHE_SIZE && System.currentTimeMillis() < giveupTime) {
          TestingUtil.sleepThread(100);
       }
       
+      assert cache.getAdvancedCache().getDataContainer().size() <= CACHE_SIZE : "Expected at most " + CACHE_SIZE + ", was " + cache.size();
+      // let the eviction manager kick in
+      Util.sleep(3000);
       MockMarshalledValueInterceptor interceptor = (MockMarshalledValueInterceptor) TestingUtil.findInterceptor(cache, MarshalledValueInterceptor.class);
       assert !interceptor.marshalledValueCreated;
    }

Modified: trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -33,7 +33,7 @@
    private static final Random R = new Random();
 
    public void testSimpleDataContainer() throws InterruptedException {
-      doTest(new SimpleDataContainer(5000));
+      doTest(DefaultDataContainer.unBoundedDataContainer(5000));
    }
 
    public void testFIFODataContainer() throws InterruptedException {

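Two construction paths for the new DefaultDataContainer are visible in this commit's test changes: a plain constructor taking a concurrency level (SimpleDataContainerTest above) and an unBoundedDataContainer factory method (here). A sketch of both, exactly as the tests use them; any bounded factory variants are not shown in these hunks.

    import org.infinispan.container.DataContainer;
    import org.infinispan.container.DefaultDataContainer;

    public class ContainerConstructionSketch {
        static DataContainer smallContainer() {
            // concurrency level 16, as in SimpleDataContainerTest
            return new DefaultDataContainer(16);
        }

        static DataContainer stressContainer() {
            // no eviction bound, as in DataContainerStressTest
            return DefaultDataContainer.unBoundedDataContainer(5000);
        }
    }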
Modified: trunk/core/src/test/java/org/infinispan/stress/MapStressTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/stress/MapStressTest.java	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/java/org/infinispan/stress/MapStressTest.java	2010-03-25 10:49:02 UTC (rev 1619)
@@ -8,12 +8,13 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
+import org.infinispan.util.concurrent.BoundedConcurrentHashMap;
+import org.infinispan.util.concurrent.BoundedConcurrentHashMap.Eviction;
+
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import org.infinispan.util.concurrent.BufferedConcurrentHashMap;
-import org.infinispan.util.concurrent.BufferedConcurrentHashMap.Eviction;
+
 import org.testng.annotations.BeforeClass;
 import org.testng.annotations.Test;
 
@@ -52,15 +53,15 @@
     }
     
     public void testConcurrentHashMap() throws Exception {
-        doTest(new ConcurrentHashMap<Integer, Integer>(MAP_CAPACITY, MAP_LOAD_FACTOR, CONCURRENCY));
+        doTest(new BoundedConcurrentHashMap<Integer, Integer>(MAP_CAPACITY, CONCURRENCY));
     }
    
     public void testBufferedConcurrentHashMapLRU() throws Exception {
-        doTest(new BufferedConcurrentHashMap<Integer, Integer>(MAP_CAPACITY, MAP_LOAD_FACTOR, CONCURRENCY, Eviction.LRU));
+        doTest(new BoundedConcurrentHashMap<Integer, Integer>(MAP_CAPACITY, CONCURRENCY, Eviction.LRU));
     }
     
     public void testBufferedConcurrentHashMapLIRS() throws Exception {
-        doTest(new BufferedConcurrentHashMap<Integer, Integer>(MAP_CAPACITY, MAP_LOAD_FACTOR, CONCURRENCY, Eviction.LIRS));
+        doTest(new BoundedConcurrentHashMap<Integer, Integer>(MAP_CAPACITY, CONCURRENCY, Eviction.LIRS));
     }
 
     public void testHashMap() throws Exception {

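BoundedConcurrentHashMap replaces BufferedConcurrentHashMap and drops the load-factor parameter; the two constructor shapes exercised above are (capacity, concurrencyLevel) and (capacity, concurrencyLevel, evictionPolicy). A standalone sketch; reading capacity as the maximum entry count is an assumption based on the class name and the eviction tests in this commit.

    import java.util.Map;
    import org.infinispan.util.concurrent.BoundedConcurrentHashMap;
    import org.infinispan.util.concurrent.BoundedConcurrentHashMap.Eviction;

    public class BoundedMapSketch {
        public static void main(String[] args) {
            Map<Integer, Integer> lru =
                  new BoundedConcurrentHashMap<Integer, Integer>(512, 32, Eviction.LRU);
            Map<Integer, Integer> lirs =
                  new BoundedConcurrentHashMap<Integer, Integer>(512, 32, Eviction.LIRS);
            // overfill both maps; the bound should keep them near 512 entries
            for (int i = 0; i < 2048; i++) {
                lru.put(i, i);
                lirs.put(i, i);
            }
            System.out.println("LRU size=" + lru.size() + ", LIRS size=" + lirs.size());
        }
    }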
Modified: trunk/core/src/test/resources/configs/named-cache-test.xml
===================================================================
--- trunk/core/src/test/resources/configs/named-cache-test.xml	2010-03-24 11:05:42 UTC (rev 1618)
+++ trunk/core/src/test/resources/configs/named-cache-test.xml	2010-03-25 10:49:02 UTC (rev 1619)
@@ -129,7 +129,7 @@
          Eviction configuration.  WakeupInterval defines how often the eviction thread runs, in milliseconds.  0 means
          the eviction thread will never run.  A separate executor is used for eviction in each cache.
       -->
-      <eviction wakeUpInterval="500" maxEntries="5000" strategy="FIFO"/>
+      <eviction wakeUpInterval="500" maxEntries="5000" threadPolicy="PIGGYBACK" strategy="FIFO"/>
       <expiration lifespan="60000" maxIdle="1000"/>
    </namedCache>
 


