[infinispan-commits] Infinispan SVN: r2523 - trunk/core/src/main/java/org/infinispan/util/concurrent and 1 other directory.
infinispan-commits at lists.jboss.org
Wed Oct 20 06:34:58 EDT 2010
Author: trustin
Date: 2010-10-20 06:34:58 -0400 (Wed, 20 Oct 2010)
New Revision: 2523
Modified:
branches/4.2.x/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java
trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java
Log:
* Fixed the indentation and license header of BoundedConcurrentHashMap
* Fixed some Eclipse Java compiler warnings
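
For reference, a minimal usage sketch of the public API reformatted below (not part of this commit). It assumes Eviction and EvictionListener are nested types of BoundedConcurrentHashMap, as the single-file layout in the diff suggests; the listener implementation is purely illustrative.

import org.infinispan.util.concurrent.BoundedConcurrentHashMap;
import org.infinispan.util.concurrent.BoundedConcurrentHashMap.Eviction;
import org.infinispan.util.concurrent.BoundedConcurrentHashMap.EvictionListener;

public class BoundedMapUsageSketch {
   public static void main(String[] args) {
      // Hypothetical listener: just logs evicted keys on the caller's thread.
      EvictionListener<String, String> listener = new EvictionListener<String, String>() {
         @Override
         public void onEntryEviction(String key, String value) {
            System.out.println("evicted: " + key);
         }
      };

      // Bounded to 512 entries, 16-way concurrency, LRU eviction.
      // The constructor requires capacity >= 2 * concurrencyLevel (or capacity == 1).
      BoundedConcurrentHashMap<String, String> map =
            new BoundedConcurrentHashMap<String, String>(512, 16, Eviction.LRU, listener);

      map.put("k", "v");
      System.out.println(map.get("k"));
   }
}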
Modified: branches/4.2.x/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java
===================================================================
--- branches/4.2.x/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java 2010-10-20 10:03:11 UTC (rev 2522)
+++ branches/4.2.x/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java 2010-10-20 10:34:58 UTC (rev 2523)
@@ -1,4 +1,26 @@
/*
+ * JBoss, Home of Professional Open Source
+ *
+ * Copyright ${year}, Red Hat, Inc. and individual contributors
+ * by the @authors tag. See the copyright.txt in the distribution
+ * for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/licenses/publicdomain
@@ -92,208 +114,207 @@
*/
public class BoundedConcurrentHashMap<K, V> extends AbstractMap<K, V>
implements ConcurrentMap<K, V>, Serializable {
- private static final long serialVersionUID = 7249069246763182397L;
+ private static final long serialVersionUID = 7249069246763182397L;
- /*
- * The basic strategy is to subdivide the table among Segments,
- * each of which itself is a concurrently readable hash table.
- */
+ /*
+ * The basic strategy is to subdivide the table among Segments,
+ * each of which itself is a concurrently readable hash table.
+ */
- /* ---------------- Constants -------------- */
+ /* ---------------- Constants -------------- */
- /**
- * The default initial capacity for this table,
- * used when not otherwise specified in a constructor.
- */
- static final int DEFAULT_MAXIMUM_CAPACITY = 512;
+ /**
+ * The default initial capacity for this table,
+ * used when not otherwise specified in a constructor.
+ */
+ static final int DEFAULT_MAXIMUM_CAPACITY = 512;
- /**
- * The default load factor for this table, used when not
- * otherwise specified in a constructor.
- */
- static final float DEFAULT_LOAD_FACTOR = 0.75f;
+ /**
+ * The default load factor for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
- /**
- * The default concurrency level for this table, used when not
- * otherwise specified in a constructor.
- */
- static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ /**
+ * The default concurrency level for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final int DEFAULT_CONCURRENCY_LEVEL = 16;
- /**
- * The maximum capacity, used if a higher value is implicitly
- * specified by either of the constructors with arguments. MUST
- * be a power of two <= 1<<30 to ensure that entries are indexable
- * using ints.
- */
- static final int MAXIMUM_CAPACITY = 1 << 30;
+ /**
+ * The maximum capacity, used if a higher value is implicitly
+ * specified by either of the constructors with arguments. MUST
+ * be a power of two <= 1<<30 to ensure that entries are indexable
+ * using ints.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
- /**
- * The maximum number of segments to allow; used to bound
- * constructor arguments.
- */
- static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
+ /**
+ * The maximum number of segments to allow; used to bound
+ * constructor arguments.
+ */
+ static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
- /**
- * Number of unsynchronized retries in size and containsValue
- * methods before resorting to locking. This is used to avoid
- * unbounded retries if tables undergo continuous modification
- * which would make it impossible to obtain an accurate result.
- */
- static final int RETRIES_BEFORE_LOCK = 2;
+ /**
+ * Number of unsynchronized retries in size and containsValue
+ * methods before resorting to locking. This is used to avoid
+ * unbounded retries if tables undergo continuous modification
+ * which would make it impossible to obtain an accurate result.
+ */
+ static final int RETRIES_BEFORE_LOCK = 2;
- /* ---------------- Fields -------------- */
+ /* ---------------- Fields -------------- */
- /**
- * Mask value for indexing into segments. The upper bits of a
- * key's hash code are used to choose the segment.
- */
- final int segmentMask;
+ /**
+ * Mask value for indexing into segments. The upper bits of a
+ * key's hash code are used to choose the segment.
+ */
+ final int segmentMask;
- /**
- * Shift value for indexing within segments.
- */
- final int segmentShift;
+ /**
+ * Shift value for indexing within segments.
+ */
+ final int segmentShift;
- /**
- * The segments, each of which is a specialized hash table
- */
- final Segment<K,V>[] segments;
+ /**
+ * The segments, each of which is a specialized hash table
+ */
+ final Segment<K,V>[] segments;
- transient Set<K> keySet;
- transient Set<Map.Entry<K,V>> entrySet;
- transient Collection<V> values;
+ transient Set<K> keySet;
+ transient Set<Map.Entry<K,V>> entrySet;
+ transient Collection<V> values;
- /* ---------------- Small Utilities -------------- */
+ /* ---------------- Small Utilities -------------- */
- /**
- * Applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. This is critical
- * because ConcurrentHashMap uses power-of-two length hash tables,
- * that otherwise encounter collisions for hashCodes that do not
- * differ in lower or upper bits.
- */
- private static int hash(int h) {
- // Spread bits to regularize both segment and index locations,
- // using variant of single-word Wang/Jenkins hash.
- h += h << 15 ^ 0xffffcd7d;
- h ^= h >>> 10;
- h += h << 3;
- h ^= h >>> 6;
- h += (h << 2) + (h << 14);
- return h ^ h >>> 16;
- }
+ /**
+ * Applies a supplemental hash function to a given hashCode, which
+ * defends against poor quality hash functions. This is critical
+ * because ConcurrentHashMap uses power-of-two length hash tables,
+ * that otherwise encounter collisions for hashCodes that do not
+ * differ in lower or upper bits.
+ */
+ private static int hash(int h) {
+ // Spread bits to regularize both segment and index locations,
+ // using variant of single-word Wang/Jenkins hash.
+ h += h << 15 ^ 0xffffcd7d;
+ h ^= h >>> 10;
+ h += h << 3;
+ h ^= h >>> 6;
+ h += (h << 2) + (h << 14);
+ return h ^ h >>> 16;
+ }
- /**
- * Returns the segment that should be used for key with given hash
- * @param hash the hash code for the key
- * @return the segment
- */
- final Segment<K,V> segmentFor(int hash) {
- return segments[hash >>> segmentShift & segmentMask];
- }
+ /**
+ * Returns the segment that should be used for key with given hash
+ * @param hash the hash code for the key
+ * @return the segment
+ */
+ final Segment<K,V> segmentFor(int hash) {
+ return segments[hash >>> segmentShift & segmentMask];
+ }
- /* ---------------- Inner Classes -------------- */
+ /* ---------------- Inner Classes -------------- */
- /**
- * ConcurrentHashMap list entry. Note that this is never exported
- * out as a user-visible Map.Entry.
- *
- * Because the value field is volatile, not final, it is legal wrt
- * the Java Memory Model for an unsynchronized reader to see null
- * instead of initial value when read via a data race. Although a
- * reordering leading to this is not likely to ever actually
- * occur, the Segment.readValueUnderLock method is used as a
- * backup in case a null (pre-initialized) value is ever seen in
- * an unsynchronized access method.
- */
- static final class HashEntry<K, V> {
- final K key;
- final int hash;
- volatile V value;
- final HashEntry<K, V> next;
- volatile Recency state;
+ /**
+ * ConcurrentHashMap list entry. Note that this is never exported
+ * out as a user-visible Map.Entry.
+ *
+ * Because the value field is volatile, not final, it is legal wrt
+ * the Java Memory Model for an unsynchronized reader to see null
+ * instead of initial value when read via a data race. Although a
+ * reordering leading to this is not likely to ever actually
+ * occur, the Segment.readValueUnderLock method is used as a
+ * backup in case a null (pre-initialized) value is ever seen in
+ * an unsynchronized access method.
+ */
+ static final class HashEntry<K, V> {
+ final K key;
+ final int hash;
+ volatile V value;
+ final HashEntry<K, V> next;
+ volatile Recency state;
- HashEntry(K key, int hash, HashEntry<K, V> next, V value) {
- this.key = key;
- this.hash = hash;
- this.next = next;
- this.value = value;
- this.state = Recency.HIR_RESIDENT;
- }
+ HashEntry(K key, int hash, HashEntry<K, V> next, V value) {
+ this.key = key;
+ this.hash = hash;
+ this.next = next;
+ this.value = value;
+ this.state = Recency.HIR_RESIDENT;
+ }
- @Override
+ @Override
public int hashCode() {
- int result = 17;
- result = result * 31 + hash;
- result = result * 31 + key.hashCode();
- return result;
- }
+ int result = 17;
+ result = result * 31 + hash;
+ result = result * 31 + key.hashCode();
+ return result;
+ }
- @Override
+ @Override
public boolean equals(Object o) {
- // HashEntry is internal class, never leaks out of CHM, hence slight optimization
- if (this == o) {
+ // HashEntry is internal class, never leaks out of CHM, hence slight optimization
+ if (this == o) {
return true;
}
- if (o == null) {
+ if (o == null) {
return false;
}
- HashEntry<?, ?> other = (HashEntry<?, ?>) o;
- return hash == other.hash && key.equals(other.key);
- }
+ HashEntry<?, ?> other = (HashEntry<?, ?>) o;
+ return hash == other.hash && key.equals(other.key);
+ }
- public void transitionToLIRResident() {
- state = Recency.LIR_RESIDENT;
- }
+ public void transitionToLIRResident() {
+ state = Recency.LIR_RESIDENT;
+ }
- public void transitionHIRResidentToHIRNonResident() {
- state = Recency.HIR_NONRESIDENT;
- }
+ public void transitionHIRResidentToHIRNonResident() {
+ state = Recency.HIR_NONRESIDENT;
+ }
- public void transitionLIRResidentToHIRResident() {
- state = Recency.HIR_RESIDENT;
- }
+ public void transitionLIRResidentToHIRResident() {
+ state = Recency.HIR_RESIDENT;
+ }
- public Recency recency() {
- return state;
- }
+ public Recency recency() {
+ return state;
+ }
- @SuppressWarnings("unchecked")
- static <K, V> HashEntry<K, V>[] newArray(int i) {
- return new HashEntry[i];
- }
+ @SuppressWarnings("unchecked")
+ static <K, V> HashEntry<K, V>[] newArray(int i) {
+ return new HashEntry[i];
+ }
}
- private enum Recency {
- HIR_RESIDENT, LIR_RESIDENT, HIR_NONRESIDENT
+ private enum Recency {
+ HIR_RESIDENT, LIR_RESIDENT, HIR_NONRESIDENT
}
public enum Eviction {
- NONE {
- @Override
- public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
- return new NullEvictionPolicy<K, V>();
- }
- },
- LRU {
+ NONE {
+ @Override
+ public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+ return new NullEvictionPolicy<K, V>();
+ }
+ },
+ LRU {
+ @Override
+ public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+ return new LRU<K, V>(s,capacity,lf,capacity*10,lf);
+ }
+ },
+ LIRS {
+ @Override
+ public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+ return new LIRS<K,V>(s,capacity,capacity*10,lf);
+ }
+ };
- @Override
- public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
- return new LRU<K, V>(s,capacity,lf,capacity*10,lf);
- }
- },
- LIRS {
- @Override
- public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
- return new LIRS<K,V>(s,capacity,lf,capacity*10,lf);
- }
- };
-
- abstract <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf);
+ abstract <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf);
}
public interface EvictionListener<K, V> {
- void onEntryEviction(K key, V value);
+ void onEntryEviction(K key, V value);
}
static class NullEvictionListener<K, V> implements EvictionListener<K, V> {
@@ -305,1676 +326,1707 @@
public interface EvictionPolicy<K, V> {
- public final static int MAX_BATCH_SIZE = 64;
+ public final static int MAX_BATCH_SIZE = 64;
- /**
- * Invokes eviction policy algorithm and returns set of evicted entries.
- *
- * <p>
- * Set cannot be null but could possibly be an empty set.
- *
- * @return set of evicted entries.
- */
- Set<HashEntry<K, V>> execute();
+ /**
+ * Invokes eviction policy algorithm and returns set of evicted entries.
+ *
+ * <p>
+ * Set cannot be null but could possibly be an empty set.
+ *
+ * @return set of evicted entries.
+ */
+ Set<HashEntry<K, V>> execute();
- /**
- * Invoked to notify EvictionPolicy implementation that there has been an attempt to access
- * an entry in Segment, however that entry was not present in Segment.
- *
- * @param e
- * accessed entry in Segment
- *
- * @return non null set of evicted entries.
- */
- Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e);
+ /**
+ * Invoked to notify EvictionPolicy implementation that there has been an attempt to access
+ * an entry in Segment, however that entry was not present in Segment.
+ *
+ * @param e
+ * accessed entry in Segment
+ *
+ * @return non null set of evicted entries.
+ */
+ Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e);
- /**
- * Invoked to notify EvictionPolicy implementation that an entry in Segment has been
- * accessed. Returns true if batching threshold has been reached, false otherwise.
- * <p>
- * Note that this method is potentially invoked without holding a lock on Segment.
- *
- * @return true if batching threshold has been reached, false otherwise.
- *
- * @param e
- * accessed entry in Segment
- */
- boolean onEntryHit(HashEntry<K, V> e);
+ /**
+ * Invoked to notify EvictionPolicy implementation that an entry in Segment has been
+ * accessed. Returns true if batching threshold has been reached, false otherwise.
+ * <p>
+ * Note that this method is potentially invoked without holding a lock on Segment.
+ *
+ * @return true if batching threshold has been reached, false otherwise.
+ *
+ * @param e
+ * accessed entry in Segment
+ */
+ boolean onEntryHit(HashEntry<K, V> e);
- /**
- * Invoked to notify EvictionPolicy implementation that an entry e has been removed from
- * Segment.
- *
- * @param e
- * removed entry in Segment
- */
- void onEntryRemove(HashEntry<K, V> e);
+ /**
+ * Invoked to notify EvictionPolicy implementation that an entry e has been removed from
+ * Segment.
+ *
+ * @param e
+ * removed entry in Segment
+ */
+ void onEntryRemove(HashEntry<K, V> e);
- /**
- * Invoked to notify EvictionPolicy implementation that all Segment entries have been
- * cleared.
- *
- */
- void clear();
+ /**
+ * Invoked to notify EvictionPolicy implementation that all Segment entries have been
+ * cleared.
+ *
+ */
+ void clear();
- /**
- * Returns type of eviction algorithm (strategy).
- *
- * @return type of eviction algorithm
- */
- Eviction strategy();
+ /**
+ * Returns type of eviction algorithm (strategy).
+ *
+ * @return type of eviction algorithm
+ */
+ Eviction strategy();
- /**
- * Returns true if batching threshold has expired, false otherwise.
- * <p>
- * Note that this method is potentially invoked without holding a lock on Segment.
- *
- * @return true if batching threshold has expired, false otherwise.
- */
- boolean thresholdExpired();
+ /**
+ * Returns true if batching threshold has expired, false otherwise.
+ * <p>
+ * Note that this method is potentially invoked without holding a lock on Segment.
+ *
+ * @return true if batching threshold has expired, false otherwise.
+ */
+ boolean thresholdExpired();
}
static class NullEvictionPolicy<K, V> implements EvictionPolicy<K, V> {
- @Override
- public void clear() {
- }
+ @Override
+ public void clear() {
+ // Do nothing.
+ }
- @Override
- public Set<HashEntry<K, V>> execute() {
- return Collections.emptySet();
- }
+ @Override
+ public Set<HashEntry<K, V>> execute() {
+ return Collections.emptySet();
+ }
- @Override
- public boolean onEntryHit(HashEntry<K, V> e) {
- return false;
- }
+ @Override
+ public boolean onEntryHit(HashEntry<K, V> e) {
+ return false;
+ }
- @Override
- public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
- return Collections.emptySet();
- }
+ @Override
+ public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
+ return Collections.emptySet();
+ }
- @Override
- public void onEntryRemove(HashEntry<K, V> e) {
- }
+ @Override
+ public void onEntryRemove(HashEntry<K, V> e) {
+ // Do nothing.
+ }
- @Override
- public boolean thresholdExpired() {
- return false;
- }
+ @Override
+ public boolean thresholdExpired() {
+ return false;
+ }
- @Override
- public Eviction strategy() {
- return Eviction.NONE;
- }
+ @Override
+ public Eviction strategy() {
+ return Eviction.NONE;
+ }
}
static final class LRU<K, V> implements EvictionPolicy<K, V> {
- private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
- private final Segment<K,V> segment;
- private final LinkedList<HashEntry<K, V>> lruQueue;
- private final int maxBatchQueueSize;
- private final int trimDownSize;
- private final float batchThresholdFactor;
+ private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
+ private final Segment<K,V> segment;
+ private final LinkedList<HashEntry<K, V>> lruQueue;
+ private final int maxBatchQueueSize;
+ private final int trimDownSize;
+ private final float batchThresholdFactor;
- public LRU(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
- this.segment = s;
- this.trimDownSize = (int) (capacity * lf);
- this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
- this.batchThresholdFactor = batchThresholdFactor;
- this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
- this.lruQueue = new LinkedList<HashEntry<K, V>>();
- }
+ public LRU(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
+ this.segment = s;
+ this.trimDownSize = (int) (capacity * lf);
+ this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
+ this.batchThresholdFactor = batchThresholdFactor;
+ this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
+ this.lruQueue = new LinkedList<HashEntry<K, V>>();
+ }
- @Override
- public Set<HashEntry<K, V>> execute() {
- Set<HashEntry<K, V>> evicted = Collections.emptySet();
- if (isOverflow()) {
- evicted = new HashSet<HashEntry<K, V>>();
- }
- try {
- for (HashEntry<K, V> e : accessQueue) {
- if (lruQueue.remove(e)) {
- lruQueue.addFirst(e);
- }
+ @Override
+ public Set<HashEntry<K, V>> execute() {
+ Set<HashEntry<K, V>> evicted = Collections.emptySet();
+ if (isOverflow()) {
+ evicted = new HashSet<HashEntry<K, V>>();
+ }
+ try {
+ for (HashEntry<K, V> e : accessQueue) {
+ if (lruQueue.remove(e)) {
+ lruQueue.addFirst(e);
}
- while (isOverflow()) {
- HashEntry<K, V> first = lruQueue.getLast();
- segment.remove(first.key, first.hash, null);
- evicted.add(first);
- }
- } finally {
- accessQueue.clear();
- }
- return evicted;
- }
+ }
+ while (isOverflow()) {
+ HashEntry<K, V> first = lruQueue.getLast();
+ segment.remove(first.key, first.hash, null);
+ evicted.add(first);
+ }
+ } finally {
+ accessQueue.clear();
+ }
+ return evicted;
+ }
- private boolean isOverflow() {
- return lruQueue.size() > trimDownSize;
- }
+ private boolean isOverflow() {
+ return lruQueue.size() > trimDownSize;
+ }
- @Override
- public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
- lruQueue.addFirst(e);
- return Collections.emptySet();
- }
+ @Override
+ public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
+ lruQueue.addFirst(e);
+ return Collections.emptySet();
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean onEntryHit(HashEntry<K, V> e) {
- accessQueue.add(e);
- return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean onEntryHit(HashEntry<K, V> e) {
+ accessQueue.add(e);
+ return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean thresholdExpired() {
- return accessQueue.size() >= maxBatchQueueSize;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean thresholdExpired() {
+ return accessQueue.size() >= maxBatchQueueSize;
+ }
- @Override
- public void onEntryRemove(HashEntry<K, V> e) {
- lruQueue.remove(e);
- // we could have multiple instances of e in accessQueue; remove them all
- while (accessQueue.remove(e)) {
+ @Override
+ public void onEntryRemove(HashEntry<K, V> e) {
+ lruQueue.remove(e);
+ // we could have multiple instances of e in accessQueue; remove them all
+ while (accessQueue.remove(e)) {
continue;
- }
- }
+ }
+ }
- @Override
- public void clear() {
- lruQueue.clear();
- accessQueue.clear();
- }
+ @Override
+ public void clear() {
+ lruQueue.clear();
+ accessQueue.clear();
+ }
- @Override
- public Eviction strategy() {
- return Eviction.LRU;
- }
+ @Override
+ public Eviction strategy() {
+ return Eviction.LRU;
+ }
}
static final class LIRS<K, V> implements EvictionPolicy<K, V> {
- private final static int MIN_HIR_SIZE = 2;
- private final Segment<K,V> segment;
- private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
- private final LinkedHashMap<K, HashEntry<K, V>> stack;
- private final LinkedList<HashEntry<K, V>> queue;
- private final int maxBatchQueueSize;
- private final int lirSizeLimit;
- private final int hirSizeLimit;
- private int currentLIRSize;
- private final float batchThresholdFactor;
+ private final static int MIN_HIR_SIZE = 2;
+ private final Segment<K,V> segment;
+ private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
+ private final LinkedHashMap<K, HashEntry<K, V>> stack;
+ private final LinkedList<HashEntry<K, V>> queue;
+ private final int maxBatchQueueSize;
+ private final int lirSizeLimit;
+ private final int hirSizeLimit;
+ private int currentLIRSize;
+ private final float batchThresholdFactor;
- public LIRS(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
- this.segment = s;
- int tmpLirSize = (int) (capacity * 0.9);
- int tmpHirSizeLimit = capacity - tmpLirSize;
- if (tmpHirSizeLimit < MIN_HIR_SIZE) {
- hirSizeLimit = MIN_HIR_SIZE;
- lirSizeLimit = capacity - hirSizeLimit;
- } else {
- hirSizeLimit = tmpHirSizeLimit;
- lirSizeLimit = tmpLirSize;
- }
- this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
- this.batchThresholdFactor = batchThresholdFactor;
- this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
- this.stack = new LinkedHashMap<K, HashEntry<K, V>>();
- this.queue = new LinkedList<HashEntry<K, V>>();
- }
+ public LIRS(Segment<K,V> s, int capacity, int maxBatchSize, float batchThresholdFactor) {
+ this.segment = s;
+ int tmpLirSize = (int) (capacity * 0.9);
+ int tmpHirSizeLimit = capacity - tmpLirSize;
+ if (tmpHirSizeLimit < MIN_HIR_SIZE) {
+ hirSizeLimit = MIN_HIR_SIZE;
+ lirSizeLimit = capacity - hirSizeLimit;
+ } else {
+ hirSizeLimit = tmpHirSizeLimit;
+ lirSizeLimit = tmpLirSize;
+ }
+ this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
+ this.batchThresholdFactor = batchThresholdFactor;
+ this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
+ this.stack = new LinkedHashMap<K, HashEntry<K, V>>();
+ this.queue = new LinkedList<HashEntry<K, V>>();
+ }
- @Override
- public Set<HashEntry<K, V>> execute() {
- Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
- try {
- for (HashEntry<K, V> e : accessQueue) {
- if (present(e)) {
- if (e.recency() == Recency.LIR_RESIDENT) {
- handleLIRHit(e, evicted);
- } else if (e.recency() == Recency.HIR_RESIDENT) {
- handleHIRHit(e, evicted);
- }
- }
+ @Override
+ public Set<HashEntry<K, V>> execute() {
+ Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
+ try {
+ for (HashEntry<K, V> e : accessQueue) {
+ if (present(e)) {
+ if (e.recency() == Recency.LIR_RESIDENT) {
+ handleLIRHit(e, evicted);
+ } else if (e.recency() == Recency.HIR_RESIDENT) {
+ handleHIRHit(e, evicted);
+ }
}
- removeFromSegment(evicted);
- } finally {
- accessQueue.clear();
- }
- return evicted;
- }
+ }
+ removeFromSegment(evicted);
+ } finally {
+ accessQueue.clear();
+ }
+ return evicted;
+ }
- private void handleHIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
- boolean inStack = stack.containsKey(e.key);
- if (inStack) {
+ private void handleHIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
+ boolean inStack = stack.containsKey(e.key);
+ if (inStack) {
stack.remove(e.key);
}
- // first put on top of the stack
- stack.put(e.key, e);
+ // first put on top of the stack
+ stack.put(e.key, e);
- if (inStack) {
- queue.remove(e);
- e.transitionToLIRResident();
- switchBottomostLIRtoHIRAndPrune(evicted);
- } else {
- queue.remove(e);
- queue.addLast(e);
- }
- }
+ if (inStack) {
+ queue.remove(e);
+ e.transitionToLIRResident();
+ switchBottomostLIRtoHIRAndPrune(evicted);
+ } else {
+ queue.remove(e);
+ queue.addLast(e);
+ }
+ }
- private void handleLIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
- stack.remove(e.key);
- stack.put(e.key, e);
- for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
- HashEntry<K, V> next = i.next();
- if (next.recency() == Recency.LIR_RESIDENT) {
- break;
- } else {
- i.remove();
- evicted.add(next);
- }
- }
- }
+ private void handleLIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
+ stack.remove(e.key);
+ stack.put(e.key, e);
+ for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
+ HashEntry<K, V> next = i.next();
+ if (next.recency() == Recency.LIR_RESIDENT) {
+ break;
+ } else {
+ i.remove();
+ evicted.add(next);
+ }
+ }
+ }
- private boolean present(HashEntry<K, V> e) {
- return stack.containsKey(e.key) || queue.contains(e);
- }
+ private boolean present(HashEntry<K, V> e) {
+ return stack.containsKey(e.key) || queue.contains(e);
+ }
- @Override
- public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
- // initialization
- Set<HashEntry<K, V>> evicted = Collections.emptySet();
- if (currentLIRSize + 1 < lirSizeLimit) {
- currentLIRSize++;
- e.transitionToLIRResident();
+ @Override
+ public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
+ // initialization
+ Set<HashEntry<K, V>> evicted = Collections.emptySet();
+ if (currentLIRSize + 1 < lirSizeLimit) {
+ currentLIRSize++;
+ e.transitionToLIRResident();
+ stack.put(e.key, e);
+ } else {
+ if (queue.size() < hirSizeLimit) {
+ queue.addLast(e);
+ } else {
+ boolean inStack = stack.containsKey(e.key);
+ HashEntry<K, V> first = queue.removeFirst();
+ first.transitionHIRResidentToHIRNonResident();
+
stack.put(e.key, e);
- } else {
- if (queue.size() < hirSizeLimit) {
- queue.addLast(e);
+
+ evicted = new HashSet<HashEntry<K, V>>();
+ if (inStack) {
+ e.transitionToLIRResident();
+ switchBottomostLIRtoHIRAndPrune(evicted);
} else {
- boolean inStack = stack.containsKey(e.key);
- HashEntry<K, V> first = queue.removeFirst();
- first.transitionHIRResidentToHIRNonResident();
-
- stack.put(e.key, e);
-
- evicted = new HashSet<HashEntry<K, V>>();
- if (inStack) {
- e.transitionToLIRResident();
- switchBottomostLIRtoHIRAndPrune(evicted);
- } else {
- queue.addLast(e);
- evicted.add(first);
- }
- // evict from segment
- removeFromSegment(evicted);
+ queue.addLast(e);
+ evicted.add(first);
}
- }
- return evicted;
- }
+ // evict from segment
+ removeFromSegment(evicted);
+ }
+ }
+ return evicted;
+ }
- private void removeFromSegment(Set<HashEntry<K, V>> evicted) {
- for (HashEntry<K, V> e : evicted) {
- segment.remove(e.key, e.hash, null);
- }
- }
+ private void removeFromSegment(Set<HashEntry<K, V>> evicted) {
+ for (HashEntry<K, V> e : evicted) {
+ segment.remove(e.key, e.hash, null);
+ }
+ }
- private void switchBottomostLIRtoHIRAndPrune(Set<HashEntry<K, V>> evicted) {
- boolean seenFirstLIR = false;
- for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
- HashEntry<K, V> next = i.next();
- if (next.recency() == Recency.LIR_RESIDENT) {
- if (!seenFirstLIR) {
- seenFirstLIR = true;
- i.remove();
- next.transitionLIRResidentToHIRResident();
- queue.addLast(next);
- } else {
- break;
- }
+ private void switchBottomostLIRtoHIRAndPrune(Set<HashEntry<K, V>> evicted) {
+ boolean seenFirstLIR = false;
+ for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
+ HashEntry<K, V> next = i.next();
+ if (next.recency() == Recency.LIR_RESIDENT) {
+ if (!seenFirstLIR) {
+ seenFirstLIR = true;
+ i.remove();
+ next.transitionLIRResidentToHIRResident();
+ queue.addLast(next);
} else {
- i.remove();
- evicted.add(next);
+ break;
}
- }
- }
+ } else {
+ i.remove();
+ evicted.add(next);
+ }
+ }
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean onEntryHit(HashEntry<K, V> e) {
- accessQueue.add(e);
- return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean onEntryHit(HashEntry<K, V> e) {
+ accessQueue.add(e);
+ return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean thresholdExpired() {
- return accessQueue.size() >= maxBatchQueueSize;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean thresholdExpired() {
+ return accessQueue.size() >= maxBatchQueueSize;
+ }
- @Override
- public void onEntryRemove(HashEntry<K, V> e) {
- HashEntry<K, V> removed = stack.remove(e.key);
- if (removed != null && removed.recency() == Recency.LIR_RESIDENT) {
- currentLIRSize--;
- }
- queue.remove(e);
- // we could have multiple instances of e in accessQueue; remove them all
- while (accessQueue.remove(e)) {
- continue;
- }
- }
+ @Override
+ public void onEntryRemove(HashEntry<K, V> e) {
+ HashEntry<K, V> removed = stack.remove(e.key);
+ if (removed != null && removed.recency() == Recency.LIR_RESIDENT) {
+ currentLIRSize--;
+ }
+ queue.remove(e);
+ // we could have multiple instances of e in accessQueue; remove them all
+ while (accessQueue.remove(e)) {
+ continue;
+ }
+ }
- @Override
- public void clear() {
- stack.clear();
- accessQueue.clear();
- }
+ @Override
+ public void clear() {
+ stack.clear();
+ accessQueue.clear();
+ }
- @Override
- public Eviction strategy() {
- return Eviction.LIRS;
- }
+ @Override
+ public Eviction strategy() {
+ return Eviction.LIRS;
+ }
}
- /**
- * Segments are specialized versions of hash tables. This
- * subclasses from ReentrantLock opportunistically, just to
- * simplify some locking and avoid separate construction.
- */
- static final class Segment<K,V> extends ReentrantLock implements Serializable {
- /*
- * Segments maintain a table of entry lists that are ALWAYS
- * kept in a consistent state, so can be read without locking.
- * Next fields of nodes are immutable (final). All list
- * additions are performed at the front of each bin. This
- * makes it easy to check changes, and also fast to traverse.
- * When nodes would otherwise be changed, new nodes are
- * created to replace them. This works well for hash tables
- * since the bin lists tend to be short. (The average length
- * is less than two for the default load factor threshold.)
- *
- * Read operations can thus proceed without locking, but rely
- * on selected uses of volatiles to ensure that completed
- * write operations performed by other threads are
- * noticed. For most purposes, the "count" field, tracking the
- * number of elements, serves as that volatile variable
- * ensuring visibility. This is convenient because this field
- * needs to be read in many read operations anyway:
- *
- * - All (unsynchronized) read operations must first read the
- * "count" field, and should not look at table entries if
- * it is 0.
- *
- * - All (synchronized) write operations should write to
- * the "count" field after structurally changing any bin.
- * The operations must not take any action that could even
- * momentarily cause a concurrent read operation to see
- * inconsistent data. This is made easier by the nature of
- * the read operations in Map. For example, no operation
- * can reveal that the table has grown but the threshold
- * has not yet been updated, so there are no atomicity
- * requirements for this with respect to reads.
- *
- * As a guide, all critical volatile reads and writes to the
- * count field are marked in code comments.
- */
+ /**
+ * Segments are specialized versions of hash tables. This
+ * subclasses from ReentrantLock opportunistically, just to
+ * simplify some locking and avoid separate construction.
+ */
+ static final class Segment<K,V> extends ReentrantLock {
+ /*
+ * Segments maintain a table of entry lists that are ALWAYS
+ * kept in a consistent state, so can be read without locking.
+ * Next fields of nodes are immutable (final). All list
+ * additions are performed at the front of each bin. This
+ * makes it easy to check changes, and also fast to traverse.
+ * When nodes would otherwise be changed, new nodes are
+ * created to replace them. This works well for hash tables
+ * since the bin lists tend to be short. (The average length
+ * is less than two for the default load factor threshold.)
+ *
+ * Read operations can thus proceed without locking, but rely
+ * on selected uses of volatiles to ensure that completed
+ * write operations performed by other threads are
+ * noticed. For most purposes, the "count" field, tracking the
+ * number of elements, serves as that volatile variable
+ * ensuring visibility. This is convenient because this field
+ * needs to be read in many read operations anyway:
+ *
+ * - All (unsynchronized) read operations must first read the
+ * "count" field, and should not look at table entries if
+ * it is 0.
+ *
+ * - All (synchronized) write operations should write to
+ * the "count" field after structurally changing any bin.
+ * The operations must not take any action that could even
+ * momentarily cause a concurrent read operation to see
+ * inconsistent data. This is made easier by the nature of
+ * the read operations in Map. For example, no operation
+ * can reveal that the table has grown but the threshold
+ * has not yet been updated, so there are no atomicity
+ * requirements for this with respect to reads.
+ *
+ * As a guide, all critical volatile reads and writes to the
+ * count field are marked in code comments.
+ */
- private static final long serialVersionUID = 2249069246763182397L;
+ private static final long serialVersionUID = 2249069246763182397L;
- /**
- * The number of elements in this segment's region.
- */
- transient volatile int count;
+ /**
+ * The number of elements in this segment's region.
+ */
+ transient volatile int count;
- /**
- * Number of updates that alter the size of the table. This is
- * used during bulk-read methods to make sure they see a
- * consistent snapshot: If modCounts change during a traversal
- * of segments computing size or checking containsValue, then
- * we might have an inconsistent view of state so (usually)
- * must retry.
- */
- transient int modCount;
+ /**
+ * Number of updates that alter the size of the table. This is
+ * used during bulk-read methods to make sure they see a
+ * consistent snapshot: If modCounts change during a traversal
+ * of segments computing size or checking containsValue, then
+ * we might have an inconsistent view of state so (usually)
+ * must retry.
+ */
+ transient int modCount;
- /**
- * The table is rehashed when its size exceeds this threshold.
- * (The value of this field is always <tt>(int)(capacity *
- * loadFactor)</tt>.)
- */
- transient int threshold;
+ /**
+ * The table is rehashed when its size exceeds this threshold.
+ * (The value of this field is always <tt>(int)(capacity *
+ * loadFactor)</tt>.)
+ */
+ transient int threshold;
- /**
- * The per-segment table.
- */
- transient volatile HashEntry<K,V>[] table;
+ /**
+ * The per-segment table.
+ */
+ transient volatile HashEntry<K,V>[] table;
- /**
- * The load factor for the hash table. Even though this value
- * is same for all segments, it is replicated to avoid needing
- * links to outer object.
- * @serial
- */
- final float loadFactor;
+ /**
+ * The load factor for the hash table. Even though this value
+ * is same for all segments, it is replicated to avoid needing
+ * links to outer object.
+ * @serial
+ */
+ final float loadFactor;
- transient final EvictionPolicy<K, V> eviction;
+ transient final EvictionPolicy<K, V> eviction;
- transient final EvictionListener<K, V> evictionListener;
+ transient final EvictionListener<K, V> evictionListener;
- Segment(int cap, float lf, Eviction es, EvictionListener<K, V> listener) {
- loadFactor = lf;
- eviction = es.make(this, cap, lf);
- evictionListener = listener;
- setTable(HashEntry.<K, V> newArray(cap));
- }
+ Segment(int cap, float lf, Eviction es, EvictionListener<K, V> listener) {
+ loadFactor = lf;
+ eviction = es.make(this, cap, lf);
+ evictionListener = listener;
+ setTable(HashEntry.<K, V> newArray(cap));
+ }
- @SuppressWarnings("unchecked")
- static final <K,V> Segment<K,V>[] newArray(int i) {
- return new Segment[i];
- }
+ @SuppressWarnings("unchecked")
+ static final <K,V> Segment<K,V>[] newArray(int i) {
+ return new Segment[i];
+ }
- EvictionListener<K, V> getEvictionListener() {
- return evictionListener;
- }
+ EvictionListener<K, V> getEvictionListener() {
+ return evictionListener;
+ }
- /**
- * Sets table to new HashEntry array.
- * Call only while holding lock or in constructor.
- */
- void setTable(HashEntry<K,V>[] newTable) {
- threshold = (int)(newTable.length * loadFactor);
- table = newTable;
- }
+ /**
+ * Sets table to new HashEntry array.
+ * Call only while holding lock or in constructor.
+ */
+ void setTable(HashEntry<K,V>[] newTable) {
+ threshold = (int)(newTable.length * loadFactor);
+ table = newTable;
+ }
- /**
- * Returns properly casted first entry of bin for given hash.
- */
- HashEntry<K,V> getFirst(int hash) {
- HashEntry<K,V>[] tab = table;
- return tab[hash & tab.length - 1];
- }
+ /**
+ * Returns properly casted first entry of bin for given hash.
+ */
+ HashEntry<K,V> getFirst(int hash) {
+ HashEntry<K,V>[] tab = table;
+ return tab[hash & tab.length - 1];
+ }
- /**
- * Reads value field of an entry under lock. Called if value
- * field ever appears to be null. This is possible only if a
- * compiler happens to reorder a HashEntry initialization with
- * its table assignment, which is legal under memory model
- * but is not known to ever occur.
- */
- V readValueUnderLock(HashEntry<K,V> e) {
+ /**
+ * Reads value field of an entry under lock. Called if value
+ * field ever appears to be null. This is possible only if a
+ * compiler happens to reorder a HashEntry initialization with
+ * its table assignment, which is legal under memory model
+ * but is not known to ever occur.
+ */
+ V readValueUnderLock(HashEntry<K,V> e) {
+ lock();
+ try {
+ return e.value;
+ } finally {
+ unlock();
+ }
+ }
+
+ /* Specialized implementations of map methods */
+
+ V get(Object key, int hash) {
+ int c = count;
+ if (c != 0) { // read-volatile
+ V result = null;
+ HashEntry<K, V> e = getFirst(hash);
+ loop: while (e != null) {
+ if (e.hash == hash && key.equals(e.key)) {
+ V v = e.value;
+ if (v != null) {
+ result = v;
+ break loop;
+ } else {
+ result = readValueUnderLock(e); // recheck
+ break loop;
+ }
+ }
+ e = e.next;
+ }
+ // a hit
+ if (result != null) {
+ if (eviction.onEntryHit(e)) {
+ Set<HashEntry<K, V>> evicted = attemptEviction(false);
+ // piggyback listener invocation on callers thread outside lock
+ if (evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
+ }
+ }
+ }
+ }
+ return result;
+ }
+ return null;
+ }
+
+ private Set<HashEntry<K, V>> attemptEviction(boolean lockedAlready) {
+ Set<HashEntry<K, V>> evicted = null;
+ boolean obtainedLock = !lockedAlready ? tryLock() : true;
+ if (!obtainedLock && eviction.thresholdExpired()) {
lock();
+ obtainedLock = true;
+ }
+ if (obtainedLock) {
try {
- return e.value;
+ evicted = eviction.execute();
} finally {
- unlock();
+ if (!lockedAlready) {
+ unlock();
+ }
}
- }
+ }
+ return evicted;
+ }
- /* Specialized implementations of map methods */
-
- V get(Object key, int hash) {
- int c = count;
- if (c != 0) { // read-volatile
- V result = null;
- HashEntry<K, V> e = getFirst(hash);
- loop: while (e != null) {
- if (e.hash == hash && key.equals(e.key)) {
- V v = e.value;
- if (v != null) {
- result = v;
- break loop;
- } else {
- result = readValueUnderLock(e); // recheck
- break loop;
- }
- }
- e = e.next;
+ boolean containsKey(Object key, int hash) {
+ if (count != 0) { // read-volatile
+ HashEntry<K,V> e = getFirst(hash);
+ while (e != null) {
+ if (e.hash == hash && key.equals(e.key)) {
+ return true;
}
- // a hit
- if (result != null) {
- if (eviction.onEntryHit(e)) {
- Set<HashEntry<K, V>> evicted = attemptEviction(false);
- // piggyback listener invocation on callers thread outside lock
- if (evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
- }
- }
- }
- return result;
- }
- return null;
- }
+ e = e.next;
+ }
+ }
+ return false;
+ }
- private Set<HashEntry<K, V>> attemptEviction(boolean lockedAlready) {
- Set<HashEntry<K, V>> evicted = null;
- boolean obtainedLock = !lockedAlready ? tryLock() : true;
- if (!obtainedLock && eviction.thresholdExpired()) {
- lock();
- obtainedLock = true;
- }
- if (obtainedLock) {
- try {
- evicted = eviction.execute();
- } finally {
- if (!lockedAlready) {
- unlock();
+ boolean containsValue(Object value) {
+ if (count != 0) { // read-volatile
+ HashEntry<K,V>[] tab = table;
+ int len = tab.length;
+ for (int i = 0 ; i < len; i++) {
+ for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
+ V v = e.value;
+ if (v == null) {
+ v = readValueUnderLock(e);
}
- }
- }
- return evicted;
- }
-
- boolean containsKey(Object key, int hash) {
- if (count != 0) { // read-volatile
- HashEntry<K,V> e = getFirst(hash);
- while (e != null) {
- if (e.hash == hash && key.equals(e.key)) {
+ if (value.equals(v)) {
return true;
}
- e = e.next;
- }
+ }
}
- return false;
- }
+ }
+ return false;
+ }
- boolean containsValue(Object value) {
- if (count != 0) { // read-volatile
- HashEntry<K,V>[] tab = table;
- int len = tab.length;
- for (int i = 0 ; i < len; i++) {
- for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
- V v = e.value;
- if (v == null) {
- v = readValueUnderLock(e);
- }
- if (value.equals(v)) {
- return true;
- }
- }
- }
+ boolean replace(K key, int hash, V oldValue, V newValue) {
+ lock();
+ Set<HashEntry<K, V>> evicted = null;
+ try {
+ HashEntry<K, V> e = getFirst(hash);
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ e = e.next;
}
- return false;
- }
- boolean replace(K key, int hash, V oldValue, V newValue) {
- lock();
- Set<HashEntry<K, V>> evicted = null;
- try {
- HashEntry<K, V> e = getFirst(hash);
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
- e = e.next;
+ boolean replaced = false;
+ if (e != null && oldValue.equals(e.value)) {
+ replaced = true;
+ e.value = newValue;
+ if (eviction.onEntryHit(e)) {
+ evicted = attemptEviction(true);
}
-
- boolean replaced = false;
- if (e != null && oldValue.equals(e.value)) {
- replaced = true;
- e.value = newValue;
- if (eviction.onEntryHit(e)) {
- evicted = attemptEviction(true);
- }
+ }
+ return replaced;
+ } finally {
+ unlock();
+ // piggyback listener invocation on callers thread outside lock
+ if (evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
}
- return replaced;
- } finally {
- unlock();
- // piggyback listener invocation on callers thread outside lock
- if (evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
- }
- }
- }
+ }
+ }
+ }
- V replace(K key, int hash, V newValue) {
- lock();
- Set<HashEntry<K, V>> evicted = null;
- try {
- HashEntry<K, V> e = getFirst(hash);
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
- e = e.next;
- }
+ V replace(K key, int hash, V newValue) {
+ lock();
+ Set<HashEntry<K, V>> evicted = null;
+ try {
+ HashEntry<K, V> e = getFirst(hash);
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ e = e.next;
+ }
- V oldValue = null;
- if (e != null) {
- oldValue = e.value;
- e.value = newValue;
- if (eviction.onEntryHit(e)) {
- evicted = attemptEviction(true);
- }
+ V oldValue = null;
+ if (e != null) {
+ oldValue = e.value;
+ e.value = newValue;
+ if (eviction.onEntryHit(e)) {
+ evicted = attemptEviction(true);
}
- return oldValue;
- } finally {
- unlock();
- // piggyback listener invocation on callers thread outside lock
- if(evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ // piggyback listener invocation on callers thread outside lock
+ if(evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
}
- }
- }
+ }
+ }
+ }
-
- V put(K key, int hash, V value, boolean onlyIfAbsent) {
- lock();
- Set<HashEntry<K, V>> evicted = null;
- try {
- int c = count;
- if (c++ > threshold && eviction.strategy() == Eviction.NONE) {
+ V put(K key, int hash, V value, boolean onlyIfAbsent) {
+ lock();
+ Set<HashEntry<K, V>> evicted = null;
+ try {
+ int c = count;
+ if (c++ > threshold && eviction.strategy() == Eviction.NONE) {
rehash();
}
- HashEntry<K, V>[] tab = table;
- int index = hash & tab.length - 1;
- HashEntry<K, V> first = tab[index];
- HashEntry<K, V> e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ HashEntry<K, V>[] tab = table;
+ int index = hash & tab.length - 1;
+ HashEntry<K, V> first = tab[index];
+ HashEntry<K, V> e = first;
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
e = e.next;
}
- V oldValue;
- if (e != null) {
- oldValue = e.value;
- if (!onlyIfAbsent) {
- e.value = value;
- eviction.onEntryHit(e);
+ V oldValue;
+ if (e != null) {
+ oldValue = e.value;
+ if (!onlyIfAbsent) {
+ e.value = value;
+ eviction.onEntryHit(e);
+ }
+ } else {
+ oldValue = null;
+ ++modCount;
+ count = c; // write-volatile
+ if (eviction.strategy() != Eviction.NONE) {
+ if (c > tab.length) {
+ // remove entries;lower count
+ evicted = eviction.execute();
+ // re-read first
+ first = tab[index];
}
- } else {
- oldValue = null;
- ++modCount;
- count = c; // write-volatile
- if (eviction.strategy() != Eviction.NONE) {
- if (c > tab.length) {
- // remove entries;lower count
- evicted = eviction.execute();
- // re-read first
- first = tab[index];
- }
- // add a new entry
- tab[index] = new HashEntry<K, V>(key, hash, first, value);
- // notify a miss
- Set<HashEntry<K, V>> newlyEvicted = eviction.onEntryMiss(tab[index]);
- if (!newlyEvicted.isEmpty()) {
- if (evicted != null) {
- evicted.addAll(newlyEvicted);
- } else {
- evicted = newlyEvicted;
- }
- }
- } else {
- tab[index] = new HashEntry<K, V>(key, hash, first, value);
+ // add a new entry
+ tab[index] = new HashEntry<K, V>(key, hash, first, value);
+ // notify a miss
+ Set<HashEntry<K, V>> newlyEvicted = eviction.onEntryMiss(tab[index]);
+ if (!newlyEvicted.isEmpty()) {
+ if (evicted != null) {
+ evicted.addAll(newlyEvicted);
+ } else {
+ evicted = newlyEvicted;
+ }
}
- }
- return oldValue;
- } finally {
- unlock();
- // piggyback listener invocation on callers thread outside lock
- if(evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
- }
- }
+ } else {
+ tab[index] = new HashEntry<K, V>(key, hash, first, value);
+ }
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ // piggyback listener invocation on callers thread outside lock
+ if(evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
+ }
+ }
+ }
}
+ void rehash() {
+ HashEntry<K,V>[] oldTable = table;
+ int oldCapacity = oldTable.length;
+ if (oldCapacity >= MAXIMUM_CAPACITY) {
+ return;
+ }
- void rehash() {
- HashEntry<K,V>[] oldTable = table;
- int oldCapacity = oldTable.length;
- if (oldCapacity >= MAXIMUM_CAPACITY) {
- return;
- }
+ /*
+ * Reclassify nodes in each list to new Map. Because we are
+ * using power-of-two expansion, the elements from each bin
+ * must either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next
+ * fields won't change. Statistically, at the default
+ * threshold, only about one-sixth of them need cloning when
+ * a table doubles. The nodes they replace will be garbage
+ * collectable as soon as they are no longer referenced by any
+ * reader thread that may be in the midst of traversing table
+ * right now.
+ */
- /*
- * Reclassify nodes in each list to new Map. Because we are
- * using power-of-two expansion, the elements from each bin
- * must either stay at same index, or move with a power of two
- * offset. We eliminate unnecessary node creation by catching
- * cases where old nodes can be reused because their next
- * fields won't change. Statistically, at the default
- * threshold, only about one-sixth of them need cloning when
- * a table doubles. The nodes they replace will be garbage
- * collectable as soon as they are no longer referenced by any
- * reader thread that may be in the midst of traversing table
- * right now.
- */
+ HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
+ threshold = (int)(newTable.length * loadFactor);
+ int sizeMask = newTable.length - 1;
+ for (int i = 0; i < oldCapacity ; i++) {
+ // We need to guarantee that any existing reads of old Map can
+ // proceed. So we cannot yet null out each bin.
+ HashEntry<K,V> e = oldTable[i];
- HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
- threshold = (int)(newTable.length * loadFactor);
- int sizeMask = newTable.length - 1;
- for (int i = 0; i < oldCapacity ; i++) {
- // We need to guarantee that any existing reads of old Map can
- // proceed. So we cannot yet null out each bin.
- HashEntry<K,V> e = oldTable[i];
+ if (e != null) {
+ HashEntry<K,V> next = e.next;
+ int idx = e.hash & sizeMask;
- if (e != null) {
- HashEntry<K,V> next = e.next;
- int idx = e.hash & sizeMask;
+ // Single node on list
+ if (next == null) {
+ newTable[idx] = e;
+ } else {
+ // Reuse trailing consecutive sequence at same slot
+ HashEntry<K,V> lastRun = e;
+ int lastIdx = idx;
+ for (HashEntry<K,V> last = next;
+ last != null;
+ last = last.next) {
+ int k = last.hash & sizeMask;
+ if (k != lastIdx) {
+ lastIdx = k;
+ lastRun = last;
+ }
+ }
+ newTable[lastIdx] = lastRun;
- // Single node on list
- if (next == null) {
- newTable[idx] = e;
- } else {
- // Reuse trailing consecutive sequence at same slot
- HashEntry<K,V> lastRun = e;
- int lastIdx = idx;
- for (HashEntry<K,V> last = next;
- last != null;
- last = last.next) {
- int k = last.hash & sizeMask;
- if (k != lastIdx) {
- lastIdx = k;
- lastRun = last;
- }
- }
- newTable[lastIdx] = lastRun;
+ // Clone all remaining nodes
+ for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
+ int k = p.hash & sizeMask;
+ HashEntry<K,V> n = newTable[k];
+ newTable[k] = new HashEntry<K,V>(p.key, p.hash,
+ n, p.value);
+ }
+ }
+ }
+ }
+ table = newTable;
+ }
- // Clone all remaining nodes
- for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
- int k = p.hash & sizeMask;
- HashEntry<K,V> n = newTable[k];
- newTable[k] = new HashEntry<K,V>(p.key, p.hash,
- n, p.value);
- }
- }
- }
+ /**
+ * Remove; match on key only if value null, else match both.
+ */
+ V remove(Object key, int hash, Object value) {
+ lock();
+ try {
+ int c = count - 1;
+ HashEntry<K, V>[] tab = table;
+ int index = hash & tab.length - 1;
+ HashEntry<K, V> first = tab[index];
+ HashEntry<K, V> e = first;
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ e = e.next;
}
- table = newTable;
- }
- /**
- * Remove; match on key only if value null, else match both.
- */
- V remove(Object key, int hash, Object value) {
- lock();
- try {
- int c = count - 1;
- HashEntry<K, V>[] tab = table;
- int index = hash & tab.length - 1;
- HashEntry<K, V> first = tab[index];
- HashEntry<K, V> e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
- e = e.next;
- }
+ V oldValue = null;
+ if (e != null) {
+ V v = e.value;
+ if (value == null || value.equals(v)) {
+ oldValue = v;
+ // All entries following removed node can stay
+ // in list, but all preceding ones need to be
+ // cloned.
+ ++modCount;
- V oldValue = null;
- if (e != null) {
- V v = e.value;
- if (value == null || value.equals(v)) {
- oldValue = v;
- // All entries following removed node can stay
- // in list, but all preceding ones need to be
- // cloned.
- ++modCount;
+ // e was removed
+ eviction.onEntryRemove(e);
- // e was removed
- eviction.onEntryRemove(e);
+ HashEntry<K, V> newFirst = e.next;
+ for (HashEntry<K, V> p = first; p != e; p = p.next) {
+ // allow p to be GC-ed
+ eviction.onEntryRemove(p);
+ newFirst = new HashEntry<K, V>(p.key, p.hash, newFirst, p.value);
+ // and notify eviction algorithm about new hash entries
+ eviction.onEntryMiss(newFirst);
+ }
- HashEntry<K, V> newFirst = e.next;
- for (HashEntry<K, V> p = first; p != e; p = p.next) {
- // allow p to be GC-ed
- eviction.onEntryRemove(p);
- newFirst = new HashEntry<K, V>(p.key, p.hash, newFirst, p.value);
- // and notify eviction algorithm about new hash entries
- eviction.onEntryMiss(newFirst);
- }
+ tab[index] = newFirst;
+ count = c; // write-volatile
+ }
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ }
+ }
- tab[index] = newFirst;
- count = c; // write-volatile
- }
+ void clear() {
+ if (count != 0) {
+ lock();
+ try {
+ HashEntry<K, V>[] tab = table;
+ for (int i = 0; i < tab.length; i++) {
+ tab[i] = null;
}
- return oldValue;
- } finally {
+ ++modCount;
+ eviction.clear();
+ count = 0; // write-volatile
+ } finally {
unlock();
- }
- }
+ }
+ }
+ }
+ }
- void clear() {
- if (count != 0) {
- lock();
- try {
- HashEntry<K, V>[] tab = table;
- for (int i = 0; i < tab.length; i++) {
- tab[i] = null;
- }
- ++modCount;
- eviction.clear();
- count = 0; // write-volatile
- } finally {
- unlock();
- }
- }
- }
- }
+ /* ---------------- Public operations -------------- */
- /* ---------------- Public operations -------------- */
-
-
- /**
- * Creates a new, empty map with the specified maximum capacity, load factor and concurrency
- * level.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- * @param concurrencyLevel
- * the estimated number of concurrently updating threads. The implementation performs
- * internal sizing to try to accommodate this many threads.
- *
- * @param evictionStrategy
- * the algorithm used to evict elements from this map
- *
- * @param evictionListener
- * the evicton listener callback to be notified about evicted elements
- *
- * @throws IllegalArgumentException
- * if the initial capacity is negative or the load factor or concurrencyLevel are
- * nonpositive.
- */
- public BoundedConcurrentHashMap(int capacity, int concurrencyLevel,
- Eviction evictionStrategy, EvictionListener<K, V> evictionListener) {
- if (capacity < 0 || concurrencyLevel <= 0) {
+ /**
+ * Creates a new, empty map with the specified maximum capacity, load factor and concurrency
+ * level.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ * @param concurrencyLevel
+ * the estimated number of concurrently updating threads. The implementation performs
+ * internal sizing to try to accommodate this many threads.
+ *
+ * @param evictionStrategy
+ * the algorithm used to evict elements from this map
+ *
+ * @param evictionListener
+ * the eviction listener callback to be notified about evicted elements
+ *
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public BoundedConcurrentHashMap(int capacity, int concurrencyLevel,
+ Eviction evictionStrategy, EvictionListener<K, V> evictionListener) {
+ if (capacity < 0 || concurrencyLevel <= 0) {
throw new IllegalArgumentException();
}
- concurrencyLevel = Math.min(capacity / 2, concurrencyLevel); // concurrencyLevel cannot be > capacity/2
- concurrencyLevel = Math.max(concurrencyLevel, 1); // concurrencyLevel cannot be less than 1
+ concurrencyLevel = Math.min(capacity / 2, concurrencyLevel); // concurrencyLevel cannot be > capacity/2
+ concurrencyLevel = Math.max(concurrencyLevel, 1); // concurrencyLevel cannot be less than 1
- // minimum two elements per segment
- if (capacity < concurrencyLevel * 2 && capacity != 1) {
+ // minimum two elements per segment
+ if (capacity < concurrencyLevel * 2 && capacity != 1) {
throw new IllegalArgumentException("Maximum capacity has to be at least twice the concurrencyLevel");
}
- if (evictionStrategy == null || evictionListener == null) {
+ if (evictionStrategy == null || evictionListener == null) {
throw new IllegalArgumentException();
}
- if (concurrencyLevel > MAX_SEGMENTS) {
+ if (concurrencyLevel > MAX_SEGMENTS) {
concurrencyLevel = MAX_SEGMENTS;
}
- // Find power-of-two sizes best matching arguments
- int sshift = 0;
- int ssize = 1;
- while (ssize < concurrencyLevel) {
- ++sshift;
- ssize <<= 1;
- }
- segmentShift = 32 - sshift;
- segmentMask = ssize - 1;
- this.segments = Segment.newArray(ssize);
+ // Find power-of-two sizes best matching arguments
+ int sshift = 0;
+ int ssize = 1;
+ while (ssize < concurrencyLevel) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ segmentShift = 32 - sshift;
+ segmentMask = ssize - 1;
+ this.segments = Segment.newArray(ssize);
- if (capacity > MAXIMUM_CAPACITY) {
+ if (capacity > MAXIMUM_CAPACITY) {
capacity = MAXIMUM_CAPACITY;
}
- int c = capacity / ssize;
- if (c * ssize < capacity) {
+ int c = capacity / ssize;
+ if (c * ssize < capacity) {
++c;
}
- int cap = 1;
- while (cap < c) {
+ int cap = 1;
+ while (cap < c) {
cap <<= 1;
}
- for (int i = 0; i < this.segments.length; ++i) {
- this.segments[i] = new Segment<K, V>(cap, DEFAULT_LOAD_FACTOR, evictionStrategy,
- evictionListener);
+ for (int i = 0; i < this.segments.length; ++i) {
+ this.segments[i] = new Segment<K, V>(cap, DEFAULT_LOAD_FACTOR, evictionStrategy, evictionListener);
}
- }
+ }
- /**
- * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
- * level and LRU eviction policy.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- * @param concurrencyLevel
- * the estimated number of concurrently updating threads. The implementation performs
- * internal sizing to try to accommodate this many threads.
- *
- * @throws IllegalArgumentException
- * if the initial capacity is negative or the load factor or concurrencyLevel are
- * nonpositive.
- */
- public BoundedConcurrentHashMap(int capacity, int concurrencyLevel) {
- this(capacity, concurrencyLevel, Eviction.LRU);
- }
+ /**
+ * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
+ * level and LRU eviction policy.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ * @param concurrencyLevel
+ * the estimated number of concurrently updating threads. The implementation performs
+ * internal sizing to try to accommodate this many threads.
+ *
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public BoundedConcurrentHashMap(int capacity, int concurrencyLevel) {
+ this(capacity, concurrencyLevel, Eviction.LRU);
+ }
- /**
- * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
- * level and eviction strategy.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- * @param concurrencyLevel
- * the estimated number of concurrently updating threads. The implementation performs
- * internal sizing to try to accommodate this many threads.
- *
- * @param evictionStrategy
- * the algorithm used to evict elements from this map
- *
- * @throws IllegalArgumentException
- * if the initial capacity is negative or the load factor or concurrencyLevel are
- * nonpositive.
- */
- public BoundedConcurrentHashMap(int capacity, int concurrencyLevel, Eviction evictionStrategy) {
- this(capacity, concurrencyLevel, evictionStrategy, new NullEvictionListener<K, V>());
- }
+ /**
+ * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
+ * level and eviction strategy.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ * @param concurrencyLevel
+ * the estimated number of concurrently updating threads. The implementation performs
+ * internal sizing to try to accommodate this many threads.
+ *
+ * @param evictionStrategy
+ * the algorithm used to evict elements from this map
+ *
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public BoundedConcurrentHashMap(int capacity, int concurrencyLevel, Eviction evictionStrategy) {
+ this(capacity, concurrencyLevel, evictionStrategy, new NullEvictionListener<K, V>());
+ }
- /**
- * Creates a new, empty map with the specified maximum capacity, default concurrency
- * level and LRU eviction policy.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- *
- * @throws IllegalArgumentException if the initial capacity of
- * elements is negative or the load factor is nonpositive
- *
- * @since 1.6
- */
- public BoundedConcurrentHashMap(int capacity) {
- this(capacity, DEFAULT_CONCURRENCY_LEVEL);
- }
+ /**
+ * Creates a new, empty map with the specified maximum capacity, default concurrency
+ * level and LRU eviction policy.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ *
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative or the load factor is nonpositive
+ *
+ * @since 1.6
+ */
+ public BoundedConcurrentHashMap(int capacity) {
+ this(capacity, DEFAULT_CONCURRENCY_LEVEL);
+ }
- /**
- * Creates a new, empty map with the default maximum capacity
- */
- public BoundedConcurrentHashMap() {
- this(DEFAULT_MAXIMUM_CAPACITY, DEFAULT_CONCURRENCY_LEVEL);
- }
+ /**
+ * Creates a new, empty map with the default maximum capacity
+ */
+ public BoundedConcurrentHashMap() {
+ this(DEFAULT_MAXIMUM_CAPACITY, DEFAULT_CONCURRENCY_LEVEL);
+ }
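
For reference, a minimal usage sketch of the constructors added above (class name, keys and values are illustrative, not part of this commit; it only relies on the nested Eviction enum and EvictionListener interface that appear later in this diff):

import org.infinispan.util.concurrent.BoundedConcurrentHashMap;
import org.infinispan.util.concurrent.BoundedConcurrentHashMap.Eviction;
import org.infinispan.util.concurrent.BoundedConcurrentHashMap.EvictionListener;

public class BoundedMapConstruction {
   public static void main(String[] args) {
      // Default: DEFAULT_MAXIMUM_CAPACITY (512) entries, default concurrency level, LRU eviction.
      BoundedConcurrentHashMap<String, String> defaults =
            new BoundedConcurrentHashMap<String, String>();

      // Explicit capacity and concurrency level; still LRU by default.
      BoundedConcurrentHashMap<String, String> sized =
            new BoundedConcurrentHashMap<String, String>(256, 16);

      // Full control: capacity, concurrency level, strategy and a listener for evicted entries.
      EvictionListener<String, String> listener = new EvictionListener<String, String>() {
         public void onEntryEviction(String key, String value) {
            System.out.println("Evicted " + key + " -> " + value);
         }
      };
      BoundedConcurrentHashMap<String, String> bounded =
            new BoundedConcurrentHashMap<String, String>(128, 8, Eviction.LIRS, listener);

      bounded.put("k", "v");
   }
}

Note that the capacity must be at least twice the effective concurrency level, otherwise the constructor throws IllegalArgumentException as shown above.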
- /**
- * Returns <tt>true</tt> if this map contains no key-value mappings.
- *
- * @return <tt>true</tt> if this map contains no key-value mappings
- */
- @Override
+ /**
+ * Returns <tt>true</tt> if this map contains no key-value mappings.
+ *
+ * @return <tt>true</tt> if this map contains no key-value mappings
+ */
+ @Override
public boolean isEmpty() {
- final Segment<K,V>[] segments = this.segments;
- /*
- * We keep track of per-segment modCounts to avoid ABA
- * problems in which an element in one segment was added and
- * in another removed during traversal, in which case the
- * table was never actually empty at any point. Note the
- * similar use of modCounts in the size() and containsValue()
- * methods, which are the only other methods also susceptible
- * to ABA problems.
- */
- int[] mc = new int[segments.length];
- int mcsum = 0;
- for (int i = 0; i < segments.length; ++i) {
- if (segments[i].count != 0) {
+ final Segment<K,V>[] segments = this.segments;
+ /*
+ * We keep track of per-segment modCounts to avoid ABA
+ * problems in which an element in one segment was added and
+ * in another removed during traversal, in which case the
+ * table was never actually empty at any point. Note the
+ * similar use of modCounts in the size() and containsValue()
+ * methods, which are the only other methods also susceptible
+ * to ABA problems.
+ */
+ int[] mc = new int[segments.length];
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++i) {
+ if (segments[i].count != 0) {
+ return false;
+ } else {
+ mcsum += mc[i] = segments[i].modCount;
+ }
+ }
+ // If mcsum happens to be zero, then we know we got a snapshot
+ // before any modifications at all were made. This is
+ // probably common enough to bother tracking.
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++i) {
+ if (segments[i].count != 0 || mc[i] != segments[i].modCount) {
return false;
- } else {
- mcsum += mc[i] = segments[i].modCount;
}
- }
- // If mcsum happens to be zero, then we know we got a snapshot
- // before any modifications at all were made. This is
- // probably common enough to bother tracking.
- if (mcsum != 0) {
- for (int i = 0; i < segments.length; ++i) {
- if (segments[i].count != 0 ||
- mc[i] != segments[i].modCount) {
- return false;
- }
- }
- }
- return true;
- }
+ }
+ }
+ return true;
+ }
- /**
- * Returns the number of key-value mappings in this map. If the
- * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
- * <tt>Integer.MAX_VALUE</tt>.
- *
- * @return the number of key-value mappings in this map
- */
- @Override
+ /**
+ * Returns the number of key-value mappings in this map. If the
+ * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
+ * <tt>Integer.MAX_VALUE</tt>.
+ *
+ * @return the number of key-value mappings in this map
+ */
+ @Override
public int size() {
- final Segment<K,V>[] segments = this.segments;
- long sum = 0;
- long check = 0;
- int[] mc = new int[segments.length];
- // Try a few times to get accurate count. On failure due to
- // continuous async changes in table, resort to locking.
- for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
- check = 0;
- sum = 0;
- int mcsum = 0;
- for (int i = 0; i < segments.length; ++i) {
- sum += segments[i].count;
- mcsum += mc[i] = segments[i].modCount;
+ final Segment<K,V>[] segments = this.segments;
+ long sum = 0;
+ long check = 0;
+ int[] mc = new int[segments.length];
+ // Try a few times to get accurate count. On failure due to
+ // continuous async changes in table, resort to locking.
+ for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) {
+ check = 0;
+ sum = 0;
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++ i) {
+ sum += segments[i].count;
+ mcsum += mc[i] = segments[i].modCount;
+ }
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++ i) {
+ check += segments[i].count;
+ if (mc[i] != segments[i].modCount) {
+ check = -1; // force retry
+ break;
+ }
}
- if (mcsum != 0) {
- for (int i = 0; i < segments.length; ++i) {
- check += segments[i].count;
- if (mc[i] != segments[i].modCount) {
- check = -1; // force retry
- break;
- }
- }
- }
- if (check == sum) {
- break;
- }
- }
- if (check != sum) { // Resort to locking all segments
- sum = 0;
- for (int i = 0; i < segments.length; ++i) {
- segments[i].lock();
- }
- for (int i = 0; i < segments.length; ++i) {
- sum += segments[i].count;
- }
- for (int i = 0; i < segments.length; ++i) {
- segments[i].unlock();
- }
- }
- if (sum > Integer.MAX_VALUE) {
+ }
+ if (check == sum) {
+ break;
+ }
+ }
+ if (check != sum) { // Resort to locking all segments
+ sum = 0;
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].lock();
+ }
+ for (int i = 0; i < segments.length; ++ i) {
+ sum += segments[i].count;
+ }
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].unlock();
+ }
+ }
+ if (sum > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
} else {
- return (int)sum;
+ return (int) sum;
}
- }
+ }
- /**
- * Returns the value to which the specified key is mapped,
- * or {@code null} if this map contains no mapping for the key.
- *
- * <p>More formally, if this map contains a mapping from a key
- * {@code k} to a value {@code v} such that {@code key.equals(k)},
- * then this method returns {@code v}; otherwise it returns
- * {@code null}. (There can be at most one such mapping.)
- *
- * @throws NullPointerException if the specified key is null
- */
- @Override
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
+ * then this method returns {@code v}; otherwise it returns
+ * {@code null}. (There can be at most one such mapping.)
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
public V get(Object key) {
- int hash = hash(key.hashCode());
- return segmentFor(hash).get(key, hash);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).get(key, hash);
+ }
- /**
- * Tests if the specified object is a key in this table.
- *
- * @param key possible key
- * @return <tt>true</tt> if and only if the specified object
- * is a key in this table, as determined by the
- * <tt>equals</tt> method; <tt>false</tt> otherwise.
- * @throws NullPointerException if the specified key is null
- */
- @Override
+ /**
+ * Tests if the specified object is a key in this table.
+ *
+ * @param key possible key
+ * @return <tt>true</tt> if and only if the specified object
+ * is a key in this table, as determined by the
+ * <tt>equals</tt> method; <tt>false</tt> otherwise.
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
public boolean containsKey(Object key) {
- int hash = hash(key.hashCode());
- return segmentFor(hash).containsKey(key, hash);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).containsKey(key, hash);
+ }
- /**
- * Returns <tt>true</tt> if this map maps one or more keys to the
- * specified value. Note: This method requires a full internal
- * traversal of the hash table, and so is much slower than
- * method <tt>containsKey</tt>.
- *
- * @param value value whose presence in this map is to be tested
- * @return <tt>true</tt> if this map maps one or more keys to the
- * specified value
- * @throws NullPointerException if the specified value is null
- */
- @Override
+ /**
+ * Returns <tt>true</tt> if this map maps one or more keys to the
+ * specified value. Note: This method requires a full internal
+ * traversal of the hash table, and so is much slower than
+ * method <tt>containsKey</tt>.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map maps one or more keys to the
+ * specified value
+ * @throws NullPointerException if the specified value is null
+ */
+ @Override
public boolean containsValue(Object value) {
- if (value == null) {
+ if (value == null) {
throw new NullPointerException();
}
- // See explanation of modCount use above
+ // See explanation of modCount use above
- final Segment<K,V>[] segments = this.segments;
- int[] mc = new int[segments.length];
+ final Segment<K, V>[] segments = this.segments;
+ int[] mc = new int[segments.length];
- // Try a few times without locking
- for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
- int sum = 0;
- int mcsum = 0;
- for (int i = 0; i < segments.length; ++i) {
- int c = segments[i].count;
- mcsum += mc[i] = segments[i].modCount;
- if (segments[i].containsValue(value)) {
- return true;
+ // Try a few times without locking
+ for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) {
+ int sum = 0;
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++ i) {
+ int c = segments[i].count;
+ mcsum += mc[i] = segments[i].modCount;
+ if (segments[i].containsValue(value)) {
+ return true;
+ }
+ }
+ boolean cleanSweep = true;
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++ i) {
+ int c = segments[i].count;
+ if (mc[i] != segments[i].modCount) {
+ cleanSweep = false;
+ break;
}
}
- boolean cleanSweep = true;
- if (mcsum != 0) {
- for (int i = 0; i < segments.length; ++i) {
- int c = segments[i].count;
- if (mc[i] != segments[i].modCount) {
- cleanSweep = false;
- break;
- }
- }
- }
- if (cleanSweep) {
- return false;
- }
- }
- // Resort to locking all segments
- for (int i = 0; i < segments.length; ++i) {
+ }
+ if (cleanSweep) {
+ return false;
+ }
+ }
+ // Resort to locking all segments
+ for (int i = 0; i < segments.length; ++ i) {
segments[i].lock();
}
- boolean found = false;
- try {
- for (int i = 0; i < segments.length; ++i) {
- if (segments[i].containsValue(value)) {
- found = true;
- break;
- }
+ boolean found = false;
+ try {
+ for (int i = 0; i < segments.length; ++ i) {
+ if (segments[i].containsValue(value)) {
+ found = true;
+ break;
}
- } finally {
- for (int i = 0; i < segments.length; ++i) {
- segments[i].unlock();
- }
- }
- return found;
- }
+ }
+ } finally {
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].unlock();
+ }
+ }
+ return found;
+ }
- /**
- * Legacy method testing if some key maps into the specified value
- * in this table. This method is identical in functionality to
- * {@link #containsValue}, and exists solely to ensure
- * full compatibility with class {@link java.util.Hashtable},
- * which supported this method prior to introduction of the
- * Java Collections framework.
+ /**
+ * Legacy method testing if some key maps into the specified value
+ * in this table. This method is identical in functionality to
+ * {@link #containsValue}, and exists solely to ensure
+ * full compatibility with class {@link java.util.Hashtable},
+ * which supported this method prior to introduction of the
+ * Java Collections framework.
- * @param value a value to search for
- * @return <tt>true</tt> if and only if some key maps to the
- * <tt>value</tt> argument in this table as
- * determined by the <tt>equals</tt> method;
- * <tt>false</tt> otherwise
- * @throws NullPointerException if the specified value is null
- */
- public boolean contains(Object value) {
- return containsValue(value);
- }
+ * @param value a value to search for
+ * @return <tt>true</tt> if and only if some key maps to the
+ * <tt>value</tt> argument in this table as
+ * determined by the <tt>equals</tt> method;
+ * <tt>false</tt> otherwise
+ * @throws NullPointerException if the specified value is null
+ */
+ public boolean contains(Object value) {
+ return containsValue(value);
+ }
- /**
- * Maps the specified key to the specified value in this table.
- * Neither the key nor the value can be null.
- *
- * <p> The value can be retrieved by calling the <tt>get</tt> method
- * with a key that is equal to the original key.
- *
- * @param key key with which the specified value is to be associated
- * @param value value to be associated with the specified key
- * @return the previous value associated with <tt>key</tt>, or
- * <tt>null</tt> if there was no mapping for <tt>key</tt>
- * @throws NullPointerException if the specified key or value is null
- */
- @Override
+ /**
+ * Maps the specified key to the specified value in this table.
+ * Neither the key nor the value can be null.
+ *
+ * <p> The value can be retrieved by calling the <tt>get</tt> method
+ * with a key that is equal to the original key.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
public V put(K key, V value) {
- if (value == null) {
+ if (value == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).put(key, hash, value, false);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).put(key, hash, value, false);
+ }
- /**
- * {@inheritDoc}
- *
- * @return the previous value associated with the specified key,
- * or <tt>null</tt> if there was no mapping for the key
- * @throws NullPointerException if the specified key or value is null
- */
- public V putIfAbsent(K key, V value) {
- if (value == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or <tt>null</tt> if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V putIfAbsent(K key, V value) {
+ if (value == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).put(key, hash, value, true);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).put(key, hash, value, true);
+ }
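
A small sketch of the put/putIfAbsent contract documented above (hypothetical keys and values; the expected return values follow directly from the javadoc):

import org.infinispan.util.concurrent.BoundedConcurrentHashMap;

public class PutSemanticsDemo {
   public static void main(String[] args) {
      BoundedConcurrentHashMap<String, String> map =
            new BoundedConcurrentHashMap<String, String>(64, 4);

      System.out.println(map.put("a", "1"));         // null: no previous mapping
      System.out.println(map.put("a", "2"));         // "1": previous value returned and replaced
      System.out.println(map.putIfAbsent("a", "3")); // "2": existing mapping is kept
      System.out.println(map.get("a"));              // "2"
      // Both methods reject nulls: map.put("a", null) would throw NullPointerException.
   }
}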
- /**
- * Copies all of the mappings from the specified map to this one.
- * These mappings replace any mappings that this map had for any of the
- * keys currently in the specified map.
- *
- * @param m mappings to be stored in this map
- */
- @Override
+ /**
+ * Copies all of the mappings from the specified map to this one.
+ * These mappings replace any mappings that this map had for any of the
+ * keys currently in the specified map.
+ *
+ * @param m mappings to be stored in this map
+ */
+ @Override
public void putAll(Map<? extends K, ? extends V> m) {
- for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
+ for (Map.Entry<? extends K, ? extends V> e: m.entrySet()) {
put(e.getKey(), e.getValue());
}
- }
+ }
- /**
- * Removes the key (and its corresponding value) from this map.
- * This method does nothing if the key is not in the map.
- *
- * @param key the key that needs to be removed
- * @return the previous value associated with <tt>key</tt>, or
- * <tt>null</tt> if there was no mapping for <tt>key</tt>
- * @throws NullPointerException if the specified key is null
- */
- @Override
+ /**
+ * Removes the key (and its corresponding value) from this map.
+ * This method does nothing if the key is not in the map.
+ *
+ * @param key the key that needs to be removed
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
public V remove(Object key) {
- int hash = hash(key.hashCode());
- return segmentFor(hash).remove(key, hash, null);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).remove(key, hash, null);
+ }
- /**
- * {@inheritDoc}
- *
- * @throws NullPointerException if the specified key is null
- */
- public boolean remove(Object key, Object value) {
- int hash = hash(key.hashCode());
- if (value == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public boolean remove(Object key, Object value) {
+ int hash = hash(key.hashCode());
+ if (value == null) {
return false;
}
- return segmentFor(hash).remove(key, hash, value) != null;
- }
+ return segmentFor(hash).remove(key, hash, value) != null;
+ }
- /**
- * {@inheritDoc}
- *
- * @throws NullPointerException if any of the arguments are null
- */
- public boolean replace(K key, V oldValue, V newValue) {
- if (oldValue == null || newValue == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if any of the arguments are null
+ */
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ if (oldValue == null || newValue == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).replace(key, hash, oldValue, newValue);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).replace(key, hash, oldValue, newValue);
+ }
- /**
- * {@inheritDoc}
- *
- * @return the previous value associated with the specified key,
- * or <tt>null</tt> if there was no mapping for the key
- * @throws NullPointerException if the specified key or value is null
- */
- public V replace(K key, V value) {
- if (value == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or <tt>null</tt> if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V replace(K key, V value) {
+ if (value == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).replace(key, hash, value);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).replace(key, hash, value);
+ }
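
The conditional remove and the two replace overloads above implement the atomic ConcurrentMap operations. A brief sketch of their return values (keys and values are illustrative only):

import org.infinispan.util.concurrent.BoundedConcurrentHashMap;

public class AtomicOpsDemo {
   public static void main(String[] args) {
      BoundedConcurrentHashMap<String, Integer> map =
            new BoundedConcurrentHashMap<String, Integer>(64, 4);
      map.put("k", Integer.valueOf(1));

      System.out.println(map.remove("k", Integer.valueOf(2)));                       // false: value mismatch
      System.out.println(map.replace("k", Integer.valueOf(1), Integer.valueOf(5)));  // true: 1 atomically becomes 5
      System.out.println(map.replace("k", Integer.valueOf(9)));                      // 5: unconditional replace, old value returned
      System.out.println(map.remove("k", Integer.valueOf(9)));                       // true: mapping removed
      System.out.println(map.replace("gone", Integer.valueOf(7)));                   // null: key absent, nothing stored
   }
}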
- /**
- * Removes all of the mappings from this map.
- */
- @Override
+ /**
+ * Removes all of the mappings from this map.
+ */
+ @Override
public void clear() {
- for (int i = 0; i < segments.length; ++i) {
+ for (int i = 0; i < segments.length; ++ i) {
segments[i].clear();
}
- }
+ }
- /**
- * Returns a {@link Set} view of the keys contained in this map.
- * The set is backed by the map, so changes to the map are
- * reflected in the set, and vice-versa. The set supports element
- * removal, which removes the corresponding mapping from this map,
- * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
- * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
- * operations. It does not support the <tt>add</tt> or
- * <tt>addAll</tt> operations.
- *
- * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
- */
- @Override
+ /**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from this map,
+ * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+ * operations. It does not support the <tt>add</tt> or
+ * <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ @Override
public Set<K> keySet() {
- Set<K> ks = keySet;
- return ks != null ? ks : (keySet = new KeySet());
- }
+ Set<K> ks = keySet;
+ return ks != null? ks : (keySet = new KeySet());
+ }
- /**
- * Returns a {@link Collection} view of the values contained in this map.
- * The collection is backed by the map, so changes to the map are
- * reflected in the collection, and vice-versa. The collection
- * supports element removal, which removes the corresponding
- * mapping from this map, via the <tt>Iterator.remove</tt>,
- * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
- * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not
- * support the <tt>add</tt> or <tt>addAll</tt> operations.
- *
- * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
- */
- @Override
+ /**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa. The collection
+ * supports element removal, which removes the corresponding
+ * mapping from this map, via the <tt>Iterator.remove</tt>,
+ * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
+ * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not
+ * support the <tt>add</tt> or <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ @Override
public Collection<V> values() {
- Collection<V> vs = values;
- return vs != null ? vs : (values = new Values());
- }
+ Collection<V> vs = values;
+ return vs != null? vs : (values = new Values());
+ }
- /**
- * Returns a {@link Set} view of the mappings contained in this map.
- * The set is backed by the map, so changes to the map are
- * reflected in the set, and vice-versa. The set supports element
- * removal, which removes the corresponding mapping from the map,
- * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
- * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
- * operations. It does not support the <tt>add</tt> or
- * <tt>addAll</tt> operations.
- *
- * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
- */
- @Override
- public Set<Map.Entry<K,V>> entrySet() {
- Set<Map.Entry<K,V>> es = entrySet;
- return es != null ? es : (entrySet = new EntrySet());
- }
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from the map,
+ * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+ * operations. It does not support the <tt>add</tt> or
+ * <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ @Override
+ public Set<Map.Entry<K, V>> entrySet() {
+ Set<Map.Entry<K, V>> es = entrySet;
+ return es != null? es : (entrySet = new EntrySet());
+ }
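
The key, value and entry views documented above are backed by the map and their iterators are weakly consistent. A minimal sketch of what that means in practice (keys are hypothetical; concurrent behaviour is stated only as far as the javadoc guarantees it):

import java.util.Iterator;
import java.util.Map;
import org.infinispan.util.concurrent.BoundedConcurrentHashMap;

public class ViewIterationDemo {
   public static void main(String[] args) {
      BoundedConcurrentHashMap<String, Integer> map =
            new BoundedConcurrentHashMap<String, Integer>(64, 4);
      for (int i = 0; i < 10; i++) {
         map.put("key-" + i, Integer.valueOf(i));
      }

      // Iterator.remove() on a view removes the mapping from the map itself.
      for (Iterator<String> it = map.keySet().iterator(); it.hasNext();) {
         if (it.next().endsWith("3")) {
            it.remove();
         }
      }

      // Iterating a view never throws ConcurrentModificationException, even if the map
      // is modified concurrently; such updates may or may not be observed by the iterator.
      for (Map.Entry<String, Integer> e : map.entrySet()) {
         System.out.println(e.getKey() + " = " + e.getValue());
      }
   }
}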
- /**
- * Returns an enumeration of the keys in this table.
- *
- * @return an enumeration of the keys in this table
- * @see #keySet()
- */
- public Enumeration<K> keys() {
- return new KeyIterator();
- }
+ /**
+ * Returns an enumeration of the keys in this table.
+ *
+ * @return an enumeration of the keys in this table
+ * @see #keySet()
+ */
+ public Enumeration<K> keys() {
+ return new KeyIterator();
+ }
- /**
- * Returns an enumeration of the values in this table.
- *
- * @return an enumeration of the values in this table
- * @see #values()
- */
- public Enumeration<V> elements() {
- return new ValueIterator();
- }
+ /**
+ * Returns an enumeration of the values in this table.
+ *
+ * @return an enumeration of the values in this table
+ * @see #values()
+ */
+ public Enumeration<V> elements() {
+ return new ValueIterator();
+ }
- /* ---------------- Iterator Support -------------- */
+ /* ---------------- Iterator Support -------------- */
- abstract class HashIterator {
- int nextSegmentIndex;
- int nextTableIndex;
- HashEntry<K,V>[] currentTable;
- HashEntry<K, V> nextEntry;
- HashEntry<K, V> lastReturned;
+ abstract class HashIterator {
+ int nextSegmentIndex;
- HashIterator() {
- nextSegmentIndex = segments.length - 1;
- nextTableIndex = -1;
- advance();
- }
+ int nextTableIndex;
- public boolean hasMoreElements() { return hasNext(); }
+ HashEntry<K, V>[] currentTable;
- final void advance() {
- if (nextEntry != null && (nextEntry = nextEntry.next) != null) {
+ HashEntry<K, V> nextEntry;
+
+ HashEntry<K, V> lastReturned;
+
+ HashIterator() {
+ nextSegmentIndex = segments.length - 1;
+ nextTableIndex = -1;
+ advance();
+ }
+
+ public boolean hasMoreElements() {
+ return hasNext();
+ }
+
+ final void advance() {
+ if (nextEntry != null && (nextEntry = nextEntry.next) != null) {
+ return;
+ }
+
+ while (nextTableIndex >= 0) {
+ if ((nextEntry = currentTable[nextTableIndex --]) != null) {
return;
}
+ }
- while (nextTableIndex >= 0) {
- if ( (nextEntry = currentTable[nextTableIndex--]) != null) {
- return;
+ while (nextSegmentIndex >= 0) {
+ Segment<K, V> seg = segments[nextSegmentIndex --];
+ if (seg.count != 0) {
+ currentTable = seg.table;
+ for (int j = currentTable.length - 1; j >= 0; -- j) {
+ if ((nextEntry = currentTable[j]) != null) {
+ nextTableIndex = j - 1;
+ return;
+ }
}
}
+ }
+ }
- while (nextSegmentIndex >= 0) {
- Segment<K,V> seg = segments[nextSegmentIndex--];
- if (seg.count != 0) {
- currentTable = seg.table;
- for (int j = currentTable.length - 1; j >= 0; --j) {
- if ( (nextEntry = currentTable[j]) != null) {
- nextTableIndex = j - 1;
- return;
- }
- }
- }
- }
- }
+ public boolean hasNext() {
+ return nextEntry != null;
+ }
- public boolean hasNext() { return nextEntry != null; }
+ HashEntry<K, V> nextEntry() {
+ if (nextEntry == null) {
+ throw new NoSuchElementException();
+ }
+ lastReturned = nextEntry;
+ advance();
+ return lastReturned;
+ }
- HashEntry<K,V> nextEntry() {
- if (nextEntry == null) {
- throw new NoSuchElementException();
- }
- lastReturned = nextEntry;
- advance();
- return lastReturned;
- }
+ public void remove() {
+ if (lastReturned == null) {
+ throw new IllegalStateException();
+ }
+ BoundedConcurrentHashMap.this.remove(lastReturned.key);
+ lastReturned = null;
+ }
+ }
- public void remove() {
- if (lastReturned == null) {
- throw new IllegalStateException();
- }
- BoundedConcurrentHashMap.this.remove(lastReturned.key);
- lastReturned = null;
- }
- }
+ final class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> {
+ @Override
+ public K next() {
+ return super.nextEntry().key;
+ }
- final class KeyIterator
- extends HashIterator
- implements Iterator<K>, Enumeration<K>
- {
- public K next() { return super.nextEntry().key; }
- public K nextElement() { return super.nextEntry().key; }
- }
+ @Override
+ public K nextElement() {
+ return super.nextEntry().key;
+ }
+ }
- final class ValueIterator
- extends HashIterator
- implements Iterator<V>, Enumeration<V>
- {
- public V next() { return super.nextEntry().value; }
- public V nextElement() { return super.nextEntry().value; }
- }
+ final class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> {
+ @Override
+ public V next() {
+ return super.nextEntry().value;
+ }
- /**
- * Custom Entry class used by EntryIterator.next(), that relays
- * setValue changes to the underlying map.
- */
- final class WriteThroughEntry
- extends AbstractMap.SimpleEntry<K,V>
- {
- WriteThroughEntry(K k, V v) {
- super(k,v);
- }
+ @Override
+ public V nextElement() {
+ return super.nextEntry().value;
+ }
+ }
- /**
- * Set our entry's value and write through to the map. The
- * value to return is somewhat arbitrary here. Since a
- * WriteThroughEntry does not necessarily track asynchronous
- * changes, the most recent "previous" value could be
- * different from what we return (or could even have been
- * removed in which case the put will re-establish). We do not
- * and cannot guarantee more.
- */
- @Override
+ /**
+ * Custom Entry class used by EntryIterator.next(), that relays
+ * setValue changes to the underlying map.
+ */
+ final class WriteThroughEntry extends AbstractMap.SimpleEntry<K, V> {
+
+ private static final long serialVersionUID = -7041346694785573824L;
+
+ WriteThroughEntry(K k, V v) {
+ super(k, v);
+ }
+
+ /**
+ * Set our entry's value and write through to the map. The
+ * value to return is somewhat arbitrary here. Since a
+ * WriteThroughEntry does not necessarily track asynchronous
+ * changes, the most recent "previous" value could be
+ * different from what we return (or could even have been
+ * removed in which case the put will re-establish). We do not
+ * and cannot guarantee more.
+ */
+ @Override
public V setValue(V value) {
- if (value == null) {
- throw new NullPointerException();
- }
- V v = super.setValue(value);
- BoundedConcurrentHashMap.this.put(getKey(), value);
- return v;
- }
- }
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ V v = super.setValue(value);
+ BoundedConcurrentHashMap.this.put(getKey(), value);
+ return v;
+ }
+ }
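
Because EntryIterator.next() returns a WriteThroughEntry, calling setValue on an entry obtained from entrySet() is written through to the map, as the javadoc above describes. A short illustrative sketch (key and values are hypothetical):

import java.util.Map;
import org.infinispan.util.concurrent.BoundedConcurrentHashMap;

public class WriteThroughDemo {
   public static void main(String[] args) {
      BoundedConcurrentHashMap<String, Integer> map =
            new BoundedConcurrentHashMap<String, Integer>(64, 4);
      map.put("counter", Integer.valueOf(1));

      // setValue updates the entry and puts the new value back into the map.
      for (Map.Entry<String, Integer> e : map.entrySet()) {
         e.setValue(Integer.valueOf(e.getValue().intValue() + 1));
      }
      System.out.println(map.get("counter")); // prints 2
   }
}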
- final class EntryIterator
- extends HashIterator
- implements Iterator<Entry<K,V>>
- {
- public Map.Entry<K,V> next() {
- HashEntry<K,V> e = super.nextEntry();
- return new WriteThroughEntry(e.key, e.value);
- }
- }
+ final class EntryIterator extends HashIterator implements Iterator<Entry<K, V>> {
+ @Override
+ public Map.Entry<K, V> next() {
+ HashEntry<K, V> e = super.nextEntry();
+ return new WriteThroughEntry(e.key, e.value);
+ }
+ }
- final class KeySet extends AbstractSet<K> {
- @Override
+ final class KeySet extends AbstractSet<K> {
+ @Override
public Iterator<K> iterator() {
- return new KeyIterator();
- }
- @Override
+ return new KeyIterator();
+ }
+
+ @Override
public int size() {
- return BoundedConcurrentHashMap.this.size();
- }
- @Override
+ return BoundedConcurrentHashMap.this.size();
+ }
+
+ @Override
public boolean isEmpty() {
- return BoundedConcurrentHashMap.this.isEmpty();
- }
- @Override
+ return BoundedConcurrentHashMap.this.isEmpty();
+ }
+
+ @Override
public boolean contains(Object o) {
- return BoundedConcurrentHashMap.this.containsKey(o);
- }
- @Override
+ return BoundedConcurrentHashMap.this.containsKey(o);
+ }
+
+ @Override
public boolean remove(Object o) {
- return BoundedConcurrentHashMap.this.remove(o) != null;
- }
- @Override
+ return BoundedConcurrentHashMap.this.remove(o) != null;
+ }
+
+ @Override
public void clear() {
- BoundedConcurrentHashMap.this.clear();
- }
- }
+ BoundedConcurrentHashMap.this.clear();
+ }
+ }
- final class Values extends AbstractCollection<V> {
- @Override
+ final class Values extends AbstractCollection<V> {
+ @Override
public Iterator<V> iterator() {
- return new ValueIterator();
- }
- @Override
+ return new ValueIterator();
+ }
+
+ @Override
public int size() {
- return BoundedConcurrentHashMap.this.size();
- }
- @Override
+ return BoundedConcurrentHashMap.this.size();
+ }
+
+ @Override
public boolean isEmpty() {
- return BoundedConcurrentHashMap.this.isEmpty();
- }
- @Override
+ return BoundedConcurrentHashMap.this.isEmpty();
+ }
+
+ @Override
public boolean contains(Object o) {
- return BoundedConcurrentHashMap.this.containsValue(o);
- }
- @Override
+ return BoundedConcurrentHashMap.this.containsValue(o);
+ }
+
+ @Override
public void clear() {
- BoundedConcurrentHashMap.this.clear();
- }
- }
+ BoundedConcurrentHashMap.this.clear();
+ }
+ }
- final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
- @Override
- public Iterator<Map.Entry<K,V>> iterator() {
- return new EntryIterator();
- }
- @Override
+ final class EntrySet extends AbstractSet<Map.Entry<K, V>> {
+ @Override
+ public Iterator<Map.Entry<K, V>> iterator() {
+ return new EntryIterator();
+ }
+
+ @Override
public boolean contains(Object o) {
- if (!(o instanceof Map.Entry)) {
- return false;
- }
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- V v = BoundedConcurrentHashMap.this.get(e.getKey());
- return v != null && v.equals(e.getValue());
- }
- @Override
+ if (!(o instanceof Map.Entry)) {
+ return false;
+ }
+ Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
+ V v = BoundedConcurrentHashMap.this.get(e.getKey());
+ return v != null && v.equals(e.getValue());
+ }
+
+ @Override
public boolean remove(Object o) {
- if (!(o instanceof Map.Entry)) {
- return false;
- }
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- return BoundedConcurrentHashMap.this.remove(e.getKey(), e.getValue());
- }
- @Override
+ if (!(o instanceof Map.Entry)) {
+ return false;
+ }
+ Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
+ return BoundedConcurrentHashMap.this.remove(e.getKey(), e.getValue());
+ }
+
+ @Override
public int size() {
- return BoundedConcurrentHashMap.this.size();
- }
- @Override
+ return BoundedConcurrentHashMap.this.size();
+ }
+
+ @Override
public boolean isEmpty() {
- return BoundedConcurrentHashMap.this.isEmpty();
- }
- @Override
+ return BoundedConcurrentHashMap.this.isEmpty();
+ }
+
+ @Override
public void clear() {
- BoundedConcurrentHashMap.this.clear();
- }
- }
+ BoundedConcurrentHashMap.this.clear();
+ }
+ }
- /* ---------------- Serialization Support -------------- */
+ /* ---------------- Serialization Support -------------- */
- /**
- * Save the state of the <tt>ConcurrentHashMap</tt> instance to a
- * stream (i.e., serialize it).
- * @param s the stream
- * @serialData
- * the key (Object) and value (Object)
- * for each key-value mapping, followed by a null pair.
- * The key-value mappings are emitted in no particular order.
- */
- private void writeObject(java.io.ObjectOutputStream s) throws IOException {
- s.defaultWriteObject();
+ /**
+ * Save the state of the <tt>ConcurrentHashMap</tt> instance to a
+ * stream (i.e., serialize it).
+ * @param s the stream
+ * @serialData
+ * the key (Object) and value (Object)
+ * for each key-value mapping, followed by a null pair.
+ * The key-value mappings are emitted in no particular order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s) throws IOException {
+ s.defaultWriteObject();
- for (int k = 0; k < segments.length; ++k) {
- Segment<K,V> seg = segments[k];
- seg.lock();
- try {
- HashEntry<K,V>[] tab = seg.table;
- for (int i = 0; i < tab.length; ++i) {
- for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
- s.writeObject(e.key);
- s.writeObject(e.value);
- }
- }
- } finally {
- seg.unlock();
+ for (int k = 0; k < segments.length; ++ k) {
+ Segment<K, V> seg = segments[k];
+ seg.lock();
+ try {
+ HashEntry<K, V>[] tab = seg.table;
+ for (int i = 0; i < tab.length; ++ i) {
+ for (HashEntry<K, V> e = tab[i]; e != null; e = e.next) {
+ s.writeObject(e.key);
+ s.writeObject(e.value);
+ }
}
- }
- s.writeObject(null);
- s.writeObject(null);
- }
+ } finally {
+ seg.unlock();
+ }
+ }
+ s.writeObject(null);
+ s.writeObject(null);
+ }
- /**
- * Reconstitute the <tt>ConcurrentHashMap</tt> instance from a
- * stream (i.e., deserialize it).
- * @param s the stream
- */
- private void readObject(java.io.ObjectInputStream s)
- throws IOException, ClassNotFoundException {
- s.defaultReadObject();
+ /**
+ * Reconstitute the <tt>ConcurrentHashMap</tt> instance from a
+ * stream (i.e., deserialize it).
+ * @param s the stream
+ */
+ @SuppressWarnings("unchecked")
+ private void readObject(java.io.ObjectInputStream s) throws IOException,
+ ClassNotFoundException {
+ s.defaultReadObject();
- // Initialize each segment to be minimally sized, and let grow.
- for (int i = 0; i < segments.length; ++i) {
- segments[i].setTable(new HashEntry[1]);
- }
+ // Initialize each segment to be minimally sized, and let grow.
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].setTable(new HashEntry[1]);
+ }
- // Read the keys and values, and put the mappings in the table
- for (;;) {
- K key = (K) s.readObject();
- V value = (V) s.readObject();
- if (key == null) {
- break;
- }
- put(key, value);
- }
- }
+ // Read the keys and values, and put the mappings in the table
+ for (;;) {
+ K key = (K) s.readObject();
+ V value = (V) s.readObject();
+ if (key == null) {
+ break;
+ }
+ put(key, value);
+ }
+ }
}
\ No newline at end of file
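
Since the class is Serializable, the writeObject/readObject pair above supports a plain Java serialization round trip: writeObject emits every key/value pair under the segment locks followed by a null pair, and readObject re-puts each mapping into minimally sized segments. A hedged sketch of that round trip (class name and contents are illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.infinispan.util.concurrent.BoundedConcurrentHashMap;

public class SerializationRoundTrip {
   public static void main(String[] args) throws Exception {
      BoundedConcurrentHashMap<String, Integer> map =
            new BoundedConcurrentHashMap<String, Integer>(64, 4);
      map.put("one", Integer.valueOf(1));
      map.put("two", Integer.valueOf(2));

      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      ObjectOutputStream out = new ObjectOutputStream(bytes);
      out.writeObject(map);
      out.close();

      ObjectInputStream in =
            new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
      @SuppressWarnings("unchecked")
      BoundedConcurrentHashMap<String, Integer> copy =
            (BoundedConcurrentHashMap<String, Integer>) in.readObject();
      in.close();

      System.out.println(copy.get("one") + ", " + copy.get("two")); // 1, 2
   }
}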
Modified: trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java 2010-10-20 10:03:11 UTC (rev 2522)
+++ trunk/core/src/main/java/org/infinispan/util/concurrent/BoundedConcurrentHashMap.java 2010-10-20 10:34:58 UTC (rev 2523)
@@ -1,4 +1,26 @@
/*
+ * JBoss, Home of Professional Open Source
+ *
+ * Copyright ${year}, Red Hat, Inc. and individual contributors
+ * by the @authors tag. See the copyright.txt in the distribution
+ * for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/licenses/publicdomain
@@ -92,208 +114,207 @@
*/
public class BoundedConcurrentHashMap<K, V> extends AbstractMap<K, V>
implements ConcurrentMap<K, V>, Serializable {
- private static final long serialVersionUID = 7249069246763182397L;
+ private static final long serialVersionUID = 7249069246763182397L;
- /*
- * The basic strategy is to subdivide the table among Segments,
- * each of which itself is a concurrently readable hash table.
- */
+ /*
+ * The basic strategy is to subdivide the table among Segments,
+ * each of which itself is a concurrently readable hash table.
+ */
- /* ---------------- Constants -------------- */
+ /* ---------------- Constants -------------- */
- /**
- * The default initial capacity for this table,
- * used when not otherwise specified in a constructor.
- */
- static final int DEFAULT_MAXIMUM_CAPACITY = 512;
+ /**
+ * The default initial capacity for this table,
+ * used when not otherwise specified in a constructor.
+ */
+ static final int DEFAULT_MAXIMUM_CAPACITY = 512;
- /**
- * The default load factor for this table, used when not
- * otherwise specified in a constructor.
- */
- static final float DEFAULT_LOAD_FACTOR = 0.75f;
+ /**
+ * The default load factor for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
- /**
- * The default concurrency level for this table, used when not
- * otherwise specified in a constructor.
- */
- static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ /**
+ * The default concurrency level for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final int DEFAULT_CONCURRENCY_LEVEL = 16;
- /**
- * The maximum capacity, used if a higher value is implicitly
- * specified by either of the constructors with arguments. MUST
- * be a power of two <= 1<<30 to ensure that entries are indexable
- * using ints.
- */
- static final int MAXIMUM_CAPACITY = 1 << 30;
+ /**
+ * The maximum capacity, used if a higher value is implicitly
+ * specified by either of the constructors with arguments. MUST
+ * be a power of two <= 1<<30 to ensure that entries are indexable
+ * using ints.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
- /**
- * The maximum number of segments to allow; used to bound
- * constructor arguments.
- */
- static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
+ /**
+ * The maximum number of segments to allow; used to bound
+ * constructor arguments.
+ */
+ static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
- /**
- * Number of unsynchronized retries in size and containsValue
- * methods before resorting to locking. This is used to avoid
- * unbounded retries if tables undergo continuous modification
- * which would make it impossible to obtain an accurate result.
- */
- static final int RETRIES_BEFORE_LOCK = 2;
+ /**
+ * Number of unsynchronized retries in size and containsValue
+ * methods before resorting to locking. This is used to avoid
+ * unbounded retries if tables undergo continuous modification
+ * which would make it impossible to obtain an accurate result.
+ */
+ static final int RETRIES_BEFORE_LOCK = 2;
- /* ---------------- Fields -------------- */
+ /* ---------------- Fields -------------- */
- /**
- * Mask value for indexing into segments. The upper bits of a
- * key's hash code are used to choose the segment.
- */
- final int segmentMask;
+ /**
+ * Mask value for indexing into segments. The upper bits of a
+ * key's hash code are used to choose the segment.
+ */
+ final int segmentMask;
- /**
- * Shift value for indexing within segments.
- */
- final int segmentShift;
+ /**
+ * Shift value for indexing within segments.
+ */
+ final int segmentShift;
- /**
- * The segments, each of which is a specialized hash table
- */
- final Segment<K,V>[] segments;
+ /**
+ * The segments, each of which is a specialized hash table
+ */
+ final Segment<K,V>[] segments;
- transient Set<K> keySet;
- transient Set<Map.Entry<K,V>> entrySet;
- transient Collection<V> values;
+ transient Set<K> keySet;
+ transient Set<Map.Entry<K,V>> entrySet;
+ transient Collection<V> values;
- /* ---------------- Small Utilities -------------- */
+ /* ---------------- Small Utilities -------------- */
- /**
- * Applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. This is critical
- * because ConcurrentHashMap uses power-of-two length hash tables,
- * that otherwise encounter collisions for hashCodes that do not
- * differ in lower or upper bits.
- */
- private static int hash(int h) {
- // Spread bits to regularize both segment and index locations,
- // using variant of single-word Wang/Jenkins hash.
- h += h << 15 ^ 0xffffcd7d;
- h ^= h >>> 10;
- h += h << 3;
- h ^= h >>> 6;
- h += (h << 2) + (h << 14);
- return h ^ h >>> 16;
- }
+ /**
+ * Applies a supplemental hash function to a given hashCode, which
+ * defends against poor quality hash functions. This is critical
+ * because ConcurrentHashMap uses power-of-two length hash tables,
+ * that otherwise encounter collisions for hashCodes that do not
+ * differ in lower or upper bits.
+ */
+ private static int hash(int h) {
+ // Spread bits to regularize both segment and index locations,
+ // using variant of single-word Wang/Jenkins hash.
+ h += h << 15 ^ 0xffffcd7d;
+ h ^= h >>> 10;
+ h += h << 3;
+ h ^= h >>> 6;
+ h += (h << 2) + (h << 14);
+ return h ^ h >>> 16;
+ }
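
To see why this supplemental hash matters for a power-of-two table, here is a standalone copy of the spreader above with a tiny driver (the demo class is illustrative only; the spread method simply repeats the private hash(int) shown in the diff):

public class SupplementalHashDemo {
   // Standalone copy of the private hash(int) above, for illustration only.
   static int spread(int h) {
      h += h << 15 ^ 0xffffcd7d;
      h ^= h >>> 10;
      h += h << 3;
      h ^= h >>> 6;
      h += (h << 2) + (h << 14);
      return h ^ h >>> 16;
   }

   public static void main(String[] args) {
      // These raw hashCodes differ only in upper bits, so they collide in the low 4 bits
      // (a 16-bucket table would put them all in bucket 1); after spreading, the low bits
      // depend on every input bit and such collisions become unlikely.
      for (int raw : new int[] {0x00000001, 0x00010001, 0x01000001}) {
         System.out.printf("%08x -> %08x (low 4 bits: %d)%n", raw, spread(raw), spread(raw) & 15);
      }
   }
}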
- /**
- * Returns the segment that should be used for key with given hash
- * @param hash the hash code for the key
- * @return the segment
- */
- final Segment<K,V> segmentFor(int hash) {
- return segments[hash >>> segmentShift & segmentMask];
- }
+ /**
+ * Returns the segment that should be used for key with given hash
+ * @param hash the hash code for the key
+ * @return the segment
+ */
+ final Segment<K,V> segmentFor(int hash) {
+ return segments[hash >>> segmentShift & segmentMask];
+ }
- /* ---------------- Inner Classes -------------- */
+ /* ---------------- Inner Classes -------------- */
- /**
- * ConcurrentHashMap list entry. Note that this is never exported
- * out as a user-visible Map.Entry.
- *
- * Because the value field is volatile, not final, it is legal wrt
- * the Java Memory Model for an unsynchronized reader to see null
- * instead of initial value when read via a data race. Although a
- * reordering leading to this is not likely to ever actually
- * occur, the Segment.readValueUnderLock method is used as a
- * backup in case a null (pre-initialized) value is ever seen in
- * an unsynchronized access method.
- */
- static final class HashEntry<K, V> {
- final K key;
- final int hash;
- volatile V value;
- final HashEntry<K, V> next;
- volatile Recency state;
+ /**
+ * ConcurrentHashMap list entry. Note that this is never exported
+ * out as a user-visible Map.Entry.
+ *
+ * Because the value field is volatile, not final, it is legal wrt
+ * the Java Memory Model for an unsynchronized reader to see null
+ * instead of initial value when read via a data race. Although a
+ * reordering leading to this is not likely to ever actually
+ * occur, the Segment.readValueUnderLock method is used as a
+ * backup in case a null (pre-initialized) value is ever seen in
+ * an unsynchronized access method.
+ */
+ static final class HashEntry<K, V> {
+ final K key;
+ final int hash;
+ volatile V value;
+ final HashEntry<K, V> next;
+ volatile Recency state;
- HashEntry(K key, int hash, HashEntry<K, V> next, V value) {
- this.key = key;
- this.hash = hash;
- this.next = next;
- this.value = value;
- this.state = Recency.HIR_RESIDENT;
- }
+ HashEntry(K key, int hash, HashEntry<K, V> next, V value) {
+ this.key = key;
+ this.hash = hash;
+ this.next = next;
+ this.value = value;
+ this.state = Recency.HIR_RESIDENT;
+ }
- @Override
+ @Override
public int hashCode() {
- int result = 17;
- result = result * 31 + hash;
- result = result * 31 + key.hashCode();
- return result;
- }
+ int result = 17;
+ result = result * 31 + hash;
+ result = result * 31 + key.hashCode();
+ return result;
+ }
- @Override
+ @Override
public boolean equals(Object o) {
- // HashEntry is internal class, never leaks out of CHM, hence slight optimization
- if (this == o) {
+ // HashEntry is internal class, never leaks out of CHM, hence slight optimization
+ if (this == o) {
return true;
}
- if (o == null) {
+ if (o == null) {
return false;
}
- HashEntry<?, ?> other = (HashEntry<?, ?>) o;
- return hash == other.hash && key.equals(other.key);
- }
+ HashEntry<?, ?> other = (HashEntry<?, ?>) o;
+ return hash == other.hash && key.equals(other.key);
+ }
- public void transitionToLIRResident() {
- state = Recency.LIR_RESIDENT;
- }
+ public void transitionToLIRResident() {
+ state = Recency.LIR_RESIDENT;
+ }
- public void transitionHIRResidentToHIRNonResident() {
- state = Recency.HIR_NONRESIDENT;
- }
+ public void transitionHIRResidentToHIRNonResident() {
+ state = Recency.HIR_NONRESIDENT;
+ }
- public void transitionLIRResidentToHIRResident() {
- state = Recency.HIR_RESIDENT;
- }
+ public void transitionLIRResidentToHIRResident() {
+ state = Recency.HIR_RESIDENT;
+ }
- public Recency recency() {
- return state;
- }
+ public Recency recency() {
+ return state;
+ }
- @SuppressWarnings("unchecked")
- static <K, V> HashEntry<K, V>[] newArray(int i) {
- return new HashEntry[i];
- }
+ @SuppressWarnings("unchecked")
+ static <K, V> HashEntry<K, V>[] newArray(int i) {
+ return new HashEntry[i];
+ }
}
- private enum Recency {
- HIR_RESIDENT, LIR_RESIDENT, HIR_NONRESIDENT
+ private enum Recency {
+ HIR_RESIDENT, LIR_RESIDENT, HIR_NONRESIDENT
}
public enum Eviction {
- NONE {
- @Override
- public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
- return new NullEvictionPolicy<K, V>();
- }
- },
- LRU {
+ NONE {
+ @Override
+ public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+ return new NullEvictionPolicy<K, V>();
+ }
+ },
+ LRU {
+ @Override
+ public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+ return new LRU<K, V>(s,capacity,lf,capacity*10,lf);
+ }
+ },
+ LIRS {
+ @Override
+ public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
+ return new LIRS<K,V>(s,capacity,capacity*10,lf);
+ }
+ };
- @Override
- public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
- return new LRU<K, V>(s,capacity,lf,capacity*10,lf);
- }
- },
- LIRS {
- @Override
- public <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf) {
- return new LIRS<K,V>(s,capacity,lf,capacity*10,lf);
- }
- };
-
- abstract <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf);
+ abstract <K, V> EvictionPolicy<K, V> make(Segment<K, V> s, int capacity, float lf);
}
public interface EvictionListener<K, V> {
- void onEntryEviction(K key, V value);
+ void onEntryEviction(K key, V value);
}
static class NullEvictionListener<K, V> implements EvictionListener<K, V> {
@@ -305,1676 +326,1707 @@
public interface EvictionPolicy<K, V> {
- public final static int MAX_BATCH_SIZE = 64;
+ public final static int MAX_BATCH_SIZE = 64;
- /**
- * Invokes eviction policy algorithm and returns set of evicted entries.
- *
- * <p>
- * Set cannot be null but could possibly be an empty set.
- *
- * @return set of evicted entries.
- */
- Set<HashEntry<K, V>> execute();
+ /**
+ * Invokes eviction policy algorithm and returns set of evicted entries.
+ *
+ * <p>
+ * Set cannot be null but could possibly be an empty set.
+ *
+ * @return set of evicted entries.
+ */
+ Set<HashEntry<K, V>> execute();
- /**
- * Invoked to notify EvictionPolicy implementation that there has been an attempt to access
- * an entry in Segment, however that entry was not present in Segment.
- *
- * @param e
- * accessed entry in Segment
- *
- * @return non null set of evicted entries.
- */
- Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e);
+ /**
+ * Invoked to notify EvictionPolicy implementation that there has been an attempt to access
+ * an entry in Segment, however that entry was not present in Segment.
+ *
+ * @param e
+ * accessed entry in Segment
+ *
+ * @return non null set of evicted entries.
+ */
+ Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e);
- /**
- * Invoked to notify EvictionPolicy implementation that an entry in Segment has been
- * accessed. Returns true if batching threshold has been reached, false otherwise.
- * <p>
- * Note that this method is potentially invoked without holding a lock on Segment.
- *
- * @return true if batching threshold has been reached, false otherwise.
- *
- * @param e
- * accessed entry in Segment
- */
- boolean onEntryHit(HashEntry<K, V> e);
+ /**
+ * Invoked to notify EvictionPolicy implementation that an entry in Segment has been
+ * accessed. Returns true if batching threshold has been reached, false otherwise.
+ * <p>
+ * Note that this method is potentially invoked without holding a lock on Segment.
+ *
+ * @return true if batching threshold has been reached, false otherwise.
+ *
+ * @param e
+ * accessed entry in Segment
+ */
+ boolean onEntryHit(HashEntry<K, V> e);
- /**
- * Invoked to notify EvictionPolicy implementation that an entry e has been removed from
- * Segment.
- *
- * @param e
- * removed entry in Segment
- */
- void onEntryRemove(HashEntry<K, V> e);
+ /**
+ * Invoked to notify EvictionPolicy implementation that an entry e has been removed from
+ * Segment.
+ *
+ * @param e
+ * removed entry in Segment
+ */
+ void onEntryRemove(HashEntry<K, V> e);
- /**
- * Invoked to notify EvictionPolicy implementation that all Segment entries have been
- * cleared.
- *
- */
- void clear();
+ /**
+ * Invoked to notify EvictionPolicy implementation that all Segment entries have been
+ * cleared.
+ *
+ */
+ void clear();
- /**
- * Returns type of eviction algorithm (strategy).
- *
- * @return type of eviction algorithm
- */
- Eviction strategy();
+ /**
+ * Returns type of eviction algorithm (strategy).
+ *
+ * @return type of eviction algorithm
+ */
+ Eviction strategy();
- /**
- * Returns true if batching threshold has expired, false otherwise.
- * <p>
- * Note that this method is potentially invoked without holding a lock on Segment.
- *
- * @return true if batching threshold has expired, false otherwise.
- */
- boolean thresholdExpired();
+ /**
+ * Returns true if batching threshold has expired, false otherwise.
+ * <p>
+ * Note that this method is potentially invoked without holding a lock on Segment.
+ *
+ * @return true if batching threshold has expired, false otherwise.
+ */
+ boolean thresholdExpired();
}
static class NullEvictionPolicy<K, V> implements EvictionPolicy<K, V> {
- @Override
- public void clear() {
- }
+ @Override
+ public void clear() {
+ // Do nothing.
+ }
- @Override
- public Set<HashEntry<K, V>> execute() {
- return Collections.emptySet();
- }
+ @Override
+ public Set<HashEntry<K, V>> execute() {
+ return Collections.emptySet();
+ }
- @Override
- public boolean onEntryHit(HashEntry<K, V> e) {
- return false;
- }
+ @Override
+ public boolean onEntryHit(HashEntry<K, V> e) {
+ return false;
+ }
- @Override
- public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
- return Collections.emptySet();
- }
+ @Override
+ public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
+ return Collections.emptySet();
+ }
- @Override
- public void onEntryRemove(HashEntry<K, V> e) {
- }
+ @Override
+ public void onEntryRemove(HashEntry<K, V> e) {
+ // Do nothing.
+ }
- @Override
- public boolean thresholdExpired() {
- return false;
- }
+ @Override
+ public boolean thresholdExpired() {
+ return false;
+ }
- @Override
- public Eviction strategy() {
- return Eviction.NONE;
- }
+ @Override
+ public Eviction strategy() {
+ return Eviction.NONE;
+ }
}
static final class LRU<K, V> implements EvictionPolicy<K, V> {
- private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
- private final Segment<K,V> segment;
- private final LinkedList<HashEntry<K, V>> lruQueue;
- private final int maxBatchQueueSize;
- private final int trimDownSize;
- private final float batchThresholdFactor;
+ private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
+ private final Segment<K,V> segment;
+ private final LinkedList<HashEntry<K, V>> lruQueue;
+ private final int maxBatchQueueSize;
+ private final int trimDownSize;
+ private final float batchThresholdFactor;
- public LRU(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
- this.segment = s;
- this.trimDownSize = (int) (capacity * lf);
- this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
- this.batchThresholdFactor = batchThresholdFactor;
- this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
- this.lruQueue = new LinkedList<HashEntry<K, V>>();
- }
+ public LRU(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
+ this.segment = s;
+ this.trimDownSize = (int) (capacity * lf);
+ this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
+ this.batchThresholdFactor = batchThresholdFactor;
+ this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
+ this.lruQueue = new LinkedList<HashEntry<K, V>>();
+ }
- @Override
- public Set<HashEntry<K, V>> execute() {
- Set<HashEntry<K, V>> evicted = Collections.emptySet();
- if (isOverflow()) {
- evicted = new HashSet<HashEntry<K, V>>();
- }
- try {
- for (HashEntry<K, V> e : accessQueue) {
- if (lruQueue.remove(e)) {
- lruQueue.addFirst(e);
- }
+ @Override
+ public Set<HashEntry<K, V>> execute() {
+ Set<HashEntry<K, V>> evicted = Collections.emptySet();
+ if (isOverflow()) {
+ evicted = new HashSet<HashEntry<K, V>>();
+ }
+ try {
+ for (HashEntry<K, V> e : accessQueue) {
+ if (lruQueue.remove(e)) {
+ lruQueue.addFirst(e);
}
- while (isOverflow()) {
- HashEntry<K, V> first = lruQueue.getLast();
- segment.remove(first.key, first.hash, null);
- evicted.add(first);
- }
- } finally {
- accessQueue.clear();
- }
- return evicted;
- }
+ }
+ while (isOverflow()) {
+ HashEntry<K, V> first = lruQueue.getLast();
+ segment.remove(first.key, first.hash, null);
+ evicted.add(first);
+ }
+ } finally {
+ accessQueue.clear();
+ }
+ return evicted;
+ }
- private boolean isOverflow() {
- return lruQueue.size() > trimDownSize;
- }
+ private boolean isOverflow() {
+ return lruQueue.size() > trimDownSize;
+ }
- @Override
- public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
- lruQueue.addFirst(e);
- return Collections.emptySet();
- }
+ @Override
+ public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
+ lruQueue.addFirst(e);
+ return Collections.emptySet();
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean onEntryHit(HashEntry<K, V> e) {
- accessQueue.add(e);
- return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean onEntryHit(HashEntry<K, V> e) {
+ accessQueue.add(e);
+ return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean thresholdExpired() {
- return accessQueue.size() >= maxBatchQueueSize;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean thresholdExpired() {
+ return accessQueue.size() >= maxBatchQueueSize;
+ }
- @Override
- public void onEntryRemove(HashEntry<K, V> e) {
- lruQueue.remove(e);
- // we could have multiple instances of e in accessQueue; remove them all
- while (accessQueue.remove(e)) {
+ @Override
+ public void onEntryRemove(HashEntry<K, V> e) {
+ lruQueue.remove(e);
+ // we could have multiple instances of e in accessQueue; remove them all
+ while (accessQueue.remove(e)) {
continue;
- }
- }
+ }
+ }
- @Override
- public void clear() {
- lruQueue.clear();
- accessQueue.clear();
- }
+ @Override
+ public void clear() {
+ lruQueue.clear();
+ accessQueue.clear();
+ }
- @Override
- public Eviction strategy() {
- return Eviction.LRU;
- }
+ @Override
+ public Eviction strategy() {
+ return Eviction.LRU;
+ }
}
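
For orientation, the numbers below show how the LRU policy's tuning values work out for a hypothetical segment with capacity 32 and the default load factor 0.75 (illustrative values only, not taken from this patch). Hits are recorded lock-free in accessQueue and only folded into the LRU ordering when execute() runs under the segment lock:

   // Illustration only: derived LRU tuning values for an assumed
   // segment capacity of 32 and load factor of 0.75.
   int capacity = 32;
   float lf = 0.75f;
   int maxBatchSize = capacity * 10;                    // 320, as passed by Eviction.LRU.make()
   int maxBatchQueueSize = Math.min(maxBatchSize, 64);  // capped at EvictionPolicy.MAX_BATCH_SIZE
   int trimDownSize = (int) (capacity * lf);            // 24: execute() evicts down to this size
   // onEntryHit() starts returning true, asking the caller to run execute(),
   // once accessQueue holds at least 64 * 0.75 = 48 recorded hits;
   // thresholdExpired() forces a blocking eviction pass at 64.
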
static final class LIRS<K, V> implements EvictionPolicy<K, V> {
- private final static int MIN_HIR_SIZE = 2;
- private final Segment<K,V> segment;
- private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
- private final LinkedHashMap<K, HashEntry<K, V>> stack;
- private final LinkedList<HashEntry<K, V>> queue;
- private final int maxBatchQueueSize;
- private final int lirSizeLimit;
- private final int hirSizeLimit;
- private int currentLIRSize;
- private final float batchThresholdFactor;
+ private final static int MIN_HIR_SIZE = 2;
+ private final Segment<K,V> segment;
+ private final ConcurrentLinkedQueue<HashEntry<K, V>> accessQueue;
+ private final LinkedHashMap<K, HashEntry<K, V>> stack;
+ private final LinkedList<HashEntry<K, V>> queue;
+ private final int maxBatchQueueSize;
+ private final int lirSizeLimit;
+ private final int hirSizeLimit;
+ private int currentLIRSize;
+ private final float batchThresholdFactor;
- public LIRS(Segment<K,V> s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) {
- this.segment = s;
- int tmpLirSize = (int) (capacity * 0.9);
- int tmpHirSizeLimit = capacity - tmpLirSize;
- if (tmpHirSizeLimit < MIN_HIR_SIZE) {
- hirSizeLimit = MIN_HIR_SIZE;
- lirSizeLimit = capacity - hirSizeLimit;
- } else {
- hirSizeLimit = tmpHirSizeLimit;
- lirSizeLimit = tmpLirSize;
- }
- this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
- this.batchThresholdFactor = batchThresholdFactor;
- this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
- this.stack = new LinkedHashMap<K, HashEntry<K, V>>();
- this.queue = new LinkedList<HashEntry<K, V>>();
- }
+ public LIRS(Segment<K,V> s, int capacity, int maxBatchSize, float batchThresholdFactor) {
+ this.segment = s;
+ int tmpLirSize = (int) (capacity * 0.9);
+ int tmpHirSizeLimit = capacity - tmpLirSize;
+ if (tmpHirSizeLimit < MIN_HIR_SIZE) {
+ hirSizeLimit = MIN_HIR_SIZE;
+ lirSizeLimit = capacity - hirSizeLimit;
+ } else {
+ hirSizeLimit = tmpHirSizeLimit;
+ lirSizeLimit = tmpLirSize;
+ }
+ this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize;
+ this.batchThresholdFactor = batchThresholdFactor;
+ this.accessQueue = new ConcurrentLinkedQueue<HashEntry<K, V>>();
+ this.stack = new LinkedHashMap<K, HashEntry<K, V>>();
+ this.queue = new LinkedList<HashEntry<K, V>>();
+ }
- @Override
- public Set<HashEntry<K, V>> execute() {
- Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
- try {
- for (HashEntry<K, V> e : accessQueue) {
- if (present(e)) {
- if (e.recency() == Recency.LIR_RESIDENT) {
- handleLIRHit(e, evicted);
- } else if (e.recency() == Recency.HIR_RESIDENT) {
- handleHIRHit(e, evicted);
- }
- }
+ @Override
+ public Set<HashEntry<K, V>> execute() {
+ Set<HashEntry<K, V>> evicted = new HashSet<HashEntry<K, V>>();
+ try {
+ for (HashEntry<K, V> e : accessQueue) {
+ if (present(e)) {
+ if (e.recency() == Recency.LIR_RESIDENT) {
+ handleLIRHit(e, evicted);
+ } else if (e.recency() == Recency.HIR_RESIDENT) {
+ handleHIRHit(e, evicted);
+ }
}
- removeFromSegment(evicted);
- } finally {
- accessQueue.clear();
- }
- return evicted;
- }
+ }
+ removeFromSegment(evicted);
+ } finally {
+ accessQueue.clear();
+ }
+ return evicted;
+ }
- private void handleHIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
- boolean inStack = stack.containsKey(e.key);
- if (inStack) {
+ private void handleHIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
+ boolean inStack = stack.containsKey(e.key);
+ if (inStack) {
stack.remove(e.key);
}
- // first put on top of the stack
- stack.put(e.key, e);
+ // first put on top of the stack
+ stack.put(e.key, e);
- if (inStack) {
- queue.remove(e);
- e.transitionToLIRResident();
- switchBottomostLIRtoHIRAndPrune(evicted);
- } else {
- queue.remove(e);
- queue.addLast(e);
- }
- }
+ if (inStack) {
+ queue.remove(e);
+ e.transitionToLIRResident();
+ switchBottomostLIRtoHIRAndPrune(evicted);
+ } else {
+ queue.remove(e);
+ queue.addLast(e);
+ }
+ }
- private void handleLIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
- stack.remove(e.key);
- stack.put(e.key, e);
- for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
- HashEntry<K, V> next = i.next();
- if (next.recency() == Recency.LIR_RESIDENT) {
- break;
- } else {
- i.remove();
- evicted.add(next);
- }
- }
- }
+ private void handleLIRHit(HashEntry<K, V> e, Set<HashEntry<K, V>> evicted) {
+ stack.remove(e.key);
+ stack.put(e.key, e);
+ for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
+ HashEntry<K, V> next = i.next();
+ if (next.recency() == Recency.LIR_RESIDENT) {
+ break;
+ } else {
+ i.remove();
+ evicted.add(next);
+ }
+ }
+ }
- private boolean present(HashEntry<K, V> e) {
- return stack.containsKey(e.key) || queue.contains(e);
- }
+ private boolean present(HashEntry<K, V> e) {
+ return stack.containsKey(e.key) || queue.contains(e);
+ }
- @Override
- public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
- // initialization
- Set<HashEntry<K, V>> evicted = Collections.emptySet();
- if (currentLIRSize + 1 < lirSizeLimit) {
- currentLIRSize++;
- e.transitionToLIRResident();
+ @Override
+ public Set<HashEntry<K, V>> onEntryMiss(HashEntry<K, V> e) {
+ // initialization
+ Set<HashEntry<K, V>> evicted = Collections.emptySet();
+ if (currentLIRSize + 1 < lirSizeLimit) {
+ currentLIRSize++;
+ e.transitionToLIRResident();
+ stack.put(e.key, e);
+ } else {
+ if (queue.size() < hirSizeLimit) {
+ queue.addLast(e);
+ } else {
+ boolean inStack = stack.containsKey(e.key);
+ HashEntry<K, V> first = queue.removeFirst();
+ first.transitionHIRResidentToHIRNonResident();
+
stack.put(e.key, e);
- } else {
- if (queue.size() < hirSizeLimit) {
- queue.addLast(e);
+
+ evicted = new HashSet<HashEntry<K, V>>();
+ if (inStack) {
+ e.transitionToLIRResident();
+ switchBottomostLIRtoHIRAndPrune(evicted);
} else {
- boolean inStack = stack.containsKey(e.key);
- HashEntry<K, V> first = queue.removeFirst();
- first.transitionHIRResidentToHIRNonResident();
-
- stack.put(e.key, e);
-
- evicted = new HashSet<HashEntry<K, V>>();
- if (inStack) {
- e.transitionToLIRResident();
- switchBottomostLIRtoHIRAndPrune(evicted);
- } else {
- queue.addLast(e);
- evicted.add(first);
- }
- // evict from segment
- removeFromSegment(evicted);
+ queue.addLast(e);
+ evicted.add(first);
}
- }
- return evicted;
- }
+ // evict from segment
+ removeFromSegment(evicted);
+ }
+ }
+ return evicted;
+ }
- private void removeFromSegment(Set<HashEntry<K, V>> evicted) {
- for (HashEntry<K, V> e : evicted) {
- segment.remove(e.key, e.hash, null);
- }
- }
+ private void removeFromSegment(Set<HashEntry<K, V>> evicted) {
+ for (HashEntry<K, V> e : evicted) {
+ segment.remove(e.key, e.hash, null);
+ }
+ }
- private void switchBottomostLIRtoHIRAndPrune(Set<HashEntry<K, V>> evicted) {
- boolean seenFirstLIR = false;
- for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
- HashEntry<K, V> next = i.next();
- if (next.recency() == Recency.LIR_RESIDENT) {
- if (!seenFirstLIR) {
- seenFirstLIR = true;
- i.remove();
- next.transitionLIRResidentToHIRResident();
- queue.addLast(next);
- } else {
- break;
- }
+ private void switchBottomostLIRtoHIRAndPrune(Set<HashEntry<K, V>> evicted) {
+ boolean seenFirstLIR = false;
+ for (Iterator<HashEntry<K, V>> i = stack.values().iterator(); i.hasNext();) {
+ HashEntry<K, V> next = i.next();
+ if (next.recency() == Recency.LIR_RESIDENT) {
+ if (!seenFirstLIR) {
+ seenFirstLIR = true;
+ i.remove();
+ next.transitionLIRResidentToHIRResident();
+ queue.addLast(next);
} else {
- i.remove();
- evicted.add(next);
+ break;
}
- }
- }
+ } else {
+ i.remove();
+ evicted.add(next);
+ }
+ }
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean onEntryHit(HashEntry<K, V> e) {
- accessQueue.add(e);
- return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean onEntryHit(HashEntry<K, V> e) {
+ accessQueue.add(e);
+ return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor;
+ }
- /*
- * Invoked without holding a lock on Segment
- */
- @Override
- public boolean thresholdExpired() {
- return accessQueue.size() >= maxBatchQueueSize;
- }
+ /*
+ * Invoked without holding a lock on Segment
+ */
+ @Override
+ public boolean thresholdExpired() {
+ return accessQueue.size() >= maxBatchQueueSize;
+ }
- @Override
- public void onEntryRemove(HashEntry<K, V> e) {
- HashEntry<K, V> removed = stack.remove(e.key);
- if (removed != null && removed.recency() == Recency.LIR_RESIDENT) {
- currentLIRSize--;
- }
- queue.remove(e);
- // we could have multiple instances of e in accessQueue; remove them all
- while (accessQueue.remove(e)) {
- continue;
- }
- }
+ @Override
+ public void onEntryRemove(HashEntry<K, V> e) {
+ HashEntry<K, V> removed = stack.remove(e.key);
+ if (removed != null && removed.recency() == Recency.LIR_RESIDENT) {
+ currentLIRSize--;
+ }
+ queue.remove(e);
+ // we could have multiple instances of e in accessQueue; remove them all
+ while (accessQueue.remove(e)) {
+ continue;
+ }
+ }
- @Override
- public void clear() {
- stack.clear();
- accessQueue.clear();
- }
+ @Override
+ public void clear() {
+ stack.clear();
+ accessQueue.clear();
+ }
- @Override
- public Eviction strategy() {
- return Eviction.LIRS;
- }
+ @Override
+ public Eviction strategy() {
+ return Eviction.LIRS;
+ }
}
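
As a rough guide to the LIRS policy above: about 90% of a segment's capacity is reserved for LIR ("hot") entries tracked on the stack, and the remainder for HIR ("cold") residents kept on the queue; eviction victims come from the HIR queue, and a HIR entry that is hit while still on the stack is promoted to LIR while the bottom-most LIR entry is demoted. A sketch of the split for an assumed capacity of 512 (illustrative numbers only):

   // Illustration only: LIR/HIR sizing for an assumed segment capacity of 512.
   int capacity = 512;
   int tmpLirSize = (int) (capacity * 0.9);      // 460
   int tmpHirSizeLimit = capacity - tmpLirSize;  // 52
   int lirSizeLimit, hirSizeLimit;
   if (tmpHirSizeLimit < 2) {                    // MIN_HIR_SIZE
      hirSizeLimit = 2;
      lirSizeLimit = capacity - hirSizeLimit;
   } else {
      hirSizeLimit = tmpHirSizeLimit;            // 52 HIR (cold) slots
      lirSizeLimit = tmpLirSize;                 // 460 LIR (hot) slots
   }
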
- /**
- * Segments are specialized versions of hash tables. This
- * subclasses from ReentrantLock opportunistically, just to
- * simplify some locking and avoid separate construction.
- */
- static final class Segment<K,V> extends ReentrantLock implements Serializable {
- /*
- * Segments maintain a table of entry lists that are ALWAYS
- * kept in a consistent state, so can be read without locking.
- * Next fields of nodes are immutable (final). All list
- * additions are performed at the front of each bin. This
- * makes it easy to check changes, and also fast to traverse.
- * When nodes would otherwise be changed, new nodes are
- * created to replace them. This works well for hash tables
- * since the bin lists tend to be short. (The average length
- * is less than two for the default load factor threshold.)
- *
- * Read operations can thus proceed without locking, but rely
- * on selected uses of volatiles to ensure that completed
- * write operations performed by other threads are
- * noticed. For most purposes, the "count" field, tracking the
- * number of elements, serves as that volatile variable
- * ensuring visibility. This is convenient because this field
- * needs to be read in many read operations anyway:
- *
- * - All (unsynchronized) read operations must first read the
- * "count" field, and should not look at table entries if
- * it is 0.
- *
- * - All (synchronized) write operations should write to
- * the "count" field after structurally changing any bin.
- * The operations must not take any action that could even
- * momentarily cause a concurrent read operation to see
- * inconsistent data. This is made easier by the nature of
- * the read operations in Map. For example, no operation
- * can reveal that the table has grown but the threshold
- * has not yet been updated, so there are no atomicity
- * requirements for this with respect to reads.
- *
- * As a guide, all critical volatile reads and writes to the
- * count field are marked in code comments.
- */
+ /**
+ * Segments are specialized versions of hash tables. This
+ * subclasses from ReentrantLock opportunistically, just to
+ * simplify some locking and avoid separate construction.
+ */
+ static final class Segment<K,V> extends ReentrantLock {
+ /*
+ * Segments maintain a table of entry lists that are ALWAYS
+ * kept in a consistent state, so can be read without locking.
+ * Next fields of nodes are immutable (final). All list
+ * additions are performed at the front of each bin. This
+ * makes it easy to check changes, and also fast to traverse.
+ * When nodes would otherwise be changed, new nodes are
+ * created to replace them. This works well for hash tables
+ * since the bin lists tend to be short. (The average length
+ * is less than two for the default load factor threshold.)
+ *
+ * Read operations can thus proceed without locking, but rely
+ * on selected uses of volatiles to ensure that completed
+ * write operations performed by other threads are
+ * noticed. For most purposes, the "count" field, tracking the
+ * number of elements, serves as that volatile variable
+ * ensuring visibility. This is convenient because this field
+ * needs to be read in many read operations anyway:
+ *
+ * - All (unsynchronized) read operations must first read the
+ * "count" field, and should not look at table entries if
+ * it is 0.
+ *
+ * - All (synchronized) write operations should write to
+ * the "count" field after structurally changing any bin.
+ * The operations must not take any action that could even
+ * momentarily cause a concurrent read operation to see
+ * inconsistent data. This is made easier by the nature of
+ * the read operations in Map. For example, no operation
+ * can reveal that the table has grown but the threshold
+ * has not yet been updated, so there are no atomicity
+ * requirements for this with respect to reads.
+ *
+ * As a guide, all critical volatile reads and writes to the
+ * count field are marked in code comments.
+ */
- private static final long serialVersionUID = 2249069246763182397L;
+ private static final long serialVersionUID = 2249069246763182397L;
- /**
- * The number of elements in this segment's region.
- */
- transient volatile int count;
+ /**
+ * The number of elements in this segment's region.
+ */
+ transient volatile int count;
- /**
- * Number of updates that alter the size of the table. This is
- * used during bulk-read methods to make sure they see a
- * consistent snapshot: If modCounts change during a traversal
- * of segments computing size or checking containsValue, then
- * we might have an inconsistent view of state so (usually)
- * must retry.
- */
- transient int modCount;
+ /**
+ * Number of updates that alter the size of the table. This is
+ * used during bulk-read methods to make sure they see a
+ * consistent snapshot: If modCounts change during a traversal
+ * of segments computing size or checking containsValue, then
+ * we might have an inconsistent view of state so (usually)
+ * must retry.
+ */
+ transient int modCount;
- /**
- * The table is rehashed when its size exceeds this threshold.
- * (The value of this field is always <tt>(int)(capacity *
- * loadFactor)</tt>.)
- */
- transient int threshold;
+ /**
+ * The table is rehashed when its size exceeds this threshold.
+ * (The value of this field is always <tt>(int)(capacity *
+ * loadFactor)</tt>.)
+ */
+ transient int threshold;
- /**
- * The per-segment table.
- */
- transient volatile HashEntry<K,V>[] table;
+ /**
+ * The per-segment table.
+ */
+ transient volatile HashEntry<K,V>[] table;
- /**
- * The load factor for the hash table. Even though this value
- * is same for all segments, it is replicated to avoid needing
- * links to outer object.
- * @serial
- */
- final float loadFactor;
+ /**
+ * The load factor for the hash table. Even though this value
+ * is same for all segments, it is replicated to avoid needing
+ * links to outer object.
+ * @serial
+ */
+ final float loadFactor;
- transient final EvictionPolicy<K, V> eviction;
+ transient final EvictionPolicy<K, V> eviction;
- transient final EvictionListener<K, V> evictionListener;
+ transient final EvictionListener<K, V> evictionListener;
- Segment(int cap, float lf, Eviction es, EvictionListener<K, V> listener) {
- loadFactor = lf;
- eviction = es.make(this, cap, lf);
- evictionListener = listener;
- setTable(HashEntry.<K, V> newArray(cap));
- }
+ Segment(int cap, float lf, Eviction es, EvictionListener<K, V> listener) {
+ loadFactor = lf;
+ eviction = es.make(this, cap, lf);
+ evictionListener = listener;
+ setTable(HashEntry.<K, V> newArray(cap));
+ }
- @SuppressWarnings("unchecked")
- static final <K,V> Segment<K,V>[] newArray(int i) {
- return new Segment[i];
- }
+ @SuppressWarnings("unchecked")
+ static final <K,V> Segment<K,V>[] newArray(int i) {
+ return new Segment[i];
+ }
- EvictionListener<K, V> getEvictionListener() {
- return evictionListener;
- }
+ EvictionListener<K, V> getEvictionListener() {
+ return evictionListener;
+ }
- /**
- * Sets table to new HashEntry array.
- * Call only while holding lock or in constructor.
- */
- void setTable(HashEntry<K,V>[] newTable) {
- threshold = (int)(newTable.length * loadFactor);
- table = newTable;
- }
+ /**
+ * Sets table to new HashEntry array.
+ * Call only while holding lock or in constructor.
+ */
+ void setTable(HashEntry<K,V>[] newTable) {
+ threshold = (int)(newTable.length * loadFactor);
+ table = newTable;
+ }
- /**
- * Returns properly casted first entry of bin for given hash.
- */
- HashEntry<K,V> getFirst(int hash) {
- HashEntry<K,V>[] tab = table;
- return tab[hash & tab.length - 1];
- }
+ /**
+ * Returns properly casted first entry of bin for given hash.
+ */
+ HashEntry<K,V> getFirst(int hash) {
+ HashEntry<K,V>[] tab = table;
+ return tab[hash & tab.length - 1];
+ }
- /**
- * Reads value field of an entry under lock. Called if value
- * field ever appears to be null. This is possible only if a
- * compiler happens to reorder a HashEntry initialization with
- * its table assignment, which is legal under memory model
- * but is not known to ever occur.
- */
- V readValueUnderLock(HashEntry<K,V> e) {
+ /**
+ * Reads value field of an entry under lock. Called if value
+ * field ever appears to be null. This is possible only if a
+ * compiler happens to reorder a HashEntry initialization with
+ * its table assignment, which is legal under memory model
+ * but is not known to ever occur.
+ */
+ V readValueUnderLock(HashEntry<K,V> e) {
+ lock();
+ try {
+ return e.value;
+ } finally {
+ unlock();
+ }
+ }
+
+ /* Specialized implementations of map methods */
+
+ V get(Object key, int hash) {
+ int c = count;
+ if (c != 0) { // read-volatile
+ V result = null;
+ HashEntry<K, V> e = getFirst(hash);
+ loop: while (e != null) {
+ if (e.hash == hash && key.equals(e.key)) {
+ V v = e.value;
+ if (v != null) {
+ result = v;
+ break loop;
+ } else {
+ result = readValueUnderLock(e); // recheck
+ break loop;
+ }
+ }
+ e = e.next;
+ }
+ // a hit
+ if (result != null) {
+ if (eviction.onEntryHit(e)) {
+ Set<HashEntry<K, V>> evicted = attemptEviction(false);
+ // piggyback listener invocation on callers thread outside lock
+ if (evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
+ }
+ }
+ }
+ }
+ return result;
+ }
+ return null;
+ }
+
+ private Set<HashEntry<K, V>> attemptEviction(boolean lockedAlready) {
+ Set<HashEntry<K, V>> evicted = null;
+ boolean obtainedLock = !lockedAlready ? tryLock() : true;
+ if (!obtainedLock && eviction.thresholdExpired()) {
lock();
+ obtainedLock = true;
+ }
+ if (obtainedLock) {
try {
- return e.value;
+ evicted = eviction.execute();
} finally {
- unlock();
+ if (!lockedAlready) {
+ unlock();
+ }
}
- }
+ }
+ return evicted;
+ }
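
Note on the locking strategy above: onEntryHit() is deliberately invoked outside the segment lock, so a read never blocks just to do eviction housekeeping. When the batch threshold is reached, attemptEviction(false) first tries tryLock() and simply skips the pass if the lock is busy; it only falls back to a blocking lock() once thresholdExpired() reports that the access queue is completely full. Write paths that already hold the lock call attemptEviction(true), which runs execute() without touching the lock state.
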
- /* Specialized implementations of map methods */
-
- V get(Object key, int hash) {
- int c = count;
- if (c != 0) { // read-volatile
- V result = null;
- HashEntry<K, V> e = getFirst(hash);
- loop: while (e != null) {
- if (e.hash == hash && key.equals(e.key)) {
- V v = e.value;
- if (v != null) {
- result = v;
- break loop;
- } else {
- result = readValueUnderLock(e); // recheck
- break loop;
- }
- }
- e = e.next;
+ boolean containsKey(Object key, int hash) {
+ if (count != 0) { // read-volatile
+ HashEntry<K,V> e = getFirst(hash);
+ while (e != null) {
+ if (e.hash == hash && key.equals(e.key)) {
+ return true;
}
- // a hit
- if (result != null) {
- if (eviction.onEntryHit(e)) {
- Set<HashEntry<K, V>> evicted = attemptEviction(false);
- // piggyback listener invocation on callers thread outside lock
- if (evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
- }
- }
- }
- return result;
- }
- return null;
- }
+ e = e.next;
+ }
+ }
+ return false;
+ }
- private Set<HashEntry<K, V>> attemptEviction(boolean lockedAlready) {
- Set<HashEntry<K, V>> evicted = null;
- boolean obtainedLock = !lockedAlready ? tryLock() : true;
- if (!obtainedLock && eviction.thresholdExpired()) {
- lock();
- obtainedLock = true;
- }
- if (obtainedLock) {
- try {
- evicted = eviction.execute();
- } finally {
- if (!lockedAlready) {
- unlock();
+ boolean containsValue(Object value) {
+ if (count != 0) { // read-volatile
+ HashEntry<K,V>[] tab = table;
+ int len = tab.length;
+ for (int i = 0 ; i < len; i++) {
+ for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
+ V v = e.value;
+ if (v == null) {
+ v = readValueUnderLock(e);
}
- }
- }
- return evicted;
- }
-
- boolean containsKey(Object key, int hash) {
- if (count != 0) { // read-volatile
- HashEntry<K,V> e = getFirst(hash);
- while (e != null) {
- if (e.hash == hash && key.equals(e.key)) {
+ if (value.equals(v)) {
return true;
}
- e = e.next;
- }
+ }
}
- return false;
- }
+ }
+ return false;
+ }
- boolean containsValue(Object value) {
- if (count != 0) { // read-volatile
- HashEntry<K,V>[] tab = table;
- int len = tab.length;
- for (int i = 0 ; i < len; i++) {
- for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
- V v = e.value;
- if (v == null) {
- v = readValueUnderLock(e);
- }
- if (value.equals(v)) {
- return true;
- }
- }
- }
+ boolean replace(K key, int hash, V oldValue, V newValue) {
+ lock();
+ Set<HashEntry<K, V>> evicted = null;
+ try {
+ HashEntry<K, V> e = getFirst(hash);
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ e = e.next;
}
- return false;
- }
- boolean replace(K key, int hash, V oldValue, V newValue) {
- lock();
- Set<HashEntry<K, V>> evicted = null;
- try {
- HashEntry<K, V> e = getFirst(hash);
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
- e = e.next;
+ boolean replaced = false;
+ if (e != null && oldValue.equals(e.value)) {
+ replaced = true;
+ e.value = newValue;
+ if (eviction.onEntryHit(e)) {
+ evicted = attemptEviction(true);
}
-
- boolean replaced = false;
- if (e != null && oldValue.equals(e.value)) {
- replaced = true;
- e.value = newValue;
- if (eviction.onEntryHit(e)) {
- evicted = attemptEviction(true);
- }
+ }
+ return replaced;
+ } finally {
+ unlock();
+ // piggyback listener invocation on callers thread outside lock
+ if (evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
}
- return replaced;
- } finally {
- unlock();
- // piggyback listener invocation on callers thread outside lock
- if (evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
- }
- }
- }
+ }
+ }
+ }
- V replace(K key, int hash, V newValue) {
- lock();
- Set<HashEntry<K, V>> evicted = null;
- try {
- HashEntry<K, V> e = getFirst(hash);
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
- e = e.next;
- }
+ V replace(K key, int hash, V newValue) {
+ lock();
+ Set<HashEntry<K, V>> evicted = null;
+ try {
+ HashEntry<K, V> e = getFirst(hash);
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ e = e.next;
+ }
- V oldValue = null;
- if (e != null) {
- oldValue = e.value;
- e.value = newValue;
- if (eviction.onEntryHit(e)) {
- evicted = attemptEviction(true);
- }
+ V oldValue = null;
+ if (e != null) {
+ oldValue = e.value;
+ e.value = newValue;
+ if (eviction.onEntryHit(e)) {
+ evicted = attemptEviction(true);
}
- return oldValue;
- } finally {
- unlock();
- // piggyback listener invocation on callers thread outside lock
- if(evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ // piggyback listener invocation on callers thread outside lock
+ if(evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
}
- }
- }
+ }
+ }
+ }
-
- V put(K key, int hash, V value, boolean onlyIfAbsent) {
- lock();
- Set<HashEntry<K, V>> evicted = null;
- try {
- int c = count;
- if (c++ > threshold && eviction.strategy() == Eviction.NONE) {
+ V put(K key, int hash, V value, boolean onlyIfAbsent) {
+ lock();
+ Set<HashEntry<K, V>> evicted = null;
+ try {
+ int c = count;
+ if (c++ > threshold && eviction.strategy() == Eviction.NONE) {
rehash();
}
- HashEntry<K, V>[] tab = table;
- int index = hash & tab.length - 1;
- HashEntry<K, V> first = tab[index];
- HashEntry<K, V> e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ HashEntry<K, V>[] tab = table;
+ int index = hash & tab.length - 1;
+ HashEntry<K, V> first = tab[index];
+ HashEntry<K, V> e = first;
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
e = e.next;
}
- V oldValue;
- if (e != null) {
- oldValue = e.value;
- if (!onlyIfAbsent) {
- e.value = value;
- eviction.onEntryHit(e);
+ V oldValue;
+ if (e != null) {
+ oldValue = e.value;
+ if (!onlyIfAbsent) {
+ e.value = value;
+ eviction.onEntryHit(e);
+ }
+ } else {
+ oldValue = null;
+ ++modCount;
+ count = c; // write-volatile
+ if (eviction.strategy() != Eviction.NONE) {
+ if (c > tab.length) {
+ // remove entries;lower count
+ evicted = eviction.execute();
+ // re-read first
+ first = tab[index];
}
- } else {
- oldValue = null;
- ++modCount;
- count = c; // write-volatile
- if (eviction.strategy() != Eviction.NONE) {
- if (c > tab.length) {
- // remove entries;lower count
- evicted = eviction.execute();
- // re-read first
- first = tab[index];
- }
- // add a new entry
- tab[index] = new HashEntry<K, V>(key, hash, first, value);
- // notify a miss
- Set<HashEntry<K, V>> newlyEvicted = eviction.onEntryMiss(tab[index]);
- if (!newlyEvicted.isEmpty()) {
- if (evicted != null) {
- evicted.addAll(newlyEvicted);
- } else {
- evicted = newlyEvicted;
- }
- }
- } else {
- tab[index] = new HashEntry<K, V>(key, hash, first, value);
+ // add a new entry
+ tab[index] = new HashEntry<K, V>(key, hash, first, value);
+ // notify a miss
+ Set<HashEntry<K, V>> newlyEvicted = eviction.onEntryMiss(tab[index]);
+ if (!newlyEvicted.isEmpty()) {
+ if (evicted != null) {
+ evicted.addAll(newlyEvicted);
+ } else {
+ evicted = newlyEvicted;
+ }
}
- }
- return oldValue;
- } finally {
- unlock();
- // piggyback listener invocation on callers thread outside lock
- if(evicted != null) {
- for (HashEntry<K, V> he : evicted) {
- evictionListener.onEntryEviction(he.key, he.value);
- }
- }
- }
+ } else {
+ tab[index] = new HashEntry<K, V>(key, hash, first, value);
+ }
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ // piggyback listener invocation on callers thread outside lock
+ if(evicted != null) {
+ for (HashEntry<K, V> he : evicted) {
+ evictionListener.onEntryEviction(he.key, he.value);
+ }
+ }
+ }
}
+ void rehash() {
+ HashEntry<K,V>[] oldTable = table;
+ int oldCapacity = oldTable.length;
+ if (oldCapacity >= MAXIMUM_CAPACITY) {
+ return;
+ }
- void rehash() {
- HashEntry<K,V>[] oldTable = table;
- int oldCapacity = oldTable.length;
- if (oldCapacity >= MAXIMUM_CAPACITY) {
- return;
- }
+ /*
+ * Reclassify nodes in each list to new Map. Because we are
+ * using power-of-two expansion, the elements from each bin
+ * must either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next
+ * fields won't change. Statistically, at the default
+ * threshold, only about one-sixth of them need cloning when
+ * a table doubles. The nodes they replace will be garbage
+ * collectable as soon as they are no longer referenced by any
+ * reader thread that may be in the midst of traversing table
+ * right now.
+ */
- /*
- * Reclassify nodes in each list to new Map. Because we are
- * using power-of-two expansion, the elements from each bin
- * must either stay at same index, or move with a power of two
- * offset. We eliminate unnecessary node creation by catching
- * cases where old nodes can be reused because their next
- * fields won't change. Statistically, at the default
- * threshold, only about one-sixth of them need cloning when
- * a table doubles. The nodes they replace will be garbage
- * collectable as soon as they are no longer referenced by any
- * reader thread that may be in the midst of traversing table
- * right now.
- */
+ HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
+ threshold = (int)(newTable.length * loadFactor);
+ int sizeMask = newTable.length - 1;
+ for (int i = 0; i < oldCapacity ; i++) {
+ // We need to guarantee that any existing reads of old Map can
+ // proceed. So we cannot yet null out each bin.
+ HashEntry<K,V> e = oldTable[i];
- HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
- threshold = (int)(newTable.length * loadFactor);
- int sizeMask = newTable.length - 1;
- for (int i = 0; i < oldCapacity ; i++) {
- // We need to guarantee that any existing reads of old Map can
- // proceed. So we cannot yet null out each bin.
- HashEntry<K,V> e = oldTable[i];
+ if (e != null) {
+ HashEntry<K,V> next = e.next;
+ int idx = e.hash & sizeMask;
- if (e != null) {
- HashEntry<K,V> next = e.next;
- int idx = e.hash & sizeMask;
+ // Single node on list
+ if (next == null) {
+ newTable[idx] = e;
+ } else {
+ // Reuse trailing consecutive sequence at same slot
+ HashEntry<K,V> lastRun = e;
+ int lastIdx = idx;
+ for (HashEntry<K,V> last = next;
+ last != null;
+ last = last.next) {
+ int k = last.hash & sizeMask;
+ if (k != lastIdx) {
+ lastIdx = k;
+ lastRun = last;
+ }
+ }
+ newTable[lastIdx] = lastRun;
- // Single node on list
- if (next == null) {
- newTable[idx] = e;
- } else {
- // Reuse trailing consecutive sequence at same slot
- HashEntry<K,V> lastRun = e;
- int lastIdx = idx;
- for (HashEntry<K,V> last = next;
- last != null;
- last = last.next) {
- int k = last.hash & sizeMask;
- if (k != lastIdx) {
- lastIdx = k;
- lastRun = last;
- }
- }
- newTable[lastIdx] = lastRun;
+ // Clone all remaining nodes
+ for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
+ int k = p.hash & sizeMask;
+ HashEntry<K,V> n = newTable[k];
+ newTable[k] = new HashEntry<K,V>(p.key, p.hash,
+ n, p.value);
+ }
+ }
+ }
+ }
+ table = newTable;
+ }
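
A quick illustration of the power-of-two reindexing that the comment above relies on, with made-up numbers: after doubling, an entry can land in exactly one of two bins (its old index, or old index plus the old capacity), which is what allows a trailing run of nodes that all map to the same new bin to be reused as-is via lastRun:

   // Illustration only: index math for an assumed old capacity of 32.
   int oldCapacity = 32;
   int sizeMask = (oldCapacity << 1) - 1;    // 63 for the doubled table
   int hash = 45;                            // hypothetical entry hash
   int oldIndex = hash & (oldCapacity - 1);  // 13
   int newIndex = hash & sizeMask;           // 45, i.e. 13 + 32 (otherwise it stays at 13)
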
- // Clone all remaining nodes
- for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
- int k = p.hash & sizeMask;
- HashEntry<K,V> n = newTable[k];
- newTable[k] = new HashEntry<K,V>(p.key, p.hash,
- n, p.value);
- }
- }
- }
+ /**
+ * Remove; match on key only if value null, else match both.
+ */
+ V remove(Object key, int hash, Object value) {
+ lock();
+ try {
+ int c = count - 1;
+ HashEntry<K, V>[] tab = table;
+ int index = hash & tab.length - 1;
+ HashEntry<K, V> first = tab[index];
+ HashEntry<K, V> e = first;
+ while (e != null && (e.hash != hash || !key.equals(e.key))) {
+ e = e.next;
}
- table = newTable;
- }
- /**
- * Remove; match on key only if value null, else match both.
- */
- V remove(Object key, int hash, Object value) {
- lock();
- try {
- int c = count - 1;
- HashEntry<K, V>[] tab = table;
- int index = hash & tab.length - 1;
- HashEntry<K, V> first = tab[index];
- HashEntry<K, V> e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key))) {
- e = e.next;
- }
+ V oldValue = null;
+ if (e != null) {
+ V v = e.value;
+ if (value == null || value.equals(v)) {
+ oldValue = v;
+ // All entries following removed node can stay
+ // in list, but all preceding ones need to be
+ // cloned.
+ ++modCount;
- V oldValue = null;
- if (e != null) {
- V v = e.value;
- if (value == null || value.equals(v)) {
- oldValue = v;
- // All entries following removed node can stay
- // in list, but all preceding ones need to be
- // cloned.
- ++modCount;
+ // e was removed
+ eviction.onEntryRemove(e);
- // e was removed
- eviction.onEntryRemove(e);
+ HashEntry<K, V> newFirst = e.next;
+ for (HashEntry<K, V> p = first; p != e; p = p.next) {
+ // allow p to be GC-ed
+ eviction.onEntryRemove(p);
+ newFirst = new HashEntry<K, V>(p.key, p.hash, newFirst, p.value);
+ // and notify eviction algorithm about new hash entries
+ eviction.onEntryMiss(newFirst);
+ }
- HashEntry<K, V> newFirst = e.next;
- for (HashEntry<K, V> p = first; p != e; p = p.next) {
- // allow p to be GC-ed
- eviction.onEntryRemove(p);
- newFirst = new HashEntry<K, V>(p.key, p.hash, newFirst, p.value);
- // and notify eviction algorithm about new hash entries
- eviction.onEntryMiss(newFirst);
- }
+ tab[index] = newFirst;
+ count = c; // write-volatile
+ }
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ }
+ }
- tab[index] = newFirst;
- count = c; // write-volatile
- }
+ void clear() {
+ if (count != 0) {
+ lock();
+ try {
+ HashEntry<K, V>[] tab = table;
+ for (int i = 0; i < tab.length; i++) {
+ tab[i] = null;
}
- return oldValue;
- } finally {
+ ++modCount;
+ eviction.clear();
+ count = 0; // write-volatile
+ } finally {
unlock();
- }
- }
+ }
+ }
+ }
+ }
- void clear() {
- if (count != 0) {
- lock();
- try {
- HashEntry<K, V>[] tab = table;
- for (int i = 0; i < tab.length; i++) {
- tab[i] = null;
- }
- ++modCount;
- eviction.clear();
- count = 0; // write-volatile
- } finally {
- unlock();
- }
- }
- }
- }
+ /* ---------------- Public operations -------------- */
- /* ---------------- Public operations -------------- */
-
-
- /**
- * Creates a new, empty map with the specified maximum capacity, load factor and concurrency
- * level.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- * @param concurrencyLevel
- * the estimated number of concurrently updating threads. The implementation performs
- * internal sizing to try to accommodate this many threads.
- *
- * @param evictionStrategy
- * the algorithm used to evict elements from this map
- *
- * @param evictionListener
- * the eviction listener callback to be notified about evicted elements
- *
- * @throws IllegalArgumentException
- * if the initial capacity is negative or the load factor or concurrencyLevel are
- * nonpositive.
- */
- public BoundedConcurrentHashMap(int capacity, int concurrencyLevel,
- Eviction evictionStrategy, EvictionListener<K, V> evictionListener) {
- if (capacity < 0 || concurrencyLevel <= 0) {
+ /**
+ * Creates a new, empty map with the specified maximum capacity, load factor and concurrency
+ * level.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ * @param concurrencyLevel
+ * the estimated number of concurrently updating threads. The implementation performs
+ * internal sizing to try to accommodate this many threads.
+ *
+ * @param evictionStrategy
+ * the algorithm used to evict elements from this map
+ *
+ * @param evictionListener
+ * the eviction listener callback to be notified about evicted elements
+ *
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public BoundedConcurrentHashMap(int capacity, int concurrencyLevel,
+ Eviction evictionStrategy, EvictionListener<K, V> evictionListener) {
+ if (capacity < 0 || concurrencyLevel <= 0) {
throw new IllegalArgumentException();
}
- concurrencyLevel = Math.min(capacity / 2, concurrencyLevel); // concurrencyLevel cannot be > capacity/2
- concurrencyLevel = Math.max(concurrencyLevel, 1); // concurrencyLevel cannot be less than 1
+ concurrencyLevel = Math.min(capacity / 2, concurrencyLevel); // concurrencyLevel cannot be > capacity/2
+ concurrencyLevel = Math.max(concurrencyLevel, 1); // concurrencyLevel cannot be less than 1
- // minimum two elements per segment
- if (capacity < concurrencyLevel * 2 && capacity != 1) {
+ // minimum two elements per segment
+ if (capacity < concurrencyLevel * 2 && capacity != 1) {
throw new IllegalArgumentException("Maximum capacity has to be at least twice the concurrencyLevel");
}
- if (evictionStrategy == null || evictionListener == null) {
+ if (evictionStrategy == null || evictionListener == null) {
throw new IllegalArgumentException();
}
- if (concurrencyLevel > MAX_SEGMENTS) {
+ if (concurrencyLevel > MAX_SEGMENTS) {
concurrencyLevel = MAX_SEGMENTS;
}
- // Find power-of-two sizes best matching arguments
- int sshift = 0;
- int ssize = 1;
- while (ssize < concurrencyLevel) {
- ++sshift;
- ssize <<= 1;
- }
- segmentShift = 32 - sshift;
- segmentMask = ssize - 1;
- this.segments = Segment.newArray(ssize);
+ // Find power-of-two sizes best matching arguments
+ int sshift = 0;
+ int ssize = 1;
+ while (ssize < concurrencyLevel) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ segmentShift = 32 - sshift;
+ segmentMask = ssize - 1;
+ this.segments = Segment.newArray(ssize);
- if (capacity > MAXIMUM_CAPACITY) {
+ if (capacity > MAXIMUM_CAPACITY) {
capacity = MAXIMUM_CAPACITY;
}
- int c = capacity / ssize;
- if (c * ssize < capacity) {
+ int c = capacity / ssize;
+ if (c * ssize < capacity) {
++c;
}
- int cap = 1;
- while (cap < c) {
+ int cap = 1;
+ while (cap < c) {
cap <<= 1;
}
- for (int i = 0; i < this.segments.length; ++i) {
- this.segments[i] = new Segment<K, V>(cap, DEFAULT_LOAD_FACTOR, evictionStrategy,
- evictionListener);
+ for (int i = 0; i < this.segments.length; ++i) {
+ this.segments[i] = new Segment<K, V>(cap, DEFAULT_LOAD_FACTOR, evictionStrategy, evictionListener);
}
- }
+ }
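
Worked through with illustrative arguments (not taken from this patch), the sizing logic above turns capacity 512 and concurrencyLevel 16 into 16 segments of 32 entries each:

   // Illustration only: how new BoundedConcurrentHashMap<K, V>(512, 16, ...)
   // is carved into segments by the constructor above.
   int capacity = 512, concurrencyLevel = 16;
   concurrencyLevel = Math.max(Math.min(capacity / 2, concurrencyLevel), 1); // still 16
   int sshift = 0, ssize = 1;
   while (ssize < concurrencyLevel) { ++sshift; ssize <<= 1; }   // ssize = 16, sshift = 4
   int segmentShift = 32 - sshift;                               // 28
   int segmentMask = ssize - 1;                                  // 15
   int c = capacity / ssize;                                     // 32
   if (c * ssize < capacity) { ++c; }                            // no rounding needed here
   int cap = 1;
   while (cap < c) { cap <<= 1; }                                // per-segment capacity 32
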
- /**
- * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
- * level and LRU eviction policy.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- * @param concurrencyLevel
- * the estimated number of concurrently updating threads. The implementation performs
- * internal sizing to try to accommodate this many threads.
- *
- * @throws IllegalArgumentException
- * if the initial capacity is negative or the load factor or concurrencyLevel are
- * nonpositive.
- */
- public BoundedConcurrentHashMap(int capacity, int concurrencyLevel) {
- this(capacity, concurrencyLevel, Eviction.LRU);
- }
+ /**
+ * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
+ * level and LRU eviction policy.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ * @param concurrencyLevel
+ * the estimated number of concurrently updating threads. The implementation performs
+ * internal sizing to try to accommodate this many threads.
+ *
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public BoundedConcurrentHashMap(int capacity, int concurrencyLevel) {
+ this(capacity, concurrencyLevel, Eviction.LRU);
+ }
- /**
- * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
- * level and eviction strategy.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- * @param concurrencyLevel
- * the estimated number of concurrently updating threads. The implementation performs
- * internal sizing to try to accommodate this many threads.
- *
- * @param evictionStrategy
- * the algorithm used to evict elements from this map
- *
- * @throws IllegalArgumentException
- * if the initial capacity is negative or the load factor or concurrencyLevel are
- * nonpositive.
- */
- public BoundedConcurrentHashMap(int capacity, int concurrencyLevel, Eviction evictionStrategy) {
- this(capacity, concurrencyLevel, evictionStrategy, new NullEvictionListener<K, V>());
- }
+ /**
+ * Creates a new, empty map with the specified maximum capacity, load factor, concurrency
+ * level and eviction strategy.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ * @param concurrencyLevel
+ * the estimated number of concurrently updating threads. The implementation performs
+ * internal sizing to try to accommodate this many threads.
+ *
+ * @param evictionStrategy
+ * the algorithm used to evict elements from this map
+ *
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public BoundedConcurrentHashMap(int capacity, int concurrencyLevel, Eviction evictionStrategy) {
+ this(capacity, concurrencyLevel, evictionStrategy, new NullEvictionListener<K, V>());
+ }
- /**
- * Creates a new, empty map with the specified maximum capacity, default concurrency
- * level and LRU eviction policy.
- *
- * @param capacity
- * is the upper bound capacity for the number of elements in this map
- *
- *
- * @throws IllegalArgumentException if the initial capacity of
- * elements is negative or the load factor is nonpositive
- *
- * @since 1.6
- */
- public BoundedConcurrentHashMap(int capacity) {
- this(capacity, DEFAULT_CONCURRENCY_LEVEL);
- }
+ /**
+ * Creates a new, empty map with the specified maximum capacity, default concurrency
+ * level and LRU eviction policy.
+ *
+ * @param capacity
+ * is the upper bound capacity for the number of elements in this map
+ *
+ *
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative or the load factor is nonpositive
+ *
+ * @since 1.6
+ */
+ public BoundedConcurrentHashMap(int capacity) {
+ this(capacity, DEFAULT_CONCURRENCY_LEVEL);
+ }
- /**
- * Creates a new, empty map with the default maximum capacity
- */
- public BoundedConcurrentHashMap() {
- this(DEFAULT_MAXIMUM_CAPACITY, DEFAULT_CONCURRENCY_LEVEL);
- }
+ /**
+ * Creates a new, empty map with the default maximum capacity
+ */
+ public BoundedConcurrentHashMap() {
+ this(DEFAULT_MAXIMUM_CAPACITY, DEFAULT_CONCURRENCY_LEVEL);
+ }
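
Taken together, a minimal usage sketch of the public constructors and the eviction listener callback (hypothetical key/value types and listener body, for illustration only):

   BoundedConcurrentHashMap.EvictionListener<String, byte[]> listener =
         new BoundedConcurrentHashMap.EvictionListener<String, byte[]>() {
            public void onEntryEviction(String key, byte[] value) {
               // hypothetical reaction to an eviction, e.g. passivate to a store
            }
         };
   ConcurrentMap<String, byte[]> cache = new BoundedConcurrentHashMap<String, byte[]>(
         512, 16, BoundedConcurrentHashMap.Eviction.LIRS, listener);
   cache.put("k1", new byte[16]);
   byte[] v = cache.get("k1");   // hits feed the access queue of the LIRS policy
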
- /**
- * Returns <tt>true</tt> if this map contains no key-value mappings.
- *
- * @return <tt>true</tt> if this map contains no key-value mappings
- */
- @Override
+ /**
+ * Returns <tt>true</tt> if this map contains no key-value mappings.
+ *
+ * @return <tt>true</tt> if this map contains no key-value mappings
+ */
+ @Override
public boolean isEmpty() {
- final Segment<K,V>[] segments = this.segments;
- /*
- * We keep track of per-segment modCounts to avoid ABA
- * problems in which an element in one segment was added and
- * in another removed during traversal, in which case the
- * table was never actually empty at any point. Note the
- * similar use of modCounts in the size() and containsValue()
- * methods, which are the only other methods also susceptible
- * to ABA problems.
- */
- int[] mc = new int[segments.length];
- int mcsum = 0;
- for (int i = 0; i < segments.length; ++i) {
- if (segments[i].count != 0) {
+ final Segment<K,V>[] segments = this.segments;
+ /*
+ * We keep track of per-segment modCounts to avoid ABA
+ * problems in which an element in one segment was added and
+ * in another removed during traversal, in which case the
+ * table was never actually empty at any point. Note the
+ * similar use of modCounts in the size() and containsValue()
+ * methods, which are the only other methods also susceptible
+ * to ABA problems.
+ */
+ int[] mc = new int[segments.length];
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++i) {
+ if (segments[i].count != 0) {
+ return false;
+ } else {
+ mcsum += mc[i] = segments[i].modCount;
+ }
+ }
+ // If mcsum happens to be zero, then we know we got a snapshot
+ // before any modifications at all were made. This is
+ // probably common enough to bother tracking.
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++i) {
+ if (segments[i].count != 0 || mc[i] != segments[i].modCount) {
return false;
- } else {
- mcsum += mc[i] = segments[i].modCount;
}
- }
- // If mcsum happens to be zero, then we know we got a snapshot
- // before any modifications at all were made. This is
- // probably common enough to bother tracking.
- if (mcsum != 0) {
- for (int i = 0; i < segments.length; ++i) {
- if (segments[i].count != 0 ||
- mc[i] != segments[i].modCount) {
- return false;
- }
- }
- }
- return true;
- }
+ }
+ }
+ return true;
+ }
- /**
- * Returns the number of key-value mappings in this map. If the
- * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
- * <tt>Integer.MAX_VALUE</tt>.
- *
- * @return the number of key-value mappings in this map
- */
- @Override
+ /**
+ * Returns the number of key-value mappings in this map. If the
+ * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
+ * <tt>Integer.MAX_VALUE</tt>.
+ *
+ * @return the number of key-value mappings in this map
+ */
+ @Override
public int size() {
- final Segment<K,V>[] segments = this.segments;
- long sum = 0;
- long check = 0;
- int[] mc = new int[segments.length];
- // Try a few times to get accurate count. On failure due to
- // continuous async changes in table, resort to locking.
- for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
- check = 0;
- sum = 0;
- int mcsum = 0;
- for (int i = 0; i < segments.length; ++i) {
- sum += segments[i].count;
- mcsum += mc[i] = segments[i].modCount;
+ final Segment<K,V>[] segments = this.segments;
+ long sum = 0;
+ long check = 0;
+ int[] mc = new int[segments.length];
+ // Try a few times to get accurate count. On failure due to
+ // continuous async changes in table, resort to locking.
+ for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) {
+ check = 0;
+ sum = 0;
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++ i) {
+ sum += segments[i].count;
+ mcsum += mc[i] = segments[i].modCount;
+ }
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++ i) {
+ check += segments[i].count;
+ if (mc[i] != segments[i].modCount) {
+ check = -1; // force retry
+ break;
+ }
}
- if (mcsum != 0) {
- for (int i = 0; i < segments.length; ++i) {
- check += segments[i].count;
- if (mc[i] != segments[i].modCount) {
- check = -1; // force retry
- break;
- }
- }
- }
- if (check == sum) {
- break;
- }
- }
- if (check != sum) { // Resort to locking all segments
- sum = 0;
- for (int i = 0; i < segments.length; ++i) {
- segments[i].lock();
- }
- for (int i = 0; i < segments.length; ++i) {
- sum += segments[i].count;
- }
- for (int i = 0; i < segments.length; ++i) {
- segments[i].unlock();
- }
- }
- if (sum > Integer.MAX_VALUE) {
+ }
+ if (check == sum) {
+ break;
+ }
+ }
+ if (check != sum) { // Resort to locking all segments
+ sum = 0;
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].lock();
+ }
+ for (int i = 0; i < segments.length; ++ i) {
+ sum += segments[i].count;
+ }
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].unlock();
+ }
+ }
+ if (sum > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
} else {
- return (int)sum;
+ return (int) sum;
}
- }
+ }
- /**
- * Returns the value to which the specified key is mapped,
- * or {@code null} if this map contains no mapping for the key.
- *
- * <p>More formally, if this map contains a mapping from a key
- * {@code k} to a value {@code v} such that {@code key.equals(k)},
- * then this method returns {@code v}; otherwise it returns
- * {@code null}. (There can be at most one such mapping.)
- *
- * @throws NullPointerException if the specified key is null
- */
- @Override
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
+ * then this method returns {@code v}; otherwise it returns
+ * {@code null}. (There can be at most one such mapping.)
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
public V get(Object key) {
- int hash = hash(key.hashCode());
- return segmentFor(hash).get(key, hash);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).get(key, hash);
+ }
- /**
- * Tests if the specified object is a key in this table.
- *
- * @param key possible key
- * @return <tt>true</tt> if and only if the specified object
- * is a key in this table, as determined by the
- * <tt>equals</tt> method; <tt>false</tt> otherwise.
- * @throws NullPointerException if the specified key is null
- */
- @Override
+ /**
+ * Tests if the specified object is a key in this table.
+ *
+ * @param key possible key
+ * @return <tt>true</tt> if and only if the specified object
+ * is a key in this table, as determined by the
+ * <tt>equals</tt> method; <tt>false</tt> otherwise.
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
public boolean containsKey(Object key) {
- int hash = hash(key.hashCode());
- return segmentFor(hash).containsKey(key, hash);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).containsKey(key, hash);
+ }
- /**
- * Returns <tt>true</tt> if this map maps one or more keys to the
- * specified value. Note: This method requires a full internal
- * traversal of the hash table, and so is much slower than
- * method <tt>containsKey</tt>.
- *
- * @param value value whose presence in this map is to be tested
- * @return <tt>true</tt> if this map maps one or more keys to the
- * specified value
- * @throws NullPointerException if the specified value is null
- */
- @Override
+ /**
+ * Returns <tt>true</tt> if this map maps one or more keys to the
+ * specified value. Note: This method requires a full internal
+ * traversal of the hash table, and so is much slower than
+ * method <tt>containsKey</tt>.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map maps one or more keys to the
+ * specified value
+ * @throws NullPointerException if the specified value is null
+ */
+ @Override
public boolean containsValue(Object value) {
- if (value == null) {
+ if (value == null) {
throw new NullPointerException();
}
- // See explanation of modCount use above
+ // See explanation of modCount use above
- final Segment<K,V>[] segments = this.segments;
- int[] mc = new int[segments.length];
+ final Segment<K, V>[] segments = this.segments;
+ int[] mc = new int[segments.length];
- // Try a few times without locking
- for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
- int sum = 0;
- int mcsum = 0;
- for (int i = 0; i < segments.length; ++i) {
- int c = segments[i].count;
- mcsum += mc[i] = segments[i].modCount;
- if (segments[i].containsValue(value)) {
- return true;
+ // Try a few times without locking
+ for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) {
+ int sum = 0;
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++ i) {
+ int c = segments[i].count;
+ mcsum += mc[i] = segments[i].modCount;
+ if (segments[i].containsValue(value)) {
+ return true;
+ }
+ }
+ boolean cleanSweep = true;
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++ i) {
+ int c = segments[i].count;
+ if (mc[i] != segments[i].modCount) {
+ cleanSweep = false;
+ break;
}
}
- boolean cleanSweep = true;
- if (mcsum != 0) {
- for (int i = 0; i < segments.length; ++i) {
- int c = segments[i].count;
- if (mc[i] != segments[i].modCount) {
- cleanSweep = false;
- break;
- }
- }
- }
- if (cleanSweep) {
- return false;
- }
- }
- // Resort to locking all segments
- for (int i = 0; i < segments.length; ++i) {
+ }
+ if (cleanSweep) {
+ return false;
+ }
+ }
+ // Resort to locking all segments
+ for (int i = 0; i < segments.length; ++ i) {
segments[i].lock();
}
- boolean found = false;
- try {
- for (int i = 0; i < segments.length; ++i) {
- if (segments[i].containsValue(value)) {
- found = true;
- break;
- }
+ boolean found = false;
+ try {
+ for (int i = 0; i < segments.length; ++ i) {
+ if (segments[i].containsValue(value)) {
+ found = true;
+ break;
}
- } finally {
- for (int i = 0; i < segments.length; ++i) {
- segments[i].unlock();
- }
- }
- return found;
- }
+ }
+ } finally {
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].unlock();
+ }
+ }
+ return found;
+ }
- /**
- * Legacy method testing if some key maps into the specified value
- * in this table. This method is identical in functionality to
- * {@link #containsValue}, and exists solely to ensure
- * full compatibility with class {@link java.util.Hashtable},
- * which supported this method prior to introduction of the
- * Java Collections framework.
+ /**
+ * Legacy method testing if some key maps into the specified value
+ * in this table. This method is identical in functionality to
+ * {@link #containsValue}, and exists solely to ensure
+ * full compatibility with class {@link java.util.Hashtable},
+ * which supported this method prior to introduction of the
+ * Java Collections framework.
- * @param value a value to search for
- * @return <tt>true</tt> if and only if some key maps to the
- * <tt>value</tt> argument in this table as
- * determined by the <tt>equals</tt> method;
- * <tt>false</tt> otherwise
- * @throws NullPointerException if the specified value is null
- */
- public boolean contains(Object value) {
- return containsValue(value);
- }
+ * @param value a value to search for
+ * @return <tt>true</tt> if and only if some key maps to the
+ * <tt>value</tt> argument in this table as
+ * determined by the <tt>equals</tt> method;
+ * <tt>false</tt> otherwise
+ * @throws NullPointerException if the specified value is null
+ */
+ public boolean contains(Object value) {
+ return containsValue(value);
+ }
- /**
- * Maps the specified key to the specified value in this table.
- * Neither the key nor the value can be null.
- *
- * <p> The value can be retrieved by calling the <tt>get</tt> method
- * with a key that is equal to the original key.
- *
- * @param key key with which the specified value is to be associated
- * @param value value to be associated with the specified key
- * @return the previous value associated with <tt>key</tt>, or
- * <tt>null</tt> if there was no mapping for <tt>key</tt>
- * @throws NullPointerException if the specified key or value is null
- */
- @Override
+ /**
+ * Maps the specified key to the specified value in this table.
+ * Neither the key nor the value can be null.
+ *
+ * <p> The value can be retrieved by calling the <tt>get</tt> method
+ * with a key that is equal to the original key.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
public V put(K key, V value) {
- if (value == null) {
+ if (value == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).put(key, hash, value, false);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).put(key, hash, value, false);
+ }
- /**
- * {@inheritDoc}
- *
- * @return the previous value associated with the specified key,
- * or <tt>null</tt> if there was no mapping for the key
- * @throws NullPointerException if the specified key or value is null
- */
- public V putIfAbsent(K key, V value) {
- if (value == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or <tt>null</tt> if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V putIfAbsent(K key, V value) {
+ if (value == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).put(key, hash, value, true);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).put(key, hash, value, true);
+ }
- /**
- * Copies all of the mappings from the specified map to this one.
- * These mappings replace any mappings that this map had for any of the
- * keys currently in the specified map.
- *
- * @param m mappings to be stored in this map
- */
- @Override
+ /**
+ * Copies all of the mappings from the specified map to this one.
+ * These mappings replace any mappings that this map had for any of the
+ * keys currently in the specified map.
+ *
+ * @param m mappings to be stored in this map
+ */
+ @Override
public void putAll(Map<? extends K, ? extends V> m) {
- for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
+ for (Map.Entry<? extends K, ? extends V> e: m.entrySet()) {
put(e.getKey(), e.getValue());
}
- }
+ }
- /**
- * Removes the key (and its corresponding value) from this map.
- * This method does nothing if the key is not in the map.
- *
- * @param key the key that needs to be removed
- * @return the previous value associated with <tt>key</tt>, or
- * <tt>null</tt> if there was no mapping for <tt>key</tt>
- * @throws NullPointerException if the specified key is null
- */
- @Override
+ /**
+ * Removes the key (and its corresponding value) from this map.
+ * This method does nothing if the key is not in the map.
+ *
+ * @param key the key that needs to be removed
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
public V remove(Object key) {
- int hash = hash(key.hashCode());
- return segmentFor(hash).remove(key, hash, null);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).remove(key, hash, null);
+ }
- /**
- * {@inheritDoc}
- *
- * @throws NullPointerException if the specified key is null
- */
- public boolean remove(Object key, Object value) {
- int hash = hash(key.hashCode());
- if (value == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public boolean remove(Object key, Object value) {
+ int hash = hash(key.hashCode());
+ if (value == null) {
return false;
}
- return segmentFor(hash).remove(key, hash, value) != null;
- }
+ return segmentFor(hash).remove(key, hash, value) != null;
+ }
- /**
- * {@inheritDoc}
- *
- * @throws NullPointerException if any of the arguments are null
- */
- public boolean replace(K key, V oldValue, V newValue) {
- if (oldValue == null || newValue == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if any of the arguments are null
+ */
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ if (oldValue == null || newValue == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).replace(key, hash, oldValue, newValue);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).replace(key, hash, oldValue, newValue);
+ }
- /**
- * {@inheritDoc}
- *
- * @return the previous value associated with the specified key,
- * or <tt>null</tt> if there was no mapping for the key
- * @throws NullPointerException if the specified key or value is null
- */
- public V replace(K key, V value) {
- if (value == null) {
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or <tt>null</tt> if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V replace(K key, V value) {
+ if (value == null) {
throw new NullPointerException();
}
- int hash = hash(key.hashCode());
- return segmentFor(hash).replace(key, hash, value);
- }
+ int hash = hash(key.hashCode());
+ return segmentFor(hash).replace(key, hash, value);
+ }
- /**
- * Removes all of the mappings from this map.
- */
- @Override
+ /**
+ * Removes all of the mappings from this map.
+ */
+ @Override
public void clear() {
- for (int i = 0; i < segments.length; ++i) {
+ for (int i = 0; i < segments.length; ++ i) {
segments[i].clear();
}
- }
+ }
- /**
- * Returns a {@link Set} view of the keys contained in this map.
- * The set is backed by the map, so changes to the map are
- * reflected in the set, and vice-versa. The set supports element
- * removal, which removes the corresponding mapping from this map,
- * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
- * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
- * operations. It does not support the <tt>add</tt> or
- * <tt>addAll</tt> operations.
- *
- * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
- */
- @Override
+ /**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from this map,
+ * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+ * operations. It does not support the <tt>add</tt> or
+ * <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ @Override
public Set<K> keySet() {
- Set<K> ks = keySet;
- return ks != null ? ks : (keySet = new KeySet());
- }
+ Set<K> ks = keySet;
+ return ks != null? ks : (keySet = new KeySet());
+ }
- /**
- * Returns a {@link Collection} view of the values contained in this map.
- * The collection is backed by the map, so changes to the map are
- * reflected in the collection, and vice-versa. The collection
- * supports element removal, which removes the corresponding
- * mapping from this map, via the <tt>Iterator.remove</tt>,
- * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
- * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not
- * support the <tt>add</tt> or <tt>addAll</tt> operations.
- *
- * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
- */
- @Override
+ /**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa. The collection
+ * supports element removal, which removes the corresponding
+ * mapping from this map, via the <tt>Iterator.remove</tt>,
+ * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
+ * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not
+ * support the <tt>add</tt> or <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ @Override
public Collection<V> values() {
- Collection<V> vs = values;
- return vs != null ? vs : (values = new Values());
- }
+ Collection<V> vs = values;
+ return vs != null? vs : (values = new Values());
+ }
- /**
- * Returns a {@link Set} view of the mappings contained in this map.
- * The set is backed by the map, so changes to the map are
- * reflected in the set, and vice-versa. The set supports element
- * removal, which removes the corresponding mapping from the map,
- * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
- * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
- * operations. It does not support the <tt>add</tt> or
- * <tt>addAll</tt> operations.
- *
- * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
- */
- @Override
- public Set<Map.Entry<K,V>> entrySet() {
- Set<Map.Entry<K,V>> es = entrySet;
- return es != null ? es : (entrySet = new EntrySet());
- }
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from the map,
+ * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+ * operations. It does not support the <tt>add</tt> or
+ * <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ @Override
+ public Set<Map.Entry<K, V>> entrySet() {
+ Set<Map.Entry<K, V>> es = entrySet;
+ return es != null? es : (entrySet = new EntrySet());
+ }
- /**
- * Returns an enumeration of the keys in this table.
- *
- * @return an enumeration of the keys in this table
- * @see #keySet()
- */
- public Enumeration<K> keys() {
- return new KeyIterator();
- }
+ /**
+ * Returns an enumeration of the keys in this table.
+ *
+ * @return an enumeration of the keys in this table
+ * @see #keySet()
+ */
+ public Enumeration<K> keys() {
+ return new KeyIterator();
+ }
- /**
- * Returns an enumeration of the values in this table.
- *
- * @return an enumeration of the values in this table
- * @see #values()
- */
- public Enumeration<V> elements() {
- return new ValueIterator();
- }
+ /**
+ * Returns an enumeration of the values in this table.
+ *
+ * @return an enumeration of the values in this table
+ * @see #values()
+ */
+ public Enumeration<V> elements() {
+ return new ValueIterator();
+ }
- /* ---------------- Iterator Support -------------- */
+ /* ---------------- Iterator Support -------------- */
- abstract class HashIterator {
- int nextSegmentIndex;
- int nextTableIndex;
- HashEntry<K,V>[] currentTable;
- HashEntry<K, V> nextEntry;
- HashEntry<K, V> lastReturned;
+ abstract class HashIterator {
+ int nextSegmentIndex;
- HashIterator() {
- nextSegmentIndex = segments.length - 1;
- nextTableIndex = -1;
- advance();
- }
+ int nextTableIndex;
- public boolean hasMoreElements() { return hasNext(); }
+ HashEntry<K, V>[] currentTable;
- final void advance() {
- if (nextEntry != null && (nextEntry = nextEntry.next) != null) {
+ HashEntry<K, V> nextEntry;
+
+ HashEntry<K, V> lastReturned;
+
+ HashIterator() {
+ nextSegmentIndex = segments.length - 1;
+ nextTableIndex = -1;
+ advance();
+ }
+
+ public boolean hasMoreElements() {
+ return hasNext();
+ }
+
+ final void advance() {
+ if (nextEntry != null && (nextEntry = nextEntry.next) != null) {
+ return;
+ }
+
+ while (nextTableIndex >= 0) {
+ if ((nextEntry = currentTable[nextTableIndex --]) != null) {
return;
}
+ }
- while (nextTableIndex >= 0) {
- if ( (nextEntry = currentTable[nextTableIndex--]) != null) {
- return;
+ while (nextSegmentIndex >= 0) {
+ Segment<K, V> seg = segments[nextSegmentIndex --];
+ if (seg.count != 0) {
+ currentTable = seg.table;
+ for (int j = currentTable.length - 1; j >= 0; -- j) {
+ if ((nextEntry = currentTable[j]) != null) {
+ nextTableIndex = j - 1;
+ return;
+ }
}
}
+ }
+ }
- while (nextSegmentIndex >= 0) {
- Segment<K,V> seg = segments[nextSegmentIndex--];
- if (seg.count != 0) {
- currentTable = seg.table;
- for (int j = currentTable.length - 1; j >= 0; --j) {
- if ( (nextEntry = currentTable[j]) != null) {
- nextTableIndex = j - 1;
- return;
- }
- }
- }
- }
- }
+ public boolean hasNext() {
+ return nextEntry != null;
+ }
- public boolean hasNext() { return nextEntry != null; }
+ HashEntry<K, V> nextEntry() {
+ if (nextEntry == null) {
+ throw new NoSuchElementException();
+ }
+ lastReturned = nextEntry;
+ advance();
+ return lastReturned;
+ }
- HashEntry<K,V> nextEntry() {
- if (nextEntry == null) {
- throw new NoSuchElementException();
- }
- lastReturned = nextEntry;
- advance();
- return lastReturned;
- }
+ public void remove() {
+ if (lastReturned == null) {
+ throw new IllegalStateException();
+ }
+ BoundedConcurrentHashMap.this.remove(lastReturned.key);
+ lastReturned = null;
+ }
+ }
- public void remove() {
- if (lastReturned == null) {
- throw new IllegalStateException();
- }
- BoundedConcurrentHashMap.this.remove(lastReturned.key);
- lastReturned = null;
- }
- }
+ final class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> {
+ @Override
+ public K next() {
+ return super.nextEntry().key;
+ }
- final class KeyIterator
- extends HashIterator
- implements Iterator<K>, Enumeration<K>
- {
- public K next() { return super.nextEntry().key; }
- public K nextElement() { return super.nextEntry().key; }
- }
+ @Override
+ public K nextElement() {
+ return super.nextEntry().key;
+ }
+ }
- final class ValueIterator
- extends HashIterator
- implements Iterator<V>, Enumeration<V>
- {
- public V next() { return super.nextEntry().value; }
- public V nextElement() { return super.nextEntry().value; }
- }
+ final class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> {
+ @Override
+ public V next() {
+ return super.nextEntry().value;
+ }
- /**
- * Custom Entry class used by EntryIterator.next(), that relays
- * setValue changes to the underlying map.
- */
- final class WriteThroughEntry
- extends AbstractMap.SimpleEntry<K,V>
- {
- WriteThroughEntry(K k, V v) {
- super(k,v);
- }
+ @Override
+ public V nextElement() {
+ return super.nextEntry().value;
+ }
+ }
- /**
- * Set our entry's value and write through to the map. The
- * value to return is somewhat arbitrary here. Since a
- * WriteThroughEntry does not necessarily track asynchronous
- * changes, the most recent "previous" value could be
- * different from what we return (or could even have been
- * removed in which case the put will re-establish). We do not
- * and cannot guarantee more.
- */
- @Override
+ /**
+ * Custom Entry class used by EntryIterator.next(), that relays
+ * setValue changes to the underlying map.
+ */
+ final class WriteThroughEntry extends AbstractMap.SimpleEntry<K, V> {
+
+ private static final long serialVersionUID = -7041346694785573824L;
+
+ WriteThroughEntry(K k, V v) {
+ super(k, v);
+ }
+
+ /**
+ * Set our entry's value and write through to the map. The
+ * value to return is somewhat arbitrary here. Since a
+ * WriteThroughEntry does not necessarily track asynchronous
+ * changes, the most recent "previous" value could be
+ * different from what we return (or could even have been
+ * removed in which case the put will re-establish). We do not
+ * and cannot guarantee more.
+ */
+ @Override
public V setValue(V value) {
- if (value == null) {
- throw new NullPointerException();
- }
- V v = super.setValue(value);
- BoundedConcurrentHashMap.this.put(getKey(), value);
- return v;
- }
- }
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ V v = super.setValue(value);
+ BoundedConcurrentHashMap.this.put(getKey(), value);
+ return v;
+ }
+ }
- final class EntryIterator
- extends HashIterator
- implements Iterator<Entry<K,V>>
- {
- public Map.Entry<K,V> next() {
- HashEntry<K,V> e = super.nextEntry();
- return new WriteThroughEntry(e.key, e.value);
- }
- }
+ final class EntryIterator extends HashIterator implements Iterator<Entry<K, V>> {
+ @Override
+ public Map.Entry<K, V> next() {
+ HashEntry<K, V> e = super.nextEntry();
+ return new WriteThroughEntry(e.key, e.value);
+ }
+ }
- final class KeySet extends AbstractSet<K> {
- @Override
+ final class KeySet extends AbstractSet<K> {
+ @Override
public Iterator<K> iterator() {
- return new KeyIterator();
- }
- @Override
+ return new KeyIterator();
+ }
+
+ @Override
public int size() {
- return BoundedConcurrentHashMap.this.size();
- }
- @Override
+ return BoundedConcurrentHashMap.this.size();
+ }
+
+ @Override
public boolean isEmpty() {
- return BoundedConcurrentHashMap.this.isEmpty();
- }
- @Override
+ return BoundedConcurrentHashMap.this.isEmpty();
+ }
+
+ @Override
public boolean contains(Object o) {
- return BoundedConcurrentHashMap.this.containsKey(o);
- }
- @Override
+ return BoundedConcurrentHashMap.this.containsKey(o);
+ }
+
+ @Override
public boolean remove(Object o) {
- return BoundedConcurrentHashMap.this.remove(o) != null;
- }
- @Override
+ return BoundedConcurrentHashMap.this.remove(o) != null;
+ }
+
+ @Override
public void clear() {
- BoundedConcurrentHashMap.this.clear();
- }
- }
+ BoundedConcurrentHashMap.this.clear();
+ }
+ }
- final class Values extends AbstractCollection<V> {
- @Override
+ final class Values extends AbstractCollection<V> {
+ @Override
public Iterator<V> iterator() {
- return new ValueIterator();
- }
- @Override
+ return new ValueIterator();
+ }
+
+ @Override
public int size() {
- return BoundedConcurrentHashMap.this.size();
- }
- @Override
+ return BoundedConcurrentHashMap.this.size();
+ }
+
+ @Override
public boolean isEmpty() {
- return BoundedConcurrentHashMap.this.isEmpty();
- }
- @Override
+ return BoundedConcurrentHashMap.this.isEmpty();
+ }
+
+ @Override
public boolean contains(Object o) {
- return BoundedConcurrentHashMap.this.containsValue(o);
- }
- @Override
+ return BoundedConcurrentHashMap.this.containsValue(o);
+ }
+
+ @Override
public void clear() {
- BoundedConcurrentHashMap.this.clear();
- }
- }
+ BoundedConcurrentHashMap.this.clear();
+ }
+ }
- final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
- @Override
- public Iterator<Map.Entry<K,V>> iterator() {
- return new EntryIterator();
- }
- @Override
+ final class EntrySet extends AbstractSet<Map.Entry<K, V>> {
+ @Override
+ public Iterator<Map.Entry<K, V>> iterator() {
+ return new EntryIterator();
+ }
+
+ @Override
public boolean contains(Object o) {
- if (!(o instanceof Map.Entry)) {
- return false;
- }
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- V v = BoundedConcurrentHashMap.this.get(e.getKey());
- return v != null && v.equals(e.getValue());
- }
- @Override
+ if (!(o instanceof Map.Entry)) {
+ return false;
+ }
+ Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
+ V v = BoundedConcurrentHashMap.this.get(e.getKey());
+ return v != null && v.equals(e.getValue());
+ }
+
+ @Override
public boolean remove(Object o) {
- if (!(o instanceof Map.Entry)) {
- return false;
- }
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- return BoundedConcurrentHashMap.this.remove(e.getKey(), e.getValue());
- }
- @Override
+ if (!(o instanceof Map.Entry)) {
+ return false;
+ }
+ Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
+ return BoundedConcurrentHashMap.this.remove(e.getKey(), e.getValue());
+ }
+
+ @Override
public int size() {
- return BoundedConcurrentHashMap.this.size();
- }
- @Override
+ return BoundedConcurrentHashMap.this.size();
+ }
+
+ @Override
public boolean isEmpty() {
- return BoundedConcurrentHashMap.this.isEmpty();
- }
- @Override
+ return BoundedConcurrentHashMap.this.isEmpty();
+ }
+
+ @Override
public void clear() {
- BoundedConcurrentHashMap.this.clear();
- }
- }
+ BoundedConcurrentHashMap.this.clear();
+ }
+ }
- /* ---------------- Serialization Support -------------- */
+ /* ---------------- Serialization Support -------------- */
- /**
- * Save the state of the <tt>ConcurrentHashMap</tt> instance to a
- * stream (i.e., serialize it).
- * @param s the stream
- * @serialData
- * the key (Object) and value (Object)
- * for each key-value mapping, followed by a null pair.
- * The key-value mappings are emitted in no particular order.
- */
- private void writeObject(java.io.ObjectOutputStream s) throws IOException {
- s.defaultWriteObject();
+ /**
+ * Save the state of the <tt>ConcurrentHashMap</tt> instance to a
+ * stream (i.e., serialize it).
+ * @param s the stream
+ * @serialData
+ * the key (Object) and value (Object)
+ * for each key-value mapping, followed by a null pair.
+ * The key-value mappings are emitted in no particular order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s) throws IOException {
+ s.defaultWriteObject();
- for (int k = 0; k < segments.length; ++k) {
- Segment<K,V> seg = segments[k];
- seg.lock();
- try {
- HashEntry<K,V>[] tab = seg.table;
- for (int i = 0; i < tab.length; ++i) {
- for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
- s.writeObject(e.key);
- s.writeObject(e.value);
- }
- }
- } finally {
- seg.unlock();
+ for (int k = 0; k < segments.length; ++ k) {
+ Segment<K, V> seg = segments[k];
+ seg.lock();
+ try {
+ HashEntry<K, V>[] tab = seg.table;
+ for (int i = 0; i < tab.length; ++ i) {
+ for (HashEntry<K, V> e = tab[i]; e != null; e = e.next) {
+ s.writeObject(e.key);
+ s.writeObject(e.value);
+ }
}
- }
- s.writeObject(null);
- s.writeObject(null);
- }
+ } finally {
+ seg.unlock();
+ }
+ }
+ s.writeObject(null);
+ s.writeObject(null);
+ }
- /**
- * Reconstitute the <tt>ConcurrentHashMap</tt> instance from a
- * stream (i.e., deserialize it).
- * @param s the stream
- */
- private void readObject(java.io.ObjectInputStream s)
- throws IOException, ClassNotFoundException {
- s.defaultReadObject();
+ /**
+ * Reconstitute the <tt>ConcurrentHashMap</tt> instance from a
+ * stream (i.e., deserialize it).
+ * @param s the stream
+ */
+ @SuppressWarnings("unchecked")
+ private void readObject(java.io.ObjectInputStream s) throws IOException,
+ ClassNotFoundException {
+ s.defaultReadObject();
- // Initialize each segment to be minimally sized, and let grow.
- for (int i = 0; i < segments.length; ++i) {
- segments[i].setTable(new HashEntry[1]);
- }
+ // Initialize each segment to be minimally sized, and let grow.
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].setTable(new HashEntry[1]);
+ }
- // Read the keys and values, and put the mappings in the table
- for (;;) {
- K key = (K) s.readObject();
- V value = (V) s.readObject();
- if (key == null) {
- break;
- }
- put(key, value);
- }
- }
+ // Read the keys and values, and put the mappings in the table
+ for (;;) {
+ K key = (K) s.readObject();
+ V value = (V) s.readObject();
+ if (key == null) {
+ break;
+ }
+ put(key, value);
+ }
+ }
}
\ No newline at end of file
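
The size() and containsValue() implementations touched by this reindentation follow the classic ConcurrentHashMap approach: sum the per-segment counts a few times while watching the per-segment modCounts, and fall back to locking every segment only if the counts keep changing between passes. Below is a minimal standalone sketch of that retry-then-lock pattern; the CountedSegment stub, the RETRIES_BEFORE_LOCK value and the field names are illustrative only, not the actual Infinispan classes.

import java.util.concurrent.locks.ReentrantLock;

// Illustrative stand-in for a striped map segment; not the Infinispan Segment class.
final class CountedSegment extends ReentrantLock {
   volatile int count;     // number of entries in this segment
   volatile int modCount;  // bumped on every structural change
}

final class SizeSketch {
   static final int RETRIES_BEFORE_LOCK = 2; // assumed value, only mirrors the pattern

   // Sum segment counts optimistically; lock all segments only if modCounts keep moving.
   static long size(CountedSegment[] segments) {
      long sum = 0;
      int[] mc = new int[segments.length];
      for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
         sum = 0;
         long check = 0;
         int mcsum = 0;
         for (int i = 0; i < segments.length; ++i) {
            sum += segments[i].count;
            mcsum += mc[i] = segments[i].modCount;
         }
         if (mcsum != 0) {
            for (int i = 0; i < segments.length; ++i) {
               check += segments[i].count;
               if (mc[i] != segments[i].modCount) {
                  check = -1; // a segment changed under us; force another pass
                  break;
               }
            }
         }
         if (check == sum) {
            return sum; // two consistent reads: trust the optimistic sum
         }
      }
      // Still inconsistent after the retries: lock every segment and count under exclusion.
      for (CountedSegment s : segments) {
         s.lock();
      }
      try {
         sum = 0;
         for (CountedSegment s : segments) {
            sum += s.count;
         }
      } finally {
         for (CountedSegment s : segments) {
            s.unlock();
         }
      }
      return sum;
   }

   public static void main(String[] args) {
      CountedSegment[] segs = { new CountedSegment(), new CountedSegment() };
      segs[0].count = 3; segs[0].modCount = 1;
      segs[1].count = 4; segs[1].modCount = 1;
      System.out.println(size(segs)); // 7
   }
}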
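
The put, putIfAbsent, replace and remove(key, value) overrides that gain @Override annotations in this patch implement the standard ConcurrentMap contract: null values are rejected, putIfAbsent and replace return the previous mapping (or null), and the conditional forms only act when the current value matches. A short contract demo follows, run against java.util.concurrent.ConcurrentHashMap as a stand-in because the bounded map's constructors are outside this diff.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapContractSketch {
   public static void main(String[] args) {
      ConcurrentMap<String, String> map = new ConcurrentHashMap<>();

      // putIfAbsent installs a value only when no mapping exists; it returns the old value.
      System.out.println(map.putIfAbsent("k", "v1")); // null  (no previous mapping)
      System.out.println(map.putIfAbsent("k", "v2")); // v1    (existing mapping wins)

      // Conditional replace succeeds only when the current value matches.
      System.out.println(map.replace("k", "v2", "v3")); // false (current value is v1)
      System.out.println(map.replace("k", "v1", "v3")); // true
      System.out.println(map.replace("k", "v4"));       // v3    (unconditional, returns previous)

      // Conditional remove only removes a matching key/value pair.
      System.out.println(map.remove("k", "v3")); // false (current value is v4)
      System.out.println(map.remove("k", "v4")); // true

      // Null values are rejected, as documented in the patch.
      try {
         map.put("k", null);
      } catch (NullPointerException expected) {
         System.out.println("null values rejected");
      }
   }
}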
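
The keySet(), values() and entrySet() views documented in the patch are backed by the map and hand out weakly consistent iterators that never throw ConcurrentModificationException; Iterator.remove() and Entry.setValue() (WriteThroughEntry) write straight through to the map. The sketch below demonstrates those semantics against java.util.concurrent.ConcurrentHashMap, which offers the same view contract.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ViewSemanticsSketch {
   public static void main(String[] args) {
      // Stand-in map; the bounded map in this patch exposes the same ConcurrentMap views.
      ConcurrentMap<String, Integer> map = new ConcurrentHashMap<>();
      map.put("a", 1);
      map.put("b", 2);
      map.put("c", 3);

      // Iterator.remove() on a view removes the mapping from the backing map.
      Iterator<String> keys = map.keySet().iterator();
      while (keys.hasNext()) {
         if (keys.next().equals("b")) {
            keys.remove();
         }
      }
      System.out.println(map.containsKey("b")); // false

      // Entry.setValue() writes through to the map, as WriteThroughEntry does in the patch.
      for (Map.Entry<String, Integer> e : map.entrySet()) {
         e.setValue(e.getValue() * 10);
      }
      System.out.println(map); // {a=10, c=30} (iteration order may vary)

      // Weak consistency: mutating the map while iterating a view never throws
      // ConcurrentModificationException; the addition may or may not be seen.
      for (String k : map.keySet()) {
         map.putIfAbsent("d", 4);
      }
      System.out.println(map.size()); // 3
   }
}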
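
HashIterator.advance(), reformatted in this patch, walks the structure at three nested levels: the current bucket's linked chain, then the remaining buckets of the current segment from the highest index down, then the remaining segments, also from the highest index down. Here is a standalone sketch of that traversal order over a toy segmented table (plain arrays of singly linked nodes, not the Infinispan types).

import java.util.ArrayList;
import java.util.List;

public class SegmentedTraversalSketch {
   // Toy singly linked hash-chain node; stands in for HashEntry in the patch.
   static final class Node {
      final String key;
      final Node next;
      Node(String key, Node next) {
         this.key = key;
         this.next = next;
      }
   }

   // Walk segments, then buckets, then chains, highest indices first, as advance() does.
   static List<String> traverse(Node[][] segments) {
      List<String> keys = new ArrayList<>();
      for (int s = segments.length - 1; s >= 0; --s) {
         Node[] table = segments[s];
         for (int b = table.length - 1; b >= 0; --b) {
            for (Node e = table[b]; e != null; e = e.next) {
               keys.add(e.key);
            }
         }
      }
      return keys;
   }

   public static void main(String[] args) {
      Node[][] segments = {
         { new Node("a", new Node("b", null)), null },
         { null, new Node("c", null) }
      };
      System.out.println(traverse(segments)); // [c, a, b]
   }
}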
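
Finally, writeObject/readObject serialize the map as a flat stream of key/value object pairs terminated by a null/null pair, with readObject shrinking each segment to a minimal table before replaying the puts. The sketch below shows only the null-terminated pair framing, using a plain HashMap; it illustrates the wire layout, not the Infinispan serialization code.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;

public class NullTerminatedPairsSketch {
   // Write every entry as (key, value), then a null pair to mark the end of the stream.
   static void writeEntries(Map<String, Integer> map, ObjectOutputStream out) throws IOException {
      for (Map.Entry<String, Integer> e : map.entrySet()) {
         out.writeObject(e.getKey());
         out.writeObject(e.getValue());
      }
      out.writeObject(null);
      out.writeObject(null);
   }

   // Read (key, value) pairs until a null key is seen, replaying each mapping with put().
   static Map<String, Integer> readEntries(ObjectInputStream in)
         throws IOException, ClassNotFoundException {
      Map<String, Integer> map = new HashMap<>();
      for (;;) {
         String key = (String) in.readObject();
         Integer value = (Integer) in.readObject();
         if (key == null) {
            break;
         }
         map.put(key, value);
      }
      return map;
   }

   public static void main(String[] args) throws Exception {
      Map<String, Integer> original = new HashMap<>();
      original.put("a", 1);
      original.put("b", 2);

      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
         writeEntries(original, out);
      }
      try (ObjectInputStream in =
            new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
         System.out.println(readEntries(in)); // {a=1, b=2}
      }
   }
}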