[infinispan-commits] Infinispan SVN: r1834 - in trunk/core/src: main/java/org/infinispan/container/entries and 2 other directories.
infinispan-commits at lists.jboss.org
infinispan-commits at lists.jboss.org
Mon May 24 11:21:39 EDT 2010
Author: manik.surtani at jboss.com
Date: 2010-05-24 11:21:38 -0400 (Mon, 24 May 2010)
New Revision: 1834
Removed:
trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java
trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java
trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java
trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java
Modified:
trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java
trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java
trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java
Log:
Simplified InternalEntryFactory and removed deprecated classes
Modified: trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/main/java/org/infinispan/container/DefaultDataContainer.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -51,18 +51,13 @@
protected DefaultDataContainer(int concurrencyLevel) {
- this(concurrencyLevel, false, false);
- }
-
- protected DefaultDataContainer(int concurrencyLevel, boolean recordCreation, boolean recordLastUsed) {
immortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(128, 0.75f, concurrencyLevel);
mortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(64, 0.75f, concurrencyLevel);
- entryFactory = new InternalEntryFactory(recordCreation, recordLastUsed);
+ entryFactory = new InternalEntryFactory();
evictionListener = null;
}
- protected DefaultDataContainer(int concurrencyLevel, int maxEntries, EvictionStrategy strategy, EvictionThreadPolicy policy,
- boolean recordCreation, boolean recordLastUsed) {
+ protected DefaultDataContainer(int concurrencyLevel, int maxEntries, EvictionStrategy strategy, EvictionThreadPolicy policy) {
// translate eviction policy and strategy
switch (policy) {
@@ -91,7 +86,7 @@
}
immortalEntries = new BoundedConcurrentHashMap<Object, InternalCacheEntry>(maxEntries, concurrencyLevel, eviction, evictionListener);
mortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(64, 0.75f, concurrencyLevel);
- entryFactory = new InternalEntryFactory(recordCreation, recordLastUsed);
+ entryFactory = new InternalEntryFactory();
}
@Inject
@@ -100,7 +95,7 @@
}
public static DataContainer boundedDataContainer(int concurrencyLevel, int maxEntries, EvictionStrategy strategy, EvictionThreadPolicy policy) {
- return new DefaultDataContainer(concurrencyLevel, maxEntries, strategy,policy, false, false) {
+ return new DefaultDataContainer(concurrencyLevel, maxEntries, strategy,policy) {
@Override
public int size() {
Deleted: trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -1,870 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.InternalEntryFactory;
-import org.infinispan.util.Immutables;
-
-import java.util.AbstractCollection;
-import java.util.AbstractSet;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import java.util.concurrent.locks.LockSupport;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * A container that maintains order of entries based on when they were placed in the container. Iterators obtained from
- * this container maintain this order.
- * <p/>
- * This container offers constant-time operation for all public API methods.
- * <p/>
- * This is implemented using a set of lockable segments, each of which is a hash table, not unlike the JDK's {@link
- * java.util.concurrent.ConcurrentHashMap} with the exception that each entry is also linked.
- * <p/>
- * Links are maintained using techniques inspired by H. Sundell and P. Tsigas' 2008 paper, <a
- * href="http://www.md.chalmers.se/~tsigas/papers/Lock-Free-Deques-Doubly-Lists-JPDC.pdf"><i>Lock Free Deques and Doubly
- * Linked Lists</i></a>, M. Michael's 2002 paper, <a href="http://www.research.ibm.com/people/m/michael/spaa-2002.pdf"><i>High
- * Performance Dynamic Lock-Free Hash Tables and List-Based Sets</i></a>
- * <p />
- * This implementation uses a technique of delegating marker nodes similar to the technique used in Sun's
- * {@link java.util.concurrent.ConcurrentSkipListMap}, which is deemed more memory efficient and better performing than
- * {@link java.util.concurrent.atomic.AtomicMarkableReference}s.
- * <p/>
- *
- * @author Manik Surtani
- * @author Galder Zamarreño
- * @since 4.0
- */
-@ThreadSafe
-@Deprecated
-public class FIFODataContainer implements DataContainer {
-
- InternalEntryFactory entryFactory = new InternalEntryFactory(false, false);
-
- /**
- * The maximum capacity, used if a higher value is implicitly specified by either of the constructors with arguments.
- * MUST be a power of two <= 1<<30 to ensure that entries are indexable using ints.
- */
- static final int MAXIMUM_CAPACITY = 1 << 30;
-
- // -- these fields are all very similar to JDK's ConcurrentHashMap
-
- /**
- * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose the segment.
- */
- final int segmentMask;
-
- /**
- * Shift value for indexing within segments.
- */
- final int segmentShift;
-
- /**
- * The segments, each of which is a specialized hash table
- */
- final Segment[] segments;
-
- Set<Object> keySet;
-
- final LinkedEntry head = new LinkedEntry(null), tail = new LinkedEntry(null);
-
- public FIFODataContainer(int concurrencyLevel) {
- float loadFactor = 0.75f;
- int initialCapacity = 256;
-
- // Find power-of-two sizes best matching arguments
- int sshift = 0;
- int ssize = 1;
- while (ssize < concurrencyLevel) {
- ++sshift;
- ssize <<= 1;
- }
- segmentShift = 32 - sshift;
- segmentMask = ssize - 1;
- this.segments = Segment.newArray(ssize);
-
- if (initialCapacity > MAXIMUM_CAPACITY)
- initialCapacity = MAXIMUM_CAPACITY;
- int c = initialCapacity / ssize;
- if (c * ssize < initialCapacity)
- ++c;
- int cap = 1;
- while (cap < c)
- cap <<= 1;
-
- for (int i = 0; i < this.segments.length; ++i) this.segments[i] = new Segment(cap, loadFactor);
- initLinks();
- }
-
- // links and link management
-
- /**
- * Back off
- *
- * @param nanos nanos to back off for. If -1, starts at a default
- * @return next time, back off for these nanos
- */
- private static final long backoffStart = 10000;
-
- private long backoff(long nanos) {
- long actualNanos = nanos < 0 ? backoffStart : nanos;
- LockSupport.parkNanos(actualNanos);
- long newNanos = actualNanos << 1;
- return newNanos > 10000000 ? backoffStart : newNanos;
-// int millis = (1+ r.nextInt(9)) * 10;
-// LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(millis));
-// return -1;
- }
-
- /**
- * Tests whether a given linked entry is marked for deletion. In this implementation, being "marked" means that it
- * is of type Marker rather than LinkedEntry, but given the relative cost of an "instanceof" check, we prefer to test
- * the state of the InternalCacheEntry referenced by the LinkedEntry. An InternalCacheEntry *always* exists so if it
- * is null, then this is a marker (or possibly the head or tail dummy entry).
- *
- * @param e entry to test
- * @return true if the entry is marked for removal. False if it is not, or if the entry is the head or tail dummy
- * entry.
- */
- protected final boolean isMarkedForRemoval(LinkedEntry e) {
- return e != head && e != tail && e.e == null;
- }
-
- /**
- * Places a removal marker on the 'previous' reference of the given entry. Note that marking a reference does not mean
- * that the reference pointed to is marked for removal, rather it means the LinkedEntry doing the referencing is the
- * entry to be removed.
- *
- * @param e entry
- * @return true if the marking was successful, false otherwise. Could return false if the reference is already
- * marked, or if the CAS failed.
- */
- protected final boolean markPrevReference(LinkedEntry e) {
- if (isMarkedForRemoval(e.p)) return false;
- Marker m = new Marker(e.p);
- return e.casPrev(e.p, m);
- }
-
- /**
- * Places a removal marker on the 'next' reference of the given entry. Note that marking a reference does not mean that
- * the reference pointed to is marked for removal, rather it means the LinkedEntry doing the referencing is the entry
- * to be removed.
- *
- * @param e entry
- * @return true if the marking was successful, false otherwise. Could return false if the reference is already
- * marked, or if the CAS failed.
- */
- protected final boolean markNextReference(LinkedEntry e) {
- if (isMarkedForRemoval(e.n)) return false;
- Marker m = new Marker(e.n);
- return e.casNext(e.n, m);
- }
-
- /**
- * The LinkedEntry class. This entry is stored in the lockable Segments, and is also capable of being doubly
- * linked.
- */
- static class LinkedEntry {
- volatile InternalCacheEntry e;
- /**
- * Links to next and previous entries. Needs to be volatile.
- */
- volatile LinkedEntry n, p;
-
- /**
- * CAS updaters for prev and next references
- */
- private static final AtomicReferenceFieldUpdater<LinkedEntry, LinkedEntry> N_UPDATER = AtomicReferenceFieldUpdater.newUpdater(LinkedEntry.class, LinkedEntry.class, "n");
- private static final AtomicReferenceFieldUpdater<LinkedEntry, LinkedEntry> P_UPDATER = AtomicReferenceFieldUpdater.newUpdater(LinkedEntry.class, LinkedEntry.class, "p");
-
- /**
- * LinkedEntries must always have a valid InternalCacheEntry.
- *
- * @param e internal cache entry
- */
- LinkedEntry(InternalCacheEntry e) {
- this.e = e;
- }
-
- final boolean casNext(LinkedEntry expected, LinkedEntry newValue) {
- return N_UPDATER.compareAndSet(this, expected, newValue);
- }
-
- final boolean casPrev(LinkedEntry expected, LinkedEntry newValue) {
- return P_UPDATER.compareAndSet(this, expected, newValue);
- }
-
- @Override
- public String toString() {
- return "E" + Integer.toHexString(System.identityHashCode(this));
- }
- }
-
- /**
- * A marker. If a reference in LinkedEntry (either to its previous or next entry) needs to be marked, it should be
- * CAS'd with an instance of Marker that points to the actual entry. Typically this is done by calling {@link
- * FIFODataContainer#markNextReference(org.infinispan.container.FIFODataContainer.LinkedEntry)} or {@link
- * FIFODataContainer#markPrevReference(org.infinispan.container.FIFODataContainer.LinkedEntry)}
- */
- static final class Marker extends LinkedEntry {
- Marker(LinkedEntry actual) {
- super(null);
- n = actual;
- p = actual;
- }
-
- @Override
- public String toString() {
- return "M" + Integer.toHexString(System.identityHashCode(this));
- }
- }
-
- /**
- * Initializes links to an empty container
- */
- protected final void initLinks() {
- head.n = tail;
- head.p = tail;
- tail.n = head;
- tail.p = head;
- }
-
- /**
- * Un-links an entry from the doubly linked list in a threadsafe, lock-free manner. The entry is typically retrieved
- * using Segment#locklessRemove() after locking the Segment.
- *
- * @param entry entry to unlink
- */
- protected final void unlink(LinkedEntry entry) {
- if (entry == head || entry == tail) return;
- for (; ;) {
- LinkedEntry next = entry.n;
- if (isMarkedForRemoval(next)) return;
- LinkedEntry prev;
- if (markNextReference(entry)) {
- next = entry.n;
- while (true) {
- prev = entry.p;
- if (isMarkedForRemoval(prev) || markPrevReference(entry)) {
- prev = entry.p;
- break;
- }
- }
- prev = correctPrev(prev.p, next.n);
- }
- }
- }
-
- /**
- * Links a new entry at the end of the linked list. Typically done when a put() creates a new entry, or if ordering
- * needs to be updated based on access. If this entry already exists in the linked list, it should first be {@link
- * #unlink(org.infinispan.container.FIFODataContainer.LinkedEntry)}ed.
- *
- * @param entry entry to link at end
- */
- // Corresponds to PushLeft() in the Sundell/Tsigas paper
- protected final void linkAtEnd(LinkedEntry entry) {
- LinkedEntry prev = tail.p;
- long backoffTime = -1;
- for (; ;) {
- entry.p = unmarkPrevIfNeeded(prev);
- entry.n = tail;
- if (prev.casNext(tail, entry)) break;
- prev = correctPrev(prev, tail);
- backoffTime = backoff(backoffTime);
- }
-
- backoffTime = -1;
- for (; ;) {
- LinkedEntry l1 = tail.p;
- if (isMarkedForRemoval(l1) || entry.n != tail) break;
- if (tail.casPrev(l1, entry)) {
- if (isMarkedForRemoval(entry.p)) correctPrev(entry, tail);
- break;
- }
- backoffTime = backoff(backoffTime);
- }
- }
-
- /**
- * Retrieves the next entry after a given entry, skipping marked entries accordingly.
- *
- * @param current current entry to inspect
- * @return the next valid entry, or null if we have reached the end of the list.
- */
- protected final LinkedEntry getNext(LinkedEntry current) {
- for (; ;) {
- if (current == tail) return null;
- LinkedEntry next = current.n;
- if (isMarkedForRemoval(next)) next = next.n;
- boolean marked = isMarkedForRemoval(next.n);
- if (marked && !isMarkedForRemoval(current.n)) {
- markPrevReference(next);
- current.casNext(next, next.n.n); // since next.n is a marker
- continue;
- }
- current = next;
- if (!marked && next != tail) return current;
- }
- }
-
- /**
- * Correct 'previous' links. This 'helper' function is used if unable to properly set previous pointers (due to a
- * concurrent update) and is used when traversing the list in reverse.
- *
- * @param suggestedPreviousEntry suggested previous entry
- * @param currentEntry current entry
- * @return the actual valid, previous entry. Links are also corrected in the process.
- */
- protected final LinkedEntry correctPrev(LinkedEntry suggestedPreviousEntry, LinkedEntry currentEntry) {
-// verifyLL();
- LinkedEntry lastLink = null, link1, prev2;
- LinkedEntry prev = suggestedPreviousEntry, node = currentEntry;
- long backoffTime = -1;
- while (true) {
- link1 = node.p;
- if (isMarkedForRemoval(link1)) break;
- prev2 = prev.n;
- if (isMarkedForRemoval(prev2)) {
- if (lastLink != null) {
- markPrevReference(prev);
- LinkedEntry unmarkedPrev2P = unmarkPrevIfNeeded(prev2.p);
- lastLink.casNext(prev, unmarkedPrev2P);
- prev = lastLink;
- lastLink = null;
- continue;
- }
- prev2 = prev.p;
- prev = prev2;
- continue;
- }
-
- if (prev2 != node) {
- lastLink = prev;
- prev = prev2;
- continue;
- }
-
- LinkedEntry unmarked = unmarkPrevIfNeeded(prev);
- if (node.casPrev(link1, unmarked)) {
- if (isMarkedForRemoval(prev.p)) {
- continue;
- }
- break;
- }
- backoffTime = backoff(backoffTime);
- }
- return prev;
- }
-
- private LinkedEntry unmarkPrevIfNeeded(LinkedEntry e) {
- if (isMarkedForRemoval(e)) return e.p;
- else return e;
- }
-
-
- /**
- * Similar to ConcurrentHashMap's hash() function: applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. This is critical because ConcurrentHashMap uses power-of-two length
- * hash tables, that otherwise encounter collisions for hashCodes that do not differ in lower or upper bits.
- */
- final int hashOld(int h) {
- // Spread bits to regularize both segment and index locations,
- // using variant of single-word Wang/Jenkins hash.
- h += (h << 15) ^ 0xffffcd7d;
- h ^= (h >>> 10);
- h += (h << 3);
- h ^= (h >>> 6);
- h += (h << 2) + (h << 14);
- return h ^ (h >>> 16);
- }
-
- /**
- * Use the objects built in hash to obtain an initial value, then use a second four byte hash to obtain a more
- * uniform distribution of hash values. This uses a <a href = "http://burtleburtle.net/bob/hash/integer.html">4-byte
- * (integer) hash</a>, which produces well distributed values even when the original hash produces tightly clustered
- * values.
- * <p />
- * Contributed by akluge <a href="http://www.vizitsolutions.com/ConsistentHashingCaching.html">http://www.vizitsolutions.com/ConsistentHashingCaching.html</a>
- */
- final int hash(int hash) {
- hash = (hash + 0x7ED55D16) + (hash << 12);
- hash = (hash ^ 0xc761c23c) ^ (hash >> 19);
- hash = (hash + 0x165667b1) + (hash << 5);
- hash = (hash + 0xd3a2646c) ^ (hash << 9);
- hash = (hash + 0xfd7046c5) + (hash << 3);
- hash = (hash ^ 0xb55a4f09) ^ (hash >> 16);
-
- return hash;
- }
-
- /**
- * Returns the segment that should be used for key with given hash
- *
- * @param hash the hash code for the key
- * @return the segment
- */
- final Segment segmentFor(int hash) {
- return segments[(hash >>> segmentShift) & segmentMask];
- }
-
- /**
- * ConcurrentHashMap list entry. Note that this is never exported out as a user-visible Map.Entry.
- * <p/>
- * Because the value field is volatile, not final, it is legal wrt the Java Memory Model for an unsynchronized reader
- * to see null instead of initial value when read via a data race. Although a reordering leading to this is not
- * likely to ever actually occur, the Segment.readValueUnderLock method is used as a backup in case a null
- * (pre-initialized) value is ever seen in an unsynchronized access method.
- */
- static final class HashEntry {
- final Object key;
- final int hash;
- volatile LinkedEntry value;
- final HashEntry next;
-
- HashEntry(Object key, int hash, HashEntry next, LinkedEntry value) {
- this.key = key;
- this.hash = hash;
- this.next = next;
- this.value = value;
- }
- }
-
- /**
- * Very similar to a Segment in a ConcurrentHashMap
- */
- static final class Segment extends ReentrantLock {
-
- private static final long serialVersionUID = 6057507888185462418L;
-
- /**
- * The number of elements in this segment's region.
- */
- transient volatile int count;
-
- /**
- * The table is rehashed when its size exceeds this threshold. (The value of this field is always
- * <tt>(int)(capacity * loadFactor)</tt>.)
- */
- transient int threshold;
-
- /**
- * The per-segment table.
- */
- transient volatile HashEntry[] table;
-
- /**
- * The load factor for the hash table. Even though this value is same for all segments, it is replicated to avoid
- * needing links to outer object.
- *
- * @serial
- */
- final float loadFactor;
-
- Segment(int initialCapacity, float lf) {
- loadFactor = lf;
- setTable(new HashEntry[initialCapacity]);
- }
-
- static Segment[] newArray(int i) {
- return new Segment[i];
- }
-
- /**
- * Sets table to new HashEntry array. Call only while holding lock or in constructor.
- */
- final void setTable(HashEntry[] newTable) {
- threshold = (int) (newTable.length * loadFactor);
- table = newTable;
- }
-
- /**
- * Returns properly casted first entry of bin for given hash.
- */
- final HashEntry getFirst(int hash) {
- HashEntry[] tab = table;
- return tab[hash & (tab.length - 1)];
- }
-
- /**
- * Reads value field of an entry under lock. Called if value field ever appears to be null. This is possible only
- * if a compiler happens to reorder a HashEntry initialization with its table assignment, which is legal under
- * memory model but is not known to ever occur.
- */
- final LinkedEntry readValueUnderLock(HashEntry e) {
- lock();
- try {
- return e.value;
- } finally {
- unlock();
- }
- }
-
- /* Specialized implementations of map methods */
-
- final LinkedEntry get(Object key, int hash) {
- if (count != 0) { // read-volatile
- HashEntry e = getFirst(hash);
- while (e != null) {
- if (e.hash == hash && key.equals(e.key)) {
- LinkedEntry v = e.value;
- if (v != null)
- return v;
- return readValueUnderLock(e); // recheck
- }
- e = e.next;
- }
- }
- return null;
- }
-
- /**
- * This put is lockless. Make sure you call segment.lock() first.
- */
- final LinkedEntry locklessPut(Object key, int hash, LinkedEntry value) {
- int c = count;
- if (c++ > threshold) // ensure capacity
- rehash();
- HashEntry[] tab = table;
- int index = hash & (tab.length - 1);
- HashEntry first = tab[index];
- HashEntry e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key)))
- e = e.next;
-
- LinkedEntry oldValue;
- if (e != null) {
- oldValue = e.value;
- e.value = value;
- } else {
- oldValue = null;
- tab[index] = new HashEntry(key, hash, first, value);
- count = c; // write-volatile
- }
- return oldValue;
- }
-
- final void rehash() {
- HashEntry[] oldTable = table;
- int oldCapacity = oldTable.length;
- if (oldCapacity >= MAXIMUM_CAPACITY)
- return;
-
- /*
- * Reclassify nodes in each list to new Map. Because we are
- * using power-of-two expansion, the elements from each bin
- * must either stay at same index, or move with a power of two
- * offset. We eliminate unnecessary node creation by catching
- * cases where old nodes can be reused because their next
- * fields won't change. Statistically, at the default
- * threshold, only about one-sixth of them need cloning when
- * a table doubles. The nodes they replace will be garbage
- * collectable as soon as they are no longer referenced by any
- * reader thread that may be in the midst of traversing table
- * right now.
- */
-
- HashEntry[] newTable = new HashEntry[oldCapacity << 1];
- threshold = (int) (newTable.length * loadFactor);
- int sizeMask = newTable.length - 1;
- for (int i = 0; i < oldCapacity; i++) {
- // We need to guarantee that any existing reads of old Map can
- // proceed. So we cannot yet null out each bin.
- HashEntry e = oldTable[i];
-
- if (e != null) {
- HashEntry next = e.next;
- int idx = e.hash & sizeMask;
-
- // Single node on list
- if (next == null)
- newTable[idx] = e;
-
- else {
- // Reuse trailing consecutive sequence at same slot
- HashEntry lastRun = e;
- int lastIdx = idx;
- for (HashEntry last = next;
- last != null;
- last = last.next) {
- int k = last.hash & sizeMask;
- if (k != lastIdx) {
- lastIdx = k;
- lastRun = last;
- }
- }
- newTable[lastIdx] = lastRun;
-
- // Clone all remaining nodes
- for (HashEntry p = e; p != lastRun; p = p.next) {
- int k = p.hash & sizeMask;
- HashEntry n = newTable[k];
- newTable[k] = new HashEntry(p.key, p.hash, n, p.value);
- }
- }
- }
- }
- table = newTable;
- }
-
- /**
- * This is a lockless remove. Make sure you acquire locks using segment.lock() first.
- */
- final LinkedEntry locklessRemove(Object key, int hash) {
- int c = count - 1;
- HashEntry[] tab = table;
- int index = hash & (tab.length - 1);
- HashEntry first = tab[index];
- HashEntry e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key)))
- e = e.next;
-
- LinkedEntry oldValue = null;
- if (e != null) {
- oldValue = e.value;
- // All entries following removed node can stay
- // in list, but all preceding ones need to be
- // cloned.
- HashEntry newFirst = e.next;
- for (HashEntry p = first; p != e; p = p.next)
- newFirst = new HashEntry(p.key, p.hash,
- newFirst, p.value);
- tab[index] = newFirst;
- count = c; // write-volatile
-
- }
- return oldValue;
- }
-
- /**
- * This is a lockless clear. Ensure you acquire locks on the segment first using segment.lock().
- */
- final void locklessClear() {
- if (count != 0) {
- HashEntry[] tab = table;
- for (int i = 0; i < tab.length; i++)
- tab[i] = null;
- count = 0; // write-volatile
- }
- }
- }
-
-
- protected final class KeySet extends AbstractSet<Object> {
- public Iterator<Object> iterator() {
- return new KeyIterator();
- }
-
- public int size() {
- return FIFODataContainer.this.size();
- }
- }
-
- protected final class Values extends AbstractCollection<Object> {
- public Iterator<Object> iterator() {
- return new ValueIterator();
- }
-
- public int size() {
- return FIFODataContainer.this.size();
- }
- }
-
- protected final class EntrySet extends AbstractSet<InternalCacheEntry> {
- public Iterator<InternalCacheEntry> iterator() {
- return new ImmutableEntryIterator();
- }
-
- public int size() {
- return FIFODataContainer.this.size();
- }
- }
-
- protected abstract class LinkedIterator {
- LinkedEntry current = head;
-
- public boolean hasNext() {
- if (current == tail) return false;
- current = getNext(current);
- return current != null;
- }
-
- public void remove() {
- throw new UnsupportedOperationException();
- }
- }
-
- protected final class EntryIterator extends LinkedIterator implements Iterator<InternalCacheEntry> {
- public InternalCacheEntry next() {
- return current.e;
- }
- }
-
- protected final class ImmutableEntryIterator extends LinkedIterator implements Iterator<InternalCacheEntry> {
- public InternalCacheEntry next() {
- return Immutables.immutableInternalCacheEntry(current.e);
- }
- }
-
- protected final class KeyIterator extends LinkedIterator implements Iterator<Object> {
- public Object next() {
- return current.e.getKey();
- }
- }
-
- protected final class ValueIterator extends LinkedIterator implements Iterator<Object> {
- public Object next() {
- return current.e.getValue();
- }
- }
-
-
- // ----------- PUBLIC API ---------------
-
- public InternalCacheEntry get(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- if (ice != null) {
- if (ice.isExpired()) {
- remove(k);
- ice = null;
- } else {
- ice.touch();
- }
- }
- return ice;
- }
-
- public InternalCacheEntry peek(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- return ice;
- }
-
- public void put(Object k, Object v, long lifespan, long maxIdle) {
- // do a normal put first.
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- s.lock();
- LinkedEntry le;
- boolean newEntry = false;
- try {
- le = s.get(k, h);
- InternalCacheEntry ice = le == null ? null : le.e;
- if (ice == null) {
- newEntry = true;
- ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
- // only update linking if this is a new entry
- le = new LinkedEntry(ice);
- } else {
- ice.setValue(v);
- ice = entryFactory.update(ice, lifespan, maxIdle);
- // need to do this anyway since the ICE impl may have changed
- le.e = ice;
- }
-
- s.locklessPut(k, h, le);
-
- if (newEntry) {
- linkAtEnd(le);
- }
- } finally {
- s.unlock();
- }
- }
-
- public boolean containsKey(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- if (ice != null) {
- if (ice.isExpired()) {
- remove(k);
- ice = null;
- }
- }
-
- return ice != null;
- }
-
- public InternalCacheEntry remove(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- s.lock();
- InternalCacheEntry ice = null;
- LinkedEntry le;
- try {
- le = s.locklessRemove(k, h);
- if (le != null) {
- ice = le.e;
- unlink(le);
- }
- } finally {
- s.unlock();
- }
-
- if (ice == null || ice.isExpired())
- return null;
- else
- return ice;
- }
-
- public int size() {
- // approximate sizing is good enough
- int sz = 0;
- final Segment[] segs = segments;
- for (Segment s : segs) sz += s.count;
- return sz;
- }
-
- public void clear() {
- // This is expensive...
- // lock all segments
- for (Segment s : segments) s.lock();
- try {
- for (Segment s : segments) s.locklessClear();
- initLinks();
- } finally {
- for (Segment s : segments) s.unlock();
- }
- }
-
- public Set<Object> keySet() {
- if (keySet == null) keySet = new KeySet();
- return keySet;
- }
-
- public Collection<Object> values() {
- return new Values();
- }
-
- public Set<InternalCacheEntry> entrySet() {
- return new EntrySet();
- }
-
- public void purgeExpired() {
- for (InternalCacheEntry ice : this) {
- if (ice.isExpired()) remove(ice.getKey());
- }
- }
-
- public Iterator<InternalCacheEntry> iterator() {
- return new EntryIterator();
- }
-
- @Override
- public Set<InternalCacheEntry> getEvictionCandidates() {
- return Collections.emptySet();
- }
-}
Deleted: trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -1,81 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.util.TimSort;
-
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.Iterator;
-
-/**
- * Based on the same techniques outlined in the {@link DefaultDataContainer}, this implementation always forces the
- * collection of creation timestamps for entries. This means that {@link org.infinispan.container.entries.ImmortalCacheEntry}
- * and {@link org.infinispan.container.entries.TransientCacheEntry} are never used, since only {@link org.infinispan.container.entries.MortalCacheEntry}
- * and {@link org.infinispan.container.entries.TransientMortalCacheEntry} instances capture timestamps.
- * <p/>
- * All gets, puts, etc are constant time operations.
- * <p/>
- * Iteration incurs a O(N log(N)) cost since the timestamps are sorted first, and there is an added memory overhead in
- * temporary space to hold sorted references. When sorting, this implementation does not use the millisecond granularity
- * when ordering timestamps; instead it defaults to a 1-second granularity since the FIFO ordering does not need to be
- * strict and the TimSort implementation used for sorting performs significantly better with minimal reordering offered
- * by a coarser granularity.
- * <p/>
- *
- * @author Manik Surtani
- * @since 4.0
- */
-@Deprecated
-@ThreadSafe
-public class FIFOSimpleDataContainer extends DefaultDataContainer {
- // This is to facilitate faster sorting. DO we really care about millisecond accuracy when ordering the collection?
- final static int DEFAULT_TIMESTAMP_GRANULARITY = 1000;
-
- private final Comparator<InternalCacheEntry> COMPARATOR;
-
- public FIFOSimpleDataContainer(int concurrencyLevel) {
- this(concurrencyLevel, true, false, new FIFOComparator(DEFAULT_TIMESTAMP_GRANULARITY));
- }
-
- public FIFOSimpleDataContainer(int concurrencyLevel, int timestampGranularity) {
- this(concurrencyLevel, true, false, new FIFOComparator(timestampGranularity));
- }
-
- FIFOSimpleDataContainer(int concurrencyLevel, boolean recordCreated, boolean recordLastUsed, Comparator<InternalCacheEntry> c) {
- super(concurrencyLevel, recordCreated, recordLastUsed);
- COMPARATOR = c;
- }
-
-
- @Override
- public Iterator<InternalCacheEntry> iterator() {
- InternalCacheEntry[] sortedEntries = new InternalCacheEntry[immortalEntries.size() + mortalEntries.size()];
- int i=0;
- for (InternalCacheEntry ice: immortalEntries.values()){
- if (i == sortedEntries.length) break;
- sortedEntries[i++] = ice;
- }
-
- for (InternalCacheEntry ice: mortalEntries.values()){
- if (i == sortedEntries.length) break;
- sortedEntries[i++] = ice;
- }
-
- TimSort.sort(sortedEntries, COMPARATOR);
- return Arrays.asList(sortedEntries).iterator();
- }
-
- private static final class FIFOComparator implements Comparator<InternalCacheEntry> {
- int timestampGranularity;
-
- private FIFOComparator(int timestampGranularity) {
- this.timestampGranularity = timestampGranularity;
- }
-
- @Override
- public int compare(InternalCacheEntry o1, InternalCacheEntry o2) {
- return (int) o1.getCreated() / timestampGranularity - (int) o2.getCreated() / timestampGranularity;
- }
- }
-}
\ No newline at end of file
Deleted: trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -1,96 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.InternalEntryFactory;
-
-/**
- * Based on the same techniques outlined in the {@link org.infinispan.container.FIFODataContainer}, this implementation
- * additionally unlinks and re-links entries at the tail whenever entries are visited (using a get()) or are updated (a
- * put() on an existing key).
- * <p/>
- * Again, these are constant-time operations.
- * <p/>
- * Note though that this implementation does have a far lesser degree of concurrency when compared with its FIFO variant
- * due to the segment locking necessary even when doing a get() (since gets reorder links). This has a knock-on effect
- * not just on get() but even on other write() operations since they all compete for the same segment lock (when working
- * on keys mapped to the same segment, of course).
- *
- * @author Manik Surtani
- * @since 4.0
- */
-@ThreadSafe
-@Deprecated
-public class LRUDataContainer extends FIFODataContainer {
-
- public LRUDataContainer(int concurrencyLevel) {
- super(concurrencyLevel);
- }
-
- @Override
- public InternalCacheEntry get(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
-
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- if (ice != null) {
- if (ice.isExpired()) {
- remove(k);
- ice = null;
- } else {
- ice.touch();
- boolean needToUnlockSegment = false;
- try {
- s.lock(); // we need to lock this segment to safely update links
- needToUnlockSegment = true;
- updateLinks(le);
- } finally {
- if (needToUnlockSegment) s.unlock();
- }
- }
- }
- return ice;
- }
-
- @Override
- public void put(Object k, Object v, long lifespan, long maxIdle) {
- // do a normal put first.
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- s.lock();
- LinkedEntry le;
- boolean newEntry = false;
- try {
- le = s.get(k, h);
- InternalCacheEntry ice = le == null ? null : le.e;
- if (ice == null) {
- newEntry = true;
- ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
- le = new LinkedEntry(ice);
- } else {
- ice.setValue(v);
- ice = entryFactory.update(ice, lifespan, maxIdle);
- // need to do this anyway since the ICE impl may have changed
- le.e = ice;
- }
-
- s.locklessPut(k, h, le);
-
- if (newEntry) {
- linkAtEnd(le);
- } else {
- updateLinks(le);
- }
-
- } finally {
- s.unlock();
- }
- }
-
- protected final void updateLinks(LinkedEntry le) {
- unlink(le);
- linkAtEnd(le);
- }
-}
\ No newline at end of file
Deleted: trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -1,49 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-
-import java.util.Comparator;
-
-/**
- * Based on the same techniques outlined in the {@link DefaultDataContainer}, this implementation always forces the
- * collection of last used timestamps for entries. This means that {@link org.infinispan.container.entries.ImmortalCacheEntry}
- * and {@link org.infinispan.container.entries.MortalCacheEntry} are never used, since only {@link org.infinispan.container.entries.TransientCacheEntry}
- * and {@link org.infinispan.container.entries.TransientMortalCacheEntry} instances capture timestamps.
- * <p/>
- * All gets, puts, etc are constant time operations.
- * <p/>
- * Iteration incurs a O(N log(N)) cost since the timestamps are sorted first, and there is an added memory overhead in
- * temporary space to hold sorted references. When sorting, this implementation does not use the millisecond granularity
- * when ordering timestamps; instead it defaults to a 1-second granularity since the LRU ordering does not need to be
- * strict and the TimSort implementation used for sorting performs significantly better with minimal reordering offered
- * by a coarser granularity.
- * <p/>
- *
- * @author Manik Surtani
- * @since 4.0
- */
-@ThreadSafe
-@Deprecated
-public class LRUSimpleDataContainer extends FIFOSimpleDataContainer {
- public LRUSimpleDataContainer(int concurrencyLevel) {
- super(concurrencyLevel, false, true, new LRUComparator(DEFAULT_TIMESTAMP_GRANULARITY));
- }
-
- public LRUSimpleDataContainer(int concurrencyLevel, int timestampGranularity) {
- super(concurrencyLevel, false, true, new LRUComparator(timestampGranularity));
- }
-
- private static final class LRUComparator implements Comparator<InternalCacheEntry> {
- int timestampGranularity;
-
- private LRUComparator(int timestampGranularity) {
- this.timestampGranularity = timestampGranularity;
- }
-
- @Override
- public int compare(InternalCacheEntry o1, InternalCacheEntry o2) {
- return (int) o1.getLastUsed() / timestampGranularity - (int) o2.getLastUsed() / timestampGranularity;
- }
- }
-}
\ No newline at end of file
Modified: trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -8,13 +8,6 @@
*/
public class InternalEntryFactory {
- boolean recordCreation, recordLastUsed;
-
- public InternalEntryFactory(boolean recordCreation, boolean recordLastUsed) {
- this.recordCreation = recordCreation;
- this.recordLastUsed = recordLastUsed;
- }
-
public static InternalCacheEntry create(Object key, Object value) {
return new ImmortalCacheEntry(key, value);
}
@@ -49,31 +42,14 @@
}
public InternalCacheEntry createNewEntry(Object key, Object value, long lifespan, long maxIdle) {
- if (lifespan < 0 && maxIdle < 0) {
- if (recordCreation || recordLastUsed) {
- if (recordCreation && !recordLastUsed) return new MortalCacheEntry(key, value, -1);
- if (!recordCreation && recordLastUsed) return new TransientCacheEntry(key, value, -1);
- return new TransientMortalCacheEntry(key, value, -1, -1);
- } else {
- return new ImmortalCacheEntry(key, value);
- }
- }
+ if (lifespan < 0 && maxIdle < 0)
+ return new ImmortalCacheEntry(key, value);
- if (lifespan > -1 && maxIdle < 0) {
- if (recordLastUsed) {
- return new TransientMortalCacheEntry(key, value, -1, lifespan);
- } else {
- return new MortalCacheEntry(key, value, lifespan);
- }
- }
+ if (lifespan > -1 && maxIdle < 0)
+ return new MortalCacheEntry(key, value, lifespan);
- if (lifespan < 0 && maxIdle > -1) {
- if (recordCreation) {
- return new TransientMortalCacheEntry(key, value, maxIdle, -1);
- } else {
- return new TransientCacheEntry(key, value, maxIdle);
- }
- }
+ if (lifespan < 0 && maxIdle > -1)
+ return new TransientCacheEntry(key, value, maxIdle);
// else...
return new TransientMortalCacheEntry(key, value, maxIdle, lifespan);
@@ -108,18 +84,9 @@
} else if (ice instanceof MortalCacheEntry) {
if (lifespan < 0) {
if (maxIdle < 0) {
- if (recordCreation) {
- ice.setLifespan(-1);
- return ice;
- } else {
- return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
- }
+ return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
} else {
- if (recordCreation) {
- return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, -1, System.currentTimeMillis(), ice.getCreated());
- } else {
- return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle);
- }
+ return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle);
}
} else {
if (maxIdle < 0) {
@@ -132,23 +99,14 @@
} else if (ice instanceof TransientCacheEntry) {
if (lifespan < 0) {
if (maxIdle < 0) {
- if (recordLastUsed) {
- ice.setMaxIdle(-1);
- return ice;
- } else {
- return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
- }
+ return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
} else {
ice.setMaxIdle(maxIdle);
return ice;
}
} else {
if (maxIdle < 0) {
- if (recordLastUsed) {
- return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, lifespan, ice.getLastUsed(), System.currentTimeMillis());
- } else {
- return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan);
- }
+ return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan);
} else {
return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, lifespan, System.currentTimeMillis(), ice.getCreated());
}
@@ -156,35 +114,13 @@
} else if (ice instanceof TransientMortalCacheEntry) {
if (lifespan < 0) {
if (maxIdle < 0) {
- if (recordCreation || recordLastUsed) {
- if (recordLastUsed && recordCreation) {
- ice.setLifespan(lifespan);
- ice.setMaxIdle(maxIdle);
- return ice;
- } else if (recordCreation) {
- return new MortalCacheEntry(ice.getKey(), ice.getValue(), -1, ice.getCreated());
- } else {
- return new TransientCacheEntry(ice.getKey(), ice.getValue(), -1, ice.getLastUsed());
- }
- } else {
- return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
- }
+ return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
} else {
- if (recordCreation) {
- return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, -1, System.currentTimeMillis(), ice.getCreated());
- } else {
- return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle);
- }
+ return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle);
}
} else {
if (maxIdle < 0) {
- if (recordLastUsed) {
- ice.setLifespan(lifespan);
- ice.setMaxIdle(maxIdle);
- return ice;
- } else {
- return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan);
- }
+ return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan);
} else {
ice.setLifespan(lifespan);
ice.setMaxIdle(maxIdle);
Deleted: trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -1,126 +0,0 @@
-package org.infinispan.container;
-
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.MortalCacheEntry;
-import org.infinispan.container.entries.TransientMortalCacheEntry;
-import org.infinispan.test.TestingUtil;
-import org.testng.annotations.Test;
-
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-
-@Test(groups = "unit", testName = "container.FIFODataContainerTest")
-public class FIFODataContainerTest extends SimpleDataContainerTest {
-
- @Override
- protected DataContainer createContainer() {
- return new FIFOSimpleDataContainer(16, 1);
- }
-
- @Override
- protected Class<? extends InternalCacheEntry> transienttype() {
- return TransientMortalCacheEntry.class;
- }
-
- @Override
- protected Class<? extends InternalCacheEntry> immortaltype() {
- return MortalCacheEntry.class;
- }
-
- public void testOrdering() {
- long lifespan = 600000;
- long idle = 600000;
- for (int i = 0; i < 10; i++) {
- dc.put("k" + i, "value", -1, -1);
- TestingUtil.sleepThread(10);
- }
- for (int i = 10; i < 20; i++) {
- dc.put("k" + i, "value", lifespan, -1);
- TestingUtil.sleepThread(10);
- }
- for (int i = 20; i < 30; i++) {
- dc.put("k" + i, "value", -1, idle);
- TestingUtil.sleepThread(10);
- }
- for (int i = 30; i < 40; i++) {
- dc.put("k" + i, "value", lifespan, idle);
- TestingUtil.sleepThread(10);
- }
-
- // random visits
- Random r = new Random();
- for (int i = 0; i < 100; i++) {
- dc.get("k" + r.nextInt(40));
- TestingUtil.sleepThread(10);
- }
-
- // ensure order is maintained.
- int i = 0;
- for (InternalCacheEntry ice : dc) {
- assert ice.getKey().equals("k" + i);
- if (i < 10) assert ice.getClass().equals(immortaltype());
- if (i >= 10 && i < 20) assert ice.getClass().equals(mortaltype());
- if (i >= 20 && i < 30) assert ice.getClass().equals(transienttype());
- if (i >= 30 && i < 40) assert ice instanceof TransientMortalCacheEntry;
- i++;
- }
- }
-
- public void testMultithreadAccess() throws InterruptedException {
- assert dc.size() == 0;
- int NUM_THREADS = 10;
- long testDuration = 2000; // millis
-
- Random r = new Random();
- CountDownLatch startLatch = new CountDownLatch(1);
-
- Worker[] workers = new Worker[NUM_THREADS];
- for (int i = 0; i < NUM_THREADS; i++) workers[i] = new Worker("Worker-" + i, r, startLatch);
- for (Worker w : workers) w.start();
-
- startLatch.countDown();
-
- Thread.sleep(testDuration); // generate some noise
-
- for (Worker w : workers) w.running = false;
- for (Worker w : workers) w.join();
- }
-
- protected final class Worker extends Thread {
- CountDownLatch startLatch;
- Random r;
- volatile boolean running = true;
-
- public Worker(String name, Random r, CountDownLatch startLatch) {
- super(name);
- this.r = r;
- this.startLatch = startLatch;
- }
-
- @Override
- public void run() {
- try {
- startLatch.await();
- } catch (InterruptedException ignored) {
- }
-
- while (running) {
- try {
- sleep(r.nextInt(5) * 10);
- } catch (InterruptedException ignored) {
- }
- switch (r.nextInt(3)) {
- case 0:
- dc.put("key" + r.nextInt(100), "value", -1, -1);
- break;
- case 1:
- dc.remove("key" + r.nextInt(100));
- break;
- case 2:
- dc.get("key" + r.nextInt(100));
- break;
- }
- }
- }
- }
-}
\ No newline at end of file
Deleted: trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -1,74 +0,0 @@
-package org.infinispan.container;
-
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.TransientCacheEntry;
-import org.infinispan.container.entries.TransientMortalCacheEntry;
-import org.infinispan.test.TestingUtil;
-import org.testng.annotations.Test;
-
-@Test(groups = "unit", testName = "container.LRUDataContainerTest")
-public class LRUDataContainerTest extends FIFODataContainerTest {
- @Override
- protected DataContainer createContainer() {
- return new LRUSimpleDataContainer(16, 1);
- }
-
- @Override
- protected Class<? extends InternalCacheEntry> mortaltype() {
- return TransientMortalCacheEntry.class;
- }
-
- @Override
- protected Class<? extends InternalCacheEntry> immortaltype() {
- return TransientCacheEntry.class;
- }
-
- @Override
- protected Class<? extends InternalCacheEntry> transienttype() {
- return TransientCacheEntry.class;
- }
-
- @Override
- public void testOrdering() {
- long lifespan = 600000;
- long idle = 600000;
- for (int i = 0; i < 10; i++) {
- dc.put(i, "value", -1, -1);
- TestingUtil.sleepThread(10);
- }
- for (int i = 10; i < 20; i++) {
- dc.put(i, "value", lifespan, -1);
- TestingUtil.sleepThread(10);
- }
- for (int i = 20; i < 30; i++) {
- dc.put(i, "value", -1, idle);
- TestingUtil.sleepThread(10);
- }
- for (int i = 30; i < 40; i++) {
- dc.put(i, "value", lifespan, idle);
- TestingUtil.sleepThread(10);
- }
-
- // Visit all ODD numbered elements
- for (int i = 0; i < 40; i++) {
- if (i % 2 == 1) dc.get(i);
- TestingUtil.sleepThread(10);
- }
-
- // ensure order is maintained. The first 20 elements should be EVEN.
- int i = 0;
- for (InternalCacheEntry ice : dc) {
- Integer key = (Integer) ice.getKey();
- if (i < 20)
- assert key % 2 == 0;
- else
- assert key % 2 == 1;
-
- if (key < 10) assert ice.getClass().equals(immortaltype());
- if (key >= 10 && key < 20) assert ice.getClass().equals(mortaltype());
- if (key >= 20 && key < 30) assert ice.getClass().equals(transienttype()) : "Expected " + transienttype() + " for key " + key + " but was " + ice.getClass();
- if (key >= 30 && key < 40) assert ice instanceof TransientMortalCacheEntry;
- i++;
- }
- }
-}
\ No newline at end of file
Modified: trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java 2010-05-24 11:46:22 UTC (rev 1833)
+++ trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java 2010-05-24 15:21:38 UTC (rev 1834)
@@ -36,22 +36,6 @@
doTest(DefaultDataContainer.unBoundedDataContainer(5000));
}
- public void testFIFODataContainer() throws InterruptedException {
- doTest(new FIFODataContainer(5000));
- }
-
- public void testLRUDataContainer() throws InterruptedException {
- doTest(new LRUDataContainer(5000));
- }
-
- public void testLRUSimpleDataContainer() throws InterruptedException {
- doTest(new LRUSimpleDataContainer(5000));
- }
-
- public void testFIFOSimpleDataContainer() throws InterruptedException {
- doTest(new FIFOSimpleDataContainer(5000));
- }
-
private void doTest(final DataContainer dc) throws InterruptedException {
doTest(dc, true);
doTest(dc, false);
More information about the infinispan-commits
mailing list