[infinispan-commits] Infinispan SVN: r1264 - in trunk/core/src: main/java/org/infinispan/container/entries and 4 other directories.
infinispan-commits at lists.jboss.org
infinispan-commits at lists.jboss.org
Wed Dec 9 11:54:44 EST 2009
Author: manik.surtani at jboss.com
Date: 2009-12-09 11:54:43 -0500 (Wed, 09 Dec 2009)
New Revision: 1264
Added:
trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java
trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java
trunk/core/src/main/java/org/infinispan/util/TimSort.java
Removed:
trunk/core/src/main/java/org/infinispan/container/FIFOAMRDataContainer.java
trunk/core/src/main/java/org/infinispan/container/LRUAMRDataContainer.java
Modified:
trunk/core/src/main/java/org/infinispan/container/DataContainer.java
trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java
trunk/core/src/main/java/org/infinispan/container/entries/AbstractInternalCacheEntry.java
trunk/core/src/main/java/org/infinispan/container/entries/CacheEntry.java
trunk/core/src/main/java/org/infinispan/container/entries/ExpiryHelper.java
trunk/core/src/main/java/org/infinispan/container/entries/ImmortalCacheEntry.java
trunk/core/src/main/java/org/infinispan/container/entries/InternalCacheEntry.java
trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java
trunk/core/src/main/java/org/infinispan/container/entries/MortalCacheEntry.java
trunk/core/src/main/java/org/infinispan/container/entries/ReadCommittedEntry.java
trunk/core/src/main/java/org/infinispan/container/entries/TransientCacheEntry.java
trunk/core/src/main/java/org/infinispan/container/entries/TransientMortalCacheEntry.java
trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java
trunk/core/src/main/java/org/infinispan/util/Immutables.java
trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java
trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java
trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java
trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java
Log:
[ISPN-277] (LRU data container endlessly looping or exhibiting heavy contention)
Modified: trunk/core/src/main/java/org/infinispan/container/DataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/DataContainer.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/DataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -22,6 +22,7 @@
package org.infinispan.container;
import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
@@ -92,6 +93,7 @@
/**
* Removes all entries in the container
*/
+ @Stop(priority = 999)
void clear();
/**
Deleted: trunk/core/src/main/java/org/infinispan/container/FIFOAMRDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFOAMRDataContainer.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/FIFOAMRDataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -1,875 +0,0 @@
-package org.infinispan.container;
-
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.InternalEntryFactory;
-import org.infinispan.util.Immutables;
-
-import java.util.AbstractCollection;
-import java.util.AbstractSet;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicMarkableReference;
-import java.util.concurrent.locks.LockSupport;
-import java.util.concurrent.locks.ReentrantLock;
-
-import net.jcip.annotations.ThreadSafe;
-
-/**
- * A container that maintains order of entries based on when they were placed in the container. Iterators obtained from
- * this container maintain this order.
- * <p/>
- * This container offers constant-time operation for all public API methods.
- * <p/>
- * This is implemented using a set of lockable segments, each of which is a hash table, not unlike the JDK's {@link
- * java.util.concurrent.ConcurrentHashMap} with the exception that each entry is also linked.
- * <p/>
- * Links are maintained using techniques inspired by H. Sundell and P. Tsigas' 2008 paper, <a
- * href="http://www.md.chalmers.se/~tsigas/papers/Lock-Free-Deques-Doubly-Lists-JPDC.pdf"><i>Lock Free Deques and Doubly
- * Linked Lists</i></a>, M. Michael's 2002 paper, <a href="http://www.research.ibm.com/people/m/michael/spaa-2002.pdf"><i>High
- * Performance Dynamic Lock-Free Hash Tables and List-Based Sets</i></a>.
- * <p />
- * This implementation uses JDK {@link java.util.concurrent.atomic.AtomicMarkableReference}
- * to implement reference deletion markers.
- * <p/>
- *
- * @author Manik Surtani
- * @author Galder Zamarreño
- * @since 4.0
- */
- at ThreadSafe
-public class FIFOAMRDataContainer implements DataContainer {
-
- /*
- This implementation closely follows the pseudocode in Sundell and Tsigas' paper (Referred to as STP) for managing
- the lock-free, threadsafe doubly linked list. AtomicMarkedReferences are used to implement the pointers referred
- to in the paper.
- */
-
- /**
- * The maximum capacity, used if a higher value is implicitly specified by either of the constructors with arguments.
- * MUST be a power of two <= 1<<30 to ensure that entries are indexable using ints.
- */
- static final int MAXIMUM_CAPACITY = 1 << 30;
-
- // -- these fields are all very similar to JDK's ConcurrentHashMap
-
- /**
- * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose the segment.
- */
- final int segmentMask;
-
- /**
- * Shift value for indexing within segments.
- */
- final int segmentShift;
-
- /**
- * The segments, each of which is a specialized hash table
- */
- final Segment[] segments;
-
- Set<Object> keySet;
-
- final LinkedEntry head = new LinkedEntry(null), tail = new LinkedEntry(null);
-
- public FIFOAMRDataContainer(int concurrencyLevel) {
- float loadFactor = 0.75f;
- int initialCapacity = 256;
-
- // Find power-of-two sizes best matching arguments
- int sshift = 0;
- int ssize = 1;
- while (ssize < concurrencyLevel) {
- ++sshift;
- ssize <<= 1;
- }
- segmentShift = 32 - sshift;
- segmentMask = ssize - 1;
- this.segments = Segment.newArray(ssize);
-
- if (initialCapacity > MAXIMUM_CAPACITY)
- initialCapacity = MAXIMUM_CAPACITY;
- int c = initialCapacity / ssize;
- if (c * ssize < initialCapacity)
- ++c;
- int cap = 1;
- while (cap < c)
- cap <<= 1;
-
- for (int i = 0; i < this.segments.length; ++i) this.segments[i] = new Segment(cap, loadFactor);
- initLinks();
- }
-
- // links and link management
-
- /**
- * Back off
- *
- * @param nanos nanos to back off for. If -1, starts at a default
- * @return next time, back off for these nanos
- */
- Random r = new Random();
- private static final long backoffStart = 10000;
-
- private long backoff(long nanos) {
-// long actualNanos = nanos < 0 ? backoffStart : nanos;
-// LockSupport.parkNanos(actualNanos);
-// long newNanos = actualNanos << 1;
-// return newNanos > 10000000 ? backoffStart : newNanos;
- int millis = (1 + r.nextInt(9)) * 10;
- LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(millis));
- return -1;
- }
-
- /**
- * Tests whether a given linked entry is marked for deletion. In this implementation, being "marked" means that it
- * is of type Marker rather than LinkedEntry, but given the relative cost of an "instanceof" check, we prefer to test
- * the state of the InternalCacheEntry referenced by the LinkedEntry. An InternalCacheEntry *always* exists so if it
- * is null, then this is a marker (or possibly the head or tail dummy entry).
- *
- * @param e entry to test
- * @return true if the entry is marked for removal. False if it is not, or if the entry is the head or tail dummy
- * entry.
- */
-// protected final boolean isMarkedForRemoval(LinkedEntry e) {
-// return e != head && e != tail && e.e == null;
-// }
-
- /**
- * Places a removal marker the 'previous' reference on the given entry. Note that marking a reference does not mean
- * that the reference pointed to is marked for removal, rather it means the LinkedEntry doing the referencing is the
- * entry to be removed.
- *
- * @param e entry
- * @return true if the marking was successful, false otherwise. Could return false if the reference is already
- * marked, or if the CAS failed.
- */
-// protected final boolean markPrevReference(LinkedEntry e) {
-// return !e.p.isMarked() && e.p.attemptMark(e.p.getReference(), true);
-// }
-
- /**
- * Places a removal marker the 'next' reference on the given entry. Note that marking a reference does not mean that
- * the reference pointed to is marked for removal, rather it means the LinkedEntry doing the referencing is the entry
- * to be removed.
- *
- * @param e entry
- * @return true if the marking was successful, false otherwise. Could return false if the reference is already
- * marked, or if the CAS failed.
- */
-// protected final boolean markNextReference(LinkedEntry e) {
-// return !e.n.isMarked() && e.n.attemptMark(e.n.getReference(), true);
-// }
-
- /**
- * The LinkedEntry class. This entry is stored in the lockable Segments, and is also capable of being doubly
- * linked.
- */
- static class LinkedEntry {
- volatile InternalCacheEntry e;
- /**
- * Links to next and previous entries. Needs to be volatile.
- */
-// volatile LinkedEntry n, p;
- AtomicMarkableReference<LinkedEntry> n = new AtomicMarkableReference<LinkedEntry>(null, false),
- p = new AtomicMarkableReference<LinkedEntry>(null, false);
-
- /**
- * CAS updaters for prev and next references
- */
-// private static final AtomicReferenceFieldUpdater<LinkedEntry, LinkedEntry> N_UPDATER = AtomicReferenceFieldUpdater.newUpdater(LinkedEntry.class, LinkedEntry.class, "n");
-// private static final AtomicReferenceFieldUpdater<LinkedEntry, LinkedEntry> P_UPDATER = AtomicReferenceFieldUpdater.newUpdater(LinkedEntry.class, LinkedEntry.class, "p");
-
- /**
- * LinkedEntries must always have a valid InternalCacheEntry.
- *
- * @param e internal cache entry
- */
- LinkedEntry(InternalCacheEntry e) {
- this.e = e;
- }
-
-// final boolean casNext(LinkedEntry expected, LinkedEntry newValue) {
-// return n.compareAndSet(expected, newValue, false, false);
-// }
-//
-// final boolean casPrev(LinkedEntry expected, LinkedEntry newValue) {
-// return p.compareAndSet(expected, newValue, false, false);
-// }
-//
-// @Override
-// public String toString() {
-// return "E" + Integer.toHexString(System.identityHashCode(this));
-// }
- }
-
- /**
- * A marker. If a reference in LinkedEntry (either to its previous or next entry) needs to be marked, it should be
- * CAS'd with an instance of Marker that points to the actual entry. Typically this is done by calling {@link
- * FIFOAMRDataContainer#markNextReference(FIFOAMRDataContainer.LinkedEntry)} or {@link
- * FIFOAMRDataContainer#markPrevReference(FIFOAMRDataContainer.LinkedEntry)}
- */
-// static final class Marker extends LinkedEntry {
-// Marker(LinkedEntry actual) {
-// super(null);
-// n = actual;
-// p = actual;
-// }
-//
-// @Override
-// public String toString() {
-// return "M" + Integer.toHexString(System.identityHashCode(this));
-// }
-// }
-
- /**
- * Initializes links to an empty container
- */
- protected final void initLinks() {
- head.n.set(tail, false);
- head.p.set(tail, false);
- tail.n.set(head, false);
- tail.p.set(head, false);
- }
-
- /**
- * Un-links an entry from the doubly linked list in a threadsafe, lock-free manner. The entry is typically retrieved
- * using Segment#locklessRemove() after locking the Segment.
- *
- * @param node entry to unlink
- */
- // This corresponds to the Delete() function in STP
- protected final void unlink(LinkedEntry node) {
- if (node == head || node == tail) return;
- while (true) {
- AtomicMarkableReference<LinkedEntry> next = node.n;
- if (next.isMarked()) return;
- if (node.n.compareAndSet(next.getReference(), next.getReference(), false, true)) {
- AtomicMarkableReference<LinkedEntry> prev;
- while (true) {
- prev = node.p;
- if (prev.isMarked() || node.p.compareAndSet(prev.getReference(), prev.getReference(), false, true)) {
- break;
- }
- }
- correctPrev(prev.getReference().p.getReference(), next.getReference());
- }
- }
- }
-
- /**
- * Links a new entry at the end of the linked list. Typically done when a put() creates a new entry, or if ordering
- * needs to be updated based on access. If this entry already exists in the linked list, it should first be {@link
- * #unlink(FIFOAMRDataContainer.LinkedEntry)}ed.
- *
- * @param node entry to link at end
- */
- // Corresponds to PushRight() in STP
- protected final void linkAtEnd(LinkedEntry node) {
- LinkedEntry next = tail;
- LinkedEntry prev = next.p.getReference();
- long backoffTime = -1;
- while (true) {
- node.p.set(prev, false);
- node.n.set(next, false);
- if (prev.n.compareAndSet(next, node, false, false)) break;
- prev = correctPrev(prev, next);
- backoffTime = backoff(backoffTime);
- }
-
- // PushEnd()
- backoffTime = -1;
- while (true) {
- AtomicMarkableReference<LinkedEntry> l1 = next.p;
- if (l1.isMarked() || (node.n.isMarked() || node.n.getReference() != next)) break;
- if (next.p.compareAndSet(l1.getReference(), node, false, false)) {
- if (node.p.isMarked()) correctPrev(node, next);
- break;
- }
- backoffTime = backoff(backoffTime);
- }
- }
-
- /**
- * Retrieves the next entry after a given entry, skipping marked entries accordingly.
- *
- * @param current current entry to inspect
- * @return the next valid entry, or null if we have reached the end of the list.
- */
- // Corresponds to the Next() function in STP pom
- protected final LinkedEntry getNext(LinkedEntry current) {
- while (true) {
- if (current == tail) return null;
- AtomicMarkableReference<LinkedEntry> next = current.n;
- boolean d = next.getReference().n.isMarked();
- if (d && (!current.n.isMarked() || current.n.getReference() != next.getReference())) {
- // set mark next.p
- next.getReference().p.attemptMark(next.getReference().p.getReference(), true);
- current.n.compareAndSet(next.getReference(), next.getReference().n.getReference(), false, false);
- continue;
- }
-
- current = next.getReference();
- if (!d && next.getReference() != tail) return current;
- }
- }
-
- /**
- * Correct 'previous' links. This 'helper' function is used if unable to properly set previous pointers (due to a
- * concurrent update) and is used when traversing the list in reverse.
- *
- * @param prev suggested previous entry
- * @param node current entry
- * @return the actual valid, previous entry. Links are also corrected in the process.
- */
- // Corresponds to CorrectPrev() in STP
- protected final LinkedEntry correctPrev(LinkedEntry prev, LinkedEntry node) {
- LinkedEntry lastLink = null;
- AtomicMarkableReference<LinkedEntry> link1, prev2;
- long backoffTime = -1;
-
- // holders to atomically retrieve ref + mark
- boolean[] markHolder = new boolean[1];
- LinkedEntry referenceHolder;
-
- while (true) {
- link1 = node.p;
- if (link1.isMarked()) break;
-
- prev2 = prev.n;
- if (prev2.isMarked()) {
- if (lastLink != null) {
- AtomicMarkableReference<LinkedEntry> prevP = prev.p;
- while (!prevP.attemptMark(prevP.getReference(), true)) {}
- lastLink.n.compareAndSet(prev, prev2.getReference(), lastLink.n.isMarked(), false);
- prev = lastLink;
- lastLink = null;
- continue;
- }
- prev2 = prev.p;
- prev = prev2.getReference();
- continue;
- }
-
- if (prev2.getReference() != node) {
- lastLink = prev;
- prev = prev2.getReference();
- continue;
- }
-
- referenceHolder = link1.get(markHolder);
- if (node.p.compareAndSet(referenceHolder, prev, markHolder[0], false)) {
- if (prev.p.isMarked()) continue;
- break;
- }
- backoffTime = backoff(backoffTime);
- }
- return prev;
- }
-
-// private LinkedEntry unmarkPrevIfNeeded(LinkedEntry e) {
-// if (isMarkedForRemoval(e)) return e.p;
-// else return e;
-// }
-
-
- /**
- * Similar to ConcurrentHashMap's hash() function: applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. This is critical because ConcurrentHashMap uses power-of-two length
- * hash tables, that otherwise encounter collisions for hashCodes that do not differ in lower or upper bits.
- */
- final int hashOld(int h) {
- // Spread bits to regularize both segment and index locations,
- // using variant of single-word Wang/Jenkins hash.
- h += (h << 15) ^ 0xffffcd7d;
- h ^= (h >>> 10);
- h += (h << 3);
- h ^= (h >>> 6);
- h += (h << 2) + (h << 14);
- return h ^ (h >>> 16);
- }
-
- /**
- * Use the objects built in hash to obtain an initial value, then use a second four byte hash to obtain a more
- * uniform distribution of hash values. This uses a <a href = "http://burtleburtle.net/bob/hash/integer.html">4-byte
- * (integer) hash</a>, which produces well distributed values even when the original hash produces thghtly clustered
- * values.
- * <p/>
- * Contributed by akluge <a href-="http://www.vizitsolutions.com/ConsistentHashingCaching.html">http://www.vizitsolutions.com/ConsistentHashingCaching.html</a>
- */
- final int hash(int hash) {
- hash = (hash + 0x7ED55D16) + (hash << 12);
- hash = (hash ^ 0xc761c23c) ^ (hash >> 19);
- hash = (hash + 0x165667b1) + (hash << 5);
- hash = (hash + 0xd3a2646c) ^ (hash << 9);
- hash = (hash + 0xfd7046c5) + (hash << 3);
- hash = (hash ^ 0xb55a4f09) ^ (hash >> 16);
-
- return hash;
- }
-
- /**
- * Returns the segment that should be used for key with given hash
- *
- * @param hash the hash code for the key
- * @return the segment
- */
- final Segment segmentFor(int hash) {
- return segments[(hash >>> segmentShift) & segmentMask];
- }
-
- /**
- * ConcurrentHashMap list entry. Note that this is never exported out as a user-visible Map.Entry.
- * <p/>
- * Because the value field is volatile, not final, it is legal wrt the Java Memory Model for an unsynchronized reader
- * to see null instead of initial value when read via a data race. Although a reordering leading to this is not
- * likely to ever actually occur, the Segment.readValueUnderLock method is used as a backup in case a null
- * (pre-initialized) value is ever seen in an unsynchronized access method.
- */
- static final class HashEntry {
- final Object key;
- final int hash;
- volatile LinkedEntry value;
- final HashEntry next;
-
- HashEntry(Object key, int hash, HashEntry next, LinkedEntry value) {
- this.key = key;
- this.hash = hash;
- this.next = next;
- this.value = value;
- }
- }
-
- /**
- * Very similar to a Segment in a ConcurrentHashMap
- */
- static final class Segment extends ReentrantLock {
-
- private static final long serialVersionUID = -6980774204494312874L;
-
- /**
- * The number of elements in this segment's region.
- */
- transient volatile int count;
-
- /**
- * The table is rehashed when its size exceeds this threshold. (The value of this field is always
- * <tt>(int)(capacity * loadFactor)</tt>.)
- */
- transient int threshold;
-
- /**
- * The per-segment table.
- */
- transient volatile HashEntry[] table;
-
- /**
- * The load factor for the hash table. Even though this value is same for all segments, it is replicated to avoid
- * needing links to outer object.
- *
- * @serial
- */
- final float loadFactor;
-
- Segment(int initialCapacity, float lf) {
- loadFactor = lf;
- setTable(new HashEntry[initialCapacity]);
- }
-
- static final Segment[] newArray(int i) {
- return new Segment[i];
- }
-
- /**
- * Sets table to new HashEntry array. Call only while holding lock or in constructor.
- */
- final void setTable(HashEntry[] newTable) {
- threshold = (int) (newTable.length * loadFactor);
- table = newTable;
- }
-
- /**
- * Returns properly casted first entry of bin for given hash.
- */
- final HashEntry getFirst(int hash) {
- HashEntry[] tab = table;
- return tab[hash & (tab.length - 1)];
- }
-
- /**
- * Reads value field of an entry under lock. Called if value field ever appears to be null. This is possible only
- * if a compiler happens to reorder a HashEntry initialization with its table assignment, which is legal under
- * memory model but is not known to ever occur.
- */
- final LinkedEntry readValueUnderLock(HashEntry e) {
- lock();
- try {
- return e.value;
- } finally {
- unlock();
- }
- }
-
- /* Specialized implementations of map methods */
-
- final LinkedEntry get(Object key, int hash) {
- if (count != 0) { // read-volatile
- HashEntry e = getFirst(hash);
- while (e != null) {
- if (e.hash == hash && key.equals(e.key)) {
- LinkedEntry v = e.value;
- if (v != null)
- return v;
- return readValueUnderLock(e); // recheck
- }
- e = e.next;
- }
- }
- return null;
- }
-
- /**
- * This put is lockless. Make sure you call segment.lock() first.
- */
- final LinkedEntry locklessPut(Object key, int hash, LinkedEntry value) {
- int c = count;
- if (c++ > threshold) // ensure capacity
- rehash();
- HashEntry[] tab = table;
- int index = hash & (tab.length - 1);
- HashEntry first = tab[index];
- HashEntry e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key)))
- e = e.next;
-
- LinkedEntry oldValue;
- if (e != null) {
- oldValue = e.value;
- e.value = value;
- } else {
- oldValue = null;
- tab[index] = new HashEntry(key, hash, first, value);
- count = c; // write-volatile
- }
- return oldValue;
- }
-
- final void rehash() {
- HashEntry[] oldTable = table;
- int oldCapacity = oldTable.length;
- if (oldCapacity >= MAXIMUM_CAPACITY)
- return;
-
- /*
- * Reclassify nodes in each list to new Map. Because we are
- * using power-of-two expansion, the elements from each bin
- * must either stay at same index, or move with a power of two
- * offset. We eliminate unnecessary node creation by catching
- * cases where old nodes can be reused because their next
- * fields won't change. Statistically, at the default
- * threshold, only about one-sixth of them need cloning when
- * a table doubles. The nodes they replace will be garbage
- * collectable as soon as they are no longer referenced by any
- * reader thread that may be in the midst of traversing table
- * right now.
- */
-
- HashEntry[] newTable = new HashEntry[oldCapacity << 1];
- threshold = (int) (newTable.length * loadFactor);
- int sizeMask = newTable.length - 1;
- for (int i = 0; i < oldCapacity; i++) {
- // We need to guarantee that any existing reads of old Map can
- // proceed. So we cannot yet null out each bin.
- HashEntry e = oldTable[i];
-
- if (e != null) {
- HashEntry next = e.next;
- int idx = e.hash & sizeMask;
-
- // Single node on list
- if (next == null)
- newTable[idx] = e;
-
- else {
- // Reuse trailing consecutive sequence at same slot
- HashEntry lastRun = e;
- int lastIdx = idx;
- for (HashEntry last = next;
- last != null;
- last = last.next) {
- int k = last.hash & sizeMask;
- if (k != lastIdx) {
- lastIdx = k;
- lastRun = last;
- }
- }
- newTable[lastIdx] = lastRun;
-
- // Clone all remaining nodes
- for (HashEntry p = e; p != lastRun; p = p.next) {
- int k = p.hash & sizeMask;
- HashEntry n = newTable[k];
- newTable[k] = new HashEntry(p.key, p.hash, n, p.value);
- }
- }
- }
- }
- table = newTable;
- }
-
- /**
- * This is a lockless remove. Make sure you acquire locks using segment.lock() first.
- */
- final LinkedEntry locklessRemove(Object key, int hash) {
- int c = count - 1;
- HashEntry[] tab = table;
- int index = hash & (tab.length - 1);
- HashEntry first = tab[index];
- HashEntry e = first;
- while (e != null && (e.hash != hash || !key.equals(e.key)))
- e = e.next;
-
- LinkedEntry oldValue = null;
- if (e != null) {
- oldValue = e.value;
- // All entries following removed node can stay
- // in list, but all preceding ones need to be
- // cloned.
- HashEntry newFirst = e.next;
- for (HashEntry p = first; p != e; p = p.next)
- newFirst = new HashEntry(p.key, p.hash,
- newFirst, p.value);
- tab[index] = newFirst;
- count = c; // write-volatile
-
- }
- return oldValue;
- }
-
- /**
- * This is a lockless clear. Ensure you acquire locks on the segment first using segment.lock().
- */
- final void locklessClear() {
- if (count != 0) {
- HashEntry[] tab = table;
- for (int i = 0; i < tab.length; i++)
- tab[i] = null;
- count = 0; // write-volatile
- }
- }
- }
-
-
- protected final class KeySet extends AbstractSet<Object> {
- public Iterator<Object> iterator() {
- return new KeyIterator();
- }
-
- public int size() {
- return FIFOAMRDataContainer.this.size();
- }
- }
-
- protected final class Values extends AbstractCollection<Object> {
- public Iterator<Object> iterator() {
- return new ValueIterator();
- }
-
- public int size() {
- return FIFOAMRDataContainer.this.size();
- }
- }
-
- protected final class EntrySet extends AbstractSet<InternalCacheEntry> {
- public Iterator<InternalCacheEntry> iterator() {
- return new ImmutableEntryIterator();
- }
-
- public int size() {
- return FIFOAMRDataContainer.this.size();
- }
- }
-
- protected abstract class LinkedIterator {
- LinkedEntry current = head;
-
- public boolean hasNext() {
- if (current == tail) return false;
- current = getNext(current);
- return current != null;
- }
-
- public void remove() {
- throw new UnsupportedOperationException();
- }
- }
-
- protected final class EntryIterator extends LinkedIterator implements Iterator<InternalCacheEntry> {
- public InternalCacheEntry next() {
- return current.e;
- }
- }
-
- protected final class ImmutableEntryIterator extends LinkedIterator implements Iterator<InternalCacheEntry> {
- public InternalCacheEntry next() {
- return Immutables.immutableInternalCacheEntry(current.e);
- }
- }
-
- protected final class KeyIterator extends LinkedIterator implements Iterator<Object> {
- public Object next() {
- return current.e.getKey();
- }
- }
-
- protected final class ValueIterator extends LinkedIterator implements Iterator<Object> {
- public Object next() {
- return current.e.getValue();
- }
- }
-
-
- // ----------- PUBLIC API ---------------
-
- public InternalCacheEntry get(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- if (ice != null) {
- if (ice.isExpired()) {
- remove(k);
- ice = null;
- } else {
- ice.touch();
- }
- }
- return ice;
- }
-
- public InternalCacheEntry peek(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- return ice;
- }
-
- public void put(Object k, Object v, long lifespan, long maxIdle) {
- // do a normal put first.
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- s.lock();
- LinkedEntry le;
- boolean newEntry = false;
- try {
- le = s.get(k, h);
- InternalCacheEntry ice = le == null ? null : le.e;
- if (ice == null) {
- newEntry = true;
- ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
- // only update linking if this is a new entry
- le = new LinkedEntry(ice);
- } else {
- ice.setValue(v);
- ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
- // need to do this anyway since the ICE impl may have changed
- le.e = ice;
- }
-
- s.locklessPut(k, h, le);
-
- if (newEntry) {
- linkAtEnd(le);
- }
- } finally {
- s.unlock();
- }
- }
-
- public boolean containsKey(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- if (ice != null) {
- if (ice.isExpired()) {
- remove(k);
- ice = null;
- }
- }
-
- return ice != null;
- }
-
- public InternalCacheEntry remove(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- s.lock();
- InternalCacheEntry ice = null;
- LinkedEntry le;
- try {
- le = s.locklessRemove(k, h);
- if (le != null) {
- ice = le.e;
- unlink(le);
- }
- } finally {
- s.unlock();
- }
-
- if (ice == null || ice.isExpired())
- return null;
- else
- return ice;
- }
-
- public int size() {
- // approximate sizing is good enough
- int sz = 0;
- final Segment[] segs = segments;
- for (Segment s : segs) sz += s.count;
- return sz;
- }
-
- public void clear() {
- // This is expensive...
- // lock all segments
- for (Segment s : segments) s.lock();
- try {
- for (Segment s : segments) s.locklessClear();
- initLinks();
- } finally {
- for (Segment s : segments) s.unlock();
- }
- }
-
- public Set<Object> keySet() {
- if (keySet == null) keySet = new KeySet();
- return keySet;
- }
-
- public Collection<Object> values() {
- return new Values();
- }
-
- public Set<InternalCacheEntry> entrySet() {
- return new EntrySet();
- }
-
- public void purgeExpired() {
- for (InternalCacheEntry ice : this) {
- if (ice.isExpired()) remove(ice.getKey());
- }
- }
-
- public Iterator<InternalCacheEntry> iterator() {
- return new EntryIterator();
- }
-}
\ No newline at end of file
Modified: trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/FIFODataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -40,6 +40,8 @@
@ThreadSafe
public class FIFODataContainer implements DataContainer {
+ InternalEntryFactory entryFactory = new InternalEntryFactory(false, false);
+
/**
* The maximum capacity, used if a higher value is implicitly specified by either of the constructors with arguments.
* MUST be a power of two <= 1<<30 to ensure that entries are indexable using ints.
@@ -763,7 +765,7 @@
le = new LinkedEntry(ice);
} else {
ice.setValue(v);
- ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
+ ice = entryFactory.update(ice, lifespan, maxIdle);
// need to do this anyway since the ICE impl may have changed
le.e = ice;
}
Added: trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java (rev 0)
+++ trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -0,0 +1,80 @@
+package org.infinispan.container;
+
+import net.jcip.annotations.ThreadSafe;
+import org.infinispan.container.entries.InternalCacheEntry;
+import org.infinispan.util.TimSort;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Iterator;
+
+/**
+ * Based on the same techniques outlined in the {@link SimpleDataContainer}, this implementation always forces the
+ * collection of creation timestamps for entries. This means that {@link org.infinispan.container.entries.ImmortalCacheEntry}
+ * and {@link org.infinispan.container.entries.TransientCacheEntry} are never used, since only {@link org.infinispan.container.entries.MortalCacheEntry}
+ * and {@link org.infinispan.container.entries.TransientMortalCacheEntry} instances capture timestamps.
+ * <p/>
+ * All gets, puts, etc are constant time operations.
+ * <p/>
+ * Iteration incurs an O(N log(N)) cost since the timestamps are sorted first, and there is an added memory overhead in
+ * temporary space to hold sorted references. When sorting, this implementation does not use the millisecond granularity
+ * when ordering timestamps; instead it defaults to a 1-second granularity since the FIFO ordering does not need to be
+ * strict and the TimSort implementation used for sorting performs significantly better with minimal reordering offered
+ * by a coarser granularity.
+ * <p/>
+ *
+ * @author Manik Surtani
+ * @since 4.0
+ */
+ at ThreadSafe
+public class FIFOSimpleDataContainer extends SimpleDataContainer {
+   // This is to facilitate faster sorting. Do we really care about millisecond accuracy when ordering the collection?
+ final static int DEFAULT_TIMESTAMP_GRANULARITY = 1000;
+
+ private final Comparator<InternalCacheEntry> COMPARATOR;
+
+ public FIFOSimpleDataContainer(int concurrencyLevel) {
+ this(concurrencyLevel, true, false, new FIFOComparator(DEFAULT_TIMESTAMP_GRANULARITY));
+ }
+
+ public FIFOSimpleDataContainer(int concurrencyLevel, int timestampGranularity) {
+ this(concurrencyLevel, true, false, new FIFOComparator(timestampGranularity));
+ }
+
+ FIFOSimpleDataContainer(int concurrencyLevel, boolean recordCreated, boolean recordLastUsed, Comparator<InternalCacheEntry> c) {
+ super(concurrencyLevel, recordCreated, recordLastUsed);
+ COMPARATOR = c;
+ }
+
+
+   @Override
+   public Iterator<InternalCacheEntry> iterator() {
+      InternalCacheEntry[] sortedEntries = new InternalCacheEntry[immortalEntries.size() + mortalEntries.size()];
+      int i = 0;
+      for (InternalCacheEntry ice : immortalEntries.values()) {
+         if (i == sortedEntries.length) break;
+         sortedEntries[i++] = ice;
+      }
+      for (InternalCacheEntry ice : mortalEntries.values()) {
+         if (i == sortedEntries.length) break;
+         sortedEntries[i++] = ice;
+      }
+      // Maps may have shrunk since sizing the array; trim trailing nulls or the comparator NPEs in the sort.
+      if (i < sortedEntries.length) sortedEntries = Arrays.copyOf(sortedEntries, i);
+      TimSort.sort(sortedEntries, COMPARATOR);
+      return Arrays.asList(sortedEntries).iterator();
+   }
+
+   private static final class FIFOComparator implements Comparator<InternalCacheEntry> {
+      int timestampGranularity;
+
+      private FIFOComparator(int timestampGranularity) {
+         this.timestampGranularity = timestampGranularity;
+      }
+      // Divide as longs before comparing: casting the raw epoch-millis long to int overflows and breaks ordering.
+      @Override
+      public int compare(InternalCacheEntry o1, InternalCacheEntry o2) {
+         return Long.signum(o1.getCreated() / timestampGranularity - o2.getCreated() / timestampGranularity);
+      }
+   }
+}
\ No newline at end of file
Property changes on: trunk/core/src/main/java/org/infinispan/container/FIFOSimpleDataContainer.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/core/src/main/java/org/infinispan/container/LRUAMRDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUAMRDataContainer.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/LRUAMRDataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -1,94 +0,0 @@
-package org.infinispan.container;
-
-import net.jcip.annotations.ThreadSafe;
-import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.InternalEntryFactory;
-
-/**
- * Based on the same techniques outlined in the {@link FIFODataContainer}, this implementation
- * additionally unlinks and re-links entries at the tail whenever entries are visited (using a get()) or are updated (a
- * put() on an existing key).
- * <p/>
- * Again, these are constant-time operations.
- * <p/>
- * Note though that this implementation does have a far lesser degree of concurrency when compared with its FIFO variant
- * due to the segment locking necessary even when doing a get() (since gets reorder links). This has a knock-on effect
- * not just on get() but even on other write() operations since they all compete for the same segment lock (when working
- * on keys mapped to the same segment, of course).
- *
- * @author Manik Surtani
- * @since 4.0
- */
- at ThreadSafe
-public class LRUAMRDataContainer extends FIFOAMRDataContainer {
-
- public LRUAMRDataContainer(int concurrencyLevel) {
- super(concurrencyLevel);
- }
-
- @Override
- public InternalCacheEntry get(Object k) {
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- LinkedEntry le = s.get(k, h);
- InternalCacheEntry ice = null;
- if (le != null) ice = le.e;
- if (ice != null) {
- if (ice.isExpired()) {
- remove(k);
- ice = null;
- } else {
- ice.touch();
- boolean needToUnlock = false;
- try {
- s.lock(); // no not update links unless segment is locked!
- needToUnlock = true;
- updateLinks(le);
- } finally {
- if (needToUnlock) s.unlock();
- }
- }
- }
- return ice;
- }
-
- @Override
- public void put(Object k, Object v, long lifespan, long maxIdle) {
- // do a normal put first.
- int h = hash(k.hashCode());
- Segment s = segmentFor(h);
- s.lock();
- LinkedEntry le;
- boolean newEntry = false;
- try {
- le = s.get(k, h);
- InternalCacheEntry ice = le == null ? null : le.e;
- if (ice == null) {
- newEntry = true;
- ice = InternalEntryFactory.create(k, v, lifespan, maxIdle);
- le = new LinkedEntry(ice);
- } else {
- ice.setValue(v);
- ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
- // need to do this anyway since the ICE impl may have changed
- le.e = ice;
- }
-
- s.locklessPut(k, h, le);
-
- if (newEntry) {
- linkAtEnd(le);
- } else {
- updateLinks(le);
- }
-
- } finally {
- s.unlock();
- }
- }
-
- protected final void updateLinks(LinkedEntry le) {
- unlink(le);
- linkAtEnd(le);
- }
-}
\ No newline at end of file
Modified: trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -70,7 +70,7 @@
le = new LinkedEntry(ice);
} else {
ice.setValue(v);
- ice = ice.setLifespan(lifespan).setMaxIdle(maxIdle);
+ ice = entryFactory.update(ice, lifespan, maxIdle);
// need to do this anyway since the ICE impl may have changed
le.e = ice;
}
Copied: trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java (from rev 1263, trunk/core/src/main/java/org/infinispan/container/LRUDataContainer.java)
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java (rev 0)
+++ trunk/core/src/main/java/org/infinispan/container/LRUSimpleDataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -0,0 +1,48 @@
+package org.infinispan.container;
+
+import net.jcip.annotations.ThreadSafe;
+import org.infinispan.container.entries.InternalCacheEntry;
+
+import java.util.Comparator;
+
+/**
+ * Based on the same techniques outlined in the {@link SimpleDataContainer}, this implementation always forces the
+ * collection of last used timestamps for entries. This means that {@link org.infinispan.container.entries.ImmortalCacheEntry}
+ * and {@link org.infinispan.container.entries.MortalCacheEntry} are never used, since only {@link org.infinispan.container.entries.TransientCacheEntry}
+ * and {@link org.infinispan.container.entries.TransientMortalCacheEntry} instances capture timestamps.
+ * <p/>
+ * All gets, puts, etc are constant time operations.
+ * <p/>
+ * Iteration incurs an O(N log(N)) cost since the timestamps are sorted first, and there is an added memory overhead in
+ * temporary space to hold sorted references. When sorting, this implementation does not use the millisecond granularity
+ * when ordering timestamps; instead it defaults to a 1-second granularity since the LRU ordering does not need to be
+ * strict and the TimSort implementation used for sorting performs significantly better with minimal reordering offered
+ * by a coarser granularity.
+ * <p/>
+ *
+ * @author Manik Surtani
+ * @since 4.0
+ */
+ at ThreadSafe
+public class LRUSimpleDataContainer extends FIFOSimpleDataContainer {
+ public LRUSimpleDataContainer(int concurrencyLevel) {
+ super(concurrencyLevel, false, true, new LRUComparator(DEFAULT_TIMESTAMP_GRANULARITY));
+ }
+
+ public LRUSimpleDataContainer(int concurrencyLevel, int timestampGranularity) {
+ super(concurrencyLevel, false, true, new LRUComparator(timestampGranularity));
+ }
+
+   private static final class LRUComparator implements Comparator<InternalCacheEntry> {
+      int timestampGranularity;
+
+      private LRUComparator(int timestampGranularity) {
+         this.timestampGranularity = timestampGranularity;
+      }
+      // Divide as longs before comparing: casting the raw epoch-millis long to int overflows and breaks ordering.
+      @Override
+      public int compare(InternalCacheEntry o1, InternalCacheEntry o2) {
+         return Long.signum(o1.getLastUsed() / timestampGranularity - o2.getLastUsed() / timestampGranularity);
+      }
+   }
+}
\ No newline at end of file
Modified: trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/SimpleDataContainer.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -3,7 +3,6 @@
import net.jcip.annotations.ThreadSafe;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.InternalEntryFactory;
-import org.infinispan.factories.annotations.Stop;
import org.infinispan.util.Immutables;
import java.util.AbstractCollection;
@@ -33,10 +32,17 @@
final ConcurrentMap<Object, InternalCacheEntry> immortalEntries;
final ConcurrentMap<Object, InternalCacheEntry> mortalEntries;
final AtomicInteger numEntries = new AtomicInteger(0);
+ final InternalEntryFactory entryFactory;
- public SimpleDataContainer(int concurrencyLevel) {
+
+ public SimpleDataContainer(int concurrencyLevel) {
+ this(concurrencyLevel, false, false);
+ }
+
+ SimpleDataContainer(int concurrencyLevel, boolean recordCreation, boolean recordLastUsed) {
immortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(128, 0.75f, concurrencyLevel);
mortalEntries = new ConcurrentHashMap<Object, InternalCacheEntry>(64, 0.75f, concurrencyLevel);
+ entryFactory = new InternalEntryFactory(recordCreation, recordLastUsed);
}
public InternalCacheEntry peek(Object key) {
@@ -59,23 +65,27 @@
return e;
}
+ protected void successfulPut(InternalCacheEntry ice, boolean newEntry) {
+ // no-op
+ }
+
public void put(Object k, Object v, long lifespan, long maxIdle) {
InternalCacheEntry e = immortalEntries.get(k);
if (e != null) {
e.setValue(v);
- if (lifespan > -1) e = e.setLifespan(lifespan);
- if (maxIdle > -1) e = e.setMaxIdle(maxIdle);
+ e = entryFactory.update(e, lifespan, maxIdle);
if (e.canExpire()) {
immortalEntries.remove(k);
mortalEntries.put(k, e);
}
+ successfulPut(e, false);
} else {
e = mortalEntries.get(k);
if (e != null) {
e.setValue(v);
InternalCacheEntry original = e;
- e = e.setLifespan(lifespan).setMaxIdle(maxIdle);
+ e = entryFactory.update(e, lifespan, maxIdle);
if (!e.canExpire()) {
mortalEntries.remove(k);
@@ -84,15 +94,16 @@
// the entry has changed type, but still can expire!
mortalEntries.put(k, e);
}
+ successfulPut(e, false);
} else {
// this is a brand-new entry
numEntries.getAndIncrement();
- e = InternalEntryFactory.create(k, v, lifespan, maxIdle);
+ e = entryFactory.createNewEntry(k, v, lifespan, maxIdle);
if (e.canExpire())
mortalEntries.put(k, e);
else
immortalEntries.put(k, e);
-
+ successfulPut(e, true);
}
}
}
@@ -119,7 +130,6 @@
return numEntries.get();
}
- @Stop(priority = 999)
public void clear() {
immortalEntries.clear();
mortalEntries.clear();
Modified: trunk/core/src/main/java/org/infinispan/container/entries/AbstractInternalCacheEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/AbstractInternalCacheEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/AbstractInternalCacheEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -59,6 +59,13 @@
return false;
}
+ public void setMaxIdle(long maxIdle) {
+ }
+
+ public void setLifespan(long lifespan) {
+ }
+
+
public final Object getKey() {
return key;
}
Modified: trunk/core/src/main/java/org/infinispan/container/entries/CacheEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/CacheEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/CacheEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -1,9 +1,9 @@
package org.infinispan.container.entries;
-import java.util.Map;
-
import org.infinispan.container.DataContainer;
+import java.util.Map;
+
/**
* An entry that is stored in the data container
*
@@ -66,27 +66,17 @@
/**
* Sets the maximum idle time of the entry.
- * <p/>
- * Note that if this method is used, you should always use a reference to the return value after invocation, since as
- * an optimization, implementations may change type of CacheEntry used after invoking this method, for example
- * changing a MortalCacheEntry to an ImmortalCacheEntry.
*
* @param maxIdle maxIdle to set
- * @return the updated CacheEntry
*/
- CacheEntry setMaxIdle(long maxIdle);
+ void setMaxIdle(long maxIdle);
/**
* Sets the lifespan of the entry.
- * <p/>
- * Note that if this method is used, you should always use a reference to the return value after invocation, since as
- * an optimization, implementations may change type of CacheEntry used after invoking this method, for example
- * changing a MortalCacheEntry to an ImmortalCacheEntry.
*
* @param lifespan lifespan to set
- * @return the updated CacheEntry
*/
- CacheEntry setLifespan(long lifespan);
+ void setLifespan(long lifespan);
/**
* Sets the value of the entry, returing the previous value
Modified: trunk/core/src/main/java/org/infinispan/container/entries/ExpiryHelper.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/ExpiryHelper.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/ExpiryHelper.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -8,11 +8,11 @@
*/
class ExpiryHelper {
static final boolean isExpiredMortal(long lifespan, long created) {
- return lifespan > -1 && System.currentTimeMillis() > created + lifespan;
+ return lifespan > -1 && created > -1 && System.currentTimeMillis() > created + lifespan;
}
static final boolean isExpiredTransient(long maxIdle, long lastUsed) {
- return maxIdle > -1 && System.currentTimeMillis() > maxIdle + lastUsed;
+ return maxIdle > -1 && lastUsed > -1 && System.currentTimeMillis() > maxIdle + lastUsed;
}
static final boolean isExpiredTransientMortal(long maxIdle, long lastUsed, long lifespan, long created) {
Modified: trunk/core/src/main/java/org/infinispan/container/entries/ImmortalCacheEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/ImmortalCacheEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/ImmortalCacheEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -30,22 +30,6 @@
return false;
}
- public InternalCacheEntry setMaxIdle(long maxIdle) {
- if (maxIdle > -1) {
- return new TransientCacheEntry(key, cacheValue.value, maxIdle);
- } else {
- return this;
- }
- }
-
- public InternalCacheEntry setLifespan(long lifespan) {
- if (lifespan > -1) {
- return new MortalCacheEntry(key, cacheValue.value, lifespan);
- } else {
- return this;
- }
- }
-
public final long getCreated() {
return -1;
}
Modified: trunk/core/src/main/java/org/infinispan/container/entries/InternalCacheEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/InternalCacheEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/InternalCacheEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -19,27 +19,17 @@
/**
* Sets the maximum idle time of the entry.
- * <p/>
- * Note that if this method is used, you should always use a reference to the return value after invocation, since as
- * an optimization, implementations may change type of CacheEntry used after invoking this method, for example
- * changing a MortalCacheEntry to an ImmortalCacheEntry.
*
* @param maxIdle maxIdle to set
- * @return the updated CacheEntry
*/
- InternalCacheEntry setMaxIdle(long maxIdle);
+ void setMaxIdle(long maxIdle);
/**
* Sets the lifespan of the entry.
- * <p/>
- * Note that if this method is used, you should always use a reference to the return value after invocation, since as
- * an optimization, implementations may change type of CacheEntry used after invoking this method, for example
- * changing a MortalCacheEntry to an ImmortalCacheEntry.
*
* @param lifespan lifespan to set
- * @return the updated CacheEntry
*/
- InternalCacheEntry setLifespan(long lifespan);
+ void setLifespan(long lifespan);
/**
* @return timestamp when the entry was created
Modified: trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/InternalEntryFactory.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -7,6 +7,14 @@
* @since 4.0
*/
public class InternalEntryFactory {
+
+ boolean recordCreation, recordLastUsed;
+
+ public InternalEntryFactory(boolean recordCreation, boolean recordLastUsed) {
+ this.recordCreation = recordCreation;
+ this.recordLastUsed = recordLastUsed;
+ }
+
public static final InternalCacheEntry create(Object key, Object value) {
return new ImmortalCacheEntry(key, value);
}
@@ -39,4 +47,151 @@
if (lifespan < 0 && maxIdle > -1) return new TransientCacheValue(v, maxIdle, lastUsed);
return new TransientMortalCacheValue(v, created, lifespan, maxIdle, lastUsed);
}
+
+ public InternalCacheEntry createNewEntry(Object key, Object value, long lifespan, long maxIdle) {
+ if (lifespan < 0 && maxIdle < 0) {
+ if (recordCreation || recordLastUsed) {
+ if (recordCreation && !recordLastUsed) return new MortalCacheEntry(key, value, -1);
+ if (!recordCreation && recordLastUsed) return new TransientCacheEntry(key, value, -1);
+ return new TransientMortalCacheEntry(key, value, -1, -1);
+ } else {
+ return new ImmortalCacheEntry(key, value);
+ }
+ }
+
+ if (lifespan > -1 && maxIdle < 0) {
+ if (recordLastUsed) {
+ return new TransientMortalCacheEntry(key, value, lifespan, -1);
+ } else {
+ return new MortalCacheEntry(key, value, lifespan);
+ }
+ }
+
+ if (lifespan < 0 && maxIdle > -1) {
+ if (recordCreation) {
+ return new TransientMortalCacheEntry(key, value, -1, maxIdle);
+ } else {
+ return new TransientCacheEntry(key, value, maxIdle);
+ }
+ }
+
+ // else...
+ return new TransientMortalCacheEntry(key, value, lifespan, maxIdle);
+ }
+
+ /**
+ * Sets the values on the given internal cache entry, potentially reconstructing the entry to the most appropriate
+ * type (Mortal, Immortal, Transient or TransientMortal) based on the lifespan and maxIdle being set. As such,
+ * callers must *always* assume that the InternalCacheEntry instance is being changed and must switch reference to
+ * the instance being returned, even though this *may* not be a new instance at all.
+ *
+ * @param ice cache entry to work on
+ * @param lifespan lifespan to set
+ * @param maxIdle max idle to set
+ * @return a cache entry
+ */
+ public InternalCacheEntry update(InternalCacheEntry ice, long lifespan, long maxIdle) {
+ if (ice instanceof ImmortalCacheEntry) {
+ if (lifespan < 0) {
+ if (maxIdle < 0) {
+ return ice;
+ } else {
+ return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle);
+ }
+ } else {
+ if (maxIdle < 0) {
+ return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan);
+ } else {
+ return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, lifespan, System.currentTimeMillis(), ice.getCreated());
+ }
+ }
+ } else if (ice instanceof MortalCacheEntry) {
+ if (lifespan < 0) {
+ if (maxIdle < 0) {
+ if (recordCreation) {
+ ice.setLifespan(-1);
+ return ice;
+ } else {
+ return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
+ }
+ } else {
+ if (recordCreation) {
+ return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, -1, System.currentTimeMillis(), ice.getCreated());
+ } else {
+ return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle);
+ }
+ }
+ } else {
+ if (maxIdle < 0) {
+ ice.setLifespan(lifespan);
+ return ice;
+ } else {
+ return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, lifespan, System.currentTimeMillis(), ice.getCreated());
+ }
+ }
+ } else if (ice instanceof TransientCacheEntry) {
+ if (lifespan < 0) {
+ if (maxIdle < 0) {
+ if (recordLastUsed) {
+ ice.setMaxIdle(-1);
+ return ice;
+ } else {
+ return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
+ }
+ } else {
+ ice.setMaxIdle(maxIdle);
+ return ice;
+ }
+ } else {
+ if (maxIdle < 0) {
+ if (recordLastUsed) {
+ return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, lifespan, ice.getLastUsed(), System.currentTimeMillis());
+ } else {
+ return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan);
+ }
+ } else {
+ return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, lifespan, System.currentTimeMillis(), ice.getCreated());
+ }
+ }
+ } else if (ice instanceof TransientMortalCacheEntry) {
+ if (lifespan < 0) {
+ if (maxIdle < 0) {
+ if (recordCreation || recordLastUsed) {
+ if (recordLastUsed && recordCreation) {
+ ice.setLifespan(lifespan);
+ ice.setMaxIdle(maxIdle);
+ return ice;
+ } else if (recordCreation) {
+ return new MortalCacheEntry(ice.getKey(), ice.getValue(), -1, ice.getCreated());
+ } else {
+ return new TransientCacheEntry(ice.getKey(), ice.getValue(), -1, ice.getLastUsed());
+ }
+ } else {
+ return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
+ }
+ } else {
+ if (recordCreation) {
+ return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, -1, System.currentTimeMillis(), ice.getCreated());
+ } else {
+ return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle);
+ }
+ }
+ } else {
+ if (maxIdle < 0) {
+ if (recordLastUsed) {
+ ice.setLifespan(lifespan);
+ ice.setMaxIdle(maxIdle);
+ return ice;
+ } else {
+ return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan);
+ }
+ } else {
+ ice.setLifespan(lifespan);
+ ice.setMaxIdle(maxIdle);
+ return ice;
+ }
+ }
+ }
+ return ice;
+ }
}
Modified: trunk/core/src/main/java/org/infinispan/container/entries/MortalCacheEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/MortalCacheEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/MortalCacheEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -43,25 +43,10 @@
return true;
}
- public InternalCacheEntry setMaxIdle(long maxIdle) {
- if (maxIdle > -1) {
- TransientMortalCacheEntry tmce = new TransientMortalCacheEntry(key, cacheValue.value);
- tmce.setMaxIdle(maxIdle);
- return tmce;
- } else {
- return this;
- }
+ public void setLifespan(long lifespan) {
+ cacheValue.setLifespan(lifespan);
}
- public InternalCacheEntry setLifespan(long lifespan) {
- if (lifespan < 0) {
- return new ImmortalCacheEntry(key, cacheValue.value);
- } else {
- cacheValue.lifespan = lifespan;
- return this;
- }
- }
-
public final long getCreated() {
return cacheValue.created;
}
Modified: trunk/core/src/main/java/org/infinispan/container/entries/ReadCommittedEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/ReadCommittedEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/ReadCommittedEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -23,10 +23,11 @@
import org.infinispan.atomic.AtomicHashMap;
import org.infinispan.container.DataContainer;
-import static org.infinispan.container.entries.ReadCommittedEntry.Flags.*;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
+import static org.infinispan.container.entries.ReadCommittedEntry.Flags.*;
+
/**
* A wrapper around a cached entry that encapsulates read committed semantics when writes are initiated, committed or
* rolled back.
@@ -107,14 +108,12 @@
return maxIdle;
}
- public final MVCCEntry setMaxIdle(long maxIdle) {
+ public final void setMaxIdle(long maxIdle) {
this.maxIdle = maxIdle;
- return this;
}
- public final MVCCEntry setLifespan(long lifespan) {
+ public final void setLifespan(long lifespan) {
this.lifespan = lifespan;
- return this;
}
public final Object getKey() {
Modified: trunk/core/src/main/java/org/infinispan/container/entries/TransientCacheEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/TransientCacheEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/TransientCacheEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -47,25 +47,10 @@
return cacheValue.isExpired();
}
- public InternalCacheEntry setMaxIdle(long maxIdle) {
- if (maxIdle < 0) {
- return new ImmortalCacheEntry(key, cacheValue.value);
- } else {
- cacheValue.maxIdle = maxIdle;
- return this;
- }
+ public void setMaxIdle(long maxIdle) {
+ cacheValue.maxIdle = maxIdle;
}
- public InternalCacheEntry setLifespan(long lifespan) {
- if (lifespan > -1) {
- TransientMortalCacheEntry tmce = new TransientMortalCacheEntry(key, cacheValue.value);
- tmce.setLifespan(lifespan);
- return tmce;
- } else {
- return this;
- }
- }
-
public long getCreated() {
return -1;
}
@@ -137,7 +122,7 @@
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
Long maxIdle = input.readLong();
return new TransientCacheEntry(k, v, maxIdle, lastUsed);
- }
+ }
}
@Override
Modified: trunk/core/src/main/java/org/infinispan/container/entries/TransientMortalCacheEntry.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/container/entries/TransientMortalCacheEntry.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/container/entries/TransientMortalCacheEntry.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -1,13 +1,13 @@
package org.infinispan.container.entries;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
import org.infinispan.io.UnsignedNumeric;
import org.infinispan.marshall.Ids;
import org.infinispan.marshall.Marshallable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
/**
* A cache entry that is both transient and mortal.
*
@@ -36,22 +36,12 @@
this.cacheValue = new TransientMortalCacheValue(value, created, lifespan, maxIdle, lastUsed);
}
- public InternalCacheEntry setLifespan(long lifespan) {
- if (lifespan < 0) {
- return new TransientCacheEntry(key, cacheValue.value, cacheValue.lastUsed, cacheValue.maxIdle);
- } else {
- this.cacheValue.lifespan = lifespan;
- return this;
- }
+ public void setLifespan(long lifespan) {
+ this.cacheValue.lifespan = lifespan;
}
- public InternalCacheEntry setMaxIdle(long maxIdle) {
- if (maxIdle < 0) {
- return new MortalCacheEntry(key, cacheValue.value, cacheValue.lifespan, cacheValue.created);
- } else {
- this.cacheValue.maxIdle = maxIdle;
- return this;
- }
+ public void setMaxIdle(long maxIdle) {
+ this.cacheValue.maxIdle = maxIdle;
}
public Object getValue() {
@@ -125,11 +115,11 @@
clone.cacheValue = cacheValue.clone();
return clone;
}
-
+
@Override
public String toString() {
return getClass().getSimpleName() + "{" +
- "cacheValue=" + cacheValue +
+ "cacheValue=" + cacheValue +
"} " + super.toString();
}
Modified: trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/factories/DataContainerFactory.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -23,8 +23,8 @@
import org.infinispan.config.ConfigurationException;
import org.infinispan.container.DataContainer;
-import org.infinispan.container.FIFODataContainer;
-import org.infinispan.container.LRUDataContainer;
+import org.infinispan.container.FIFOSimpleDataContainer;
+import org.infinispan.container.LRUSimpleDataContainer;
import org.infinispan.container.SimpleDataContainer;
import org.infinispan.factories.annotations.DefaultFactoryFor;
@@ -43,9 +43,9 @@
case NONE:
return (T) new SimpleDataContainer(configuration.getConcurrencyLevel());
case FIFO:
- return (T) new FIFODataContainer(configuration.getConcurrencyLevel());
+ return (T) new FIFOSimpleDataContainer(configuration.getConcurrencyLevel());
case LRU:
- return (T) new LRUDataContainer(configuration.getConcurrencyLevel());
+ return (T) new LRUSimpleDataContainer(configuration.getConcurrencyLevel());
default:
throw new ConfigurationException("Unknown eviction strategy " + configuration.getEvictionStrategy());
}
Modified: trunk/core/src/main/java/org/infinispan/util/Immutables.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/util/Immutables.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/main/java/org/infinispan/util/Immutables.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -488,11 +488,11 @@
return entry.isExpired();
}
- public InternalCacheEntry setLifespan(long lifespan) {
+ public void setLifespan(long lifespan) {
throw new UnsupportedOperationException();
}
- public InternalCacheEntry setMaxIdle(long maxIdle) {
+ public void setMaxIdle(long maxIdle) {
throw new UnsupportedOperationException();
}
Added: trunk/core/src/main/java/org/infinispan/util/TimSort.java
===================================================================
--- trunk/core/src/main/java/org/infinispan/util/TimSort.java (rev 0)
+++ trunk/core/src/main/java/org/infinispan/util/TimSort.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -0,0 +1,934 @@
+/*
+ * Copyright 2009 Google Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Sun designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Sun in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+package org.infinispan.util;
+
+import java.util.Arrays;
+import java.util.Comparator;
+
+/**
+ * TimSort, backported from JDK7 sources (build openjdk-7-ea-src-b77-03_dec_2009).
+ * Javadocs copied accordingly as well.
+ *
+ * A stable, adaptive, iterative mergesort that requires far fewer than
+ * n lg(n) comparisons when running on partially sorted arrays, while
+ * offering performance comparable to a traditional mergesort when run
+ * on random arrays. Like all proper mergesorts, this sort is stable and
+ * runs O(n log n) time (worst case). In the worst case, this sort requires
+ * temporary storage space for n/2 object references; in the best case,
+ * it requires only a small constant amount of space.
+ *
+ * This implementation was adapted from Tim Peters's list sort for
+ * Python, which is described in detail here:
+ *
+ * http://svn.python.org/projects/python/trunk/Objects/listsort.txt
+ *
+ * Tim's C code may be found here:
+ *
+ * http://svn.python.org/projects/python/trunk/Objects/listobject.c
+ *
+ * The underlying techniques are described in this paper (and may have
+ * even earlier origins):
+ *
+ * "Optimistic Sorting and Information Theoretic Complexity"
+ * Peter McIlroy
+ * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
+ * pp 467-474, Austin, Texas, 25-27 January 1993.
+ *
+ * While the API to this class consists solely of static methods, it is
+ * (privately) instantiable; a TimSort instance holds the state of an ongoing
+ * sort, assuming the input array is large enough to warrant the full-blown
+ * TimSort. Small arrays are sorted in place, using a binary insertion sort.
+ *
+ * @author Josh Bloch
+ */
+public class TimSort<T> {
+ /**
+ * This is the minimum sized sequence that will be merged. Shorter
+ * sequences will be lengthened by calling binarySort. If the entire
+ * array is less than this length, no merges will be performed.
+ *
+ * This constant should be a power of two. It was 64 in Tim Peter's C
+ * implementation, but 32 was empirically determined to work better in
+ * this implementation. In the unlikely event that you set this constant
+ * to be a number that's not a power of two, you'll need to change the
+ * {@link #minRunLength} computation.
+ *
+ * If you decrease this constant, you must change the stackLen
+ * computation in the TimSort constructor, or you risk an
+    * ArrayIndexOutOfBoundsException. See listsort.txt for a discussion
+ * of the minimum stack length required as a function of the length
+ * of the array being sorted and the minimum merge sequence length.
+ */
+ private static final int MIN_MERGE = 32;
+
+ /**
+ * The array being sorted.
+ */
+ private final T[] a;
+
+ /**
+ * The comparator for this sort.
+ */
+ private final Comparator<? super T> c;
+
+ /**
+ * When we get into galloping mode, we stay there until both runs win less
+ * often than MIN_GALLOP consecutive times.
+ */
+ private static final int MIN_GALLOP = 7;
+
+ /**
+ * This controls when we get *into* galloping mode. It is initialized
+ * to MIN_GALLOP. The mergeLo and mergeHi methods nudge it higher for
+ * random data, and lower for highly structured data.
+ */
+ private int minGallop = MIN_GALLOP;
+
+ /**
+ * Maximum initial size of tmp array, which is used for merging. The array
+ * can grow to accommodate demand.
+ *
+ * Unlike Tim's original C version, we do not allocate this much storage
+ * when sorting smaller arrays. This change was required for performance.
+ */
+ private static final int INITIAL_TMP_STORAGE_LENGTH = 256;
+
+ /**
+ * Temp storage for merges.
+ */
+ private T[] tmp; // Actual runtime type will be Object[], regardless of T
+
+ /**
+ * A stack of pending runs yet to be merged. Run i starts at
+ * address base[i] and extends for len[i] elements. It's always
+ * true (so long as the indices are in bounds) that:
+ *
+ * runBase[i] + runLen[i] == runBase[i + 1]
+ *
+ * so we could cut the storage for this, but it's a minor amount,
+ * and keeping all the info explicit simplifies the code.
+ */
+ private int stackSize = 0; // Number of pending runs on stack
+ private final int[] runBase;
+ private final int[] runLen;
+
+ /**
+ * Creates a TimSort instance to maintain the state of an ongoing sort.
+ *
+ * @param a the array to be sorted
+ * @param c the comparator to determine the order of the sort
+ */
+ private TimSort(T[] a, Comparator<? super T> c) {
+ this.a = a;
+ this.c = c;
+
+ // Allocate temp storage (which may be increased later if necessary)
+ int len = a.length;
+ @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"})
+ T[] newArray = (T[]) new Object[len < 2 * INITIAL_TMP_STORAGE_LENGTH ?
+ len >>> 1 : INITIAL_TMP_STORAGE_LENGTH];
+ tmp = newArray;
+
+ /*
+ * Allocate runs-to-be-merged stack (which cannot be expanded). The
+ * stack length requirements are described in listsort.txt. The C
+ * version always uses the same stack length (85), but this was
+ * measured to be too expensive when sorting "mid-sized" arrays (e.g.,
+ * 100 elements) in Java. Therefore, we use smaller (but sufficiently
+ * large) stack lengths for smaller arrays. The "magic numbers" in the
+ * computation below must be changed if MIN_MERGE is decreased. See
+ * the MIN_MERGE declaration above for more information.
+ */
+ int stackLen = (len < 120 ? 5 :
+ len < 1542 ? 10 :
+ len < 119151 ? 19 : 40);
+ runBase = new int[stackLen];
+ runLen = new int[stackLen];
+ }
+
+ /*
+ * The next two methods (which are package private and static) constitute
+ * the entire API of this class. Each of these methods obeys the contract
+ * of the public method with the same signature in java.util.Arrays.
+ */
+
+ public static <T> void sort(T[] a, Comparator<? super T> c) {
+ sort(a, 0, a.length, c);
+ }
+
+ public static <T> void sort(T[] a, int lo, int hi, Comparator<? super T> c) {
+ if (c == null) {
+ Arrays.sort(a, lo, hi);
+ return;
+ }
+
+ rangeCheck(a.length, lo, hi);
+ int nRemaining = hi - lo;
+ if (nRemaining < 2)
+ return; // Arrays of size 0 and 1 are always sorted
+
+ // If array is small, do a "mini-TimSort" with no merges
+ if (nRemaining < MIN_MERGE) {
+ int initRunLen = countRunAndMakeAscending(a, lo, hi, c);
+ binarySort(a, lo, hi, lo + initRunLen, c);
+ return;
+ }
+
+ /**
+ * March over the array once, left to right, finding natural runs,
+ * extending short natural runs to minRun elements, and merging runs
+ * to maintain stack invariant.
+ */
+ TimSort<T> ts = new TimSort<T>(a, c);
+ int minRun = minRunLength(nRemaining);
+ do {
+ // Identify next run
+ int runLen = countRunAndMakeAscending(a, lo, hi, c);
+
+ // If run is short, extend to min(minRun, nRemaining)
+ if (runLen < minRun) {
+ int force = nRemaining <= minRun ? nRemaining : minRun;
+ binarySort(a, lo, lo + force, lo + runLen, c);
+ runLen = force;
+ }
+
+ // Push run onto pending-run stack, and maybe merge
+ ts.pushRun(lo, runLen);
+ ts.mergeCollapse();
+
+ // Advance to find next run
+ lo += runLen;
+ nRemaining -= runLen;
+ } while (nRemaining != 0);
+
+ // Merge all remaining runs to complete sort
+ assert lo == hi;
+ ts.mergeForceCollapse();
+ assert ts.stackSize == 1;
+ }
+
+ /**
+ * Sorts the specified portion of the specified array using a binary
+ * insertion sort. This is the best method for sorting small numbers
+ * of elements. It requires O(n log n) compares, but O(n^2) data
+ * movement (worst case).
+ *
+ * If the initial part of the specified range is already sorted,
+ * this method can take advantage of it: the method assumes that the
+ * elements from index {@code lo}, inclusive, to {@code start},
+ * exclusive are already sorted.
+ *
+ * @param a the array in which a range is to be sorted
+ * @param lo the index of the first element in the range to be sorted
+ * @param hi the index after the last element in the range to be sorted
+ * @param start the index of the first element in the range that is
+    *        not already known to be sorted ({@code lo <= start <= hi})
+    * @param c the comparator to use for the sort
+ */
+ @SuppressWarnings("fallthrough")
+ private static <T> void binarySort(T[] a, int lo, int hi, int start,
+ Comparator<? super T> c) {
+ assert lo <= start && start <= hi;
+ if (start == lo)
+ start++;
+ for ( ; start < hi; start++) {
+ T pivot = a[start];
+
+ // Set left (and right) to the index where a[start] (pivot) belongs
+ int left = lo;
+ int right = start;
+ assert left <= right;
+ /*
+ * Invariants:
+ * pivot >= all in [lo, left).
+ * pivot < all in [right, start).
+ */
+ while (left < right) {
+ int mid = (left + right) >>> 1;
+ if (c.compare(pivot, a[mid]) < 0)
+ right = mid;
+ else
+ left = mid + 1;
+ }
+ assert left == right;
+
+ /*
+ * The invariants still hold: pivot >= all in [lo, left) and
+ * pivot < all in [left, start), so pivot belongs at left. Note
+ * that if there are elements equal to pivot, left points to the
+ * first slot after them -- that's why this sort is stable.
+          * Slide elements over to make room for pivot.
+ */
+ int n = start - left; // The number of elements to move
+ // Switch is just an optimization for arraycopy in default case
+ switch(n) {
+ case 2: a[left + 2] = a[left + 1];
+ case 1: a[left + 1] = a[left];
+ break;
+ default: System.arraycopy(a, left, a, left + 1, n);
+ }
+ a[left] = pivot;
+ }
+ }
+
+ /**
+ * Returns the length of the run beginning at the specified position in
+ * the specified array and reverses the run if it is descending (ensuring
+ * that the run will always be ascending when the method returns).
+ *
+ * A run is the longest ascending sequence with:
+ *
+ * a[lo] <= a[lo + 1] <= a[lo + 2] <= ...
+ *
+ * or the longest descending sequence with:
+ *
+ * a[lo] > a[lo + 1] > a[lo + 2] > ...
+ *
+ * For its intended use in a stable mergesort, the strictness of the
+ * definition of "descending" is needed so that the call can safely
+ * reverse a descending sequence without violating stability.
+ *
+ * @param a the array in which a run is to be counted and possibly reversed
+ * @param lo index of the first element in the run
+ * @param hi index after the last element that may be contained in the run.
 It is required that {@code lo < hi}.
+    * @param c the comparator to use for the sort
+ * @return the length of the run beginning at the specified position in
+ * the specified array
+ */
+ private static <T> int countRunAndMakeAscending(T[] a, int lo, int hi,
+ Comparator<? super T> c) {
+ assert lo < hi;
+ int runHi = lo + 1;
+ if (runHi == hi)
+ return 1;
+
+ // Find end of run, and reverse range if descending
+ if (c.compare(a[runHi++], a[lo]) < 0) { // Descending
+ while(runHi < hi && c.compare(a[runHi], a[runHi - 1]) < 0)
+ runHi++;
+ reverseRange(a, lo, runHi);
+ } else { // Ascending
+ while (runHi < hi && c.compare(a[runHi], a[runHi - 1]) >= 0)
+ runHi++;
+ }
+
+ return runHi - lo;
+ }
+
+ /**
+ * Reverse the specified range of the specified array.
+ *
+ * @param a the array in which a range is to be reversed
+ * @param lo the index of the first element in the range to be reversed
+ * @param hi the index after the last element in the range to be reversed
+ */
+ private static void reverseRange(Object[] a, int lo, int hi) {
+ hi--;
+ while (lo < hi) {
+ Object t = a[lo];
+ a[lo++] = a[hi];
+ a[hi--] = t;
+ }
+ }
+
+ /**
+ * Returns the minimum acceptable run length for an array of the specified
+ * length. Natural runs shorter than this will be extended with
+ * {@link #binarySort}.
+ *
+ * Roughly speaking, the computation is:
+ *
+ * If n < MIN_MERGE, return n (it's too small to bother with fancy stuff).
+ * Else if n is an exact power of 2, return MIN_MERGE/2.
+ * Else return an int k, MIN_MERGE/2 <= k <= MIN_MERGE, such that n/k
+ * is close to, but strictly less than, an exact power of 2.
+ *
+ * For the rationale, see listsort.txt.
+ *
+ * @param n the length of the array to be sorted
+ * @return the length of the minimum run to be merged
+ */
+ private static int minRunLength(int n) {
+ assert n >= 0;
+ int r = 0; // Becomes 1 if any 1 bits are shifted off
+ while (n >= MIN_MERGE) {
+ r |= (n & 1);
+ n >>= 1;
+ }
+ return n + r;
+ }
+
+ /**
+ * Pushes the specified run onto the pending-run stack.
+ *
+ * @param runBase index of the first element in the run
+ * @param runLen the number of elements in the run
+ */
+ private void pushRun(int runBase, int runLen) {
+ this.runBase[stackSize] = runBase;
+ this.runLen[stackSize] = runLen;
+ stackSize++;
+ }
+
+ /**
+ * Examines the stack of runs waiting to be merged and merges adjacent runs
+ * until the stack invariants are reestablished:
+ *
+ * 1. runLen[i - 3] > runLen[i - 2] + runLen[i - 1]
+ * 2. runLen[i - 2] > runLen[i - 1]
+ *
+ * This method is called each time a new run is pushed onto the stack,
+ * so the invariants are guaranteed to hold for i < stackSize upon
+ * entry to the method.
+ */
+ private void mergeCollapse() {
+ while (stackSize > 1) {
+ int n = stackSize - 2;
+ if (n > 0 && runLen[n-1] <= runLen[n] + runLen[n+1]) {
+ if (runLen[n - 1] < runLen[n + 1])
+ n--;
+ mergeAt(n);
+ } else if (runLen[n] <= runLen[n + 1]) {
+ mergeAt(n);
+ } else {
+ break; // Invariant is established
+ }
+ }
+ }
+
+ /**
+ * Merges all runs on the stack until only one remains. This method is
+ * called once, to complete the sort.
+ */
+ private void mergeForceCollapse() {
+ while (stackSize > 1) {
+ int n = stackSize - 2;
+ if (n > 0 && runLen[n - 1] < runLen[n + 1])
+ n--;
+ mergeAt(n);
+ }
+ }
+
+ /**
+ * Merges the two runs at stack indices i and i+1. Run i must be
+ * the penultimate or antepenultimate run on the stack. In other words,
+ * i must be equal to stackSize-2 or stackSize-3.
+ *
+ * @param i stack index of the first of the two runs to merge
+ */
+ private void mergeAt(int i) {
+ assert stackSize >= 2;
+ assert i >= 0;
+ assert i == stackSize - 2 || i == stackSize - 3;
+
+ int base1 = runBase[i];
+ int len1 = runLen[i];
+ int base2 = runBase[i + 1];
+ int len2 = runLen[i + 1];
+ assert len1 > 0 && len2 > 0;
+ assert base1 + len1 == base2;
+
+ /*
+ * Record the length of the combined runs; if i is the 3rd-last
+ * run now, also slide over the last run (which isn't involved
+ * in this merge). The current run (i+1) goes away in any case.
+ */
+ runLen[i] = len1 + len2;
+ if (i == stackSize - 3) {
+ runBase[i + 1] = runBase[i + 2];
+ runLen[i + 1] = runLen[i + 2];
+ }
+ stackSize--;
+
+ /*
+ * Find where the first element of run2 goes in run1. Prior elements
+ * in run1 can be ignored (because they're already in place).
+ */
+ int k = gallopRight(a[base2], a, base1, len1, 0, c);
+ assert k >= 0;
+ base1 += k;
+ len1 -= k;
+ if (len1 == 0)
+ return;
+
+ /*
+ * Find where the last element of run1 goes in run2. Subsequent elements
+ * in run2 can be ignored (because they're already in place).
+ */
+ len2 = gallopLeft(a[base1 + len1 - 1], a, base2, len2, len2 - 1, c);
+ assert len2 >= 0;
+ if (len2 == 0)
+ return;
+
+ // Merge remaining runs, using tmp array with min(len1, len2) elements
+ if (len1 <= len2)
+ mergeLo(base1, len1, base2, len2);
+ else
+ mergeHi(base1, len1, base2, len2);
+ }
+
+ /**
+ * Locates the position at which to insert the specified key into the
+ * specified sorted range; if the range contains an element equal to key,
+ * returns the index of the leftmost equal element.
+ *
+ * @param key the key whose insertion point to search for
+ * @param a the array in which to search
+ * @param base the index of the first element in the range
+ * @param len the length of the range; must be > 0
+ * @param hint the index at which to begin the search, 0 <= hint < n.
+ * The closer hint is to the result, the faster this method will run.
+ * @param c the comparator used to order the range, and to search
+ * @return the int k, 0 <= k <= n such that a[b + k - 1] < key <= a[b + k],
+ * pretending that a[b - 1] is minus infinity and a[b + n] is infinity.
+ * In other words, key belongs at index b + k; or in other words,
+ * the first k elements of a should precede key, and the last n - k
+ * should follow it.
+ */
+ private static <T> int gallopLeft(T key, T[] a, int base, int len, int hint,
+ Comparator<? super T> c) {
+ assert len > 0 && hint >= 0 && hint < len;
+ int lastOfs = 0;
+ int ofs = 1;
+ if (c.compare(key, a[base + hint]) > 0) {
+ // Gallop right until a[base+hint+lastOfs] < key <= a[base+hint+ofs]
+ int maxOfs = len - hint;
+ while (ofs < maxOfs && c.compare(key, a[base + hint + ofs]) > 0) {
+ lastOfs = ofs;
+ ofs = (ofs << 1) + 1;
+ if (ofs <= 0) // int overflow
+ ofs = maxOfs;
+ }
+ if (ofs > maxOfs)
+ ofs = maxOfs;
+
+ // Make offsets relative to base
+ lastOfs += hint;
+ ofs += hint;
+ } else { // key <= a[base + hint]
+ // Gallop left until a[base+hint-ofs] < key <= a[base+hint-lastOfs]
+ final int maxOfs = hint + 1;
+ while (ofs < maxOfs && c.compare(key, a[base + hint - ofs]) <= 0) {
+ lastOfs = ofs;
+ ofs = (ofs << 1) + 1;
+ if (ofs <= 0) // int overflow
+ ofs = maxOfs;
+ }
+ if (ofs > maxOfs)
+ ofs = maxOfs;
+
+ // Make offsets relative to base
+ int tmp = lastOfs;
+ lastOfs = hint - ofs;
+ ofs = hint - tmp;
+ }
+ assert -1 <= lastOfs && lastOfs < ofs && ofs <= len;
+
+ /*
+ * Now a[base+lastOfs] < key <= a[base+ofs], so key belongs somewhere
+ * to the right of lastOfs but no farther right than ofs. Do a binary
+ * search, with invariant a[base + lastOfs - 1] < key <= a[base + ofs].
+ */
+ lastOfs++;
+ while (lastOfs < ofs) {
+ int m = lastOfs + ((ofs - lastOfs) >>> 1);
+
+ if (c.compare(key, a[base + m]) > 0)
+ lastOfs = m + 1; // a[base + m] < key
+ else
+ ofs = m; // key <= a[base + m]
+ }
+ assert lastOfs == ofs; // so a[base + ofs - 1] < key <= a[base + ofs]
+ return ofs;
+ }
+
+ /**
+ * Like gallopLeft, except that if the range contains an element equal to
+ * key, gallopRight returns the index after the rightmost equal element.
+ *
+ * @param key the key whose insertion point to search for
+ * @param a the array in which to search
+ * @param base the index of the first element in the range
+ * @param len the length of the range; must be > 0
+ * @param hint the index at which to begin the search, 0 <= hint < n.
+ * The closer hint is to the result, the faster this method will run.
+ * @param c the comparator used to order the range, and to search
+ * @return the int k, 0 <= k <= n such that a[b + k - 1] <= key < a[b + k]
+ */
+ private static <T> int gallopRight(T key, T[] a, int base, int len,
+ int hint, Comparator<? super T> c) {
+ assert len > 0 && hint >= 0 && hint < len;
+
+ int ofs = 1;
+ int lastOfs = 0;
+ if (c.compare(key, a[base + hint]) < 0) {
+ // Gallop left until a[b+hint - ofs] <= key < a[b+hint - lastOfs]
+ int maxOfs = hint + 1;
+ while (ofs < maxOfs && c.compare(key, a[base + hint - ofs]) < 0) {
+ lastOfs = ofs;
+ ofs = (ofs << 1) + 1;
+ if (ofs <= 0) // int overflow
+ ofs = maxOfs;
+ }
+ if (ofs > maxOfs)
+ ofs = maxOfs;
+
+ // Make offsets relative to b
+ int tmp = lastOfs;
+ lastOfs = hint - ofs;
+ ofs = hint - tmp;
+ } else { // a[b + hint] <= key
+ // Gallop right until a[b+hint + lastOfs] <= key < a[b+hint + ofs]
+ int maxOfs = len - hint;
+ while (ofs < maxOfs && c.compare(key, a[base + hint + ofs]) >= 0) {
+ lastOfs = ofs;
+ ofs = (ofs << 1) + 1;
+ if (ofs <= 0) // int overflow
+ ofs = maxOfs;
+ }
+ if (ofs > maxOfs)
+ ofs = maxOfs;
+
+ // Make offsets relative to b
+ lastOfs += hint;
+ ofs += hint;
+ }
+ assert -1 <= lastOfs && lastOfs < ofs && ofs <= len;
+
+ /*
+ * Now a[b + lastOfs] <= key < a[b + ofs], so key belongs somewhere to
+ * the right of lastOfs but no farther right than ofs. Do a binary
+ * search, with invariant a[b + lastOfs - 1] <= key < a[b + ofs].
+ */
+ lastOfs++;
+ while (lastOfs < ofs) {
+ int m = lastOfs + ((ofs - lastOfs) >>> 1);
+
+ if (c.compare(key, a[base + m]) < 0)
+ ofs = m; // key < a[b + m]
+ else
+ lastOfs = m + 1; // a[b + m] <= key
+ }
+ assert lastOfs == ofs; // so a[b + ofs - 1] <= key < a[b + ofs]
+ return ofs;
+ }
+
+ /**
+ * Merges two adjacent runs in place, in a stable fashion. The first
+ * element of the first run must be greater than the first element of the
+ * second run (a[base1] > a[base2]), and the last element of the first run
+ * (a[base1 + len1-1]) must be greater than all elements of the second run.
+ *
+ * For performance, this method should be called only when len1 <= len2;
+ * its twin, mergeHi should be called if len1 >= len2. (Either method
+ * may be called if len1 == len2.)
+ *
+ * @param base1 index of first element in first run to be merged
+ * @param len1 length of first run to be merged (must be > 0)
+ * @param base2 index of first element in second run to be merged
+ * (must be aBase + aLen)
+ * @param len2 length of second run to be merged (must be > 0)
+ */
+ private void mergeLo(int base1, int len1, int base2, int len2) {
+ assert len1 > 0 && len2 > 0 && base1 + len1 == base2;
+
+ // Copy first run into temp array
+ T[] a = this.a; // For performance
+ T[] tmp = ensureCapacity(len1);
+ System.arraycopy(a, base1, tmp, 0, len1);
+
+ int cursor1 = 0; // Indexes into tmp array
+ int cursor2 = base2; // Indexes int a
+ int dest = base1; // Indexes int a
+
+ // Move first element of second run and deal with degenerate cases
+ a[dest++] = a[cursor2++];
+ if (--len2 == 0) {
+ System.arraycopy(tmp, cursor1, a, dest, len1);
+ return;
+ }
+ if (len1 == 1) {
+ System.arraycopy(a, cursor2, a, dest, len2);
+ a[dest + len2] = tmp[cursor1]; // Last elt of run 1 to end of merge
+ return;
+ }
+
+ Comparator<? super T> c = this.c; // Use local variable for performance
+ int minGallop = this.minGallop; // " " " " "
+ outer:
+ while (true) {
+ int count1 = 0; // Number of times in a row that first run won
+ int count2 = 0; // Number of times in a row that second run won
+
+ /*
+ * Do the straightforward thing until (if ever) one run starts
+ * winning consistently.
+ */
+ do {
+ assert len1 > 1 && len2 > 0;
+ if (c.compare(a[cursor2], tmp[cursor1]) < 0) {
+ a[dest++] = a[cursor2++];
+ count2++;
+ count1 = 0;
+ if (--len2 == 0)
+ break outer;
+ } else {
+ a[dest++] = tmp[cursor1++];
+ count1++;
+ count2 = 0;
+ if (--len1 == 1)
+ break outer;
+ }
+ } while ((count1 | count2) < minGallop);
+
+ /*
+ * One run is winning so consistently that galloping may be a
+ * huge win. So try that, and continue galloping until (if ever)
+ * neither run appears to be winning consistently anymore.
+ */
+ do {
+ assert len1 > 1 && len2 > 0;
+ count1 = gallopRight(a[cursor2], tmp, cursor1, len1, 0, c);
+ if (count1 != 0) {
+ System.arraycopy(tmp, cursor1, a, dest, count1);
+ dest += count1;
+ cursor1 += count1;
+ len1 -= count1;
+ if (len1 <= 1) // len1 == 1 || len1 == 0
+ break outer;
+ }
+ a[dest++] = a[cursor2++];
+ if (--len2 == 0)
+ break outer;
+
+ count2 = gallopLeft(tmp[cursor1], a, cursor2, len2, 0, c);
+ if (count2 != 0) {
+ System.arraycopy(a, cursor2, a, dest, count2);
+ dest += count2;
+ cursor2 += count2;
+ len2 -= count2;
+ if (len2 == 0)
+ break outer;
+ }
+ a[dest++] = tmp[cursor1++];
+ if (--len1 == 1)
+ break outer;
+ minGallop--;
+ } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP);
+ if (minGallop < 0)
+ minGallop = 0;
+ minGallop += 2; // Penalize for leaving gallop mode
+ } // End of "outer" loop
+ this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field
+
+ if (len1 == 1) {
+ assert len2 > 0;
+ System.arraycopy(a, cursor2, a, dest, len2);
+ a[dest + len2] = tmp[cursor1]; // Last elt of run 1 to end of merge
+ } else if (len1 == 0) {
+ throw new IllegalArgumentException(
+ "Comparison method violates its general contract!");
+ } else {
+ assert len2 == 0;
+ assert len1 > 1;
+ System.arraycopy(tmp, cursor1, a, dest, len1);
+ }
+ }
+
+ /**
+ * Like mergeLo, except that this method should be called only if
+ * len1 >= len2; mergeLo should be called if len1 <= len2. (Either method
+ * may be called if len1 == len2.)
+ *
+ * @param base1 index of first element in first run to be merged
+ * @param len1 length of first run to be merged (must be > 0)
+ * @param base2 index of first element in second run to be merged
+ * (must be aBase + aLen)
+ * @param len2 length of second run to be merged (must be > 0)
+ */
+ private void mergeHi(int base1, int len1, int base2, int len2) {
+ assert len1 > 0 && len2 > 0 && base1 + len1 == base2;
+
+ // Copy second run into temp array
+ T[] a = this.a; // For performance
+ T[] tmp = ensureCapacity(len2);
+ System.arraycopy(a, base2, tmp, 0, len2);
+
+ int cursor1 = base1 + len1 - 1; // Indexes into a
+ int cursor2 = len2 - 1; // Indexes into tmp array
+ int dest = base2 + len2 - 1; // Indexes into a
+
+ // Move last element of first run and deal with degenerate cases
+ a[dest--] = a[cursor1--];
+ if (--len1 == 0) {
+ System.arraycopy(tmp, 0, a, dest - (len2 - 1), len2);
+ return;
+ }
+ if (len2 == 1) {
+ dest -= len1;
+ cursor1 -= len1;
+ System.arraycopy(a, cursor1 + 1, a, dest + 1, len1);
+ a[dest] = tmp[cursor2];
+ return;
+ }
+
+ Comparator<? super T> c = this.c; // Use local variable for performance
+ int minGallop = this.minGallop; // " " " " "
+ outer:
+ while (true) {
+ int count1 = 0; // Number of times in a row that first run won
+ int count2 = 0; // Number of times in a row that second run won
+
+ /*
+ * Do the straightforward thing until (if ever) one run
+ * appears to win consistently.
+ */
+ do {
+ assert len1 > 0 && len2 > 1;
+ if (c.compare(tmp[cursor2], a[cursor1]) < 0) {
+ a[dest--] = a[cursor1--];
+ count1++;
+ count2 = 0;
+ if (--len1 == 0)
+ break outer;
+ } else {
+ a[dest--] = tmp[cursor2--];
+ count2++;
+ count1 = 0;
+ if (--len2 == 1)
+ break outer;
+ }
+ } while ((count1 | count2) < minGallop);
+
+ /*
+ * One run is winning so consistently that galloping may be a
+ * huge win. So try that, and continue galloping until (if ever)
+ * neither run appears to be winning consistently anymore.
+ */
+ do {
+ assert len1 > 0 && len2 > 1;
+ count1 = len1 - gallopRight(tmp[cursor2], a, base1, len1, len1 - 1, c);
+ if (count1 != 0) {
+ dest -= count1;
+ cursor1 -= count1;
+ len1 -= count1;
+ System.arraycopy(a, cursor1 + 1, a, dest + 1, count1);
+ if (len1 == 0)
+ break outer;
+ }
+ a[dest--] = tmp[cursor2--];
+ if (--len2 == 1)
+ break outer;
+
+ count2 = len2 - gallopLeft(a[cursor1], tmp, 0, len2, len2 - 1, c);
+ if (count2 != 0) {
+ dest -= count2;
+ cursor2 -= count2;
+ len2 -= count2;
+ System.arraycopy(tmp, cursor2 + 1, a, dest + 1, count2);
+ if (len2 <= 1) // len2 == 1 || len2 == 0
+ break outer;
+ }
+ a[dest--] = a[cursor1--];
+ if (--len1 == 0)
+ break outer;
+ minGallop--;
+ } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP);
+ if (minGallop < 0)
+ minGallop = 0;
+ minGallop += 2; // Penalize for leaving gallop mode
+ } // End of "outer" loop
+ this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field
+
+ if (len2 == 1) {
+ assert len1 > 0;
+ dest -= len1;
+ cursor1 -= len1;
+ System.arraycopy(a, cursor1 + 1, a, dest + 1, len1);
+ a[dest] = tmp[cursor2]; // Move first elt of run2 to front of merge
+ } else if (len2 == 0) {
+ throw new IllegalArgumentException(
+ "Comparison method violates its general contract!");
+ } else {
+ assert len1 == 0;
+ assert len2 > 0;
+ System.arraycopy(tmp, 0, a, dest - (len2 - 1), len2);
+ }
+ }
+
+ /**
+ * Ensures that the external array tmp has at least the specified
+ * number of elements, increasing its size if necessary. The size
+ * increases exponentially to ensure amortized linear time complexity.
+ *
+ * @param minCapacity the minimum required capacity of the tmp array
+ * @return tmp, whether or not it grew
+ */
+ private T[] ensureCapacity(int minCapacity) {
+ if (tmp.length < minCapacity) {
+ // Compute smallest power of 2 > minCapacity
+ int newSize = minCapacity;
+ newSize |= newSize >> 1;
+ newSize |= newSize >> 2;
+ newSize |= newSize >> 4;
+ newSize |= newSize >> 8;
+ newSize |= newSize >> 16;
+ newSize++;
+
+ if (newSize < 0) // Not bloody likely!
+ newSize = minCapacity;
+ else
+ newSize = Math.min(newSize, a.length >>> 1);
+
+ @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"})
+ T[] newArray = (T[]) new Object[newSize];
+ tmp = newArray;
+ }
+ return tmp;
+ }
+
+ /**
+ * Checks that fromIndex and toIndex are in range, and throws an
+ * appropriate exception if they aren't.
+ *
+ * @param arrayLen the length of the array
+ * @param fromIndex the index of the first element of the range
+ * @param toIndex the index after the last element of the range
+ * @throws IllegalArgumentException if fromIndex > toIndex
+ * @throws ArrayIndexOutOfBoundsException if fromIndex < 0
+ * or toIndex > arrayLen
+ */
+ private static void rangeCheck(int arrayLen, int fromIndex, int toIndex) {
+ if (fromIndex > toIndex)
+ throw new IllegalArgumentException("fromIndex(" + fromIndex +
+ ") > toIndex(" + toIndex+")");
+ if (fromIndex < 0)
+ throw new ArrayIndexOutOfBoundsException(fromIndex);
+ if (toIndex > arrayLen)
+ throw new ArrayIndexOutOfBoundsException(toIndex);
+ }
+}
Property changes on: trunk/core/src/main/java/org/infinispan/util/TimSort.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/test/java/org/infinispan/container/FIFODataContainerTest.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -1,10 +1,9 @@
package org.infinispan.container;
-import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MortalCacheEntry;
-import org.infinispan.container.entries.TransientCacheEntry;
import org.infinispan.container.entries.TransientMortalCacheEntry;
+import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
import java.util.Random;
@@ -15,132 +14,58 @@
@Override
protected DataContainer createContainer() {
- return new FIFODataContainer(16);
+ return new FIFOSimpleDataContainer(16, 1);
}
+ @Override
+ protected Class<? extends InternalCacheEntry> transienttype() {
+ return TransientMortalCacheEntry.class;
+ }
+
+ @Override
+ protected Class<? extends InternalCacheEntry> immortaltype() {
+ return MortalCacheEntry.class;
+ }
+
public void testOrdering() {
long lifespan = 600000;
long idle = 600000;
- for (int i = 0; i < 10; i++) dc.put("k" + i, "value", -1, -1);
- for (int i = 10; i < 20; i++) dc.put("k" + i, "value", lifespan, -1);
- for (int i = 20; i < 30; i++) dc.put("k" + i, "value", -1, idle);
- for (int i = 30; i < 40; i++) dc.put("k" + i, "value", lifespan, idle);
+ for (int i = 0; i < 10; i++) {
+ dc.put("k" + i, "value", -1, -1);
+ TestingUtil.sleepThread(10);
+ }
+ for (int i = 10; i < 20; i++) {
+ dc.put("k" + i, "value", lifespan, -1);
+ TestingUtil.sleepThread(10);
+ }
+ for (int i = 20; i < 30; i++) {
+ dc.put("k" + i, "value", -1, idle);
+ TestingUtil.sleepThread(10);
+ }
+ for (int i = 30; i < 40; i++) {
+ dc.put("k" + i, "value", lifespan, idle);
+ TestingUtil.sleepThread(10);
+ }
// random visits
Random r = new Random();
- for (int i = 0; i < 100; i++) dc.get("k" + r.nextInt(40));
+ for (int i = 0; i < 100; i++) {
+ dc.get("k" + r.nextInt(40));
+ TestingUtil.sleepThread(10);
+ }
// ensure order is maintained.
int i = 0;
for (InternalCacheEntry ice : dc) {
assert ice.getKey().equals("k" + i);
- if (i < 10) assert ice instanceof ImmortalCacheEntry;
- if (i >= 10 && i < 20) assert ice instanceof MortalCacheEntry;
- if (i >= 20 && i < 30) assert ice instanceof TransientCacheEntry;
+ if (i < 10) assert ice.getClass().equals(immortaltype());
+ if (i >= 10 && i < 20) assert ice.getClass().equals(mortaltype());
+ if (i >= 20 && i < 30) assert ice.getClass().equals(transienttype());
if (i >= 30 && i < 40) assert ice instanceof TransientMortalCacheEntry;
i++;
}
}
- private void setInitialEntry() {
- FIFODataContainer ldc = (FIFODataContainer) dc;
- dc.put("k", "v", -1, -1);
-
- assert dc.size() == 1;
-
- FIFODataContainer.LinkedEntry tail = ldc.tail;
- FIFODataContainer.LinkedEntry head = ldc.head;
- FIFODataContainer.LinkedEntry e = ldc.head.n;
-
- assert head.n == e;
- assert head.p == tail;
- assert tail.n == head;
- assert tail.p == e;
- assert e.n == tail;
- assert e.p == head;
- assert !ldc.isMarkedForRemoval(e);
- }
-
- public void testInsertingLinks() {
- FIFODataContainer ldc = (FIFODataContainer) dc;
- assert dc.size() == 0;
- assert ldc.head.n == ldc.tail;
- assert ldc.tail.n == ldc.head;
- assert ldc.head.p == ldc.tail;
- assert ldc.tail.p == ldc.head;
-
- setInitialEntry();
-
- // add one more
- dc.put("k2", "v2", -1, -1);
-
- assert dc.size() == 2;
-
- FIFODataContainer.LinkedEntry tail = ldc.tail;
- FIFODataContainer.LinkedEntry head = ldc.head;
- FIFODataContainer.LinkedEntry le1 = head.n;
- FIFODataContainer.LinkedEntry le2 = le1.n;
-
- assert tail == le2.n;
- assert tail != le1.n;
- assert le1 != ldc.head;
- assert le2 != ldc.head;
- assert le1 != ldc.tail;
- assert le2 != ldc.tail;
- assert le1 != le2;
-
- assert le1.p == head;
- assert le1.n == le2;
- assert le2.p == le1;
- assert le2.n == tail;
-
- assert le1.e != null;
- assert le1.e.getKey().equals("k");
- assert le1.e.getValue().equals("v");
-
- assert le2.e != null;
- assert le2.e.getKey().equals("k2");
- assert le2.e.getValue().equals("v2");
- }
-
- public void testRemovingLinks() {
- FIFODataContainer aldc = (FIFODataContainer) dc;
- assert dc.size() == 0;
- assert aldc.head.n == aldc.tail;
- assert aldc.tail.n == aldc.head;
- assert aldc.head.p == aldc.tail;
- assert aldc.tail.p == aldc.head;
-
- setInitialEntry();
-
- dc.remove("k");
-
- assert dc.size() == 0;
- assert aldc.head.n == aldc.tail;
- assert aldc.tail.n == aldc.head;
- assert aldc.head.p == aldc.tail;
- assert aldc.tail.p == aldc.head;
- }
-
- public void testClear() {
- FIFODataContainer aldc = (FIFODataContainer) dc;
- assert dc.size() == 0;
- assert aldc.head.n == aldc.tail;
- assert aldc.tail.n == aldc.head;
- assert aldc.head.p == aldc.tail;
- assert aldc.tail.p == aldc.head;
-
- setInitialEntry();
-
- dc.clear();
-
- assert dc.size() == 0;
- assert aldc.head.n == aldc.tail;
- assert aldc.tail.n == aldc.head;
- assert aldc.head.p == aldc.tail;
- assert aldc.tail.p == aldc.head;
- }
-
public void testMultithreadAccess() throws InterruptedException {
assert dc.size() == 0;
int NUM_THREADS = 10;
Modified: trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/test/java/org/infinispan/container/LRUDataContainerTest.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -1,31 +1,58 @@
package org.infinispan.container;
-import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
-import org.infinispan.container.entries.MortalCacheEntry;
import org.infinispan.container.entries.TransientCacheEntry;
import org.infinispan.container.entries.TransientMortalCacheEntry;
+import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "container.LRUDataContainerTest")
public class LRUDataContainerTest extends FIFODataContainerTest {
@Override
protected DataContainer createContainer() {
- return new LRUDataContainer(16);
+ return new LRUSimpleDataContainer(16, 1);
}
@Override
+ protected Class<? extends InternalCacheEntry> mortaltype() {
+ return TransientMortalCacheEntry.class;
+ }
+
+ @Override
+ protected Class<? extends InternalCacheEntry> immortaltype() {
+ return TransientCacheEntry.class;
+ }
+
+ @Override
+ protected Class<? extends InternalCacheEntry> transienttype() {
+ return TransientCacheEntry.class;
+ }
+
+ @Override
public void testOrdering() {
long lifespan = 600000;
long idle = 600000;
- for (int i = 0; i < 10; i++) dc.put(i, "value", -1, -1);
- for (int i = 10; i < 20; i++) dc.put(i, "value", lifespan, -1);
- for (int i = 20; i < 30; i++) dc.put(i, "value", -1, idle);
- for (int i = 30; i < 40; i++) dc.put(i, "value", lifespan, idle);
+ for (int i = 0; i < 10; i++) {
+ dc.put(i, "value", -1, -1);
+ TestingUtil.sleepThread(10);
+ }
+ for (int i = 10; i < 20; i++) {
+ dc.put(i, "value", lifespan, -1);
+ TestingUtil.sleepThread(10);
+ }
+ for (int i = 20; i < 30; i++) {
+ dc.put(i, "value", -1, idle);
+ TestingUtil.sleepThread(10);
+ }
+ for (int i = 30; i < 40; i++) {
+ dc.put(i, "value", lifespan, idle);
+ TestingUtil.sleepThread(10);
+ }
// Visit all ODD numbered elements
for (int i = 0; i < 40; i++) {
if (i % 2 == 1) dc.get(i);
+ TestingUtil.sleepThread(10);
}
// ensure order is maintained. The first 20 elements should be EVEN.
@@ -37,9 +64,9 @@
else
assert key % 2 == 1;
- if (key < 10) assert ice instanceof ImmortalCacheEntry;
- if (key >= 10 && key < 20) assert ice instanceof MortalCacheEntry;
- if (key >= 20 && key < 30) assert ice instanceof TransientCacheEntry;
+ if (key < 10) assert ice.getClass().equals(immortaltype());
+ if (key >= 10 && key < 20) assert ice.getClass().equals(mortaltype());
+ if (key >= 20 && key < 30) assert ice.getClass().equals(transienttype()) : "Expected " + transienttype() + " for key " + key + " but was " + ice.getClass();
if (key >= 30 && key < 40) assert ice instanceof TransientMortalCacheEntry;
i++;
}
Modified: trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/test/java/org/infinispan/container/SimpleDataContainerTest.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -38,7 +38,7 @@
Thread.sleep(100);
InternalCacheEntry entry = dc.get("k");
- assert entry instanceof TransientCacheEntry;
+ assert entry.getClass().equals(transienttype());
assert entry.getLastUsed() <= System.currentTimeMillis();
long entryLastUsed = entry.getLastUsed();
Thread.sleep(100);
@@ -53,7 +53,7 @@
entry = dc.get("k");
assert entry != null : "Entry should not be null!";
- assert entry instanceof MortalCacheEntry : "Expected MortalCacheEntry, was " + entry.getClass().getSimpleName();
+ assert entry.getClass().equals(mortaltype()) : "Expected "+mortaltype()+", was " + entry.getClass().getSimpleName();
assert entry.getCreated() <= System.currentTimeMillis();
dc.put("k", "v", 0, -1);
@@ -72,23 +72,20 @@
long idle = 600000;
dc.put("k", "v", -1, -1);
InternalCacheEntry ice = dc.get("k");
- assert ice instanceof ImmortalCacheEntry;
+ assert ice.getClass().equals(immortaltype());
assert ice.getExpiryTime() == -1;
- assert ice.getLastUsed() == -1;
- assert ice.getCreated() == -1;
assert ice.getMaxIdle() == -1;
assert ice.getLifespan() == -1;
dc.put("k", "v", -1, idle);
long oldTime = System.currentTimeMillis();
Thread.sleep(100); // for time calc granularity
ice = dc.get("k");
- assert ice instanceof TransientCacheEntry;
+ assert ice.getClass().equals(transienttype());
assert ice.getExpiryTime() == -1;
assert ice.getLastUsed() > oldTime;
Thread.sleep(100); // for time calc granularity
assert ice.getLastUsed() < System.currentTimeMillis();
assert ice.getMaxIdle() == idle;
- assert ice.getCreated() == -1;
assert ice.getLifespan() == -1;
oldTime = System.currentTimeMillis();
@@ -101,18 +98,30 @@
assert ice.getLastUsed() < System.currentTimeMillis();
}
+ protected Class<? extends InternalCacheEntry> mortaltype() {
+ return MortalCacheEntry.class;
+ }
+
+ protected Class<? extends InternalCacheEntry> immortaltype() {
+ return ImmortalCacheEntry.class;
+ }
+
+ protected Class<? extends InternalCacheEntry> transienttype() {
+ return TransientCacheEntry.class;
+ }
+
public void testExpirableToImmortalAndBack() {
dc.put("k", "v", 6000000, -1);
assert dc.containsKey("k");
- assert dc.get("k") instanceof MortalCacheEntry;
+ assert dc.get("k").getClass().equals(mortaltype());
dc.put("k", "v2", -1, -1);
assert dc.containsKey("k");
- assert dc.get("k") instanceof ImmortalCacheEntry;
+ assert dc.get("k").getClass().equals(immortaltype());
dc.put("k", "v3", -1, 6000000);
assert dc.containsKey("k");
- assert dc.get("k") instanceof TransientCacheEntry;
+ assert dc.get("k").getClass().equals(transienttype());
dc.put("k", "v3", 6000000, 6000000);
assert dc.containsKey("k");
@@ -120,7 +129,7 @@
dc.put("k", "v", 6000000, -1);
assert dc.containsKey("k");
- assert dc.get("k") instanceof MortalCacheEntry;
+ assert dc.get("k").getClass().equals(mortaltype());
}
public void testKeySet() {
@@ -205,9 +214,10 @@
expected.add(Immutables.immutableInternalCacheEntry(dc.get("k3")));
expected.add(Immutables.immutableInternalCacheEntry(dc.get("k4")));
- for (Map.Entry o : dc.entrySet()) assert expected.remove(o);
+ Set actual = new HashSet();
+ for (Map.Entry o : dc.entrySet()) actual.add(o);
- assert expected.isEmpty() : "Did not see keys " + expected + " in iterator!";
+ assert actual.equals(expected) : "Expected to see keys " + expected + " but only saw " + actual;
}
public void testGetDuringKeySetLoop() {
Modified: trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java
===================================================================
--- trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java 2009-12-08 13:59:06 UTC (rev 1263)
+++ trunk/core/src/test/java/org/infinispan/stress/DataContainerStressTest.java 2009-12-09 16:54:43 UTC (rev 1264)
@@ -1,16 +1,12 @@
package org.infinispan.stress;
-import org.infinispan.container.DataContainer;
-import org.infinispan.container.FIFOAMRDataContainer;
-import org.infinispan.container.FIFODataContainer;
-import org.infinispan.container.LRUAMRDataContainer;
-import org.infinispan.container.LRUDataContainer;
-import org.infinispan.container.SimpleDataContainer;
+import org.infinispan.container.*;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
import java.util.Map;
+import java.util.Random;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -25,11 +21,16 @@
@Test(testName = "stress.DataContainerStressTest", groups = "stress", enabled = false,
description = "Disabled by default, designed to be run manually.")
public class DataContainerStressTest {
- final CountDownLatch latch = new CountDownLatch(1);
- final int RUN_TIME_MILLIS = 60 * 1000; // 1 min
+ volatile CountDownLatch latch;
+ final int RUN_TIME_MILLIS = 45 * 1000; // 45 sec
+ final int WARMUP_TIME_MILLIS = 10 * 1000; // 10 sec
final int num_loops = 10000;
+ final int warmup_num_loops = 10000;
boolean use_time = true;
+ final int NUM_KEYS = 100;
+
private static final Log log = LogFactory.getLog(DataContainerStressTest.class);
+ private static final Random R = new Random();
public void testSimpleDataContainer() throws InterruptedException {
doTest(new SimpleDataContainer(5000));
@@ -39,32 +40,39 @@
doTest(new FIFODataContainer(5000));
}
- public void testFIFOAMRDataContainer() throws InterruptedException {
- doTest(new FIFOAMRDataContainer(5000));
+ public void testLRUDataContainer() throws InterruptedException {
+ doTest(new LRUDataContainer(5000));
}
- public void testLRUAMRDataContainer() throws InterruptedException {
- doTest(new LRUAMRDataContainer(5000));
+ public void testLRUSimpleDataContainer() throws InterruptedException {
+ doTest(new LRUSimpleDataContainer(5000));
}
- public void testLRUDataContainer() throws InterruptedException {
- doTest(new LRUDataContainer(5000));
+ public void testFIFOSimpleDataContainer() throws InterruptedException {
+ doTest(new FIFOSimpleDataContainer(5000));
}
private void doTest(final DataContainer dc) throws InterruptedException {
+ doTest(dc, true);
+ doTest(dc, false);
+ }
+
+ private void doTest(final DataContainer dc, boolean warmup) throws InterruptedException {
+ latch = new CountDownLatch(1);
final String key = "key";
final Map<String, String> perf = new ConcurrentSkipListMap<String, String>();
final AtomicBoolean run = new AtomicBoolean(true);
+ final int actual_num_loops = warmup ? warmup_num_loops : num_loops;
Thread getter = new Thread() {
public void run() {
waitForStart();
long start = System.nanoTime();
int runs = 0;
- while (use_time && run.get() || runs < num_loops) {
+ while (use_time && run.get() || runs < actual_num_loops) {
if (runs % 100000 == 0) log.info("GET run # " + runs);
// TestingUtil.sleepThread(10);
- dc.get(key);
+ dc.get(key + R.nextInt(NUM_KEYS));
runs++;
}
perf.put("GET", opsPerMS(System.nanoTime() - start, runs));
@@ -76,10 +84,10 @@
waitForStart();
long start = System.nanoTime();
int runs = 0;
- while (use_time && run.get() || runs < num_loops) {
+ while (use_time && run.get() || runs < actual_num_loops) {
if (runs % 100000 == 0) log.info("PUT run # " + runs);
// TestingUtil.sleepThread(10);
- dc.put(key, "value", -1, -1);
+ dc.put(key + R.nextInt(NUM_KEYS), "value", -1, -1);
runs++;
}
perf.put("PUT", opsPerMS(System.nanoTime() - start, runs));
@@ -91,10 +99,10 @@
waitForStart();
long start = System.nanoTime();
int runs = 0;
- while (use_time && run.get() || runs < num_loops) {
+ while (use_time && run.get() || runs < actual_num_loops) {
if (runs % 100000 == 0) log.info("REM run # " + runs);
// TestingUtil.sleepThread(10);
- dc.remove(key);
+ dc.remove(key + R.nextInt(NUM_KEYS));
runs++;
}
perf.put("REM", opsPerMS(System.nanoTime() - start, runs));
@@ -106,10 +114,10 @@
latch.countDown();
// wait some time
- Thread.sleep(RUN_TIME_MILLIS);
+ Thread.sleep(warmup ? WARMUP_TIME_MILLIS : RUN_TIME_MILLIS);
run.set(false);
for (Thread t : threads) t.join();
- log.warn("{0}: Performance: {1}", dc.getClass().getSimpleName(), perf);
+ if (!warmup) log.warn("{0}: Performance: {1}", dc.getClass().getSimpleName(), perf);
}
private void waitForStart() {
More information about the infinispan-commits
mailing list