JBoss Cache SVN: r5965 - searchable/trunk/src/main/java/org/jboss/cache/search.
by jbosscache-commits@lists.jboss.org
Author: navssurtani
Date: 2008-06-10 05:13:33 -0400 (Tue, 10 Jun 2008)
New Revision: 5965
Modified:
searchable/trunk/src/main/java/org/jboss/cache/search/QueryResultImpl.java
searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheFactory.java
searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheImpl.java
searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java
Log:
Started working on adding Work objects to queue
Modified: searchable/trunk/src/main/java/org/jboss/cache/search/QueryResultImpl.java
===================================================================
--- searchable/trunk/src/main/java/org/jboss/cache/search/QueryResultImpl.java 2008-06-09 14:32:08 UTC (rev 5964)
+++ searchable/trunk/src/main/java/org/jboss/cache/search/QueryResultImpl.java 2008-06-10 09:13:33 UTC (rev 5965)
@@ -18,6 +18,7 @@
{
private Query query;
private Cache cache;
+ private Class[] classes;
public QueryResultImpl(Query luceneQuery, Cache cache)
{
@@ -25,6 +26,14 @@
this.cache = cache;
}
+ public QueryResultImpl(Query luceneQuery, Cache cache, Class[] classes)
+ {
+ this.query = luceneQuery;
+ this.cache = cache;
+ this.classes = classes;
+
+ }
+
public FullTextQuery setSort(Sort sort)
{
return null; //To change body of implemented methods use File | Settings | File Templates.
Modified: searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheFactory.java
===================================================================
--- searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheFactory.java 2008-06-09 14:32:08 UTC (rev 5964)
+++ searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheFactory.java 2008-06-10 09:13:33 UTC (rev 5965)
@@ -1,5 +1,6 @@
package org.jboss.cache.search;
+import org.hibernate.search.impl.SearchFactoryImpl;
import org.jboss.cache.Cache;
/**
@@ -11,6 +12,9 @@
*/
public class SearchableCacheFactory
{
+
+ private SearchFactoryImpl searchFactory;
+
public SearchableCache createSearchableCache (Cache c, Class... classes)
{
Modified: searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheImpl.java
===================================================================
--- searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheImpl.java 2008-06-09 14:32:08 UTC (rev 5964)
+++ searchable/trunk/src/main/java/org/jboss/cache/search/SearchableCacheImpl.java 2008-06-10 09:13:33 UTC (rev 5965)
@@ -12,7 +12,6 @@
import org.jboss.cache.config.Configuration;
import org.jgroups.Address;
-import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -28,6 +27,7 @@
{
// this is the ACTUAL cache. that does all the work.
private Cache cache;
+ private Class[] classes;
public SearchableCacheImpl(Cache cache)
{
@@ -39,6 +39,12 @@
return new QueryResultImpl(luceneQuery, cache);
}
+ public QueryResult find(Query luceneQuery, Class... classes)
+ {
+ return new QueryResultImpl(luceneQuery, cache, classes);
+
+ }
+
public Configuration getConfiguration()
{
return cache.getConfiguration();
Modified: searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java
===================================================================
--- searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java 2008-06-09 14:32:08 UTC (rev 5964)
+++ searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java 2008-06-10 09:13:33 UTC (rev 5965)
@@ -2,6 +2,7 @@
import org.hibernate.search.backend.Work;
import org.hibernate.search.backend.WorkType;
+import org.hibernate.search.impl.SearchFactoryImpl;
import org.jboss.cache.notifications.annotation.CacheListener;
import org.jboss.cache.notifications.annotation.NodeModified;
import org.jboss.cache.notifications.event.NodeModifiedEvent;
@@ -19,14 +20,15 @@
@CacheListener
public class SearchableListener
{
-
+
+ private SearchFactoryImpl searchFactory;
+
@NodeModified
public void updateLuceneIndexes(NodeModifiedEvent event)
{
if (!event.isPre())
{
-
// TODO: Update Lucene Indexes. See Hibernate Search's FullTextEventListener class for details on what to do.
}
else
@@ -39,18 +41,17 @@
{
Map dataMap = event.getData();
- for (Object key: dataMap.keySet())
+ for (Object key : dataMap.keySet())
{
String keyString = (String) key;
- String docId = Transformer.generateId(event.getFqn(), keyString) ;
+ String docId = Transformer.generateId(event.getFqn(), keyString);
- new Work (dataMap.get(key), docId, WorkType.DELETE);
- new Work (dataMap.get(key), docId, WorkType.ADD);
+ new Work(dataMap.get(key), docId, WorkType.ADD);
- //TODO: Add to queue.
+ searchFactory.getWorker().performWork(new Work(dataMap.get(key), docId, WorkType.DELETE));
}
-
}
@@ -58,18 +59,17 @@
{
Map dataMap = event.getData();
- for(Object key: dataMap.keySet())
+ for (Object key : dataMap.keySet())
{
String keyString = (String) key;
String docId = Transformer.generateId(event.getFqn(), keyString);
- new Work (dataMap.get(key), docId, WorkType.DELETE);
+ new Work(dataMap.get(key), docId, WorkType.DELETE);
//TODO: Add to queue.
}
-
}
}
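For readers following the listener work above: the direction of this revision is to translate every modified cache key into a Lucene document id, wrap the cached value in a Hibernate Search Work object, and hand it to the SearchFactory's worker queue. The snippet below is a minimal sketch of that completed pattern, not the committed code; it reuses the single-argument performWork call exactly as this revision writes it (the final Worker contract may take additional arguments), and it assumes searchFactory is supplied by SearchableCacheFactory, which this revision only stubs out.

package org.jboss.cache.search;

import java.util.Map;

import org.hibernate.search.backend.Work;
import org.hibernate.search.backend.WorkType;
import org.hibernate.search.impl.SearchFactoryImpl;
import org.jboss.cache.notifications.annotation.CacheListener;
import org.jboss.cache.notifications.annotation.NodeModified;
import org.jboss.cache.notifications.event.NodeModifiedEvent;

@CacheListener
public class QueueingListenerSketch
{
   // Assumed to be supplied by SearchableCacheFactory; this revision only declares the field.
   private SearchFactoryImpl searchFactory;

   @NodeModified
   public void updateLuceneIndexes(NodeModifiedEvent event)
   {
      if (event.isPre())
         return;

      Map<?, ?> dataMap = event.getData();
      for (Object key : dataMap.keySet())
      {
         // One Lucene document per (Fqn, key) pair, as generated by Transformer in this package.
         String docId = Transformer.generateId(event.getFqn(), (String) key);

         // Drop any stale document for this id, then queue the fresh value for indexing.
         searchFactory.getWorker().performWork(new Work(dataMap.get(key), docId, WorkType.DELETE));
         searchFactory.getWorker().performWork(new Work(dataMap.get(key), docId, WorkType.ADD));
      }
   }
}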
JBoss Cache SVN: r5963 - in pojo/trunk/src: main/java/org/jboss/cache/pojo/impl and 4 other directories.
by jbosscache-commits@lists.jboss.org
Author: jason.greene@jboss.com
Date: 2008-06-06 22:29:28 -0400 (Fri, 06 Jun 2008)
New Revision: 5963
Added:
pojo/trunk/src/main/java/org/jboss/cache/pojo/util/ConcurrentReferenceHashMap.java
Modified:
pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArray.java
pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArrayRegistry.java
pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/ArrayHandler.java
pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/CollectionClassHandler.java
pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/MethodDeclarations.java
pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/PojoUtil.java
pojo/trunk/src/main/java/org/jboss/cache/pojo/interceptors/PojoTxUndoInterceptor.java
pojo/trunk/src/test/java/org/jboss/cache/pojo/TxUndoTest.java
pojo/trunk/src/test/java/org/jboss/cache/pojo/test/ArrayObject.java
Log:
Fix undo support of collections and arrays
Use weak references to hold on to arrays
Modified: pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArray.java
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArray.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArray.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -40,7 +40,7 @@
protected Fqn<?> fqn;
private int length = -1;
private Class<?> type;
-
+
public static CachedArray load(Fqn<?> fqn, PojoCacheImpl cache, Class<?> type)
{
boolean primitive = CachedType.isImmediate(type.getComponentType());
@@ -52,73 +52,73 @@
{
Class<?> type = originalArray.getClass();
assert type.isArray();
-
+
Class<?> component = type.getComponentType();
boolean primitive = CachedType.isImmediate(component);
CachedArray array = primitive ? new CachedPrimitiveArray(fqn, type, cache) : new CachedObjectArray(fqn, type, cache);
-
+
int length = Array.getLength(originalArray);
for (int c = 0; c < length; c++)
array.set(c, Array.get(originalArray, c));
-
+
array.length = length;
array.writeInfo();
-
+
return array;
}
-
+
protected CachedArray(Fqn<?> fqn, Class<?> type, PojoCacheImpl cache)
{
this.fqn = fqn;
this.type = type;
this.cache = cache;
}
-
+
public Fqn<?> getFqn()
{
return fqn;
}
-
+
public abstract void set(int index, Object element);
-
+
public abstract Object get(int index);
-
+
protected void writeInfo()
{
cache.getCache().put(fqn, LENGTH, length);
}
-
+
public void destroy()
{
cache.getCache().removeNode(fqn);
- length = 0;
+ length = -1;
}
-
+
public int length()
{
if (length == -1)
{
Integer i = (Integer)cache.getCache().get(fqn, LENGTH);
- length = i != null ? i.intValue() : 0;
+ length = i != null ? i.intValue() : 0;
}
-
+
return length;
}
-
+
public Object toArray()
- {
+ {
try
{
int len = length();
Object array = Array.newInstance(type.getComponentType(), len);
for (int i = 0; i < len; i++)
Array.set(array, i, get(i));
-
+
return array;
}
catch (Exception e)
{
throw new CacheException("Could not construct array " + type);
}
- }
+ }
}
\ No newline at end of file
Modified: pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArrayRegistry.java
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArrayRegistry.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/collection/CachedArrayRegistry.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -21,29 +21,34 @@
*/
package org.jboss.cache.pojo.collection;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.EnumSet;
import java.util.concurrent.ConcurrentMap;
+import org.jboss.cache.pojo.util.ConcurrentReferenceHashMap;
+import org.jboss.cache.pojo.util.ConcurrentReferenceHashMap.Option;
+import org.jboss.cache.pojo.util.ConcurrentReferenceHashMap.ReferenceType;
+
/**
- * A n internal registry which is responsible for mapping a Java array
+ * An internal registry which is responsible for mapping a Java array
* instance to a <code>CachedArray</code>.
*
* @author Jason T. Greene
*/
public class CachedArrayRegistry
{
- private static ConcurrentMap<Object, CachedArray> map = new ConcurrentHashMap<Object, CachedArray>();
-
+ private static ConcurrentMap<Object, CachedArray> map = new ConcurrentReferenceHashMap<Object, CachedArray>
+ (16, ReferenceType.WEAK, ReferenceType.STRONG, EnumSet.of(Option.IDENTITY_COMPARISONS));
+
public static void register(Object array, CachedArray cached)
{
map.put(array, cached);
}
-
+
public static CachedArray unregister(Object array)
{
return map.remove(array);
}
-
+
public static CachedArray lookup(Object array)
{
return map.get(array);
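As an aside on the registry change just above: the old ConcurrentHashMap held a strong reference to every attached array, so an array could never be garbage collected while it stayed registered; the replacement holds its keys weakly (and compares them by identity, which is what array keys need), so entries for unreachable arrays are reclaimed lazily. Below is a self-contained sketch of that storage choice, using the same constructor arguments as the diff; the value type and class name here are placeholders.

import java.util.EnumSet;
import java.util.concurrent.ConcurrentMap;

import org.jboss.cache.pojo.util.ConcurrentReferenceHashMap;
import org.jboss.cache.pojo.util.ConcurrentReferenceHashMap.Option;
import org.jboss.cache.pojo.util.ConcurrentReferenceHashMap.ReferenceType;

public class WeakIdentityRegistrySketch
{
   // Weak, identity-compared keys: the map never pins a registered array in memory,
   // and only the exact same array instance can be used to look its entry up again.
   private static final ConcurrentMap<Object, String> map =
         new ConcurrentReferenceHashMap<Object, String>(
               16, ReferenceType.WEAK, ReferenceType.STRONG, EnumSet.of(Option.IDENTITY_COMPARISONS));

   public static void main(String[] args)
   {
      Object array = new int[8];
      map.put(array, "cached representation");

      // Lookup is by reference, so an equal-but-distinct array does not match.
      System.out.println(map.get(array));        // prints the value
      System.out.println(map.get(new int[8]));   // prints null

      // Once 'array' is unreachable its entry becomes eligible for lazy removal,
      // which happens during subsequent mutating operations on the map.
      array = null;
      System.gc();
   }
}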
Modified: pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/ArrayHandler.java
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/ArrayHandler.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/ArrayHandler.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -33,6 +33,7 @@
public class ArrayHandler extends AbstractHandler
{
private final PojoCacheImpl cache;
+ private final PojoUtil util = new PojoUtil();
ArrayHandler(PojoCacheImpl cache)
{
@@ -55,7 +56,7 @@
cache.getCache().put(fqn, PojoInstance.KEY, pojoInstance);
CachedArray cached = CachedArray.create(fqn, cache, obj);
- CachedArrayRegistry.register(obj, cached);
+ util.attachArray(obj, cached);
}
@Override
@@ -71,9 +72,11 @@
@Override
protected Object remove(Fqn<?> fqn, Fqn<?> referencingFqn, Object obj)
{
- CachedArray cached = CachedArrayRegistry.unregister(obj);
- if (cached != null)
+ CachedArray cached = CachedArrayRegistry.lookup(obj);
+ if (cached != null) {
+ util.detachArray(obj, cached);
cached.destroy();
+ }
return obj;
}
Modified: pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/CollectionClassHandler.java
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/CollectionClassHandler.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/CollectionClassHandler.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -44,12 +44,12 @@
cache_ = pCache_.getCache();
internal_ = internal;
}
-
+
protected Fqn<?> getFqn(Object collection)
{
if (! (collection instanceof ClassProxy))
return null;
-
+
BaseInterceptor interceptor = CollectionInterceptorUtil.getInterceptor((ClassProxy) collection);
return interceptor != null ? interceptor.getFqn() : null;
}
@@ -278,9 +278,9 @@
AbstractCollectionInterceptor interceptor = (AbstractCollectionInterceptor) CollectionInterceptorUtil.getInterceptor((ClassProxy) obj);
// detach the interceptor. This will trigger a copy and remove.
- interceptor.detach(true);
+ (new PojoUtil()).detachCollectionInterceptor(interceptor);
cache_.removeNode(fqn);
-
+
return interceptor.getCurrentCopy();
}
Modified: pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/MethodDeclarations.java
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/MethodDeclarations.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/MethodDeclarations.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -7,14 +7,16 @@
package org.jboss.cache.pojo.impl;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.List;
+
import org.jboss.aop.InstanceAdvisor;
import org.jboss.aop.advice.Interceptor;
import org.jboss.cache.Fqn;
+import org.jboss.cache.pojo.collection.CachedArray;
+import org.jboss.cache.pojo.interceptors.dynamic.AbstractCollectionInterceptor;
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.util.List;
-
/**
* Method declarations for rollback method mostly.
*
@@ -25,8 +27,10 @@
{
public static final Method attachInterceptor;
public static final Method detachInterceptor;
+ public static final Method detachCollectionInterceptor;
public static final Method undoAttachInterceptor;
public static final Method undoDetachInterceptor;
+ public static final Method undoDetachCollectionInterceptor;
;
public static final Method inMemorySubstitution;
;
@@ -36,6 +40,10 @@
public static final Method decrementReferenceCount;
public static final Method undoIncrementReferenceCount;
public static final Method undoDecrementReferenceCount;
+ public static final Method attachArray;
+ public static final Method detachArray;
+ public static final Method undoAttachArray;
+ public static final Method undoDetachArray;
static
{
@@ -45,10 +53,15 @@
new Class[]{Object.class, InstanceAdvisor.class, Interceptor.class});
detachInterceptor = PojoUtil.class.getDeclaredMethod("detachInterceptor",
new Class[]{InstanceAdvisor.class, Interceptor.class});
+ detachCollectionInterceptor = PojoUtil.class.getDeclaredMethod("detachCollectionInterceptor",
+ new Class[]{AbstractCollectionInterceptor.class});
+
undoAttachInterceptor = PojoUtil.class.getDeclaredMethod("undoAttachInterceptor",
new Class[]{Object.class, InstanceAdvisor.class, Interceptor.class});
undoDetachInterceptor = PojoUtil.class.getDeclaredMethod("undoDetachInterceptor",
new Class[]{InstanceAdvisor.class, Interceptor.class});
+ undoDetachCollectionInterceptor = PojoUtil.class.getDeclaredMethod("undoDetachCollectionInterceptor",
+ new Class[]{AbstractCollectionInterceptor.class});
inMemorySubstitution = PojoUtil.class.getDeclaredMethod("inMemorySubstitution",
new Class[]{Object.class, Field.class, Object.class});
undoInMemorySubstitution = PojoUtil.class.getDeclaredMethod("undoInMemorySubstitution",
@@ -61,6 +74,11 @@
new Class[]{Fqn.class, int.class, List.class});
undoDecrementReferenceCount = PojoUtil.class.getDeclaredMethod("undoDecrementReferenceCount",
new Class[]{Fqn.class, int.class, List.class});
+
+ attachArray = PojoUtil.class.getDeclaredMethod("attachArray", new Class[]{Object.class, CachedArray.class});
+ detachArray = PojoUtil.class.getDeclaredMethod("detachArray", new Class[]{Object.class, CachedArray.class});
+ undoAttachArray = PojoUtil.class.getDeclaredMethod("undoAttachArray", new Class[]{Object.class, CachedArray.class});
+ undoDetachArray = PojoUtil.class.getDeclaredMethod("undoDetachArray", new Class[]{Object.class, CachedArray.class});
}
catch (NoSuchMethodException e)
{
Modified: pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/PojoUtil.java
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/PojoUtil.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/impl/PojoUtil.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -17,6 +17,9 @@
import org.jboss.cache.Fqn;
import org.jboss.cache.pojo.PojoCacheException;
import org.jboss.cache.pojo.annotation.TxUndo;
+import org.jboss.cache.pojo.collection.CachedArray;
+import org.jboss.cache.pojo.collection.CachedArrayRegistry;
+import org.jboss.cache.pojo.interceptors.dynamic.AbstractCollectionInterceptor;
import org.jboss.cache.pojo.interceptors.dynamic.CacheFieldInterceptor;
/**
@@ -41,6 +44,21 @@
_detachInterceptor(advisor, interceptor);
}
+ @TxUndo
+ public void detachCollectionInterceptor(AbstractCollectionInterceptor interceptor) {
+ interceptor.detach(true);
+ }
+
+ @TxUndo
+ public void attachArray(Object array, CachedArray cached) {
+ CachedArrayRegistry.register(array, cached);
+ }
+
+ @TxUndo
+ public void detachArray(Object array, CachedArray cached) {
+ CachedArrayRegistry.unregister(array);
+ }
+
public void undoAttachInterceptor(Object pojo, InstanceAdvisor advisor, Interceptor interceptor)
{
_detachInterceptor(advisor, interceptor);
@@ -57,6 +75,19 @@
_attachInterceptor(pojo, advisor, interceptor);
}
+ public void undoDetachCollectionInterceptor(AbstractCollectionInterceptor interceptor) {
+ interceptor.attach(null, false);
+ }
+
+ public void undoAttachArray(Object array, CachedArray cached) {
+ CachedArrayRegistry.unregister(array);
+ }
+
+ public void undoDetachArray(Object array, CachedArray cached) {
+ CachedArrayRegistry.register(array, cached);
+ }
+
+
@TxUndo
public void inMemorySubstitution(Object obj, Field field, Object newValue)
{
Modified: pojo/trunk/src/main/java/org/jboss/cache/pojo/interceptors/PojoTxUndoInterceptor.java
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/interceptors/PojoTxUndoInterceptor.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/interceptors/PojoTxUndoInterceptor.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -64,6 +64,24 @@
MethodCall mc = new MethodCall(method, invocation.getArguments(), invocation.getTargetObject());
handler.addToList(mc);
}
+ else if (methodName.equals(MethodDeclarations.detachCollectionInterceptor.getName()))
+ {
+ Method method = MethodDeclarations.undoDetachCollectionInterceptor;
+ MethodCall mc = new MethodCall(method, invocation.getArguments(), invocation.getTargetObject());
+ handler.addToList(mc);
+ }
+ else if (methodName.equals(MethodDeclarations.attachArray.getName()))
+ {
+ Method method = MethodDeclarations.undoAttachArray;
+ MethodCall mc = new MethodCall(method, invocation.getArguments(), invocation.getTargetObject());
+ handler.addToList(mc);
+ }
+ else if (methodName.equals(MethodDeclarations.detachArray.getName()))
+ {
+ Method method = MethodDeclarations.undoDetachArray;
+ MethodCall mc = new MethodCall(method, invocation.getArguments(), invocation.getTargetObject());
+ handler.addToList(mc);
+ }
else if (methodName.equals(MethodDeclarations.inMemorySubstitution.getName()))
{
Method method = MethodDeclarations.undoInMemorySubstitution;
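Stepping back from the three files above: PojoUtil, MethodDeclarations and PojoTxUndoInterceptor follow one convention. Each mutating operation annotated with @TxUndo has a statically declared undo counterpart, and the interceptor records a MethodCall to that counterpart (with the same target and arguments) so it can be replayed if the transaction rolls back. The following is a condensed, illustrative sketch of the do/undo pairing introduced here, using a plain lookup map instead of the interceptor's if/else chain; the class and method names below are illustrative only.

import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;

import org.jboss.cache.pojo.impl.MethodDeclarations;

public class UndoPairingSketch
{
   // Forward operation -> compensating operation, mirroring the branches added
   // to PojoTxUndoInterceptor in this revision.
   private static final Map<Method, Method> UNDO = new HashMap<Method, Method>();

   static
   {
      UNDO.put(MethodDeclarations.attachArray, MethodDeclarations.undoAttachArray);
      UNDO.put(MethodDeclarations.detachArray, MethodDeclarations.undoDetachArray);
      UNDO.put(MethodDeclarations.detachCollectionInterceptor,
               MethodDeclarations.undoDetachCollectionInterceptor);
   }

   // On rollback, the recorded MethodCall invokes the compensating method with the
   // same arguments the forward call received.
   public static Method undoFor(Method forward)
   {
      return UNDO.get(forward);
   }
}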
Copied: pojo/trunk/src/main/java/org/jboss/cache/pojo/util/ConcurrentReferenceHashMap.java (from rev 5598, experimental/jsr166/src/jsr166y/ConcurrentReferenceHashMap.java)
===================================================================
--- pojo/trunk/src/main/java/org/jboss/cache/pojo/util/ConcurrentReferenceHashMap.java (rev 0)
+++ pojo/trunk/src/main/java/org/jboss/cache/pojo/util/ConcurrentReferenceHashMap.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -0,0 +1,1685 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+package org.jboss.cache.pojo.util;
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.ref.Reference;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.SoftReference;
+import java.lang.ref.WeakReference;
+import java.util.AbstractCollection;
+import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.Collection;
+import java.util.ConcurrentModificationException;
+import java.util.EnumSet;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * An advanced hash table supporting configurable garbage collection semantics
+ * of keys and values, optional referential-equality, full concurrency of
+ * retrievals, and adjustable expected concurrency for updates.
+ *
+ * This table is designed around specific advanced use-cases. If there is any
+ * doubt whether this table is for you, you most likely should be using
+ * {@link java.util.concurrent.ConcurrentHashMap} instead.
+ *
+ * This table supports strong, weak, and soft keys and values. By default keys
+ * are weak, and values are strong. Such a configuration offers similar behavior
+ * to {@link java.util.WeakHashMap}, entries of this table are periodically
+ * removed once their corresponding keys are no longer referenced outside of
+ * this table. In other words, this table will not prevent a key from being
+ * discarded by the garbage collector. Once a key has been discarded by the
+ * collector, the corresponding entry is no longer visible to this table;
+ * however, the entry may occupy space until a future table operation decides to
+ * reclaim it. For this reason, summary functions such as <tt>size</tt> and
+ * <tt>isEmpty</tt> might return a value greater than the observed number of
+ * entries. In order to support a high level of concurrency, stale entries are
+ * only reclaimed during blocking (usually mutating) operations.
+ *
+ * Enabling soft keys allows entries in this table to remain until their space
+ * is absolutely needed by the garbage collector. This is unlike weak keys which
+ * can be reclaimed as soon as they are no longer referenced by a normal strong
+ * reference. The primary use case for soft keys is a cache, which ideally
+ * occupies memory that is not in use for as long as possible.
+ *
+ * By default, values are held using a normal strong reference. This provides
+ * the commonly desired guarantee that a value will always have at least the
+ * same life-span as its key. For this reason, care should be taken to ensure
+ * that a value never refers, either directly or indirectly, to its key, thereby
+ * preventing reclamation. If this is unavoidable, then it is recommended to use
+ * the same reference type in use for the key. However, it should be noted that
+ * non-strong values may disappear before their corresponding key.
+ *
+ * While this table does allow the use of both strong keys and values, it is
+ * recommended to use {@link java.util.concurrent.ConcurrentHashMap} for such a
+ * configuration, since it is optimized for that case.
+ *
+ * Just like {@link java.util.concurrent.ConcurrentHashMap}, this class obeys
+ * the same functional specification as {@link java.util.Hashtable}, and
+ * includes versions of methods corresponding to each method of
+ * <tt>Hashtable</tt>. However, even though all operations are thread-safe,
+ * retrieval operations do <em>not</em> entail locking, and there is
+ * <em>not</em> any support for locking the entire table in a way that
+ * prevents all access. This class is fully interoperable with
+ * <tt>Hashtable</tt> in programs that rely on its thread safety but not on
+ * its synchronization details.
+ *
+ * <p>
+ * Retrieval operations (including <tt>get</tt>) generally do not block, so
+ * may overlap with update operations (including <tt>put</tt> and
+ * <tt>remove</tt>). Retrievals reflect the results of the most recently
+ * <em>completed</em> update operations holding upon their onset. For
+ * aggregate operations such as <tt>putAll</tt> and <tt>clear</tt>,
+ * concurrent retrievals may reflect insertion or removal of only some entries.
+ * Similarly, Iterators and Enumerations return elements reflecting the state of
+ * the hash table at some point at or since the creation of the
+ * iterator/enumeration. They do <em>not</em> throw
+ * {@link ConcurrentModificationException}. However, iterators are designed to
+ * be used by only one thread at a time.
+ *
+ * <p>
+ * The allowed concurrency among update operations is guided by the optional
+ * <tt>concurrencyLevel</tt> constructor argument (default <tt>16</tt>),
+ * which is used as a hint for internal sizing. The table is internally
+ * partitioned to try to permit the indicated number of concurrent updates
+ * without contention. Because placement in hash tables is essentially random,
+ * the actual concurrency will vary. Ideally, you should choose a value to
+ * accommodate as many threads as will ever concurrently modify the table. Using
+ * a significantly higher value than you need can waste space and time, and a
+ * significantly lower value can lead to thread contention. But overestimates
+ * and underestimates within an order of magnitude do not usually have much
+ * noticeable impact. A value of one is appropriate when it is known that only
+ * one thread will modify and all others will only read. Also, resizing this or
+ * any other kind of hash table is a relatively slow operation, so, when
+ * possible, it is a good idea to provide estimates of expected table sizes in
+ * constructors.
+ *
+ * <p>
+ * This class and its views and iterators implement all of the <em>optional</em>
+ * methods of the {@link Map} and {@link Iterator} interfaces.
+ *
+ * <p>
+ * Like {@link Hashtable} but unlike {@link HashMap}, this class does
+ * <em>not</em> allow <tt>null</tt> to be used as a key or value.
+ *
+ * <p>
+ * This class is a member of the <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @author Doug Lea
+ * @author Jason T. Greene
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ */
+public class ConcurrentReferenceHashMap<K, V> extends AbstractMap<K, V>
+ implements java.util.concurrent.ConcurrentMap<K, V>, Serializable {
+ private static final long serialVersionUID = 7249069246763182397L;
+
+ /*
+ * The basic strategy is to subdivide the table among Segments,
+ * each of which itself is a concurrently readable hash table.
+ */
+
+ /**
+ * An option specifying which Java reference type should be used to refer
+ * to a key and/or value.
+ */
+ public static enum ReferenceType {
+ /** Indicates a normal Java strong reference should be used */
+ STRONG,
+ /** Indicates a {@link WeakReference} should be used */
+ WEAK,
+ /** Indicates a {@link SoftReference} should be used */
+ SOFT
+ };
+
+
+ public static enum Option {
+ /** Indicates that referential-equality (== instead of .equals()) should
+ * be used when locating keys. This offers similar behavior to {@link IdentityHashMap} */
+ IDENTITY_COMPARISONS
+ };
+
+ /* ---------------- Constants -------------- */
+
+ static final ReferenceType DEFAULT_KEY_TYPE = ReferenceType.WEAK;
+
+ static final ReferenceType DEFAULT_VALUE_TYPE = ReferenceType.STRONG;
+
+
+ /**
+ * The default initial capacity for this table,
+ * used when not otherwise specified in a constructor.
+ */
+ static final int DEFAULT_INITIAL_CAPACITY = 16;
+
+ /**
+ * The default load factor for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
+
+ /**
+ * The default concurrency level for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+
+ /**
+ * The maximum capacity, used if a higher value is implicitly
+ * specified by either of the constructors with arguments. MUST
+ * be a power of two <= 1<<30 to ensure that entries are indexable
+ * using ints.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The maximum number of segments to allow; used to bound
+ * constructor arguments.
+ */
+ static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
+
+ /**
+ * Number of unsynchronized retries in size and containsValue
+ * methods before resorting to locking. This is used to avoid
+ * unbounded retries if tables undergo continuous modification
+ * which would make it impossible to obtain an accurate result.
+ */
+ static final int RETRIES_BEFORE_LOCK = 2;
+
+ /* ---------------- Fields -------------- */
+
+ /**
+ * Mask value for indexing into segments. The upper bits of a
+ * key's hash code are used to choose the segment.
+ */
+ final int segmentMask;
+
+ /**
+ * Shift value for indexing within segments.
+ */
+ final int segmentShift;
+
+ /**
+ * The segments, each of which is a specialized hash table
+ */
+ final Segment<K,V>[] segments;
+
+ boolean identityComparisons;
+
+ transient Set<K> keySet;
+ transient Set<Map.Entry<K,V>> entrySet;
+ transient Collection<V> values;
+
+ /* ---------------- Small Utilities -------------- */
+
+ /**
+ * Applies a supplemental hash function to a given hashCode, which
+ * defends against poor quality hash functions. This is critical
+ * because ConcurrentReferenceHashMap uses power-of-two length hash tables,
+ * that otherwise encounter collisions for hashCodes that do not
+ * differ in lower or upper bits.
+ */
+ private static int hash(int h) {
+ // Spread bits to regularize both segment and index locations,
+ // using variant of single-word Wang/Jenkins hash.
+ h += (h << 15) ^ 0xffffcd7d;
+ h ^= (h >>> 10);
+ h += (h << 3);
+ h ^= (h >>> 6);
+ h += (h << 2) + (h << 14);
+ return h ^ (h >>> 16);
+ }
+
+ /**
+ * Returns the segment that should be used for key with given hash
+ * @param hash the hash code for the key
+ * @return the segment
+ */
+ final Segment<K,V> segmentFor(int hash) {
+ return segments[(hash >>> segmentShift) & segmentMask];
+ }
+
+ private int hashOf(Object key) {
+ return hash(identityComparisons ?
+ System.identityHashCode(key) : key.hashCode());
+ }
+
+ /* ---------------- Inner Classes -------------- */
+
+ static interface KeyReference {
+ int keyHash();
+ }
+
+ /**
+ * A weak-key reference which stores the key hash needed for reclamation.
+ */
+ static final class WeakKeyReference<K> extends WeakReference<K> implements KeyReference {
+ final int hash;
+ WeakKeyReference(K key, int hash, ReferenceQueue<K> refQueue) {
+ super(key, refQueue);
+ this.hash = hash;
+ }
+ public final int keyHash() {
+ return hash;
+ }
+ }
+
+ /**
+ * A soft-key reference which stores the key hash needed for reclamation.
+ */
+ static final class SoftKeyReference<K> extends SoftReference<K> implements KeyReference {
+ final int hash;
+ SoftKeyReference(K key, int hash, ReferenceQueue<K> refQueue) {
+ super(key, refQueue);
+ this.hash = hash;
+ }
+ public final int keyHash() {
+ return hash;
+ }
+ }
+
+ /**
+ * ConcurrentReferenceHashMap list entry. Note that this is never exported
+ * out as a user-visible Map.Entry.
+ *
+ * Because the value field is volatile, not final, it is legal wrt
+ * the Java Memory Model for an unsynchronized reader to see null
+ * instead of initial value when read via a data race. Although a
+ * reordering leading to this is not likely to ever actually
+ * occur, the Segment.readValueUnderLock method is used as a
+ * backup in case a null (pre-initialized) value is ever seen in
+ * an unsynchronized access method.
+ */
+ static final class HashEntry<K,V> {
+ final Object keyRef;
+ final int hash;
+ volatile Object valueRef;
+ final HashEntry<K,V> next;
+
+ HashEntry(K key, int hash, HashEntry<K,V> next, V value,
+ ReferenceType keyType, ReferenceType valueType,
+ ReferenceQueue<K> refQueue) {
+ this.keyRef = newKeyReference(key, keyType, hash, refQueue);
+ this.hash = hash;
+ this.next = next;
+ this.valueRef = newValueReference(value, valueType);
+ }
+
+ final Object newKeyReference(K key, ReferenceType keyType, int hash,
+ ReferenceQueue<K> refQueue) {
+ if (keyType == ReferenceType.WEAK)
+ return new WeakKeyReference<K>(key, hash, refQueue);
+ if (keyType == ReferenceType.SOFT)
+ return new SoftKeyReference<K>(key, hash, refQueue);
+
+ return key;
+ }
+
+ final Object newValueReference(V value, ReferenceType valueType) {
+ if (valueType == ReferenceType.WEAK)
+ return new WeakReference<V>(value);
+ if (valueType == ReferenceType.SOFT)
+ return new SoftReference<V>(value);
+
+ return value;
+ }
+
+ @SuppressWarnings("unchecked")
+ final K key() {
+ if (keyRef instanceof Reference)
+ return ((Reference<K>)keyRef).get();
+
+ return (K) keyRef;
+ }
+
+ final V value() {
+ return dereferenceValue(valueRef);
+ }
+
+ @SuppressWarnings("unchecked")
+ final V dereferenceValue(Object value) {
+ if (value instanceof Reference)
+ return ((Reference<V>)value).get();
+
+ return (V) value;
+ }
+
+ final void setValue(V value, ReferenceType valueType) {
+ this.valueRef = newValueReference(value, valueType);
+ }
+
+ @SuppressWarnings("unchecked")
+ static final <K,V> HashEntry<K,V>[] newArray(int i) {
+ return new HashEntry[i];
+ }
+ }
+
+ /**
+ * Segments are specialized versions of hash tables. This
+ * subclasses from ReentrantLock opportunistically, just to
+ * simplify some locking and avoid separate construction.
+ */
+ static final class Segment<K,V> extends ReentrantLock implements Serializable {
+ /*
+ * Segments maintain a table of entry lists that are ALWAYS
+ * kept in a consistent state, so can be read without locking.
+ * Next fields of nodes are immutable (final). All list
+ * additions are performed at the front of each bin. This
+ * makes it easy to check changes, and also fast to traverse.
+ * When nodes would otherwise be changed, new nodes are
+ * created to replace them. This works well for hash tables
+ * since the bin lists tend to be short. (The average length
+ * is less than two for the default load factor threshold.)
+ *
+ * Read operations can thus proceed without locking, but rely
+ * on selected uses of volatiles to ensure that completed
+ * write operations performed by other threads are
+ * noticed. For most purposes, the "count" field, tracking the
+ * number of elements, serves as that volatile variable
+ * ensuring visibility. This is convenient because this field
+ * needs to be read in many read operations anyway:
+ *
+ * - All (unsynchronized) read operations must first read the
+ * "count" field, and should not look at table entries if
+ * it is 0.
+ *
+ * - All (synchronized) write operations should write to
+ * the "count" field after structurally changing any bin.
+ * The operations must not take any action that could even
+ * momentarily cause a concurrent read operation to see
+ * inconsistent data. This is made easier by the nature of
+ * the read operations in Map. For example, no operation
+ * can reveal that the table has grown but the threshold
+ * has not yet been updated, so there are no atomicity
+ * requirements for this with respect to reads.
+ *
+ * As a guide, all critical volatile reads and writes to the
+ * count field are marked in code comments.
+ */
+
+ private static final long serialVersionUID = 2249069246763182397L;
+
+ /**
+ * The number of elements in this segment's region.
+ */
+ transient volatile int count;
+
+ /**
+ * Number of updates that alter the size of the table. This is
+ * used during bulk-read methods to make sure they see a
+ * consistent snapshot: If modCounts change during a traversal
+ * of segments computing size or checking containsValue, then
+ * we might have an inconsistent view of state so (usually)
+ * must retry.
+ */
+ transient int modCount;
+
+ /**
+ * The table is rehashed when its size exceeds this threshold.
+ * (The value of this field is always <tt>(int)(capacity *
+ * loadFactor)</tt>.)
+ */
+ transient int threshold;
+
+ /**
+ * The per-segment table.
+ */
+ transient volatile HashEntry<K,V>[] table;
+
+ /**
+ * The load factor for the hash table. Even though this value
+ * is same for all segments, it is replicated to avoid needing
+ * links to outer object.
+ * @serial
+ */
+ final float loadFactor;
+
+ /**
+ * The collected weak-key reference queue for this segment.
+ * This should be (re)initialized whenever table is assigned,
+ */
+ transient volatile ReferenceQueue<K> refQueue;
+
+ final ReferenceType keyType;
+
+ final ReferenceType valueType;
+
+ final boolean identityComparisons;
+
+ Segment(int initialCapacity, float lf, ReferenceType keyType,
+ ReferenceType valueType, boolean identityComparisons) {
+ loadFactor = lf;
+ this.keyType = keyType;
+ this.valueType = valueType;
+ this.identityComparisons = identityComparisons;
+ setTable(HashEntry.<K,V>newArray(initialCapacity));
+ }
+
+ @SuppressWarnings("unchecked")
+ static final <K,V> Segment<K,V>[] newArray(int i) {
+ return new Segment[i];
+ }
+
+ private boolean keyEq(Object src, Object dest) {
+ return identityComparisons ? src == dest : src.equals(dest);
+ }
+
+ /**
+ * Sets table to new HashEntry array.
+ * Call only while holding lock or in constructor.
+ */
+ void setTable(HashEntry<K,V>[] newTable) {
+ threshold = (int)(newTable.length * loadFactor);
+ table = newTable;
+ refQueue = new ReferenceQueue<K>();
+ }
+
+ /**
+ * Returns properly casted first entry of bin for given hash.
+ */
+ HashEntry<K,V> getFirst(int hash) {
+ HashEntry<K,V>[] tab = table;
+ return tab[hash & (tab.length - 1)];
+ }
+
+ HashEntry<K,V> newHashEntry(K key, int hash, HashEntry<K, V> next, V value) {
+ return new HashEntry<K,V>(key, hash, next, value, keyType, valueType, refQueue);
+ }
+
+ /**
+ * Reads value field of an entry under lock. Called if value
+ * field ever appears to be null. This is possible only if a
+ * compiler happens to reorder a HashEntry initialization with
+ * its table assignment, which is legal under memory model
+ * but is not known to ever occur.
+ */
+ V readValueUnderLock(HashEntry<K,V> e) {
+ lock();
+ try {
+ removeStale();
+ return e.value();
+ } finally {
+ unlock();
+ }
+ }
+
+ /* Specialized implementations of map methods */
+
+ V get(Object key, int hash) {
+ if (count != 0) { // read-volatile
+ HashEntry<K,V> e = getFirst(hash);
+ while (e != null) {
+ if (e.hash == hash && keyEq(key, e.key())) {
+ Object opaque = e.valueRef;
+ if (opaque != null)
+ return e.dereferenceValue(opaque);
+
+ return readValueUnderLock(e); // recheck
+ }
+ e = e.next;
+ }
+ }
+ return null;
+ }
+
+ boolean containsKey(Object key, int hash) {
+ if (count != 0) { // read-volatile
+ HashEntry<K,V> e = getFirst(hash);
+ while (e != null) {
+ if (e.hash == hash && keyEq(key, e.key()))
+ return true;
+ e = e.next;
+ }
+ }
+ return false;
+ }
+
+ boolean containsValue(Object value) {
+ if (count != 0) { // read-volatile
+ HashEntry<K,V>[] tab = table;
+ int len = tab.length;
+ for (int i = 0 ; i < len; i++) {
+ for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
+ Object opaque = e.valueRef;
+ V v;
+
+ if (opaque == null)
+ v = readValueUnderLock(e); // recheck
+ else
+ v = e.dereferenceValue(opaque);
+
+ if (value.equals(v))
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ boolean replace(K key, int hash, V oldValue, V newValue) {
+ lock();
+ try {
+ removeStale();
+ HashEntry<K,V> e = getFirst(hash);
+ while (e != null && (e.hash != hash || !keyEq(key, e.key())))
+ e = e.next;
+
+ boolean replaced = false;
+ if (e != null && oldValue.equals(e.value())) {
+ replaced = true;
+ e.setValue(newValue, valueType);
+ }
+ return replaced;
+ } finally {
+ unlock();
+ }
+ }
+
+ V replace(K key, int hash, V newValue) {
+ lock();
+ try {
+ removeStale();
+ HashEntry<K,V> e = getFirst(hash);
+ while (e != null && (e.hash != hash || !keyEq(key, e.key())))
+ e = e.next;
+
+ V oldValue = null;
+ if (e != null) {
+ oldValue = e.value();
+ e.setValue(newValue, valueType);
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ }
+ }
+
+
+ V put(K key, int hash, V value, boolean onlyIfAbsent) {
+ lock();
+ try {
+ removeStale();
+ int c = count;
+ if (c++ > threshold) {// ensure capacity
+ int reduced = rehash();
+ if (reduced > 0) // adjust from possible weak cleanups
+ count = (c -= reduced) - 1; // write-volatile
+ }
+
+ HashEntry<K,V>[] tab = table;
+ int index = hash & (tab.length - 1);
+ HashEntry<K,V> first = tab[index];
+ HashEntry<K,V> e = first;
+ while (e != null && (e.hash != hash || !keyEq(key, e.key())))
+ e = e.next;
+
+ V oldValue;
+ if (e != null) {
+ oldValue = e.value();
+ if (!onlyIfAbsent)
+ e.setValue(value, valueType);
+ }
+ else {
+ oldValue = null;
+ ++modCount;
+ tab[index] = newHashEntry(key, hash, first, value);
+ count = c; // write-volatile
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ }
+ }
+
+ int rehash() {
+ HashEntry<K,V>[] oldTable = table;
+ int oldCapacity = oldTable.length;
+ if (oldCapacity >= MAXIMUM_CAPACITY)
+ return 0;
+
+ /*
+ * Reclassify nodes in each list to new Map. Because we are
+ * using power-of-two expansion, the elements from each bin
+ * must either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next
+ * fields won't change. Statistically, at the default
+ * threshold, only about one-sixth of them need cloning when
+ * a table doubles. The nodes they replace will be garbage
+ * collectable as soon as they are no longer referenced by any
+ * reader thread that may be in the midst of traversing table
+ * right now.
+ */
+
+ HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
+ threshold = (int)(newTable.length * loadFactor);
+ int sizeMask = newTable.length - 1;
+ int reduce = 0;
+ for (int i = 0; i < oldCapacity ; i++) {
+ // We need to guarantee that any existing reads of old Map can
+ // proceed. So we cannot yet null out each bin.
+ HashEntry<K,V> e = oldTable[i];
+
+ if (e != null) {
+ HashEntry<K,V> next = e.next;
+ int idx = e.hash & sizeMask;
+
+ // Single node on list
+ if (next == null)
+ newTable[idx] = e;
+
+ else {
+ // Reuse trailing consecutive sequence at same slot
+ HashEntry<K,V> lastRun = e;
+ int lastIdx = idx;
+ for (HashEntry<K,V> last = next;
+ last != null;
+ last = last.next) {
+ int k = last.hash & sizeMask;
+ if (k != lastIdx) {
+ lastIdx = k;
+ lastRun = last;
+ }
+ }
+ newTable[lastIdx] = lastRun;
+ // Clone all remaining nodes
+ for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
+ // Skip GC'd weak refs
+ K key = p.key();
+ if (key == null) {
+ reduce++;
+ continue;
+ }
+ int k = p.hash & sizeMask;
+ HashEntry<K,V> n = newTable[k];
+ newTable[k] = newHashEntry(key, p.hash, n, p.value());
+ }
+ }
+ }
+ }
+ table = newTable;
+ return reduce;
+ }
+
+ /**
+ * Remove; match on key only if value null, else match both.
+ */
+ V remove(Object key, int hash, Object value, boolean weakRemove) {
+ lock();
+ try {
+ if (!weakRemove)
+ removeStale();
+ int c = count - 1;
+ HashEntry<K,V>[] tab = table;
+ int index = hash & (tab.length - 1);
+ HashEntry<K,V> first = tab[index];
+ HashEntry<K,V> e = first;
+ // a weak remove operation compares the WeakReference instance
+ while (e != null && (!weakRemove || key != e.keyRef)
+ && (e.hash != hash || !keyEq(key, e.key())))
+ e = e.next;
+
+ V oldValue = null;
+ if (e != null) {
+ V v = e.value();
+ if (value == null || value.equals(v)) {
+ oldValue = v;
+ // All entries following removed node can stay
+ // in list, but all preceding ones need to be
+ // cloned.
+ ++modCount;
+ HashEntry<K,V> newFirst = e.next;
+ for (HashEntry<K,V> p = first; p != e; p = p.next) {
+ K pKey = p.key();
+ if (pKey == null) { // Skip GC'd keys
+ c--;
+ continue;
+ }
+
+ newFirst = newHashEntry(pKey, p.hash, newFirst, p.value());
+ }
+ tab[index] = newFirst;
+ count = c; // write-volatile
+ }
+ }
+ return oldValue;
+ } finally {
+ unlock();
+ }
+ }
+
+ final void removeStale() {
+ if (keyType == ReferenceType.STRONG)
+ return;
+
+ KeyReference ref;
+ while ((ref = (KeyReference) refQueue.poll()) != null) {
+ remove(ref, ref.keyHash(), null, true);
+ }
+ }
+
+ void clear() {
+ if (count != 0) {
+ lock();
+ try {
+ HashEntry<K,V>[] tab = table;
+ for (int i = 0; i < tab.length ; i++)
+ tab[i] = null;
+ ++modCount;
+ // replace the reference queue to avoid unnecessary stale cleanups
+ refQueue = new ReferenceQueue<K>();
+ count = 0; // write-volatile
+ } finally {
+ unlock();
+ }
+ }
+ }
+ }
+
+
+
+ /* ---------------- Public operations -------------- */
+
+ /**
+ * Creates a new, empty map with the specified initial
+ * capacity, reference types, load factor and concurrency level.
+ *
+ * Behavioral changing options such as {@link Option#IDENTITY_COMPARISONS}
+ * can also be specified.
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements.
+ * @param loadFactor the load factor threshold, used to control resizing.
+ * Resizing may be performed when the average number of elements per
+ * bin exceeds this threshold.
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation performs internal sizing
+ * to try to accommodate this many threads.
+ * @param keyType the reference type to use for keys
+ * @param valueType the reference type to use for values
+ * @param options the behavioral options
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public ConcurrentReferenceHashMap(int initialCapacity,
+ float loadFactor, int concurrencyLevel,
+ ReferenceType keyType, ReferenceType valueType,
+ EnumSet<Option> options) {
+ if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
+ throw new IllegalArgumentException();
+
+ if (concurrencyLevel > MAX_SEGMENTS)
+ concurrencyLevel = MAX_SEGMENTS;
+
+ // Find power-of-two sizes best matching arguments
+ int sshift = 0;
+ int ssize = 1;
+ while (ssize < concurrencyLevel) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ segmentShift = 32 - sshift;
+ segmentMask = ssize - 1;
+ this.segments = Segment.newArray(ssize);
+
+ if (initialCapacity > MAXIMUM_CAPACITY)
+ initialCapacity = MAXIMUM_CAPACITY;
+ int c = initialCapacity / ssize;
+ if (c * ssize < initialCapacity)
+ ++c;
+ int cap = 1;
+ while (cap < c)
+ cap <<= 1;
+
+ identityComparisons = options != null && options.contains(Option.IDENTITY_COMPARISONS);
+
+ for (int i = 0; i < this.segments.length; ++i)
+ this.segments[i] = new Segment<K,V>(cap, loadFactor,
+ keyType, valueType, identityComparisons);
+ }
+
+ /**
+ * Creates a new, empty map with the specified initial
+ * capacity, load factor and concurrency level.
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements.
+ * @param loadFactor the load factor threshold, used to control resizing.
+ * Resizing may be performed when the average number of elements per
+ * bin exceeds this threshold.
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation performs internal sizing
+ * to try to accommodate this many threads.
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public ConcurrentReferenceHashMap(int initialCapacity,
+ float loadFactor, int concurrencyLevel) {
+ this(initialCapacity, loadFactor, concurrencyLevel,
+ DEFAULT_KEY_TYPE, DEFAULT_VALUE_TYPE, null);
+ }
+
+ /**
+ * Creates a new, empty map with the specified initial capacity
+ * and load factor and with the default reference types (weak keys,
+ * strong values), and concurrencyLevel (16).
+ *
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @param loadFactor the load factor threshold, used to control resizing.
+ * Resizing may be performed when the average number of elements per
+ * bin exceeds this threshold.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative or the load factor is nonpositive
+ *
+ * @since 1.6
+ */
+ public ConcurrentReferenceHashMap(int initialCapacity, float loadFactor) {
+ this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
+ }
+
+
+ /**
+ * Creates a new, empty map with the specified initial capacity,
+ * reference types and with default load factor (0.75) and concurrencyLevel (16).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements.
+ * @param keyType the reference type to use for keys
+ * @param valueType the reference type to use for values
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative.
+ */
+ public ConcurrentReferenceHashMap(int initialCapacity,
+ ReferenceType keyType, ReferenceType valueType) {
+ this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL,
+ keyType, valueType, null);
+ }
+
+ /**
+ * Creates a new, empty map with the specified initial capacity,
+ * reference types and with default load factor (0.75) and concurrencyLevel (16).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements.
+ * @param keyType the reference type to use for keys
+ * @param valueType the reference type to use for values
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative.
+ */
+ public ConcurrentReferenceHashMap(int initialCapacity,
+ ReferenceType keyType, ReferenceType valueType, EnumSet<Option> options) {
+ this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL,
+ keyType, valueType, options);
+ }
+
+
+ /**
+ * Creates a new, empty map with the specified initial capacity,
+ * and with default reference types (weak keys, strong values),
+ * load factor (0.75) and concurrencyLevel (16).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative.
+ */
+ public ConcurrentReferenceHashMap(int initialCapacity) {
+ this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ }
+
+ /**
+ * Creates a new, empty map with a default initial capacity (16),
+ * reference types (weak keys, strong values), default
+ * load factor (0.75) and concurrencyLevel (16).
+ */
+ public ConcurrentReferenceHashMap() {
+ this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ }
+
+ /**
+ * Creates a new map with the same mappings as the given map.
+ * The map is created with a capacity of 1.5 times the number
+ * of mappings in the given map or 16 (whichever is greater),
+ * and a default load factor (0.75) and concurrencyLevel (16).
+ *
+ * @param m the map
+ */
+ public ConcurrentReferenceHashMap(Map<? extends K, ? extends V> m) {
+ this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
+ DEFAULT_INITIAL_CAPACITY),
+ DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ putAll(m);
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map contains no key-value mappings.
+ *
+ * @return <tt>true</tt> if this map contains no key-value mappings
+ */
+ public boolean isEmpty() {
+ final Segment<K,V>[] segments = this.segments;
+ /*
+ * We keep track of per-segment modCounts to avoid ABA
+ * problems in which an element in one segment was added and
+ * in another removed during traversal, in which case the
+ * table was never actually empty at any point. Note the
+ * similar use of modCounts in the size() and containsValue()
+ * methods, which are the only other methods also susceptible
+ * to ABA problems.
+ */
+ int[] mc = new int[segments.length];
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++i) {
+ if (segments[i].count != 0)
+ return false;
+ else
+ mcsum += mc[i] = segments[i].modCount;
+ }
+ // If mcsum happens to be zero, then we know we got a snapshot
+ // before any modifications at all were made. This is
+ // probably common enough to bother tracking.
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++i) {
+ if (segments[i].count != 0 ||
+ mc[i] != segments[i].modCount)
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns the number of key-value mappings in this map. If the
+ * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
+ * <tt>Integer.MAX_VALUE</tt>.
+ *
+ * @return the number of key-value mappings in this map
+ */
+ public int size() {
+ final Segment<K,V>[] segments = this.segments;
+ long sum = 0;
+ long check = 0;
+ int[] mc = new int[segments.length];
+ // Try a few times to get accurate count. On failure due to
+ // continuous async changes in table, resort to locking.
+ for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
+ check = 0;
+ sum = 0;
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++i) {
+ sum += segments[i].count;
+ mcsum += mc[i] = segments[i].modCount;
+ }
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++i) {
+ check += segments[i].count;
+ if (mc[i] != segments[i].modCount) {
+ check = -1; // force retry
+ break;
+ }
+ }
+ }
+ if (check == sum)
+ break;
+ }
+ if (check != sum) { // Resort to locking all segments
+ sum = 0;
+ for (int i = 0; i < segments.length; ++i)
+ segments[i].lock();
+ for (int i = 0; i < segments.length; ++i)
+ sum += segments[i].count;
+ for (int i = 0; i < segments.length; ++i)
+ segments[i].unlock();
+ }
+ if (sum > Integer.MAX_VALUE)
+ return Integer.MAX_VALUE;
+ else
+ return (int)sum;
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
+ * then this method returns {@code v}; otherwise it returns
+ * {@code null}. (There can be at most one such mapping.)
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ public V get(Object key) {
+ int hash = hashOf(key);
+ return segmentFor(hash).get(key, hash);
+ }
+
+ /**
+ * Tests if the specified object is a key in this table.
+ *
+ * @param key possible key
+ * @return <tt>true</tt> if and only if the specified object
+ * is a key in this table, as determined by the
+ * <tt>equals</tt> method; <tt>false</tt> otherwise.
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean containsKey(Object key) {
+ int hash = hashOf(key);
+ return segmentFor(hash).containsKey(key, hash);
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map maps one or more keys to the
+ * specified value. Note: This method requires a full internal
+ * traversal of the hash table, and so is much slower than
+ * method <tt>containsKey</tt>.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map maps one or more keys to the
+ * specified value
+ * @throws NullPointerException if the specified value is null
+ */
+ public boolean containsValue(Object value) {
+ if (value == null)
+ throw new NullPointerException();
+
+ // See explanation of modCount use above
+
+ final Segment<K,V>[] segments = this.segments;
+ int[] mc = new int[segments.length];
+
+ // Try a few times without locking
+ for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
+ int sum = 0;
+ int mcsum = 0;
+ for (int i = 0; i < segments.length; ++i) {
+ int c = segments[i].count;
+ mcsum += mc[i] = segments[i].modCount;
+ if (segments[i].containsValue(value))
+ return true;
+ }
+ boolean cleanSweep = true;
+ if (mcsum != 0) {
+ for (int i = 0; i < segments.length; ++i) {
+ int c = segments[i].count;
+ if (mc[i] != segments[i].modCount) {
+ cleanSweep = false;
+ break;
+ }
+ }
+ }
+ if (cleanSweep)
+ return false;
+ }
+ // Resort to locking all segments
+ for (int i = 0; i < segments.length; ++i)
+ segments[i].lock();
+ boolean found = false;
+ try {
+ for (int i = 0; i < segments.length; ++i) {
+ if (segments[i].containsValue(value)) {
+ found = true;
+ break;
+ }
+ }
+ } finally {
+ for (int i = 0; i < segments.length; ++i)
+ segments[i].unlock();
+ }
+ return found;
+ }
+
+ /**
+ * Legacy method testing if some key maps into the specified value
+ * in this table. This method is identical in functionality to
+ * {@link #containsValue}, and exists solely to ensure
+ * full compatibility with class {@link java.util.Hashtable},
+ * which supported this method prior to introduction of the
+ * Java Collections framework.
+
+ * @param value a value to search for
+ * @return <tt>true</tt> if and only if some key maps to the
+ * <tt>value</tt> argument in this table as
+ * determined by the <tt>equals</tt> method;
+ * <tt>false</tt> otherwise
+ * @throws NullPointerException if the specified value is null
+ */
+ public boolean contains(Object value) {
+ return containsValue(value);
+ }
+
+ /**
+ * Maps the specified key to the specified value in this table.
+ * Neither the key nor the value can be null.
+ *
+ * <p> The value can be retrieved by calling the <tt>get</tt> method
+ * with a key that is equal to the original key.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V put(K key, V value) {
+ if (value == null)
+ throw new NullPointerException();
+ int hash = hashOf(key);
+ return segmentFor(hash).put(key, hash, value, false);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or <tt>null</tt> if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V putIfAbsent(K key, V value) {
+ if (value == null)
+ throw new NullPointerException();
+ int hash = hashOf(key);
+ return segmentFor(hash).put(key, hash, value, true);
+ }
+
+ /**
+ * Copies all of the mappings from the specified map to this one.
+ * These mappings replace any mappings that this map had for any of the
+ * keys currently in the specified map.
+ *
+ * @param m mappings to be stored in this map
+ */
+ public void putAll(Map<? extends K, ? extends V> m) {
+ for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
+ put(e.getKey(), e.getValue());
+ }
+
+ /**
+ * Removes the key (and its corresponding value) from this map.
+ * This method does nothing if the key is not in the map.
+ *
+ * @param key the key that needs to be removed
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>
+ * @throws NullPointerException if the specified key is null
+ */
+ public V remove(Object key) {
+ int hash = hashOf(key);
+ return segmentFor(hash).remove(key, hash, null, false);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean remove(Object key, Object value) {
+ int hash = hashOf(key);
+ if (value == null)
+ return false;
+ return segmentFor(hash).remove(key, hash, value, false) != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if any of the arguments are null
+ */
+ public boolean replace(K key, V oldValue, V newValue) {
+ if (oldValue == null || newValue == null)
+ throw new NullPointerException();
+ int hash = hashOf(key);
+ return segmentFor(hash).replace(key, hash, oldValue, newValue);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or <tt>null</tt> if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V replace(K key, V value) {
+ if (value == null)
+ throw new NullPointerException();
+ int hash = hashOf(key);
+ return segmentFor(hash).replace(key, hash, value);
+ }
+
+ /**
+ * Removes all of the mappings from this map.
+ */
+ public void clear() {
+ for (int i = 0; i < segments.length; ++i)
+ segments[i].clear();
+ }
+
+ /**
+ * Removes any stale entries whose keys have been finalized. Use of this
+ * method is normally not necessary since stale entries are automatically
+ * removed lazily, when blocking operations are required. However, there
+ * are some cases where this operation should be performed eagerly, such
+ * as cleaning up old references to a ClassLoader in a multi-classloader
+ * environment.
+ *
+ * Note: this method will acquire locks, one at a time, across all segments
+ * of this table, so if it is to be used, it should be used sparingly.
+ */
+ public void purgeStaleEntries() {
+ for (int i = 0; i < segments.length; ++i)
+ segments[i].removeStale();
+ }
+
+
+ /**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from this map,
+ * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+ * operations. It does not support the <tt>add</tt> or
+ * <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ public Set<K> keySet() {
+ Set<K> ks = keySet;
+ return (ks != null) ? ks : (keySet = new KeySet());
+ }
+
+ /**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa. The collection
+ * supports element removal, which removes the corresponding
+ * mapping from this map, via the <tt>Iterator.remove</tt>,
+ * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
+ * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not
+ * support the <tt>add</tt> or <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ public Collection<V> values() {
+ Collection<V> vs = values;
+ return (vs != null) ? vs : (values = new Values());
+ }
+
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from the map,
+ * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+ * operations. It does not support the <tt>add</tt> or
+ * <tt>addAll</tt> operations.
+ *
+ * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ */
+ public Set<Map.Entry<K,V>> entrySet() {
+ Set<Map.Entry<K,V>> es = entrySet;
+ return (es != null) ? es : (entrySet = new EntrySet());
+ }
+
+ /**
+ * Returns an enumeration of the keys in this table.
+ *
+ * @return an enumeration of the keys in this table
+ * @see #keySet()
+ */
+ public Enumeration<K> keys() {
+ return new KeyIterator();
+ }
+
+ /**
+ * Returns an enumeration of the values in this table.
+ *
+ * @return an enumeration of the values in this table
+ * @see #values()
+ */
+ public Enumeration<V> elements() {
+ return new ValueIterator();
+ }
+
+ /* ---------------- Iterator Support -------------- */
+
+ abstract class HashIterator {
+ int nextSegmentIndex;
+ int nextTableIndex;
+ HashEntry<K,V>[] currentTable;
+ HashEntry<K, V> nextEntry;
+ HashEntry<K, V> lastReturned;
+ K currentKey; // Strong reference to weak key (prevents gc)
+
+ HashIterator() {
+ nextSegmentIndex = segments.length - 1;
+ nextTableIndex = -1;
+ advance();
+ }
+
+ public boolean hasMoreElements() { return hasNext(); }
+
+ final void advance() {
+ if (nextEntry != null && (nextEntry = nextEntry.next) != null)
+ return;
+
+ while (nextTableIndex >= 0) {
+ if ( (nextEntry = currentTable[nextTableIndex--]) != null)
+ return;
+ }
+
+ while (nextSegmentIndex >= 0) {
+ Segment<K,V> seg = segments[nextSegmentIndex--];
+ if (seg.count != 0) {
+ currentTable = seg.table;
+ for (int j = currentTable.length - 1; j >= 0; --j) {
+ if ( (nextEntry = currentTable[j]) != null) {
+ nextTableIndex = j - 1;
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ public boolean hasNext() {
+ while (nextEntry != null) {
+ if (nextEntry.key() != null)
+ return true;
+ advance();
+ }
+
+ return false;
+ }
+
+ HashEntry<K,V> nextEntry() {
+ do {
+ if (nextEntry == null)
+ throw new NoSuchElementException();
+
+ lastReturned = nextEntry;
+ currentKey = lastReturned.key();
+ advance();
+ } while (currentKey == null); // Skip GC'd keys
+
+ return lastReturned;
+ }
+
+ public void remove() {
+ if (lastReturned == null)
+ throw new IllegalStateException();
+ ConcurrentReferenceHashMap.this.remove(currentKey);
+ lastReturned = null;
+ }
+ }
+
+ final class KeyIterator
+ extends HashIterator
+ implements Iterator<K>, Enumeration<K>
+ {
+ public K next() { return super.nextEntry().key(); }
+ public K nextElement() { return super.nextEntry().key(); }
+ }
+
+ final class ValueIterator
+ extends HashIterator
+ implements Iterator<V>, Enumeration<V>
+ {
+ public V next() { return super.nextEntry().value(); }
+ public V nextElement() { return super.nextEntry().value(); }
+ }
+
+ /*
+ * This class is needed for JDK5 compatibility.
+ */
+ static class SimpleEntry<K, V> implements Entry<K, V>,
+ java.io.Serializable {
+ private static final long serialVersionUID = -8499721149061103585L;
+
+ private final K key;
+ private V value;
+
+ public SimpleEntry(K key, V value) {
+ this.key = key;
+ this.value = value;
+ }
+
+ public SimpleEntry(Entry<? extends K, ? extends V> entry) {
+ this.key = entry.getKey();
+ this.value = entry.getValue();
+ }
+
+ public K getKey() {
+ return key;
+ }
+
+ public V getValue() {
+ return value;
+ }
+
+ public V setValue(V value) {
+ V oldValue = this.value;
+ this.value = value;
+ return oldValue;
+ }
+
+ public boolean equals(Object o) {
+ if (!(o instanceof Map.Entry))
+ return false;
+ @SuppressWarnings("unchecked")
+ Map.Entry e = (Map.Entry) o;
+ return eq(key, e.getKey()) && eq(value, e.getValue());
+ }
+
+ public int hashCode() {
+ return (key == null ? 0 : key.hashCode())
+ ^ (value == null ? 0 : value.hashCode());
+ }
+
+ public String toString() {
+ return key + "=" + value;
+ }
+
+ private static boolean eq(Object o1, Object o2) {
+ return o1 == null ? o2 == null : o1.equals(o2);
+ }
+ }
+
+
+ /**
+ * Custom Entry class used by EntryIterator.next(), that relays setValue
+ * changes to the underlying map.
+ */
+ final class WriteThroughEntry extends SimpleEntry<K,V>
+ {
+ private static final long serialVersionUID = -7900634345345313646L;
+
+ WriteThroughEntry(K k, V v) {
+ super(k,v);
+ }
+
+ /**
+ * Set our entry's value and write through to the map. The
+ * value to return is somewhat arbitrary here. Since a
+ * WriteThroughEntry does not necessarily track asynchronous
+ * changes, the most recent "previous" value could be
+ * different from what we return (or could even have been
+ * removed in which case the put will re-establish). We do not
+ * and cannot guarantee more.
+ */
+ public V setValue(V value) {
+ if (value == null) throw new NullPointerException();
+ V v = super.setValue(value);
+ ConcurrentReferenceHashMap.this.put(getKey(), value);
+ return v;
+ }
+ }
+
+ final class EntryIterator
+ extends HashIterator
+ implements Iterator<Entry<K,V>>
+ {
+ public Map.Entry<K,V> next() {
+ HashEntry<K,V> e = super.nextEntry();
+ return new WriteThroughEntry(e.key(), e.value());
+ }
+ }
+
+ final class KeySet extends AbstractSet<K> {
+ public Iterator<K> iterator() {
+ return new KeyIterator();
+ }
+ public int size() {
+ return ConcurrentReferenceHashMap.this.size();
+ }
+ public boolean isEmpty() {
+ return ConcurrentReferenceHashMap.this.isEmpty();
+ }
+ public boolean contains(Object o) {
+ return ConcurrentReferenceHashMap.this.containsKey(o);
+ }
+ public boolean remove(Object o) {
+ return ConcurrentReferenceHashMap.this.remove(o) != null;
+ }
+ public void clear() {
+ ConcurrentReferenceHashMap.this.clear();
+ }
+ }
+
+ final class Values extends AbstractCollection<V> {
+ public Iterator<V> iterator() {
+ return new ValueIterator();
+ }
+ public int size() {
+ return ConcurrentReferenceHashMap.this.size();
+ }
+ public boolean isEmpty() {
+ return ConcurrentReferenceHashMap.this.isEmpty();
+ }
+ public boolean contains(Object o) {
+ return ConcurrentReferenceHashMap.this.containsValue(o);
+ }
+ public void clear() {
+ ConcurrentReferenceHashMap.this.clear();
+ }
+ }
+
+ final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
+ public Iterator<Map.Entry<K,V>> iterator() {
+ return new EntryIterator();
+ }
+ public boolean contains(Object o) {
+ if (!(o instanceof Map.Entry))
+ return false;
+ Map.Entry<?,?> e = (Map.Entry<?,?>)o;
+ V v = ConcurrentReferenceHashMap.this.get(e.getKey());
+ return v != null && v.equals(e.getValue());
+ }
+ public boolean remove(Object o) {
+ if (!(o instanceof Map.Entry))
+ return false;
+ Map.Entry<?,?> e = (Map.Entry<?,?>)o;
+ return ConcurrentReferenceHashMap.this.remove(e.getKey(), e.getValue());
+ }
+ public int size() {
+ return ConcurrentReferenceHashMap.this.size();
+ }
+ public boolean isEmpty() {
+ return ConcurrentReferenceHashMap.this.isEmpty();
+ }
+ public void clear() {
+ ConcurrentReferenceHashMap.this.clear();
+ }
+ }
+
+ /* ---------------- Serialization Support -------------- */
+
+ /**
+ * Save the state of the <tt>ConcurrentReferenceHashMap</tt> instance to a
+ * stream (i.e., serialize it).
+ * @param s the stream
+ * @serialData
+ * the key (Object) and value (Object)
+ * for each key-value mapping, followed by a null pair.
+ * The key-value mappings are emitted in no particular order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s) throws IOException {
+ s.defaultWriteObject();
+
+ for (int k = 0; k < segments.length; ++k) {
+ Segment<K,V> seg = segments[k];
+ seg.lock();
+ try {
+ HashEntry<K,V>[] tab = seg.table;
+ for (int i = 0; i < tab.length; ++i) {
+ for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
+ K key = e.key();
+ if (key == null) // Skip GC'd keys
+ continue;
+
+ s.writeObject(key);
+ s.writeObject(e.value());
+ }
+ }
+ } finally {
+ seg.unlock();
+ }
+ }
+ s.writeObject(null);
+ s.writeObject(null);
+ }
+
+ /**
+ * Reconstitute the <tt>ConcurrentReferenceHashMap</tt> instance from a
+ * stream (i.e., deserialize it).
+ * @param s the stream
+ */
+ @SuppressWarnings("unchecked")
+ private void readObject(java.io.ObjectInputStream s)
+ throws IOException, ClassNotFoundException {
+ s.defaultReadObject();
+
+ // Initialize each segment to be minimally sized, and let grow.
+ for (int i = 0; i < segments.length; ++i) {
+ segments[i].setTable(new HashEntry[1]);
+ }
+
+ // Read the keys and values, and put the mappings in the table
+ for (;;) {
+ K key = (K) s.readObject();
+ V value = (V) s.readObject();
+ if (key == null)
+ break;
+ put(key, value);
+ }
+ }
+}
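
For orientation, here is a minimal usage sketch of the map once this hunk is applied, exercising only the methods shown above (put, putIfAbsent, get, the weakly consistent entry-set iterator, purgeStaleEntries and remove). The no-argument constructor is an assumption, since the constructors sit outside this hunk, and the sketch is presumed to live in the same package as the map class.

import java.util.Map;

public class ReferenceMapSketch
{
   public static void main(String[] args)
   {
      // Assumed default constructor; the constructors are defined outside this hunk.
      // Same package as ConcurrentReferenceHashMap is assumed, so no import is needed.
      ConcurrentReferenceHashMap<String, String> map = new ConcurrentReferenceHashMap<String, String>();

      map.put("fqn", "/a/b/c");            // null keys or values are rejected
      map.putIfAbsent("fqn", "/x/y/z");    // no effect: a mapping already exists
      System.out.println(map.get("fqn"));  // prints /a/b/c

      // The views are weakly consistent: iteration never throws
      // ConcurrentModificationException even if the map changes underneath it.
      for (Map.Entry<String, String> e : map.entrySet())
      {
         System.out.println(e.getKey() + " -> " + e.getValue());
      }

      // Eagerly drop entries whose referenced keys have already been collected.
      map.purgeStaleEntries();

      map.remove("fqn");
   }
}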
Modified: pojo/trunk/src/test/java/org/jboss/cache/pojo/TxUndoTest.java
===================================================================
--- pojo/trunk/src/test/java/org/jboss/cache/pojo/TxUndoTest.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/test/java/org/jboss/cache/pojo/TxUndoTest.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -9,7 +9,12 @@
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
+import static org.testng.AssertJUnit.assertSame;
+import static org.testng.AssertJUnit.assertNull;
+import java.util.ArrayList;
+import java.util.List;
+
import javax.transaction.TransactionManager;
import org.apache.commons.logging.Log;
@@ -17,6 +22,7 @@
import org.jboss.aop.Advised;
import org.jboss.aop.advice.Interceptor;
import org.jboss.cache.pojo.interceptors.dynamic.CacheFieldInterceptor;
+import org.jboss.cache.pojo.test.ArrayObject;
import org.jboss.cache.pojo.test.Person;
import org.jboss.cache.transaction.DummyTransactionManager;
import org.testng.annotations.AfterMethod;
@@ -119,7 +125,47 @@
assertFalse("Should not have cache interceptor ", hasCacheInterceptor(test));
}
+ public void testCollectionRollback() throws Exception
+ {
+ Person test = new Person();
+ test.setName("Ben");
+ List<String> languages = new ArrayList<String>();
+ languages.add("English");
+ languages.add("French");
+ test.setLanguages(languages);
+ cache_.attach("/a", test);
+ tx_mgr.begin();
+ cache_.detach("/a");
+ tx_mgr.rollback();
+ languages = test.getLanguages();
+ tx_mgr.begin();
+ languages.add("temp");
+ tx_mgr.rollback();
+ assertFalse("Languages is not attached", languages.contains("temp"));
+ assertTrue(languages.contains("English"));
+ assertTrue(languages.contains("French"));
+ }
+ public void testArrayRollback() throws Exception
+ {
+ ArrayObject test = new ArrayObject();
+ ArrayObject.Person bob = new ArrayObject.Person("bob");
+ ArrayObject.Person david = new ArrayObject.Person("david");
+ test.setPerson(0, bob);
+ test.setPerson(1, david);
+ cache_.attach("/a", test);
+ tx_mgr.begin();
+ cache_.detach("/a");
+ tx_mgr.rollback();
+
+ tx_mgr.begin();
+ test.setPerson(2, bob);
+ assertSame(bob, test.getPerson(2));
+ tx_mgr.rollback();
+ assertNull("Array is not attached", test.getPerson(2));
+ assertSame(bob, test.getPerson(0));
+ assertSame(david, test.getPerson(1));
+ }
}
Modified: pojo/trunk/src/test/java/org/jboss/cache/pojo/test/ArrayObject.java
===================================================================
--- pojo/trunk/src/test/java/org/jboss/cache/pojo/test/ArrayObject.java 2008-06-06 14:52:03 UTC (rev 5962)
+++ pojo/trunk/src/test/java/org/jboss/cache/pojo/test/ArrayObject.java 2008-06-07 02:29:28 UTC (rev 5963)
@@ -23,6 +23,13 @@
private String name;
private int age;
+ public Person() {
+ }
+
+ public Person(String name) {
+ this.name = name;
+ }
+
public String getName()
{
return name;
JBoss Cache SVN: r5962 - searchable/trunk/src/main/java/org/jboss/cache/search.
by jbosscache-commits@lists.jboss.org
Author: navssurtani
Date: 2008-06-06 10:52:03 -0400 (Fri, 06 Jun 2008)
New Revision: 5962
Modified:
searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java
Log:
Edited SearchableListener
Modified: searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java
===================================================================
--- searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java 2008-06-06 08:53:21 UTC (rev 5961)
+++ searchable/trunk/src/main/java/org/jboss/cache/search/SearchableListener.java 2008-06-06 14:52:03 UTC (rev 5962)
@@ -53,4 +53,23 @@
}
+
+ void handleDeleteData(NodeModifiedEvent event)
+ {
+ Map dataMap = event.getData();
+
+ for(Object key: dataMap.keySet())
+ {
+ String keyString = (String) key;
+ String docId = Transformer.generateId(event.getFqn(), keyString);
+
+ new Work (dataMap.get(key), docId, WorkType.DELETE);
+
+ //TODO: Add to queue.
+
+ }
+
+
+ }
+
}
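
The TODO above leaves each freshly built Work object unreferenced. As a hedged illustration of the pending queueing step, the helper below batches the DELETE Work objects into a list that a later change could hand to the Hibernate Search backend; the helper class and the batching approach are assumptions, while Work, WorkType and Transformer.generateId are the same types and calls already used in this revision.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.hibernate.search.backend.Work;
import org.hibernate.search.backend.WorkType;
import org.jboss.cache.Fqn;

// Hypothetical helper, not part of this revision. Assumed to live in
// org.jboss.cache.search so that Transformer resolves without an import.
public class DeleteWorkBatcher
{
   public List<Work> buildDeleteWorks(Fqn fqn, Map<Object, Object> dataMap)
   {
      List<Work> queue = new ArrayList<Work>();
      for (Object key : dataMap.keySet())
      {
         String keyString = (String) key;
         // Same document id scheme as handleDeleteData() above.
         String docId = Transformer.generateId(fqn, keyString);
         queue.add(new Work(dataMap.get(key), docId, WorkType.DELETE));
      }
      return queue;
   }
}

How the returned batch would eventually be flushed to the index (for example through the search factory's worker) is left open here, just as it is in the revision itself.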
JBoss Cache SVN: r5961 - core/trunk/src/test/java/org/jboss/cache/api.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-06-06 04:53:21 -0400 (Fri, 06 Jun 2008)
New Revision: 5961
Modified:
core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java
core/trunk/src/test/java/org/jboss/cache/api/DestroyedCacheAPITest.java
Log:
Patched tests
Modified: core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java 2008-06-05 10:53:15 UTC (rev 5960)
+++ core/trunk/src/test/java/org/jboss/cache/api/CacheAPITest.java 2008-06-06 08:53:21 UTC (rev 5961)
@@ -113,43 +113,6 @@
}
/**
- * Test that fqn-specific application of cache listeners has not been implemented and will not be implemented
- * in 2.0.0. It is a feature for 2.1.0 but the interface needed to be in place now.
- */
- public void testFqnBasedCacheListeners()
- {
- try
- {
- cache.getCacheListeners(Fqn.ROOT);
- fail("Fqn-based cache listener operation should throw an exception");
- }
- catch (Exception e)
- {
- // expected
- }
-
- try
- {
- cache.addCacheListener(Fqn.ROOT, new Listener());
- fail("Fqn-based cache listener operation should throw an exception");
- }
- catch (Exception e)
- {
- // expected
- }
-
- try
- {
- cache.removeCacheListener(Fqn.ROOT, new Listener());
- fail("Fqn-based cache listener operation should throw an exception");
- }
- catch (Exception e)
- {
- // expected
- }
- }
-
- /**
* All cache operations should happen on a {@link Node} - I.e., you look up a {@link Node} and perform data operations
* on this {@link Node}. For convenience and familiarity with JBoss Cache 1.x, we provide some helpers in {@link Cache}
* which gives you direct data access to nodes.
Modified: core/trunk/src/test/java/org/jboss/cache/api/DestroyedCacheAPITest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/DestroyedCacheAPITest.java 2008-06-05 10:53:15 UTC (rev 5960)
+++ core/trunk/src/test/java/org/jboss/cache/api/DestroyedCacheAPITest.java 2008-06-06 08:53:21 UTC (rev 5961)
@@ -34,7 +34,7 @@
private Fqn parent = Fqn.fromString("/test/fqn");
private Fqn child = Fqn.fromString("/test/fqn/child");
private String version;
- private Node<String,String> root;
+ private Node<String, String> root;
@BeforeMethod(alwaysRun = true)
public void setUp() throws Exception
@@ -109,43 +109,6 @@
}
/**
- * Test that fqn-specific application of cache listeners has not been implemented and will not be implemented
- * in 2.0.0. It is a feature for 2.1.0 but the interface needed to be in place now.
- */
- public void testFqnBasedCacheListeners()
- {
- try
- {
- cache.getCacheListeners(Fqn.ROOT);
- fail("Fqn-based cache listener operation should throw an exception");
- }
- catch (Exception e)
- {
- // expected
- }
-
- try
- {
- cache.addCacheListener(Fqn.ROOT, new Listener());
- fail("Fqn-based cache listener operation should throw an exception");
- }
- catch (Exception e)
- {
- // expected
- }
-
- try
- {
- cache.removeCacheListener(Fqn.ROOT, new Listener());
- fail("Fqn-based cache listener operation should throw an exception");
- }
- catch (Exception e)
- {
- // expected
- }
- }
-
- /**
* Tests the basic gets, puts. Expectation is all will throw an
* ISE.
* <p/>
JBoss Cache SVN: r5960 - core/trunk/src/main/java/org/jboss/cache/loader.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-06-05 06:53:15 -0400 (Thu, 05 Jun 2008)
New Revision: 5960
Modified:
core/trunk/src/main/java/org/jboss/cache/loader/AbstractCacheLoader.java
Log:
Added some logging
Modified: core/trunk/src/main/java/org/jboss/cache/loader/AbstractCacheLoader.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/loader/AbstractCacheLoader.java 2008-06-05 10:39:56 UTC (rev 5959)
+++ core/trunk/src/main/java/org/jboss/cache/loader/AbstractCacheLoader.java 2008-06-05 10:53:15 UTC (rev 5960)
@@ -211,6 +211,8 @@
if (r != null)
{
+ if (trace)
+ log.trace("Using region " + r.getFqn() + ", which has registered class loader " + r.getClassLoader() + " as a context class loader.");
// set the region's class loader as the thread's context classloader
needToResetLoader = true;
current = Thread.currentThread();
JBoss Cache SVN: r5959 - in core/trunk/src/main/java/org/jboss/cache: invocation and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-06-05 06:39:56 -0400 (Thu, 05 Jun 2008)
New Revision: 5959
Modified:
core/trunk/src/main/java/org/jboss/cache/Cache.java
core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java
Log:
* Updated Javadocs
* Removed cache listener manipulation methods that worked on specific regions as this was way too inefficient to implement properly.
Modified: core/trunk/src/main/java/org/jboss/cache/Cache.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/Cache.java 2008-06-05 10:34:51 UTC (rev 5958)
+++ core/trunk/src/main/java/org/jboss/cache/Cache.java 2008-06-05 10:39:56 UTC (rev 5959)
@@ -103,15 +103,6 @@
void addCacheListener(Object listener);
/**
- * Adds a {@link org.jboss.cache.notifications.annotation.CacheListener}-annotated object to a given region. The object passed in needs to be properly annotated with the
- * {@link org.jboss.cache.notifications.annotation.CacheListener} annotation otherwise an {@link org.jboss.cache.notifications.IncorrectCacheListenerException} will be thrown.
- *
- * @param region region to add listener to
- * @param listener listener to add
- */
- void addCacheListener(Fqn<?> region, Object listener);
-
- /**
* Removes a {@link org.jboss.cache.notifications.annotation.CacheListener}-annotated object from the cache. The object passed in needs to be properly annotated with the
* {@link org.jboss.cache.notifications.annotation.CacheListener} annotation otherwise an {@link org.jboss.cache.notifications.IncorrectCacheListenerException} will be thrown.
*
@@ -120,15 +111,6 @@
void removeCacheListener(Object listener);
/**
- * Removes a {@link org.jboss.cache.notifications.annotation.CacheListener}-annotated object from a given region. The object passed in needs to be properly annotated with the
- * {@link org.jboss.cache.notifications.annotation.CacheListener} annotation otherwise an {@link org.jboss.cache.notifications.IncorrectCacheListenerException} will be thrown.
- *
- * @param region region from which to remove listener
- * @param listener listener to remove
- */
- void removeCacheListener(Fqn<?> region, Object listener);
-
- /**
* Retrieves an immutable {@link List} of objects annotated as {@link org.jboss.cache.notifications.annotation.CacheListener}s attached to the cache.
*
* @return an immutable {@link List} of objects annotated as {@link org.jboss.cache.notifications.annotation.CacheListener}s attached to the cache.
@@ -136,13 +118,6 @@
Set<Object> getCacheListeners();
/**
- * Retrieves an immutable {@link List} of objects annotated as {@link org.jboss.cache.notifications.annotation.CacheListener}s attached to a specific region.
- *
- * @return an immutable {@link List} of objects annotated as {@link org.jboss.cache.notifications.annotation.CacheListener}s attached to a specific region.
- */
- Set<Object> getCacheListeners(Fqn<?> region);
-
- /**
* Associates the specified value with the specified key for a {@link Node} in this cache.
* If the {@link Node} previously contained a mapping for this key, the old value is replaced by the specified value.
*
@@ -151,7 +126,7 @@
* @param value value to be associated with the specified key.
* @return previous value associated with specified key, or <code>null</code> if there was no mapping for key.
* A <code>null</code> return can also indicate that the Node previously associated <code>null</code> with the specified key, if the implementation supports null values.
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state.
*/
V put(Fqn<?> fqn, K key, V value);
@@ -163,7 +138,7 @@
* @param value value to be associated with the specified key.
* @return previous value associated with specified key, or <code>null</code> if there was no mapping for key.
* A <code>null</code> return can also indicate that the Node previously associated <code>null</code> with the specified key, if the implementation supports null values.
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
V put(String fqn, K key, V value);
@@ -201,14 +176,16 @@
*
* @param fqn <b><i>absolute</i></b> {@link Fqn} to the {@link Node} to copy the data to
* @param data mappings to copy
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
void put(Fqn<?> fqn, Map<K, V> data);
/**
* Convenience method that takes a string representation of an Fqn. Otherwise identical to {@link #put(Fqn, java.util.Map)}
*
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @param fqn String representation of the Fqn
+ * @param data data map to insert
+ * @throws IllegalStateException if the cache is not in a started state
*/
void put(String fqn, Map<K, V> data);
@@ -220,14 +197,17 @@
* @param fqn <b><i>absolute</i></b> {@link Fqn} to the {@link Node} to be accessed.
* @param key key whose mapping is to be removed from the Node
* @return previous value associated with specified Node's key
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
V remove(Fqn<?> fqn, K key);
/**
* Convenience method that takes a string representation of an Fqn. Otherwise identical to {@link #remove(Fqn, Object)}
*
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @param fqn string representation of the Fqn to retrieve
+ * @param key key to remove
+ * @return old value removed, or null if the fqn does not exist
+ * @throws IllegalStateException if the cache is not in a started state
*/
V remove(String fqn, K key);
@@ -236,14 +216,16 @@
*
* @param fqn {@link Node} to remove
* @return true if the node was removed, false if the node was not found
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
boolean removeNode(Fqn<?> fqn);
/**
* Convenience method that takes a string representation of an Fqn. Otherwise identical to {@link #removeNode(Fqn)}
*
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @param fqn string representation of the Fqn to retrieve
+ * @return true if the node was found and removed, false otherwise
+ * @throws IllegalStateException if the cache is not in a started state
*/
boolean removeNode(String fqn);
@@ -252,14 +234,16 @@
*
* @param fqn fqn of the node to retrieve
* @return a Node object, or a null if the node does not exist.
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
Node<K, V> getNode(Fqn<?> fqn);
/**
* Convenience method that takes a string representation of an Fqn. Otherwise identical to {@link #getNode(Fqn)}
*
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @param fqn string representation of the Fqn to retrieve
+ * @return node, or null if the node does not exist
+ * @throws IllegalStateException if the cache is not in a started state
*/
Node<K, V> getNode(String fqn);
@@ -270,14 +254,17 @@
* @param fqn <b><i>absolute</i></b> {@link Fqn} to the {@link Node} to be accessed.
* @param key key under which value is to be retrieved.
* @return returns data held under specified key in {@link Node} denoted by specified Fqn.
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
V get(Fqn<?> fqn, K key);
/**
* Convenience method that takes a string representation of an Fqn. Otherwise identical to {@link #get(Fqn, Object)}
*
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @param fqn string representation of the Fqn to retrieve
+ * @param key key to fetch
+ * @return value, or null if the fqn does not exist.
+ * @throws IllegalStateException if the cache is not in a started state
*/
V get(String fqn, K key);
@@ -286,7 +273,7 @@
*
* @param fqn <b><i>absolute</i></b> {@link Fqn} to the {@link Node} to be evicted.
* @param recursive evicts children as well
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
void evict(Fqn<?> fqn, boolean recursive);
@@ -294,7 +281,7 @@
* Eviction call that evicts the specified {@link Node} from memory. Not recursive.
*
* @param fqn <b><i>absolute</i></b> {@link Fqn} to the {@link Node} to be evicted.
- * @throws IllegalStateException if {@link #getCacheStatus()} would not return {@link CacheStatus.STARTED}.
+ * @throws IllegalStateException if the cache is not in a started state
*/
void evict(Fqn<?> fqn);
Modified: core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java 2008-06-05 10:34:51 UTC (rev 5958)
+++ core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java 2008-06-05 10:39:56 UTC (rev 5959)
@@ -474,21 +474,6 @@
return put(Fqn.fromString(fqn), key, value);
}
- public Set<Object> getCacheListeners(Fqn region)
- {
- throw new UnsupportedOperationException("Not implemented in this release");
- }
-
- public void removeCacheListener(Fqn region, Object listener)
- {
- throw new UnsupportedOperationException("Not implemented in this release");
- }
-
- public void addCacheListener(Fqn region, Object listener)
- {
- throw new UnsupportedOperationException("Not implemented in this release");
- }
-
/**
* Retrieves a defensively copied data map of the underlying node.
*
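
The reworded Javadocs above all describe the same precondition: each of these operations requires the cache to be in a started state and otherwise throws IllegalStateException. A short sketch of the calls in question follows; the DefaultCacheFactory setup is an assumption for illustration and is not part of this diff.

import org.jboss.cache.Cache;
import org.jboss.cache.DefaultCacheFactory;
import org.jboss.cache.Fqn;

public class CacheApiSketch
{
   public static void main(String[] args)
   {
      // Assumed setup: a default, local cache, started by the factory.
      Cache<String, String> cache = new DefaultCacheFactory<String, String>().createCache();

      Fqn fqn = Fqn.fromString("/demo/node");
      cache.put(fqn, "key", "value");      // IllegalStateException if the cache is not started
      String v = cache.get(fqn, "key");    // "value"
      System.out.println(v);

      cache.evict(fqn, false);             // evict from memory only, not recursive
      boolean removed = cache.removeNode(fqn);
      System.out.println(removed);

      cache.stop();
      cache.destroy();
   }
}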
JBoss Cache SVN: r5958 - in core/trunk/src: test/java/org/jboss/cache and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-06-05 06:34:51 -0400 (Thu, 05 Jun 2008)
New Revision: 5958
Modified:
core/trunk/src/main/java/org/jboss/cache/DataContainerImpl.java
core/trunk/src/test/java/org/jboss/cache/DataContainerTest.java
Log:
JBCACHE-1361: NPE when doing a recursive evict
Modified: core/trunk/src/main/java/org/jboss/cache/DataContainerImpl.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/DataContainerImpl.java 2008-06-05 10:07:17 UTC (rev 5957)
+++ core/trunk/src/main/java/org/jboss/cache/DataContainerImpl.java 2008-06-05 10:34:51 UTC (rev 5958)
@@ -239,7 +239,7 @@
NodeSPI node = peek(fqn, false);
if (recursive)
{
- recursiveAddEvictionNodes(node, result);
+ if (node != null) recursiveAddEvictionNodes(node, result);
}
else
{
@@ -263,14 +263,14 @@
return result;
}
- private void recursiveAddEvictionNodes(NodeSPI node, List<Fqn> result)
+ private void recursiveAddEvictionNodes(NodeSPI<?, ?> node, List<Fqn> result)
{
- for (NodeSPI child : (Set<NodeSPI>) node.getChildrenDirect())
+ for (NodeSPI<?, ?> child : node.getChildrenDirect())
{
recursiveAddEvictionNodes(child, result);
}
Fqn fqn = node.getFqn();
- if (node != null && !fqn.isRoot() && !node.isResident())
+ if (!fqn.isRoot() && !node.isResident())
{
result.add(fqn);
}
@@ -289,6 +289,9 @@
/**
* Returns a debug string with optional details of contents.
+ *
+ * @param details if true, details are printed
+ * @return detailed contents of the container
*/
@SuppressWarnings("deprecation")
public String toString(boolean details)
@@ -358,6 +361,8 @@
* Prints information about the contents of the nodes in the cache's current
* in-memory state. Does not load any previously evicted nodes from a
* cache loader, so evicted nodes will not be included.
+ *
+ * @return details
*/
public String printDetails()
{
@@ -370,6 +375,8 @@
/**
* Returns lock information.
+ *
+ * @return lock info
*/
public String printLockInfo()
{
@@ -397,6 +404,8 @@
* the cache. Since this method doesn't acquire any locks, the number might
* be incorrect, or the method might even throw a
* ConcurrentModificationException
+ *
+ * @return number of attribs
*/
public int getNumberOfAttributes()
{
Modified: core/trunk/src/test/java/org/jboss/cache/DataContainerTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/DataContainerTest.java 2008-06-05 10:07:17 UTC (rev 5957)
+++ core/trunk/src/test/java/org/jboss/cache/DataContainerTest.java 2008-06-05 10:34:51 UTC (rev 5958)
@@ -203,6 +203,23 @@
}
/**
+ * tests {@link DataContainerImpl#getNodesForEviction(Fqn, boolean)} in a recursive scenario.
+ */
+ public void testGetNodesForEvictionRecursiveNullNodes()
+ {
+ container.removeFromDataStructure(nodes.ad, true);
+ //check for root first
+ List<Fqn> result = container.getNodesForEviction(Fqn.ROOT, true);
+ assert result.size() == 3 : "all children are considered for eviction";
+
+ //check normal
+ // this node does not exist!! Should NOT throw a NPE.
+ result = container.getNodesForEviction(nodes.ad, true);
+ assert result.isEmpty() : "Should be empty";
+ }
+
+
+ /**
* tests {@link DataContainerImpl#getNumberOfNodes()}
*/
public void testGetNumberOfNodes()
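
For context, the user-visible symptom behind JBCACHE-1361 was a NullPointerException when recursively evicting a subtree whose Fqn did not exist (or had already been removed). Below is a hedged sketch of the call this change makes safe; the wrapper class is illustrative only, and a started cache is assumed to be supplied by the caller.

import org.jboss.cache.Cache;
import org.jboss.cache.Fqn;

public class RecursiveEvictSketch
{
   // Assumes the caller passes in a started Cache instance.
   static void evictMissingSubtree(Cache<String, String> cache)
   {
      Fqn missing = Fqn.fromString("/never/created");
      // getNodesForEviction() previously dereferenced a null node on the recursive path;
      // with this fix a recursive evict of a missing Fqn should simply do nothing.
      cache.evict(missing, true);
   }
}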
JBoss Cache SVN: r5957 - demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-06-05 06:07:17 -0400 (Thu, 05 Jun 2008)
New Revision: 5957
Modified:
demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java
Log:
Removed use of internal APIs
Modified: demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java
===================================================================
--- demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java 2008-06-04 18:11:48 UTC (rev 5956)
+++ demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java 2008-06-05 10:07:17 UTC (rev 5957)
@@ -9,7 +9,6 @@
import org.jboss.cache.DefaultCacheFactory;
import org.jboss.cache.Fqn;
import org.jboss.cache.Node;
-import org.jboss.cache.NodeSPI;
import org.jboss.cache.notifications.annotation.BuddyGroupChanged;
import org.jboss.cache.notifications.annotation.NodeCreated;
import org.jboss.cache.notifications.annotation.NodeModified;
@@ -636,26 +635,23 @@
{
public void run()
{
-
// get all kids and add to the tree as well.
- CacheSPI<String, String> spi = (CacheSPI) cache;
- recursivelyAddNode(spi.peek(fqn, false));
+ recursivelyAddNode(cache.getNode(fqn));
// only refresh if there are no more tasks queued up
if (asyncTaskQueue.isEmpty()) treeRefresher.repaint();
}
- private void recursivelyAddNode(NodeSPI<String, String> n)
+ private void recursivelyAddNode(Node<String, String> n)
{
treeRefresher.addNode(n.getFqn());
- for (Node<String, String> child : n.getChildrenDirect())
- recursivelyAddNode((NodeSPI<String, String>) child);
+ for (Node<String, String> child : n.getChildren())
+ recursivelyAddNode(child);
}
});
}
}
-
@NodeRemoved
public void nodeRemoved(NodeRemovedEvent e)
{
16 years, 7 months
JBoss Cache SVN: r5956 - in demos/core-demo-gui/trunk: src/main/java/org/jboss/cache/demo and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-06-04 14:11:48 -0400 (Wed, 04 Jun 2008)
New Revision: 5956
Modified:
demos/core-demo-gui/trunk/pom.xml
demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java
Log:
Updated to JBC 2.2.0.CR2
Updated to deal with refreshing display for DEAD backup subtrees
Modified: demos/core-demo-gui/trunk/pom.xml
===================================================================
--- demos/core-demo-gui/trunk/pom.xml 2008-06-04 17:10:42 UTC (rev 5955)
+++ demos/core-demo-gui/trunk/pom.xml 2008-06-04 18:11:48 UTC (rev 5956)
@@ -18,7 +18,7 @@
<dependency>
<groupId>org.jboss.cache</groupId>
<artifactId>jbosscache-core</artifactId>
- <version>2.1.0.GA</version>
+ <version>2.2.0.CR2</version>
</dependency>
<dependency>
<groupId>jgoodies</groupId>
Modified: demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java
===================================================================
--- demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java 2008-06-04 17:10:42 UTC (rev 5955)
+++ demos/core-demo-gui/trunk/src/main/java/org/jboss/cache/demo/JBossCacheDemo.java 2008-06-04 18:11:48 UTC (rev 5956)
@@ -9,17 +9,20 @@
import org.jboss.cache.DefaultCacheFactory;
import org.jboss.cache.Fqn;
import org.jboss.cache.Node;
-import org.jboss.cache.util.CachePrinter;
+import org.jboss.cache.NodeSPI;
import org.jboss.cache.notifications.annotation.BuddyGroupChanged;
import org.jboss.cache.notifications.annotation.NodeCreated;
import org.jboss.cache.notifications.annotation.NodeModified;
+import org.jboss.cache.notifications.annotation.NodeMoved;
import org.jboss.cache.notifications.annotation.NodeRemoved;
import org.jboss.cache.notifications.annotation.ViewChanged;
import org.jboss.cache.notifications.event.BuddyGroupChangedEvent;
import org.jboss.cache.notifications.event.NodeCreatedEvent;
import org.jboss.cache.notifications.event.NodeModifiedEvent;
+import org.jboss.cache.notifications.event.NodeMovedEvent;
import org.jboss.cache.notifications.event.NodeRemovedEvent;
import org.jboss.cache.notifications.event.ViewChangedEvent;
+import org.jboss.cache.util.CachePrinter;
import org.jgroups.Address;
import javax.swing.*;
@@ -49,9 +52,6 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.AbstractExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.LinkedBlockingQueue;
/**
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik(a)jboss.org</a>)
@@ -592,6 +592,7 @@
public void buddyGroupChanged(BuddyGroupChangedEvent e)
{
clusterDataModel.setBuddies();
+ treeRefresher.repaint();
}
@NodeModified
@@ -625,6 +626,36 @@
}
}
+ @NodeMoved
+ public void nodeMoved(NodeMovedEvent e)
+ {
+ if (!e.isPre())
+ {
+ final Fqn fqn = e.getTargetFqn();
+ asyncExecutor.execute(new Runnable()
+ {
+ public void run()
+ {
+
+ // get all kids and add to the tree as well.
+ CacheSPI<String, String> spi = (CacheSPI) cache;
+ recursivelyAddNode(spi.peek(fqn, false));
+ // only refresh if there are no more tasks queued up
+ if (asyncTaskQueue.isEmpty()) treeRefresher.repaint();
+ }
+
+ private void recursivelyAddNode(NodeSPI<String, String> n)
+ {
+ treeRefresher.addNode(n.getFqn());
+ for (Node<String, String> child : n.getChildrenDirect())
+ recursivelyAddNode((NodeSPI<String, String>) child);
+ }
+
+ });
+ }
+ }
+
+
@NodeRemoved
public void nodeRemoved(NodeRemovedEvent e)
{
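
Everything the demo hooks into here goes through the public notification API: a class annotated with @CacheListener plus per-event annotations on its handler methods. Below is a stripped-down sketch of such a listener, limited to the annotations and event accessors that appear in this diff; registering it via cache.addCacheListener(new MoveLogger()) is assumed to work the same way the demo registers its own listener.

import org.jboss.cache.Fqn;
import org.jboss.cache.notifications.annotation.CacheListener;
import org.jboss.cache.notifications.annotation.NodeMoved;
import org.jboss.cache.notifications.event.NodeMovedEvent;

// Hypothetical listener used only for illustration.
@CacheListener
public class MoveLogger
{
   @NodeMoved
   public void nodeMoved(NodeMovedEvent e)
   {
      // React only after the move has actually happened.
      if (!e.isPre())
      {
         Fqn target = e.getTargetFqn();
         System.out.println("Subtree moved to " + target);
      }
   }
}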