JBoss Cache SVN: r7736 - core/branches/flat/src/main/java/org/horizon/remoting.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 08:48:03 -0500 (Thu, 19 Feb 2009)
New Revision: 7736
Modified:
core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java
Log:
Removed unused field
Modified: core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java 2009-02-19 13:40:55 UTC (rev 7735)
+++ core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java 2009-02-19 13:48:03 UTC (rev 7736)
@@ -89,9 +89,7 @@
@Start
public synchronized void start() {
long interval = configuration.getReplQueueInterval();
- if (log.isTraceEnabled()) {
- log.trace("Starting replication queue, with interval=" + interval +", and maxElements=" + maxElements);
- }
+ log.trace("Starting replication queue, with interval {0} and maxElements {1}", interval, maxElements);
this.maxElements = configuration.getReplQueueMaxElements();
// check again
enabled = configuration.isUseReplQueue();
@@ -135,15 +133,15 @@
public void flush() {
List<ReplicableCommand> toReplicate;
synchronized (elements) {
- if (log.isTraceEnabled())
- log.trace("flush(): flushing repl queue (num elements=" + elements.size() + ")");
+ if (log.isTraceEnabled()) log.trace("flush(): flushing repl queue (num elements={0})", elements.size());
toReplicate = new ArrayList<ReplicableCommand>(elements);
elements.clear();
}
- if (toReplicate.size() > 0) {
+ int toReplicateSize = toReplicate.size();
+ if (toReplicateSize > 0) {
try {
- if (log.isTraceEnabled()) log.trace("Flushing " + toReplicate.size() + " elements " );
+ log.trace("Flushing {0} elements", toReplicateSize);
ReplicateCommand replicateCommand = commandsFactory.buildReplicateCommand(toReplicate);
// send to all live nodes in the cluster
rpcManager.invokeRemotely(null, replicateCommand, ResponseMode.ASYNCHRONOUS, configuration.getSyncReplTimeout());
15 years, 11 months
JBoss Cache SVN: r7735 - in core/trunk/src: test/java/org/jboss/cache/api and 2 other directories.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 08:40:55 -0500 (Thu, 19 Feb 2009)
New Revision: 7735
Added:
core/trunk/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java
Modified:
core/trunk/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java
core/trunk/src/test/java/org/jboss/cache/mgmt/CacheLoaderTest.java
core/trunk/src/test/java/org/jboss/cache/mgmt/PassivationTest.java
core/trunk/src/test/java/org/jboss/cache/passivation/PassivationTestsBase.java
Log:
JBCACHE-1481
Modified: core/trunk/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java 2009-02-19 13:23:08 UTC (rev 7734)
+++ core/trunk/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java 2009-02-19 13:40:55 UTC (rev 7735)
@@ -46,15 +46,15 @@
import java.util.Map;
/**
- * Utility functions to manipulate wrapping {@link org.jboss.cache.InternalNode}s as {@link org.jboss.cache.mvcc.ReadCommittedNode}
- * or {@link org.jboss.cache.mvcc.RepeatableReadNode}s. Would also entail locking, if necessary.
+ * Utility functions to manipulate wrapping {@link org.jboss.cache.InternalNode}s as {@link
+ * org.jboss.cache.mvcc.ReadCommittedNode} or {@link org.jboss.cache.mvcc.RepeatableReadNode}s. Would also entail
+ * locking, if necessary.
*
* @author Manik Surtani (<a href="mailto:manik AT jboss DOT org">manik AT jboss DOT org</a>)
* @since 3.0
*/
@NonVolatile
-public class MVCCNodeHelper
-{
+public class MVCCNodeHelper {
DataContainer dataContainer;
NodeFactory nodeFactory;
private static final Log log = LogFactory.getLog(MVCCNodeHelper.class);
@@ -66,8 +66,7 @@
private boolean lockParentForChildInsertRemove;
@Inject
- public void injectDependencies(DataContainer dataContainer, NodeFactory nodeFactory, LockManager lockManager, Configuration configuration)
- {
+ public void injectDependencies(DataContainer dataContainer, NodeFactory nodeFactory, LockManager lockManager, Configuration configuration) {
this.nodeFactory = nodeFactory;
this.dataContainer = dataContainer;
this.configuration = configuration;
@@ -75,8 +74,7 @@
}
@Start
- public void start()
- {
+ public void start() {
defaultLockAcquisitionTimeout = configuration.getLockAcquisitionTimeout();
writeSkewCheck = configuration.isWriteSkewCheck();
lockParentForChildInsertRemove = configuration.isLockParentForChildInsertRemove();
@@ -84,23 +82,23 @@
/**
- * Attempts to provide the context with a set of wrapped nodes based on the Collection of fqns passed in. If the nodes
- * already exist in the context then the node is not wrapped again.
+ * Attempts to provide the context with a set of wrapped nodes based on the Collection of fqns passed in. If the
+ * nodes already exist in the context then the node is not wrapped again.
* <p/>
- * {@link InternalNode}s are wrapped using {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode, org.jboss.cache.InternalNode)}
- * and as such, null internal nodes are treated according to isolation level used. See {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode, org.jboss.cache.InternalNode)}
- * for details on this behaviour.
+ * {@link InternalNode}s are wrapped using {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode,
+ * org.jboss.cache.InternalNode)} and as such, null internal nodes are treated according to isolation level used.
+ * See {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode,
+ * org.jboss.cache.InternalNode)} for details on this behaviour.
* <p/>
- * Note that if the context has the {@link org.jboss.cache.config.Option#isForceWriteLock()} option set, then write locks are
- * acquired and the node is copied.
+ * Note that if the context has the {@link org.jboss.cache.config.Option#isForceWriteLock()} option set, then write
+ * locks are acquired and the node is copied.
* <p/>
*
* @param ctx current invocation context
* @param fqns collection of Fqns. Should not be null.
* @throws InterruptedException if write locks are forced and the lock manager is interrupted.
*/
- public void wrapNodesForReading(InvocationContext ctx, Collection<Fqn> fqns) throws InterruptedException
- {
+ public void wrapNodesForReading(InvocationContext ctx, Collection<Fqn> fqns) throws InterruptedException {
boolean forceWriteLock = ctx.getOptionOverrides().isForceWriteLock();
// does the node exist in the context?
@@ -108,8 +106,8 @@
}
/**
- * Similar to {@link #wrapNodesForReading(org.jboss.cache.InvocationContext, java.util.Collection)} except
- * that this version takes a single Fqn parameter to wrap a single node.
+ * Similar to {@link #wrapNodesForReading(org.jboss.cache.InvocationContext, java.util.Collection)} except that this
+ * version takes a single Fqn parameter to wrap a single node.
*
* @param ctx current invocation context
* @param fqn fqn to fetch and wrap
@@ -117,31 +115,24 @@
* @return read committed node, or null if one is not found.
* @throws InterruptedException if write locks are forced and the lock manager is interrupted.
*/
- public NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn fqn, boolean putInContext) throws InterruptedException
- {
+ public NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn fqn, boolean putInContext) throws InterruptedException {
return wrapNodeForReading(ctx, fqn, ctx.getOptionOverrides().isForceWriteLock(), putInContext);
}
@SuppressWarnings("unchecked")
- private NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn f, boolean writeLockForced, boolean putInContext) throws InterruptedException
- {
+ private NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn f, boolean writeLockForced, boolean putInContext) throws InterruptedException {
NodeSPI n;
- if (writeLockForced)
- {
+ if (writeLockForced) {
if (trace) log.trace("Forcing lock on reading node " + f);
return wrapNodeForWriting(ctx, f, true, false, false, false, false);
- }
- else if ((n = ctx.lookUpNode(f)) == null)
- {
+ } else if ((n = ctx.lookUpNode(f)) == null) {
if (trace) log.trace("Node " + f + " is not in context, fetching from container.");
// simple implementation. Peek the node, wrap it, put wrapped node in the context.
InternalNode[] nodes = dataContainer.peekInternalNodeAndDirectParent(f, false);
ReadCommittedNode wrapped = nodeFactory.createWrappedNode(nodes[0], nodes[1]); // even though parents aren't needed for reading, we hold on to this ref in case the node is later written to.
if (putInContext && wrapped != null) ctx.putLookedUpNode(f, wrapped);
return wrapped;
- }
- else
- {
+ } else {
if (trace) log.trace("Node " + f + " is already in context.");
return n;
}
@@ -152,20 +143,18 @@
*
* @param ctx context
* @param fqn Fqn to lock
- * @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was already held)
+ * @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
+ * already held)
* @throws InterruptedException if interrupted
* @throws TimeoutException if we are unable to acquire the lock after a specified timeout.
*/
- private boolean acquireLock(InvocationContext ctx, Fqn fqn) throws InterruptedException, TimeoutException
- {
+ private boolean acquireLock(InvocationContext ctx, Fqn fqn) throws InterruptedException, TimeoutException {
// don't EVER use lockManager.isLocked() since with lock striping it may be the case that we hold the relevant
// lock which may be shared with another Fqn that we have a lock for already.
// nothing wrong, just means that we fail to record the lock. And that is a problem.
// Better to check our records and lock again if necessary.
- if (!ctx.hasLock(fqn))
- {
- if (!lockManager.lockAndRecord(fqn, WRITE, ctx))
- {
+ if (!ctx.hasLock(fqn)) {
+ if (!lockManager.lockAndRecord(fqn, WRITE, ctx)) {
Object owner = lockManager.getWriteOwner(fqn);
throw new TimeoutException("Unable to acquire lock on Fqn [" + fqn + "] after [" + ctx.getLockAcquisitionTimeout(defaultLockAcquisitionTimeout) + "] milliseconds for requestor [" + lockManager.getLockOwner(ctx) + "]! Lock held by [" + owner + "]");
}
@@ -176,10 +165,10 @@
/**
* First checks in contexts for the existence of the node. If it does exist, it will return it, acquiring a lock if
- * necessary. Otherwise, it will peek in the dataContainer, wrap the node, lock if necessary, and add it to the context.
- * If it doesn't even exist in the dataContainer and createIfAbsent is true, it will create a new node and add it to the
- * data structure. It will lock the node, and potentially the parent as well, if necessary. If the parent is locked,
- * it too will be added to the context if it wasn't there already.
+ * necessary. Otherwise, it will peek in the dataContainer, wrap the node, lock if necessary, and add it to the
+ * context. If it doesn't even exist in the dataContainer and createIfAbsent is true, it will create a new node and
+ * add it to the data structure. It will lock the node, and potentially the parent as well, if necessary. If the
+ * parent is locked, it too will be added to the context if it wasn't there already.
*
* @param context invocation context
* @param fqn to retrieve
@@ -192,54 +181,48 @@
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- public ReadCommittedNode wrapNodeForWriting(InvocationContext context, Fqn fqn, boolean lockForWriting, boolean createIfAbsent, boolean includeInvalidNodes, boolean forRemoval, boolean force) throws InterruptedException
- {
+ public ReadCommittedNode wrapNodeForWriting(InvocationContext context, Fqn fqn, boolean lockForWriting, boolean createIfAbsent, boolean includeInvalidNodes, boolean forRemoval, boolean force) throws InterruptedException {
Fqn parentFqn = null;
ReadCommittedNode n = (ReadCommittedNode) context.lookUpNode(fqn);
if (createIfAbsent && n != null && n.isNullNode()) n = null;
if (n != null) // exists in context! Just acquire lock if needed, and wrap.
{
// acquire lock if needed
- if (lockForWriting && acquireLock(context, fqn))
- {
+ if (lockForWriting && acquireLock(context, fqn)) {
// create a copy of the underlying node
n.markForUpdate(dataContainer, writeSkewCheck);
}
if (trace) log.trace("Retrieving wrapped node " + fqn);
- if (n.isDeleted() && createIfAbsent)
- {
+ if (n.isDeleted() && createIfAbsent) {
if (trace) log.trace("Node is deleted in current scope. Need to un-delete.");
n.markAsDeleted(false);
n.setValid(true, false);
+ n.clearData(); // a delete and re-add should flush any old state on the node!
+ // has the parent been deleted too? :-(
+ wrapNodeForWriting(context, fqn.getParent(), true, true, includeInvalidNodes, false, force);
}
- }
- else
- {
+ } else {
// else, fetch from dataContainer.
InternalNode[] nodes = dataContainer.peekInternalNodeAndDirectParent(fqn, includeInvalidNodes);
InternalNode in = nodes[0];
- if (in != null)
- {
+ if (in != null) {
// exists in cache! Just acquire lock if needed, and wrap.
// do we need a lock?
boolean needToCopy = false;
- if (lockForWriting && acquireLock(context, fqn))
- {
+ if (lockForWriting && acquireLock(context, fqn)) {
needToCopy = true;
}
n = nodeFactory.createWrappedNode(in, nodes[1]);
context.putLookedUpNode(fqn, n);
if (needToCopy) n.markForUpdate(dataContainer, writeSkewCheck);
- }
- else if (createIfAbsent) // else, do we need to create one?
+ } else if (createIfAbsent) // else, do we need to create one?
{
parentFqn = fqn.getParent();
NodeSPI parent = wrapNodeForWriting(context, parentFqn, false, createIfAbsent, false, false, false);
// do we need to lock the parent to create children?
boolean parentLockNeeded = isParentLockNeeded(parent.getDelegationTarget());
// get a lock on the parent.
- if (parentLockNeeded && acquireLock(context, parentFqn))
- {
+ if (parentLockNeeded && acquireLock(context, parentFqn)) {
ReadCommittedNode parentRCN = (ReadCommittedNode) context.lookUpNode(parentFqn);
parentRCN.markForUpdate(dataContainer, writeSkewCheck);
}
@@ -250,6 +233,7 @@
n = nodeFactory.createWrappedNode(in, parent.getDelegationTarget());
n.setCreated(true);
+ n.setDataLoaded(true); // created here so we are loading it here
context.putLookedUpNode(fqn, n);
n.markForUpdate(dataContainer, writeSkewCheck);
}
@@ -266,8 +250,9 @@
}
/**
- * The same as {@link #wrapNodeForWriting(org.jboss.cache.InvocationContext, org.jboss.cache.Fqn, boolean, boolean, boolean, boolean, boolean)}
- * except that it takes in an {@link org.jboss.cache.InternalNode} instead of a {@link Fqn}. Saves on a lookup.
+ * The same as {@link #wrapNodeForWriting(org.jboss.cache.InvocationContext, org.jboss.cache.Fqn, boolean, boolean,
+ * boolean, boolean, boolean)} except that it takes in an {@link org.jboss.cache.InternalNode} instead of a {@link
+ * Fqn}. Saves on a lookup.
* <p/>
* Also assumes that the node exists, and hence will not be created.
* <p/>
@@ -278,27 +263,22 @@
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- public NodeSPI wrapNodeForWriting(InvocationContext context, InternalNode node, InternalNode parent) throws InterruptedException
- {
+ public NodeSPI wrapNodeForWriting(InvocationContext context, InternalNode node, InternalNode parent) throws InterruptedException {
Fqn fqn = node.getFqn();
NodeSPI n = context.lookUpNode(fqn);
if (n != null) // exists in context! Just acquire lock if needed, and wrap.
{
// acquire lock if needed
- if (acquireLock(context, fqn))
- {
+ if (acquireLock(context, fqn)) {
// create a copy of the underlying node
n.markForUpdate(dataContainer, writeSkewCheck);
}
if (trace) log.trace("Retrieving wrapped node " + fqn);
- }
- else
- {
+ } else {
// exists in cache! Just acquire lock if needed, and wrap.
// do we need a lock?
boolean needToCopy = false;
- if (acquireLock(context, fqn))
- {
+ if (acquireLock(context, fqn)) {
needToCopy = true;
}
n = nodeFactory.createWrappedNode(node, parent);
@@ -318,8 +298,7 @@
* @throws InterruptedException if the lock manager is interrupted.
*/
@SuppressWarnings("unchecked")
- public List<Fqn> wrapNodesRecursivelyForRemoval(InvocationContext ctx, Fqn fqn) throws InterruptedException
- {
+ public List<Fqn> wrapNodesRecursivelyForRemoval(InvocationContext ctx, Fqn fqn) throws InterruptedException {
// when removing a node we want to get a lock on the Fqn anyway and return the wrapped node.
if (fqn.isRoot()) throw new CacheException("Attempting to remove Fqn.ROOT!");
@@ -328,8 +307,7 @@
boolean needToCopyParent = false;
boolean parentLockNeeded = isParentLockNeeded(parentFqn, ctx);
ReadCommittedNode parent = null;
- if (parentLockNeeded)
- {
+ if (parentLockNeeded) {
needToCopyParent = acquireLock(ctx, parentFqn);
// Ensure the node is in the context.
parent = wrapAndPutInContext(ctx, parentFqn, needToCopyParent);
@@ -340,16 +318,12 @@
// Ensure the node is in the context.
ReadCommittedNode node = wrapAndPutInContext(ctx, fqn, needToCopyNode);
- if (node == null || node.isNullNode())
- {
+ if (node == null || node.isNullNode()) {
// node does not exist; return an empty list since there is nothing to remove!
return Collections.emptyList();
- }
- else
- {
+ } else {
// update child ref on parent to point to child as this is now a copy.
- if (parentLockNeeded && (needToCopyNode || needToCopyParent))
- {
+ if (parentLockNeeded && (needToCopyNode || needToCopyParent)) {
if (parent == null) throw new NodeNotExistsException("Parent node " + parentFqn + " does not exist!");
parent.getDelegationTarget().addChild(node.getDelegationTarget());
}
@@ -359,8 +333,7 @@
List<Fqn> fqnsToBeRemoved = new LinkedList<Fqn>();
fqnsToBeRemoved.add(fqn);
- if (!childMap.isEmpty())
- {
+ if (!childMap.isEmpty()) {
for (InternalNode n : childMap.values()) lockForWritingRecursive(n.getFqn(), ctx, fqnsToBeRemoved);
}
@@ -373,20 +346,18 @@
*
* @param fqn Fqn to lock
* @param ctx invocation context to add wrapped node to
- * @param fqnList fqnList to update - this list should not be null but should be initially empty and will be populated
- * with a list of all Fqns locked in this call.
+ * @param fqnList fqnList to update - this list should not be null but should be initially empty and will be
+ * populated with a list of all Fqns locked in this call.
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- private void lockForWritingRecursive(Fqn fqn, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException
- {
+ private void lockForWritingRecursive(Fqn fqn, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException {
acquireLock(ctx, fqn); // lock node
if (fqnList != null) fqnList.add(fqn);
// now wrap and add to the context
ReadCommittedNode rcn = wrapNodeForWriting(ctx, fqn, true, false, true, false, false);
- if (rcn != null)
- {
+ if (rcn != null) {
rcn.markForUpdate(dataContainer, writeSkewCheck);
Map<Object, InternalNode<?, ?>> children = rcn.getDelegationTarget().getChildrenMap();
for (InternalNode child : children.values())
@@ -395,8 +366,9 @@
}
/**
- * Identical to {@link #lockForWritingRecursive(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext, java.util.List)}
- * except that it uses an {@link org.jboss.cache.InternalNode} instead of an {@link Fqn} - saves a lookup.
+ * Identical to {@link #lockForWritingRecursive(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext,
+ * java.util.List)} except that it uses an {@link org.jboss.cache.InternalNode} instead of an {@link Fqn} - saves a
+ * lookup.
*
* @param node node to lock recursively
* @param ctx invocation context
@@ -404,16 +376,14 @@
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- private void lockForWritingRecursive(InternalNode node, InternalNode parent, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException
- {
+ private void lockForWritingRecursive(InternalNode node, InternalNode parent, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException {
Fqn fqn = node.getFqn();
acquireLock(ctx, fqn); // lock node
if (fqnList != null) fqnList.add(fqn);
// now wrap and add to the context
NodeSPI rcn = wrapNodeForWriting(ctx, node, parent);
- if (rcn != null)
- {
+ if (rcn != null) {
rcn.markForUpdate(dataContainer, writeSkewCheck);
Map<Object, InternalNode<?, ?>> children = node.getChildrenMap();
for (InternalNode child : children.values()) lockForWritingRecursive(child, node, ctx, fqnList);
@@ -422,9 +392,9 @@
/**
- * Wraps a node and puts it in the context, optionally copying the node for updating if <tt>forUpdate</tt> is <tt>true</tt>.
- * If the node is already in the context, a new wrapped node is not created, but the existing one is still checked
- * for changes and potentially marked for update if <tt>forUpdate</tt> is <tt>true</tt>.
+ * Wraps a node and puts it in the context, optionally copying the node for updating if <tt>forUpdate</tt> is
+ * <tt>true</tt>. If the node is already in the context, a new wrapped node is not created, but the existing one is
+ * still checked for changes and potentially marked for update if <tt>forUpdate</tt> is <tt>true</tt>.
*
* @param ctx invocation context to add node to
* @param fqn fqn of node to add
@@ -432,11 +402,9 @@
* @return the ReadCommittedNode wrapper, or null if the node does not exist.
*/
@SuppressWarnings("unchecked")
- private ReadCommittedNode wrapAndPutInContext(InvocationContext ctx, Fqn fqn, boolean forUpdate)
- {
+ private ReadCommittedNode wrapAndPutInContext(InvocationContext ctx, Fqn fqn, boolean forUpdate) {
ReadCommittedNode node = (ReadCommittedNode) ctx.lookUpNode(fqn);
- if (node == null)
- {
+ if (node == null) {
InternalNode[] nodes = dataContainer.peekInternalNodeAndDirectParent(fqn, false);
node = nodeFactory.createWrappedNode(nodes[0], nodes[1]);
ctx.putLookedUpNode(fqn, node);
@@ -449,14 +417,13 @@
}
/**
- * An overloaded version of {@link #isParentLockNeeded(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext)}
- * which takes in an {@link org.jboss.cache.InternalNode} instead of a {@link Fqn}.
+ * An overloaded version of {@link #isParentLockNeeded(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext)} which
+ * takes in an {@link org.jboss.cache.InternalNode} instead of a {@link Fqn}.
*
* @param parent parent node to test
* @return true if parent lock is needed, false otherwise.
*/
- private boolean isParentLockNeeded(InternalNode parent)
- {
+ private boolean isParentLockNeeded(InternalNode parent) {
return lockParentForChildInsertRemove || (parent != null && parent.isLockForChildInsertRemove());
}
@@ -467,8 +434,7 @@
* @param ctx invocation context
* @return true if parent lock is needed, false otherwise.
*/
- private boolean isParentLockNeeded(Fqn parent, InvocationContext ctx)
- {
+ private boolean isParentLockNeeded(Fqn parent, InvocationContext ctx) {
ReadCommittedNode parentNodeTmp = (ReadCommittedNode) ctx.lookUpNode(parent);
InternalNode in = parentNodeTmp == null ? dataContainer.peekInternalNode(parent, true) : parentNodeTmp.getDelegationTarget();
return isParentLockNeeded(in);
Added: core/trunk/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java 2009-02-19 13:40:55 UTC (rev 7735)
@@ -0,0 +1,43 @@
+package org.jboss.cache.api;
+
+import org.jboss.cache.AbstractSingleCacheTest;
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.UnitTestCacheFactory;
+import org.testng.annotations.Test;
+
+import javax.transaction.TransactionManager;
+
+@Test(groups = {"functional"}, sequential = true, testName = "api.ReAddDeletedNodeTest")
+public class ReAddDeletedNodeTest extends AbstractSingleCacheTest
+{
+ private CacheSPI<String, String> cache;
+
+ public CacheSPI createCache()
+ {
+ // start a single cache instance
+ UnitTestCacheFactory<String, String> cf = new UnitTestCacheFactory<String, String>();
+ cache = (CacheSPI<String, String>) cf.createCache("configs/local-tx.xml", false, getClass());
+ cache.getConfiguration().setEvictionConfig(null);
+ cache.start();
+ return cache;
+ }
+
+ public void testReAdd() throws Exception
+ {
+ TransactionManager tm = cache.getTransactionManager();
+ Fqn<String> testFqn = Fqn.fromElements("a", "a", "a");
+
+ tm.begin();
+ cache.put(testFqn, "x", "x");
+ cache.removeNode(testFqn.getParent());
+ cache.put(testFqn, "x", "x");
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (before commit)";
+ tm.commit();
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (after commit)";
+ }
+}
Modified: core/trunk/src/test/java/org/jboss/cache/mgmt/CacheLoaderTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/mgmt/CacheLoaderTest.java 2009-02-19 13:23:08 UTC (rev 7734)
+++ core/trunk/src/test/java/org/jboss/cache/mgmt/CacheLoaderTest.java 2009-02-19 13:40:55 UTC (rev 7735)
@@ -44,8 +44,6 @@
assertNotNull("Retrieval error: expected to retrieve " + CAPITAL + " for " + AUSTRIA, cache.get(AUSTRIA, CAPITAL));
assertNull("Retrieval error: did not expect to retrieve " + AREA + " for " + AUSTRIA, cache.get(AUSTRIA, AREA));
- load++;
-
// verify statistics after retrieving entries - misses should still be same since nodes were already loaded
assertEquals("CacheLoaderLoads count error: ", load, loader.getCacheLoaderLoads());
assertEquals("CacheLoaderMisses count error: ", miss, loader.getCacheLoaderMisses());
@@ -104,7 +102,6 @@
// add two attributes - this should cause two stores
stores += 2;
- load++;
cache.put(POLAND, CAPITAL, "Warsaw");
cache.put(POLAND, CURRENCY, "Zloty");
assertEquals("CacheLoaderLoads count error: ", load, loader.getCacheLoaderLoads());
Modified: core/trunk/src/test/java/org/jboss/cache/mgmt/PassivationTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/mgmt/PassivationTest.java 2009-02-19 13:23:08 UTC (rev 7734)
+++ core/trunk/src/test/java/org/jboss/cache/mgmt/PassivationTest.java 2009-02-19 13:40:55 UTC (rev 7735)
@@ -52,8 +52,6 @@
assertNotNull("Retrieval error: expected to retrieve " + CAPITAL + " for " + AUSTRIA, cache.get(AUSTRIA, CAPITAL));
assertNull("Retrieval error: did not expect to retrieve " + AREA + " for " + AUSTRIA, cache.get(AUSTRIA, AREA));
- miss++;
-
// verify statistics after retrieving entries - no change since nodes were already loaded
assertEquals("CacheLoaderLoads count error: ", 0, act.getCacheLoaderLoads());
assertEquals("CacheLoaderMisses count error: ", miss, act.getCacheLoaderMisses());
@@ -82,7 +80,7 @@
assertNotNull("Retrieval error: expected to retrieve " + CURRENCY + " for " + AUSTRIA, cache.get(AUSTRIA, CURRENCY));
// verify statistics after retrieving evicted entry - loads and activations should now increment by 1
- activations++;
+ activations+= 3;
assertEquals("CacheLoaderLoads count error: ", 1, act.getCacheLoaderLoads());
assertEquals("CacheLoaderMisses count error: ", miss, act.getCacheLoaderMisses());
assertEquals("Activations count error: ", activations, act.getActivations());
@@ -113,7 +111,7 @@
cache.put(POLAND, new HashMap<String, Object>());
cache.put(POLAND, CAPITAL, "Warsaw");
cache.put(POLAND, CURRENCY, "Zloty");
- miss += 3;
+ miss ++;
assertEquals("CacheLoaderLoads count error: ", 1, act.getCacheLoaderLoads());
assertEquals("CacheLoaderMisses count error: ", miss, act.getCacheLoaderMisses());
assertEquals("Activations count error: ", activations, act.getActivations());
@@ -127,7 +125,7 @@
assertEquals("Passivations count error: ", 2, pass.getPassivations());
// retrieve a valid attribute - this will cause an activation and a load
- activations++;
+ activations+=3;
assertNotNull("Retrieval error: expected to retrieve " + CURRENCY + " for " + POLAND, cache.get(POLAND, CURRENCY));
assertEquals("CacheLoaderLoads count error: ", 2, act.getCacheLoaderLoads());
assertEquals("CacheLoaderMisses count error: ", miss, act.getCacheLoaderMisses());
@@ -142,7 +140,7 @@
assertEquals("Passivations count error: ", 3, pass.getPassivations());
// retrieve an invalid attribute - this will cause an activation and a load
- activations++;
+ activations+=3;
assertNull("Retrieval error: did not expect to retrieve " + AREA + " for " + POLAND, cache.get(POLAND, AREA));
assertEquals("CacheLoaderLoads count error: ", 3, act.getCacheLoaderLoads());
assertEquals("CacheLoaderMisses count error: ", miss, act.getCacheLoaderMisses());
Modified: core/trunk/src/test/java/org/jboss/cache/passivation/PassivationTestsBase.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/passivation/PassivationTestsBase.java 2009-02-19 13:23:08 UTC (rev 7734)
+++ core/trunk/src/test/java/org/jboss/cache/passivation/PassivationTestsBase.java 2009-02-19 13:40:55 UTC (rev 7735)
@@ -655,7 +655,7 @@
assertTrue(loader.exists(Fqn.fromString("/first/second")));
assert (exists("/first"));
String val = (String) cache.get("/first/second", "key1");
- assertTrue(loader.exists(Fqn.fromString("/first/second")));
+ assertFalse(loader.exists(Fqn.fromString("/first/second")));
assertEquals("val1", val);
String val2 = (String) cache.get("/first/second/third", "key2");// activate node
assertFalse(loader.exists(Fqn.fromString("/first/second/third")));
15 years, 11 months
JBoss Cache SVN: r7734 - in core/branches/3.0.X/src: test/java/org/jboss/cache/api and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 08:23:08 -0500 (Thu, 19 Feb 2009)
New Revision: 7734
Modified:
core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java
core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java
Log:
JBCACHE-1481 - Re-adding a node deleted via parent inside transaction breaks the parent - improved the test and the fix
Modified: core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java
===================================================================
--- core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java 2009-02-19 13:21:22 UTC (rev 7733)
+++ core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java 2009-02-19 13:23:08 UTC (rev 7734)
@@ -86,9 +86,9 @@
* nodes already exist in the context then the node is not wrapped again.
* <p/>
* {@link InternalNode}s are wrapped using {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode,
- * org.jboss.cache.InternalNode)} and as such, null internal nodes are treated according to isolation level used.
- * See {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode,
- * org.jboss.cache.InternalNode)} for details on this behaviour.
+ * org.jboss.cache.InternalNode)} and as such, null internal nodes are treated according to isolation level used. See
+ * {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode, org.jboss.cache.InternalNode)}
+ * for details on this behaviour.
* <p/>
* Note that if the context has the {@link org.jboss.cache.config.Option#isForceWriteLock()} option set, then write
* locks are acquired and the node is copied.
@@ -197,6 +197,7 @@
if (trace) log.trace("Node is deleted in current scope. Need to un-delete.");
n.markAsDeleted(false);
n.setValid(true, false);
+ n.clearData(); // a delete and re-add should flush any old state on the node!
// has the parent been deleted too? :-(
wrapNodeForWriting(context, fqn.getParent(), true, true, includeInvalidNodes, false, force);
}
Modified: core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java
===================================================================
--- core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java 2009-02-19 13:21:22 UTC (rev 7733)
+++ core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java 2009-02-19 13:23:08 UTC (rev 7734)
@@ -37,4 +37,49 @@
assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (after commit)";
assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (after commit)";
}
+
+ public void testReAddWithData() throws Exception {
+ TransactionManager tm = cache.getTransactionManager();
+ Fqn<String> testFqn = Fqn.fromElements("a", "a", "a");
+
+ tm.begin();
+ cache.put(testFqn, "1", "2");
+ assert cache.get(testFqn, "1").equals("2");
+ assert cache.get(testFqn, "3") == null;
+ cache.removeNode(testFqn.getParent());
+ cache.put(testFqn, "3", "4");
+ assert cache.get(testFqn, "3").equals("4");
+ assert cache.get(testFqn, "1") == null;
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (before commit)";
+ tm.commit();
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (after commit)";
+ }
+
+ public void testReAddWithDataOnParent() throws Exception {
+ TransactionManager tm = cache.getTransactionManager();
+ Fqn<String> testFqn = Fqn.fromElements("a", "a", "a");
+
+ tm.begin();
+ cache.put(testFqn, "x", "x");
+ cache.put(testFqn.getParent(), "parent_x", "parent_x");
+ assert cache.get(testFqn, "x").equals("x");
+ assert cache.get(testFqn.getParent(), "parent_x").equals("parent_x");
+ cache.removeNode(testFqn.getParent());
+ cache.put(testFqn, "y", "y");
+ assert cache.get(testFqn, "y").equals("y");
+ assert cache.get(testFqn.getParent(), "parent_x") == null;
+ assert cache.getNode(testFqn.getParent()).getData().isEmpty();
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (before commit)";
+ tm.commit();
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (after commit)";
+ }
+
}
15 years, 11 months
JBoss Cache SVN: r7733 - core/branches/2.2.X/src/main/java/org/jboss/cache/interceptors.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 08:21:22 -0500 (Thu, 19 Feb 2009)
New Revision: 7733
Modified:
core/branches/2.2.X/src/main/java/org/jboss/cache/interceptors/ActivationInterceptor.java
Log:
JBCACHE-1482: ActivationInterceptor leaks memory
Modified: core/branches/2.2.X/src/main/java/org/jboss/cache/interceptors/ActivationInterceptor.java
===================================================================
--- core/branches/2.2.X/src/main/java/org/jboss/cache/interceptors/ActivationInterceptor.java 2009-02-19 13:07:43 UTC (rev 7732)
+++ core/branches/2.2.X/src/main/java/org/jboss/cache/interceptors/ActivationInterceptor.java 2009-02-19 13:21:22 UTC (rev 7733)
@@ -4,7 +4,6 @@
import org.jboss.cache.InvocationContext;
import org.jboss.cache.Modification;
import org.jboss.cache.NodeSPI;
-import org.jboss.cache.commands.AbstractVisitor;
import org.jboss.cache.commands.read.GetChildrenNamesCommand;
import org.jboss.cache.commands.read.GetKeyValueCommand;
import org.jboss.cache.commands.read.GetKeysCommand;
@@ -19,7 +18,6 @@
import org.jboss.cache.commands.write.RemoveKeyCommand;
import org.jboss.cache.commands.write.RemoveNodeCommand;
import org.jboss.cache.factories.annotations.Inject;
-import org.jboss.cache.factories.annotations.Start;
import org.jboss.cache.transaction.GlobalTransaction;
import org.jboss.cache.transaction.TransactionEntry;
@@ -45,7 +43,6 @@
protected TransactionManager txMgr = null;
private long activations = 0;
- ActivationModificationsBuilder builder;
/**
* List<Transaction> that we have registered for
@@ -65,12 +62,6 @@
this.txMgr = txMgr;
}
- @Start
- public void createModificationsBuilder()
- {
- builder = new ActivationModificationsBuilder();
- }
-
@Override
public Object visitClearDataCommand(InvocationContext ctx, ClearDataCommand command) throws Throwable
{
@@ -297,95 +288,9 @@
}
List<Modification> cacheLoaderModifications = new ArrayList<Modification>();
- builder.visitCollection(ctx, entry.getModifications());
if (cacheLoaderModifications.size() > 0)
{
loader.prepare(gtx, cacheLoaderModifications, false);
}
}
-
- public class ActivationModificationsBuilder extends AbstractVisitor
- {
-
- private List<Modification> cacheLoaderModifications = new ArrayList<Modification>();
- private int txActs = 0;
-
- @Override
- public Object visitRemoveNodeCommand(InvocationContext ctx, RemoveNodeCommand removeNodeCommand) throws Throwable
- {
- Modification mod = new Modification(Modification.ModificationType.REMOVE_NODE, removeNodeCommand.getFqn());
- cacheLoaderModifications.add(mod);
- return null;
- }
-
- @Override
- public Object visitPutDataMapCommand(InvocationContext ctx, PutDataMapCommand command) throws Throwable
- {
- Fqn fqn = command.getFqn();
- handlePutCommand(ctx, fqn);
- return null;
- }
-
- @Override
- public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable
- {
- Fqn fqn = command.getFqn();
- handlePutCommand(ctx, fqn);
- return null;
- }
-
- // On the way out, remove the node from the cache loader.
- // Only remove the node if it exists in memory, its attributes have
- // been initialized, its children have been loaded
- // AND it was found in the cache loader (nodeLoaded = true).
- // Then notify the listeners that the node has been activated.
- private void handlePutCommand(InvocationContext ctx, Fqn fqn)
- throws Exception
- {
- if (fqn != null && dataContainer.peek(fqn, false, false) != null && loader.exists(fqn))
- {
- NodeSPI n = dataContainer.peek(fqn, true, false);// don't load
- // node not null and attributes have been loaded?
- if (n != null && n.isDataLoaded())
- {
- // has children?
- boolean result = childrenLoaded(n);
- if (!n.getChildrenDirect().isEmpty() && result)
- {
- // children have been loaded, remove the node
- addRemoveMod(ctx, cacheLoaderModifications, fqn, n.getDataDirect());
- txActs++;
- }
- // doesn't have children, check the cache loader
- else if (loaderNoChildren(fqn))
- {
- addRemoveMod(ctx, cacheLoaderModifications, fqn, n.getDataDirect());
- txActs++;
- }
- }
- }
- }
-
- private boolean loaderNoChildren(Fqn fqn) throws Exception
- {
- return loader.getChildrenNames(fqn) != null;
- }
-
- private void addRemoveMod(InvocationContext ctx, List<Modification> l, Fqn fqn, Map data)
- {
- Modification mod = new Modification(Modification.ModificationType.REMOVE_NODE, fqn);
- l.add(mod);
- notifier.notifyNodeActivated(fqn, false, data, ctx);
- }
-
- public List<Modification> getCacheLoaderModifications()
- {
- return cacheLoaderModifications;
- }
-
- public int getTxActs()
- {
- return txActs;
- }
- }
}
15 years, 11 months
JBoss Cache SVN: r7732 - in core/branches/3.0.X/src: test/java/org/jboss/cache/api and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 08:07:43 -0500 (Thu, 19 Feb 2009)
New Revision: 7732
Added:
core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java
Modified:
core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java
Log:
JBCACHE-1481 - Re-adding a node deleted via parent inside transaction breaks the parent
Modified: core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java
===================================================================
--- core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java 2009-02-19 13:03:56 UTC (rev 7731)
+++ core/branches/3.0.X/src/main/java/org/jboss/cache/mvcc/MVCCNodeHelper.java 2009-02-19 13:07:43 UTC (rev 7732)
@@ -46,15 +46,15 @@
import java.util.Map;
/**
- * Utility functions to manipulate wrapping {@link org.jboss.cache.InternalNode}s as {@link org.jboss.cache.mvcc.ReadCommittedNode}
- * or {@link org.jboss.cache.mvcc.RepeatableReadNode}s. Would also entail locking, if necessary.
+ * Utility functions to manipulate wrapping {@link org.jboss.cache.InternalNode}s as {@link
+ * org.jboss.cache.mvcc.ReadCommittedNode} or {@link org.jboss.cache.mvcc.RepeatableReadNode}s. Would also entail
+ * locking, if necessary.
*
* @author Manik Surtani (<a href="mailto:manik AT jboss DOT org">manik AT jboss DOT org</a>)
* @since 3.0
*/
@NonVolatile
-public class MVCCNodeHelper
-{
+public class MVCCNodeHelper {
DataContainer dataContainer;
NodeFactory nodeFactory;
private static final Log log = LogFactory.getLog(MVCCNodeHelper.class);
@@ -66,8 +66,7 @@
private boolean lockParentForChildInsertRemove;
@Inject
- public void injectDependencies(DataContainer dataContainer, NodeFactory nodeFactory, LockManager lockManager, Configuration configuration)
- {
+ public void injectDependencies(DataContainer dataContainer, NodeFactory nodeFactory, LockManager lockManager, Configuration configuration) {
this.nodeFactory = nodeFactory;
this.dataContainer = dataContainer;
this.configuration = configuration;
@@ -75,8 +74,7 @@
}
@Start
- public void start()
- {
+ public void start() {
defaultLockAcquisitionTimeout = configuration.getLockAcquisitionTimeout();
writeSkewCheck = configuration.isWriteSkewCheck();
lockParentForChildInsertRemove = configuration.isLockParentForChildInsertRemove();
@@ -84,23 +82,23 @@
/**
- * Attempts to provide the context with a set of wrapped nodes based on the Collection of fqns passed in. If the nodes
- * already exist in the context then the node is not wrapped again.
+ * Attempts to provide the context with a set of wrapped nodes based on the Collection of fqns passed in. If the
+ * nodes already exist in the context then the node is not wrapped again.
* <p/>
- * {@link InternalNode}s are wrapped using {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode, org.jboss.cache.InternalNode)}
- * and as such, null internal nodes are treated according to isolation level used. See {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode, org.jboss.cache.InternalNode)}
- * for details on this behaviour.
+ * {@link InternalNode}s are wrapped using {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode,
+ * org.jboss.cache.InternalNode)} and as such, null internal nodes are treated according to isolation level used.
+ * See {@link org.jboss.cache.NodeFactory#createWrappedNode(org.jboss.cache.InternalNode,
+ * org.jboss.cache.InternalNode)} for details on this behaviour.
* <p/>
- * Note that if the context has the {@link org.jboss.cache.config.Option#isForceWriteLock()} option set, then write locks are
- * acquired and the node is copied.
+ * Note that if the context has the {@link org.jboss.cache.config.Option#isForceWriteLock()} option set, then write
+ * locks are acquired and the node is copied.
* <p/>
*
* @param ctx current invocation context
* @param fqns collection of Fqns. Should not be null.
* @throws InterruptedException if write locks are forced and the lock manager is interrupted.
*/
- public void wrapNodesForReading(InvocationContext ctx, Collection<Fqn> fqns) throws InterruptedException
- {
+ public void wrapNodesForReading(InvocationContext ctx, Collection<Fqn> fqns) throws InterruptedException {
boolean forceWriteLock = ctx.getOptionOverrides().isForceWriteLock();
// does the node exist in the context?
@@ -108,8 +106,8 @@
}
/**
- * Similar to {@link #wrapNodesForReading(org.jboss.cache.InvocationContext, java.util.Collection)} except
- * that this version takes a single Fqn parameter to wrap a single node.
+ * Similar to {@link #wrapNodesForReading(org.jboss.cache.InvocationContext, java.util.Collection)} except that this
+ * version takes a single Fqn parameter to wrap a single node.
*
* @param ctx current invocation context
* @param fqn fqn to fetch and wrap
@@ -117,31 +115,24 @@
* @return read committed node, or null if one is not found.
* @throws InterruptedException if write locks are forced and the lock manager is interrupted.
*/
- public NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn fqn, boolean putInContext) throws InterruptedException
- {
+ public NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn fqn, boolean putInContext) throws InterruptedException {
return wrapNodeForReading(ctx, fqn, ctx.getOptionOverrides().isForceWriteLock(), putInContext);
}
@SuppressWarnings("unchecked")
- private NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn f, boolean writeLockForced, boolean putInContext) throws InterruptedException
- {
+ private NodeSPI wrapNodeForReading(InvocationContext ctx, Fqn f, boolean writeLockForced, boolean putInContext) throws InterruptedException {
NodeSPI n;
- if (writeLockForced)
- {
+ if (writeLockForced) {
if (trace) log.trace("Forcing lock on reading node " + f);
return wrapNodeForWriting(ctx, f, true, false, false, false, false);
- }
- else if ((n = ctx.lookUpNode(f)) == null)
- {
+ } else if ((n = ctx.lookUpNode(f)) == null) {
if (trace) log.trace("Node " + f + " is not in context, fetching from container.");
// simple implementation. Peek the node, wrap it, put wrapped node in the context.
InternalNode[] nodes = dataContainer.peekInternalNodeAndDirectParent(f, false);
ReadCommittedNode wrapped = nodeFactory.createWrappedNode(nodes[0], nodes[1]); // even though parents aren't needed for reading, we hold on to this ref in case the node is later written to.
if (putInContext && wrapped != null) ctx.putLookedUpNode(f, wrapped);
return wrapped;
- }
- else
- {
+ } else {
if (trace) log.trace("Node " + f + " is already in context.");
return n;
}
@@ -152,20 +143,18 @@
*
* @param ctx context
* @param fqn Fqn to lock
- * @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was already held)
+ * @return true if a lock was needed and acquired, false if it didn't need to acquire the lock (i.e., lock was
+ * already held)
* @throws InterruptedException if interrupted
* @throws TimeoutException if we are unable to acquire the lock after a specified timeout.
*/
- private boolean acquireLock(InvocationContext ctx, Fqn fqn) throws InterruptedException, TimeoutException
- {
+ private boolean acquireLock(InvocationContext ctx, Fqn fqn) throws InterruptedException, TimeoutException {
// don't EVER use lockManager.isLocked() since with lock striping it may be the case that we hold the relevant
// lock which may be shared with another Fqn that we have a lock for already.
// nothing wrong, just means that we fail to record the lock. And that is a problem.
// Better to check our records and lock again if necessary.
- if (!ctx.hasLock(fqn))
- {
- if (!lockManager.lockAndRecord(fqn, WRITE, ctx))
- {
+ if (!ctx.hasLock(fqn)) {
+ if (!lockManager.lockAndRecord(fqn, WRITE, ctx)) {
Object owner = lockManager.getWriteOwner(fqn);
throw new TimeoutException("Unable to acquire lock on Fqn [" + fqn + "] after [" + ctx.getLockAcquisitionTimeout(defaultLockAcquisitionTimeout) + "] milliseconds for requestor [" + lockManager.getLockOwner(ctx) + "]! Lock held by [" + owner + "]");
}
@@ -176,10 +165,10 @@
/**
* First checks in contexts for the existence of the node. If it does exist, it will return it, acquiring a lock if
- * necessary. Otherwise, it will peek in the dataContainer, wrap the node, lock if necessary, and add it to the context.
- * If it doesn't even exist in the dataContainer and createIfAbsent is true, it will create a new node and add it to the
- * data structure. It will lock the node, and potentially the parent as well, if necessary. If the parent is locked,
- * it too will be added to the context if it wasn't there already.
+ * necessary. Otherwise, it will peek in the dataContainer, wrap the node, lock if necessary, and add it to the
+ * context. If it doesn't even exist in the dataContainer and createIfAbsent is true, it will create a new node and
+ * add it to the data structure. It will lock the node, and potentially the parent as well, if necessary. If the
+ * parent is locked, it too will be added to the context if it wasn't there already.
*
* @param context invocation context
* @param fqn to retrieve
@@ -192,54 +181,47 @@
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- public ReadCommittedNode wrapNodeForWriting(InvocationContext context, Fqn fqn, boolean lockForWriting, boolean createIfAbsent, boolean includeInvalidNodes, boolean forRemoval, boolean force) throws InterruptedException
- {
+ public ReadCommittedNode wrapNodeForWriting(InvocationContext context, Fqn fqn, boolean lockForWriting, boolean createIfAbsent, boolean includeInvalidNodes, boolean forRemoval, boolean force) throws InterruptedException {
Fqn parentFqn = null;
ReadCommittedNode n = (ReadCommittedNode) context.lookUpNode(fqn);
if (createIfAbsent && n != null && n.isNullNode()) n = null;
if (n != null) // exists in context! Just acquire lock if needed, and wrap.
{
// acquire lock if needed
- if (lockForWriting && acquireLock(context, fqn))
- {
+ if (lockForWriting && acquireLock(context, fqn)) {
// create a copy of the underlying node
n.markForUpdate(dataContainer, writeSkewCheck);
}
if (trace) log.trace("Retrieving wrapped node " + fqn);
- if (n.isDeleted() && createIfAbsent)
- {
+ if (n.isDeleted() && createIfAbsent) {
if (trace) log.trace("Node is deleted in current scope. Need to un-delete.");
n.markAsDeleted(false);
n.setValid(true, false);
+ // has the parent been deleted too? :-(
+ wrapNodeForWriting(context, fqn.getParent(), true, true, includeInvalidNodes, false, force);
}
- }
- else
- {
+ } else {
// else, fetch from dataContainer.
InternalNode[] nodes = dataContainer.peekInternalNodeAndDirectParent(fqn, includeInvalidNodes);
InternalNode in = nodes[0];
- if (in != null)
- {
+ if (in != null) {
// exists in cache! Just acquire lock if needed, and wrap.
// do we need a lock?
boolean needToCopy = false;
- if (lockForWriting && acquireLock(context, fqn))
- {
+ if (lockForWriting && acquireLock(context, fqn)) {
needToCopy = true;
}
n = nodeFactory.createWrappedNode(in, nodes[1]);
context.putLookedUpNode(fqn, n);
if (needToCopy) n.markForUpdate(dataContainer, writeSkewCheck);
- }
- else if (createIfAbsent) // else, do we need to create one?
+ } else if (createIfAbsent) // else, do we need to create one?
{
parentFqn = fqn.getParent();
NodeSPI parent = wrapNodeForWriting(context, parentFqn, false, createIfAbsent, false, false, false);
// do we need to lock the parent to create children?
boolean parentLockNeeded = isParentLockNeeded(parent.getDelegationTarget());
// get a lock on the parent.
- if (parentLockNeeded && acquireLock(context, parentFqn))
- {
+ if (parentLockNeeded && acquireLock(context, parentFqn)) {
ReadCommittedNode parentRCN = (ReadCommittedNode) context.lookUpNode(parentFqn);
parentRCN.markForUpdate(dataContainer, writeSkewCheck);
}
@@ -267,8 +249,9 @@
}
/**
- * The same as {@link #wrapNodeForWriting(org.jboss.cache.InvocationContext, org.jboss.cache.Fqn, boolean, boolean, boolean, boolean, boolean)}
- * except that it takes in an {@link org.jboss.cache.InternalNode} instead of a {@link Fqn}. Saves on a lookup.
+ * The same as {@link #wrapNodeForWriting(org.jboss.cache.InvocationContext, org.jboss.cache.Fqn, boolean, boolean,
+ * boolean, boolean, boolean)} except that it takes in an {@link org.jboss.cache.InternalNode} instead of a {@link
+ * Fqn}. Saves on a lookup.
* <p/>
* Also assumes that the node exists, and hence will not be created.
* <p/>
@@ -279,27 +262,22 @@
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- public NodeSPI wrapNodeForWriting(InvocationContext context, InternalNode node, InternalNode parent) throws InterruptedException
- {
+ public NodeSPI wrapNodeForWriting(InvocationContext context, InternalNode node, InternalNode parent) throws InterruptedException {
Fqn fqn = node.getFqn();
NodeSPI n = context.lookUpNode(fqn);
if (n != null) // exists in context! Just acquire lock if needed, and wrap.
{
// acquire lock if needed
- if (acquireLock(context, fqn))
- {
+ if (acquireLock(context, fqn)) {
// create a copy of the underlying node
n.markForUpdate(dataContainer, writeSkewCheck);
}
if (trace) log.trace("Retrieving wrapped node " + fqn);
- }
- else
- {
+ } else {
// exists in cache! Just acquire lock if needed, and wrap.
// do we need a lock?
boolean needToCopy = false;
- if (acquireLock(context, fqn))
- {
+ if (acquireLock(context, fqn)) {
needToCopy = true;
}
n = nodeFactory.createWrappedNode(node, parent);
@@ -319,8 +297,7 @@
* @throws InterruptedException if the lock manager is interrupted.
*/
@SuppressWarnings("unchecked")
- public List<Fqn> wrapNodesRecursivelyForRemoval(InvocationContext ctx, Fqn fqn) throws InterruptedException
- {
+ public List<Fqn> wrapNodesRecursivelyForRemoval(InvocationContext ctx, Fqn fqn) throws InterruptedException {
// when removing a node we want to get a lock on the Fqn anyway and return the wrapped node.
if (fqn.isRoot()) throw new CacheException("Attempting to remove Fqn.ROOT!");
@@ -329,8 +306,7 @@
boolean needToCopyParent = false;
boolean parentLockNeeded = isParentLockNeeded(parentFqn, ctx);
ReadCommittedNode parent = null;
- if (parentLockNeeded)
- {
+ if (parentLockNeeded) {
needToCopyParent = acquireLock(ctx, parentFqn);
// Ensure the node is in the context.
parent = wrapAndPutInContext(ctx, parentFqn, needToCopyParent);
@@ -341,16 +317,12 @@
// Ensure the node is in the context.
ReadCommittedNode node = wrapAndPutInContext(ctx, fqn, needToCopyNode);
- if (node == null || node.isNullNode())
- {
+ if (node == null || node.isNullNode()) {
// node does not exist; return an empty list since there is nothing to remove!
return Collections.emptyList();
- }
- else
- {
+ } else {
// update child ref on parent to point to child as this is now a copy.
- if (parentLockNeeded && (needToCopyNode || needToCopyParent))
- {
+ if (parentLockNeeded && (needToCopyNode || needToCopyParent)) {
if (parent == null) throw new NodeNotExistsException("Parent node " + parentFqn + " does not exist!");
parent.getDelegationTarget().addChild(node.getDelegationTarget());
}
@@ -360,8 +332,7 @@
List<Fqn> fqnsToBeRemoved = new LinkedList<Fqn>();
fqnsToBeRemoved.add(fqn);
- if (!childMap.isEmpty())
- {
+ if (!childMap.isEmpty()) {
for (InternalNode n : childMap.values()) lockForWritingRecursive(n.getFqn(), ctx, fqnsToBeRemoved);
}
@@ -374,20 +345,18 @@
*
* @param fqn Fqn to lock
* @param ctx invocation context to add wrapped node to
- * @param fqnList fqnList to update - this list should not be null but should be initially empty and will be populated
- * with a list of all Fqns locked in this call.
+ * @param fqnList fqnList to update - this list should not be null but should be initially empty and will be
+ * populated with a list of all Fqns locked in this call.
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- private void lockForWritingRecursive(Fqn fqn, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException
- {
+ private void lockForWritingRecursive(Fqn fqn, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException {
acquireLock(ctx, fqn); // lock node
if (fqnList != null) fqnList.add(fqn);
// now wrap and add to the context
ReadCommittedNode rcn = wrapNodeForWriting(ctx, fqn, true, false, true, false, false);
- if (rcn != null)
- {
+ if (rcn != null) {
rcn.markForUpdate(dataContainer, writeSkewCheck);
Map<Object, InternalNode<?, ?>> children = rcn.getDelegationTarget().getChildrenMap();
for (InternalNode child : children.values())
@@ -396,8 +365,9 @@
}
/**
- * Identical to {@link #lockForWritingRecursive(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext, java.util.List)}
- * except that it uses an {@link org.jboss.cache.InternalNode} instead of an {@link Fqn} - saves a lookup.
+ * Identical to {@link #lockForWritingRecursive(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext,
+ * java.util.List)} except that it uses an {@link org.jboss.cache.InternalNode} instead of an {@link Fqn} - saves a
+ * lookup.
*
* @param node node to lock recursively
* @param ctx invocation context
@@ -405,16 +375,14 @@
* @throws InterruptedException if interrupted
*/
@SuppressWarnings("unchecked")
- private void lockForWritingRecursive(InternalNode node, InternalNode parent, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException
- {
+ private void lockForWritingRecursive(InternalNode node, InternalNode parent, InvocationContext ctx, List<Fqn> fqnList) throws InterruptedException {
Fqn fqn = node.getFqn();
acquireLock(ctx, fqn); // lock node
if (fqnList != null) fqnList.add(fqn);
// now wrap and add to the context
NodeSPI rcn = wrapNodeForWriting(ctx, node, parent);
- if (rcn != null)
- {
+ if (rcn != null) {
rcn.markForUpdate(dataContainer, writeSkewCheck);
Map<Object, InternalNode<?, ?>> children = node.getChildrenMap();
for (InternalNode child : children.values()) lockForWritingRecursive(child, node, ctx, fqnList);
@@ -423,9 +391,9 @@
/**
- * Wraps a node and puts it in the context, optionally copying the node for updating if <tt>forUpdate</tt> is <tt>true</tt>.
- * If the node is already in the context, a new wrapped node is not created, but the existing one is still checked
- * for changes and potentially marked for update if <tt>forUpdate</tt> is <tt>true</tt>.
+ * Wraps a node and puts it in the context, optionally copying the node for updating if <tt>forUpdate</tt> is
+ * <tt>true</tt>. If the node is already in the context, a new wrapped node is not created, but the existing one is
+ * still checked for changes and potentially marked for update if <tt>forUpdate</tt> is <tt>true</tt>.
*
* @param ctx invocation context to add node to
* @param fqn fqn of node to add
@@ -433,11 +401,9 @@
* @return the ReadCommittedNode wrapper, or null if the node does not exist.
*/
@SuppressWarnings("unchecked")
- private ReadCommittedNode wrapAndPutInContext(InvocationContext ctx, Fqn fqn, boolean forUpdate)
- {
+ private ReadCommittedNode wrapAndPutInContext(InvocationContext ctx, Fqn fqn, boolean forUpdate) {
ReadCommittedNode node = (ReadCommittedNode) ctx.lookUpNode(fqn);
- if (node == null)
- {
+ if (node == null) {
InternalNode[] nodes = dataContainer.peekInternalNodeAndDirectParent(fqn, false);
node = nodeFactory.createWrappedNode(nodes[0], nodes[1]);
ctx.putLookedUpNode(fqn, node);
@@ -450,14 +416,13 @@
}
/**
- * An overloaded version of {@link #isParentLockNeeded(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext)}
- * which takes in an {@link org.jboss.cache.InternalNode} instead of a {@link Fqn}.
+ * An overloaded version of {@link #isParentLockNeeded(org.jboss.cache.Fqn, org.jboss.cache.InvocationContext)} which
+ * takes in an {@link org.jboss.cache.InternalNode} instead of a {@link Fqn}.
*
* @param parent parent node to test
* @return true if parent lock is needed, false otherwise.
*/
- private boolean isParentLockNeeded(InternalNode parent)
- {
+ private boolean isParentLockNeeded(InternalNode parent) {
return lockParentForChildInsertRemove || (parent != null && parent.isLockForChildInsertRemove());
}
@@ -468,8 +433,7 @@
* @param ctx invocation context
* @return true if parent lock is needed, false otherwise.
*/
- private boolean isParentLockNeeded(Fqn parent, InvocationContext ctx)
- {
+ private boolean isParentLockNeeded(Fqn parent, InvocationContext ctx) {
ReadCommittedNode parentNodeTmp = (ReadCommittedNode) ctx.lookUpNode(parent);
InternalNode in = parentNodeTmp == null ? dataContainer.peekInternalNode(parent, true) : parentNodeTmp.getDelegationTarget();
return isParentLockNeeded(in);
Added: core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java
===================================================================
--- core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java (rev 0)
+++ core/branches/3.0.X/src/test/java/org/jboss/cache/api/ReAddDeletedNodeTest.java 2009-02-19 13:07:43 UTC (rev 7732)
@@ -0,0 +1,40 @@
+package org.jboss.cache.api;
+
+import org.jboss.cache.AbstractSingleCacheTest;
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.UnitTestCacheFactory;
+import org.testng.annotations.Test;
+
+import javax.transaction.TransactionManager;
+
+@Test(groups = {"functional"}, sequential = true, testName = "api.ReAddDeletedNodeTest")
+public class ReAddDeletedNodeTest extends AbstractSingleCacheTest {
+ private CacheSPI<String, String> cache;
+
+ public CacheSPI createCache() {
+ // start a single cache instance
+ UnitTestCacheFactory<String, String> cf = new UnitTestCacheFactory<String, String>();
+ cache = (CacheSPI<String, String>) cf.createCache("configs/local-tx.xml", false, getClass());
+ cache.getConfiguration().setEvictionConfig(null);
+ cache.start();
+ return cache;
+ }
+
+ public void testReAdd() throws Exception {
+ TransactionManager tm = cache.getTransactionManager();
+ Fqn<String> testFqn = Fqn.fromElements("a", "a", "a");
+
+ tm.begin();
+ cache.put(testFqn, "x", "x");
+ cache.removeNode(testFqn.getParent());
+ cache.put(testFqn, "x", "x");
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (before commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (before commit)";
+ tm.commit();
+ assert cache.getNode(testFqn) != null : testFqn + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent()) != null : testFqn.getParent() + " should not be null (after commit)";
+ assert cache.getNode(testFqn.getParent().getParent()) != null : testFqn.getParent().getParent() + " should not be null (after commit)";
+ }
+}
15 years, 11 months
JBoss Cache SVN: r7731 - in core/branches/flat/src: main/java/org/horizon/interceptors and 7 other directories.
by jbosscache-commits@lists.jboss.org
Author: mircea.markus
Date: 2009-02-19 08:03:56 -0500 (Thu, 19 Feb 2009)
New Revision: 7731
Added:
core/branches/flat/src/test/java/org/horizon/replication/ReplicationQueueTest.java
Modified:
core/branches/flat/src/main/java/org/horizon/commands/remote/ReplicateCommand.java
core/branches/flat/src/main/java/org/horizon/interceptors/ReplicationInterceptor.java
core/branches/flat/src/main/java/org/horizon/interceptors/base/BaseRpcInterceptor.java
core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java
core/branches/flat/src/main/resources/config-samples/all.xml
core/branches/flat/src/test/java/org/horizon/config/parsing/XmlFileParsingTest.java
core/branches/flat/src/test/java/org/horizon/test/AbstractCacheTest.java
core/branches/flat/src/test/java/org/horizon/test/MultipleCacheManagersTest.java
core/branches/flat/src/test/java/org/horizon/test/TestingUtil.java
core/branches/flat/src/test/resources/configs/named-cache-test.xml
Log:
added support for replication queues
Modified: core/branches/flat/src/main/java/org/horizon/commands/remote/ReplicateCommand.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/commands/remote/ReplicateCommand.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/main/java/org/horizon/commands/remote/ReplicateCommand.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -92,8 +92,7 @@
}
}
- private Object processCommand(InvocationContext ctx, ReplicableCommand cacheCommand)
- throws Throwable {
+ private Object processCommand(InvocationContext ctx, ReplicableCommand cacheCommand) throws Throwable {
Object result;
try {
if (trace) log.trace("Invoking command " + cacheCommand + ", with originLocal flag set to false.");
@@ -107,7 +106,7 @@
result = null;
}
} else {
- throw new RuntimeException("Do we still need to deal with non-visitable commands?");
+ throw new RuntimeException("Do we still need to deal with non-visitable commands? (" + cacheCommand.getClass().getName() + ")");
// result = cacheCommand.perform(null);
}
}
Modified: core/branches/flat/src/main/java/org/horizon/interceptors/ReplicationInterceptor.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/interceptors/ReplicationInterceptor.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/main/java/org/horizon/interceptors/ReplicationInterceptor.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -103,8 +103,7 @@
* If we are within one transaction we won't do any replication as replication would only be performed at commit
* time. If the operation didn't originate locally we won't do any replication either.
*/
- private Object handleCrudMethod(InvocationContext ctx, WriteCommand command)
- throws Throwable {
+ private Object handleCrudMethod(InvocationContext ctx, WriteCommand command) throws Throwable {
boolean local = isLocalModeForced(ctx);
if (local && ctx.getTransaction() == null) return invokeNextInterceptor(ctx, command);
// FIRST pass this call up the chain. Only if it succeeds (no exceptions) locally do we attempt to replicate.
Modified: core/branches/flat/src/main/java/org/horizon/interceptors/base/BaseRpcInterceptor.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/interceptors/base/BaseRpcInterceptor.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/main/java/org/horizon/interceptors/base/BaseRpcInterceptor.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -25,6 +25,7 @@
import org.horizon.commands.CommandsFactory;
import org.horizon.commands.RPCCommand;
import org.horizon.commands.ReplicableCommand;
+import org.horizon.commands.remote.ReplicateCommand;
import org.horizon.context.InvocationContext;
import org.horizon.context.TransactionContext;
import org.horizon.factories.annotations.Inject;
@@ -88,7 +89,7 @@
}
protected void replicateCall(InvocationContext ctx, ReplicableCommand call, boolean sync, boolean useOutOfBandMessage) throws Throwable {
- replicateCall(ctx, null, commandsFactory.buildReplicateCommand(call), sync, useOutOfBandMessage);
+ replicateCall(ctx, null, call, sync, useOutOfBandMessage);
}
protected void replicateCall(InvocationContext ctx, RPCCommand call, boolean sync) throws Throwable {
@@ -96,10 +97,10 @@
}
protected void replicateCall(InvocationContext ctx, ReplicableCommand call, boolean sync) throws Throwable {
- replicateCall(ctx, null, commandsFactory.buildReplicateCommand(call), sync, false);
+ replicateCall(ctx, null, call, sync, false);
}
- protected void replicateCall(InvocationContext ctx, List<Address> recipients, RPCCommand c, boolean sync, boolean useOutOfBandMessage) throws Throwable {
+ protected void replicateCall(InvocationContext ctx, List<Address> recipients, ReplicableCommand c, boolean sync, boolean useOutOfBandMessage) throws Throwable {
long syncReplTimeout = configuration.getSyncReplTimeout();
if (ctx.hasOption(Options.FORCE_ASYNCHRONOUS)) sync = false;
@@ -118,7 +119,7 @@
replicateCall(recipients, c, sync, useOutOfBandMessage, syncReplTimeout);
}
- protected void replicateCall(List<Address> recipients, RPCCommand call, boolean sync, boolean useOutOfBandMessage, long timeout) throws Throwable {
+ protected void replicateCall(List<Address> recipients, ReplicableCommand call, boolean sync, boolean useOutOfBandMessage, long timeout) throws Throwable {
if (trace) log.trace("Broadcasting call " + call + " to recipient list " + recipients);
if (!sync && replicationQueue != null) {
@@ -131,9 +132,10 @@
if (trace)
log.trace("Setting call recipients to " + callRecipients + " since the original list of recipients passed in is null.");
}
+ ReplicateCommand command = commandsFactory.buildReplicateCommand(call);
List rsps = rpcManager.invokeRemotely(callRecipients,
- call,
+ command,
sync ? ResponseMode.SYNCHRONOUS : ResponseMode.ASYNCHRONOUS, // is synchronised?
timeout,
useOutOfBandMessage
Modified: core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/main/java/org/horizon/remoting/ReplicationQueue.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -22,7 +22,6 @@
package org.horizon.remoting;
import org.horizon.commands.CommandsFactory;
-import org.horizon.commands.RPCCommand;
import org.horizon.commands.ReplicableCommand;
import org.horizon.commands.remote.ReplicateCommand;
import org.horizon.config.Configuration;
@@ -59,7 +58,7 @@
/**
* Holds the replication jobs.
*/
- final List<RPCCommand> elements = new LinkedList<RPCCommand>();
+ private final List<ReplicableCommand> elements = new LinkedList<ReplicableCommand>();
/**
* For periodical replication
@@ -90,6 +89,9 @@
@Start
public synchronized void start() {
long interval = configuration.getReplQueueInterval();
+ if (log.isTraceEnabled()) {
+ log.trace("Starting replication queue, with interval=" + interval +", and maxElements=" + maxElements);
+ }
this.maxElements = configuration.getReplQueueMaxElements();
// check again
enabled = configuration.isUseReplQueue();
@@ -98,7 +100,7 @@
public void run() {
flush();
}
- }, 500l, interval, TimeUnit.MILLISECONDS);
+ }, interval, interval, TimeUnit.MILLISECONDS);
}
}
@@ -117,7 +119,7 @@
/**
* Adds a new method call.
*/
- public void add(RPCCommand job) {
+ public void add(ReplicableCommand job) {
if (job == null)
throw new NullPointerException("job is null");
synchronized (elements) {
@@ -141,6 +143,7 @@
if (toReplicate.size() > 0) {
try {
+ if (log.isTraceEnabled()) log.trace("Flushing " + toReplicate.size() + " elements " );
ReplicateCommand replicateCommand = commandsFactory.buildReplicateCommand(toReplicate);
// send to all live nodes in the cluster
rpcManager.invokeRemotely(null, replicateCommand, ResponseMode.ASYNCHRONOUS, configuration.getSyncReplTimeout());
@@ -150,4 +153,12 @@
}
}
}
+
+ public int getElementsCount() {
+ return elements.size();
+ }
+
+ public void reset() {
+ elements.clear();
+ }
}
\ No newline at end of file
Modified: core/branches/flat/src/main/resources/config-samples/all.xml
===================================================================
--- core/branches/flat/src/main/resources/config-samples/all.xml 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/main/resources/config-samples/all.xml 2009-02-19 13:03:56 UTC (rev 7731)
@@ -163,6 +163,12 @@
</namedCache>
+ <namedCache name="withReplicatinQueue">
+ <clustering>
+ <async useReplQueue="true" replQueueInterval="100" replQueueMaxElements="200"/>
+ </clustering>
+ </namedCache>
+
<namedCache name="cacheWithCustomInterceptors">
<!--
Define custom interceptors. All custom interceptors need to extend org.jboss.cache.interceptors.base.CommandInterceptor
Modified: core/branches/flat/src/test/java/org/horizon/config/parsing/XmlFileParsingTest.java
===================================================================
--- core/branches/flat/src/test/java/org/horizon/config/parsing/XmlFileParsingTest.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/test/java/org/horizon/config/parsing/XmlFileParsingTest.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -138,6 +138,8 @@
assert c.getTransactionManagerLookupClass() == null;
assert c.getCacheMode() == Configuration.CacheMode.REPL_ASYNC;
assert c.isUseReplQueue();
+ assert c.getReplQueueInterval() == 1234;
+ assert c.getReplQueueMaxElements() == 100;
assert c.isUseAsyncSerialization();
assert c.isFetchInMemoryState();
assert c.getStateRetrievalTimeout() == 15000;
Added: core/branches/flat/src/test/java/org/horizon/replication/ReplicationQueueTest.java
===================================================================
--- core/branches/flat/src/test/java/org/horizon/replication/ReplicationQueueTest.java (rev 0)
+++ core/branches/flat/src/test/java/org/horizon/replication/ReplicationQueueTest.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -0,0 +1,248 @@
+package org.horizon.replication;
+
+import org.horizon.Cache;
+import org.horizon.config.Configuration;
+import org.horizon.config.GlobalConfiguration;
+import org.horizon.executors.ScheduledExecutorFactory;
+import org.horizon.manager.CacheManager;
+import org.horizon.remoting.ReplicationQueue;
+import org.horizon.test.MultipleCacheManagersTest;
+import org.horizon.test.TestingUtil;
+import org.horizon.transaction.DummyTransactionManagerLookup;
+import org.testng.annotations.Test;
+
+import javax.transaction.TransactionManager;
+import java.util.Properties;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Tests ReplicationQueue functionality.
+ *
+ * @author Mircea.Markus(a)jboss.com
+ */
+@Test(groups = "functional", testName = "replication.ReplicationQueueTest")
+public class ReplicationQueueTest extends MultipleCacheManagersTest {
+
+ Cache cache1;
+ Cache cache2;
+ private static final int REPL_QUEUE_INTERVAL = 5000;
+ private static final int REPL_QUEUE_MAX_ELEMENTS = 10;
+ long creationTime;
+
+ protected void createCacheManagers() throws Throwable {
+ GlobalConfiguration globalConfiguration = GlobalConfiguration.getClusteredDefault();
+ globalConfiguration.setReplicationQueueScheduledExecutorFactoryClass(ReplQueueTestScheduledExecutorFactory.class.getName());
+ globalConfiguration.setReplicationQueueScheduledExecutorProperties(ReplQueueTestScheduledExecutorFactory.myProps);
+ CacheManager first = TestingUtil.createClusteredCacheManager(globalConfiguration);
+ CacheManager second = TestingUtil.createClusteredCacheManager(globalConfiguration);
+ registerCacheManager(first, second);
+
+ Configuration config = getDefaultConfig();
+ config.setTransactionManagerLookupClass(DummyTransactionManagerLookup.class.getName());
+ config.setCacheMode(Configuration.CacheMode.REPL_ASYNC);
+ config.setUseReplQueue(true);
+ config.setReplQueueInterval(REPL_QUEUE_INTERVAL);
+ config.setReplQueueMaxElements(REPL_QUEUE_MAX_ELEMENTS);
+ creationTime = System.currentTimeMillis();
+ manager(0).defineCache("replQueue", config);
+
+ Configuration conf2 = config.clone();
+ conf2.setUseReplQueue(false);
+ manager(1).defineCache("replQueue", conf2);
+
+ cache1 = cache(0, "replQueue");
+ cache2 = cache(1, "replQueue");
+ }
+
+ /**
+ * tests that the replication queue will use an appropriate executor defined through
+ * <tt>replicationQueueScheduledExecutor</tt> config param.
+ */
+ public void testApropriateExecutorIsUsed() {
+ assert ReplQueueTestScheduledExecutorFactory.methodCalled;
+ assert ReplQueueTestScheduledExecutorFactory.command != null;
+ assert ReplQueueTestScheduledExecutorFactory.delay == REPL_QUEUE_INTERVAL;
+ assert ReplQueueTestScheduledExecutorFactory.initialDelay == REPL_QUEUE_INTERVAL;
+ assert ReplQueueTestScheduledExecutorFactory.unit == TimeUnit.MILLISECONDS;
+ }
+
+ /**
+ * Make sure that replication will occur even if <tt>replQueueMaxElements</tt> is not reached, but the
+ * <tt>replQueueInterval</tt> is reached.
+ */
+ public void testReplicationBasedOnTime() throws InterruptedException {
+ //only place one element, queue size is 10.
+ cache1.put("key", "value");
+ ReplicationQueue replicationQueue = TestingUtil.extractComponent(cache1, ReplicationQueue.class);
+ assert replicationQueue != null;
+ assert replicationQueue.getElementsCount() == 1;
+ assert cache2.get("key") == null;
+ assert cache1.get("key").equals("value");
+
+ ReplQueueTestScheduledExecutorFactory.command.run();
+
+ //in next 5 secs, expect the replication to occur
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < 5000) {
+ if (cache2.get("key") != null) break;
+ Thread.sleep(50);
+ }
+ assert cache2.get("key").equals("value");
+ assert replicationQueue.getElementsCount() == 0;
+ }
+
+ /**
+ * Make sure that replication will occur even if <tt>replQueueMaxElements</tt> is not reached, but the
+ * <tt>replQueueInterval</tt> is reached.
+ */
+ public void testReplicationBasedOnTimeWithTx() throws Exception {
+ //only place one element, queue size is 10.
+ TransactionManager transactionManager = TestingUtil.getTransactionManager(cache1);
+ transactionManager.begin();
+ cache1.put("key", "value");
+ transactionManager.commit();
+
+ ReplicationQueue replicationQueue = TestingUtil.extractComponent(cache1, ReplicationQueue.class);
+ assert replicationQueue != null;
+ assert replicationQueue.getElementsCount() == 1;
+ assert cache2.get("key") == null;
+ assert cache1.get("key").equals("value");
+
+ ReplQueueTestScheduledExecutorFactory.command.run();
+
+ //in next 5 secs, expect the replication to occur
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < 5000) {
+ if (cache2.get("key") != null) break;
+ Thread.sleep(50);
+ }
+ assert cache2.get("key").equals("value");
+ assert replicationQueue.getElementsCount() == 0;
+ }
+
+
+ /**
+ * Make sure that replication will occur even if <tt>replQueueMaxElements</tt> is reached, but the
+ * <tt>replQueueInterval</tt> is not reached.
+ */
+ public void testReplicationBasedOnSize() throws Exception {
+ //place exactly REPL_QUEUE_MAX_ELEMENTS (10) elements so the size threshold triggers a flush.
+ for (int i = 0; i < REPL_QUEUE_MAX_ELEMENTS; i++) {
+ cache1.put("key" + i, "value" + i);
+ }
+ //expect that in next 3 secs all commands are replicated
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < 3000) {
+ if (cache2.size() == REPL_QUEUE_MAX_ELEMENTS) break;
+ Thread.sleep(50);
+ }
+ for (int i = 0; i < REPL_QUEUE_MAX_ELEMENTS; i++) {
+ assert cache2.get("key" + i).equals("value" + i);
+ }
+ }
+
+ /**
+ * Make sure that replication will occur even if <tt>replQueueMaxElements</tt> is reached, but the
+ * <tt>replQueueInterval</tt> is not reached.
+ */
+ public void testReplicationBasedOnSizeWithTx() throws Exception {
+ //place exactly REPL_QUEUE_MAX_ELEMENTS (10) elements (one per transaction) so the size threshold triggers a flush.
+ TransactionManager transactionManager = TestingUtil.getTransactionManager(cache1);
+ for (int i = 0; i < REPL_QUEUE_MAX_ELEMENTS; i++) {
+ transactionManager.begin();
+ cache1.put("key" + i, "value" + i);
+ transactionManager.commit();
+ }
+ //expect that in next 3 secs all commands are replicated
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < 3000) {
+ if (cache2.size() == REPL_QUEUE_MAX_ELEMENTS) break;
+ Thread.sleep(50);
+ }
+ for (int i = 0; i < REPL_QUEUE_MAX_ELEMENTS; i++) {
+ assert cache2.get("key" + i).equals("value" + i);
+ }
+ }
+
+ /**
+ * Test that replication queue works fine when multiple threads are putting into the queue.
+ */
+ public void testReplicationQueueMultipleThreads() throws Exception {
+ int numThreads = 4;
+ final int numLoopsPerThread = 3;
+ Thread[] threads = new Thread[numThreads];
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ for (int i = 0; i < numThreads; i++) {
+ final int i1 = i;
+ threads[i] = new Thread() {
+ int index;
+
+ {
+ index = i1;
+ }
+
+ public void run() {
+ try {
+ latch.await();
+ }
+ catch (InterruptedException e) {
+ // do nothing
+ }
+ for (int j = 0; j < numLoopsPerThread; j++) {
+ cache1.put("key" + index + "_" + j, "value");
+ }
+ }
+ };
+ threads[i].start();
+ }
+ latch.countDown();
+ // wait for threads to join
+ for (Thread t : threads) t.join();
+
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < 3000) {
+ if (cache2.size() == REPL_QUEUE_MAX_ELEMENTS) break;
+ Thread.sleep(50);
+ }
+ assert cache2.size() == REPL_QUEUE_MAX_ELEMENTS;
+ ReplicationQueue replicationQueue = TestingUtil.extractComponent(cache1, ReplicationQueue.class);
+ assert replicationQueue.getElementsCount() == numThreads * numLoopsPerThread - REPL_QUEUE_MAX_ELEMENTS;
+ }
+
+
+ public static class ReplQueueTestScheduledExecutorFactory implements ScheduledExecutorFactory {
+ static Properties myProps = new Properties();
+ static boolean methodCalled = false;
+ static Runnable command;
+ static long initialDelay;
+ static long delay;
+ static TimeUnit unit;
+
+ static {
+ myProps.put("aaa", "bbb");
+ myProps.put("ddd", "ccc");
+ }
+
+ public ScheduledExecutorService getScheduledExecutor(Properties p) {
+ assert p.equals(myProps);
+ methodCalled = true;
+ return new ScheduledThreadPoolExecutor(1) {
+ @Override
+ public ScheduledFuture<?> scheduleWithFixedDelay(Runnable commandP, long initialDelayP, long delayP, TimeUnit unitP) {
+ command = commandP;
+ initialDelay = initialDelayP;
+ delay = delayP;
+ unit = unitP;
+ return null;
+ }
+ };
+ }
+ }
+
+
+}
Property changes on: core/branches/flat/src/test/java/org/horizon/replication/ReplicationQueueTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: core/branches/flat/src/test/java/org/horizon/test/AbstractCacheTest.java
===================================================================
--- core/branches/flat/src/test/java/org/horizon/test/AbstractCacheTest.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/test/java/org/horizon/test/AbstractCacheTest.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -10,6 +10,7 @@
import org.horizon.logging.LogFactory;
import org.horizon.manager.CacheManager;
import org.horizon.manager.DefaultCacheManager;
+import org.horizon.remoting.ReplicationQueue;
import org.horizon.remoting.transport.Address;
import javax.transaction.TransactionManager;
@@ -35,11 +36,17 @@
for (Cache cache : runningCaches) {
removeInMemoryData(cache);
clearCacheLoader(cache);
+ clearReplicationQueues(cache);
InvocationContext invocationContext = ((AdvancedCache) cache).getInvocationContextContainer().get();
if (invocationContext != null) invocationContext.reset();
}
}
+ private void clearReplicationQueues(Cache cache) {
+ ReplicationQueue queue = TestingUtil.extractComponent(cache, ReplicationQueue.class);
+ if (queue != null) queue.reset();
+ }
+
@SuppressWarnings(value = "unchecked")
protected Set<Cache> getRunningCaches(CacheManager cacheManager) {
ConcurrentMap<String, Cache> caches = (ConcurrentMap<String, Cache>) TestingUtil.extractField(DefaultCacheManager.class, cacheManager, "caches");
Modified: core/branches/flat/src/test/java/org/horizon/test/MultipleCacheManagersTest.java
===================================================================
--- core/branches/flat/src/test/java/org/horizon/test/MultipleCacheManagersTest.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/test/java/org/horizon/test/MultipleCacheManagersTest.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -52,12 +52,21 @@
@BeforeClass
public void createBeforeClass() throws Throwable {
- if (cleanup == CleanupPhase.AFTER_TEST) createCacheManagers();
+ if (cleanup == CleanupPhase.AFTER_TEST) callCreateCacheManagers();
}
+ private void callCreateCacheManagers() {
+ try {
+ createCacheManagers();
+ } catch (Throwable th) {
+ th.printStackTrace();
+ log.error("Error in test setup: " + th);
+ }
+ }
+
@BeforeMethod
public void createBeforeMethod() throws Throwable {
- if (cleanup == CleanupPhase.AFTER_METHOD) createCacheManagers();
+ if (cleanup == CleanupPhase.AFTER_METHOD) callCreateCacheManagers();
}
@AfterClass
@@ -98,7 +107,7 @@
}
}
- final protected void registerCaches(CacheManager... cacheManagers) {
+ final protected void registerCacheManager(CacheManager... cacheManagers) {
this.cacheManagers.addAll(Arrays.asList(cacheManagers));
}
Modified: core/branches/flat/src/test/java/org/horizon/test/TestingUtil.java
===================================================================
--- core/branches/flat/src/test/java/org/horizon/test/TestingUtil.java 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/test/java/org/horizon/test/TestingUtil.java 2009-02-19 13:03:56 UTC (rev 7731)
@@ -636,4 +636,11 @@
GlobalConfiguration globalConfiguration = GlobalConfiguration.getNonClusteredDefault();
return new DefaultCacheManager(globalConfiguration);
}
+
+ public static CacheManager createClusteredCacheManager(GlobalConfiguration globalConfiguration) {
+ Properties newTransportProps = new Properties();
+ newTransportProps.put(JGroupsTransport.CONFIGURATION_STRING, JGroupsConfigBuilder.getJGroupsConfig());
+ globalConfiguration.setTransportProperties(newTransportProps);
+ return new DefaultCacheManager(globalConfiguration);
+ }
}
Modified: core/branches/flat/src/test/resources/configs/named-cache-test.xml
===================================================================
--- core/branches/flat/src/test/resources/configs/named-cache-test.xml 2009-02-19 12:53:45 UTC (rev 7730)
+++ core/branches/flat/src/test/resources/configs/named-cache-test.xml 2009-02-19 13:03:56 UTC (rev 7731)
@@ -53,7 +53,7 @@
<namedCache name="asyncReplQueue">
<clustering>
<stateRetrieval fetchInMemoryState="true" timeout="15000"/>
- <async useReplQueue="true"/>
+ <async useReplQueue="true" replQueueInterval="1234" replQueueMaxElements="100"/>
</clustering>
</namedCache>
15 years, 11 months
JBoss Cache SVN: r7730 - in core/trunk/src: test/java/org/jboss/cache/api/mvcc and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 07:53:45 -0500 (Thu, 19 Feb 2009)
New Revision: 7730
Added:
core/trunk/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java
Modified:
core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java
Log:
JBCACHE-1480 - getChildrenNames retrieves removed children in a transaction
Modified: core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java 2009-02-19 12:48:29 UTC (rev 7729)
+++ core/trunk/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java 2009-02-19 12:53:45 UTC (rev 7730)
@@ -67,6 +67,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.Iterator;
/**
* The delegate that users (and ChainedInterceptor authors) interact with when they create a cache by using a cache factory.
@@ -627,6 +628,17 @@
cacheStatusCheck(ctx);
GetChildrenNamesCommand command = commandsFactory.buildGetChildrenNamesCommand(fqn);
Set<Object> retval = (Set<Object>) invoker.invoke(ctx, command);
+
+ // this is needed to work around JBCACHE-1480
+ if (retval != null && !retval.isEmpty())
+ {
+ for (Iterator i = retval.iterator(); i.hasNext();)
+ {
+ Object child = getNode(Fqn.fromRelativeElements(fqn, i.next()));
+ if (child == null) i.remove();
+ }
+ }
+
if (retval != null)
{
retval = Immutables.immutableSetWrap(retval); // this is already copied in the command
@@ -635,6 +647,7 @@
{
retval = Collections.emptySet();
}
+
return retval;
}
Added: core/trunk/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java 2009-02-19 12:53:45 UTC (rev 7730)
@@ -0,0 +1,59 @@
+package org.jboss.cache.api.mvcc;
+
+import org.jboss.cache.AbstractSingleCacheTest;
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+import org.jboss.cache.UnitTestCacheFactory;
+import org.testng.annotations.Test;
+
+import javax.transaction.TransactionManager;
+import java.util.Collections;
+import java.util.Set;
+
+@Test(groups = {"functional", "mvcc"}, sequential = true, testName = "api.mvcc.GetChildrenNamesAfterRemoveTest")
+public class GetChildrenNamesAfterRemoveTest extends AbstractSingleCacheTest
+{
+ private CacheSPI<String, String> cache;
+
+ public CacheSPI createCache()
+ {
+ // start a single cache instance
+ UnitTestCacheFactory<String, String> cf = new UnitTestCacheFactory<String, String>();
+ cache = (CacheSPI<String, String>) cf.createCache("configs/local-tx.xml", false, getClass());
+ cache.getConfiguration().setEvictionConfig(null);
+ cache.start();
+ return cache;
+ }
+
+ public void testRemove() throws Exception
+ {
+ TransactionManager tm = cache.getTransactionManager();
+ Fqn<String> testFqn = Fqn.fromElements("test1");
+
+ tm.begin();
+ assertEmpty(testFqn);
+ cache.put(testFqn, "x", "x");
+ assertNotEmpty(testFqn);
+ cache.removeNode(testFqn);
+ assertEmpty(testFqn);
+ tm.commit();
+ assertEmpty(testFqn);
+ }
+
+ private void assertNotEmpty(Fqn<String> testFqn)
+ {
+ Set<Node<String, String>> children = cache.getNode(testFqn.getParent()).getChildren();
+ assert !children.isEmpty() : "Node " + testFqn + " should not be a leaf, but getChildren() returns: " + children;
+ Set<Object> childrenNames = cache.getNode(testFqn.getParent()).getChildrenNames();
+ assert childrenNames.equals(Collections.singleton(testFqn.getLastElement())) : "Retrieving children names on " + testFqn + " should return test1 but is: " + childrenNames;
+ }
+
+ private void assertEmpty(Fqn<String> testFqn)
+ {
+ Set<Node<String, String>> children = cache.getNode(testFqn.getParent()).getChildren();
+ assert children.isEmpty() : "Children should be empty but is " + children;
+ Set<Object> childrenNames = cache.getNode(testFqn.getParent()).getChildrenNames();
+ assert childrenNames.isEmpty() : "Children names should be empty but is " + childrenNames;
+ }
+}
15 years, 11 months
JBoss Cache SVN: r7729 - core/branches/3.0.X/src/test/java/org/jboss/cache/loader/deadlock.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 07:48:29 -0500 (Thu, 19 Feb 2009)
New Revision: 7729
Added:
core/branches/3.0.X/src/test/java/org/jboss/cache/loader/deadlock/ConcurrentReadsDeadlockTest.java
Log:
JBCACHE-1479 - added test
Added: core/branches/3.0.X/src/test/java/org/jboss/cache/loader/deadlock/ConcurrentReadsDeadlockTest.java
===================================================================
--- core/branches/3.0.X/src/test/java/org/jboss/cache/loader/deadlock/ConcurrentReadsDeadlockTest.java (rev 0)
+++ core/branches/3.0.X/src/test/java/org/jboss/cache/loader/deadlock/ConcurrentReadsDeadlockTest.java 2009-02-19 12:48:29 UTC (rev 7729)
@@ -0,0 +1,84 @@
+/*
+ *
+ * JBoss, the OpenSource J2EE webOS
+ *
+ * Distributable under LGPL license.
+ * See terms of license at gnu.org.
+ */
+
+package org.jboss.cache.loader.deadlock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.AbstractSingleCacheTest;
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.config.CacheLoaderConfig;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.factories.UnitTestConfigurationFactory;
+import org.jboss.cache.loader.testloaders.DummyInMemoryCacheLoader;
+import org.jboss.cache.lock.IsolationLevel;
+import org.jboss.cache.transaction.TransactionSetup;
+import org.testng.annotations.Test;
+
+import javax.transaction.Transaction;
+import javax.transaction.TransactionManager;
+
+@Test(groups = {"functional", "transaction"}, sequential = true, enabled = true, testName = "loader.deadlock.ConcurrentReadsDeadlockTest")
+public class ConcurrentReadsDeadlockTest extends AbstractSingleCacheTest {
+ final Fqn NODE = Fqn.fromString("/a");
+ final Log log = LogFactory.getLog(ConcurrentReadsDeadlockTest.class);
+
+ protected CacheSPI createCache() throws Exception {
+ Configuration c = new Configuration();
+ c.setCacheMode(Configuration.CacheMode.LOCAL);
+ c.setTransactionManagerLookupClass(TransactionSetup.getManagerLookup());
+ c.setIsolationLevel(IsolationLevel.READ_COMMITTED);
+ c.setLockAcquisitionTimeout(3000);
+ CacheLoaderConfig clc = UnitTestConfigurationFactory.buildSingleCacheLoaderConfig(false, "", DummyInMemoryCacheLoader.class.getName(), "", false, true, false, false, false);
+
+ c.setCacheLoaderConfig(clc);
+ return (CacheSPI<String, String>) new DefaultCacheFactory<String, String>().createCache(c, true);
+ }
+
+ /**
+ * Deadlock on two concurrent read transactions in the same thread.
+ */
+ public void testConcurrentReadDeadlock() throws Exception {
+ TransactionManager tm = cache.getConfiguration().getRuntimeConfig().getTransactionManager();
+
+ tm.begin();
+ cache.put(NODE, "x", "x");
+ tm.commit();
+
+ // two loops just in case
+ for (int i = 0; i < 2; i++) {
+ try {
+ tm.begin();
+ Transaction t1 = tm.suspend();
+ tm.begin();
+ Transaction t2 = tm.suspend();
+
+ tm.resume(t1);
+ cache.getData(NODE);
+ tm.suspend();
+
+ tm.resume(t2);
+ cache.getData(NODE);
+ tm.suspend();
+
+ tm.resume(t1);
+ tm.rollback();
+
+ tm.resume(t2);
+ tm.rollback();
+ }
+ catch (Exception ex) {
+ Exception exx = new Exception("loop " + i + ": " + ex.getMessage(), ex.getCause());
+ exx.setStackTrace(ex.getStackTrace());
+ throw exx;
+ }
+ }
+ }
+}
15 years, 11 months
JBoss Cache SVN: r7728 - in core/branches/3.0.X/src: test/java/org/jboss/cache/api/mvcc and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 07:47:56 -0500 (Thu, 19 Feb 2009)
New Revision: 7728
Added:
core/branches/3.0.X/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java
Modified:
core/branches/3.0.X/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java
Log:
JBCACHE-1480 - getChildrenNames retrieves removed children in a transaction
Modified: core/branches/3.0.X/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java
===================================================================
--- core/branches/3.0.X/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java 2009-02-19 12:03:50 UTC (rev 7727)
+++ core/branches/3.0.X/src/main/java/org/jboss/cache/invocation/CacheInvocationDelegate.java 2009-02-19 12:47:56 UTC (rev 7728)
@@ -64,20 +64,20 @@
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
- * The delegate that users (and ChainedInterceptor authors) interact with when they create a cache by using a cache factory.
- * This wrapper delegates calls down the interceptor chain.
+ * The delegate that users (and ChainedInterceptor authors) interact with when they create a cache by using a cache
+ * factory. This wrapper delegates calls down the interceptor chain.
*
* @author Manik Surtani (<a href="mailto:manik AT jboss DOT org">manik AT jboss DOT org</a>)
* @since 2.1.0
*/
@NonVolatile
-public class CacheInvocationDelegate<K, V> extends AbstractInvocationDelegate implements CacheSPI<K, V>
-{
+public class CacheInvocationDelegate<K, V> extends AbstractInvocationDelegate implements CacheSPI<K, V> {
private static final Log log = LogFactory.getLog(CacheInvocationDelegate.class);
// this stuff is needed since the SPI has methods to retrieve these.
@@ -100,8 +100,7 @@
public void initialize(StateTransferManager stateTransferManager, CacheLoaderManager cacheLoaderManager, Notifier notifier,
TransactionManager transactionManager, BuddyManager buddyManager, TransactionTable transactionTable,
RPCManager rpcManager, RegionManager regionManager, Marshaller marshaller,
- CommandsFactory commandsFactory, DataContainer dataContainer, MVCCNodeHelper mvccHelper, BatchContainer batchContainer)
- {
+ CommandsFactory commandsFactory, DataContainer dataContainer, MVCCNodeHelper mvccHelper, BatchContainer batchContainer) {
this.stateTransferManager = stateTransferManager;
this.cacheLoaderManager = cacheLoaderManager;
this.notifier = notifier;
@@ -118,13 +117,11 @@
}
@Start
- void setNodeLockingScheme()
- {
+ void setNodeLockingScheme() {
usingMvcc = configuration.getNodeLockingScheme() == NodeLockingScheme.MVCC;
}
- private void reset()
- {
+ private void reset() {
this.usingMvcc = false;
this.stateTransferManager = null;
this.cacheLoaderManager = null;
@@ -138,156 +135,125 @@
}
@Override
- public String toString()
- {
+ public String toString() {
return dataContainer == null ? super.toString() : dataContainer.toString();
}
- public Configuration getConfiguration()
- {
+ public Configuration getConfiguration() {
return configuration;
}
- public NodeSPI<K, V> getRoot()
- {
+ public NodeSPI<K, V> getRoot() {
return getNode(Fqn.ROOT);
}
- public TransactionManager getTransactionManager()
- {
+ public TransactionManager getTransactionManager() {
return transactionManager;
}
- public void addInterceptor(CommandInterceptor i, int position)
- {
+ public void addInterceptor(CommandInterceptor i, int position) {
invoker.addInterceptor(i, position);
}
- public void addInterceptor(CommandInterceptor i, Class<? extends CommandInterceptor> afterInterceptor)
- {
+ public void addInterceptor(CommandInterceptor i, Class<? extends CommandInterceptor> afterInterceptor) {
invoker.addAfterInterceptor(i, afterInterceptor);
}
- public List<CommandInterceptor> getInterceptorChain()
- {
+ public List<CommandInterceptor> getInterceptorChain() {
return invoker.asList();
}
- public void removeInterceptor(int position)
- {
+ public void removeInterceptor(int position) {
invoker.removeInterceptor(position);
}
- public void removeInterceptor(Class<? extends CommandInterceptor> interceptorType)
- {
+ public void removeInterceptor(Class<? extends CommandInterceptor> interceptorType) {
invoker.removeInterceptor(interceptorType);
}
- public CacheLoaderManager getCacheLoaderManager()
- {
+ public CacheLoaderManager getCacheLoaderManager() {
return cacheLoaderManager;
}
- public BuddyManager getBuddyManager()
- {
+ public BuddyManager getBuddyManager() {
return buddyManager;
}
- public TransactionTable getTransactionTable()
- {
+ public TransactionTable getTransactionTable() {
return transactionTable;
}
- public RPCManager getRPCManager()
- {
+ public RPCManager getRPCManager() {
return rpcManager;
}
- public StateTransferManager getStateTransferManager()
- {
+ public StateTransferManager getStateTransferManager() {
return stateTransferManager;
}
- public String getClusterName()
- {
+ public String getClusterName() {
return configuration.getClusterName();
}
- public int getNumberOfAttributes()
- {
+ public int getNumberOfAttributes() {
return dataContainer.getNumberOfAttributes();
}
- public int getNumberOfNodes()
- {
+ public int getNumberOfNodes() {
return dataContainer.getNumberOfNodes();
}
- public RegionManager getRegionManager()
- {
+ public RegionManager getRegionManager() {
return regionManager;
}
- public GlobalTransaction getCurrentTransaction(Transaction tx, boolean createIfNotExists)
- {
+ public GlobalTransaction getCurrentTransaction(Transaction tx, boolean createIfNotExists) {
return transactionTable.getCurrentTransaction(tx, createIfNotExists);
}
- public GlobalTransaction getCurrentTransaction()
- {
+ public GlobalTransaction getCurrentTransaction() {
return transactionTable.getCurrentTransaction();
}
- public Set<Fqn> getInternalFqns()
- {
+ public Set<Fqn> getInternalFqns() {
return dataContainer.getInternalFqns();
}
- public int getNumberOfLocksHeld()
- {
+ public int getNumberOfLocksHeld() {
return dataContainer.getNumberOfLocksHeld();
}
- public boolean exists(String fqn)
- {
+ public boolean exists(String fqn) {
return exists(Fqn.fromString(fqn));
}
- public boolean exists(Fqn fqn)
- {
- if (usingMvcc)
- {
+ public boolean exists(Fqn fqn) {
+ if (usingMvcc) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
ExistsCommand command = commandsFactory.buildExistsNodeCommand(fqn);
return (Boolean) invoker.invoke(ctx, command);
- }
- else
- {
+ } else {
return peek(fqn, false) != null;
}
}
- public Notifier getNotifier()
- {
+ public Notifier getNotifier() {
return notifier;
}
- public Marshaller getMarshaller()
- {
+ public Marshaller getMarshaller() {
return marshaller;
}
- public GravitateResult gravitateData(Fqn fqn, boolean searchBuddyBackupSubtrees, InvocationContext ctx)
- {
+ public GravitateResult gravitateData(Fqn fqn, boolean searchBuddyBackupSubtrees, InvocationContext ctx) {
cacheStatusCheck(ctx);
GravitateDataCommand command = commandsFactory.buildGravitateDataCommand(fqn, searchBuddyBackupSubtrees);
return (GravitateResult) invoker.invoke(ctx, command);
}
@SuppressWarnings("unchecked")
- public NodeSPI<K, V> peek(Fqn fqn, boolean includeDeletedNodes, boolean includeInvalidNodes)
- {
+ public NodeSPI<K, V> peek(Fqn fqn, boolean includeDeletedNodes, boolean includeInvalidNodes) {
// TODO: clean this up somehow! Anyway, this method should NOT be used outside of testing frameworks.
return (usingMvcc)
? mvccPeek(fqn)
@@ -295,8 +261,7 @@
}
@SuppressWarnings("unchecked")
- public NodeSPI<K, V> peek(Fqn fqn, boolean includeDeletedNodes)
- {
+ public NodeSPI<K, V> peek(Fqn fqn, boolean includeDeletedNodes) {
// TODO: clean this up somehow! Anyway, this method should NOT be used outside of testing frameworks.
return (usingMvcc)
? mvccPeek(fqn)
@@ -304,124 +269,100 @@
}
@SuppressWarnings("unchecked")
- private NodeSPI<K, V> mvccPeek(Fqn f)
- {
+ private NodeSPI<K, V> mvccPeek(Fqn f) {
NodeSPI<K, V> n;
- try
- {
+ try {
n = mvccHelper.wrapNodeForReading(getInvocationContext(), f, false);
}
- catch (InterruptedException e)
- {
+ catch (InterruptedException e) {
throw new CacheException(e);
}
if (n == null || n.isNullNode()) return null;
return n;
}
- public void addCacheListener(Object listener)
- {
+ public void addCacheListener(Object listener) {
notifier.addCacheListener(listener);
}
- public void removeCacheListener(Object listener)
- {
+ public void removeCacheListener(Object listener) {
notifier.removeCacheListener(listener);
}
- public Set<Object> getCacheListeners()
- {
+ public Set<Object> getCacheListeners() {
return notifier.getCacheListeners();
}
- public void create() throws CacheException
- {
+ public void create() throws CacheException {
componentRegistry.create();
}
- public void start() throws CacheException
- {
+ public void start() throws CacheException {
componentRegistry.start();
}
- public void stop()
- {
+ public void stop() {
componentRegistry.stop();
}
- public void destroy()
- {
+ public void destroy() {
reset();
componentRegistry.destroy();
}
- public CacheStatus getCacheStatus()
- {
+ public CacheStatus getCacheStatus() {
return componentRegistry.getState();
}
- public InvocationContext getInvocationContext()
- {
+ public InvocationContext getInvocationContext() {
assertIsConstructed();
return invocationContextContainer.get();
}
- public void setInvocationContext(InvocationContext ctx)
- {
+ public void setInvocationContext(InvocationContext ctx) {
assertIsConstructed();
// assume a null ctx is meant to "un-set" the context?
- if (ctx == null)
- {
+ if (ctx == null) {
invocationContextContainer.remove();
- }
- else
- {
+ } else {
invocationContextContainer.set(ctx);
}
}
- public Address getLocalAddress()
- {
+ public Address getLocalAddress() {
if (rpcManager == null) return null;
return rpcManager.getLocalAddress();
}
- public List<Address> getMembers()
- {
+ public List<Address> getMembers() {
if (rpcManager == null) return null;
return rpcManager.getMembers();
}
- public String getVersion()
- {
+ public String getVersion() {
return Version.printVersion();
}
- public void move(Fqn nodeToMove, Fqn newParent) throws NodeNotExistsException
- {
+ public void move(Fqn nodeToMove, Fqn newParent) throws NodeNotExistsException {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
MoveCommand command = commandsFactory.buildMoveCommand(nodeToMove, newParent);
invoker.invoke(ctx, command);
}
- public void move(String nodeToMove, String newParent) throws NodeNotExistsException
- {
+ public void move(String nodeToMove, String newParent) throws NodeNotExistsException {
move(Fqn.fromString(nodeToMove), Fqn.fromString(newParent));
}
- public boolean removeRegion(Fqn fqn)
- {
+ public boolean removeRegion(Fqn fqn) {
return regionManager.removeRegion(fqn);
}
- public Region getRegion(Fqn fqn, boolean createIfAbsent)
- {
+ public Region getRegion(Fqn fqn, boolean createIfAbsent) {
return regionManager.getRegion(fqn, createIfAbsent);
}
- public void evict(Fqn fqn, boolean recursive)
- {
+ public void evict(Fqn fqn, boolean recursive) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
EvictCommand c = commandsFactory.buildEvictFqnCommand(fqn);
@@ -429,47 +370,38 @@
invoker.invoke(ctx, c);
}
- public void evict(Fqn fqn)
- {
+ public void evict(Fqn fqn) {
evict(fqn, false);
}
@SuppressWarnings("unchecked")
- public V get(Fqn fqn, K key)
- {
+ public V get(Fqn fqn, K key) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GetKeyValueCommand command = commandsFactory.buildGetKeyValueCommand(fqn, key, true);
return (V) invoker.invoke(ctx, command);
}
- public V get(String fqn, K key)
- {
+ public V get(String fqn, K key) {
return get(Fqn.fromString(fqn), key);
}
- public boolean removeNode(Fqn fqn)
- {
+ public boolean removeNode(Fqn fqn) {
// special case if we are removing the root. Remove all children instead.
- if (fqn.isRoot())
- {
+ if (fqn.isRoot()) {
boolean result = true;
// we need to preserve options
InvocationContext ctx = getInvocationContext();
Option o = ctx.getOptionOverrides();
Set<Fqn> internalFqns = getInternalFqns();
- for (Object childName : peek(fqn, false, false).getChildrenNames())
- {
- if (!internalFqns.contains(Fqn.fromElements(childName)))
- {
+ for (Object childName : peek(fqn, false, false).getChildrenNames()) {
+ if (!internalFqns.contains(Fqn.fromElements(childName))) {
ctx.setOptionOverrides(o);
result = removeNode(Fqn.fromRelativeElements(fqn, childName)) && result;
}
}
return result;
- }
- else
- {
+ } else {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GlobalTransaction tx = transactionTable.getCurrentTransaction();
@@ -479,28 +411,24 @@
}
}
- public boolean removeNode(String fqn)
- {
+ public boolean removeNode(String fqn) {
return removeNode(Fqn.fromString(fqn));
}
@SuppressWarnings("unchecked")
- public NodeSPI<K, V> getNode(Fqn fqn)
- {
+ public NodeSPI<K, V> getNode(Fqn fqn) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GetNodeCommand command = commandsFactory.buildGetNodeCommand(fqn);
return (NodeSPI<K, V>) invoker.invoke(ctx, command);
}
- public NodeSPI<K, V> getNode(String fqn)
- {
+ public NodeSPI<K, V> getNode(String fqn) {
return getNode(Fqn.fromString(fqn));
}
@SuppressWarnings("unchecked")
- public V remove(Fqn fqn, K key) throws CacheException
- {
+ public V remove(Fqn fqn, K key) throws CacheException {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GlobalTransaction tx = transactionTable.getCurrentTransaction();
@@ -508,45 +436,36 @@
return (V) invoker.invoke(ctx, command);
}
- public V remove(String fqn, K key)
- {
+ public V remove(String fqn, K key) {
return remove(Fqn.fromString(fqn), key);
}
- public void put(Fqn fqn, Map<? extends K, ? extends V> data)
- {
+ public void put(Fqn fqn, Map<? extends K, ? extends V> data) {
invokePut(fqn, data, false);
}
- public void put(String fqn, Map<? extends K, ? extends V> data)
- {
+ public void put(String fqn, Map<? extends K, ? extends V> data) {
put(Fqn.fromString(fqn), data);
}
- public void putForExternalRead(Fqn fqn, K key, V value)
- {
+ public void putForExternalRead(Fqn fqn, K key, V value) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
// if the node exists then this should be a no-op.
- if (peek(fqn, false, false) == null)
- {
+ if (peek(fqn, false, false) == null) {
getInvocationContext().getOptionOverrides().setFailSilently(true);
getInvocationContext().getOptionOverrides().setForceAsynchronous(true);
PutForExternalReadCommand command = commandsFactory.buildPutForExternalReadCommand(null, fqn, key, value);
invoker.invoke(ctx, command);
- }
- else
- {
- if (log.isDebugEnabled())
- {
+ } else {
+ if (log.isDebugEnabled()) {
log.debug("putForExternalRead() called with Fqn " + fqn + " and this node already exists. This method is hence a no op.");
}
}
}
@SuppressWarnings("unchecked")
- public V put(Fqn fqn, K key, V value)
- {
+ public V put(Fqn fqn, K key, V value) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GlobalTransaction tx = transactionTable.getCurrentTransaction();
@@ -554,29 +473,25 @@
return (V) invoker.invoke(ctx, command);
}
- public V put(String fqn, K key, V value)
- {
+ public V put(String fqn, K key, V value) {
return put(Fqn.fromString(fqn), key, value);
}
@SuppressWarnings("unchecked")
- public Map<K, V> getData(Fqn fqn)
- {
+ public Map<K, V> getData(Fqn fqn) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GetDataMapCommand command = commandsFactory.buildGetDataMapCommand(fqn);
return (Map<K, V>) invoker.invoke(ctx, command);
}
- public Set<K> getKeys(String fqn)
- {
+ public Set<K> getKeys(String fqn) {
return getKeys(Fqn.fromString(fqn));
}
@SuppressWarnings("unchecked")
- public Set<K> getKeys(Fqn fqn)
- {
+ public Set<K> getKeys(Fqn fqn) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GetKeysCommand command = commandsFactory.buildGetKeysCommand(fqn);
@@ -586,85 +501,79 @@
/**
* Removes the keys and properties from a node.
*/
- public void clearData(String fqn) throws CacheException
- {
+ public void clearData(String fqn) throws CacheException {
clearData(Fqn.fromString(fqn));
}
/**
* Removes the keys and properties from a named node.
*/
- public void clearData(Fqn fqn)
- {
+ public void clearData(Fqn fqn) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GlobalTransaction tx = getCurrentTransaction();
invoker.invoke(ctx, commandsFactory.buildClearDataCommand(tx, fqn));
}
- public void startBatch()
- {
- if (!configuration.isInvocationBatchingEnabled())
- {
+ public void startBatch() {
+ if (!configuration.isInvocationBatchingEnabled()) {
throw new ConfigurationException("Invocation batching not enabled in current configuration! Please use the <invocationBatching /> element.");
}
batchContainer.startBatch();
}
- public void endBatch(boolean successful)
- {
- if (!configuration.isInvocationBatchingEnabled())
- {
+ public void endBatch(boolean successful) {
+ if (!configuration.isInvocationBatchingEnabled()) {
throw new ConfigurationException("Invocation batching not enabled in current configuration! Please use the <invocationBatching /> element.");
}
batchContainer.endBatch(successful);
}
@SuppressWarnings("unchecked")
- public Set<Object> getChildrenNames(Fqn fqn)
- {
+ public Set<Object> getChildrenNames(Fqn fqn) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
GetChildrenNamesCommand command = commandsFactory.buildGetChildrenNamesCommand(fqn);
Set<Object> retval = (Set<Object>) invoker.invoke(ctx, command);
- if (retval != null)
- {
- retval = Immutables.immutableSetWrap(retval); // this is already copied in the command
+
+ // this is needed to work around JBCACHE-1480
+ if (retval != null && !retval.isEmpty()) {
+ for (Iterator i = retval.iterator(); i.hasNext();) {
+ Object child = getNode(Fqn.fromRelativeElements(fqn, i.next()));
+ if (child == null) i.remove();
+ }
}
- else
- {
+
+ if (retval != null) {
+ retval = Immutables.immutableSetWrap(retval); // this is already copied in the command
+ } else {
retval = Collections.emptySet();
}
+
return retval;
}
@SuppressWarnings("unchecked")
- public Set<String> getChildrenNames(String fqn)
- {
+ public Set<String> getChildrenNames(String fqn) {
return (Set) getChildrenNames(Fqn.fromString(fqn));
}
- public ComponentRegistry getComponentRegistry()
- {
+ public ComponentRegistry getComponentRegistry() {
return componentRegistry;
}
- public DataContainer getDataContainer()
- {
+ public DataContainer getDataContainer() {
return dataContainer;
}
- protected void cacheStatusCheck(InvocationContext ctx)
- {
+ protected void cacheStatusCheck(InvocationContext ctx) {
assertIsConstructed();
- if (!ctx.getOptionOverrides().isSkipCacheStatusCheck() && !componentRegistry.invocationsAllowed(true))
- {
+ if (!ctx.getOptionOverrides().isSkipCacheStatusCheck() && !componentRegistry.invocationsAllowed(true)) {
throw new IllegalStateException("Cache not in STARTED state!");
}
}
- private void invokePut(Fqn fqn, Map<? extends K, ? extends V> data, boolean erase)
- {
+ private void invokePut(Fqn fqn, Map<? extends K, ? extends V> data, boolean erase) {
InvocationContext ctx = invocationContextContainer.get();
cacheStatusCheck(ctx);
PutDataMapCommand command = commandsFactory.buildPutDataMapCommand(null, fqn, data);
@@ -674,13 +583,11 @@
// TODO: Add these to the public interface in 3.1.0.
- public void setData(Fqn fqn, Map<? extends K, ? extends V> data)
- {
+ public void setData(Fqn fqn, Map<? extends K, ? extends V> data) {
invokePut(fqn, data, true);
}
- public void setData(String fqn, Map<? extends K, ? extends V> data)
- {
+ public void setData(String fqn, Map<? extends K, ? extends V> data) {
setData(Fqn.fromString(fqn), data);
}
}
Added: core/branches/3.0.X/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java
===================================================================
--- core/branches/3.0.X/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java (rev 0)
+++ core/branches/3.0.X/src/test/java/org/jboss/cache/api/mvcc/GetChildrenNamesAfterRemoveTest.java 2009-02-19 12:47:56 UTC (rev 7728)
@@ -0,0 +1,54 @@
+package org.jboss.cache.api.mvcc;
+
+import org.jboss.cache.AbstractSingleCacheTest;
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+import org.jboss.cache.UnitTestCacheFactory;
+import org.testng.annotations.Test;
+
+import javax.transaction.TransactionManager;
+import java.util.Collections;
+import java.util.Set;
+
+@Test(groups = {"functional", "mvcc"}, sequential = true, testName = "api.mvcc.GetChildrenNamesAfterRemoveTest")
+public class GetChildrenNamesAfterRemoveTest extends AbstractSingleCacheTest {
+ private CacheSPI<String, String> cache;
+
+ public CacheSPI createCache() {
+ // start a single cache instance
+ UnitTestCacheFactory<String, String> cf = new UnitTestCacheFactory<String, String>();
+ cache = (CacheSPI<String, String>) cf.createCache("configs/local-tx.xml", false, getClass());
+ cache.getConfiguration().setEvictionConfig(null);
+ cache.start();
+ return cache;
+ }
+
+ public void testRemove() throws Exception {
+ TransactionManager tm = cache.getTransactionManager();
+ Fqn<String> testFqn = Fqn.fromElements("test1");
+
+ tm.begin();
+ assertEmpty(testFqn);
+ cache.put(testFqn, "x", "x");
+ assertNotEmpty(testFqn);
+ cache.removeNode(testFqn);
+ assertEmpty(testFqn);
+ tm.commit();
+ assertEmpty(testFqn);
+ }
+
+ private void assertNotEmpty(Fqn<String> testFqn) {
+ Set<Node<String, String>> children = cache.getNode(testFqn.getParent()).getChildren();
+ assert !children.isEmpty() : "Node " + testFqn + " should not be a leaf, but getChildren() returns: " + children;
+ Set<Object> childrenNames = cache.getNode(testFqn.getParent()).getChildrenNames();
+ assert childrenNames.equals(Collections.singleton(testFqn.getLastElement())) : "Retrieving children names on " + testFqn + " should return test1 but is: " + childrenNames;
+ }
+
+ private void assertEmpty(Fqn<String> testFqn) {
+ Set<Node<String, String>> children = cache.getNode(testFqn.getParent()).getChildren();
+ assert children.isEmpty() : "Children should be empty but is " + children;
+ Set<Object> childrenNames = cache.getNode(testFqn.getParent()).getChildrenNames();
+ assert childrenNames.isEmpty() : "Children names should be empty but is " + childrenNames;
+ }
+}
15 years, 11 months
JBoss Cache SVN: r7727 - in core/branches/flat/src/main/java/org/horizon: util and 1 other directory.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2009-02-19 07:03:50 -0500 (Thu, 19 Feb 2009)
New Revision: 7727
Modified:
core/branches/flat/src/main/java/org/horizon/container/UnsortedDataContainer.java
core/branches/flat/src/main/java/org/horizon/util/AbstractMap.java
core/branches/flat/src/main/java/org/horizon/util/BidirectionalLinkedHashMap.java
core/branches/flat/src/main/java/org/horizon/util/FastCopyHashMap.java
Log:
Removed support for null keys
Modified: core/branches/flat/src/main/java/org/horizon/container/UnsortedDataContainer.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/container/UnsortedDataContainer.java 2009-02-18 17:40:47 UTC (rev 7726)
+++ core/branches/flat/src/main/java/org/horizon/container/UnsortedDataContainer.java 2009-02-19 12:03:50 UTC (rev 7727)
@@ -50,7 +50,6 @@
// stuff, we don't need to iterate through immortal data.
final ConcurrentMap<Object, CachedValue> immortalData = new ConcurrentHashMap<Object, CachedValue>();
final ConcurrentMap<Object, ExpirableCachedValue> expirableData = new ConcurrentHashMap<Object, ExpirableCachedValue>();
- private static final Object NULL = new Object();
private CacheLoaderManager clm;
private CacheStore cacheStore;
@@ -75,25 +74,16 @@
}
}
- private Object maskNullKey(Object o) {
- return o == null ? NULL : o;
- }
-
- private Object unmaskNullKey(Object o) {
- return (o == NULL) ? null : o;
- }
-
public Object get(Object k) {
- Object maskedKey = maskNullKey(k);
- CachedValue cv = immortalData.get(maskedKey);
+ CachedValue cv = immortalData.get(k);
if (cv != null) {
cv.touch();
return cv.getValue();
} else {
- ExpirableCachedValue ecv = expirableData.get(maskedKey);
+ ExpirableCachedValue ecv = expirableData.get(k);
if (ecv != null) {
if (ecv.isExpired()) {
- expire(maskedKey);
+ expire(k);
} else {
ecv.touch();
return ecv.getValue();
@@ -105,9 +95,7 @@
}
public void put(Object k, Object v, long lifespan) {
- Object maskedKey = maskNullKey(k);
-
- CachedValue cv = immortalData.get(maskedKey);
+ CachedValue cv = immortalData.get(k);
ExpirableCachedValue ecv;
if (cv != null) {
// do we need to move this to expirable?
@@ -117,16 +105,16 @@
cv.touch();
} else {
ecv = new ExpirableCachedValue(v, lifespan);
- immortalData.remove(maskedKey);
- expirableData.put(maskedKey, ecv);
+ immortalData.remove(k);
+ expirableData.put(k, ecv);
}
- } else if ((ecv = expirableData.get(maskedKey)) != null) {
+ } else if ((ecv = expirableData.get(k)) != null) {
// do we need to move this to immortal?
if (lifespan < 0) {
// yes.
cv = new CachedValue(v);
- expirableData.remove(maskedKey);
- immortalData.put(maskedKey, cv);
+ expirableData.remove(k);
+ immortalData.put(k, cv);
} else {
ecv.setValue(v);
ecv.touch();
@@ -135,21 +123,20 @@
// does not exist anywhere!
if (lifespan < 0) {
cv = new CachedValue(v);
- immortalData.put(maskedKey, cv);
+ immortalData.put(k, cv);
} else {
ecv = new ExpirableCachedValue(v, lifespan);
- expirableData.put(maskedKey, ecv);
+ expirableData.put(k, ecv);
}
}
}
public boolean containsKey(Object k) {
- Object maskedKey = maskNullKey(k);
- if (!immortalData.containsKey(maskedKey)) {
- ExpirableCachedValue ecv = expirableData.get(maskedKey);
+ if (!immortalData.containsKey(k)) {
+ ExpirableCachedValue ecv = expirableData.get(k);
if (ecv == null) return false;
if (ecv.isExpired()) {
- expire(maskedKey);
+ expire(k);
return false;
}
}
@@ -157,16 +144,14 @@
}
public long getModifiedTimestamp(Object key) {
- Object maskedKey = maskNullKey(key);
- CachedValue cv = immortalData.get(maskedKey);
- if (cv == null) cv = expirableData.get(maskedKey);
+ CachedValue cv = immortalData.get(key);
+ if (cv == null) cv = expirableData.get(key);
return cv == null ? -1 : cv.getModifiedTime();
}
public Object remove(Object k) {
- Object maskedKey = maskNullKey(k);
- CachedValue cv = immortalData.remove(maskedKey);
- if (cv == null) cv = expirableData.remove(maskedKey);
+ CachedValue cv = immortalData.remove(k);
+ if (cv == null) cv = expirableData.remove(k);
if (cv == null) {
return null;
@@ -238,8 +223,7 @@
}
public boolean contains(Object o) {
- Object maskedKey = maskNullKey((Object) o);
- return immortalKeys.contains(maskedKey) || expirableKeys.contains(maskedKey);
+ return immortalKeys.contains(o) || expirableKeys.contains(o);
}
public boolean remove(Object o) {
@@ -277,7 +261,7 @@
@SuppressWarnings("unchecked")
public Object next() {
- return unmaskNullKey(currentIterator.next());
+ return currentIterator.next();
}
public void remove() {
Modified: core/branches/flat/src/main/java/org/horizon/util/AbstractMap.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/util/AbstractMap.java 2009-02-18 17:40:47 UTC (rev 7726)
+++ core/branches/flat/src/main/java/org/horizon/util/AbstractMap.java 2009-02-19 12:03:50 UTC (rev 7727)
@@ -5,18 +5,13 @@
import java.util.Set;
/**
- * Similar to the JDK's AbstractMap, this provides common functionality for custom map implementations.
+ * Similar to the JDK's AbstractMap, this provides common functionality for custom map implementations. Unlike JDK's
+ * AbstractMap, there is no support for null keys.
*
* @author Manik Surtani
* @since 1.0
*/
public abstract class AbstractMap<K, V> implements Map<K, V> {
-
- /**
- * Marks null keys.
- */
- private static final Object NULL = new Object();
-
// views
protected transient Set<Map.Entry<K, V>> entrySet = null;
protected transient Set<K> keySet = null;
@@ -30,19 +25,13 @@
return h ^ (h >>> 7) ^ (h >>> 4);
}
- @SuppressWarnings("unchecked")
- protected static final <K> K maskNull(K key) {
- return key == null ? (K) NULL : key;
- }
-
- protected static final <K> K unmaskNull(K key) {
- return key == NULL ? null : key;
- }
-
protected static final boolean eq(Object o1, Object o2) {
return o1 == o2 || (o1 != null && o1.equals(o2));
}
+ protected final void assertKeyNotNull(Object key) {
+ if (key == null) throw new NullPointerException("Null keys are not supported!");
+ }
protected static class SimpleEntry<K, V> implements Map.Entry<K, V> {
private K key;
Modified: core/branches/flat/src/main/java/org/horizon/util/BidirectionalLinkedHashMap.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/util/BidirectionalLinkedHashMap.java 2009-02-18 17:40:47 UTC (rev 7726)
+++ core/branches/flat/src/main/java/org/horizon/util/BidirectionalLinkedHashMap.java 2009-02-19 12:03:50 UTC (rev 7727)
@@ -9,9 +9,18 @@
import java.util.NoSuchElementException;
/**
- * // TODO javadocs
+ * Similar to the JDK's {@link java.util.LinkedHashMap} except that this version makes use of the fact that entries are
+ * bidirectionally linked and can hence be navigated either from the start <i>or</i> from the end. It exposes such
+ * navigability by overriding {@link java.util.Map#keySet()} and {@link java.util.Map#entrySet()} to return {@link
+ * org.horizon.util.ReversibleSet} rather than a standard JDK {@link java.util.Set}. {@link
+ * org.horizon.util.ReversibleSet}s allow you to access 2 iterators: one that iterates from start to end, as usual, and
+ * a reversed one that iterates from end to start instead.
+ * <p/>
+ * Unlike the JDK {@link java.util.LinkedHashMap}, this implementation does not support null keys.
+ * <p/>
*
* @author Manik Surtani
+ * @since 1.0
*/
public class BidirectionalLinkedHashMap<K, V> extends AbstractMap<K, V> {
@@ -221,8 +230,7 @@
* with <tt>key</tt>.)
*/
public V put(K key, V value) {
- if (key == null)
- return putForNullKey(value);
+ assertKeyNotNull(key);
int hash = hash(key.hashCode());
int i = indexFor(hash, table.length);
for (LinkedEntry<K, V> e = table[i]; e != null; e = e.next) {
@@ -241,23 +249,6 @@
}
/**
- * Offloaded version of put for null keys
- */
- private V putForNullKey(V value) {
- for (LinkedEntry<K, V> e = table[0]; e != null; e = e.next) {
- if (e.key == null) {
- V oldValue = e.value;
- e.value = value;
- e.recordAccess(this);
- return oldValue;
- }
- }
- modCount++;
- addEntry(0, null, value, 0);
- return null;
- }
-
- /**
* This method is used instead of put by constructors and pseudoconstructors (clone, readObject). It does not resize
* the table, check for comodification, etc. It calls createEntry rather than addEntry.
*/
@@ -360,6 +351,7 @@
* with <tt>key</tt>.)
*/
public V remove(Object key) {
+ assertKeyNotNull(key);
LinkedEntry<K, V> e = removeEntryForKey(key);
return (e == null ? null : e.value);
}
@@ -369,7 +361,7 @@
* contains no mapping for this key.
*/
final LinkedEntry<K, V> removeEntryForKey(Object key) {
- int hash = (key == null) ? 0 : hash(key.hashCode());
+ int hash = hash(key.hashCode());
int i = indexFor(hash, table.length);
LinkedEntry<K, V> prev = table[i];
LinkedEntry<K, V> e = prev;
@@ -598,6 +590,7 @@
* operation may be used to distinguish these two cases.
*/
public V get(Object key) {
+ assertKeyNotNull(key);
LinkedEntry<K, V> e = getEntry(key);
if (e == null)
return null;
Modified: core/branches/flat/src/main/java/org/horizon/util/FastCopyHashMap.java
===================================================================
--- core/branches/flat/src/main/java/org/horizon/util/FastCopyHashMap.java 2009-02-18 17:40:47 UTC (rev 7726)
+++ core/branches/flat/src/main/java/org/horizon/util/FastCopyHashMap.java 2009-02-19 12:03:50 UTC (rev 7727)
@@ -34,6 +34,8 @@
/**
* A HashMap that is optimized for fast shallow copies.
+ * <p/>
+ * Null keys are <i>not</i> supported.
*
* @author Jason T. Greene
* @since 1.0
@@ -149,8 +151,7 @@
}
public V get(Object key) {
- key = maskNull(key);
-
+ assertKeyNotNull(key);
int hash = hash(key);
int length = table.length;
int index = index(hash, length);
@@ -168,8 +169,7 @@
}
public boolean containsKey(Object key) {
- key = maskNull(key);
-
+ assertKeyNotNull(key);
int hash = hash(key);
int length = table.length;
int index = index(hash, length);
@@ -195,8 +195,7 @@
}
public V put(K key, V value) {
- key = maskNull(key);
-
+ assertKeyNotNull(key);
Entry<K, V>[] table = this.table;
int hash = hash(key);
int length = table.length;
@@ -274,8 +273,7 @@
}
public V remove(Object key) {
- key = maskNull(key);
-
+ assertKeyNotNull(key);
Entry<K, V>[] table = this.table;
int length = table.length;
int hash = hash(key);
@@ -398,8 +396,6 @@
@SuppressWarnings("unchecked")
private void putForCreate(K key, V value) {
- key = maskNull(key);
-
Entry<K, V>[] table = this.table;
int hash = hash(key);
int length = table.length;
@@ -420,7 +416,7 @@
for (Entry<K, V> e : table) {
if (e != null) {
- s.writeObject(unmaskNull(e.key));
+ s.writeObject(e.key);
s.writeObject(e.value);
}
}
@@ -527,7 +523,7 @@
private class KeyIterator extends FasyCopyHashMapIterator<K> {
public K next() {
- return unmaskNull(nextEntry().key);
+ return nextEntry().key;
}
}
@@ -553,7 +549,7 @@
public Map.Entry<K, V> next() {
Entry<K, V> e = nextEntry();
- return new WriteThroughEntry(unmaskNull(e.key), e.value);
+ return new WriteThroughEntry(e.key, e.value);
}
}
15 years, 11 months