[jboss-cvs] JBossCache/src/org/jboss/cache ...
Manik Surtani
msurtani at jboss.com
Sat Dec 30 12:49:55 EST 2006
User: msurtani
Date: 06/12/30 12:49:55
Modified: src/org/jboss/cache Cache.java
RPCManager.java CacheSPI.java AbstractNode.java
Version.java ConsoleListener.java RegionImpl.java
TreeCacheView2.java RegionNotEmptyException.java
ReplicationQueue.java RegionManager.java
TransactionEntry.java NodeSPI.java
TreeCacheView.java Fqn.java Node.java
TransactionManagerLookup.java
Added: src/org/jboss/cache
VersionedNode.java CacheImpl.java
UnversionedNode.java
Removed: src/org/jboss/cache
OptimisticTreeNode.java TreeCacheProxyImpl.java
DataNode.java NodeImpl.java TreeNode.java
TreeCache.java
Log:
Major changes to restructure cache and node object model
Revision Changes Path
1.16 +4 -13 JBossCache/src/org/jboss/cache/Cache.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: Cache.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/Cache.java,v
retrieving revision 1.15
retrieving revision 1.16
diff -u -b -r1.15 -r1.16
--- Cache.java 15 Nov 2006 06:49:25 -0000 1.15
+++ Cache.java 30 Dec 2006 17:49:54 -0000 1.16
@@ -9,25 +9,23 @@
import org.jboss.cache.config.Configuration;
import org.jgroups.Address;
-import javax.transaction.TransactionManager;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
- * Along with {@link Node}, this is the central construct of JBoss Cache.
+ * Along with {@link Node}, this is the central construct and basic client API of JBoss Cache and is used for
+ * cache-wide operations.
* <p/>
* The cache is constructed using a {@link org.jboss.cache.factories.CacheFactory}.
* <p/>
- * The cache is essentially a wrapper around the default root {@link Node}. Basic operations on the cache are
- * inherited from {@link Node}.
*
* @author <a href="mailto:manik at jboss.org">Manik Surtani (manik at jboss.org)</a>
* @see Node
* @see org.jboss.cache.factories.CacheFactory
* @since 2.0.0
*/
-public interface Cache extends Node
+public interface Cache
{
/**
* Retrieves the configuration of this cache.
@@ -191,13 +189,6 @@
void destroy();
/**
- * Retrieves a reference to a running {@link javax.transaction.TransactionManager}, if one is configured.
- *
- * @return a TransactionManager
- */
- TransactionManager getTransactionManager();
-
- /**
* Retrieves the current invocation context for the current invocation and cache instance.
*
* @see org.jboss.cache.InvocationContext
1.5 +4 -4 JBossCache/src/org/jboss/cache/RPCManager.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: RPCManager.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/RPCManager.java,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -b -r1.4 -r1.5
--- RPCManager.java 29 Aug 2006 17:50:14 -0000 1.4
+++ RPCManager.java 30 Dec 2006 17:49:54 -0000 1.5
@@ -19,14 +19,14 @@
*/
public class RPCManager
{
- TreeCache c;
+ CacheImpl c;
- private RPCManager(TreeCache c)
+ private RPCManager(CacheImpl c)
{
this.c = c;
}
- public static RPCManager getInstance(TreeCache c)
+ public static RPCManager getInstance(CacheImpl c)
{
RPCManager rpcManager = c.getRpcManager();
if (rpcManager == null)
@@ -38,7 +38,7 @@
return rpcManager;
}
- // for now, we delegate RPC calls to deprecated methods in TreeCache.
+ // for now, we delegate RPC calls to deprecated methods in CacheImpl.
public List callRemoteMethods(List<Address> recipients, MethodCall methodCall, int mode, boolean excludeSelf, long timeout) throws Exception
{
1.23 +13 -2 JBossCache/src/org/jboss/cache/CacheSPI.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: CacheSPI.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/CacheSPI.java,v
retrieving revision 1.22
retrieving revision 1.23
diff -u -b -r1.22 -r1.23
--- CacheSPI.java 27 Nov 2006 17:07:05 -0000 1.22
+++ CacheSPI.java 30 Dec 2006 17:49:54 -0000 1.23
@@ -17,11 +17,15 @@
import org.jboss.cache.statetransfer.StateTransferManager;
import javax.transaction.Transaction;
+import javax.transaction.TransactionManager;
import java.util.List;
import java.util.Map;
/**
- * A more detailed interface to {@link Cache}, which is used when writing plugins for or extending JBoss Cache.
+ * A more detailed interface to {@link Cache}, which is used when writing plugins for or extending JBoss Cache. A reference
+ * to this interface should only be obtained when it is passed in to your code, for example when you write an
+ * {@link Interceptor}, {@link CacheLoader} or {@link CacheListener}. You should not attempt to directly cast a {@link Cache} instance
+ * to this interface, as in future, the implementation may not allow it.
*
* @author <a href="mailto:manik at jboss.org">Manik Surtani (manik at jboss.org)</a>
* @see NodeSPI
@@ -31,6 +35,13 @@
public interface CacheSPI extends Cache
{
/**
+ * Retrieves a reference to a running {@link javax.transaction.TransactionManager}, if one is configured.
+ *
+ * @return a TransactionManager
+ */
+ TransactionManager getTransactionManager();
+
+ /**
* @return an immutable {@link List} of {@link Interceptor}s configured for this cache.
*/
List<Interceptor> getInterceptorChain();
@@ -133,7 +144,7 @@
*
* @return a node if one exists or null
*/
- Node peek(Fqn fqn);
+ NodeSPI peek(Fqn fqn);
/**
* Used with buddy replication's data gravitation interceptor
1.24 +5 -5 JBossCache/src/org/jboss/cache/AbstractNode.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: AbstractNode.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/AbstractNode.java,v
retrieving revision 1.23
retrieving revision 1.24
diff -u -b -r1.23 -r1.24
--- AbstractNode.java 14 Dec 2006 17:18:47 -0000 1.23
+++ AbstractNode.java 30 Dec 2006 17:49:54 -0000 1.24
@@ -6,14 +6,14 @@
import java.util.Map;
/**
- * Base class for {@link NodeImpl}.
+ * Base class for {@link UnversionedNode}.
*
* @author manik
*/
-public abstract class AbstractNode implements DataNode, NodeSPI
+public abstract class AbstractNode implements Node
{
protected boolean deleted;
- protected Map<Object, AbstractNode> children;
+ protected Map<Object, Node> children;
public boolean isDeleted()
{
@@ -30,9 +30,9 @@
deleted = marker;
if (recursive && children != null)
{
- for (AbstractNode child : children.values())
+ for (Node child : children.values())
{
- child.markAsDeleted(marker, true);
+ ((AbstractNode) child).markAsDeleted(marker, true);
}
}
}
1.26 +3 -3 JBossCache/src/org/jboss/cache/Version.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: Version.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/Version.java,v
retrieving revision 1.25
retrieving revision 1.26
diff -u -b -r1.25 -r1.26
--- Version.java 21 Dec 2006 18:17:12 -0000 1.25
+++ Version.java 30 Dec 2006 17:49:54 -0000 1.26
@@ -3,17 +3,17 @@
import java.util.StringTokenizer;
/**
- * Contains version information about this release of TreeCache.
+ * Contains version information about this release of CacheImpl.
*
* @author Bela Ban
- * @version $Id: Version.java,v 1.25 2006/12/21 18:17:12 msurtani Exp $
+ * @version $Id: Version.java,v 1.26 2006/12/30 17:49:54 msurtani Exp $
*/
public class Version
{
public static final String version = "2.0.0.ALPHA2";
public static final String codename = "Habanero";
public static byte[] version_id = {'0', '2', '0', '0', 'a'};
- public static final String cvs = "$Id: Version.java,v 1.25 2006/12/21 18:17:12 msurtani Exp $";
+ public static final String cvs = "$Id: Version.java,v 1.26 2006/12/30 17:49:54 msurtani Exp $";
private static final int MAJOR_SHIFT = 11;
private static final int MINOR_SHIFT = 6;
1.11 +4 -4 JBossCache/src/org/jboss/cache/ConsoleListener.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: ConsoleListener.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/ConsoleListener.java,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -b -r1.10 -r1.11
--- ConsoleListener.java 18 Oct 2006 11:07:54 -0000 1.10
+++ ConsoleListener.java 30 Dec 2006 17:49:54 -0000 1.11
@@ -22,7 +22,7 @@
*/
public class ConsoleListener implements CacheListener
{
- private TreeCache _cache;
+ private CacheImpl _cache;
private boolean _startCache;
/**
@@ -33,7 +33,7 @@
*
* @param cache the cache to monitor for replication events.
*/
- public ConsoleListener(TreeCache cache)
+ public ConsoleListener(CacheImpl cache)
throws Exception
{
this(cache, true, true);
@@ -48,7 +48,7 @@
* @param stopCache indicates whether or not the cache should be stopped by
* this class.
*/
- public ConsoleListener(TreeCache cache,
+ public ConsoleListener(CacheImpl cache,
boolean startCache, boolean stopCache)
throws Exception
{
@@ -364,7 +364,7 @@
try
{
- TreeCache cache = new TreeCache();
+ CacheImpl cache = new CacheImpl();
String configFileName = DEFAULT_CONFIG_FILE_NAME;
1.16 +8 -8 JBossCache/src/org/jboss/cache/RegionImpl.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: RegionImpl.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/RegionImpl.java,v
retrieving revision 1.15
retrieving revision 1.16
diff -u -b -r1.15 -r1.16
--- RegionImpl.java 8 Dec 2006 18:49:17 -0000 1.15
+++ RegionImpl.java 30 Dec 2006 17:49:54 -0000 1.16
@@ -6,10 +6,6 @@
*/
package org.jboss.cache;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.cache.config.EvictionRegionConfig;
@@ -19,6 +15,10 @@
import org.jboss.cache.eviction.NodeEventType;
import org.jboss.cache.util.Util;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
/**
* Default implementation of a {@link Region}
*
@@ -220,7 +220,7 @@
{
if (log.isTraceEnabled()) log.trace("Instantiating " + className);
EvictionPolicy ep = (EvictionPolicy) Util.loadClass(className).newInstance();
- ep.configure(regionManager.treeCache);
+ ep.configure(regionManager.cache);
return ep;
}
catch (Exception e)
1.18 +26 -26 JBossCache/src/org/jboss/cache/TreeCacheView2.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: TreeCacheView2.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/TreeCacheView2.java,v
retrieving revision 1.17
retrieving revision 1.18
diff -u -b -r1.17 -r1.18
--- TreeCacheView2.java 18 Oct 2006 11:07:54 -0000 1.17
+++ TreeCacheView2.java 30 Dec 2006 17:49:54 -0000 1.18
@@ -48,26 +48,26 @@
/**
* Graphical view of a ReplicatedTree (using the MVC paradigm). An instance of this class needs to be given a
 * reference to the underlying model (ReplicatedTree) and needs to register as a ReplicatedTreeListener. Changes
- * to the tree structure are propagated from the model to the view (via ReplicatedTreeListener), changes from the
- * GUI (e.g. by a user) are executed on the tree model (which will broadcast the changes to all replicas).<p>
+ * to the cache structure are propagated from the model to the view (via ReplicatedTreeListener), changes from the
+ * GUI (e.g. by a user) are executed on the cache model (which will broadcast the changes to all replicas).<p>
* The view itself caches only the nodes, but doesn't cache any of the data (HashMap) associated with it. When
- * data needs to be displayed, the underlying tree will be accessed directly.
+ * data needs to be displayed, the underlying cache will be accessed directly.
*
- * @version $Revision: 1.17 $
+ * @version $Revision: 1.18 $
*/
public class TreeCacheView2
{
static TreeCacheGui2 gui_ = null;
static boolean useConsole = false;
- TreeCache cache_ = null;
+ CacheImpl cache_ = null;
static Log log_ = LogFactory.getLog(TreeCacheView2.class.getName());
- public TreeCacheView2(TreeCache cache) throws Exception
+ public TreeCacheView2(CacheImpl cache) throws Exception
{
this.cache_ = cache;
}
- public static void setCache(TreeCache cache)
+ public static void setCache(CacheImpl cache)
{
gui_.setCache(cache);
}
@@ -125,7 +125,7 @@
public static void main(String args[])
{
- TreeCache tree = null;
+ CacheImpl cache = null;
TreeCacheView2 demo;
String start_directory = null;
String resource = "META-INF/replSync-service.xml";
@@ -155,12 +155,12 @@
}
else
{
- tree = new TreeCache();
- tree.setConfiguration(new XmlConfigurationParser().parseFile(resource));
+ cache = new CacheImpl();
+ cache.setConfiguration(new XmlConfigurationParser().parseFile(resource));
- tree.start();
-// tree.getNotifier().addCacheListener(new TreeCacheView.MyListener());
- demo = new TreeCacheView2(tree);
+ cache.start();
+// cache.getNotifier().addCacheListener(new TreeCacheView.MyListener());
+ demo = new TreeCacheView2(cache);
demo.start();
if (start_directory != null && start_directory.length() > 0)
{
@@ -177,7 +177,7 @@
static void help()
{
System.out.println("TreeCacheView [-help] " +
- "[-mbean_name <name of TreeCache MBean>] " +
+ "[-mbean_name <name of CacheImpl MBean>] " +
"[-start_directory <dirname>] [-props <props>] " +
"[-use_queue <true/false>] [-queue_interval <ms>] [-console]" +
"[-queue_max_elements <num>]");
@@ -187,16 +187,16 @@
class ShutdownThread extends Thread
{
- TreeCache tree = null;
+ CacheImpl cache = null;
- ShutdownThread(TreeCache tree)
+ ShutdownThread(CacheImpl cache)
{
- this.tree = tree;
+ this.cache = cache;
}
public void run()
{
- tree.stop();
+ cache.stop();
}
}
@@ -205,7 +205,7 @@
{
private static final long serialVersionUID = -1242167331988194987L;
- TreeCache cache_;
+ CacheImpl cache_;
DefaultTreeModel tree_model = null;
Log log = LogFactory.getLog(getClass());
JTree jtree = null;
@@ -230,7 +230,7 @@
JPanel mainPanel;
- public TreeCacheGui2(TreeCache cache) throws Exception
+ public TreeCacheGui2(CacheImpl cache) throws Exception
{
addNotify();
@@ -318,12 +318,12 @@
}
}
- public void setCache(TreeCache tree)
+ public void setCache(CacheImpl cache)
{
- cache_ = tree;
+ cache_ = cache;
if (cache_ != null)
{
- Runtime.getRuntime().addShutdownHook(new ShutdownThread(tree));
+ Runtime.getRuntime().addShutdownHook(new ShutdownThread(cache));
cache_.getNotifier().addCacheListener(this);
setTitle("TreeCacheGui2: mbr=" + getLocalAddress());
tx_mgr = cache_.getTransactionManager();
@@ -396,7 +396,7 @@
val = (String) table_model.getValueAt(row, col + 1);
if (key != null && val != null)
{
- // tree.put(selected_node, key, val);
+ // cache.put(selected_node, key, val);
try
{
@@ -561,7 +561,7 @@
/* ----------------------------- Private Methods ---------------------------------- */
/**
- * Fetches all data from underlying tree model and display it graphically
+ * Fetches all data from underlying cache model and display it graphically
*/
void init()
{
@@ -578,7 +578,7 @@
/**
- * Fetches all data from underlying tree model and display it graphically
+ * Fetches all data from underlying cache model and display it graphically
*/
private void populateTree()
{
1.2 +10 -9 JBossCache/src/org/jboss/cache/RegionNotEmptyException.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: RegionNotEmptyException.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/RegionNotEmptyException.java,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -b -r1.1 -r1.2
--- RegionNotEmptyException.java 9 Sep 2005 07:59:06 -0000 1.1
+++ RegionNotEmptyException.java 30 Dec 2006 17:49:54 -0000 1.2
@@ -9,20 +9,21 @@
/**
* Thrown when an attempt is made to
- * {@link TreeCache#activateRegion(String) activate a subtree}
+ * {@link CacheImpl#activateRegion(String) activate a subtree}
 * rooted in an Fqn that already has an existing node in the cache.
*
- * @see TreeCache#activateRegion(String)
- * @see TreeCache#exists(Fqn)
- * @see TreeCache#exists(String)
- *
* @author <a href="mailto://brian.stansberry@jboss.com">Brian Stansberry</a>
* @version $Revision$
+ * @see CacheImpl#activateRegion(String)
+ * @see CacheImpl#exists(Fqn)
+ * @see CacheImpl#exists(String)
*/
public class RegionNotEmptyException extends CacheException
{
- /** The serialVersionUID */
+ /**
+ * The serialVersionUID
+ */
private static final long serialVersionUID = 1L;
public RegionNotEmptyException()
1.12 +76 -45 JBossCache/src/org/jboss/cache/ReplicationQueue.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: ReplicationQueue.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/ReplicationQueue.java,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -b -r1.11 -r1.12
--- ReplicationQueue.java 25 Aug 2006 14:10:09 -0000 1.11
+++ ReplicationQueue.java 30 Dec 2006 17:49:54 -0000 1.12
@@ -9,8 +9,8 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.jboss.cache.marshall.MethodDeclarations;
import org.jboss.cache.marshall.MethodCall;
+import org.jboss.cache.marshall.MethodDeclarations;
import java.util.ArrayList;
import java.util.List;
@@ -22,53 +22,68 @@
* Periodically (or when certain size is exceeded) takes elements and replicates them.
*
* @author <a href="mailto:bela at jboss.org">Bela Ban</a> May 24, 2003
- * @version $Revision: 1.11 $
+ * @version $Revision: 1.12 $
*/
-public class ReplicationQueue {
+public class ReplicationQueue
+{
- private static Log log=LogFactory.getLog(ReplicationQueue.class);
+ private static Log log = LogFactory.getLog(ReplicationQueue.class);
- private TreeCache cache=null;
+ private CacheImpl cache = null;
- /** We flush every 5 seconds. Inactive if -1 or 0 */
- private long interval=5000;
+ /**
+ * We flush every 5 seconds. Inactive if -1 or 0
+ */
+ private long interval = 5000;
- /** Max elements before we flush */
- private long max_elements=500;
+ /**
+ * Max elements before we flush
+ */
+ private long max_elements = 500;
- /** Holds the replication jobs: LinkedList<MethodCall> */
- private List elements=new ArrayList();
+ /**
+ * Holds the replication jobs: LinkedList<MethodCall>
+ */
+ private List elements = new ArrayList();
- /** For periodical replication */
- private Timer timer=null;
+ /**
+ * For periodical replication
+ */
+ private Timer timer = null;
- /** The timer task, only calls flush() when executed by Timer */
- private MyTask task=null;
+ /**
+ * The timer task, only calls flush() when executed by Timer
+ */
+ private MyTask task = null;
- public ReplicationQueue() {
+ public ReplicationQueue()
+ {
}
/**
* Constructs a new ReplicationQueue.
*/
- public ReplicationQueue(TreeCache cache, long interval, long max_elements) {
- this.cache=cache;
- this.interval=interval;
- this.max_elements=max_elements;
+ public ReplicationQueue(CacheImpl cache, long interval, long max_elements)
+ {
+ this.cache = cache;
+ this.interval = interval;
+ this.max_elements = max_elements;
}
/**
* Returns the flush interval in milliseconds.
*/
- public long getInterval() {
+ public long getInterval()
+ {
return interval;
}
/**
* Sets the flush interval in milliseconds.
*/
- public void setInterval(long interval) {
- this.interval=interval;
+ public void setInterval(long interval)
+ {
+ this.interval = interval;
stop();
start();
}
@@ -77,26 +92,31 @@
* Returns the maximum number of elements to hold.
* If the maximum number is reached, flushes in the calling thread.
*/
- public long getMax_elements() {
+ public long getMax_elements()
+ {
return max_elements;
}
/**
* Sets the maximum number of elements to hold.
*/
- public void setMax_elements(long max_elements) {
- this.max_elements=max_elements;
+ public void setMax_elements(long max_elements)
+ {
+ this.max_elements = max_elements;
}
/**
* Starts the asynchronous flush queue.
*/
- public synchronized void start() {
- if(interval > 0) {
- if(task == null)
- task=new MyTask();
- if(timer == null) {
- timer=new Timer(true);
+ public synchronized void start()
+ {
+ if (interval > 0)
+ {
+ if (task == null)
+ task = new MyTask();
+ if (timer == null)
+ {
+ timer = new Timer(true);
timer.schedule(task,
500, // delay before initial flush
interval); // interval between flushes
@@ -107,14 +127,17 @@
/**
* Stops the asynchronous flush queue.
*/
- public synchronized void stop() {
- if(task != null) {
+ public synchronized void stop()
+ {
+ if (task != null)
+ {
task.cancel();
- task=null;
+ task = null;
}
- if(timer != null) {
+ if (timer != null)
+ {
timer.cancel();
- timer=null;
+ timer = null;
}
}
@@ -122,10 +145,12 @@
/**
* Adds a new method call.
*/
- public void add(MethodCall job) {
+ public void add(MethodCall job)
+ {
if (job == null)
throw new NullPointerException("job is null");
- synchronized (elements) {
+ synchronized (elements)
+ {
elements.add(job);
if (elements.size() >= max_elements)
flush();
@@ -135,26 +160,32 @@
/**
* Flushes existing method calls.
*/
- public void flush() {
+ public void flush()
+ {
List l;
- synchronized(elements) {
+ synchronized (elements)
+ {
if (log.isTraceEnabled())
log.trace("flush(): flushing repl queue (num elements=" + elements.size() + ")");
l = new ArrayList(elements);
elements.clear();
}
- try {
+ try
+ {
// send to all live nodes in the cluster
cache.callRemoteMethods(null, MethodDeclarations.replicateAllMethod, new Object[]{l}, false, true, 5000);
}
- catch(Throwable t) {
+ catch (Throwable t)
+ {
log.error("failed replicating " + l.size() + " elements in replication queue", t);
}
}
- class MyTask extends TimerTask {
- public void run() {
+ class MyTask extends TimerTask
+ {
+ public void run()
+ {
flush();
}
}
1.17 +37 -37 JBossCache/src/org/jboss/cache/RegionManager.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: RegionManager.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/RegionManager.java,v
retrieving revision 1.16
retrieving revision 1.17
diff -u -b -r1.16 -r1.17
--- RegionManager.java 14 Dec 2006 17:18:47 -0000 1.16
+++ RegionManager.java 30 Dec 2006 17:49:54 -0000 1.17
@@ -42,7 +42,7 @@
Map<Fqn, Region> regionsRegistry = new ConcurrentHashMap<Fqn, Region>();
boolean defaultInactive;
private Log log = LogFactory.getLog(RegionManager.class);
- TreeCache treeCache;
+ CacheImpl cache;
private boolean usingEvictions;
private EvictionConfig evictionConfig;
private EvictionTimerTask evictionTimerTask = new EvictionTimerTask();
@@ -59,9 +59,9 @@
return usingEvictions;
}
- public RegionManager(TreeCache treeCache)
+ public RegionManager(CacheImpl cache)
{
- this.treeCache = treeCache;
+ this.cache = cache;
}
public boolean isDefaultInactive()
@@ -79,7 +79,7 @@
* Helper utility that checks for a classloader registered for the
* given Fqn, and if found sets it as the TCCL. If the given Fqn is
* under the _BUDDY_BACKUP_ region, the equivalent region in the main
- * tree is used to find the classloader.
+ * cache is used to find the classloader.
*
* @param fqn Fqn pointing to a region for which a special classloader
* may have been registered.
@@ -263,7 +263,7 @@
//r.activate();
r.setActive(true);
// FIXME - persistent state transfer counts too!
- if (treeCache.getConfiguration().isFetchInMemoryState())
+ if (cache.getConfiguration().isFetchInMemoryState())
{
activateRegion(r.getFqn().toString());
}
@@ -275,7 +275,7 @@
r = getRegion(fqn, true);
r.setActive(true);
// FIXME - persistent state transfer counts too!
- if (treeCache.getConfiguration().isFetchInMemoryState())
+ if (cache.getConfiguration().isFetchInMemoryState())
{
activateRegion(r.getFqn().toString());
}
@@ -293,13 +293,13 @@
* for that subtree.
* <p/>
* <strong>NOTE:</strong> This method will cause the creation of a node
- * in the local tree at <code>subtreeFqn</code> whether or not that
+ * in the local cache at <code>subtreeFqn</code> whether or not that
* node exists anywhere else in the cluster. If the node does not exist
* elsewhere, the local node will be empty. The creation of this node will
* not be replicated.
*
* @param subtreeFqn Fqn string indicating the uppermost node in the
- * portion of the tree that should be activated.
+ * portion of the cache that should be activated.
* @throws RegionNotEmptyException if the node <code>subtreeFqn</code>
* exists and has either data or children
*/
@@ -308,12 +308,12 @@
Fqn fqn = Fqn.fromString(subtreeFqn);
// Check whether the node already exists and has data
- Node subtreeRoot = treeCache.findNode(fqn);
+ Node subtreeRoot = cache.findNode(fqn);
/*
* Commented out on Nov 16,2006 Manik&Vladimir
*
- * if (!(treeCache.isNodeEmpty(subtreeRoot)))
+ * if (!(cache.isNodeEmpty(subtreeRoot)))
{
throw new RegionNotEmptyException("Node " + subtreeRoot.getFqn() + " already exists and is not empty");
}*/
@@ -340,20 +340,20 @@
// If a classloader is registered for the node's region, use it
ClassLoader cl = region.getClassLoader();
- BuddyManager buddyManager = treeCache.getBuddyManager();
+ BuddyManager buddyManager = cache.getBuddyManager();
// Request partial state from the cluster and integrate it
if (buddyManager == null)
{
// Get the state from any node that has it and put it
- // in the main tree
+ // in the main cache
if (subtreeRoot == null)
{
// We'll update this node with the state we receive
- subtreeRoot = treeCache.createSubtreeRootNode(fqn);
+ subtreeRoot = cache.createSubtreeRootNode(fqn);
}
Address[] groupMembers = null;
- Vector<Address> members = treeCache.getMembers();
+ Vector<Address> members = cache.getMembers();
synchronized (members)
{
groupMembers = members.toArray(new Address[members.size()]);
@@ -367,13 +367,13 @@
}
else
{
- treeCache.fetchPartialState(groupMembers, subtreeRoot.getFqn());
+ cache.fetchPartialState(groupMembers, subtreeRoot.getFqn());
}
}
else
{
// Get the state from each DataOwner and integrate in their
- // respective buddy backup tree
+ // respective buddy backup cache
List buddies = buddyManager.getBuddyAddresses();
for (Iterator it = buddies.iterator(); it.hasNext();)
{
@@ -381,13 +381,13 @@
Object sources[] = new Object[]{buddy};
Fqn base = new Fqn(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN, BuddyManager.getGroupNameFromAddress(buddy));
Fqn buddyRoot = new Fqn(base, fqn);
- subtreeRoot = treeCache.findNode(buddyRoot);
+ subtreeRoot = cache.findNode(buddyRoot);
if (subtreeRoot == null)
{
// We'll update this node with the state we receive
- subtreeRoot = treeCache.createSubtreeRootNode(buddyRoot);
+ subtreeRoot = cache.createSubtreeRootNode(buddyRoot);
}
- treeCache.fetchPartialState(sources, subtreeRoot.getFqn());
+ cache.fetchPartialState(sources, subtreeRoot.getFqn());
}
}
}
@@ -409,11 +409,11 @@
// Throw the exception on, wrapping if necessary
if (t instanceof RegionNotEmptyException)
{
- throw(RegionNotEmptyException) t;
+ throw (RegionNotEmptyException) t;
}
else if (t instanceof CacheException)
{
- throw(CacheException) t;
+ throw (CacheException) t;
}
else
{
@@ -432,7 +432,7 @@
* rooted at <code>subtreeFqn</code> and evict all nodes in that subtree.
*
* @param subtreeFqn Fqn string indicating the uppermost node in the
- * portion of the tree that should be activated.
+ * portion of the cache that should be activated.
* @throws RegionNameConflictException if <code>subtreeFqn</code> indicates
* a node that is part of another
* subtree that is being specially
@@ -461,21 +461,21 @@
// Record that this fqn is in status change, so can't provide state
activationChangeNodes.add(fqn);
- VersionAwareMarshaller marshaller = treeCache.getMarshaller();
+ VersionAwareMarshaller marshaller = cache.getMarshaller();
boolean inactive = marshaller.isInactive(subtreeFqn);
if (!inactive)
{
deactivate(subtreeFqn);
}
- // Create a list with the Fqn in the main tree and any buddy backup trees
- BuddyManager buddyManager = treeCache.getBuddyManager();
+ // Create a list with the Fqn in the main cache and any buddy backup trees
+ BuddyManager buddyManager = cache.getBuddyManager();
ArrayList list = new ArrayList();
list.add(fqn);
if (buddyManager != null)
{
- Set buddies = treeCache.getChildrenNames(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN);
+ Set buddies = cache.getChildrenNames(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN);
if (buddies != null)
{
for (Iterator it = buddies.iterator(); it.hasNext();)
@@ -486,16 +486,16 @@
}
}
- long stateFetchTimeout = treeCache.getConfiguration().getLockAcquisitionTimeout() + 5000;
- // Remove the subtree from the main tree and any buddy backup trees
+ long stateFetchTimeout = cache.getConfiguration().getLockAcquisitionTimeout() + 5000;
+ // Remove the subtree from the main cache and any buddy backup trees
for (Iterator it = list.iterator(); it.hasNext();)
{
Fqn subtree = (Fqn) it.next();
- subtreeRoot = treeCache.findNode(subtree);
+ subtreeRoot = cache.findNode(subtree);
if (subtreeRoot != null)
{
// Acquire locks
- Object owner = treeCache.getOwnerForLock();
+ Object owner = cache.getOwnerForLock();
subtreeLock = subtreeRoot.getNodeSPI().getLock();
subtreeLock.acquireAll(owner, stateFetchTimeout, NodeLock.LockType.WRITE);
subtreeLocked = true;
@@ -510,7 +510,7 @@
}
// Remove the subtree
- treeCache._evictSubtree(subtree);
+ cache._evictSubtree(subtree);
// Release locks
if (parent != null)
@@ -630,7 +630,7 @@
region.setActive(false);
// FIXME - we should always clean up; otherwise stale data
// is in memory!
- if (treeCache.getConfiguration().isFetchInMemoryState())
+ if (cache.getConfiguration().isFetchInMemoryState())
{
inactivateRegion(fqn.toString());
}
@@ -642,7 +642,7 @@
region.setActive(false);
// FIXME - we should always clean up; otherwise stale data
// is in memory!
- if (treeCache.getConfiguration().isFetchInMemoryState())
+ if (cache.getConfiguration().isFetchInMemoryState())
{
inactivateRegion(fqn.toString());
}
@@ -754,7 +754,7 @@
public void startEvictionThread()
{
- evictionTimerTask.init(evictionConfig.getWakeupIntervalSeconds(), treeCache.getNotifier());
+ evictionTimerTask.init(evictionConfig.getWakeupIntervalSeconds(), cache.getNotifier());
}
/**
@@ -791,7 +791,7 @@
* @author Ben Wang 02-2004
* @author Daniel Huang (dhuang at jboss.org)
* @author Brian Stansberry
- * @version $Id: RegionManager.java,v 1.16 2006/12/14 17:18:47 msurtani Exp $
+ * @version $Id: RegionManager.java,v 1.17 2006/12/30 17:49:54 msurtani Exp $
*/
/*public class ERegionManager
{
@@ -806,7 +806,7 @@
private Timer evictionThread_;
private EvictionTimerTask evictionTimerTask_;
private EvictionConfig config_;
- private TreeCache cache_;
+ private CacheImpl cache_;
*//**
* @deprecated This is provided for JBCache 1.2 backwards API compatibility.
@@ -974,7 +974,7 @@
return createRegion(Fqn.fromString(fqn), policy, config);
}
- public void configure(TreeCache cache)
+ public void configure(CacheImpl cache)
{
if (log_.isTraceEnabled())
{
1.18 +5 -5 JBossCache/src/org/jboss/cache/TransactionEntry.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: TransactionEntry.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/TransactionEntry.java,v
retrieving revision 1.17
retrieving revision 1.18
diff -u -b -r1.17 -r1.18
--- TransactionEntry.java 14 Dec 2006 17:18:47 -0000 1.17
+++ TransactionEntry.java 30 Dec 2006 17:49:54 -0000 1.18
@@ -25,7 +25,7 @@
/**
* This is the value (key being the {@link GlobalTransaction}) in the transaction table
- * of TreeCache.
+ * of CacheImpl.
* <br>A TransactionEntry maintains
* <ul>
* <li>Handle to local Transactions: there can be more than 1 local TX associated with a GlobalTransaction
@@ -37,7 +37,7 @@
* </ul>
*
* @author <a href="mailto:bela at jboss.org">Bela Ban</a> Apr 14, 2003
- * @version $Revision: 1.17 $
+ * @version $Revision: 1.18 $
*/
public class TransactionEntry
{
@@ -276,7 +276,7 @@
}
/**
- * Posts all undo operations to the TreeCache.
+ * Posts all undo operations to the CacheImpl.
*/
public void undoOperations(CacheSPI cache)
{
@@ -300,10 +300,10 @@
{
try
{
- Object retval = undo_op.invoke(((TreeCacheProxyImpl) cache).getTreeCache());
+ Object retval = undo_op.invoke(cache);
if (retval instanceof Throwable)
{
- throw(Throwable) retval;
+ throw (Throwable) retval;
}
}
catch (Throwable t)
1.6 +50 -17 JBossCache/src/org/jboss/cache/NodeSPI.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: NodeSPI.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/NodeSPI.java,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -b -r1.5 -r1.6
--- NodeSPI.java 29 Nov 2006 22:12:44 -0000 1.5
+++ NodeSPI.java 30 Dec 2006 17:49:54 -0000 1.6
@@ -6,18 +6,20 @@
*/
package org.jboss.cache;
-import java.util.Map;
-
import org.jboss.cache.lock.NodeLock;
+import org.jboss.cache.optimistic.DataVersion;
+
+import java.util.Map;
+import java.util.Set;
/**
* A more detailed interface to {@link Node}, which is used when writing plugins for or extending JBoss Cache.
*
+ * @author <a href="mailto:manik at jboss.org">Manik Surtani (manik at jboss.org)</a>
* @see Node
* @since 2.0.0
- * @author <a href="mailto:manik at jboss.org">Manik Surtani (manik at jboss.org)</a>
*/
-public interface NodeSPI
+public interface NodeSPI extends Node
{
// TODO:
// everything that is not already represented by Node
@@ -54,6 +56,7 @@
/**
* Sets the node's children explictly.
* The data should only be modified by the cache itself.
+ *
* @param children cannot be null
*/
void setChildrenMap(Map<Object, Node> children);
@@ -77,6 +80,7 @@
/**
* Returns an existing child or creates a new one using a global transaction.
+ *
* @return newly created node
*/
Node getOrCreateChild(Object name, GlobalTransaction tx);
@@ -90,4 +94,33 @@
* Sets the FQN of this node and resets the names of all children as well.
*/
void setFqn(Fqn f);
+
+ boolean isDeleted();
+
+ void markAsDeleted(boolean marker);
+
+ void markAsDeleted(boolean marker, boolean recursive);
+
+ void addChild(Object nodeName, Node nodeToAdd);
+
+ Set<Node> getChildren(boolean includeMarkedAsDeleted);
+
+ void printDetails(StringBuffer sb, int i);
+
+ void removeChildren();
+
+ void print(StringBuffer sb, int indent);
+
+ // versioning
+ /**
+ * May throw UnsupportedOperationException if versioning is not used.
+ *
+ * @param version
+ */
+ void setVersion(DataVersion version);
+
+ /**
+ * May throw UnsupportedOperationException if versioning is not used.
+ */
+ DataVersion getVersion();
}
1.18 +64 -71 JBossCache/src/org/jboss/cache/TreeCacheView.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: TreeCacheView.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/TreeCacheView.java,v
retrieving revision 1.17
retrieving revision 1.18
diff -u -b -r1.17 -r1.18
--- TreeCacheView.java 11 Dec 2006 21:17:51 -0000 1.17
+++ TreeCacheView.java 30 Dec 2006 17:49:54 -0000 1.18
@@ -8,36 +8,15 @@
package org.jboss.cache;
-import java.awt.BorderLayout;
-import java.awt.event.ActionEvent;
-import java.awt.event.MouseAdapter;
-import java.awt.event.MouseEvent;
-import java.awt.event.MouseListener;
-import java.awt.event.WindowEvent;
-import java.awt.event.WindowListener;
-import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Vector;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.config.Configuration;
+import org.jgroups.View;
import javax.management.MBeanServer;
import javax.management.MBeanServerFactory;
import javax.management.ObjectName;
-import javax.swing.AbstractAction;
-import javax.swing.JFrame;
-import javax.swing.JMenu;
-import javax.swing.JMenuBar;
-import javax.swing.JOptionPane;
-import javax.swing.JPanel;
-import javax.swing.JPopupMenu;
-import javax.swing.JScrollPane;
-import javax.swing.JTable;
-import javax.swing.JTextField;
-import javax.swing.JTree;
+import javax.swing.*;
import javax.swing.event.TableModelEvent;
import javax.swing.event.TableModelListener;
import javax.swing.event.TreeSelectionEvent;
@@ -49,28 +28,38 @@
import javax.swing.tree.TreeNode;
import javax.swing.tree.TreePath;
import javax.swing.tree.TreeSelectionModel;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.jboss.cache.config.Configuration;
-import org.jgroups.View;
+import java.awt.*;
+import java.awt.event.ActionEvent;
+import java.awt.event.MouseAdapter;
+import java.awt.event.MouseEvent;
+import java.awt.event.MouseListener;
+import java.awt.event.WindowEvent;
+import java.awt.event.WindowListener;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Vector;
/**
* Graphical view of a ReplicatedTree (using the MVC paradigm). An instance of this class needs to be given a
* reference to the underlying model (ReplicatedTree) and needs to registers as a ReplicatedTreeListener. Changes
- * to the tree structure are propagated from the model to the view (via ReplicatedTreeListener), changes from the
- * GUI (e.g. by a user) are executed on the tree model (which will broadcast the changes to all replicas).<p>
+ * to the cache structure are propagated from the model to the view (via ReplicatedTreeListener), changes from the
+ * GUI (e.g. by a user) are executed on the cache model (which will broadcast the changes to all replicas).<p>
* The view itself caches only the nodes, but doesn't cache any of the data (HashMap) associated with it. When
- * data needs to be displayed, the underlying tree will be accessed directly.
+ * data needs to be displayed, the underlying cache will be accessed directly.
*
- * @version $Revision: 1.17 $
+ * @version $Revision: 1.18 $
* @author<a href="mailto:bela at jboss.org">Bela Ban</a> March 27 2003
*/
public class TreeCacheView implements TreeCacheViewMBean
{
/**
- * Reference to the TreeCache MBean (the model for this view)
+ * Reference to the CacheImpl MBean (the model for this view)
*/
ObjectName cache_service = null;
TreeCacheGui gui = null;
@@ -149,7 +138,7 @@
throw new Exception("TreeCacheView.init(): no MBeanServers found");
srv = (MBeanServer) servers.get(0);
log.info("init(): found MBeanServer " + srv);
- cache = (CacheSPI)srv.getAttribute(this.cache_service, "Cache");
+ cache = (CacheSPI) srv.getAttribute(this.cache_service, "Cache");
System.out.println("Cache " + cache);
TreeCacheGui gui = new TreeCacheGui(cache);
}
@@ -189,10 +178,10 @@
public static void main(String args[])
{
- TreeCache tree = null;
+ CacheImpl cache = null;
TreeCacheView demo;
String start_directory = null;
- String mbean_name = "jboss.cache:service=TreeCache";
+ String mbean_name = "jboss.cache:service=CacheImpl";
String props = getDefaultProps();
MBeanServer srv;
Log log;
@@ -241,7 +230,7 @@
try
{
- log = LogFactory.getLog(TreeCache.class);
+ log = LogFactory.getLog(CacheImpl.class);
srv = MBeanServerFactory.createMBeanServer();
// String FACTORY="org.jboss.cache.transaction.DummyContextFactory";
@@ -250,7 +239,7 @@
// DummyTransactionManager.getInstance();
- tree = new TreeCache();
+ cache = new CacheImpl();
Configuration c = new Configuration();
c.setClusterName("TreeCacheGroup");
c.setClusterConfig(props);
@@ -264,21 +253,21 @@
c.setReplQueueMaxElements(queue_max_elements);
}
- tree.getNotifier().addCacheListener(new MyListener());
+ cache.getNotifier().addCacheListener(new MyListener());
- log.info("registering the tree as " + mbean_name);
- srv.registerMBean(tree.getCacheMBeanInterface(), new ObjectName(mbean_name));
+ log.info("registering the cache as " + mbean_name);
+ srv.registerMBean(cache.getCacheMBeanInterface(), new ObjectName(mbean_name));
- tree.start();
+ cache.start();
- Runtime.getRuntime().addShutdownHook(new ShutdownThread(tree));
+ Runtime.getRuntime().addShutdownHook(new ShutdownThread(cache));
-// tree.put("/a/b/c", null);
-// tree.put("/a/b/c1", null);
-// tree.put("/a/b/c2", null);
-// tree.put("/a/b1/chat", null);
-// tree.put("/a/b1/chat2", null);
-// tree.put("/a/b1/chat5", null);
+// cache.put("/a/b/c", null);
+// cache.put("/a/b/c1", null);
+// cache.put("/a/b/c2", null);
+// cache.put("/a/b1/chat", null);
+// cache.put("/a/b1/chat2", null);
+// cache.put("/a/b1/chat5", null);
demo = new TreeCacheView(mbean_name);
demo.create();
@@ -296,16 +285,16 @@
static class ShutdownThread extends Thread
{
- TreeCache tree = null;
+ CacheImpl cache = null;
- ShutdownThread(TreeCache tree)
+ ShutdownThread(CacheImpl cache)
{
- this.tree = tree;
+ this.cache = cache;
}
public void run()
{
- tree.stop();
+ cache.stop();
}
}
@@ -332,7 +321,7 @@
static void help()
{
System.out.println("TreeCacheView [-help] " +
- "[-mbean_name <name of TreeCache MBean>] " +
+ "[-mbean_name <name of CacheImpl MBean>] " +
"[-start_directory <dirname>] [-props <props>] " +
"[-use_queue <true/false>] [-queue_interval <ms>] " +
"[-queue_max_elements <num>]");
@@ -485,7 +474,7 @@
val = (String) table_model.getValueAt(row, col + 1);
if (key != null && val != null)
{
- // tree.put(selected_node, key, val);
+ // cache.put(selected_node, key, val);
// server.invoke(cache_service, "put",
// new Object[]{selected_node, key, val},
@@ -656,7 +645,7 @@
/* ----------------------------- Private Methods ---------------------------------- */
/**
- * Fetches all data from underlying tree model and display it graphically
+ * Fetches all data from underlying cache model and display it graphically
*/
void init()
{
@@ -674,7 +663,7 @@
/**
- * Fetches all data from underlying tree model and display it graphically
+ * Fetches all data from underlying cache model and display it graphically
*/
private void populateTree()
{
@@ -696,7 +685,8 @@
// 2. Then add my children
children = getChildrenNames(fqn);
- for (Object o : children) {
+ for (Object o : children)
+ {
addGuiNode(new Fqn(fqn, o));
}
}
@@ -705,8 +695,9 @@
Fqn makeFQN(Object[] opath)
{
List l = new ArrayList();
- for (Object o : opath) {
- MyNode node = (MyNode)o;
+ for (Object o : opath)
+ {
+ MyNode node = (MyNode) o;
Object name = node.name;
if (name.equals(Fqn.SEPARATOR))
continue;
@@ -813,8 +804,9 @@
Map getData(Fqn fqn)
{
- Node n = cache.getChild(fqn);
- if (n == null) {
+ Node n = cache.getRoot().getChild(fqn);
+ if (n == null)
+ {
return null;
}
return n.getData();
@@ -850,7 +842,7 @@
{
try
{
- return cache.getChild(fqn).getKeys();
+ return cache.getRoot().getChild(fqn).getKeys();
}
catch (Throwable t)
{
@@ -863,7 +855,7 @@
{
try
{
- return cache.getChild(fqn).getData().get(key);
+ return cache.getRoot().getChild(fqn).getData().get(key);
}
catch (Throwable t)
{
@@ -876,7 +868,7 @@
{
try
{
- Node n = cache.getChild(fqn);
+ Node n = cache.getRoot().getChild(fqn);
return n.getChildrenNames();
}
catch (Throwable t)
@@ -973,7 +965,7 @@
{
try
{
- cache.removeChild(selected_node);
+ cache.getRoot().removeChild(selected_node);
}
catch (Throwable t)
{
@@ -1029,7 +1021,8 @@
if (fqn == null) return null;
curr = this;
- for (Object o : fqn.peekElements()) {
+ for (Object o : fqn.peekElements())
+ {
n = curr.findChild(o);
if (n == null)
{
1.43 +4 -4 JBossCache/src/org/jboss/cache/Fqn.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: Fqn.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/Fqn.java,v
retrieving revision 1.42
retrieving revision 1.43
diff -u -b -r1.42 -r1.43
--- Fqn.java 8 Dec 2006 19:03:56 -0000 1.42
+++ Fqn.java 30 Dec 2006 17:49:54 -0000 1.43
@@ -42,7 +42,7 @@
* <p/>
* Another way to look at it is that the "/" separator is only parsed when it forms a part of a String passed in to Fqn.fromString() and not otherwise.
*
- * @version $Revision: 1.42 $
+ * @version $Revision: 1.43 $
*/
public class Fqn implements Cloneable, Externalizable, Comparable<Fqn>
{
@@ -57,7 +57,7 @@
/**
* Controls the implementation of read/writeExternal.
- * Package protected so TreeCache can set it when ReplicationVersion is set.
+ * Package protected so CacheImpl can set it when ReplicationVersion is set.
*/
static boolean REL_123_COMPATIBLE = false;
@@ -65,7 +65,7 @@
{
// Check if they set a system property telling us to use 1.2.3 compatibility mode
// Obscure use case for this: Server has multiple caches, only one of which has
- // TreeCache.setReplicationVersion() set to "1.2.3". If the 1.2.4+ caches start
+ // CacheImpl.setReplicationVersion() set to "1.2.3". If the 1.2.4+ caches start
// first, they will begin exchanging 1.2.4 style fqn's, and then when the 1.2.3
// cache starts the format will change when it changes REL_123_COMPATIBLE. This
// could cause chaos. The system property allows the 1.2.3 mode to be used by
1.51 +11 -6 JBossCache/src/org/jboss/cache/Node.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: Node.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/Node.java,v
retrieving revision 1.50
retrieving revision 1.51
diff -u -b -r1.50 -r1.51
--- Node.java 20 Nov 2006 03:53:54 -0000 1.50
+++ Node.java 30 Dec 2006 17:49:54 -0000 1.51
@@ -10,9 +10,9 @@
import java.util.Set;
/**
- * Along with {@link Cache}, this is a central construct in the tree structure of JBoss Cache.
+ * Along with {@link Cache}, this is a central construct in the cache structure of JBoss Cache.
* <p/>
- * A Node represents a point in the tree. A Node has references to it's children, parent
+ * A Node represents a point in the cache. A Node has references to it's children, parent
* (each Node has a single parent) and data contained within the Node (key value pairs).
*
* @author <a href="mailto:manik at jboss.org">Manik Surtani (manik at jboss.org)</a>
@@ -47,7 +47,7 @@
Set getKeys();
/**
- * @return The {@link Fqn} which represents the location of this {@link Node} in the tree structure. The {@link Fqn} returned is absolute.
+ * @return The {@link Fqn} which represents the location of this {@link Node} in the cache structure. The {@link Fqn} returned is absolute.
*/
Fqn getFqn();
@@ -82,6 +82,11 @@
Node getChild(Fqn f);
/**
+ * Returns a direct child of the current node.
+ */
+ Node getChild(Object name);
+
+ /**
* Puts a key and a value into the current node's data map.
* Overwrites if the key already exists.
*
@@ -146,11 +151,11 @@
void clearData();
/**
- * Moves a part of the tree to a different subtree.
+ * Moves a part of the cache to a different subtree.
* <p/>
* E.g.:
* <p/>
- * assume a tree structure such as:
+ * assume a cache structure such as:
* <p/>
* <pre>
* /a/b/c
1.3 +5 -3 JBossCache/src/org/jboss/cache/TransactionManagerLookup.java
(In the diff below, changes in quantity of whitespace are not shown.)
Index: TransactionManagerLookup.java
===================================================================
RCS file: /cvsroot/jboss/JBossCache/src/org/jboss/cache/TransactionManagerLookup.java,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -b -r1.2 -r1.3
--- TransactionManagerLookup.java 8 Apr 2006 20:30:07 -0000 1.2
+++ TransactionManagerLookup.java 30 Dec 2006 17:49:54 -0000 1.3
@@ -3,15 +3,17 @@
import javax.transaction.TransactionManager;
/**
- * Factory interface, allows TreeCache to use different transactional systems.
+ * Factory interface, allows CacheImpl to use different transactional systems.
*
* @author Bela Ban, Aug 26 2003
- * @version $Id: TransactionManagerLookup.java,v 1.2 2006/04/08 20:30:07 genman Exp $
+ * @version $Id: TransactionManagerLookup.java,v 1.3 2006/12/30 17:49:54 msurtani Exp $
*/
-public interface TransactionManagerLookup {
+public interface TransactionManagerLookup
+{
/**
* Returns a new TransactionManager.
+ *
* @throws Exception if lookup failed
*/
TransactionManager getTransactionManager() throws Exception;
1.1 date: 2006/12/30 17:49:54; author: msurtani; state: Exp;JBossCache/src/org/jboss/cache/VersionedNode.java
Index: VersionedNode.java
===================================================================
/*
* JBoss, Home of Professional Open Source
*
* Distributable under LGPL license.
* See terms of license at gnu.org.
*/
package org.jboss.cache;
import org.jboss.cache.optimistic.DataVersion;
import org.jboss.cache.optimistic.DefaultDataVersion;
import java.util.Map;
/**
 * VersionedNode extends {@link UnversionedNode} by carrying a {@link DataVersion},
 * used by optimistic locking to validate workspace nodes at commit time.
 *
 * @author <a href="mailto:manik at jboss.org">Manik Surtani (manik at jboss.org)</a>
 */
public class VersionedNode extends UnversionedNode
{
   // Data version of this node; never null (defaults to DefaultDataVersion.ZERO).
   private DataVersion version;
   /**
    * Although this object has a reference to the CacheImpl, the optimistic
    * node is actually disconnected from the CacheImpl itself.
    * The parent could be looked up from the TransactionWorkspace.
    */
   private Node parent;
   /**
    * Creates a node with an explicit initial version.
    *
    * @param version initial data version; may be null, in which case
    *                {@link DefaultDataVersion#ZERO} is used.
    */
   public VersionedNode(Object childName, Fqn fqn, Node parent, Map data, DataVersion version, CacheSPI cache)
   {
      // BUGFIX: the version argument was previously ignored and ZERO was always
      // passed down. Honour it, falling back to ZERO only when it is null.
      this(childName, fqn, parent, data, false, cache, version == null ? DefaultDataVersion.ZERO : version);
   }
   /**
    * Creates a node with version {@link DefaultDataVersion#ZERO}.
    *
    * @param mapSafe whether the supplied data map may be used directly (see superclass)
    */
   public VersionedNode(Object childName, Fqn fqn, Node parent, Map data, boolean mapSafe, CacheSPI cache)
   {
      this(childName, fqn, parent, data, mapSafe, cache, DefaultDataVersion.ZERO);
   }
   /**
    * Master constructor.
    *
    * @param version must not be null
    * @param parent  must not be null unless fqn is the root
    * @throws NullPointerException if version is null, or parent is null for a non-root fqn
    */
   public VersionedNode(Object childName, Fqn fqn, Node parent, Map data, boolean mapSafe, CacheSPI cache, DataVersion version)
   {
      super(childName, fqn, data, mapSafe, cache);
      if (version == null)
         throw new NullPointerException("version");
      if (parent == null && !fqn.isRoot())
         throw new NullPointerException("parent");
      this.parent = parent;
      this.version = version;
   }
   /**
    * Creates a node with version {@link DefaultDataVersion#ZERO} and an unsafe data map.
    */
   public VersionedNode(Object childName, Fqn fqn, Node parent, Map data, CacheSPI cache)
   {
      this(childName, fqn, parent, data, false, cache);
   }
   /**
    * Returns the version id of this node.
    *
    * @return the version; never null
    */
   public DataVersion getVersion()
   {
      return version;
   }
   /**
    * Returns the parent node (may be null only for the root node).
    */
   public Node getParent()
   {
      return parent;
   }
   /**
    * Sets the version id of this node.
    *
    * @param version the new version
    */
   public void setVersion(DataVersion version)
   {
      this.version = version;
   }
   public String toString()
   {
      return "Opt" + super.toString();
   }
}
1.1 date: 2006/12/30 17:49:54; author: msurtani; state: Exp;JBossCache/src/org/jboss/cache/CacheImpl.java
Index: CacheImpl.java
===================================================================
/*
* JBoss, the OpenSource J2EE webOS
*
* Distributable under LGPL license.
* See terms of license at gnu.org.
*/
package org.jboss.cache;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.cache.buddyreplication.BuddyGroup;
import org.jboss.cache.buddyreplication.BuddyManager;
import org.jboss.cache.buddyreplication.BuddyNotInitException;
import org.jboss.cache.buddyreplication.GravitateResult;
import org.jboss.cache.config.BuddyReplicationConfig;
import org.jboss.cache.config.CacheLoaderConfig;
import org.jboss.cache.config.Configuration;
import org.jboss.cache.config.RuntimeConfig;
import org.jboss.cache.factories.InterceptorChainFactory;
import org.jboss.cache.factories.NodeFactory;
import org.jboss.cache.interceptors.Interceptor;
import org.jboss.cache.jmx.CacheMBean;
import org.jboss.cache.loader.CacheLoader;
import org.jboss.cache.loader.CacheLoaderManager;
import org.jboss.cache.loader.NodeData;
import org.jboss.cache.lock.IsolationLevel;
import org.jboss.cache.lock.LockStrategyFactory;
import org.jboss.cache.lock.LockUtil;
import org.jboss.cache.lock.LockingException;
import org.jboss.cache.lock.NodeLock;
import org.jboss.cache.lock.TimeoutException;
import org.jboss.cache.marshall.InactiveRegionAwareRpcDispatcher;
import org.jboss.cache.marshall.MethodCall;
import org.jboss.cache.marshall.MethodCallFactory;
import org.jboss.cache.marshall.MethodDeclarations;
import org.jboss.cache.marshall.RegionNameConflictException;
import org.jboss.cache.marshall.RegionNotFoundException;
import org.jboss.cache.marshall.VersionAwareMarshaller;
import org.jboss.cache.notifications.Notifier;
import org.jboss.cache.optimistic.DataVersion;
import org.jboss.cache.statetransfer.StateTransferManager;
import org.jboss.cache.util.ExposedByteArrayOutputStream;
import org.jboss.cache.util.MapCopy;
import org.jboss.util.stream.MarshalledValueInputStream;
import org.jboss.util.stream.MarshalledValueOutputStream;
import org.jgroups.Address;
import org.jgroups.Channel;
import org.jgroups.ChannelClosedException;
import org.jgroups.ChannelNotConnectedException;
import org.jgroups.ExtendedMembershipListener;
import org.jgroups.ExtendedMessageListener;
import org.jgroups.JChannel;
import org.jgroups.Message;
import org.jgroups.MessageListener;
import org.jgroups.View;
import org.jgroups.blocks.GroupRequest;
import org.jgroups.blocks.RpcDispatcher;
import org.jgroups.jmx.JChannelFactoryMBean;
import org.jgroups.stack.IpAddress;
import org.jgroups.util.Rsp;
import org.jgroups.util.RspList;
import org.jgroups.util.Util;
import org.w3c.dom.Element;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.transaction.Status;
import javax.transaction.SystemException;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.NotSerializableException;
import java.io.OutputStream;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
/**
* A cache-like structure that is replicated across several members. Updates are
* multicast to all group members reliably and in order. User has the
* option to set transaction isolation levels and other options.
*
* @author Bela Ban
* @author Ben Wang
* @author <a href="mailto:manik at jboss.org">Manik Surtani (manik at jboss.org)</a>
* @author Brian Stansberry
* @author Daniel Huang (dhuang at jboss.org)
* @version $Id: CacheImpl.java,v 1.1 2006/12/30 17:49:54 msurtani Exp $
* <p/>
* @see <a href="http://labs.jboss.com/portal/jbosscache/docs">JBossCache doc</a>
*/
public class CacheImpl implements Cloneable, ExtendedMembershipListener, CacheSPI
{
// Name and signature of the JGroups multiplexer factory method used to create a mux channel.
private static final String CREATE_MUX_CHANNEL = "createMultiplexerChannel";
private static final String[] MUX_TYPES = {"java.lang.String", "java.lang.String"};
// Quite poor, but for now, root may be re-initialised when setNodeLockingOptimistic() is called.
// this is because if node locking is optimistic, we need to use OptimisticTreeNodes rather than TreeNodes.
// - MANIK
/**
 * Root node of the cache; replaced with a versioned variant in create() when
 * optimistic node locking is enabled.
 */
protected NodeSPI root;
// Instance initializer: gives every CacheImpl an unversioned root by default.
{
root = NodeFactory.getInstance().createRootDataNode(NodeFactory.NodeType.UNVERSIONED_NODE, this);
}
// Manages marshalling/eviction regions; always assigned in the constructors.
private RegionManager regionManager = null;
/**
 * The JGroups JChannel in use.
 */
protected JChannel channel = null;
/**
 * True if this CacheImpl is the cluster coordinator.
 */
protected boolean coordinator = false;
/**
 * CacheImpl log.
 */
protected final static Log log = LogFactory.getLog(CacheImpl.class);
/**
 * List of cluster group members.
 */
protected final Vector<Address> members = new Vector<Address>();
/**
 * JGroups RpcDispatcher in use.
 */
protected RpcDispatcher disp = null;
/**
 * JGroups message listener.
 */
protected MessageListenerAdaptor ml = new MessageListenerAdaptor();
/**
 * Maintains mapping of transactions (keys) and Modifications/Undo-Operations
 */
private final TransactionTable tx_table = new TransactionTable();
/**
 * Map&lt;Thread, List&lt;Lock&gt;&gt;; maintains locks acquired by threads (used when no TXs are used)
 */
private final Map lock_table = new ConcurrentHashMap();
/**
 * Set&lt;Fqn&gt; of Fqns of the topmost node of internal regions that should
 * not be included in standard state transfers.
 */
protected Set internalFqns = new CopyOnWriteArraySet();
/**
 * True if state was initialized during start-up.
 */
protected volatile boolean isStateSet = false;
// Class name of the eviction interceptor; exposed via getEvictionInterceptorClass().
protected String evictionInterceptorClass = "org.jboss.cache.interceptors.EvictionInterceptor";
/**
 * Marshaller, if one is registered to handle marshalling.
 */
protected VersionAwareMarshaller marshaller_ = null;
/**
 * {@link #invokeMethod(MethodCall)} will dispatch to this chain of interceptors.
 * In the future, this will be replaced with JBossAop. This is a first step towards refactoring JBossCache.
 */
protected Interceptor interceptor_chain = null;
/**
 * Method to acquire a TransactionManager. By default we use JBossTransactionManagerLookup. Has
 * to be set before calling {@link #start()}
 */
protected TransactionManagerLookup tm_lookup = null;
/**
 * Used to get the Transaction associated with the current thread
 */
protected TransactionManager tm = null;
// Manages cache loaders; null when no loader is configured (see getCacheLoader()).
protected CacheLoaderManager cacheLoaderManager;
/**
 * Legacy (pre-Configuration) cache loader config; for backwards compatibility only.
 */
protected CacheLoaderConfig cloaderConfig;
/**
 * Queue used to batch replication updates when mode is repl-async (see setUseReplQueue).
 */
protected ReplicationQueue repl_queue = null;
/**
 * True once create() has been called through the service lifecycle.
 */
protected boolean useCreateService = false;
/**
 * Buddy Manager for buddy replication; null when buddy replication is disabled.
 */
protected BuddyManager buddyManager;
/**
 * State transfer manager. Do not access this field directly -- use the getter,
 * which lazily initialises it.
 */
private StateTransferManager stateTransferManager;
// Event notifier; assigned in every constructor and (defensively) in create().
private Notifier notifier;
// JMX facade for this cache.
private CacheMBean cacheMBean;
/**
 * Returns the {@link StateTransferManager}, lazily creating one on first access.
 * NOTE(review): the lazy init is not synchronized — presumably only called from
 * a single lifecycle thread; confirm before relying on concurrent access.
 */
public StateTransferManager getStateTransferManager()
{
   StateTransferManager stm = stateTransferManager;
   if (stm == null)
   {
      stm = new StateTransferManager(this);
      stateTransferManager = stm;
   }
   return stm;
}
/**
 * Overrides the state transfer manager (used for testing / wiring).
 */
public void setStateTransferManager(StateTransferManager manager)
{
   this.stateTransferManager = manager;
}
// Timeout (ms) for initial state fetch; derived from lock acquisition timeout in create().
private long stateFetchTimeout;
// Per-thread invocation context.
private ThreadLocal<InvocationContext> invocationContextContainer = new ThreadLocal<InvocationContext>();
// True once start() has completed.
public boolean started;
/**
 * Returns the configuration this cache was created with.
 */
public Configuration getConfiguration()
{
return configuration;
}
// Default configuration; replaced when a Configuration is injected via the constructor.
private Configuration configuration = new Configuration(this);
// Handles cluster RPC; see getRpcManager()/setRpcManager().
private RPCManager rpcManager;
/**
 * Returns the RPC manager in use.
 */
public RPCManager getRpcManager()
{
return rpcManager;
}
/**
 * Sets the RPC manager (used for wiring/testing).
 */
public void setRpcManager(RPCManager rpcManager)
{
this.rpcManager = rpcManager;
}
/**
 * Creates a CacheImpl with the given configuration. The cache is not started;
 * call create()/start() to initialise it.
 */
public CacheImpl(Configuration configuration) throws Exception
{
notifier = new Notifier(this);
this.configuration = configuration;
regionManager = new RegionManager(this);
}
/**
 * Constructs an uninitialized CacheImpl with a default Configuration.
 */
public CacheImpl() throws Exception
{
notifier = new Notifier(this);
regionManager = new RegionManager(this);
}
/**
 * Constructs a CacheImpl with an already connected JGroups channel.
 */
public CacheImpl(JChannel channel) throws Exception
{
notifier = new Notifier(this);
this.channel = channel;
regionManager = new RegionManager(this);
}
/**
 * Returns the CacheImpl implementation version string.
 */
public String getVersion()
{
return Version.printVersion();
}
/**
 * Returns the root node.
 * Used internally by interceptors.
 * Don't use as client, this method will go away.
 */
public Node getRoot()
{
return root;
}
/**
 * Returns the local channel address, or null if no channel is set.
 */
public Object getLocalAddress()
{
return channel != null ? channel.getLocalAddress() : null;
}
/**
 * Returns the cluster members as a Vector. Note: the live collection is
 * returned, not a copy.
 */
public Vector<Address> getMembers()
{
return members;
}
/**
 * Returns <code>true</code> if this node is the group coordinator.
 */
public boolean isCoordinator()
{
return coordinator;
}
/**
 * Returns the transaction table (GlobalTransaction -&gt; TransactionEntry).
 */
public TransactionTable getTransactionTable()
{
return tx_table;
}
/**
 * Returns the lock table (locks held by threads outside transactions).
 */
public Map getLockTable()
{
return lock_table;
}
/**
 * Returns the contents of the TransactionTable as a string (verbose form).
 */
public String dumpTransactionTable()
{
return tx_table.toString(true);
}
/**
 * Always returns false; deadlock detection is no longer supported.
 *
 * @deprecated
 */
public boolean getDeadlockDetection()
{
return false;
}
/**
 * Does nothing except log a warning; deadlock detection is no longer supported.
 *
 * @deprecated
 */
public void setDeadlockDetection(boolean dt)
{
log.warn("Using deprecated configuration element 'DeadlockDetection'. Will be ignored.");
}
/**
 * Used for testing only - sets the head of the interceptor chain.
 */
public void setInterceptorChain(Interceptor i)
{
interceptor_chain = i;
}
/**
 * Returns the interceptor chain flattened into a list.
 */
public List<Interceptor> getInterceptors()
{
return InterceptorChainFactory.asList(interceptor_chain);
}
/**
 * Returns the underlying cache loader in use, or null if no cache loader
 * manager is configured.
 */
public CacheLoader getCacheLoader()
{
if (cacheLoaderManager == null) return null;
return cacheLoaderManager.getCacheLoader();
}
/**
 * Used for PojoCache. No-op here: a plain CacheImpl ignores PojoCache config
 * and only logs a warning.
 *
 * @param config the (unused) PojoCache configuration element
 * @throws CacheException declared for interface compatibility; never thrown here
 */
public void setPojoCacheConfig(Element config) throws CacheException
{
log.warn("setPojoCacheConfig(): You have a PojoCache config that is not used in CacheImpl.");
}
/**
 * Always returns null; see {@link #setPojoCacheConfig}.
 */
public Element getPojoCacheConfig()
{
return null;
}
/**
 * Returns the JGroups MessageListener in use.
 */
public MessageListener getMessageListener()
{
return ml;
}
/**
 * Returns the class name of the eviction interceptor.
 */
public String getEvictionInterceptorClass()
{
return this.evictionInterceptorClass;
}
/**
 * Enables or disables the asynchronous replication queue.
 * Enabling is a no-op if a queue already exists; the queue is only started
 * when the configured interval is non-negative. Disabling stops and discards
 * any existing queue.
 */
private void setUseReplQueue(boolean flag)
{
   if (!flag)
   {
      // Disable: tear down any existing queue.
      if (repl_queue != null)
      {
         repl_queue.stop();
         repl_queue = null;
      }
      return;
   }
   if (repl_queue != null)
   {
      return; // already enabled
   }
   repl_queue = new ReplicationQueue(this, configuration.getReplQueueInterval(), configuration.getReplQueueMaxElements());
   if (configuration.getReplQueueInterval() >= 0)
   {
      repl_queue.start();
   }
}
/**
 * Returns the replication queue, or null when async queueing is disabled.
 */
public ReplicationQueue getReplQueue()
{
return repl_queue;
}
/**
 * Sets the global lock isolation level via the LockStrategyFactory.
 *
 * @param level the isolation level to use
 */
private void setIsolationLevel(IsolationLevel level)
{
LockStrategyFactory.setIsolationLevel(level);
}
/**
 * Sets the TransactionManagerLookup object, used to locate a TransactionManager
 * if none was injected via the configuration.
 *
 * @param l the lookup to use
 */
public void setTransactionManagerLookup(TransactionManagerLookup l)
{
this.tm_lookup = l;
}
/**
 * Returns the TransactionManager in use (may be null before create()).
 */
public TransactionManager getTransactionManager()
{
return tm;
}
/**
 * Fetches the group state from the current coordinator. If successful, this
 * will trigger a JChannel setState() call.
 *
 * @param timeout max time (ms) to wait for the state
 * @throws ChannelNotConnectedException if no channel is set
 */
public void fetchState(long timeout) throws ChannelClosedException, ChannelNotConnectedException
{
   if (channel == null) throw new ChannelNotConnectedException();
   boolean stateReceived = channel.getState(null, timeout);
   if (stateReceived)
   {
      log.debug("fetchState(): state was retrieved successfully");
   }
   else
   {
      log.debug("fetchState(): state could not be retrieved (first member)");
   }
}
/**
 * Fetches partial state for the subtree rooted at <code>sourceTarget</code>
 * and integrates it at <code>integrationTarget</code>. The two Fqns are encoded
 * into a single state id separated by PARTIAL_STATE_DELIMETER.
 */
public void fetchPartialState(Object sources[], Fqn sourceTarget, Fqn integrationTarget) throws Exception
{
String encodedStateId = sourceTarget + StateTransferManager.PARTIAL_STATE_DELIMETER + integrationTarget;
fetchPartialState(sources, encodedStateId);
}
/**
 * Fetches partial state for the given subtree from one of the given source members.
 *
 * @param sources candidate members to fetch from
 * @param subtree root of the subtree whose state should be fetched; must not be null
 * @throws IllegalArgumentException if subtree is null
 */
public void fetchPartialState(Object sources[], Fqn subtree) throws Exception
{
   if (subtree == null)
   {
      // BUGFIX: the old message appended the (always-null) subtree, producing
      // "Invalid subtree null"; say what is actually wrong instead.
      throw new IllegalArgumentException("Cannot fetch partial state: subtree is null");
   }
   fetchPartialState(sources, subtree.toString());
}
/**
 * Fetches the partial state identified by <code>stateId</code> from the first of the
 * given members able to supply it, skipping the local address. Logs and returns
 * (rather than throwing) when no usable targets or stateId are given, since a
 * LOCAL-mode cache may legitimately be asked for partial state.
 */
private void fetchPartialState(Object sources[], String stateId) throws Exception
{
   if (sources == null || sources.length < 1 || stateId == null)
   {
      // should this really be throwing an exception? Are there valid use cases where partial state may not be available? - Manik
      // Yes -- cache is configured LOCAL but app doesn't know it -- Brian
      if (log.isWarnEnabled())
      {
         // BUGFIX: guard against sources == null -- Arrays.asList(null) would have
         // thrown an NPE from inside this warning path.
         log.warn("Cannot fetch partial state, targets are " + (sources == null ? null : Arrays.asList(sources)) +
                 " and stateId is " + stateId);
      }
      return;
   }
   ArrayList targets = new ArrayList(Arrays.asList(sources));
   //skip *this* node as a target
   targets.remove(getLocalAddress());
   if (targets.isEmpty())
   {
      // Definitely no exception here -- this happens every time the 1st node in the
      // cluster activates a region!! -- Brian
      log.debug("Cannot fetch partial state. There are no target members specified");
      return;
   }
   log.debug("Node " + getLocalAddress() + " fetching partial state " + stateId + " from members " + targets);
   boolean successfulTransfer = false;
   // Try each target in turn until one transfer succeeds.
   for (Iterator iter = targets.iterator(); iter.hasNext() && !successfulTransfer;)
   {
      Address target = (Address) iter.next();
      log.debug("Node " + getLocalAddress() + " fetching partial state " + stateId + " from member " + target);
      isStateSet = false;
      successfulTransfer = channel.getState(target, stateId, stateFetchTimeout);
      if (successfulTransfer)
      {
         try
         {
            // Block until the message listener reports the state has been applied.
            ml.waitForState();
         }
         catch (Exception transferFailed)
         {
            // Application of the state failed; fall through and try the next target.
            successfulTransfer = false;
         }
      }
      log.debug("Node " + getLocalAddress() + " fetching partial state " + stateId + " from member " + target + (successfulTransfer ? " successful" : " failed"));
   }
   if (!successfulTransfer)
   {
      log.debug("Node " + getLocalAddress() + " could not fetch partial state " + stateId + " from any member " + targets);
   }
}
/**
 * Lifecycle method: initializes the cache without starting it. Sets up the notifier,
 * transaction manager, cache loader, region manager, eviction policy and -- for
 * clustered cache modes -- the JGroups channel and RPC dispatcher. Safe to call once
 * before {@link #start()}; {@link #start()} invokes it automatically if needed.
 *
 * @throws Exception on configuration or channel-creation failure
 */
public void create() throws Exception
{
if (notifier == null) notifier = new Notifier(this);
// state transfer is given slightly longer than lock acquisition to complete
stateFetchTimeout = configuration.getLockAcquisitionTimeout() + 5000;
// optimistic locking requires a versioned root node
if (configuration.isNodeLockingOptimistic())
{
root = NodeFactory.getInstance().createRootDataNode(NodeFactory.NodeType.VERSIONED_NODE, this);
}
setUseReplQueue(configuration.isUseReplQueue());
setIsolationLevel(configuration.getIsolationLevel());
// See if we had a TransactionManager injected into our config
this.tm = configuration.getRuntimeConfig().getTransactionManager();
if (tm == null)
{
// Nope. See if we can look it up from JNDI
if (this.tm_lookup == null && configuration.getTransactionManagerLookupClass() != null)
{
Class clazz = Thread.currentThread().getContextClassLoader().loadClass(configuration.getTransactionManagerLookupClass());
this.tm_lookup = (TransactionManagerLookup) clazz.newInstance();
}
try
{
if (tm_lookup != null)
{
tm = tm_lookup.getTransactionManager();
configuration.getRuntimeConfig().setTransactionManager(tm);
}
else
{
log.warn("No transaction manager lookup class has been defined. Transactions cannot be used");
}
}
catch (Exception e)
{
// lookup failure is non-fatal: the cache runs without transactions
log.debug("failed looking up TransactionManager, will not use transactions", e);
}
}
// create cache loader
if ((configuration.getCacheLoaderConfig() != null || cloaderConfig != null) && cacheLoaderManager == null)
{
initialiseCacheLoaderManager();
}
getRegionManager();// make sure we create one
createEvictionPolicy();
switch (configuration.getCacheMode())
{
case LOCAL:
log.debug("cache mode is local, will not create the channel");
break;
case REPL_SYNC:
case REPL_ASYNC:
case INVALIDATION_ASYNC:
case INVALIDATION_SYNC:
if (log.isDebugEnabled()) log.debug("cache mode is " + configuration.getCacheMode());
if (channel != null)
{// already started
log.info("channel is already running");
return;
}
// Try to create a multiplexer channel
channel = getMultiplexerChannel();
if (channel != null)
{// mux channel
if (log.isDebugEnabled())
{
log.debug("Created Multiplexer Channel for cache cluster " + configuration.getClusterName() +
" using stack " + configuration.getMultiplexerStack());
}
}
else
{
// no multiplexer available; fall back to a dedicated JChannel
if (configuration.getClusterConfig() == null)
{
configuration.setClusterConfig(getDefaultProperties());
log.debug("setting cluster properties to default value");
}
channel = new JChannel(configuration.getClusterConfig());
if (log.isTraceEnabled())
{
log.trace("cache properties: " + configuration.getClusterConfig());
}
}
channel.setOpt(Channel.AUTO_RECONNECT, true);
channel.setOpt(Channel.AUTO_GETSTATE, true);
channel.setOpt(Channel.BLOCK, true);
/* Used for JMX jconsole for JDK5.0
ArrayList servers=MBeanServerFactory.findMBeanServer(null);
if(servers == null || servers.size() == 0) {
throw new Exception("No MBeanServers found;" +
"\nJmxTest needs to be run with an MBeanServer present, or inside JDK 5"); }
MBeanServer server=(MBeanServer)servers.get(0);
JmxConfigurator.registerChannel(channel, server, "JGroups:channel=" + channel.getChannelName() , true);
*/
// always use the InactiveRegionAwareRpcDispatcher - exceptions due to regions not being active should not propagate to remote
// nodes as errors. - Manik
disp = new InactiveRegionAwareRpcDispatcher(channel, ml, this, this);
// disp = new RpcDispatcher(channel, ml, this, this);
disp.setMarshaller(getMarshaller());
setBuddyReplicationConfig(configuration.getBuddyReplicationConfig());
break;
default:
throw new IllegalArgumentException("cache mode " + configuration.getCacheMode() + " is invalid");
}
// build interceptor chain
interceptor_chain = new InterceptorChainFactory().buildInterceptorChain(this);
getRegionManager().setDefaultInactive(configuration.isInactiveOnStartup());
// remember create() ran so start() does not repeat it
useCreateService = true;
}
/**
 * Decides whether an initial state transfer should be performed on startup: only when
 * the cache is active on startup, buddy replication is disabled, and either in-memory
 * state fetching or persistent-state fetching is configured.
 */
protected boolean shouldFetchStateOnStartup()
{
   if (configuration.isInactiveOnStartup() || buddyManager != null) return false;
   boolean loaderFetch = cacheLoaderManager != null && cacheLoaderManager.isFetchPersistentState();
   return configuration.isFetchInMemoryState() || loaderFetch;
}
/**
 * Lifecycle method: starts the cache. Calls {@link #create()} if it has not been
 * invoked yet, starts the cache loader, connects the channel (in clustered modes),
 * performs the initial state transfer if configured, initializes buddy replication,
 * preloads from the cache loader, determines coordinatorship and starts eviction.
 *
 * @throws Exception if the channel cannot connect or the initial state transfer fails
 */
public void start() throws Exception
{
// Get around the problem of standalone user forgets to call create.
if (!useCreateService)
{
create();
}
// cache loaders should be initialised *before* any state transfers take place to prevent
// exceptions involving cache loaders not being started. - Manik
if (cacheLoaderManager != null)
{
cacheLoaderManager.startCacheLoader();
}
switch (configuration.getCacheMode())
{
case LOCAL:
break;
case REPL_SYNC:
case REPL_ASYNC:
case INVALIDATION_ASYNC:
case INVALIDATION_SYNC:
channel.connect(configuration.getClusterName());
if (log.isInfoEnabled())
{
log.info("CacheImpl local address is " + channel.getLocalAddress());
}
if (shouldFetchStateOnStartup())
{
try
{
fetchStateOnStartup();
}
catch (Exception e)
{
// make sure we disconnect from the channel before we throw this exception!
// JBCACHE-761
channel.disconnect();
channel.close();
throw e;
}
}
if (buddyManager != null)
{
buddyManager.init(this);
// buddy replication and the replication queue are mutually exclusive
if (configuration.isUseReplQueue())
{
log.warn("Replication queue not supported when using buddy replication. Disabling repliction queue.");
configuration.setUseReplQueue(false);
repl_queue = null;
}
}
break;
default:
throw new IllegalArgumentException("cache mode " + configuration.getCacheMode() + " is invalid");
}
//now attempt to preload the cache from the loader - Manik
if (cacheLoaderManager != null)
{
cacheLoaderManager.preloadCache();
}
// Find out if we are coordinator (blocks until view is received)
// TODO should this be moved above the buddy manager code??
determineCoordinator();
// start any eviction threads.
if (regionManager.isUsingEvictions())
{
regionManager.startEvictionThread();
}
notifier.notifyCacheStarted(this);
started = true;
log.info("JBoss Cache version: " + getVersion());
}
/**
 * Lifecycle method: tears down references established by {@link #create()}.
 * (Despite the historical "No-op" javadoc, this clears the create flag, the
 * region manager and the notifier.)
 */
public void destroy()
{
useCreateService = false;
regionManager = null;
notifier = null;
}
/**
 * Lifecycle method: stops the cache. Closes and discards the channel and dispatcher,
 * clears the membership view, stops the replication queue and cache loader, and fires
 * the cacheStopped notification before detaching all listeners.
 */
public void stop()
{
if (channel != null)
{
log.info("stop(): closing the channel");
channel.close();
channel = null;
}
if (disp != null)
{
log.info("stop(): stopping the dispatcher");
disp.stop();
disp = null;
}
if (members != null && members.size() > 0)
{
members.clear();
}
coordinator = false;
if (repl_queue != null)
{
repl_queue.stop();
}
if (cacheLoaderManager != null)
{
cacheLoaderManager.stopCacheLoader();
}
// notify listeners last, then detach them so a restarted cache begins clean
if (notifier != null)
{
notifier.notifyCacheStopped(this);
notifier.removeAllCacheListeners();
notifier.setEvictionPolicyListener(null);
}
useCreateService = false;
started = false;
}
/* ----------------------- End of MBeanSupport ----------------------- */
/* ----------------------- Start of buddy replication specific methods ------------*/
/**
 * Applies the buddy replication configuration element. A BuddyManager is retained only
 * if the configuration actually enables buddy replication, in which case the buddy
 * backup subtree is registered as an internal Fqn (excluded from standard state transfer).
 *
 * @param config the buddy replication config; may be null
 */
private void setBuddyReplicationConfig(BuddyReplicationConfig config)
{
   if (config == null) return;

   buddyManager = new BuddyManager(config);
   if (buddyManager.isEnabled())
   {
      internalFqns.add(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN);
   }
   else
   {
      buddyManager = null;
   }
}
/**
 * Retrieves the BuddyManager configured for this cache.
 *
 * @return the BuddyManager, or null if buddy replication is not enabled.
 */
public BuddyManager getBuddyManager()
{
return buddyManager;
}
/**
 * Returns a Set&lt;Fqn&gt; of Fqns of the topmost node of internal regions that
 * should not be included in standard state transfers. Will include
 * {@link BuddyManager#BUDDY_BACKUP_SUBTREE} if buddy replication is
 * enabled.
 *
 * @return an unmodifiable Set&lt;Fqn&gt;. Will not return <code>null</code>.
 */
public Set getInternalFqns()
{
return Collections.unmodifiableSet(internalFqns);
}
/* ----------------------- End of buddy replication specific methods ------------*/
/**
 * Enables eviction on the region manager when a valid eviction configuration is
 * present; otherwise explicitly disables evictions.
 */
protected void createEvictionPolicy()
{
   boolean haveValidConfig = configuration.getEvictionConfig() != null
         && configuration.getEvictionConfig().isValidConfig();
   if (haveValidConfig)
   {
      regionManager.setEvictionConfig(configuration.getEvictionConfig());
      regionManager.setUsingEvictions(true);
   }
   else
   {
      regionManager.setUsingEvictions(false);
      log.debug("Not using an EvictionPolicy");
   }
}
/**
 * Loads the indicated Fqn, plus all parents recursively, from the CacheLoader.
 * If no CacheLoader is present this is a no-op.
 *
 * @param fqn string form of the Fqn to preload
 * @throws Exception on loader failure
 */
public void load(String fqn) throws Exception
{
   if (cacheLoaderManager == null) return;
   cacheLoaderManager.preload(Fqn.fromString(fqn), true, true);
}
/**
 * Recomputes the cached <code>coordinator</code> flag by comparing the current
 * coordinator's address with our local address. May block in {@link #getCoordinator()}
 * until a view has been received.
 */
protected void determineCoordinator()
{
// Synchronize on members to make the answer atomic for the current view
synchronized (members)
{
Address coord = getCoordinator();
coordinator = (coord == null ? false : coord.equals(getLocalAddress()));
}
}
/**
 * Returns the address of the coordinator (the first member of the current view),
 * or null if there is no coordinator. If no view has been received yet, blocks
 * until viewAccepted() notifies the members list.
 */
public Address getCoordinator()
{
if (channel == null)
{
return null;
}
synchronized (members)
{
if (members.size() == 0)
{
log.debug("getCoordinator(): waiting on viewAccepted()");
try
{
// NOTE(review): wait() is not in a loop, so a spurious wakeup or an
// interrupt before the view arrives yields null below -- confirm callers
// tolerate a transient null here.
members.wait();
}
catch (InterruptedException iex)
{
log.error("getCoordinator(): Interrupted while waiting for members to be set", iex);
}
}
// coordinator is by convention the first member of the view
return members.size() > 0 ? (Address) members.get(0) : null;
}
}
// ----------- Marshalling and State Transfer -----------------------
/**
 * Registers a specific classloader for the region defined by a fully qualified name;
 * children of the region use this classloader for (un)marshalling.
 *
 * @param fqn string form of the region Fqn
 * @param cl  the class loader to use
 * @throws RegionNameConflictException if the registration conflicts with an existing region
 * @see #getMarshaller
 */
public void registerClassLoader(String fqn, ClassLoader cl) throws RegionNameConflictException
{
   Fqn region = Fqn.fromString(fqn);
   regionManager.registerClassLoader(region, cl);
}
/**
 * Unregisters the class loader previously associated with a region.
 *
 * @param fqn string form of the region Fqn
 * @throws RegionNotFoundException if no such region registration exists
 */
public void unregisterClassLoader(String fqn) throws RegionNotFoundException
{
   Fqn region = Fqn.fromString(fqn);
   regionManager.unregisterClassLoader(region);
}
/**
 * Creates a subtree in the local cache, building each missing node along the Fqn,
 * and returns the node at the subtree root. Each parent is write-locked only for the
 * duration of the child creation and the lock is force-released afterwards.
 *
 * @param subtree Fqn of the subtree to create
 * @return the node at <code>subtree</code> (null only if subtree is empty)
 * @throws CacheException if interrupted while acquiring a lock
 */
protected Node createSubtreeRootNode(Fqn subtree) throws CacheException
{
NodeSPI parent = root;
NodeSPI child = null;
Object owner = getOwnerForLock();
Object name;
NodeFactory factory = NodeFactory.getInstance();
// node type must match the configured locking scheme
NodeFactory.NodeType type = configuration.isNodeLockingOptimistic()
? NodeFactory.NodeType.VERSIONED_NODE
: NodeFactory.NodeType.UNVERSIONED_NODE;
// walk the Fqn element by element, creating any missing link
for (int i = 0; i < subtree.size(); i++)
{
name = subtree.get(i);
child = (NodeSPI) parent.getChild(new Fqn(name));
if (child == null)
{
// Lock the parent, create and add the child
try
{
parent.getLock().acquire(owner, configuration.getSyncReplTimeout(), NodeLock.LockType.WRITE);
}
catch (InterruptedException e)
{
log.error("Interrupted while locking" + parent.getFqn(), e);
throw new CacheException(e.getLocalizedMessage(), e);
}
try
{
child = (NodeSPI) factory.createDataNode(type, name,
subtree.getFqnChild(i + 1),
parent, null, true, null, this);
parent.addChild(name, child);
}
finally
{
// always release, even if createDataNode/addChild failed
if (log.isDebugEnabled())
{
log.debug("forcing release of locks in " + parent.getFqn());
}
try
{
parent.getNodeSPI().getLock().releaseAll();
}
catch (Throwable t)
{
log.error("failed releasing locks", t);
}
}
}
// descend one level for the next iteration
parent = child;
}
return child;
}
/**
 * Evicts the node at <code>subtree</code> along with all descendant nodes, removing
 * children first and the subtree root last. Evictions are local-only and generate
 * no undo operations or node events.
 *
 * @param subtree Fqn indicating the uppermost node of the portion of the cache to evict
 * @throws CacheException on removal failure
 */
protected void _evictSubtree(Fqn subtree) throws CacheException
{
   // node may already have been removed (e.g. recursively); nothing to do then
   if (!exists(subtree)) return;

   if (log.isTraceEnabled())
   {
      log.trace("_evictSubtree(" + subtree + ")");
   }

   // Recursively remove any children. Snapshot the names first so removal does not
   // interfere with iteration.
   Set children = getChildrenNames(subtree);
   if (children != null)
   {
      for (Object childName : children.toArray())
      {
         Fqn childFqn = new Fqn(subtree, childName);
         // args: no tx, no undo ops, no node event, is an eviction
         _remove(null, childFqn, false, false, true);
      }
   }

   // Finally remove the root node of the subtree itself
   _remove(null, subtree, false, false, true);
}
/**
 * Returns the state for the portion of the cache named by <code>fqn</code>.
 * <p/>
 * State returned is a serialized byte[][], element 0 is the transient state
 * (or null), and element 1 is the persistent state (or null).
 *
 * @param fqn Fqn indicating the uppermost node in the
 * portion of the cache whose state should be returned.
 * @param timeout max number of ms this method should wait to acquire
 * a read lock on the nodes being transferred
 * @param force if a read lock cannot be acquired after
 * <code>timeout</code> ms, should the lock acquisition
 * be forced, and any existing transactions holding locks
 * on the nodes be rolled back? <strong>NOTE:</strong>
 * In release 1.2.4, this parameter has no effect.
 * @param suppressErrors should any Throwable thrown be suppressed?
 * @return a serialized byte[][], element 0 is the transient state
 * (or null), and element 1 is the persistent state (or null).
 * <p/>
 * <p/>
 * TODO here only because of BuddyManager state transfer
 * Consider for removal if BuddyManager transfer changes
 */
public byte[] generateState(Fqn fqn, long timeout, boolean force, boolean suppressErrors) throws Throwable
{
MarshalledValueOutputStream out = null;
byte[] result = null;
try
{
// 16K initial buffer; ExposedByteArrayOutputStream avoids copying on getRawBuffer()
ExposedByteArrayOutputStream baos = new ExposedByteArrayOutputStream(16 * 1024);
out = new MarshalledValueOutputStream(baos);
getStateTransferManager().getState(out, fqn, timeout, force, suppressErrors);
result = baos.getRawBuffer();
}
finally
{
// close the stream even when getState throws
Util.close(out);
}
return result;
}
/**
 * Breaks any read or write locks on <code>node</code> (and, recursively, its children)
 * that are held by GlobalTransactions originating from members in <code>deadMembers</code>.
 *
 * @param node        root of the subtree to clean
 * @param deadMembers addresses of members that have left the view
 */
private void removeLocksForDeadMembers(Node node,
Vector deadMembers)
{
Set deadOwners = new HashSet();
NodeLock lock = node.getNodeSPI().getLock();
// collect the writer owner, if dead
Object owner = lock.getWriterOwner();
if (isLockOwnerDead(owner, deadMembers))
{
deadOwners.add(owner);
}
// collect any dead reader owners
Iterator iter = lock.getReaderOwners().iterator();
while (iter.hasNext())
{
owner = iter.next();
if (isLockOwnerDead(owner, deadMembers))
{
deadOwners.add(owner);
}
}
// break each dead owner's transaction lock
for (iter = deadOwners.iterator(); iter.hasNext();)
{
GlobalTransaction deadOwner = (GlobalTransaction) iter.next();
boolean localTx = deadOwner.getAddress().equals(getLocalAddress());
boolean broken = LockUtil.breakTransactionLock(lock, deadOwner, localTx, this);
if (broken && log.isTraceEnabled())
{
log.trace("Broke lock for node " + node.getFqn() +
" held by " + deadOwner);
}
}
// Recursively unlock children
for (Node child : node.getChildren())
{
removeLocksForDeadMembers(child, deadMembers);
}
}
/**
 * Determines whether a lock owner is a GlobalTransaction whose originating member
 * is in the list of dead members.
 *
 * @param owner       the lock owner; may be null or a non-transactional owner
 * @param deadMembers addresses of members that have left the view
 * @return true only if owner is a GlobalTransaction from a dead member
 */
private boolean isLockOwnerDead(Object owner, Vector deadMembers)
{
   // instanceof is false for null, so no separate null check is needed
   if (owner instanceof GlobalTransaction)
   {
      Object addr = ((GlobalTransaction) owner).getAddress();
      return deadMembers.contains(addr);
   }
   return false;
}
/**
 * Performs the initial full state transfer when the cache starts. Blocks until the
 * state has been applied, or determines that we are the coordinator (and therefore
 * the only member with no state to fetch).
 *
 * @throws CacheException if we are not the coordinator and no member supplied state
 */
protected void fetchStateOnStartup() throws Exception
{
long start, stop;
isStateSet = false;
start = System.currentTimeMillis();
boolean rc = channel.getState(null, stateFetchTimeout);
if (rc)
{
// getState() only initiates the transfer; block until it has been applied
ml.waitForState();
stop = System.currentTimeMillis();
if (log.isDebugEnabled())
{
log.debug("state was retrieved successfully (in " + (stop - start) + " milliseconds)");
}
}
else
{
// No one provided us with state. We need to find out if that's because
// we are the coordinator. But we don't know if the viewAccepted() callback
// has been invoked, so call determineCoordinator(), which will block until
// viewAccepted() is called at least once
determineCoordinator();
if (isCoordinator())
{
log.debug("State could not be retrieved (we are the first member in group)");
}
else
{
throw new CacheException("Initial state transfer failed: " +
"Channel.getState() returned false");
}
}
}
// ----------- End Marshalling and State Transfer -----------------------
/**
 * Retrieves the node named by the string form of <code>fqn</code>.
 *
 * @param fqn string name of the node to retrieve
 * @return the node, or null if it does not exist. No guarantees wrt replication or
 *         cache loading are given if the returned node is modified.
 */
public Node get(String fqn) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return get(parsed);
}
/**
 * Retrieves the node with the given fully qualified name via the interceptor chain.
 *
 * @param fqn name of the node to retrieve
 * @return the node, or null if it does not exist. No guarantees wrt replication or
 *         cache loading are given if the returned node is modified.
 */
public Node get(Fqn fqn) throws CacheException
{
   return (Node) invokeMethod(MethodCallFactory.create(MethodDeclarations.getNodeMethodLocal, fqn));
}
/**
 * Returns the raw node for <code>fqn</code>, bypassing the interceptor chain
 * (delegates straight to findNode). Returns null if the node does not exist.
 */
public Node _get(Fqn fqn) throws CacheException
{
return findNode(fqn);
}
/**
 * Returns the raw data map of the node named by <code>fqn</code>, bypassing the
 * interceptor chain, or null if the node does not exist.
 */
public Map _getData(Fqn fqn)
{
   Node n = findNode(fqn);
   return n == null ? null : n.getNodeSPI().getRawData();
}
/**
 * Returns a copy of the set of attribute keys for the node named by the string
 * form of <code>fqn</code>, or null if the node is not found.
 *
 * @param fqn string name of the node
 */
public Set getKeys(String fqn) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return getKeys(parsed);
}
/**
 * Returns a copy of the set of attribute keys for the node, or null if the node
 * is not found. Goes through the interceptor chain.
 *
 * @param fqn name of the node
 */
public Set getKeys(Fqn fqn) throws CacheException
{
   return (Set) invokeMethod(MethodCallFactory.create(MethodDeclarations.getKeysMethodLocal, fqn));
}
/**
 * Returns a defensive copy of the node's raw key set, bypassing the interceptor
 * chain. Returns null if the node does not exist; never returns a null set for an
 * existing node.
 */
public Set _getKeys(Fqn fqn) throws CacheException
{
   Node node = findNode(fqn);
   if (node == null)
   {
      return null;
   }
   Set keys = node.getNodeSPI().getRawData().keySet();
   // See http://jira.jboss.com/jira/browse/JBCACHE-551
   return keys == null ? new HashSet(0) : new HashSet(keys);
}
/**
 * Finds a node by its string name and returns the value for <code>key</code> in its
 * data map. Returns null if the node was not found or the key is absent.
 *
 * @param fqn string form of the node's fully qualified name
 * @param key the key
 */
public Object get(String fqn, Object key) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return get(parsed, key);
}
/**
 * Finds a node and returns the value for <code>key</code> in its data map, firing a
 * node-visited event. Returns null if the node was not found or the key is absent.
 *
 * @param fqn the node's fully qualified name
 * @param key the key
 */
public Object get(Fqn fqn, Object key) throws CacheException
{
   // true -> fire node-visited notifications
   return get(fqn, key, true);
}
/**
 * Returns the value for <code>key</code> on the node at <code>fqn</code>, bypassing
 * the interceptor chain. Fires nodeVisited notifications (pre and post) only when
 * <code>sendNodeEvent</code> is true.
 */
public Object _get(Fqn fqn, Object key, boolean sendNodeEvent) throws CacheException
{
   if (log.isTraceEnabled())
   {
      log.trace("_get(" + "\"" + fqn + "\", \"" + key + "\", \"" + sendNodeEvent + "\")");
   }
   if (sendNodeEvent) notifier.notifyNodeVisited(fqn, true);
   Node node = findNode(fqn);
   if (node == null)
   {
      log.trace("node not found");
      return null;
   }
   if (sendNodeEvent) notifier.notifyNodeVisited(fqn, false);
   return node.get(key);
}
/**
 * Key lookup routed through the interceptor chain; <code>sendNodeEvent</code>
 * controls whether node-visited notifications are fired.
 */
protected Object get(Fqn fqn, Object key, boolean sendNodeEvent) throws CacheException
{
   return invokeMethod(MethodCallFactory.create(MethodDeclarations.getKeyValueMethodLocal, fqn, key, sendNodeEvent));
}
/**
 * Like <code>get()</code> but without triggering a node visit event. This is used
 * to prevent refresh of the cache data in the eviction policy.
 *
 * @param fqn node to look in
 * @param key key to read
 * @deprecated This will go away.
 */
public Object peek(Fqn fqn, Object key) throws CacheException
{
return get(fqn, key, false);
}
/**
 * Retrieves a node internally without triggering notifications or cache-loader
 * activity; includes nodes marked as removed.
 *
 * @deprecated This will go away.
 */
public NodeSPI peek(Fqn fqn)
{
return findInternal(fqn, true);
}
/**
 * Checks whether the node named by the string form of <code>fqn</code> exists in the
 * current in-memory state. Acquires no locks (result may be a dirty read) and does
 * not consult the cache loader (may return false for an evicted node).
 *
 * @param fqn string form of the node's fully qualified name
 * @return whether the node exists
 */
public boolean exists(String fqn)
{
   Fqn parsed = Fqn.fromString(fqn);
   return exists(parsed);
}
/**
 * Checks whether a given node exists in the current in-memory state. Acquires no
 * locks (result may be a dirty read) and does not consult the cache loader (may
 * return false for an evicted node). Nodes marked as removed are treated as absent.
 *
 * @param fqn the node's fully qualified name
 * @return whether the node exists
 */
public boolean exists(Fqn fqn)
{
   return findInternal(fqn, false) != null;
}
/**
 * Walks the in-memory tree to the node named by <code>fqn</code> without any
 * cache-loader interaction. A null or empty Fqn resolves to the root.
 *
 * @param fqn                            path to resolve
 * @param includeNodesMarkedAsRemoved    when false, deleted nodes are reported as absent
 * @return the node, or null if any path element is missing (or deleted, when excluded)
 */
private NodeSPI findInternal(Fqn fqn, boolean includeNodesMarkedAsRemoved)
{
   if (fqn == null || fqn.size() == 0) return root;

   NodeSPI current = root;
   for (int i = 0, size = fqn.size(); i < size; i++)
   {
      current = (NodeSPI) current.getNodeSPI().getChildrenMap().get(fqn.get(i));
      if (current == null || (!includeNodesMarkedAsRemoved && current.isDeleted()))
      {
         return null;
      }
   }
   return current;
}
/**
 * Checks whether <code>key</code> exists on the node named by the string form of
 * <code>fqn</code>; in-memory only.
 */
public boolean exists(String fqn, Object key)
{
   Fqn parsed = Fqn.fromString(fqn);
   return exists(parsed, key);
}
/**
 * Checks whether a given key exists in the given node. Does not interact with the
 * CacheLoader, so the behavior differs from {@link #get(Fqn,Object)}.
 *
 * @param fqn the node's fully qualified name
 * @param key the key to check
 * @return whether the node exists and contains the key
 */
public boolean exists(Fqn fqn, Object key)
{
   Node n = findInternal(fqn, false);
   return n != null && n.getKeys().contains(key);
}
/**
 * Stores <code>data</code> in the node named by the string form of <code>fqn</code>,
 * creating the node and any missing parents. New data overrides existing entries.
 * Fires nodeModified() if the node existed, otherwise nodeCreated().
 *
 * @param fqn  string form of the node's fully qualified name
 * @param data the new data; may be null if no data should be set
 */
public void put(String fqn, Map data) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   put(parsed, data);
}
/**
 * Stores <code>data</code> in the named node, creating the node and any missing
 * parents. New data overrides existing entries. Fires nodeModified() if the node
 * existed, otherwise nodeCreated(). Runs under the current transaction, if any.
 *
 * @param fqn  the node's fully qualified name
 * @param data the new data; may be null if no data should be set
 */
public void put(Fqn fqn, Map data) throws CacheException
{
   MethodCall call = MethodCallFactory.create(
         MethodDeclarations.putDataMethodLocal, getCurrentTransaction(), fqn, data, true);
   invokeMethod(call);
}
/**
 * Adds a key/value pair to the node named by the string form of <code>fqn</code>,
 * creating the node if needed. Fires nodeModified() if the node existed, otherwise
 * nodeCreated().
 *
 * @param fqn   string form of the node's fully qualified name
 * @param key   the key
 * @param value the value
 * @return the previous value, if the node was present
 */
public Object put(String fqn, Object key, Object value) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return put(parsed, key, value);
}
/**
 * Adds a key/value pair to the named node, creating the node if needed. Fires
 * nodeModified() if the node existed, otherwise nodeCreated(). Runs under the
 * current transaction, if any.
 *
 * @param fqn   the node's fully qualified name
 * @param key   the key
 * @param value the value
 * @return the previous value, if the node was present
 */
public Object put(Fqn fqn, Object key, Object value) throws CacheException
{
   MethodCall call = MethodCallFactory.create(
         MethodDeclarations.putKeyValMethodLocal, getCurrentTransaction(), fqn, key, value, true);
   return invokeMethod(call);
}
/**
 * Removes the node named by the string form of <code>fqn</code> from the cache.
 *
 * @param fqn string form of the node's fully qualified name
 */
public void remove(String fqn) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   remove(parsed);
}
/**
 * Removes the named node from the cache, under the current transaction if any.
 *
 * @param fqn the node's fully qualified name
 */
public void remove(Fqn fqn) throws CacheException
{
   MethodCall call = MethodCallFactory.create(
         MethodDeclarations.removeNodeMethodLocal, getCurrentTransaction(), fqn, true);
   invokeMethod(call);
}
/**
 * Called by the eviction policy provider. Eviction is performed only in local mode --
 * the node removal is not replicated. Replica nodes are therefore temporarily out of
 * sync, which is acceptable since the value will be fetched again once {@link #get}
 * returns null; after that, the contents are in sync again.
 *
 * @param fqn everything associated with this fqn will be removed
 * @throws CacheException on failure
 */
public void evict(Fqn fqn) throws CacheException
{
   invokeMethod(MethodCallFactory.create(MethodDeclarations.evictNodeMethodLocal, fqn));
}
/**
 * Removes <code>key</code> from the data map of the node named by the string form
 * of <code>fqn</code>.
 *
 * @param fqn string form of the node's fully qualified name
 * @param key the key to be removed
 * @return the previous value, or null if none was associated with the key
 */
public Object remove(String fqn, Object key) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return remove(parsed, key);
}
/**
 * Removes <code>key</code> from the named node's data map, under the current
 * transaction if any.
 *
 * @param fqn the node's fully qualified name
 * @param key the key to be removed
 * @return the previous value, or null if none was associated with the key
 */
public Object remove(Fqn fqn, Object key) throws CacheException
{
   MethodCall call = MethodCallFactory.create(
         MethodDeclarations.removeKeyMethodLocal, getCurrentTransaction(), fqn, key, true);
   return invokeMethod(call);
}
/**
 * Removes all keys and properties from the node named by the string form of
 * <code>fqn</code>, leaving the node itself in place.
 */
public void removeData(String fqn) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   removeData(parsed);
}
/**
 * Removes all keys and properties from the named node, leaving the node itself in
 * place. Runs under the current transaction, if any.
 */
public void removeData(Fqn fqn) throws CacheException
{
   MethodCall call = MethodCallFactory.create(
         MethodDeclarations.removeDataMethodLocal, getCurrentTransaction(), fqn, true);
   invokeMethod(call);
}
/**
* Lock a given node (or the entire subtree starting at this node)
* @param fqn The FQN of the node
* @param owner The owner. This is simply a key into a hashtable, and can be anything, e.g.
* a GlobalTransaction, the current thread, or a special object. If null, it is set to Thread.currentThread()
* @param lock_type The type of lock (RO, RW). Needs to be of type DataNode.LOCK_TYPE_READ or DataNode.LOCK_TYPE_WRITE
* @param lock_recursive If true, the entire subtree is locked, else only the given node
* @throws CacheException If node doesn't exist, a NodeNotExistsException is throw. Other exceptions are
* LockingException, TimeoutException and UpgradeException
*/
// public void lock(Fqn fqn, Object owner, int lock_type, boolean lock_recursive) throws CacheException {
//
// }
/**
* Unlock a given node (or the entire subtree starting at this node)
* @param fqn The FQN of the node
* @param owner The owner. This is simply a key into a hashtable, and can be anything, e.g.
* a GlobalTransaction, the current thread, or a special object. If null, it is set to Thread.currentThread()
* @param unlock_recursive If true, the entire subtree is unlocked, else only the given node
* @param force Release the lock even if we're not the owner
*/
// public void unlock(Fqn fqn, Object owner, boolean unlock_recursive, boolean force) {
//
// }
/**
 * Releases all locks for the node named by the string form of <code>fqn</code>
 * and its entire subtree.
 */
public void releaseAllLocks(String fqn)
{
   Fqn parsed = Fqn.fromString(fqn);
   releaseAllLocks(parsed);
}
/**
 * Releases all locks for this node and its entire subtree. Failures are logged
 * rather than propagated.
 */
public void releaseAllLocks(Fqn fqn)
{
   try
   {
      invokeMethod(MethodCallFactory.create(MethodDeclarations.releaseAllLocksMethodLocal, fqn));
   }
   catch (CacheException e)
   {
      log.error("failed releasing all locks for " + fqn, e);
   }
}
/**
 * Prints a representation (name, fqn and data) of the node named by the string
 * form of <code>fqn</code>.
 */
public String print(String fqn)
{
   Fqn parsed = Fqn.fromString(fqn);
   return print(parsed);
}
/**
 * Prints a representation (name, fqn and data) of the node defined by
 * <code>fqn</code>. Any Throwable raised during the call is rendered via its
 * toString() instead of being propagated.
 */
public String print(Fqn fqn)
{
   Object retval;
   try
   {
      retval = invokeMethod(MethodCallFactory.create(MethodDeclarations.printMethodLocal, fqn));
   }
   catch (Throwable e)
   {
      // render the failure rather than throwing
      retval = e;
   }
   return retval == null ? "" : retval.toString();
}
/**
 * Returns the names of all children of the node named by the string form of
 * <code>fqn</code>, as an unmodifiable set. Returns null if the parent node is not
 * found or has no children.
 *
 * @param fqn string form of the node's fully qualified name
 * @see #getChildrenNames(Fqn)
 */
public Set getChildrenNames(String fqn) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return getChildrenNames(parsed);
}
/**
 * Returns the names of all children of a given node as an unmodifiable set. Returns
 * null if the parent node is not found or has no children. Goes through the
 * interceptor chain.
 *
 * @param fqn the node's fully qualified name
 * @return an unmodifiable set of child names
 */
public Set getChildrenNames(Fqn fqn) throws CacheException
{
   return (Set) invokeMethod(MethodCallFactory.create(MethodDeclarations.getChildrenNamesMethodLocal, fqn));
}
/**
 * Returns the child names of the node at <code>fqn</code>, bypassing the interceptor
 * chain; null if the node does not exist.
 */
public Set _getChildrenNames(Fqn fqn) throws CacheException
{
   Node node = findNode(fqn);
   return node == null ? null : node.getChildrenNames();
}
/**
 * Returns true if the node at <code>fqn</code> exists and has at least one child.
 * In-memory only; no locks are taken.
 */
public boolean hasChild(Fqn fqn)
{
   if (fqn == null) return false;

   Node current = root;
   for (int i = 0, size = fqn.size(); i < size; i++)
   {
      current = current.getNodeSPI().getChildrenMap().get(fqn.get(i));
      if (current == null) return false;
   }
   return !current.getNodeSPI().getChildrenMap().isEmpty();
}
/**
 * Returns a debug string including full details of the cache contents.
 *
 * @see #toString(boolean)
 */
public String toString()
{
return toString(true);
}
/**
 * Returns a debug string; with <code>details</code> false a one-line summary of node
 * and lock counts, otherwise a printout of every child of the root.
 *
 * @param details whether to print per-node contents
 */
public String toString(boolean details)
{
StringBuffer sb = new StringBuffer();
int indent = 0;
if (!details)
{
sb.append(getClass().getName()).append(" [").append(getNumberOfNodes()).append(" nodes, ");
sb.append(getNumberOfLocksHeld()).append(" locks]");
}
else
{
Map<Object, Node> children;
children = root.getNodeSPI().getChildrenMap();
// print each top-level subtree; nodes recurse internally via print()
for (Node n : children.values())
{
n.getNodeSPI().print(sb, indent);
sb.append("\n");
}
}
return sb.toString();
}
/**
 * Prints information about the contents of the nodes in the cache's current in-memory
 * state. Does not load previously evicted nodes from a cache loader, so evicted nodes
 * are not included.
 */
public String printDetails()
{
   StringBuffer buf = new StringBuffer();
   root.printDetails(buf, 0);
   return buf.append("\n").toString();
}
/**
 * Returns lock information for each first-level subtree, one subtree per line.
 */
public String printLockInfo()
{
   StringBuffer buf = new StringBuffer("\n");
   for (Node child : root.getNodeSPI().getChildrenMap().values())
   {
      child.getNodeSPI().getLock().printLockInfo(buf, 0);
      buf.append("\n");
   }
   return buf.toString();
}
/**
 * Returns the number of read or write locks held across the entire cache, by recursively
 * counting locked nodes starting at the root.
 */
public int getNumberOfLocksHeld()
{
   return numLocks(root);
}
/**
 * Recursively counts locked nodes in the subtree rooted at <code>n</code>, including
 * <code>n</code> itself.
 */
private int numLocks(Node n)
{
   int count = n.getNodeSPI().getLock().isLocked() ? 1 : 0;
   for (Node child : n.getNodeSPI().getChildren(true))
   {
      count += numLocks(child);
   }
   return count;
}
/**
 * Returns an <em>approximation</em> of the total number of nodes in the
 * cache, excluding the root node itself. Since this method doesn't acquire any locks,
 * the number might be incorrect, or the method might even throw a
 * ConcurrentModificationException
 */
public int getNumberOfNodes()
{
   return numNodes(root) - 1;
}
/**
 * Recursively counts the nodes in the subtree rooted at <code>n</code>, including
 * <code>n</code> itself. A null node contributes 0.
 */
private int numNodes(Node n)
{
   if (n == null) return 0;
   int total = 1;// n itself
   for (Node child : n.getNodeSPI().getChildrenMap().values())
   {
      total += numNodes(child);
   }
   return total;
}
/**
 * Returns an <em>approximation</em> of the total number of attributes in
 * the cache. Since this method doesn't acquire any locks, the number might
 * be incorrect, or the method might even throw a
 * ConcurrentModificationException
 */
public int getNumberOfAttributes()
{
   return numAttributes(getRoot());
}
/**
 * Returns an <em>approximation</em> of the total number of attributes in the subtree
 * rooted at <code>fqn</code>. No locks are acquired, so the result may be inaccurate.
 *
 * @see #getNumberOfAttributes
 */
public int getNumberOfAttributes(Fqn fqn)
{
   return numAttributes(findNode(fqn));
}
/**
 * Recursively counts the attributes (data-map entries) in the subtree rooted at
 * <code>n</code>.
 *
 * @param n subtree root; may be null (e.g. when {@link #getNumberOfAttributes(Fqn)} is
 *          called with a non-existent Fqn and findNode returns null), in which case 0 is
 *          returned instead of throwing a NullPointerException
 */
private int numAttributes(Node n)
{
   if (n == null) return 0;
   Map<Object, Node> children = n.getNodeSPI().getChildrenMap();
   int count = 0;
   for (Node child : children.values())
   {
      count += numAttributes(child);
   }
   count += n.getData().size();
   return count;
}
/* ---------------------- Remote method calls -------------------- */
/**
 * Invokes a method call on the given members, synchronously (JGroups GET_ALL) or
 * asynchronously (GET_NONE) according to <code>synchronous</code>.
 *
 * @param mbrs         destination members; null means all current members
 * @param method_call  the call to dispatch
 * @param synchronous  if true, wait for all responses
 * @param exclude_self if true, skip the local node
 * @param timeout      call timeout in milliseconds
 * @return the list of responses, or null if the call was discarded
 * @throws Exception
 * @deprecated Note this is due to be moved to an interceptor.
 */
public List callRemoteMethods(List mbrs, MethodCall method_call,
                              boolean synchronous, boolean exclude_self, long timeout)
        throws Exception
{
   int mode = synchronous ? GroupRequest.GET_ALL : GroupRequest.GET_NONE;
   return callRemoteMethods(mbrs, method_call, mode, exclude_self, timeout);
}
/**
 * Overloaded to allow a finer grained control over the JGroups response-gathering mode.
 *
 * @param mbrs         destination members; if null, the current cluster members are used
 * @param method_call  the call to dispatch through the RpcDispatcher
 * @param mode         JGroups GroupRequest mode, e.g. GET_ALL (sync) or GET_NONE (async)
 * @param exclude_self if true, the local address is removed from the destination list
 * @param timeout      call timeout in milliseconds
 * @return null if there is no dispatcher or no valid destinations; an empty list for
 *         async calls; otherwise one entry per response — a ReplicationException for
 *         suspected/unreceived members, the response value otherwise
 * @throws Exception
 * @deprecated Note this is due to be moved to an interceptor.
 */
public List callRemoteMethods(List mbrs, MethodCall method_call, int mode, boolean exclude_self, long timeout)
        throws Exception
{
   RspList rsps;
   Rsp rsp;
   List retval;
   Vector validMembers;
   // no dispatcher means we are not clustered; nothing to call
   if (disp == null)
   {
      return null;
   }
   validMembers = mbrs != null ? new Vector(mbrs) : new Vector(this.members);
   if (exclude_self && validMembers.size() > 0)
   {
      Object local_addr = getLocalAddress();
      if (local_addr != null)
      {
         validMembers.remove(local_addr);
      }
   }
   if (validMembers.size() == 0)
   {
      if (log.isTraceEnabled())
      {
         log.trace("destination list is empty, discarding call");
      }
      return null;
   }
   if (log.isTraceEnabled())
   {
      log.trace("callRemoteMethods(): valid members are " + validMembers + " methods: " + method_call.getArgs()[0]);
   }
   rsps = disp.callRemoteMethods(validMembers, method_call, mode, timeout, buddyManager != null && buddyManager.isEnabled());
   // a null response is 99% likely to be due to a marshalling problem - we throw a NSE, this needs to be changed when
   // JGroups supports http://jira.jboss.com/jira/browse/JGRP-193
   if (rsps == null)
   {
      // return null;
      throw new NotSerializableException("RpcDispatcher returned a null. This is most often caused by args for " + method_call + " not being serializable.");
   }
   if (mode == GroupRequest.GET_NONE)
   {
      return new ArrayList();// async case
   }
   if (log.isTraceEnabled())
   {
      log.trace("(" + getLocalAddress() + "): responses for method " + method_call.getName() + ":\n" + rsps);
   }
   retval = new ArrayList(rsps.size());
   for (int i = 0; i < rsps.size(); i++)
   {
      rsp = (Rsp) rsps.elementAt(i);
      if (rsp.wasSuspected() || !rsp.wasReceived())
      {
         CacheException ex;
         if (rsp.wasSuspected())
         {
            ex = new SuspectException("suspected member: " + rsp.getSender());
         }
         else
         {
            ex = new TimeoutException("timeout for " + rsp.getSender());
         }
         // failed responses are returned as exception entries in the result list, not thrown
         retval.add(new ReplicationException("rsp=" + rsp, ex));
      }
      else
      {
         retval.add(rsp.getValue());
      }
   }
   return retval;
}
/**
 * Invokes <code>method</code> with <code>args</code> on the given members.
 *
 * @deprecated Note this is due to be moved to an interceptor.
 */
public List callRemoteMethods(List members, Method method, Object[] args,
                              boolean synchronous, boolean exclude_self, long timeout)
        throws Exception
{
   MethodCall call = MethodCallFactory.create(method, args);
   return callRemoteMethods(members, call, synchronous, exclude_self, timeout);
}
/**
 * Vector-typed variant of {@link #callRemoteMethods(List, Method, Object[], boolean, boolean, long)}.
 *
 * @deprecated Note this is due to be moved to an interceptor.
 */
public List callRemoteMethods(Vector members, Method method, Object[] args,
                              boolean synchronous, boolean exclude_self, long timeout)
        throws Exception
{
   MethodCall call = MethodCallFactory.create(method, args);
   return callRemoteMethods(members, call, synchronous, exclude_self, timeout);
}
/**
 * Resolves <code>method_name</code> against this class's declared methods and invokes it
 * remotely on the given members.
 *
 * @deprecated Note this is due to be moved to an interceptor.
 */
public List callRemoteMethods(Vector members, String method_name,
                              Class[] types, Object[] args,
                              boolean synchronous, boolean exclude_self, long timeout)
        throws Exception
{
   Method resolved = getClass().getDeclaredMethod(method_name, types);
   return callRemoteMethods(members, resolved, args, synchronous, exclude_self, timeout);
}
/* -------------------- End Remote method calls ------------------ */
/* --------------------- Callbacks -------------------------- */
/* ----- These are VERSIONED callbacks to facilitate JBCACHE-843. Also see docs/design/DataVersion.txt --- */
/**
 * Versioned callback for {@link #_put(GlobalTransaction, Fqn, Map, boolean)} (JBCACHE-843).
 * Delegates with erase_contents=false; the {@link DataVersion} itself is not used by the
 * delegate (presumably consumed by optimistic-locking interceptors — see
 * docs/design/DataVersion.txt; confirm).
 */
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops, DataVersion dv) throws CacheException
{
   _put(tx, fqn, data, create_undo_ops, false, dv);
}
/**
 * Versioned callback for {@link #_put(GlobalTransaction, Fqn, Map, boolean, boolean)}
 * (JBCACHE-843). The {@link DataVersion} argument is ignored by this implementation.
 */
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops, boolean erase_contents, DataVersion dv) throws CacheException
{
   _put(tx, fqn, data, create_undo_ops, erase_contents);
}
/**
 * Versioned callback for {@link #_put(GlobalTransaction, Fqn, Object, Object, boolean)}
 * (JBCACHE-843). The {@link DataVersion} argument is ignored by this implementation.
 *
 * @return the previous value under <code>key</code>, if any
 */
public Object _put(GlobalTransaction tx, Fqn fqn, Object key, Object value, boolean create_undo_ops, DataVersion dv) throws CacheException
{
   return _put(tx, fqn, key, value, create_undo_ops);
}
/**
 * Versioned callback for node removal (JBCACHE-843). Delegates with sendNodeEvent=true;
 * the {@link DataVersion} argument is ignored by this implementation.
 */
public void _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, DataVersion dv) throws CacheException
{
   _remove(tx, fqn, create_undo_ops, true);
}
/**
 * Versioned callback for key removal (JBCACHE-843). The {@link DataVersion} argument is
 * ignored by this implementation.
 *
 * @return the value previously stored under <code>key</code>, if any
 */
public Object _remove(GlobalTransaction tx, Fqn fqn, Object key, boolean create_undo_ops, DataVersion dv) throws CacheException
{
   return _remove(tx, fqn, key, create_undo_ops);
}
/**
 * Versioned callback for data removal (JBCACHE-843). Delegates with sendNodeEvent=true;
 * the {@link DataVersion} argument is ignored by this implementation.
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, DataVersion dv) throws CacheException
{
   _removeData(tx, fqn, create_undo_ops, true);
}
/* ----- End VERSIONED callbacks - Now for the NORMAL callbacks. -------- */
/**
 * Internal put method: parses the string form of the Fqn and delegates to
 * {@link #_put(GlobalTransaction, Fqn, Map, boolean)}.
 *
 * @param fqn             string form of the target node's Fqn
 * @param data            attributes to store
 * @param create_undo_ops if true (the default), compensating undo operations are created;
 *                        rollback() passes false
 */
public void _put(GlobalTransaction tx, String fqn, Map data, boolean create_undo_ops)
        throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   _put(tx, parsed, data, create_undo_ops);
}
/**
 * Internal put method: delegates to the full form with erase_contents=false, so existing
 * attributes are merged with rather than replaced by <code>data</code>.
 *
 * @param fqn             the target node's Fqn
 * @param data            attributes to store
 * @param create_undo_ops if true (the default), compensating undo operations are created;
 *                        rollback() passes false
 */
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops)
        throws CacheException
{
   boolean eraseExisting = false;
   _put(tx, fqn, data, create_undo_ops, eraseExisting);
}
/**
 * Internal put method.
 * Does the real work. Needs to acquire locks if accessing nodes, depending on
 * the value of <tt>locking</tt>. If run inside a transaction, needs to (a) add
 * newly acquired locks to {@link TransactionEntry}'s lock list, (b) add nodes
 * that were created to {@link TransactionEntry}'s node list and (c) create
 * {@link Modification}s and add them to {@link TransactionEntry}'s modification
 * list and (d) create compensating modifications to undo the changes in case
 * of a rollback
 *
 * @param fqn             the target node's Fqn
 * @param data            attributes to store
 * @param create_undo_ops If true, undo operations will be created (default is true).
 *                        Otherwise they will not be created (used by rollback()).
 * @param erase_contents  if true, clear the existing data map before putting the new data
 *                        into it
 */
public void _put(GlobalTransaction tx, Fqn fqn, Map data, boolean create_undo_ops, boolean erase_contents)
        throws CacheException
{
   if (log.isTraceEnabled())
   {
      log.trace("_put(" + tx + ", \"" + fqn + "\", " + data + " undo=" + create_undo_ops + " erase=" + erase_contents + ")");
   }
   Node n = findNodeCheck(tx, fqn);
   Map rawData = n.getNodeSPI().getRawData();
   // pre-modification notification carries the data as it is BEFORE the change
   notifier.notifyNodeModified(fqn, true, rawData);
   // NOTE(review): presumably stops the nested node operations below from re-entering the
   // interceptor chain — confirm against invokeMethod/interceptor behaviour
   getInvocationContext().getOptionOverrides().setBypassInterceptorChain(true);
   // create a compensating method call (reverting the effect of
   // this modification) and put it into the TX's undo list.
   if (tx != null && create_undo_ops)
   {
      // erase and set to previous hashmap contents: the snapshot MUST be taken before the
      // mutation below, hence the MapCopy of the current raw data
      MethodCall undo_op = MethodCallFactory.create(MethodDeclarations.putDataEraseMethodLocal, tx, fqn,
              new MapCopy(rawData),
              false, true);
      tx_table.addUndoOperation(tx, undo_op);
   }
   if (erase_contents) n.clearData();
   n.put(data);
   notifier.notifyNodeModified(fqn, false, rawData);
}
/**
 * Internal put method: parses the string form of the Fqn and delegates.
 *
 * @return Previous value (if any)
 */
public Object _put(GlobalTransaction tx, String fqn, Object key, Object value, boolean create_undo_ops)
        throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return _put(tx, parsed, key, value, create_undo_ops);
}
/**
 * Internal put method: stores a single key/value pair in the node, registering a
 * compensating undo operation with the transaction if required. The undo op is a removal
 * when the key was absent before, or a put of the old value otherwise.
 *
 * @param fqn             the target node's Fqn
 * @param key             attribute key
 * @param value           attribute value
 * @param create_undo_ops if true, compensating undo operations are registered with the tx
 * @return Previous value (if any)
 */
public Object _put(GlobalTransaction tx, Fqn fqn, Object key, Object value, boolean create_undo_ops)
        throws CacheException
{
   if (log.isTraceEnabled())
   {
      log.trace(new StringBuffer("_put(").append(tx).append(", \"").
              append(fqn).append("\", k=").append(key).append(", v=").append(value).append(")"));
   }
   // TODO remove before release
   if (key instanceof Map)
   {
      log.warn("using a map as a key in a map, did you mean to do that?");
   }
   Node n = findNodeCheck(tx, fqn);
   Map rawData = n.getNodeSPI().getRawData();
   // pre-modification notification carries the data as it is BEFORE the change
   notifier.notifyNodeModified(fqn, true, rawData);
   // NOTE(review): presumably stops nested ops from re-entering the interceptor chain — confirm
   getInvocationContext().getOptionOverrides().setBypassInterceptorChain(true);
   Object old_value = n.put(key, value);
   // create a compensating method call (reverting the effect of
   // this modification) and put it into the TX's undo list.
   if (tx != null && create_undo_ops)
   {
      MethodCall undo_op;
      if (old_value == null)
      {
         // key was newly added: undo by removing it
         undo_op = MethodCallFactory.create(MethodDeclarations.removeKeyMethodLocal, tx, fqn, key, false);
      }
      else
      {
         // key existed: undo by restoring the previous value
         undo_op = MethodCallFactory.create(MethodDeclarations.putKeyValMethodLocal, tx, fqn, key, old_value, false);
      }
      // 1. put undo-op in TX' undo-operations list (needed to rollback TX)
      tx_table.addUndoOperation(tx, undo_op);
   }
   notifier.notifyNodeModified(fqn, false, rawData);
   return old_value;
}
/**
 * Internal remove method: parses the string form of the Fqn and delegates.
 */
public void _remove(GlobalTransaction tx, String fqn, boolean create_undo_ops) throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   _remove(tx, parsed, create_undo_ops);
}
/**
 * Internal remove method: delegates with sendNodeEvent=true.
 */
public void _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops) throws CacheException
{
   boolean sendNodeEvent = true;
   _remove(tx, fqn, create_undo_ops, sendNodeEvent);
}
/**
 * Internal remove method: delegates with eviction=false.
 */
public void _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent)
        throws CacheException
{
   boolean eviction = false;
   _remove(tx, fqn, create_undo_ops, sendNodeEvent, eviction);
}
/**
 * Internal method to remove a node; delegates with a null {@link DataVersion} (i.e. no
 * optimistic-locking version check).
 *
 * @param tx              transaction this removal belongs to, or null
 * @param fqn             the node to remove
 * @param create_undo_ops if true, compensating undo operations are registered with the tx
 * @param sendNodeEvent   if true, visit events are fired instead of eviction/removal events
 */
public void _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent, boolean eviction)
        throws CacheException
{
   DataVersion noVersion = null;
   _remove(tx, fqn, create_undo_ops, sendNodeEvent, eviction, noVersion);
}
/**
 * Internal method to remove a node.
 * Performs a remove on a node, passing in a {@link DataVersion} which is used with
 * optimistically locked nodes. Pass in a null if optimistic locking is not used.
 * <p/>
 * If invoked within a rolling-back transaction, performs a realRemove() instead. If fqn is
 * the root, removes each first-level child instead of the root itself.
 *
 * @param tx              transaction this removal belongs to, or null
 * @param fqn             the node to remove
 * @param create_undo_ops if true, a compensating addChild op is registered with the tx
 * @param sendNodeEvent   passed through on recursive root-child removals
 * @param eviction        if true, fires eviction (not removal) events and physically
 *                        detaches the node
 * @param version         optimistic-locking version, or null
 * @throws CacheException
 */
public void _remove(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent, boolean eviction, DataVersion version)
        throws CacheException
{
   NodeSPI n;
   Node parent_node;
   MethodCall undo_op = null;
   if (log.isTraceEnabled())
   {
      log.trace("_remove(" + tx + ", \"" + fqn + "\", undo=" + create_undo_ops + ")");
   }
   // check if this is triggered by a rollback operation ...
   if (tx != null)
   {
      try
      {
         int status = getTransactionManager().getTransaction().getStatus();
         if (status == Status.STATUS_MARKED_ROLLBACK || status == Status.STATUS_ROLLEDBACK || status == Status.STATUS_ROLLING_BACK)
         {
            log.debug("This remove call is triggered by a transaction rollback, as a compensation operation. Do a realRemove() instead.");
            realRemove(fqn, true);
            return;
         }
      }
      catch (Exception e)
      {
         // what do we do here?
         log.trace("Caught exception dealing with transaction manager", e);
      }
   }
   if (fqn.size() == 0)
   {
      // removing the root: remove each first-level child subtree instead
      Set children = getChildrenNames(fqn);
      if (children != null)
      {
         Object[] kids = children.toArray();
         for (int i = 0; i < kids.length; i++)
         {
            Object s = kids[i];
            Fqn tmp = new Fqn(fqn, s);
            try
            {
               _remove(tx, tmp, create_undo_ops, true, eviction);
            }
            catch (Exception e)
            {
               // include the cause so the failure is diagnosable from the log
               log.error("failure removing node " + tmp, e);
            }
         }
      }
      return;
   }
   // Find the node. This will add the temporarily created parent nodes to the TX's node list if tx != null)
   n = findNode(fqn, version);
   if (n == null)
   {
      if (log.isTraceEnabled())
      {
         log.trace("node " + fqn + " not found");
      }
      return;
   }
   if (eviction)
   {
      notifier.notifyNodeEvicted(fqn, true);
   }
   else
   {
      notifier.notifyNodeRemoved(fqn, true, n.getNodeSPI().getRawData());
   }
   parent_node = n.getParent();
   // remove subtree from parent
   if (eviction || configuration.isNodeLockingOptimistic())
   {
      parent_node.getNodeSPI().getChildrenMap().remove(n.getFqn().getLast());
   }
   else
   {
      // pessimistic non-eviction removals only mark the node; physical removal happens later
      n.markAsDeleted(true);
   }
   if (eviction)
   {
      parent_node.getNodeSPI().setChildrenLoaded(false);
   }
   // release all locks for the entire subtree
   // n.getNodeSPI().getLock().releaseAll(tx != null ? tx : (Object) Thread.currentThread());
   // create a compensating method call (reverting the effect of
   // this modification) and put it into the TX's undo list.
   if (tx != null && create_undo_ops && !eviction)
   {
      undo_op = MethodCallFactory.create(MethodDeclarations.addChildMethodLocal, tx, parent_node.getFqn(), n.getFqn().getLast(), n, false);
      // 1. put undo-op in TX' undo-operations list (needed to rollback TX)
      tx_table.addUndoOperation(tx, undo_op);
   }
   if (eviction)
   {
      notifier.notifyNodeEvicted(fqn, false);
   }
   else
   {
      notifier.notifyNodeRemoved(fqn, false, null);
   }
}
/**
 * Internal method to remove a key: parses the string form of the Fqn and delegates.
 *
 * @param fqn string form of the target node's Fqn
 * @param key the attribute key to remove
 * @return the value previously stored under <code>key</code>, if any
 */
public Object _remove(GlobalTransaction tx, String fqn, Object key, boolean create_undo_ops)
        throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   return _remove(tx, parsed, key, create_undo_ops);
}
/**
 * Internal method to remove a key from a node's data map, registering a compensating put
 * of the old value with the transaction when one existed.
 *
 * @param fqn             the target node's Fqn
 * @param key             the attribute key to remove
 * @param create_undo_ops if true, an undo op is registered when an old value was present
 * @return Object the value previously stored under <code>key</code>, or null if the node
 *         was not found or the key was absent
 */
public Object _remove(GlobalTransaction tx, Fqn fqn, Object key, boolean create_undo_ops)
        throws CacheException
{
   Node n = null;
   MethodCall undo_op = null;
   Object old_value = null;
   if (log.isTraceEnabled())
   {
      log.trace("_remove(" + tx + ", \"" + fqn + "\", key=" + key + ")");
   }
   // Find the node. This will lock it (if <tt>locking</tt> is true) and
   // add the temporarily created parent nodes to the TX's node list if tx != null)
   n = findNode(fqn);
   if (n == null)
   {
      log.warn("node " + fqn + " not found");
      return null;
   }
   notifier.notifyNodeModified(fqn, true, n.getNodeSPI().getRawData());
   // NOTE(review): presumably stops nested ops from re-entering the interceptor chain — confirm
   getInvocationContext().getOptionOverrides().setBypassInterceptorChain(true);
   old_value = n.remove(key);
   // create a compensating method call (reverting the effect of
   // this modification) and put it into the TX's undo list.
   if (tx != null && create_undo_ops && old_value != null)
   {
      undo_op = MethodCallFactory.create(MethodDeclarations.putKeyValMethodLocal, tx, fqn, key, old_value, false);
      // 1. put undo-op in TX' undo-operations list (needed to rollback TX)
      tx_table.addUndoOperation(tx, undo_op);
   }
   notifier.notifyNodeModified(fqn, false, n.getNodeSPI().getRawData());
   return old_value;
}
/**
 * Internal method to remove data from a node: parses the string form of the Fqn and
 * delegates.
 */
public void _removeData(GlobalTransaction tx, String fqn, boolean create_undo_ops)
        throws CacheException
{
   Fqn parsed = Fqn.fromString(fqn);
   _removeData(tx, parsed, create_undo_ops);
}
/**
 * Internal method to remove data from a node; delegates with sendNodeEvent=true.
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops)
        throws CacheException
{
   boolean sendNodeEvent = true;
   _removeData(tx, fqn, create_undo_ops, sendNodeEvent);
}
/**
 * Internal method to remove data from a node; delegates with eviction=false.
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent)
        throws CacheException
{
   boolean eviction = false;
   _removeData(tx, fqn, create_undo_ops, sendNodeEvent, eviction);
}
/**
 * Internal method to remove data from a node; delegates with a null {@link DataVersion}
 * (i.e. no optimistic-locking version check).
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent, boolean eviction)
        throws CacheException
{
   DataVersion noVersion = null;
   _removeData(tx, fqn, create_undo_ops, sendNodeEvent, eviction, noVersion);
}
/**
 * Internal method to remove all data from a node, registering a compensating put of the
 * old contents with the transaction when appropriate.
 *
 * @param tx              transaction this removal belongs to, or null
 * @param fqn             the target node's Fqn
 * @param create_undo_ops if true, and this is not an eviction and the node had data, an
 *                        undo op restoring the old contents is registered with the tx
 * @param sendNodeEvent   if true, a node-visited event is fired after the removal instead
 *                        of the eviction/modification event
 * @param eviction        if true, fires eviction events and marks the node's data unloaded
 * @param version         optimistic-locking version passed to findNode, or null
 * @throws CacheException
 */
public void _removeData(GlobalTransaction tx, Fqn fqn, boolean create_undo_ops, boolean sendNodeEvent, boolean eviction, DataVersion version)
        throws CacheException
{
   Node n = null;
   MethodCall undo_op = null;
   if (log.isTraceEnabled())
   {
      log.trace("_removeData(" + tx + ", \"" + fqn + "\")");
   }
   // Find the node. This will lock it (if <tt>locking</tt> is true) and
   // add the temporarily created parent nodes to the TX's node list if tx != null)
   n = findNode(fqn, version);
   if (n == null)
   {
      log.warn("node " + fqn + " not found");
      return;
   }
   // create a compensating method call (reverting the effect of
   // this modification) and put it into the TX's undo list.
   if (tx != null && create_undo_ops && !eviction)
   {
      Map data = n.getData();
      if (!data.isEmpty())
      {
         undo_op = MethodCallFactory.create(MethodDeclarations.putDataMethodLocal,
                 tx, fqn, new MapCopy(data), false);
      }
   }
   if (eviction)
   {
      notifier.notifyNodeEvicted(fqn, true);
   }
   else
   {
      notifier.notifyNodeModified(fqn, true, n.getNodeSPI().getRawData());
   }
   Map raw = n.getNodeSPI().getRawData();
   raw.clear();
   if (eviction)
   {
      n.getNodeSPI().setDataLoaded(false);
   }
   if (sendNodeEvent)
   {
      notifier.notifyNodeVisited(fqn, false);
   }
   else
   {// FIXME Bela did this so GUI view can refresh the view after node is evicted. But this breaks eviction policy, especially AOP!!!!
      if (eviction)
      {
         notifier.notifyNodeEvicted(fqn, false);
      }
      else
      {
         notifier.notifyNodeModified(fqn, false, n.getData());
      }
   }
   // put undo-op in TX' undo-operations list (needed to rollback TX).
   // Guard against undo_op being null: it is only created above for non-eviction calls on
   // nodes that actually had data; registering a null op would break rollback.
   if (tx != null && create_undo_ops && undo_op != null)
   {
      tx_table.addUndoOperation(tx, undo_op);
   }
}
/**
 * Internal evict method called by the eviction policy provider; removes everything
 * associated with the given FQN. If the node has children, only its data is removed
 * (preserving the subtree); otherwise the node itself is removed. Never creates undo
 * operations and never fires node-visited events.
 *
 * @param fqn the node to evict
 * @throws CacheException
 */
public void _evict(Fqn fqn) throws CacheException
{
   // node may already be gone, e.g. removed recursively with an ancestor
   if (!exists(fqn)) return;
   boolean createUndoOps = false;
   boolean fireNodeEvent = false;
   boolean asEviction = true;
   if (log.isTraceEnabled())
   {
      log.trace("_evict(" + fqn + ")");
   }
   if (hasChild(fqn))
   {
      _removeData(null, fqn, createUndoOps, fireNodeEvent, asEviction);
   }
   else
   {
      _remove(null, fqn, createUndoOps, fireNodeEvent, asEviction);
   }
}
/**
 * Internal evict method called by the eviction policy provider, with an optimistic-locking
 * {@link DataVersion}. If the node has children, only its data is removed (preserving the
 * subtree); otherwise the node itself is removed.
 *
 * @param fqn     the node to evict
 * @param version version passed through to the versioned remove/removeData calls
 * @throws CacheException
 */
public void _evict(Fqn fqn, DataVersion version) throws CacheException
{
   // node may already be gone
   if (!exists(fqn)) return;
   boolean createUndoOps = false;
   boolean fireNodeEvent = false;
   boolean asEviction = true;
   if (log.isTraceEnabled())
   {
      log.trace("_evict(" + fqn + ", " + version + ")");
   }
   if (hasChild(fqn))
   {
      _removeData(null, fqn, createUndoOps, fireNodeEvent, asEviction, version);
   }
   else
   {
      _remove(null, fqn, createUndoOps, fireNodeEvent, asEviction, version);
   }
}
/**
* Evicts a key/value pair from a node's attributes. Note that this is <em>local</em>, will not be replicated.
* @param fqn
* @param key
* @throws CacheException
*/
// public void _evict(Fqn fqn, Object key) throws CacheException {
// if(!exists(fqn)) return;
// boolean create_undo_ops = false;
// boolean sendNodeEvent = false;
// boolean eviction=true;
// _removeData(null, fqn, create_undo_ops, sendNodeEvent, eviction);
// }
/**
 * Compensating method to {@link #_remove(GlobalTransaction,Fqn,boolean)}: re-attaches a
 * previously removed child node under its parent (used during transaction rollback).
 *
 * @param gtx        transaction performing the rollback, or null
 * @param parent_fqn Fqn of the parent to re-attach under
 * @param child_name name of the child under the parent
 * @param cn         the detached child node (must be a NodeSPI)
 * @param undoOps    if true, a compensating removeNode op is registered with the tx
 */
public void _addChild(GlobalTransaction gtx, Fqn parent_fqn, Object child_name, Node cn, boolean undoOps)
        throws CacheException
{
   NodeSPI childNode = (NodeSPI) cn;
   if (log.isTraceEnabled())
   {
      log.trace("_addChild(\"" + parent_fqn + "\", \"" + child_name + "\", node=" + childNode + ")");
   }
   // defensive: all three are required to re-attach
   if (parent_fqn == null || child_name == null || childNode == null)
   {
      log.error("parent_fqn or child_name or childNode was null");
      return;
   }
   NodeSPI parentNode = findNode(parent_fqn);
   if (parentNode == null)
   {
      log.warn("node " + parent_fqn + " not found");
      return;
   }
   Fqn fqn = new Fqn(parent_fqn, child_name);
   notifier.notifyNodeCreated(fqn, true);
   parentNode.addChild(child_name, childNode);
   // clear the deleted marker set by _remove; second arg true applies it recursively
   childNode.markAsDeleted(false, true);
   if (gtx != null && undoOps)
   {
      // 1. put undo-op in TX' undo-operations list (needed to rollback TX)
      tx_table.addUndoOperation(gtx, MethodCallFactory.create(MethodDeclarations.removeNodeMethodLocal, gtx, fqn, false));
   }
   notifier.notifyNodeCreated(fqn, false);
}
/**
 * Replicates changes across to other nodes in the cluster. Invoked by the
 * ReplicationInterceptor; calls need to be forwarded to the ReplicationInterceptor in this
 * interceptor chain. The invocation context is flagged as remote in origin for the
 * duration of the call and restored to local afterwards. This method will later be moved
 * entirely into the ReplicationInterceptor.
 */
public Object _replicate(MethodCall method_call) throws Throwable
{
   try
   {
      InvocationContext remoteCtx = getInvocationContext();
      remoteCtx.setOriginLocal(false);
      setInvocationContext(remoteCtx);
      return invokeMethod(method_call);
   }
   catch (Exception ex)
   {
      log.warn("replication failure with method_call " + method_call + " exception: " + ex);
      throw ex;
   }
   finally
   {
      // always restore local origin, even on failure
      InvocationContext localCtx = getInvocationContext();
      localCtx.setOriginLocal(true);
      setInvocationContext(localCtx);
   }
}
/**
 * Replicates a list of method calls, one at a time, in list order.
 */
public void _replicate(List method_calls) throws Throwable
{
   for (Iterator it = method_calls.iterator(); it.hasNext();)
   {
      _replicate((MethodCall) it.next());
   }
}
/**
 * A 'clustered get' call, called from a remote ClusteredCacheLoader. Reflectively invokes
 * the requested read method on this instance and wraps the outcome.
 *
 * @param methodCall           the read call (method + args; args[0] must be the Fqn)
 * @param searchBackupSubtrees logged but not otherwise used in this method body
 * @return a List containing 2 elements: (true or false) and a value (Object). If buddy replication
 *         is used one further element is added - an Fqn of the backup subtree in which this node may be found.
 */
public List _clusteredGet(MethodCall methodCall, Boolean searchBackupSubtrees)
{
   MethodCall call = methodCall;
   if (log.isTraceEnabled()) log.trace("Clustered Get called with params: " + call + ", " + searchBackupSubtrees);
   Method m = call.getMethod();
   Object[] args = call.getArgs();
   Object callResults = null;
   try
   {
      Fqn fqn = (Fqn) args[0];
      if (log.isTraceEnabled()) log.trace("Clustered get: invoking call " + m + " with Fqn " + fqn);
      // reflective self-invocation of the requested read method
      callResults = m.invoke(this, args);
      boolean found = validResult(callResults, call, fqn);
      if (log.isTraceEnabled()) log.trace("Got result " + callResults + ", found=" + found);
      // a node can exist with a null/empty result; substitute an empty collection so the
      // caller can distinguish "found but empty" from "not found"
      if (found && callResults == null) callResults = createEmptyResults(call);
   }
   catch (Exception e)
   {
      // best-effort: failures are reported to the caller as "not found"
      log.warn("Problems processing clusteredGet call", e);
   }
   List results = new ArrayList(2);
   if (callResults != null)
   {
      results.add(true);
      results.add(callResults);
   }
   else
   {
      results.add(false);
      results.add(null);
   }
   return results;
}
/**
 * Used with buddy replication's data gravitation interceptor. Delegates to
 * {@link #gravitateData(Fqn, boolean, boolean)} and flattens the result.
 *
 * @param fqn            the fqn to gravitate
 * @param searchSubtrees should _BUDDY_BACKUP_ subtrees be searched
 * @param marshal        should the list of NodeData being gravitated be marshalled into a
 *                       byte[] or returned as a List
 * @return <code>List</code> with 1 or 3 elements: a <code>Boolean</code> found-flag; if
 *         true, the subtree data (marshalled to byte[] when <code>marshal</code> is true,
 *         otherwise a List of NodeData) and the _BUDDY_BACKUP_ Fqn to clean up once the
 *         new owner has acquired the data.
 */
public List _gravitateData(Fqn fqn, boolean searchSubtrees, boolean marshal)
        throws CacheException
{
   return gravitateData(fqn, searchSubtrees, marshal).asList();
}
/**
 * Collects the state for an Fqn and its sub-nodes for buddy-replication data gravitation.
 * Looks for the node directly first; if absent and <code>searchSubtrees</code> is set,
 * probes each _BUDDY_BACKUP_ child's backup Fqn until a match is found.
 *
 * @param fqn            the fqn to gravitate
 * @param searchSubtrees whether to search _BUDDY_BACKUP_ subtrees when the direct lookup fails
 * @param marshal        if true, the NodeData list is serialized to a byte[] via
 *                       MarshalledValueOutputStream
 * @return a GravitateResult: noDataFound() when nothing matches, otherwise the subtree
 *         data plus the backup Fqn to clean up
 */
public GravitateResult gravitateData(Fqn fqn, boolean searchSubtrees, boolean marshal)
        throws CacheException
{
   // we need to get the state for this Fqn and its sub-nodes.
   // for now, perform a very simple series of getData calls.
   Node actualNode = findNode(fqn);
   Fqn backupNodeFqn = null;
   if (actualNode == null && searchSubtrees)
   {
      Node backupSubtree = findNode(BuddyManager.BUDDY_BACKUP_SUBTREE_FQN);
      if (backupSubtree != null)
      {
         Map children = backupSubtree.getNodeSPI().getChildrenMap();
         if (children != null)
         {
            // try each buddy group's backup subtree until the node turns up
            Iterator childNames = children.keySet().iterator();
            while (childNames.hasNext() && actualNode == null)
            {
               backupNodeFqn = BuddyManager.getBackupFqn(childNames.next().toString(), fqn);
               actualNode = findNode(backupNodeFqn);
            }
         }
      }
   }
   if (actualNode == null)
   {
      return GravitateResult.noDataFound();
   }
   if (backupNodeFqn == null)
   {
      // found directly (not in a backup subtree): compute our own backup Fqn for cleanup
      backupNodeFqn = BuddyManager.getBackupFqn(BuddyManager.getGroupNameFromAddress(getLocalAddress()), fqn);
   }
   List list = getNodeData(new LinkedList(), actualNode);
   if (marshal)
   {
      try
      {
         ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
         MarshalledValueOutputStream maos = new MarshalledValueOutputStream(baos);
         maos.writeObject(list);
         maos.close();
         return GravitateResult.marshalledResult(baos.toByteArray(), backupNodeFqn);
      }
      catch (IOException e)
      {
         throw new CacheException("Failure marshalling subtree at " + fqn, e);
      }
   }
   else
   {
      GravitateResult gr = GravitateResult.subtreeResult(list, backupNodeFqn);
      return gr;
   }
}
/**
 * Depth-first collection of {@link NodeData} (actual Fqn plus data map) for the subtree
 * rooted at <code>node</code>; entries are appended to and returned in <code>list</code>.
 */
private List getNodeData(List list, Node node)
{
   list.add(new NodeData(BuddyManager.getActualFqn(node.getFqn()), node.getData()));
   Map<Object, Node> children = node.getNodeSPI().getChildrenMap();
   for (Node child : children.values())
   {
      getNodeData(list, child);
   }
   return list;
}
// ------------- start: buddy replication specific 'lifecycle' method calls
/**
 * Buddy-replication lifecycle callback: assigns this cache to a buddy group with the given
 * state. No-op when buddy replication is disabled (buddyManager is null).
 */
public void _remoteAssignToBuddyGroup(BuddyGroup group, Map state) throws Exception
{
   if (buddyManager != null) buddyManager.handleAssignToBuddyGroup(group, state);
}
/**
 * Buddy-replication lifecycle callback: removes this cache from the named buddy group.
 * No-op when buddy replication is disabled (buddyManager is null).
 */
public void _remoteRemoveFromBuddyGroup(String groupName) throws BuddyNotInitException
{
   if (buddyManager != null) buddyManager.handleRemoveFromBuddyGroup(groupName);
}
/**
 * Buddy-replication lifecycle callback: records a remote member's buddy pool name
 * broadcast. No-op when buddy replication is disabled (buddyManager is null).
 */
public void _remoteAnnounceBuddyPoolName(IpAddress address, String buddyPoolName)
{
   if (buddyManager != null) buddyManager.handlePoolNameBroadcast(address, buddyPoolName);
}
/**
 * Cleans up primary and backup copies of gravitated data: removes both nodes when the
 * remove-on-find policy is active, otherwise evicts them locally.
 * <p/>
 * NOTE(review): unlike the other buddy callbacks above, buddyManager is dereferenced here
 * without a null check — presumably this is only ever invoked when buddy replication is
 * enabled; confirm.
 */
public void _dataGravitationCleanup(GlobalTransaction gtx, Fqn primary, Fqn backup) throws Exception
{
   MethodCall primaryDataCleanup, backupDataCleanup;
   if (buddyManager.isDataGravitationRemoveOnFind())
   {
      primaryDataCleanup = MethodCallFactory.create(MethodDeclarations.removeNodeMethodLocal, null, primary, false);
      backupDataCleanup = MethodCallFactory.create(MethodDeclarations.removeNodeMethodLocal, null, backup, false);
   }
   else
   {
      primaryDataCleanup = MethodCallFactory.create(MethodDeclarations.evictNodeMethodLocal, primary);
      backupDataCleanup = MethodCallFactory.create(MethodDeclarations.evictNodeMethodLocal, backup);
   }
   invokeMethod(primaryDataCleanup);
   invokeMethod(backupDataCleanup);
}
// ------------- end: buddy replication specific 'lifecycle' method calls
/**
 * Returns true if the results from a clustered-get invocation constitute a valid (found)
 * result for the invoked method: data/children getters are valid when non-null or when the
 * node exists; an exists() call is valid iff it returned true.
 */
private boolean validResult(Object callResults, MethodCall mc, Fqn fqn)
{
   int id = mc.getMethodId();
   if (id == MethodDeclarations.getDataMapMethodLocal_id || id == MethodDeclarations.getChildrenNamesMethodLocal_id)
   {
      return callResults != null || exists(fqn);
   }
   if (id == MethodDeclarations.existsMethod_id)
   {
      return (Boolean) callResults;
   }
   return false;
}
/**
 * Creates an empty Collection appropriate to the return type of the method called: an
 * immutable empty map for the data/children-name getters, null for everything else.
 */
private Object createEmptyResults(MethodCall mc)
{
   int id = mc.getMethodId();
   if (id == MethodDeclarations.getDataMapMethodLocal_id || id == MethodDeclarations.getChildrenNamesMethodLocal_id)
   {
      return Collections.emptyMap();
   }
   return null;
}
/**
 * Releases all locks held on the subtree rooted at <code>fqn</code>. Failures are logged
 * and swallowed.
 */
public void _releaseAllLocks(Fqn fqn)
{
   try
   {
      Node n = findNode(fqn);
      if (n == null)
      {
         log.error("releaseAllLocks(): node " + fqn + " not found");
      }
      else
      {
         releaseAll(n);
      }
   }
   catch (Throwable t)
   {
      log.error("releaseAllLocks(): failed", t);
   }
}
/**
 * Recursively releases locks bottom-up: descendants first, then <code>n</code> itself.
 */
private void releaseAll(Node n)
{
   for (Node child : n.getChildren())
   {
      releaseAll(child);
   }
   n.getNodeSPI().getLock().releaseAll();
}
/**
 * Finds and returns the string value for the Fqn.
 * Returns null if not found or upon any error.
 */
public String _print(Fqn fqn)
{
   try
   {
      Node n = findNode(fqn);
      return n == null ? null : n.toString();
   }
   catch (Throwable t)
   {
      return null;
   }
}
/**
 * Should not be called; logs a warning and does nothing. Locking is handled elsewhere.
 */
public void _lock(Fqn fqn, NodeLock.LockType lock_type, boolean recursive)
        throws TimeoutException, LockingException
{
   log.warn("method _lock() should not be invoked on CacheImpl");
}
// todo: these methods can be removed once we move 2PC entirely into {Replication/Lock}Interceptor
/**
 * Throws UnsupportedOperationException; 2PC prepare for optimistic locking is handled by
 * the interceptor chain, not by CacheImpl directly.
 */
public void optimisticPrepare(GlobalTransaction gtx, List modifications, Map data, Address address, boolean onePhaseCommit)
{
   throw new UnsupportedOperationException("optimisticPrepare() should not be called on CacheImpl directly");
}
/**
 * Not supported on CacheImpl; 2PC preparation belongs to the interceptor layer.
 *
 * @throws UnsupportedOperationException always
 */
public void prepare(GlobalTransaction global_tx, List modifications, Address coord, boolean onePhaseCommit)
{
   throw new UnsupportedOperationException("prepare() should not be called on CacheImpl directly");
}
/**
 * Not supported on CacheImpl; transaction commit belongs to the interceptor layer.
 *
 * @throws UnsupportedOperationException always
 */
public void commit(GlobalTransaction tx)//, Boolean hasMods)
{
   throw new UnsupportedOperationException("commit() should not be called on CacheImpl directly");
}
/**
 * Not supported on CacheImpl; transaction rollback belongs to the interceptor layer.
 *
 * @throws UnsupportedOperationException always
 */
public void rollback(GlobalTransaction tx)//, Boolean hasMods)
{
   throw new UnsupportedOperationException("rollback() should not be called on CacheImpl directly");
}
/* ----------------- End of Callbacks ---------------------- */

/**
 * Adds an undo operation to the transaction table, to be replayed if the
 * given transaction rolls back.
 */
public void addUndoOperation(GlobalTransaction gtx, MethodCall undo_op)
{
   tx_table.addUndoOperation(gtx, undo_op);
}
/**
 * Returns the CacheLoaderManager in use, which may be null if none was configured.
 */
public CacheLoaderManager getCacheLoaderManager()
{
   return cacheLoaderManager;
}
/**
 * Installs the CacheLoaderManager to use.
 */
public void setCacheLoaderManager(CacheLoaderManager cacheLoaderManager)
{
   this.cacheLoaderManager = cacheLoaderManager;
}
/**
 * Installs the configuration and back-links it to this cache instance.
 */
public void setConfiguration(Configuration configuration)
{
   this.configuration = configuration;
   // the configuration keeps a reference to its owning cache
   configuration.setTreeCache(this);
}
/**
 * @return the {@link Notifier} configured for this CacheImpl instance.
 */
public Notifier getNotifier()
{
   return notifier;
}
/**
 * Returns the thread-local invocation context, creating and registering a fresh
 * one if this thread has none yet.
 */
public InvocationContext getInvocationContext()
{
   InvocationContext context = invocationContextContainer.get();
   if (context != null) return context;
   context = new InvocationContext();
   invocationContextContainer.set(context);
   return context;
}
/**
 * Associates the given invocation context with the current thread.
 */
public void setInvocationContext(InvocationContext ctx)
{
   invocationContextContainer.set(ctx);
}
/**
 * Returns the JMX MBean wrapper for this cache, lazily creating one on first use.
 * NOTE(review): this lazy init is not synchronized; two racing callers could each
 * construct a wrapper and one would be discarded — confirm single-threaded usage.
 */
public CacheMBean getCacheMBeanInterface()
{
   if (cacheMBean == null)
   {
      cacheMBean = new org.jboss.cache.jmx.Cache(this);
   }
   return cacheMBean;
}
/**
 * Installs an externally supplied JMX MBean wrapper for this cache.
 */
public void setCacheMBeanInterface(CacheMBean mbean)
{
   this.cacheMBean = mbean;
}
/**
 * New API to efficiently relocate a node under a new parent.
 *
 * @since 2.0.0
 */
public void move(Fqn newParent, Fqn nodeToMove)
{
   // route through the interceptor chain so locking/replication semantics apply
   invokeMethod(MethodCallFactory.create(MethodDeclarations.moveMethodLocal, newParent, nodeToMove));
}
/**
 * Called by reflection: performs the actual relocation of a node under a new parent.
 * Re-links the child pointers at both parents, rewrites Fqns, and registers an
 * undo operation when running inside a transaction.
 *
 * @param newParentFqn  Fqn of the node that will become the new parent
 * @param nodeToMoveFqn Fqn of the node being relocated
 * @throws NodeNotExistsException if either the target parent or the node to move is absent
 */
public void _move(Fqn newParentFqn, Fqn nodeToMoveFqn)
{
   // the actual move algorithm.
   NodeSPI newParent = findNode(newParentFqn);
   if (newParent == null)
   {
      throw new NodeNotExistsException("New parent node " + newParentFqn + " does not exist when attempting to move node!!");
   }
   Node node = findNode(nodeToMoveFqn);
   if (node == null)
   {
      throw new NodeNotExistsException("Node " + nodeToMoveFqn + " does not exist when attempting to move node!!");
   }
   NodeSPI oldParent = (NodeSPI) node.getParent();
   Object nodeName = nodeToMoveFqn.getLast();
   // now that we have the parent and target nodes:
   // first correct the pointers at the pruning point
   // (removeChild takes an Fqn relative to the parent, hence the single-element Fqn)
   oldParent.removeChild(new Fqn(nodeName));
   newParent.addChild(nodeName, node);
   // parent pointer is calculated on the fly using Fqns.
   // now adjust Fqns of node and all children.
   moveFqns(node, newParent.getFqn());
   // now register an undo op
   if (getInvocationContext().getTransaction() != null)
   {
      // undo = move the node from its new location back under the old parent
      MethodCall undo = MethodCallFactory.create(MethodDeclarations.moveMethodLocal, oldParent.getFqn(), new Fqn(newParentFqn, nodeToMoveFqn.getLast()));
      tx_table.addUndoOperation(getInvocationContext().getGlobalTransaction(), undo);
   }
}
/**
 * Reflection target for {@code MethodDeclarations.blockChannelLocal}; intentionally empty.
 */
public void _block()
{
   //intentionally empty, used only for reflection in MethodDeclarations.blockChannelLocal
}
/**
 * Reflection target for {@code MethodDeclarations.unblockChannelLocal}; intentionally empty.
 */
public void _unblock()
{
   //intentionally empty, used only for reflection in MethodDeclarations.unblockChannelLocal
}
/**
 * Rebases the node's Fqn under the given new base.
 * NOTE(review): only this node's Fqn is set here; the call site's comment says
 * children are adjusted too — presumably NodeSPI.setFqn cascades. Confirm.
 */
private void moveFqns(Node node, Fqn newBase)
{
   Fqn newFqn = new Fqn(newBase, node.getFqn().getLast());
   node.getNodeSPI().setFqn(newFqn);
}
/**
 * JGroups {@link ExtendedMessageListener} adaptor that bridges channel
 * state-transfer callbacks (full and partial, byte[]- and stream-based) to the
 * cache's StateTransferManager, and records success/failure of state
 * installation so {@link #waitForState()} can block on it.
 */
protected class MessageListenerAdaptor implements ExtendedMessageListener
{
   /**
    * Reference to an exception that was raised during
    * state installation on this node.
    */
   protected volatile Exception setStateException;

   /** Monitor used to signal completion (success or failure) of state installation. */
   private final Object stateLock = new Object();

   protected MessageListenerAdaptor()
   {
   }

   /**
    * Blocks until state has been installed (outer-class flag {@code isStateSet}
    * becomes true), rethrowing any exception recorded while receiving state.
    * NOTE(review): InterruptedException is deliberately swallowed so the wait
    * resumes; the thread's interrupt status is not restored — confirm callers
    * do not depend on interruptibility here.
    */
   public void waitForState() throws Exception
   {
      synchronized (stateLock)
      {
         while (!isStateSet)
         {
            if (setStateException != null)
            {
               throw setStateException;
            }
            try
            {
               stateLock.wait();
            }
            catch (InterruptedException iex)
            {
            }
         }
      }
   }

   /** Marks state installation as successful and clears any recorded failure. */
   protected void stateReceivedSuccess()
   {
      isStateSet = true;
      setStateException = null;
   }

   /**
    * Records a failure that occurred while installing received state so that
    * {@link #waitForState()} can rethrow it.
    */
   protected void stateReceivingFailed(Throwable t)
   {
      if (t instanceof CacheException)
      {
         // expected cache-level failure: log quietly
         log.debug(t);
      }
      else
      {
         log.error("failed setting state", t);
      }
      if (t instanceof Exception)
      {
         setStateException = (Exception) t;
      }
      else
      {
         // wrap Errors/Throwables so they can still be rethrown as an Exception
         setStateException = new Exception(t);
      }
   }

   /**
    * Logs a failure that occurred while producing state for another member.
    * Such failures are only logged; they are not recorded for waitForState().
    */
   protected void stateProducingFailed(Throwable t)
   {
      if (t instanceof CacheException)
      {
         log.debug(t);
      }
      else
      {
         log.error("Caught " + t.getClass().getName()
               + " while responding to state transfer request", t);
      }
   }

   /**
    * Callback, does nothing.
    */
   public void receive(Message msg)
   {
   }

   /**
    * Produces the full cache state (from the root Fqn) as a byte array for a
    * joining member; returns null on failure.
    */
   public byte[] getState()
   {
      MarshalledValueOutputStream out = null;
      byte[] result = null;
      try
      {
         ExposedByteArrayOutputStream baos = new ExposedByteArrayOutputStream(16 * 1024);
         out = new MarshalledValueOutputStream(baos);
         getStateTransferManager().getState(out, Fqn.ROOT, configuration.getInitialStateRetrievalTimeout(), true, true);
         result = baos.getRawBuffer();
      }
      catch (Throwable t)
      {
         stateProducingFailed(t);
      }
      finally
      {
         Util.close(out);
      }
      return result;
   }

   /**
    * Installs full cache state received as a byte array; always notifies
    * waiters on stateLock, whether installation succeeded or failed.
    */
   public void setState(byte[] new_state)
   {
      if (new_state == null)
      {
         log.debug("transferred state is null (may be first member in cluster)");
         return;
      }
      ByteArrayInputStream bais = new ByteArrayInputStream(new_state);
      MarshalledValueInputStream in = null;
      try
      {
         in = new MarshalledValueInputStream(bais);
         getStateTransferManager().setState(in, Fqn.ROOT, null);
         stateReceivedSuccess();
      }
      catch (Throwable t)
      {
         stateReceivingFailed(t);
      }
      finally
      {
         Util.close(in);
         synchronized (stateLock)
         {
            // Notify wait that state has been set.
            stateLock.notifyAll();
         }
      }
   }

   /**
    * Produces partial state as a byte array. The state_id may carry both a
    * source and an integration root separated by PARTIAL_STATE_DELIMETER; only
    * the source root (first part) is used on the producing side.
    */
   public byte[] getState(String state_id)
   {
      MarshalledValueOutputStream out = null;
      String sourceRoot = state_id;
      byte[] result = null;
      boolean hasDifferentSourceAndIntegrationRoots = state_id.indexOf(StateTransferManager.PARTIAL_STATE_DELIMETER) > 0;
      if (hasDifferentSourceAndIntegrationRoots)
      {
         sourceRoot = state_id.split(StateTransferManager.PARTIAL_STATE_DELIMETER)[0];
      }
      try
      {
         ExposedByteArrayOutputStream baos = new ExposedByteArrayOutputStream(16 * 1024);
         out = new MarshalledValueOutputStream(baos);
         getStateTransferManager().getState(out, Fqn.fromString(sourceRoot),
               configuration.getInitialStateRetrievalTimeout(), true, true);
         result = baos.getRawBuffer();
      }
      catch (Throwable t)
      {
         stateProducingFailed(t);
      }
      finally
      {
         Util.close(out);
      }
      return result;
   }

   /**
    * Streams the full cache state (from the root Fqn) to the given output stream.
    */
   public void getState(OutputStream ostream)
   {
      MarshalledValueOutputStream out = null;
      try
      {
         out = new MarshalledValueOutputStream(ostream);
         getStateTransferManager().getState(out, Fqn.ROOT, configuration.getInitialStateRetrievalTimeout(), true, true);
      }
      catch (Throwable t)
      {
         stateProducingFailed(t);
      }
      finally
      {
         Util.close(out);
      }
   }

   /**
    * Streams partial state for the source root encoded in state_id (first part
    * before PARTIAL_STATE_DELIMETER, if present) to the given output stream.
    */
   public void getState(String state_id, OutputStream ostream)
   {
      String sourceRoot = state_id;
      MarshalledValueOutputStream out = null;
      boolean hasDifferentSourceAndIntegrationRoots = state_id.indexOf(StateTransferManager.PARTIAL_STATE_DELIMETER) > 0;
      if (hasDifferentSourceAndIntegrationRoots)
      {
         sourceRoot = state_id.split(StateTransferManager.PARTIAL_STATE_DELIMETER)[0];
      }
      try
      {
         out = new MarshalledValueOutputStream(ostream);
         getStateTransferManager().getState(out, Fqn.fromString(sourceRoot), configuration.getInitialStateRetrievalTimeout(), true, true);
      }
      catch (Throwable t)
      {
         stateProducingFailed(t);
      }
      finally
      {
         Util.close(out);
      }
   }

   /**
    * Installs full cache state read from the given input stream; always
    * notifies waiters on stateLock when done.
    */
   public void setState(InputStream istream)
   {
      if (istream == null)
      {
         log.debug("stream is null (may be first member in cluster)");
         return;
      }
      MarshalledValueInputStream in = null;
      try
      {
         in = new MarshalledValueInputStream(istream);
         getStateTransferManager().setState(in, Fqn.ROOT, null);
         stateReceivedSuccess();
      }
      catch (Throwable t)
      {
         stateReceivingFailed(t);
      }
      finally
      {
         Util.close(in);
         synchronized (stateLock)
         {
            // Notify wait that state has been set.
            stateLock.notifyAll();
         }
      }
   }

   /**
    * Installs partial state (byte array) under the integration root encoded in
    * state_id (second part after PARTIAL_STATE_DELIMETER, if present), using the
    * region's classloader when one is registered.
    */
   public void setState(String state_id, byte[] state)
   {
      if (state == null)
      {
         log.debug("partial transferred state is null");
         return;
      }
      MarshalledValueInputStream in = null;
      String targetRoot = state_id;
      boolean hasDifferentSourceAndIntegrationRoots = state_id.indexOf(StateTransferManager.PARTIAL_STATE_DELIMETER) > 0;
      if (hasDifferentSourceAndIntegrationRoots)
      {
         targetRoot = state_id.split(StateTransferManager.PARTIAL_STATE_DELIMETER)[1];
      }
      try
      {
         log.debug("Setting received partial state for subroot " + state_id);
         Fqn subroot = Fqn.fromString(targetRoot);
         Region region = regionManager.getRegion(subroot, false);
         ClassLoader cl = null;
         if (region != null)
         {
            // If a classloader is registered for the node's region, use it
            cl = region.getClassLoader();
         }
         ByteArrayInputStream bais = new ByteArrayInputStream(state);
         in = new MarshalledValueInputStream(bais);
         getStateTransferManager().setState(in, subroot, cl);
         stateReceivedSuccess();
      }
      catch (Throwable t)
      {
         stateReceivingFailed(t);
      }
      finally
      {
         Util.close(in);
         synchronized (stateLock)
         {
            // Notify wait that state has been set.
            stateLock.notifyAll();
         }
      }
   }

   /**
    * Installs partial state from a stream under the integration root encoded in
    * state_id (second part after PARTIAL_STATE_DELIMETER, if present), using the
    * region's classloader when one is registered.
    */
   public void setState(String state_id, InputStream istream)
   {
      String targetRoot = state_id;
      MarshalledValueInputStream in = null;
      boolean hasDifferentSourceAndIntegrationRoots = state_id.indexOf(StateTransferManager.PARTIAL_STATE_DELIMETER) > 0;
      if (hasDifferentSourceAndIntegrationRoots)
      {
         targetRoot = state_id.split(StateTransferManager.PARTIAL_STATE_DELIMETER)[1];
      }
      if (istream == null)
      {
         log.debug("stream is null (may be first member in cluster). State is not set");
         return;
      }
      try
      {
         log.debug("Setting received partial state for subroot " + state_id);
         in = new MarshalledValueInputStream(istream);
         Fqn subroot = Fqn.fromString(targetRoot);
         Region region = regionManager.getRegion(subroot, false);
         ClassLoader cl = null;
         if (region != null)
         {
            // If a classloader is registered for the node's region, use it
            cl = region.getClassLoader();
         }
         getStateTransferManager().setState(in, subroot, cl);
         stateReceivedSuccess();
      }
      catch (Throwable t)
      {
         stateReceivingFailed(t);
      }
      finally
      {
         Util.close(in);
         synchronized (stateLock)
         {
            // Notify wait that state has been set.
            stateLock.notifyAll();
         }
      }
   }
}
/*-------------------- End of MessageListener ----------------------*/
/*----------------------- MembershipListener ------------------------*/

/**
 * JGroups callback invoked when cluster membership changes. Updates the member
 * list, releases locks held by departed members, recomputes coordinator status,
 * then notifies listeners and any thread waiting on the member list.
 */
public void viewAccepted(View new_view)
{
   Vector new_mbrs = new_view.getMembers();
   // todo: if MergeView, fetch and reconcile state from coordinator
   // actually maybe this is best left up to the application ? we just notify them and let
   // the appl handle it ?
   log.info("viewAccepted(): " + new_view);
   synchronized (members)
   {
      boolean needNotification = false;
      if (new_mbrs != null)
      {
         // Determine what members have been removed
         // and roll back any tx and break any locks
         Vector removed = (Vector) members.clone();
         removed.removeAll(new_mbrs);
         removeLocksForDeadMembers(root, removed);
         members.removeAllElements();
         members.addAll(new_view.getMembers());
         needNotification = true;
      }
      // Now that we have a view, figure out if we are the coordinator
      coordinator = (members.size() == 0 ? false : members.get(0).equals(getLocalAddress()));
      // now notify listeners - *after* updating the coordinator. - JBCACHE-662
      if (needNotification) notifier.notifyViewChange(new_view);
      // Wake up any threads that are waiting to know who the members
      // are so they can figure out who the coordinator is
      members.notifyAll();
   }
}
/**
 * Called when a member is suspected; intentionally a no-op — membership changes
 * are only acted upon in {@code viewAccepted}.
 */
public void suspect(Address suspected_mbr)
{
}
/**
 * Indicates that the channel has received a BLOCK event from the FLUSH protocol;
 * routes a block call down the interceptor chain.
 */
public void block()
{
   boolean debug = log.isDebugEnabled();
   if (debug)
   {
      log.debug("Block received at " + getLocalAddress());
   }
   invokeMethod(MethodCallFactory.create(MethodDeclarations.blockChannelLocal));
   if (debug)
   {
      log.debug("Block processed at " + getLocalAddress());
   }
}
/**
 * Indicates that the channel has received an UNBLOCK event from the FLUSH protocol;
 * routes an unblock call down the interceptor chain.
 */
public void unblock()
{
   boolean debug = log.isDebugEnabled();
   if (debug)
   {
      log.debug("UnBlock received at " + getLocalAddress());
   }
   invokeMethod(MethodCallFactory.create(MethodDeclarations.unblockChannelLocal));
   if (debug)
   {
      log.debug("UnBlock processed at " + getLocalAddress());
   }
}
/*------------------- End of MembershipListener ----------------------*/
/* ------------------------------ Private methods --------------------------- */

/**
 * Returns the transaction associated with the current thread via the configured
 * TransactionManager, or null when no manager is set or lookup fails.
 * Used by {@link #getCurrentTransaction()}.
 */
protected Transaction getLocalTransaction()
{
   if (tm == null) return null;
   try
   {
      return tm.getTransaction();
   }
   catch (Throwable t)
   {
      // best-effort: treat any lookup failure as "no transaction"
      return null;
   }
}
/**
 * Returns true if the given transaction is in the ACTIVE or PREPARING state,
 * false otherwise (including when tx is null or its status cannot be read).
 */
private boolean isValid(Transaction tx)
{
   if (tx == null) return false;
   try
   {
      // (cleanup: removed the dead "status = -1" pre-initialization; the variable
      // was always overwritten before use)
      int status = tx.getStatus();
      return status == Status.STATUS_ACTIVE || status == Status.STATUS_PREPARING;
   }
   catch (SystemException e)
   {
      log.error("failed getting transaction status", e);
      return false;
   }
}
/**
 * Returns the transaction associated with the current thread.
 * If a local transaction exists but has no GlobalTransaction mapping yet, one is
 * created and mapped. Returns null when the local transaction is not ACTIVE or
 * PREPARING.
 *
 * @return a GlobalTransaction, or null if no (local) transaction is associated with the current thread
 */
public GlobalTransaction getCurrentTransaction()
{
   // delegate with createIfNotExists = true
   return getCurrentTransaction(true);
}
/**
 * Returns the transaction associated with the thread; optionally creating
 * it if is does not exist.
 *
 * @param createIfNotExists if true, a GlobalTransaction mapping is created when absent
 * @return the GlobalTransaction, or null when there is no local transaction or it
 *         is not in the ACTIVE/PREPARING state
 */
public GlobalTransaction getCurrentTransaction(boolean createIfNotExists)
{
   Transaction tx;
   if ((tx = getLocalTransaction()) == null)
   {// no transaction is associated with the current thread
      return null;
   }
   if (!isValid(tx))
   {// we got a non-null transaction, but it is not active anymore
      int status = -1;
      try
      {
         status = tx.getStatus();
      }
      catch (SystemException e)
      {
         // deliberately ignored: status simply stays -1 for the warning below
      }
      log.warn("status is " + status + " (not ACTIVE or PREPARING); returning null)", new Throwable());
      return null;
   }
   return getCurrentTransaction(tx, createIfNotExists);
}
/**
 * Returns the global transaction mapped to the given local transaction, creating
 * the mapping if absent.
 */
public GlobalTransaction getCurrentTransaction(Transaction tx)
{
   return getCurrentTransaction(tx, true);
}
/**
 * Returns the global transaction for this local transaction.
 *
 * @param tx                the local JTA transaction to map
 * @param createIfNotExists if true, if a global transaction is not found; one is created
 * @return the mapped GlobalTransaction, or null when absent and createIfNotExists is false
 */
public GlobalTransaction getCurrentTransaction(Transaction tx, boolean createIfNotExists)
{
   // removed synchronization on tx_table because underlying implementation is thread safe
   // and JTA spec (section 3.4.3 Thread of Control, par 2) says that only one thread may
   // operate on the transaction at one time so no concern about 2 threads trying to call
   // this method for the same Transaction instance at the same time
   //
   GlobalTransaction gtx = tx_table.get(tx);
   if (gtx == null && createIfNotExists)
   {
      Address addr = (Address) getLocalAddress();
      gtx = GlobalTransaction.create(addr);
      tx_table.put(tx, gtx);
      // optimistic locking needs the richer OptimisticTransactionEntry (workspace etc.)
      TransactionEntry ent = configuration.isNodeLockingOptimistic() ? new OptimisticTransactionEntry() : new TransactionEntry();
      ent.setTransaction(tx);
      tx_table.put(gtx, ent);
      if (log.isTraceEnabled())
      {
         log.trace("created new GTX: " + gtx + ", local TX=" + tx);
      }
   }
   return gtx;
}
/**
 * Invokes a method against this object. Contains the logic for handling
 * the various use cases, e.g. mode (local, repl_async, repl_sync),
 * transaction (yes or no) and locking (yes or no).
 *
 * @throws CacheException     rethrown as-is when the chain raises one
 * @throws RuntimeException   wrapping any other Throwable from the chain
 */
protected Object invokeMethod(MethodCall m) throws CacheException
{
   try
   {
      return interceptor_chain.invoke(m);
   }
   catch (Throwable t)
   {
      if (t instanceof CacheException)
      {
         throw (CacheException) t;
      }
      throw new RuntimeException(t);
   }
}
/**
 * Returns the object used to identify a lock owner: the current transaction when
 * one exists, otherwise the current thread.
 */
protected Object getOwnerForLock()
{
   GlobalTransaction gtx = getCurrentTransaction();
   return gtx != null ? gtx : Thread.currentThread();
}
/**
 * Finds a node given a fully qualified name.
 * Whenever nodes are created, and the global transaction is not null, the created
 * nodes have to be added to the transaction's {@link TransactionEntry}
 * field.<br>
 * When a lock is acquired on a node, a reference to the lock has to be
 * {@link TransactionEntry#addLock(org.jboss.cache.lock.NodeLock) added to the list of locked nodes}
 * in the {@link TransactionEntry}.
 * <p>This operation will also apply different locking to the cache nodes, depending on
 * <tt>operation_type</tt>. If it is <tt>read</tt> type, all nodes will be acquired with
 * read lock. Otherwise, the operation is <tt>write</tt> type, all parent nodes will be acquired
 * with read lock while the destination node acquires write lock.</p>
 *
 * @param fqn Fully qualified name for the corresponding node.
 * @return the NodeSPI found, or null if the node does not exist or an error occurred
 */
public NodeSPI findNode(Fqn fqn)
{
   try
   {
      return findNode(fqn, null);
   }
   catch (CacheException e)
   {
      // version-less lookup should not raise; log and degrade to "not found"
      log.warn("Unexpected error", e);
      return null;
   }
}
/**
 * Looks up a node and fails loudly if it is absent.
 *
 * @throws NodeNotExistsException when no node exists at the given Fqn
 */
private NodeSPI findNodeCheck(GlobalTransaction tx, Fqn fqn)
{
   NodeSPI found = findNode(fqn);
   if (found != null) return found;
   String errStr = "node " + fqn + " not found (gtx=" + tx + ", caller=" + Thread.currentThread() + ")";
   if (log.isTraceEnabled())
   {
      log.trace(errStr);
   }
   throw new NodeNotExistsException(errStr);
}
/**
 * Internal method; not to be used externally. Physically removes a node that has
 * been marked for deletion (or unconditionally when skipMarkerCheck is true).
 * The root node is never removed itself; instead its deletion marker is cleared
 * and its children are dropped.
 *
 * @param f               Fqn of the node to remove
 * @param skipMarkerCheck if true, remove regardless of the node's deletion marker
 */
public void realRemove(Fqn f, boolean skipMarkerCheck)
{
   NodeSPI n = findInternal(f, true);
   if (n == null)
   {
      return;
   }
   // NOTE(review): this debug line claims "marked for removal" before the marker
   // has actually been checked below.
   if (log.isDebugEnabled()) log.debug("Performing a real remove for node " + f + ", marked for removal.");
   if (skipMarkerCheck || n.isDeleted())
   {
      if (n.getFqn().isRoot())
      {
         // do not actually delete; just remove deletion marker
         n.markAsDeleted(false);
         // but now remove all children, since the call has been to remove("/")
         n.removeChildren();
      }
      else
      {
         // bypass the interceptor chain for the physical detach from the parent
         getInvocationContext().getOptionOverrides().setBypassInterceptorChain(true);
         n.getParent().removeChild(n.getFqn());
      }
   }
   else
   {
      if (log.isDebugEnabled()) log.debug("Node " + f + " NOT marked for removal as expected, not removing!");
   }
}
/**
 * Finds a node given a fully qualified name and, when optimistic locking is in
 * use, validates the stored node's version against the one supplied.
 *
 * @param fqn     fully qualified name of the node; may be null
 * @param version expected data version; ignored when null or when optimistic locking is off
 * @return the node, or null if fqn is null or no such node exists
 * @throws CacheException if the stored node's version is newer than the one supplied
 */
private NodeSPI findNode(Fqn fqn, DataVersion version) throws CacheException
{
   if (fqn == null) return null;
   NodeSPI toReturn = findInternal(fqn, false);
   // FIX: guard against NPE — findInternal legitimately returns null for a
   // missing node, and the original dereferenced it unconditionally below.
   if (toReturn != null && version != null && configuration.isNodeLockingOptimistic())
   {
      // we need to check the version of the data node...
      DataVersion nodeVersion = toReturn.getVersion();
      if (log.isDebugEnabled())
      {
         log.debug("looking for optimistic node [" + fqn + "] with version [" + version + "]. My version is [" + nodeVersion + "]");
      }
      if (nodeVersion.newerThan(version))
      {
         // we have a versioning problem; throw an exception!
         throw new CacheException("Unable to validate versions.");
      }
   }
   return toReturn;
}
/**
 * Returns the RegionManager, lazily constructing it on first use.
 * Synchronized so racing callers cannot create two managers.
 */
public synchronized RegionManager getRegionManager()
{
   if (regionManager == null) regionManager = new RegionManager(this);
   return regionManager;
}
/**
 * Returns the marshaller, lazily constructing it on first use.
 * FIX: now synchronized, matching the lazy-init pattern of {@link #getRegionManager()},
 * so racing threads cannot construct and hand out two different marshaller instances.
 */
public synchronized VersionAwareMarshaller getMarshaller()
{
   if (marshaller_ == null)
   {
      marshaller_ = new VersionAwareMarshaller(getRegionManager(), configuration.isInactiveOnStartup(), configuration.isUseRegionBasedMarshalling(), configuration.getReplVersionString());
   }
   return marshaller_;
}
/**
 * Returns the default JGroups protocol-stack properties (UDP transport with
 * discovery, failure detection, NAKACK reliability, flow control and state
 * transfer). Subclasses may wish to override this method.
 */
protected String getDefaultProperties()
{
   return "UDP(mcast_addr=224.0.0.36;mcast_port=55566;ip_ttl=32;" +
         "mcast_send_buf_size=150000;mcast_recv_buf_size=80000):" +
         "PING(timeout=1000;num_initial_members=2):" +
         "MERGE2(min_interval=5000;max_interval=10000):" +
         "FD_SOCK:" +
         "VERIFY_SUSPECT(timeout=1500):" +
         "pbcast.NAKACK(gc_lag=50;max_xmit_size=8192;retransmit_timeout=600,1200,2400,4800):" +
         "UNICAST(timeout=600,1200,2400,4800):" +
         "pbcast.STABLE(desired_avg_gossip=20000):" +
         "FRAG(frag_size=8192;down_thread=false;up_thread=false):" +
         "pbcast.GMS(join_timeout=5000;join_retry_timeout=2000;" +
         "shun=false;print_local_addr=true):" +
         "pbcast.STATE_TRANSFER";
}
/**
 * Lazily creates the CacheLoaderManager if needed, then (re)applies the current
 * cache loader configuration to it.
 */
private void initialiseCacheLoaderManager() throws Exception
{
   if (cacheLoaderManager == null) cacheLoaderManager = new CacheLoaderManager();
   cacheLoaderManager.setConfig(configuration.getCacheLoaderConfig(), this);
}
/**
 * Sets the CacheLoader to use.
 * Provided for backwards compatibility.
 *
 * @param loader the loader to install on the (possibly freshly initialised) manager
 * @deprecated use CacheLoaderConfiguration instead
 */
public void setCacheLoader(CacheLoader loader)
{
   log.warn("Using deprecated config method setCacheLoader. This element will be removed in future, please use CacheLoaderConfiguration instead.");
   try
   {
      if (cacheLoaderManager == null) initialiseCacheLoaderManager();
   }
   catch (Exception e)
   {
      log.warn("Problem setting cache loader. Perhaps your cache loader config has not been set yet?");
   }
   // FIX: guard against an NPE — if initialisation failed before the manager was
   // constructed, there is nothing to install the loader on.
   if (cacheLoaderManager != null) cacheLoaderManager.setCacheLoader(loader);
}
/**
 * Purges the contents of all configured {@link CacheLoader}s, if any are set up.
 */
public void purgeCacheLoaders() throws Exception
{
   if (cacheLoaderManager == null) return;
   cacheLoaderManager.purgeLoaders(true);
}
// ---------------------------------------------------------------
// END: Methods to provide backward compatibility with older cache loader config settings
// ---------------------------------------------------------------

/**
 * Creates a multiplexer-backed JChannel, preferring a JChannelFactory from the
 * runtime config and falling back to invoking a legacy Multiplexer MBean.
 * Returns null (after logging a warning/error) whenever the multiplexer cannot
 * be used, so the caller can fall back to a plain channel.
 */
private JChannel getMultiplexerChannel()
{
   String stackName = configuration.getMultiplexerStack();
   RuntimeConfig rtc = configuration.getRuntimeConfig();
   JChannelFactoryMBean channelFactory = rtc.getMuxChannelFactory();
   try
   {
      if (channelFactory != null)
      {
         // preferred, typed path
         return (JChannel) channelFactory.createMultiplexerChannel(stackName, configuration.getClusterName());
      }
      else
      {
         // FIXME -- why do we support this? Move into a specialized JMX class
         // meant for dealing with old-style config files
         String serviceName = configuration.getMultiplexerService();
         if (serviceName == null || serviceName.length() == 0)
         {
            return null;
         }
         MBeanServer mbserver = rtc.getMbeanServer();
         if (mbserver == null)
         {
            log.warn("Multiplexer service specified but MBean server not found." +
                  " Multiplexer will not be used for cache cluster " + configuration.getClusterName() + ".");
            return null;
         }
         ObjectName muxName = new ObjectName(serviceName);
         // see if Multiplexer service is registered
         if (!mbserver.isRegistered(muxName))
         {
            log.warn("Multiplexer service specified but '" + serviceName + "' not registered." +
                  " Multiplexer will not be used for cache cluster " + configuration.getClusterName() + ".");
            return null;
         }
         // create the multiplexer channel and return as a JChannel instance
         Object[] params = {stackName, configuration.getClusterName()};
         return (JChannel) mbserver.invoke(muxName, CREATE_MUX_CHANNEL, params, MUX_TYPES);
      }
   }
   catch (Exception e)
   {
      log.error("Multiplexer channel creation failed." +
            " Multiplexer will not be used for cache cluster " + configuration.getClusterName() + ".", e);
      return null;
   }
}
// ================== methods to implement Cache and CacheSPI interfaces ============================

/**
 * Returns a read-only view of the interceptor chain.
 */
public List<Interceptor> getInterceptorChain()
{
   // callers must not mutate the chain directly; use add/removeInterceptor
   return Collections.unmodifiableList(getInterceptors());
}
/**
 * Registers a cache-wide listener with the notifier.
 */
public void addCacheListener(CacheListener l)
{
   getNotifier().addCacheListener(l);
}
/**
 * Per-region listener registration; unsupported in this release.
 *
 * @throws UnsupportedOperationException always
 */
public void addCacheListener(Fqn region, CacheListener l)
{
   throw new UnsupportedOperationException("Not implemented in this release");
}
/**
 * Unregisters a cache-wide listener from the notifier.
 */
public void removeCacheListener(CacheListener l)
{
   getNotifier().removeCacheListener(l);
}
/**
 * Per-region listener removal; unsupported in this release.
 *
 * @throws UnsupportedOperationException always
 */
public void removeCacheListener(Fqn region, CacheListener l)
{
   throw new UnsupportedOperationException("Not implemented in this release");
}
/**
 * Returns the set of registered cache-wide listeners.
 */
public Set<CacheListener> getCacheListeners()
{
   return getNotifier().getCacheListeners();
}
/**
 * Per-region listener lookup; unsupported in this release.
 *
 * @throws UnsupportedOperationException always
 */
public Set<CacheListener> getCacheListeners(Fqn region)
{
   throw new UnsupportedOperationException("Not implemented in this release");
}
/**
 * Inserts an interceptor at the given position and re-links the chain so the
 * next-pointers reflect the new ordering.
 */
public synchronized void addInterceptor(Interceptor i, int position)
{
   List<Interceptor> chain = getInterceptors();
   i.setCache(this);
   chain.add(position, i);
   // rebuild the linked structure of the chain after the structural change
   setInterceptorChain(InterceptorChainFactory.correctInterceptorChaining(chain));
}
/**
 * Removes the interceptor at the given position and re-links the chain.
 */
public synchronized void removeInterceptor(int position)
{
   List<Interceptor> chain = getInterceptors();
   chain.remove(position);
   setInterceptorChain(InterceptorChainFactory.correctInterceptorChaining(chain));
}
/**
 * Returns the RPCManager instance associated with this cache.
 */
public RPCManager getRPCManager()
{
   return RPCManager.getInstance(this);
}
/**
 * Returns the configured cluster name.
 */
public String getClusterName()
{
   return getConfiguration().getClusterName();
}
/**
 * Evicts the node at the given Fqn; when recursive, evicts the whole subtree
 * (children before parents).
 */
public void evict(Fqn fqn, boolean recursive)
{
   if (!recursive)
   {
      evict(fqn);
      return;
   }
   Node n = get(fqn);
   if (n != null) evictChildren(n);
}
/**
 * Depth-first eviction: evicts all descendants of the given node, then the node itself.
 */
private void evictChildren(Node n)
{
   for (Node child : n.getChildren())
   {
      evictChildren(child);
   }
   evict(n.getFqn());
}
/**
 * Delegates region lookup (and optional creation) to the RegionManager.
 */
public Region getRegion(Fqn fqn, boolean createIfAbsent)
{
   return getRegionManager().getRegion(fqn, createIfAbsent);
}
/**
 * Removes the node at the given Fqn (Cache interface alias for remove(Fqn)).
 */
public void removeNode(Fqn fqn)
{
   remove(fqn);
}
/**
 * Intended fail-silent put for data read from an external source; not yet
 * implemented (see JBCACHE-848).
 *
 * @throws UnsupportedOperationException always, until JBCACHE-848 is done
 */
public void putForExternalRead(Fqn fqn, Object key, Object value)
{
   throw new UnsupportedOperationException("Not yet implemented.");
   // TODO Implement this method PROPERLY as per JBCACHE-848
   //        getInvocationContext().getOptionOverrides().setFailSilently(true);
   //        put(fqn, key, value);
}
/**
 * Returns true once the cache has been started and not yet stopped.
 */
public boolean isStarted()
{
   return started;
}
}
1.1 date: 2006/12/30 17:49:54; author: msurtani; state: Exp;JBossCache/src/org/jboss/cache/UnversionedNode.java
Index: UnversionedNode.java
===================================================================
/*
* JBoss, Home of Professional Open Source
*
* Distributable under LGPL license.
* See terms of license at gnu.org.
*/
package org.jboss.cache;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.cache.factories.NodeFactory;
import org.jboss.cache.lock.IdentityLock;
import org.jboss.cache.lock.LockingException;
import org.jboss.cache.lock.NodeLock;
import org.jboss.cache.lock.TimeoutException;
import org.jboss.cache.marshall.MethodCall;
import org.jboss.cache.marshall.MethodCallFactory;
import org.jboss.cache.marshall.MethodDeclarations;
import org.jboss.cache.optimistic.DataVersion;
import org.jboss.cache.util.MapCopy;
import java.io.Serializable;
import java.util.AbstractSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
* Basic data node class.
*/
public class UnversionedNode extends AbstractNode implements NodeSPI
{
/**
 * Default output indent for printing.
 */
private static final int INDENT = 4;

/**
 * Debug log.
 */
private static Log log = LogFactory.getLog(UnversionedNode.class);

/**
 * True if all children have been loaded. This is set when CacheImpl.getChildrenNames() is called.
 */
private boolean childrenLoaded = false;

/**
 * True if data has been loaded from the cache loader.
 */
private boolean dataLoaded = false;

/**
 * Lock manager that manages locks to be acquired when accessing the node inside a transaction. Lazy set just in case
 * locking is not needed. Transient: never serialized with the node.
 */
private transient IdentityLock lock_ = null;

/**
 * A reference of the CacheImpl instance (via its SPI). Transient: never serialized with the node.
 */
private transient CacheSPI cache;

/**
 * Name of the node (its fully qualified name).
 */
private Fqn fqn;

/**
 * Map of general data keys to values. Null until the first write (lazily created).
 */
private Map data;
/**
 * Constructs a new node rooted at {@link Fqn#ROOT}.
 */
public UnversionedNode()
{
   this(Fqn.ROOT);
}
/**
 * Constructs a new node with the given FQN; no cache reference or data is set.
 */
public UnversionedNode(Fqn fqn)
{
   this.fqn = fqn;
}
/**
 * Constructs a new node with a name, Fqn, optional initial data and owning cache.
 */
public UnversionedNode(Object child_name, Fqn fqn, Map data, CacheSPI cache)
{
   init(child_name, fqn, cache);
   // copy the supplied entries into the (lazily created) data map
   if (data != null) this.data().putAll(data);
}
/**
 * Constructs a new node with a name, etc.
 *
 * @param mapSafe <code>true</code> if param <code>data</code> can safely be directly assigned to this object's
 *                {@link #data} field; <code>false</code> if param <code>data</code>'s contents should be copied into
 *                this object's {@link #data} field.
 * @throws IllegalArgumentException if mapSafe is true and data is a java.util.Collections
 *                                  wrapper (unmodifiable/synchronized/empty/singleton map),
 *                                  which cannot serve as the node's mutable backing map
 */
public UnversionedNode(Object child_name, Fqn fqn, Map data, boolean mapSafe, CacheSPI cache)
{
   init(child_name, fqn, cache);
   if (data != null)
   {
      if (mapSafe)
      {
         // FIX: replaced the unprofessional, uninformative "What's this crap?"
         // exception message with one that states the actual problem.
         if (data.getClass().getName().startsWith("java.util.Collections"))
         {
            throw new IllegalArgumentException("Cannot directly adopt a java.util.Collections map wrapper as node data (a mutable map is required): " + data);
         }
         this.data = data;
      }
      else
      {
         // defensive copy requested by the caller
         this.data = new HashMap(data);
      }
   }
}
/**
 * Initializes the node with a name, Fqn and owning cache, verifying that the
 * child name matches the last element of the Fqn (except for the root).
 */
private void init(Object child_name, Fqn fqn, CacheSPI cache)
{
   if (cache == null)
   {
      throw new IllegalArgumentException("no cache init for " + fqn);
   }
   this.cache = cache;
   this.fqn = fqn;
   // the child name must agree with the Fqn's last element (root has no name)
   boolean nameConsistent = fqn.isRoot() || child_name.equals(fqn.getLast());
   if (!nameConsistent)
   {
      throw new IllegalArgumentException("Child " + child_name + " must be last part of " + fqn);
   }
}
/**
 * Returns the parent node (resolved through the cache by the parent Fqn), or
 * null when this is the root.
 */
public Node getParent()
{
   return fqn.isRoot() ? null : cache.peek(fqn.getParent());
}
/**
 * Lazily creates the node lock using the cache's configured isolation level;
 * synchronized so only one lock instance is ever created.
 */
private synchronized void initLock()
{
   if (lock_ != null) return;
   lock_ = new IdentityLock(cache.getConfiguration().getIsolationLevel(), this);
}
/**
 * Lazily creates and returns the child map. The root node gets a larger, more
 * concurrent map (it is the busiest node); interior nodes get a small map to
 * save memory.
 */
private synchronized Map<Object, Node> children()
{
   if (children == null)
   {
      if (getFqn().isRoot())
      {
         // root sees the most traffic: larger table, more concurrency segments
         children = new ConcurrentHashMap<Object, Node>(64, .5f, 16);
      }
      else
      {
         // Less segments to save memory
         children = new ConcurrentHashMap<Object, Node>(4, .75f, 4);
      }
   }
   return children;
}
/**
 * Re-points this node, and recursively all children, at the given cache; the
 * lock is reset so it is lazily re-created against the new cache's isolation level.
 */
public void setCache(CacheSPI cache)
{
   this.cache = cache;
   this.lock_ = null;
   if (children == null) return;
   for (Node child : children.values())
   {
      child.getNodeSPI().setCache(cache);
   }
}
/**
 * Returns the cache this node belongs to.
 */
public CacheSPI getCache()
{
   return cache;
}
/**
 * Returns true if all children have been loaded (set by CacheImpl.getChildrenNames()).
 */
public boolean getChildrenLoaded()
{
   return childrenLoaded;
}
/**
 * Marks whether all children of this node have been loaded.
 */
public void setChildrenLoaded(boolean flag)
{
   childrenLoaded = flag;
}
/**
 * Returns the value mapped to the given key, or null when the data map has not
 * been created yet or holds no such key.
 */
public synchronized Object get(Object key)
{
   return data == null ? null : data.get(key);
}
/**
 * Returns true when the data map exists and contains the given key.
 */
public synchronized boolean containsKey(Object key)
{
   return data != null && data.containsKey(key);
}
/**
 * Returns the data keys, or an empty set if there are no keys.
 * Alias for {@code getKeys()}.
 */
public Set getDataKeys()
{
   return getKeys();
}
/**
 * Returns true when a lock exists and is currently read-locked.
 */
private boolean isReadLocked()
{
   return lock_ != null && lock_.isReadLocked();
}
/**
 * Returns true when a lock exists and is currently write-locked.
 */
private boolean isWriteLocked()
{
   return lock_ != null && lock_.isWriteLocked();
}
/**
 * Returns the node's lock, lazily creating it on first access.
 */
public IdentityLock getLock()
{
   initLock();
   return lock_;
}
/**
 * Returns an unmodifiable view of this node's data, or an empty map when no
 * data has been stored yet.
 */
public synchronized Map getData()
{
   return data == null ? Collections.EMPTY_MAP : Collections.unmodifiableMap(data);
}
/**
 * Stores the given map in this node. When the invocation context is flagged to
 * bypass the interceptor chain, the write is applied directly (optionally
 * clearing first) and the bypass flag is then reset; otherwise the operation is
 * routed through the cache so locking/replication interceptors apply.
 *
 * @param data  entries to store; a null map is a no-op (on the bypass path)
 * @param erase if true, existing data is cleared before the new entries are applied
 */
protected void put(Map data, boolean erase)
{
   if (cache.getInvocationContext().getOptionOverrides().isBypassInterceptorChain())
   {
      try
      {
         if (data == null)
         {
            return;
         }
         if (log.isTraceEnabled())
         {
            log.trace("put " + data.size() + " erase=" + erase);
         }
         synchronized (this)
         {
            if (erase)
            {
               if (this.data != null)
               {
                  this.data.clear();
               }
            }
            data().putAll(data);
         }
      }
      finally
      {
         // the bypass flag is one-shot: always reset it after the direct write
         cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(false);
      }
   }
   else
   {
      // TODO add erase option to Cache (?)
      if (erase)
      {
         ((CacheImpl) cache).removeData(fqn);
      }
      cache.put(fqn, data);
   }
}
/**
 * Stores a single key/value pair in this node. On the bypass path the write is
 * applied directly to the local data map (and the one-shot bypass flag reset);
 * otherwise the put is routed through the cache's interceptor chain.
 *
 * @return the previous value mapped to the key (bypass path), or whatever the
 *         cache's put returns on the interceptor path
 */
public Object put(Object key, Object value)
{
   if (cache.getInvocationContext().getOptionOverrides().isBypassInterceptorChain())
   {
      Object result;
      try
      {
         synchronized (this)
         {
            result = data().put(key, value);
         }
      }
      finally
      {
         // the bypass flag is one-shot: always reset it after the direct write
         cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(false);
      }
      return result;
   }
   else
   {
      return cache.put(getFqn(), key, value);
   }
}
/**
 * Returns the named direct child, creating it (registered under the given
 * transaction, if any) when it does not yet exist.
 */
public Node getOrCreateChild(Object child_name, GlobalTransaction gtx)
{
   final boolean createIfNotExists = true;
   return getOrCreateChild(child_name, gtx, createIfNotExists);
}
/**
 * Returns the named direct child, optionally creating it if absent.
 * <p/>
 * Creation uses a check/lock/re-check pattern: the candidate node is built
 * outside the monitor, then inserted under the monitor only if no other
 * thread created it in the meantime.  When a transaction is supplied, a
 * compensating removeNode call is registered so the creation can be undone
 * on rollback.
 *
 * @param child_name        name of the direct child; must not be null
 * @param gtx               transaction to register an undo operation with, or null
 * @param createIfNotExists if false, a missing child is simply reported as null
 * @return the existing or newly created child, or null when absent and not created
 * @throws IllegalArgumentException if child_name is null
 * @throws IllegalStateException    if the NodeFactory returns null
 */
private Node getOrCreateChild(Object child_name, GlobalTransaction gtx, boolean createIfNotExists)
{
   Node child;
   if (child_name == null)
   {
      throw new IllegalArgumentException("null child name");
   }
   child = children().get(child_name);
   if (createIfNotExists && child == null)
   {
      // construct the new child outside the synchronized block to avoid
      // spending any more time than necessary in the synchronized section
      Fqn child_fqn = new Fqn(this.fqn, child_name);
      Node newChild = NodeFactory.getInstance().createNode(child_name, child_fqn, this, null, cache);
      if (newChild == null)
      {
         throw new IllegalStateException();
      }
      synchronized (this)
      {
         // check again to see if the child exists
         // after acquiring exclusive lock
         child = children().get(child_name);
         if (child == null)
         {
            // pre-notification: second argument 'true' marks the "before" event
            cache.getNotifier().notifyNodeCreated(child_fqn, true);
            child = newChild;
            children.put(child_name, child);
            if (gtx != null)
            {
               // register a compensating removeNode so the creation can be
               // rolled back if the transaction aborts
               MethodCall undo_op = MethodCallFactory.create(MethodDeclarations.removeNodeMethodLocal, gtx,
                     child_fqn, false);
               cache.addUndoOperation(gtx, undo_op);
               // add the node name to the list maintained for the current tx
               // (needed for abort/rollback of transaction)
               // cache.addNode(gtx, child.getFqn());
            }
         }
      }
      // notify if we actually created a new child
      if (newChild == child)
      {
         if (log.isTraceEnabled())
         {
            log.trace("created child: fqn=" + child_fqn);
         }
         // post-notification: second argument 'false' marks the "after" event
         cache.getNotifier().notifyNodeCreated(child_fqn, false);
      }
   }
   return child;
}
/**
 * Creates (or returns an existing) direct child of this node.
 * <p/>
 * NOTE(review): the fqn and parent parameters are ignored — the child is
 * always created under this node using child_name only; confirm callers do
 * not rely on them.
 */
public Node createChild(Object child_name, Fqn fqn, Node parent)
{
   return getOrCreateChild(child_name, null, true);
}
/**
 * Creates (or returns an existing) direct child of this node and stores the
 * given key/value pair in it.
 * <p/>
 * NOTE(review): the fqn and parent parameters are ignored, as in the
 * three-argument overload — confirm callers do not rely on them.
 */
public Node createChild(Object child_name, Fqn fqn, Node parent, Object key, Object value)
{
   Node n = getOrCreateChild(child_name, null, true);
   n.put(key, value);
   return n;
}
/**
 * Removes the mapping for the given key from this node's data map.
 * <p/>
 * On the bypass-interceptor-chain path the removal is done directly on the
 * local map (under this node's monitor) and the bypass flag is reset in a
 * finally block; otherwise the removal goes through the cache's interceptor
 * chain.
 *
 * @return the previously mapped value, or null if none
 */
public Object remove(Object key)
{
   if (!cache.getInvocationContext().getOptionOverrides().isBypassInterceptorChain())
   {
      // normal path: route through the interceptor chain
      return cache.remove(getFqn(), key);
   }
   try
   {
      synchronized (this)
      {
         return (data == null) ? null : data.remove(key);
      }
   }
   finally
   {
      cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(false);
   }
}
/**
 * Appends a detailed, recursive dump of this node (name, data, children)
 * to the buffer at the given indent level.
 */
public void printDetails(StringBuffer sb, int indent)
{
   this.printDetailsInMap(sb, indent);
}
/**
 * Returns a debug string showing the Fqn, deleted state, data keys, child
 * names and read/write lock state.
 */
@Override
public String toString()
{
   // StringBuilder: the buffer never escapes this method, so StringBuffer's
   // synchronization is unnecessary overhead.  Also added braces to the
   // previously brace-less if/else.
   StringBuilder sb = new StringBuilder();
   if (deleted)
   {
      sb.append("Node (deleted) [ ").append(fqn);
   }
   else
   {
      sb.append("Node[ ").append(fqn);
   }
   // the data map is always read under this node's monitor
   synchronized (this)
   {
      if (data != null)
      {
         sb.append(" data=").append(data.keySet());
      }
   }
   if (children != null && !children.isEmpty())
   {
      sb.append(" child=").append(children.keySet());
   }
   if (lock_ != null)
   {
      if (isReadLocked())
      {
         sb.append(" RL");
      }
      if (isWriteLocked())
      {
         sb.append(" WL");
      }
   }
   sb.append("]");
   return sb.toString();
}
/**
 * Releases the lock held by the given caller on this node, if a lock
 * exists.  Trace messages are logged before and after the release,
 * labelling the lock WL only when the caller is the current write owner.
 */
public void release(Object caller)
{
   if (lock_ != null)
   {
      if (log.isTraceEnabled())
      {
         boolean wOwner = lock_.isWriteLocked() && lock_.getWriterOwner().equals(caller);
         log.trace("releasing " + (wOwner ? "WL" : "RL") + ": fqn=" + fqn + ", caller=" + caller);
      }
      lock_.release(caller);
      if (log.isTraceEnabled())
      {
         // re-evaluated after release: typically reports RL once the write
         // lock has been given up
         boolean wOwner = lock_.isWriteLocked() && lock_.getWriterOwner().equals(caller);
         log.trace("released " + (wOwner ? "WL" : "RL") + ": fqn=" + fqn + ", caller=" + caller);
      }
   }
}
/**
 * Adds (or retrieves) the descendant of this node identified by the given
 * relative Fqn.
 * <p/>
 * On the bypass-interceptor-chain path the child chain is created directly
 * via getOrCreateChild, one element at a time; otherwise an empty-map put
 * through the cache creates the node and it is then looked up.
 *
 * @param f Fqn relative to this node
 * @return the node at the given relative path
 */
public Node addChild(Fqn f)
{
   if (log.isTraceEnabled())
   {
      log.trace("adding child " + f + " to " + getFqn());
   }
   if (cache.getInvocationContext().getOptionOverrides().isBypassInterceptorChain())
   {
      Node newNode = this;
      try
      {
         GlobalTransaction gtx = cache.getInvocationContext().getGlobalTransaction();
         if (f.size() == 1)
         {
            newNode = getOrCreateChild(f.getLast(), gtx);
         }
         else
         {
            // recursively create children
            Node currentParent = this;
            for (Object o : f.peekElements())
            {
               newNode = currentParent.getNodeSPI().getOrCreateChild(o, gtx);
               currentParent = newNode;
            }
         }
      }
      finally
      {
         // the bypass option is single-shot: always reset it
         cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(false);
      }
      return newNode;
   }
   else
   {
      // create via the interceptor chain with an empty data map, then look
      // the new node up
      Fqn nf = new Fqn(getFqn(), f);
      cache.put(nf, Collections.emptyMap());
      return getChild(f);
   }
}
/**
 * Removes all entries from this node's data map.  On the
 * bypass-interceptor-chain path the local map is cleared directly (under
 * this node's monitor); otherwise the removal is routed through the cache.
 */
public void clearData()
{
   if (!cache.getInvocationContext().getOptionOverrides().isBypassInterceptorChain())
   {
      ((CacheImpl) cache).removeData(getFqn());
      return;
   }
   try
   {
      synchronized (this)
      {
         if (data != null)
         {
            data.clear();
         }
      }
   }
   finally
   {
      cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(false);
   }
}
/**
 * Returns the node identified by the given Fqn relative to this node.
 * <p/>
 * NOTE(review): on the bypass path only fqn.getLast() is consulted, so
 * multi-element Fqns are not resolved level by level here — confirm callers
 * only pass single-element Fqns when bypassing the interceptor chain.
 */
public Node getChild(Fqn fqn)
{
   if (cache.getInvocationContext().getOptionOverrides().isBypassInterceptorChain())
   {
      try
      {
         // the finally block below resets the bypass flag; the extra
         // pre-return reset in the previous revision was redundant
         return getChild(fqn.getLast());
      }
      finally
      {
         cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(false);
      }
   }
   else
   {
      return ((CacheImpl) cache).get(new Fqn(getFqn(), fqn));
   }
}
/**
 * Returns a live, read-only view of this node's child names (backed by the
 * children map via the ChildrenNames inner class).
 */
public Set<Object> getChildrenNames()
{
   return new ChildrenNames();
}
/**
 * Returns an unmodifiable view of the keys in this node's data map, or an
 * immutable empty set when no data map has been allocated.
 */
public synchronized Set getKeys()
{
   return (data == null)
         ? Collections.emptySet()
         : Collections.unmodifiableSet(data.keySet());
}
/**
 * Returns true if a node exists at the given Fqn relative to this node,
 * checked directly against the cache.
 */
public boolean hasChild(Fqn f)
{
   Fqn absolute = new Fqn(getFqn(), f);
   return ((CacheImpl) cache).exists(absolute);
}
/**
 * Moves this node (and its subtree) under the given new parent, delegating
 * to the cache implementation.
 *
 * @throws NodeNotExistsException declared for callers; propagation depends
 *                                on CacheImpl.move
 */
public void move(Node newParent) throws NodeNotExistsException
{
   if (log.isTraceEnabled())
   {
      log.trace(this.getFqn() + " move to " + newParent.getFqn());
   }
   // TODO
   // move must be added to Cache
   ((CacheImpl) cache).move(newParent.getFqn(), getFqn());
}
/**
 * Stores the key/value pair only when the key currently maps to null in the
 * cache.  Note: the check and the put are two separate operations (no
 * atomicity is visible in this method).
 */
public void putIfNull(Object k, Object v)
{
   Object existing = cache.get(getFqn(), k);
   if (existing == null)
   {
      put(k, v);
   }
}
/**
 * Stores the given map only when this node currently holds no data.  Note:
 * the emptiness check and the put are two separate operations (no atomicity
 * is visible in this method).
 */
public void putIfNull(Map m)
{
   boolean noData = getData().isEmpty();
   if (noData)
   {
      put(m);
   }
}
/**
 * Removes the descendant identified by the given Fqn relative to this node.
 * On the bypass-interceptor-chain path single-element Fqns are removed
 * directly from the children map; deeper Fqns are resolved and removed from
 * their immediate parent.  Otherwise the removal goes through the cache.
 */
public void removeChild(Fqn fqn)
{
   if (cache.getInvocationContext().getOptionOverrides().isBypassInterceptorChain())
   {
      if (fqn.size() == 1)
      {
         // NOTE(review): keyed on fqn.getName() — confirm this matches the key
         // the child was stored under; also assumes children is non-null here
         children.remove(fqn.getName());
      }
      else
      {
         Node c = getChild(fqn);
         // getChild() resets the bypass flag in its finally block; re-set it so
         // the recursive removeChild below also bypasses the interceptor chain
         cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(true);
         c.getParent().removeChild(new Fqn(fqn.getName()));
      }
      cache.getInvocationContext().getOptionOverrides().setBypassInterceptorChain(false);
   }
   else
   {
      cache.removeNode(new Fqn(getFqn(), fqn));
   }
}
/**
 * Returns the live children map, lazily allocating it via children() if
 * necessary.
 */
public Map<Object, Node> getChildrenMap()
{
   return this.children();
}
/**
 * Acquires this node's lock for the given caller, waiting at most the
 * given timeout.  Delegates to the (lazily initialised) node lock.
 */
public boolean acquire(Object caller, long lock_acquisition_timeout, NodeLock.LockType lockType) throws InterruptedException,
      LockingException, TimeoutException
{
   IdentityLock l = getLock();
   return l.acquire(caller, lock_acquisition_timeout, lockType);
}
/**
 * Acquires locks on this node and its subtree for the given caller.
 * Delegates to the (lazily initialised) node lock.
 */
public Set acquireAll(Object caller, long timeout, NodeLock.LockType lockType) throws LockingException, TimeoutException,
      InterruptedException
{
   IdentityLock l = getLock();
   return l.acquireAll(caller, timeout, lockType);
}
/**
 * Replaces this node's children map wholesale.
 */
public void setChildrenMap(Map children)
{
   this.children = children;
}
/**
 * Returns true if this node has one or more children.
 */
public boolean hasChildren()
{
   // !isEmpty() is the idiomatic equivalent of size() > 0
   return children != null && !children.isEmpty();
}
/**
 * Stores all entries of the given map in this node without erasing
 * existing data.
 */
public void put(Map data)
{
   final boolean erase = false;
   put(data, erase);
}
/**
 * Removes the direct child with the given name from the children map, if a
 * children map exists.
 */
public void removeChild(Object child_name)
{
   if (children == null) return;
   children.remove(child_name);
   if (log.isTraceEnabled())
   {
      log.trace(getName() + " removed child " + child_name);
   }
}
/**
 * Drops all children: clears the children map (if any) and then discards
 * the map reference itself.
 */
public void removeChildren()
{
   Map existing = children;
   if (existing != null)
   {
      existing.clear();
   }
   children = null;
}
/**
 * Appends a compact recursive dump of this node (separator, name and data
 * size) to the buffer, descending into children at indent + INDENT.
 */
public void print(StringBuffer sb, int indent)
{
   printIndent(sb, indent);
   sb.append(Fqn.SEPARATOR).append(getName()).append(" ").append(getData().size());
   if (children == null) return;
   for (Node child : children.values())
   {
      sb.append("\n");
      child.getNodeSPI().print(sb, indent + INDENT);
   }
}
// versioning
/**
 * Not supported by this node implementation — presumably overridden in a
 * versioned node subclass (TODO confirm).
 *
 * @throws UnsupportedOperationException always
 */
public void setVersion(DataVersion version)
{
   throw new UnsupportedOperationException("Versioning not supported");
}
/**
 * Not supported by this node implementation — presumably overridden in a
 * versioned node subclass (TODO confirm).
 *
 * @throws UnsupportedOperationException always
 */
public DataVersion getVersion()
{
   throw new UnsupportedOperationException("Versioning not supported");
}
/**
 * Appends 'indent' spaces to the buffer; a null buffer is ignored.
 */
private void printIndent(StringBuffer sb, int indent)
{
   if (sb == null) return;
   for (int i = 0; i < indent; i++)
   {
      sb.append(" ");
   }
}
/**
 * Registers an already-constructed node as a direct child under the given
 * name; a null name is ignored.
 */
public void addChild(Object child_name, Node n)
{
   if (child_name == null) return;
   children().put(child_name, n);
}
/**
 * Returns the name of this node, i.e. the last element of its Fqn.
 */
public Object getName()
{
   return this.fqn.getLast();
}
/**
 * Returns the Fqn (fully qualified name) of this node.
 */
public Fqn getFqn()
{
   return fqn;
}
/**
 * Re-bases this node at the given Fqn and recursively rewrites the Fqns of
 * all children so they hang off the new location.
 */
public void setFqn(Fqn fqn)
{
   if (log.isTraceEnabled())
   {
      log.trace(getFqn() + " set FQN " + fqn);
   }
   this.fqn = fqn;
   if (children == null) return;
   // propagate the new base Fqn down the subtree
   for (Map.Entry<Object, Node> entry : children.entrySet())
   {
      NodeSPI childSpi = entry.getValue().getNodeSPI();
      childSpi.setFqn(new Fqn(fqn, entry.getKey()));
   }
}
/**
 * Returns the direct child with the given name, or null when the name is
 * null or no such child exists.
 */
public Node getChild(Object child_name)
{
   if (child_name == null || children == null)
   {
      return null;
   }
   return children.get(child_name);
}
/**
 * Returns true if a direct child with the given (non-null) name exists.
 */
public boolean childExists(Object child_name)
{
   if (child_name == null || children == null) return false;
   return children.containsKey(child_name);
}
/**
 * Returns an unmodifiable snapshot of this node's children, excluding any
 * child whose deleted flag is set; empty set when there are none.
 */
public Set<Node> getChildren()
{
   // strip out deleted child nodes...
   if (children == null || children.isEmpty())
   {
      return Collections.emptySet();
   }
   Set<Node> live = new HashSet<Node>();
   for (Node child : children.values())
   {
      if (!((NodeSPI) child).isDeleted())
      {
         live.add(child);
      }
   }
   return Collections.unmodifiableSet(live);
}
/**
 * Returns an unmodifiable snapshot of this node's children.  When
 * includeMarkedForRemoval is false, delegates to {@link #getChildren()}
 * which filters out deleted children.
 */
public Set<Node> getChildren(boolean includeMarkedForRemoval)
{
   if (!includeMarkedForRemoval)
   {
      return getChildren();
   }
   if (children == null || children.isEmpty())
   {
      return Collections.emptySet();
   }
   return Collections.unmodifiableSet(new HashSet<Node>(children.values()));
}
/**
 * Returns the live (mutable) backing data map, lazily allocating it via
 * data() if necessary.  Unlike getData(), this is NOT an unmodifiable view.
 */
public synchronized Map<Object, Object> getRawData()
{
   return data();
}
/**
 * Returns the backing data map, allocating an empty HashMap on first use.
 * Never returns null.
 */
private Map data()
{
   if (data == null)
   {
      data = new HashMap<Object, Object>();
   }
   return this.data;
}
/**
 * Appends this node's details (separator, name and full data map) to the
 * buffer, then recurses into children at a deeper indent.
 */
private void printDetailsInMap(StringBuffer sb, int indent)
{
   printIndent(sb, indent);
   int childIndent = indent + 2; // increase indent for the recursive calls
   if (!getFqn().isRoot())
   {
      sb.append(Fqn.SEPARATOR);
   }
   sb.append(getName()).append(" ").append(data());
   if (children == null) return;
   for (Node child : children.values())
   {
      sb.append("\n");
      child.getNodeSPI().printDetails(sb, childIndent);
   }
}
/**
 * Returns true if the data was loaded from the cache loader.
 */
public boolean getDataLoaded()
{
   return this.dataLoaded;
}
/**
 * Sets if the data was loaded from the cache loader.
 */
public void setDataLoaded(boolean dataLoaded)
{
   this.dataLoaded = dataLoaded;
}
/**
 * Returns this node's SPI view; this class implements NodeSPI directly, so
 * it simply returns itself.
 */
public NodeSPI getNodeSPI()
{
   return this;
}
/**
 * Might be useful to expose; a debug feature for now.
 */
void setReadOnly()
{
   // Replaces the live data and children maps with MapCopy wrappers —
   // presumably immutable snapshots; confirm MapCopy semantics before
   // relying on this.
   this.data = new MapCopy(data());
   this.children = new MapCopy(children());
}
/**
 * A lightweight, read-only live view over the keys of the enclosing node's
 * children map.  Serialization detaches the view: writeReplace() emits an
 * unmodifiable HashSet snapshot instead of this inner class.
 */
private class ChildrenNames extends AbstractSet implements Serializable
{
   /**
    * Since writeReplace() returns a different class, this isn't really necessary.
    */
   private static final long serialVersionUID = 5468697840097489795L;

   @Override
   public Iterator iterator()
   {
      return (children == null)
            ? Collections.emptySet().iterator()
            : children.keySet().iterator();
   }

   @Override
   public boolean contains(Object o)
   {
      if (children == null) return false;
      return children.containsKey(o);
   }

   @Override
   public int size()
   {
      return (children == null) ? 0 : children.size();
   }

   // serialization proxy: snapshot the key set so the stream does not drag
   // the enclosing node along
   private Object writeReplace()
   {
      return (children == null)
            ? Collections.emptySet()
            : Collections.unmodifiableSet(new HashSet(children.keySet()));
   }
}
/*
private class ChildrenNodes<T> extends AbstractSet
{
private boolean includeDeleted;
public ChildrenNodes(boolean includeDeleted)
{
this.includeDeleted = includeDeleted;
}
@Override
public boolean contains(Object o)
{
return children != null && children.containsValue(o);
}
@Override
public Iterator iterator()
{
if (children == null)
{
return Collections.emptySet().iterator();
}
return children.values().iterator();
}
// although the map is empty
@Override
public void clear()
{
throw new UnsupportedOperationException();
}
@Override
public int size()
{
if (children == null)
{
return 0;
}
return children.size();
}
}
*/
}
More information about the jboss-cvs-commits
mailing list