[hornetq-commits] JBoss hornetq SVN: r11150 - in branches/Branch_2_2_EAP: src/main/org/hornetq/api/core and 36 other directories.

do-not-reply at jboss.org
Mon Aug 8 17:17:06 EDT 2011


Author: clebert.suconic at jboss.com
Date: 2011-08-08 17:17:04 -0400 (Mon, 08 Aug 2011)
New Revision: 11150

Added:
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSPagingFileDeleteTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SimpleSymmetricClusterTest.java
Modified:
   branches/Branch_2_2_EAP/build-hornetq.xml
   branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/DiscoveryGroupConfiguration.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ClientSessionFactory.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ServerLocator.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryInternal.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorInternal.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/Topology.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/journal/impl/JournalImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageCursorProviderImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageSubscriptionImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/QueueInfo.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/LocalQueueBinding.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/CoreProtocolManager.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/PacketImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/RemotingConnectionImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/NodeAnnounceMessage.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/SubscribeClusterTopologyUpdatesMessage.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMAcceptor.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnection.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnector.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/HttpAcceptorHandler.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyAcceptor.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyConnector.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/HornetQServer.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterConnection.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterManager.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/MessageFlowRecord.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/BridgeImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionBridge.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterManagerImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/QueueImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerConsumerImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/management/impl/ManagementServiceImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java
   branches/Branch_2_2_EAP/src/main/org/hornetq/utils/HornetQThreadFactory.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSMessageCounterTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/NIOvsOIOTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/PagingTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeReconnectTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterWithBackupTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithBackupTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithDiscoveryTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettySymmetricClusterWithBackupTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OneWayChainClusterTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OnewayTwoNodeClusterTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterWithBackupTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/TwoWayTwoNodeClusterTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailBackAutoTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverOnFlowControlTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverSharedServerTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousReattachTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/reattach/OrderReattachTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/restart/ClusterRestartTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/topology/TopologyClusterTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/SameProcessHornetQServer.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/TestableServer.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/http/CoreClientOverHttpTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/bridge/BridgeTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/AutoGroupingTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/GroupIDTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/SessionClosedOnRemotingConnectionFailureTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/management/QueueControlTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/persistence/JMSDynamicConfigTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/MessageSender.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/SpringIntegrationTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/stomp/StompTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/twitter/TwitterTest.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/JMSTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/ServiceTestBase.java
   branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/UnitTestCase.java
Log:
This commit addresses several issues: https://issues.jboss.org/browse/HORNETQ-711, https://issues.jboss.org/browse/HORNETQ-716, and https://issues.jboss.org/browse/HORNETQ-743 (it should also indirectly fix JBPAPP-6522).

Modified: branches/Branch_2_2_EAP/build-hornetq.xml
===================================================================
--- branches/Branch_2_2_EAP/build-hornetq.xml	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/build-hornetq.xml	2011-08-08 21:17:04 UTC (rev 11150)
@@ -44,6 +44,7 @@
 	   <!-- Version properties are read from hornetq-version.properties instead of duplicating them here -->
    <property file="src/config/common/hornetq-version.properties"/>
    <property name="hornetq.version.revision" value="0" />
+   <property name="twitter.consumerKey" value="null"/>
    <property name="hornetq.version.svnurl" value="https://svn.jboss.org/repos/hornetq/branches/Branch_2_2_EAP"/>
    <property name="hornetq.version.string"
              value="${hornetq.version.majorVersion}.${hornetq.version.minorVersion}.${hornetq.version.microVersion}.${hornetq.version.versionSuffix} (${hornetq.version.versionName}, ${hornetq.version.incrementingVersion})"/>

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/DiscoveryGroupConfiguration.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/DiscoveryGroupConfiguration.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/DiscoveryGroupConfiguration.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -179,4 +179,26 @@
       result = 31 * result + (int) (discoveryInitialWaitTimeout ^ (discoveryInitialWaitTimeout >>> 32));
       return result;
    }
+
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "DiscoveryGroupConfiguration [discoveryInitialWaitTimeout=" + discoveryInitialWaitTimeout +
+             ", groupAddress=" +
+             groupAddress +
+             ", groupPort=" +
+             groupPort +
+             ", localBindAddress=" +
+             localBindAddress +
+             ", name=" +
+             name +
+             ", refreshTimeout=" +
+             refreshTimeout +
+             "]";
+   }
+   
+   
 }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ClientSessionFactory.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ClientSessionFactory.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ClientSessionFactory.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -133,10 +133,16 @@
                                int ackBatchSize) throws HornetQException;
 
    void close();
+
+   /**
+    * Unlike close, this will only call cleanup on every created session and its child objects.
+    */
+   void cleanup();
    
    ServerLocator getServerLocator();
    
    CoreRemotingConnection getConnection();
 
     boolean isClosed();
+
 }
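
The new cleanup() method releases client-side state without the full remote close that close() performs; in the ClientSessionFactoryImpl changes further down it calls session.cleanUp(false) on a copy of the session set rather than closing each session against the server. A minimal usage sketch (hypothetical caller code, not part of this commit) of when one or the other applies:

import org.hornetq.api.core.client.ClientSessionFactory;

public class FactoryShutdownSketch
{
   // Illustration only: pick between a full close and a local-only cleanup
   // depending on whether the server is still reachable.
   public static void release(ClientSessionFactory factory, boolean serverReachable)
   {
      if (serverReachable)
      {
         // full shutdown: closes every created session against the server as well
         factory.close();
      }
      else
      {
         // server already gone (e.g. a failed node): only tear down the
         // client-side sessions and their child objects
         factory.cleanup();
      }
   }
}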

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ServerLocator.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ServerLocator.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/api/core/client/ServerLocator.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -26,6 +26,12 @@
  */
 public interface ServerLocator
 {
+   
+   /**
+    * Returns true if close has already been called on this ServerLocator.
+    * @return true if this locator is closed, false otherwise
+    */
+   boolean isClosed();
 
 	/**
 	 * This method will disable any checks when a GarbageCollection happens
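
The new isClosed() accessor lets callers detect a locator that has already been shut down; ClientSessionFactoryImpl.isClosed() further down now ORs it into its own result. A small guard sketch, assuming the existing no-argument createSessionFactory() on ServerLocator (illustrative helper, not part of this commit):

import org.hornetq.api.core.client.ClientSessionFactory;
import org.hornetq.api.core.client.ServerLocator;

public class LocatorGuardSketch
{
   // Refuse to hand out new factories from a closed locator instead of
   // letting the create call fail later with a less obvious error.
   public static ClientSessionFactory createIfOpen(ServerLocator locator) throws Exception
   {
      if (locator.isClosed())
      {
         throw new IllegalStateException("ServerLocator is already closed");
      }
      return locator.createSessionFactory();
   }
}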

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -22,7 +22,6 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Executor;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -83,9 +82,11 @@
    private static final long serialVersionUID = 2512460695662741413L;
 
    private static final Logger log = Logger.getLogger(ClientSessionFactoryImpl.class);
-   
+
    private static final boolean isTrace = log.isTraceEnabled();
 
+   private static final boolean isDebug = log.isDebugEnabled();
+
    // Attributes
    // -----------------------------------------------------------------------------------
 
@@ -115,7 +116,7 @@
 
    private final ExecutorFactory orderedExecutorFactory;
 
-   private final ExecutorService threadPool;
+   private final Executor threadPool;
 
    private final ScheduledExecutorService scheduledThreadPool;
 
@@ -129,7 +130,7 @@
 
    private final long maxRetryInterval;
 
-   private final int reconnectAttempts;
+   private int reconnectAttempts;
 
    private final Set<SessionFailureListener> listeners = new ConcurrentHashSet<SessionFailureListener>();
 
@@ -166,7 +167,7 @@
                                    final double retryIntervalMultiplier,
                                    final long maxRetryInterval,
                                    final int reconnectAttempts,
-                                   final ExecutorService threadPool,
+                                   final Executor threadPool,
                                    final ScheduledExecutorService scheduledThreadPool,
                                    final List<Interceptor> interceptors)
    {
@@ -204,7 +205,7 @@
       closeExecutor = orderedExecutorFactory.getExecutor();
 
       this.interceptors = interceptors;
- 
+
    }
 
    public void connect(int initialConnectAttempts, boolean failoverOnInitialConnection) throws HornetQException
@@ -215,12 +216,11 @@
       if (connection == null)
       {
          StringBuffer msg = new StringBuffer("Unable to connect to server using configuration ").append(connectorConfig);
-         if(backupConfig != null)
+         if (backupConfig != null)
          {
             msg.append(" and backup configuration ").append(backupConfig);
          }
-         throw new HornetQException(HornetQException.NOT_CONNECTED,
-               msg.toString());
+         throw new HornetQException(HornetQException.NOT_CONNECTED, msg.toString());
       }
 
    }
@@ -232,19 +232,23 @@
 
    public void setBackupConnector(TransportConfiguration live, TransportConfiguration backUp)
    {
-      if(live.equals(connectorConfig) && backUp != null)
+      if (live.equals(connectorConfig) && backUp != null)
       {
-         if (log.isDebugEnabled())
+         if (isDebug)
          {
-              log.debug("Setting up backup config = " + backUp + " for live = " + live);
+            log.debug("Setting up backup config = " + backUp + " for live = " + live);
          }
          backupConfig = backUp;
       }
       else
       {
-         if (log.isDebugEnabled())
+         if (isDebug)
          {
-            log.debug("ClientSessionFactoryImpl received backup update for live/backup pair = " + live + " / " + backUp + " but it didn't belong to " + this.connectorConfig);
+            log.debug("ClientSessionFactoryImpl received backup update for live/backup pair = " + live +
+                      " / " +
+                      backUp +
+                      " but it didn't belong to " +
+                      this.connectorConfig);
          }
       }
    }
@@ -361,8 +365,18 @@
 
    public void connectionDestroyed(final Object connectionID)
    {
-      handleConnectionFailure(connectionID,
-                              new HornetQException(HornetQException.NOT_CONNECTED, "Channel disconnected"));
+      // It has to use the same executor as the disconnect message is being sent through
+      
+      final HornetQException ex = new HornetQException(HornetQException.NOT_CONNECTED, "Channel disconnected");
+      
+      closeExecutor.execute(new Runnable()
+      {
+         public void run()
+         {
+            handleConnectionFailure(connectionID, ex);
+         }
+      });
+
    }
 
    public void connectionException(final Object connectionID, final HornetQException me)
@@ -379,7 +393,7 @@
          sessions.remove(session);
       }
    }
-   
+
    public void connectionReadyForWrites(final Object connectionID, final boolean ready)
    {
    }
@@ -420,6 +434,13 @@
          return;
       }
 
+      synchronized (exitLock)
+      {
+         exitLock.notifyAll();
+      }
+
+      forceReturnChannel1();
+
       // we need to stop the factory from connecting if it is in the middle of trying to failover before we get the lock
       causeExit();
       synchronized (createSessionLock)
@@ -449,15 +470,52 @@
       }
 
       closed = true;
+
+      serverLocator.factoryClosed(this);
    }
 
-    public boolean isClosed()
-    {
-        return closed;
-    }
+   public void cleanup()
+   {
+      if (closed)
+      {
+         return;
+      }
 
-    public ServerLocator getServerLocator()
+      // we need to stop the factory from connecting if it is in the middle of trying to failover before we get the lock
+      causeExit();
+      synchronized (createSessionLock)
+      {
+         HashSet<ClientSessionInternal> sessionsToClose;
+         synchronized (sessions)
+         {
+            sessionsToClose = new HashSet<ClientSessionInternal>(sessions);
+         }
+         // work on a copied set. the session will be removed from sessions when session.close() is called
+         for (ClientSessionInternal session : sessionsToClose)
+         {
+            try
+            {
+               session.cleanUp(false);
+            }
+            catch (Exception e)
+            {
+               log.warn("Unable to close session", e);
+            }
+         }
+
+         checkCloseConnection();
+      }
+
+      closed = true;
+   }
+
+   public boolean isClosed()
    {
+      return closed || serverLocator.isClosed();
+   }
+
+   public ServerLocator getServerLocator()
+   {
       return serverLocator;
    }
 
@@ -468,7 +526,7 @@
    {
       stopPingingAfterOne = true;
    }
-   
+
    public void resumePinging()
    {
       stopPingingAfterOne = false;
@@ -504,12 +562,11 @@
             return;
          }
 
-         
          if (isTrace)
          {
             log.trace("Client Connection failed, calling failure listeners and trying to reconnect, reconnectAttempts=" + reconnectAttempts);
          }
-         
+
          // We call before reconnection occurs to give the user a chance to do cleanup, like cancel messages
          callFailureListeners(me, false, false);
 
@@ -538,7 +595,6 @@
          // It can then release the channel 1 lock, and retry (which will cause locking on failoverLock
          // until failover is complete
 
-
          if (reconnectAttempts != 0)
          {
             lockChannel1();
@@ -603,7 +659,10 @@
          }
          else
          {
-            connection.destroy();
+            if (connection != null)
+            {
+               connection.destroy();
+            }
 
             connection = null;
          }
@@ -879,7 +938,7 @@
       {
          sessionsToFailover = new HashSet<ClientSessionInternal>(sessions);
       }
-      
+
       for (ClientSessionInternal session : sessionsToFailover)
       {
          session.handleFailover(connection);
@@ -888,6 +947,11 @@
 
    private void getConnectionWithRetry(final int reconnectAttempts)
    {
+      if (log.isTraceEnabled())
+      {
+         log.trace("getConnectionWithRetry::" + reconnectAttempts + " with retryInterval = " + retryInterval + " multiplier = " + retryIntervalMultiplier, new Exception ("trace"));
+      }
+
       long interval = retryInterval;
 
       int count = 0;
@@ -896,9 +960,9 @@
       {
          while (!exitLoop)
          {
-            if (log.isDebugEnabled())
+            if (isDebug)
             {
-               log.debug("Trying reconnection attempt " + count);
+               log.debug("Trying reconnection attempt " + count + "/" + reconnectAttempts);
             }
 
             getConnection();
@@ -910,29 +974,39 @@
                if (reconnectAttempts != 0)
                {
                   count++;
-                  
+
                   if (reconnectAttempts != -1 && count == reconnectAttempts)
                   {
-                     log.warn("Tried " + reconnectAttempts + " times to connect. Now giving up on reconnecting it.");
+                     if (reconnectAttempts != 1)
+                     {
+                        log.warn("Tried " + reconnectAttempts + " times to connect. Now giving up on reconnecting it.");
+                     }
+                     else
+                     if (reconnectAttempts == 1)
+                     {
+                        log.debug("Trying to connect towards " + this);
+                     }
 
                      return;
                   }
 
                   if (isTrace)
                   {
-                     log.trace("Waiting " + interval + 
-                               " milliseconds before next retry. RetryInterval=" + retryInterval + 
-                                  " and multiplier = " + retryIntervalMultiplier);
+                     log.trace("Waiting " + interval +
+                               " milliseconds before next retry. RetryInterval=" +
+                               retryInterval +
+                               " and multiplier = " +
+                               retryIntervalMultiplier);
                   }
-                  
+
                   try
                   {
-                      waitLock.wait(interval);
+                     waitLock.wait(interval);
                   }
                   catch (InterruptedException ignore)
                   {
                   }
-                  
+
                   // Exponential back-off
                   long newInterval = (long)(interval * retryIntervalMultiplier);
 
@@ -945,6 +1019,7 @@
                }
                else
                {
+                  log.debug("Could not connect to any server. Didn't have reconnection configured on the ClientSessionFactory");
                   return;
                }
             }
@@ -1018,24 +1093,33 @@
                                                          threadPool,
                                                          scheduledThreadPool);
 
+            if (log.isDebugEnabled())
+            {
+               log.debug("Trying to connect with connector = " + connectorFactory +
+                         ", parameters = " +
+                         connectorConfig.getParams() + " connector = " + connector);
+            }
+
+            
+            
             if (connector != null)
             {
                connector.start();
 
-               if (log.isDebugEnabled())
+               if (isDebug)
                {
                   log.debug("Trying to connect at the main server using connector :" + connectorConfig);
                }
-               
+
                tc = connector.createConnection();
 
                if (tc == null)
                {
-                  if (log.isDebugEnabled())
+                  if (isDebug)
                   {
                      log.debug("Main server is not up. Hopefully there's a backup configured now!");
                   }
-                  
+
                   try
                   {
                      connector.close();
@@ -1047,67 +1131,70 @@
                   connector = null;
                }
             }
-            //if connection fails we can try the backup in case it has come live
-            if(connector == null && backupConfig != null)
+            // if connection fails we can try the backup in case it has come live
+            if (connector == null)
             {
-               if (log.isDebugEnabled())
+               if (backupConfig != null)
                {
-                  log.debug("Trying backup config = " + backupConfig);
-               }
-               ConnectorFactory backupConnectorFactory = instantiateConnectorFactory(backupConfig.getFactoryClassName());
-               connector = backupConnectorFactory.createConnector(backupConfig.getParams(),
-                                                         handler,
-                                                         this,
-                                                         closeExecutor,
-                                                         threadPool,
-                                                         scheduledThreadPool);
-               if (connector != null)
-               {
-                  connector.start();
-
-                  tc = connector.createConnection();
-
-                  if (tc == null)
+                  if (isDebug)
                   {
-                     if (log.isDebugEnabled())
+                     log.debug("Trying backup config = " + backupConfig);
+                  }
+                  ConnectorFactory backupConnectorFactory = instantiateConnectorFactory(backupConfig.getFactoryClassName());
+                  connector = backupConnectorFactory.createConnector(backupConfig.getParams(),
+                                                                     handler,
+                                                                     this,
+                                                                     closeExecutor,
+                                                                     threadPool,
+                                                                     scheduledThreadPool);
+                  if (connector != null)
+                  {
+                     connector.start();
+   
+                     tc = connector.createConnection();
+   
+                     if (tc == null)
                      {
-                        log.debug("Backup is not active yet");
+                        if (isDebug)
+                        {
+                           log.debug("Backup is not active yet");
+                        }
+   
+                        try
+                        {
+                           connector.close();
+                        }
+                        catch (Throwable t)
+                        {
+                        }
+   
+                        connector = null;
                      }
-                     
-                     try
+                     else
                      {
-                        connector.close();
+                        /*looks like the backup is now live, lets use that*/
+   
+                        if (isDebug)
+                        {
+                           log.debug("Connected to the backup at " + backupConfig);
+                        }
+   
+                        connectorConfig = backupConfig;
+   
+                        backupConfig = null;
+   
+                        connectorFactory = backupConnectorFactory;
                      }
-                     catch (Throwable t)
-                     {
-                     }
-
-                     connector = null;
                   }
-                  else
+               }
+               else
+               {
+                  if (isTrace)
                   {
-                     /*looks like the backup is now live, lets use that*/
-                     
-                     if (log.isDebugEnabled())
-                     {
-                        log.debug("Connected to the backup at " + backupConfig);
-                     }
-                     
-                     connectorConfig = backupConfig;
-
-                     backupConfig = null;
-
-                     connectorFactory = backupConnectorFactory;
+                     log.trace("No Backup configured!", new Exception("trace"));
                   }
                }
             }
-            else
-            {
-               if (isTrace)
-               {
-                  log.trace("No Backup configured!");
-               }
-            }
          }
          catch (Exception e)
          {
@@ -1145,6 +1232,10 @@
 
          if (tc == null)
          {
+            if (isTrace)
+            {
+               log.trace("returning connection = " + connection + " as tc == null");
+            }
             return connection;
          }
 
@@ -1177,18 +1268,30 @@
             }
          }
 
-         if (serverLocator.isHA())
+         if (serverLocator.isHA() || serverLocator.isClusterConnection())
          {
+            if (isTrace)
+            {
+               log.trace(this + "::Subscribing Topology");
+            }
+
             channel0.send(new SubscribeClusterTopologyUpdatesMessage(serverLocator.isClusterConnection()));
             if (serverLocator.isClusterConnection())
             {
                TransportConfiguration config = serverLocator.getClusterTransportConfiguration();
-               channel0.send(new NodeAnnounceMessage(serverLocator.getNodeID(),
-                                                     serverLocator.isBackup(),
-                                                     config));
+               if (isDebug)
+               {
+                  log.debug("Announcing node " + serverLocator.getNodeID() + ", isBackup=" + serverLocator.isBackup());
+               }
+               channel0.send(new NodeAnnounceMessage(serverLocator.getNodeID(), serverLocator.isBackup(), config));
             }
          }
       }
+      
+      if (log.isTraceEnabled())
+      {
+         log.trace("returning " + connection);
+      }
 
       return connection;
    }
@@ -1246,9 +1349,15 @@
 
    private void forceReturnChannel1()
    {
-      Channel channel1 = connection.getChannel(1, -1);
+      if (connection != null)
+      {
+         Channel channel1 = connection.getChannel(1, -1);
 
-      channel1.returnBlocking();
+         if (channel1 != null)
+         {
+            channel1.returnBlocking();
+         }
+      }
    }
 
    private void checkTransportKeys(final ConnectorFactory factory, final Map<String, Object> params)
@@ -1285,6 +1394,11 @@
          {
             final DisconnectMessage msg = (DisconnectMessage)packet;
             
+            if (log.isTraceEnabled())
+            {
+               log.trace("Disconnect being called on client:" + msg + " server locator = " + serverLocator, new Exception ("trace"));
+            }
+
             closeExecutor.execute(new Runnable()
             {
                // Must be executed on new thread since cannot block the netty thread for a long time and fail can
@@ -1292,6 +1406,10 @@
                public void run()
                {
                   SimpleString nodeID = msg.getNodeID();
+                  if (log.isTraceEnabled())
+                  {
+                     log.trace("notifyDown nodeID=" + msg.getNodeID() + " on serverLocator=" + serverLocator + " csf created at ", ClientSessionFactoryImpl.this.e);
+                  }
                   if (nodeID != null)
                   {
                      serverLocator.notifyNodeDown(msg.getNodeID().toString());
@@ -1309,7 +1427,7 @@
 
             if (topMessage.isExit())
             {
-               if (log.isDebugEnabled())
+               if (isDebug)
                {
                   log.debug("Notifying " + topMessage.getNodeID() + " going down");
                }
@@ -1317,13 +1435,15 @@
             }
             else
             {
-               if (log.isDebugEnabled())
+               if (isDebug)
                {
-                  log.debug("Node " + topMessage.getNodeID() + " going up, connector = " + topMessage.getPair() + ", isLast=" + topMessage.isLast());
+                  log.debug("Node " + topMessage.getNodeID() +
+                            " going up, connector = " +
+                            topMessage.getPair() +
+                            ", isLast=" +
+                            topMessage.isLast() + " csf created at\nserverLocator=" + serverLocator, ClientSessionFactoryImpl.this.e);
                }
-               serverLocator.notifyNodeUp(topMessage.getNodeID(),
-                                          topMessage.getPair(),
-                                          topMessage.isLast());
+               serverLocator.notifyNodeUp(topMessage.getNodeID(), topMessage.getPair(), topMessage.isLast());
             }
          }
       }
@@ -1396,8 +1516,8 @@
          first = false;
 
          long now = System.currentTimeMillis();
-         
-         if (clientFailureCheckPeriod != -1 && connectionTTL != -1 && now >= lastCheck + connectionTTL )
+
+         if (clientFailureCheckPeriod != -1 && connectionTTL != -1 && now >= lastCheck + connectionTTL)
          {
             if (!connection.checkDataReceived())
             {
@@ -1447,4 +1567,13 @@
          cancelled = true;
       }
    }
+
+   /* (non-Javadoc)
+    * @see org.hornetq.core.client.impl.ClientSessionFactoryInternal#setReconnectAttempts(int)
+    */
+   public void setReconnectAttempts(int attempts)
+   {
+      this.reconnectAttempts = attempts;
+   }
+
 }
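
Much of the ClientSessionFactoryImpl change above touches the reconnect path: getConnectionWithRetry gains trace/debug logging, reconnectAttempts becomes mutable via setReconnectAttempts, and the retry loop keeps its exponential back-off (wait retryInterval, multiply by retryIntervalMultiplier after each failed attempt, stop once reconnectAttempts is exhausted, with -1 meaning retry forever and 0 meaning no reconnection). A self-contained sketch of that shape, with attemptConnection() as a stand-in for getConnection() and the cap at maxRetryInterval assumed from the surrounding fields (the capping code sits outside the hunks shown here):

public final class ReconnectBackoffSketch
{
   // Stand-in for getConnection(); always fails here so the loop is exercised.
   private static boolean attemptConnection()
   {
      return false;
   }

   static boolean connectWithRetry(long retryInterval,
                                   double retryIntervalMultiplier,
                                   long maxRetryInterval,
                                   int reconnectAttempts) throws InterruptedException
   {
      long interval = retryInterval;
      int count = 0;

      while (true)
      {
         if (attemptConnection())
         {
            return true;
         }

         if (reconnectAttempts == 0)
         {
            // no reconnection configured on this factory
            return false;
         }

         count++;
         if (reconnectAttempts != -1 && count == reconnectAttempts)
         {
            // tried the configured number of times, give up
            return false;
         }

         Thread.sleep(interval);

         // exponential back-off, capped (assumed) at maxRetryInterval
         interval = Math.min((long)(interval * retryIntervalMultiplier), maxRetryInterval);
      }
   }

   public static void main(String[] args) throws InterruptedException
   {
      // e.g. start at 100ms, double each time, cap at 2s, give up after 5 tries
      System.out.println(connectWithRetry(100, 2.0, 2000, 5));
   }
}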

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryInternal.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryInternal.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionFactoryInternal.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -46,4 +46,6 @@
    void setBackupConnector(TransportConfiguration live, TransportConfiguration backUp);
 
    Object getBackupConnector();
+
+   void setReconnectAttempts(int i);
 }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ClientSessionImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -516,6 +516,11 @@
    {
       checkClosed();
 
+      if (log.isTraceEnabled())
+      {
+         log.trace("Sending commit");
+      }
+      
       if (rollbackOnly)
       {
          rollbackOnFailover();
@@ -928,6 +933,10 @@
 
             if (response.isReattached())
             {
+               if (log.isDebugEnabled())
+               {
+                  log.debug("ClientSession reattached fine, replaying commands");
+               }
                // The session was found on the server - we reattached transparently ok
 
                channel.replayCommands(response.getLastConfirmedCommandID());
@@ -935,6 +944,11 @@
             else
             {
 
+               if (log.isDebugEnabled())
+               {
+                  log.debug("ClientSession couldn't be reattached, creating a new session");
+               }
+
                // The session wasn't found on the server - probably we're failing over onto a backup server where the
                // session won't exist or the target server has been restarted - in this case the session will need to be
                // recreated,
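
The new debug messages in ClientSessionImpl mark the two failover outcomes already described by the surrounding comments: if the server still knows the session, the channel simply replays unconfirmed commands; otherwise the session has to be recreated on the backup (or restarted) server. A self-contained outline of that decision, where Channel and ReattachSessionResponse are minimal hypothetical stand-ins for the real HornetQ types:

public class ReattachOutlineSketch
{
   interface Channel
   {
      void replayCommands(int lastConfirmedCommandID);
   }

   interface ReattachSessionResponse
   {
      boolean isReattached();

      int getLastConfirmedCommandID();
   }

   static void handleReattach(ReattachSessionResponse response, Channel channel, Runnable recreateSession)
   {
      if (response.isReattached())
      {
         // session found on the server: reattach transparently and replay
         // any commands the server has not yet confirmed
         channel.replayCommands(response.getLastConfirmedCommandID());
      }
      else
      {
         // failing over onto a backup or a restarted server: the session does
         // not exist there, so it must be recreated
         recreateSession.run();
      }
   }
}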

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -18,8 +18,19 @@
 import java.net.InetAddress;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
-import java.util.*;
-import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
 
 import org.hornetq.api.core.DiscoveryGroupConfiguration;
 import org.hornetq.api.core.HornetQException;
@@ -35,6 +46,7 @@
 import org.hornetq.core.cluster.DiscoveryListener;
 import org.hornetq.core.cluster.impl.DiscoveryGroupImpl;
 import org.hornetq.core.logging.Logger;
+import org.hornetq.core.remoting.FailureListener;
 import org.hornetq.utils.HornetQThreadFactory;
 import org.hornetq.utils.UUIDGenerator;
 
@@ -55,7 +67,7 @@
 
    private boolean clusterConnection;
 
-   private final Set<ClusterTopologyListener> topologyListeners = new HashSet<ClusterTopologyListener>();
+   private transient String identity;
 
    private Set<ClientSessionFactory> factories = new HashSet<ClientSessionFactory>();
 
@@ -65,13 +77,16 @@
 
    private StaticConnector staticConnector = new StaticConnector();
 
-   private Topology topology = new Topology();
+   private final Topology topology;
 
    private Pair<TransportConfiguration, TransportConfiguration>[] topologyArray;
 
    private boolean receivedTopology;
 
    private boolean compressLargeMessage;
+   
+   // if the system should shutdown the pool when shutting down
+   private transient boolean shutdownPool;
 
    private ExecutorService threadPool;
 
@@ -147,6 +162,8 @@
 
    private static ExecutorService globalThreadPool;
 
+   private Executor startExecutor;
+
    private static ScheduledExecutorService globalScheduledThreadPool;
 
    private String groupID;
@@ -158,14 +175,14 @@
    private boolean backup;
 
    private final Exception e = new Exception();
-   
+
    // To be called when there are ServerLocator being finalized.
    // To be used on test assertions
    public static Runnable finalizeCallback = null;
-   
+
    public static synchronized void clearThreadPools()
    {
-      
+
       if (globalThreadPool != null)
       {
          globalThreadPool.shutdown();
@@ -184,7 +201,7 @@
             globalThreadPool = null;
          }
       }
-      
+
       if (globalScheduledThreadPool != null)
       {
          globalScheduledThreadPool.shutdown();
@@ -235,6 +252,11 @@
 
    private void setThreadPools()
    {
+	  if (threadPool != null)
+	  {
+		  return;
+	  }
+	  else
       if (useGlobalPools)
       {
          threadPool = getGlobalThreadPool();
@@ -243,6 +265,8 @@
       }
       else
       {
+         this.shutdownPool = true;
+         
          ThreadFactory factory = new HornetQThreadFactory("HornetQ-client-factory-threads-" + System.identityHashCode(this),
                                                           true,
                                                           getThisClassLoader());
@@ -343,11 +367,21 @@
       }
    }
 
-   private ServerLocatorImpl(final boolean useHA,
+   private ServerLocatorImpl(final Topology topology,
+                             final boolean useHA,
+                             final ExecutorService threadPool, 
+                             final ScheduledExecutorService scheduledExecutor, 
                              final DiscoveryGroupConfiguration discoveryGroupConfiguration,
                              final TransportConfiguration[] transportConfigs)
    {
       e.fillInStackTrace();
+      
+      this.scheduledThreadPool = scheduledExecutor;
+      
+      this.threadPool = threadPool;
+      
+      this.topology = topology;
+      
       this.ha = useHA;
 
       this.discoveryGroupConfiguration = discoveryGroupConfiguration;
@@ -425,7 +459,8 @@
     */
    public ServerLocatorImpl(final boolean useHA, final DiscoveryGroupConfiguration groupConfiguration)
    {
-      this(useHA, groupConfiguration, null);
+      this(new Topology(null), useHA, null, null, groupConfiguration, null);
+      topology.setOwner(this);
    }
 
    /**
@@ -435,9 +470,32 @@
     */
    public ServerLocatorImpl(final boolean useHA, final TransportConfiguration... transportConfigs)
    {
-      this(useHA, null, transportConfigs);
+      this(new Topology(null), useHA, null, null, null, transportConfigs);
+      topology.setOwner(this);
    }
 
+   /**
+    * Create a ServerLocatorImpl using UDP discovery to lookup cluster
+    *
+    * @param discoveryAddress
+    * @param discoveryPort
+    */
+   public ServerLocatorImpl(final Topology topology, final boolean useHA, final ExecutorService threadPool, final ScheduledExecutorService scheduledExecutor, final DiscoveryGroupConfiguration groupConfiguration)
+   {
+      this(topology, useHA, threadPool, scheduledExecutor, groupConfiguration, null);
+      
+   }
+
+   /**
+    * Create a ServerLocatorImpl using a static list of live servers
+    *
+    * @param transportConfigs
+    */
+   public ServerLocatorImpl(final Topology topology, final boolean useHA, final ExecutorService threadPool, final ScheduledExecutorService scheduledExecutor, final TransportConfiguration... transportConfigs)
+   {
+      this(topology, useHA, threadPool, scheduledExecutor, null, transportConfigs);
+   }
+
    private TransportConfiguration selectConnector()
    {
       if (receivedTopology)
@@ -462,6 +520,8 @@
    {
       initialise();
 
+      this.startExecutor = executor;
+
       executor.execute(new Runnable()
       {
          public void run()
@@ -505,6 +565,11 @@
       addFactory(sf);
       return sf;
    }
+   
+   public boolean isClosed()
+   {
+      return closed || closing;
+   }
 
    public ClientSessionFactory createSessionFactory(final TransportConfiguration transportConfiguration) throws Exception
    {
@@ -629,294 +694,290 @@
          }
          while (retry);
 
-         if (ha)
+         if (ha || clusterConnection)
          {
-            long toWait = 30000;
-            long start = System.currentTimeMillis();
-            while (!receivedTopology && toWait > 0)
+            long timeout = System.currentTimeMillis() + 30000;
+            while (!ServerLocatorImpl.this.closed && !ServerLocatorImpl.this.closing && !receivedTopology && timeout > System.currentTimeMillis())
             {
                // Now wait for the topology
-
+               
                try
                {
-                  wait(toWait);
+                  wait(1000);
                }
                catch (InterruptedException ignore)
                {
                }
 
-               long now = System.currentTimeMillis();
-
-               toWait -= now - start;
-
-               start = now;
             }
 
-            if (toWait <= 0)
+            if (System.currentTimeMillis() > timeout && ! receivedTopology && !closed && !closing)
             {
                throw new HornetQException(HornetQException.CONNECTION_TIMEDOUT,
                                           "Timed out waiting to receive cluster topology");
             }
+
          }
 
          addFactory(factory);
 
          return factory;
       }
+
    }
 
-   public synchronized boolean isHA()
+   public boolean isHA()
    {
       return ha;
    }
 
-   public synchronized boolean isCacheLargeMessagesClient()
+   public boolean isCacheLargeMessagesClient()
    {
       return cacheLargeMessagesClient;
    }
 
-   public synchronized void setCacheLargeMessagesClient(final boolean cached)
+   public void setCacheLargeMessagesClient(final boolean cached)
    {
       cacheLargeMessagesClient = cached;
    }
 
-   public synchronized long getClientFailureCheckPeriod()
+   public long getClientFailureCheckPeriod()
    {
       return clientFailureCheckPeriod;
    }
 
-   public synchronized void setClientFailureCheckPeriod(final long clientFailureCheckPeriod)
+   public void setClientFailureCheckPeriod(final long clientFailureCheckPeriod)
    {
       checkWrite();
       this.clientFailureCheckPeriod = clientFailureCheckPeriod;
    }
 
-   public synchronized long getConnectionTTL()
+   public long getConnectionTTL()
    {
       return connectionTTL;
    }
 
-   public synchronized void setConnectionTTL(final long connectionTTL)
+   public void setConnectionTTL(final long connectionTTL)
    {
       checkWrite();
       this.connectionTTL = connectionTTL;
    }
 
-   public synchronized long getCallTimeout()
+   public long getCallTimeout()
    {
       return callTimeout;
    }
 
-   public synchronized void setCallTimeout(final long callTimeout)
+   public void setCallTimeout(final long callTimeout)
    {
       checkWrite();
       this.callTimeout = callTimeout;
    }
 
-   public synchronized int getMinLargeMessageSize()
+   public int getMinLargeMessageSize()
    {
       return minLargeMessageSize;
    }
 
-   public synchronized void setMinLargeMessageSize(final int minLargeMessageSize)
+   public void setMinLargeMessageSize(final int minLargeMessageSize)
    {
       checkWrite();
       this.minLargeMessageSize = minLargeMessageSize;
    }
 
-   public synchronized int getConsumerWindowSize()
+   public int getConsumerWindowSize()
    {
       return consumerWindowSize;
    }
 
-   public synchronized void setConsumerWindowSize(final int consumerWindowSize)
+   public void setConsumerWindowSize(final int consumerWindowSize)
    {
       checkWrite();
       this.consumerWindowSize = consumerWindowSize;
    }
 
-   public synchronized int getConsumerMaxRate()
+   public int getConsumerMaxRate()
    {
       return consumerMaxRate;
    }
 
-   public synchronized void setConsumerMaxRate(final int consumerMaxRate)
+   public void setConsumerMaxRate(final int consumerMaxRate)
    {
       checkWrite();
       this.consumerMaxRate = consumerMaxRate;
    }
 
-   public synchronized int getConfirmationWindowSize()
+   public int getConfirmationWindowSize()
    {
       return confirmationWindowSize;
    }
 
-   public synchronized void setConfirmationWindowSize(final int confirmationWindowSize)
+   public void setConfirmationWindowSize(final int confirmationWindowSize)
    {
       checkWrite();
       this.confirmationWindowSize = confirmationWindowSize;
    }
 
-   public synchronized int getProducerWindowSize()
+   public int getProducerWindowSize()
    {
       return producerWindowSize;
    }
 
-   public synchronized void setProducerWindowSize(final int producerWindowSize)
+   public void setProducerWindowSize(final int producerWindowSize)
    {
       checkWrite();
       this.producerWindowSize = producerWindowSize;
    }
 
-   public synchronized int getProducerMaxRate()
+   public int getProducerMaxRate()
    {
       return producerMaxRate;
    }
 
-   public synchronized void setProducerMaxRate(final int producerMaxRate)
+   public void setProducerMaxRate(final int producerMaxRate)
    {
       checkWrite();
       this.producerMaxRate = producerMaxRate;
    }
 
-   public synchronized boolean isBlockOnAcknowledge()
+   public boolean isBlockOnAcknowledge()
    {
       return blockOnAcknowledge;
    }
 
-   public synchronized void setBlockOnAcknowledge(final boolean blockOnAcknowledge)
+   public void setBlockOnAcknowledge(final boolean blockOnAcknowledge)
    {
       checkWrite();
       this.blockOnAcknowledge = blockOnAcknowledge;
    }
 
-   public synchronized boolean isBlockOnDurableSend()
+   public boolean isBlockOnDurableSend()
    {
       return blockOnDurableSend;
    }
 
-   public synchronized void setBlockOnDurableSend(final boolean blockOnDurableSend)
+   public void setBlockOnDurableSend(final boolean blockOnDurableSend)
    {
       checkWrite();
       this.blockOnDurableSend = blockOnDurableSend;
    }
 
-   public synchronized boolean isBlockOnNonDurableSend()
+   public boolean isBlockOnNonDurableSend()
    {
       return blockOnNonDurableSend;
    }
 
-   public synchronized void setBlockOnNonDurableSend(final boolean blockOnNonDurableSend)
+   public void setBlockOnNonDurableSend(final boolean blockOnNonDurableSend)
    {
       checkWrite();
       this.blockOnNonDurableSend = blockOnNonDurableSend;
    }
 
-   public synchronized boolean isAutoGroup()
+   public boolean isAutoGroup()
    {
       return autoGroup;
    }
 
-   public synchronized void setAutoGroup(final boolean autoGroup)
+   public void setAutoGroup(final boolean autoGroup)
    {
       checkWrite();
       this.autoGroup = autoGroup;
    }
 
-   public synchronized boolean isPreAcknowledge()
+   public boolean isPreAcknowledge()
    {
       return preAcknowledge;
    }
 
-   public synchronized void setPreAcknowledge(final boolean preAcknowledge)
+   public void setPreAcknowledge(final boolean preAcknowledge)
    {
       checkWrite();
       this.preAcknowledge = preAcknowledge;
    }
 
-   public synchronized int getAckBatchSize()
+   public int getAckBatchSize()
    {
       return ackBatchSize;
    }
 
-   public synchronized void setAckBatchSize(final int ackBatchSize)
+   public void setAckBatchSize(final int ackBatchSize)
    {
       checkWrite();
       this.ackBatchSize = ackBatchSize;
    }
 
-   public synchronized boolean isUseGlobalPools()
+   public boolean isUseGlobalPools()
    {
       return useGlobalPools;
    }
 
-   public synchronized void setUseGlobalPools(final boolean useGlobalPools)
+   public void setUseGlobalPools(final boolean useGlobalPools)
    {
       checkWrite();
       this.useGlobalPools = useGlobalPools;
    }
 
-   public synchronized int getScheduledThreadPoolMaxSize()
+   public int getScheduledThreadPoolMaxSize()
    {
       return scheduledThreadPoolMaxSize;
    }
 
-   public synchronized void setScheduledThreadPoolMaxSize(final int scheduledThreadPoolMaxSize)
+   public void setScheduledThreadPoolMaxSize(final int scheduledThreadPoolMaxSize)
    {
       checkWrite();
       this.scheduledThreadPoolMaxSize = scheduledThreadPoolMaxSize;
    }
 
-   public synchronized int getThreadPoolMaxSize()
+   public int getThreadPoolMaxSize()
    {
       return threadPoolMaxSize;
    }
 
-   public synchronized void setThreadPoolMaxSize(final int threadPoolMaxSize)
+   public void setThreadPoolMaxSize(final int threadPoolMaxSize)
    {
       checkWrite();
       this.threadPoolMaxSize = threadPoolMaxSize;
    }
 
-   public synchronized long getRetryInterval()
+   public long getRetryInterval()
    {
       return retryInterval;
    }
 
-   public synchronized void setRetryInterval(final long retryInterval)
+   public void setRetryInterval(final long retryInterval)
    {
       checkWrite();
       this.retryInterval = retryInterval;
    }
 
-   public synchronized long getMaxRetryInterval()
+   public long getMaxRetryInterval()
    {
       return maxRetryInterval;
    }
 
-   public synchronized void setMaxRetryInterval(final long retryInterval)
+   public void setMaxRetryInterval(final long retryInterval)
    {
       checkWrite();
       maxRetryInterval = retryInterval;
    }
 
-   public synchronized double getRetryIntervalMultiplier()
+   public double getRetryIntervalMultiplier()
    {
       return retryIntervalMultiplier;
    }
 
-   public synchronized void setRetryIntervalMultiplier(final double retryIntervalMultiplier)
+   public void setRetryIntervalMultiplier(final double retryIntervalMultiplier)
    {
       checkWrite();
       this.retryIntervalMultiplier = retryIntervalMultiplier;
    }
 
-   public synchronized int getReconnectAttempts()
+   public int getReconnectAttempts()
    {
       return reconnectAttempts;
    }
 
-   public synchronized void setReconnectAttempts(final int reconnectAttempts)
+   public void setReconnectAttempts(final int reconnectAttempts)
    {
       checkWrite();
       this.reconnectAttempts = reconnectAttempts;
@@ -933,23 +994,23 @@
       return initialConnectAttempts;
    }
 
-   public synchronized boolean isFailoverOnInitialConnection()
+   public boolean isFailoverOnInitialConnection()
    {
       return this.failoverOnInitialConnection;
    }
 
-   public synchronized void setFailoverOnInitialConnection(final boolean failover)
+   public void setFailoverOnInitialConnection(final boolean failover)
    {
       checkWrite();
       this.failoverOnInitialConnection = failover;
    }
 
-   public synchronized String getConnectionLoadBalancingPolicyClassName()
+   public String getConnectionLoadBalancingPolicyClassName()
    {
       return connectionLoadBalancingPolicyClassName;
    }
 
-   public synchronized void setConnectionLoadBalancingPolicyClassName(final String loadBalancingPolicyClassName)
+   public void setConnectionLoadBalancingPolicyClassName(final String loadBalancingPolicyClassName)
    {
       checkWrite();
       connectionLoadBalancingPolicyClassName = loadBalancingPolicyClassName;
@@ -975,12 +1036,12 @@
       return interceptors.remove(interceptor);
    }
 
-   public synchronized int getInitialMessagePacketSize()
+   public int getInitialMessagePacketSize()
    {
       return initialMessagePacketSize;
    }
 
-   public synchronized void setInitialMessagePacketSize(final int size)
+   public void setInitialMessagePacketSize(final int size)
    {
       checkWrite();
       initialMessagePacketSize = size;
@@ -1021,6 +1082,11 @@
       }
    }
 
+   public void setIdentity(String identity)
+   {
+      this.identity = identity;
+   }
+
    public void setNodeID(String nodeID)
    {
       this.nodeID = nodeID;
@@ -1076,9 +1142,18 @@
    {
       if (closed)
       {
+         if (log.isDebugEnabled())
+         {
+            log.debug(this + " is already closed when calling closed");
+         }
          return;
       }
 
+      if (log.isDebugEnabled())
+      {
+         log.debug(this + " is calling close", new Exception("trace"));
+      }
+
       closing = true;
 
       if (discoveryGroup != null)
@@ -1097,14 +1172,16 @@
          staticConnector.disconnect();
       }
 
-      for (ClientSessionFactory factory : factories)
+      Set<ClientSessionFactory> clonedFactory = new HashSet<ClientSessionFactory>(factories);
+
+      for (ClientSessionFactory factory : clonedFactory)
       {
          factory.close();
       }
 
       factories.clear();
 
-      if (!useGlobalPools)
+      if (shutdownPool)
       {
          if (threadPool != null)
          {
@@ -1143,17 +1220,24 @@
       closed = true;
    }
 
-   public synchronized void notifyNodeDown(final String nodeID)
+   public void notifyNodeDown(final String nodeID)
    {
-      boolean removed = false;
-
-      if (!ha)
+      if (!clusterConnection && !ha)
       {
+         if (log.isDebugEnabled())
+         {
+            log.debug(this + "::ignoring notifyNodeDown=" + nodeID + " as isHA=false");
+         }
          return;
       }
 
-      removed = topology.removeMember(nodeID);
+      if (log.isDebugEnabled())
+      {
+         log.debug("nodeDown " + this + " nodeID=" + nodeID + " as being down", new Exception("trace"));
+      }
 
+      topology.removeMember(nodeID);
+ 
       if (!topology.isEmpty())
       {
          updateArraysAndPairs();
@@ -1170,29 +1254,35 @@
          receivedTopology = false;
       }
 
-      if (removed)
-      {
-         for (ClusterTopologyListener listener : topologyListeners)
-         {
-            listener.nodeDown(nodeID);
-         }
-      }
    }
 
-   public synchronized void notifyNodeUp(final String nodeID,
+   public void notifyNodeUp(final String nodeID,
                                          final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
                                          final boolean last)
    {
-      if (!ha)
+      if (!clusterConnection && !ha)
       {
+         if (log.isDebugEnabled())
+         {
+            log.debug(this + "::Ignoring notifyNodeUp for " +
+                      nodeID +
+                      " connectorPair=" +
+                      connectorPair +
+                      ", since ha=false and clusterConnection=false");
+         }
          return;
       }
 
-      topology.addMember(nodeID, new TopologyMember(connectorPair));
+      if (log.isDebugEnabled())
+      {
+         log.debug("NodeUp " + this + "::nodeID=" + nodeID + ", connectorPair=" + connectorPair, new Exception ("trace"));
+      }
 
+      topology.addMember(nodeID, new TopologyMember(connectorPair), last);
+
       TopologyMember actMember = topology.getMember(nodeID);
 
-      if (actMember.getConnector().a != null && actMember.getConnector().b != null)
+      if (actMember != null && actMember.getConnector().a != null && actMember.getConnector().b != null)
       {
          for (ClientSessionFactory factory : factories)
          {
@@ -1206,27 +1296,51 @@
          updateArraysAndPairs();
       }
 
-      if (last)
+      synchronized (this)
       {
-         receivedTopology = true;
+         if (last)
+         {
+            receivedTopology = true;
+         }
+
+         // Notify if waiting on getting topology
+         notifyAll();
       }
+   }
 
-      for (ClusterTopologyListener listener : topologyListeners)
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      if (identity != null)
       {
-         listener.nodeUP(nodeID, connectorPair, last);
+         return "ServerLocatorImpl (identity=" + identity +
+                ") [initialConnectors=" +
+                Arrays.toString(initialConnectors) +
+                ", discoveryGroupConfiguration=" +
+                discoveryGroupConfiguration +
+                "]";
       }
-
-      // Notify if waiting on getting topology
-      notify();
+      else
+      {
+         return "ServerLocatorImpl [initialConnectors=" + Arrays.toString(initialConnectors) +
+                ", discoveryGroupConfiguration=" +
+                discoveryGroupConfiguration +
+                "]";
+      }
    }
 
-   private void updateArraysAndPairs()
+   private synchronized void updateArraysAndPairs()
    {
+      Collection<TopologyMember> membersCopy = topology.getMembers();
+      
       topologyArray = (Pair<TransportConfiguration, TransportConfiguration>[])Array.newInstance(Pair.class,
-                                                                                                topology.members());
+                                                                                                membersCopy.size());
 
       int count = 0;
-      for (TopologyMember pair : topology.getMembers())
+      for (TopologyMember pair : membersCopy)
       {
          topologyArray[count++] = pair.getConnector();
       }
@@ -1245,7 +1359,7 @@
          this.initialConnectors[count++] = entry.getConnector();
       }
 
-      if (ha && clusterConnection && !receivedTopology && initialConnectors.length > 0)
+      if (clusterConnection && !receivedTopology && initialConnectors.length > 0)
       {
          // FIXME the node is alone in the cluster. We create a connection to the new node
          // to trigger the node notification to form the cluster.
@@ -1264,14 +1378,13 @@
    {
       factories.remove(factory);
 
-      if (factories.isEmpty())
+      if (!clusterConnection  && factories.isEmpty())
       {
          // Go back to using the broadcast or static list
 
          receivedTopology = false;
-
-         topology = null;
-
+         
+         topologyArray = null;
       }
    }
 
@@ -1282,16 +1395,12 @@
 
    public void addClusterTopologyListener(final ClusterTopologyListener listener)
    {
-      topologyListeners.add(listener);
-      if(topology.members() > 0)
-      {
-         log.debug("ServerLocatorImpl.addClusterTopologyListener");
-      }
+      topology.addClusterTopologyListener(listener);
    }
 
    public void removeClusterTopologyListener(final ClusterTopologyListener listener)
    {
-      topologyListeners.remove(listener);
+      topology.removeClusterTopologyListener(listener);
    }
 
    public synchronized void addFactory(ClientSessionFactoryInternal factory)
@@ -1304,22 +1413,10 @@
       }
    }
 
-   public static void shutdown()
+   class StaticConnector implements Serializable
    {
-      if (globalScheduledThreadPool != null)
-      {
-         globalScheduledThreadPool.shutdown();
-         globalScheduledThreadPool = null;
-      }
-      if (globalThreadPool != null)
-      {
-         globalThreadPool.shutdown();
-         globalThreadPool = null;
-      }
-   }
+      private static final long serialVersionUID = 6772279632415242634l;
 
-   class StaticConnector implements Serializable
-   {
       private List<Connector> connectors;
 
       public ClientSessionFactory connect() throws HornetQException
@@ -1344,47 +1441,97 @@
 
          try
          {
-            
-            List<Future<ClientSessionFactory>> futuresList = new ArrayList<Future<ClientSessionFactory>>();
-            
-            for (Connector conn : connectors)
+
+            int retryNumber = 0;
+            while (csf == null && !ServerLocatorImpl.this.closed && !ServerLocatorImpl.this.closing)
             {
-               futuresList.add(threadPool.submit(conn));
-            }
-            
-            for (int i = 0, futuresSize = futuresList.size(); i < futuresSize; i++)
-            {
-               Future<ClientSessionFactory> future = futuresList.get(i);
-               try
+               retryNumber++;
+               for (Connector conn : connectors)
                {
-                  csf = future.get();
+                  if (log.isDebugEnabled())
+                  {
+                     log.debug(this + "::Submitting connect towards " + conn);
+                  }
+
+                  csf = conn.tryConnect();
+
                   if (csf != null)
-                     break;
+                  {
+                     csf.getConnection().addFailureListener(new FailureListener()
+                     {
+                        // In case the node we were connected to is gone, we need to restart the connection
+                        public void connectionFailed(HornetQException exception, boolean failedOver)
+                        {
+                           if (exception.getCode() == HornetQException.DISCONNECTED)
+                           {
+                              try
+                              {
+                                 ServerLocatorImpl.this.start(startExecutor);
+                              }
+                              catch (Exception e)
+                              {
+                                 // There isn't much to be done if this happens here
+                                 log.warn(e.getMessage());
+                              }
+                           }
+                        }
+                     });
+
+                     if (log.isDebugEnabled())
+                     {
+                        log.debug("XXX Returning " + csf +
+                                  " after " +
+                                  retryNumber +
+                                  " retries on StaticConnector " +
+                                  ServerLocatorImpl.this);
+                     }
+
+                     return csf;
+                  }
                }
-               catch (Exception e)
+
+               if (initialConnectAttempts >= 0 && retryNumber > initialConnectAttempts)
                {
-                  log.debug("unable to connect with static connector " + connectors.get(i).initialConnector);
+                  break;
                }
+
+               if (!closed && !closing)
+               {
+                  Thread.sleep(retryInterval);
+               }
             }
-            if (csf == null && !closed)
-            {
-               throw new HornetQException(HornetQException.NOT_CONNECTED, "Failed to connect to any static connectors");
-            }
+
          }
          catch (Exception e)
          {
+            log.warn(e.getMessage(), e);
             throw new HornetQException(HornetQException.NOT_CONNECTED, "Failed to connect to any static connectors", e);
          }
 
          if (csf == null && !closed)
          {
+            log.warn("Failed to connecto to any static connector, throwing exception now");
             throw new HornetQException(HornetQException.NOT_CONNECTED, "Failed to connect to any static connectors");
          }
+         if (log.isDebugEnabled())
+         {
+            log.debug("Returning " + csf + " on " + ServerLocatorImpl.this);
+         }
          return csf;
       }
 
       private synchronized void createConnectors()
       {
+         if (connectors != null)
+         {
+            for (Connector conn : connectors)
+            {
+               if (conn != null)
+               {
+                  conn.disconnect();
+               }
+            }
+         }
          connectors = new ArrayList<Connector>();
          for (TransportConfiguration initialConnector : initialConnectors)
          {
@@ -1423,7 +1570,7 @@
                      System.identityHashCode(this));
 
             log.warn("The ServerLocator you didn't close was created here:", e);
-            
+
             if (ServerLocatorImpl.finalizeCallback != null)
             {
                ServerLocatorImpl.finalizeCallback.run();
@@ -1435,14 +1582,12 @@
          super.finalize();
       }
 
-      class Connector implements Callable<ClientSessionFactory>
+      class Connector
       {
          private TransportConfiguration initialConnector;
 
          private volatile ClientSessionFactoryInternal factory;
 
-         private boolean isConnected = false;
-
          private boolean interrupted = false;
 
          private Exception e;
@@ -1453,42 +1598,28 @@
             this.factory = factory;
          }
 
-         public ClientSessionFactory call() throws HornetQException
+         public ClientSessionFactory tryConnect() throws HornetQException
          {
-            try
+            if (log.isDebugEnabled())
             {
-               factory.connect(reconnectAttempts, failoverOnInitialConnection);
+               log.debug(this + "::Trying to connect to " + factory);
             }
-            catch (HornetQException e)
+            try
             {
-               if (!interrupted)
+               ClientSessionFactoryInternal factoryToUse = factory;
+               if (factoryToUse != null)
                {
-                  this.e = e;
-                  throw e;
+                  factoryToUse.connect(1, false);
                }
-               /*if(factory != null)
-               {
-                  factory.close();
-                  factory = null;
-               }*/
-               return null;
+               return factoryToUse;
             }
-            isConnected = true;
-            for (Connector connector : connectors)
+            catch (HornetQException e)
             {
-               if (!connector.isConnected())
-               {
-                  connector.disconnect();
-               }
+               log.debug(this + "::Exception on establish connector initial connection", e);
+               return null;
             }
-            return factory;
          }
 
-         public boolean isConnected()
-         {
-            return isConnected;
-         }
-
          public void disconnect()
          {
             interrupted = true;
@@ -1496,10 +1627,20 @@
             if (factory != null)
             {
                factory.causeExit();
-               factory.close();
+               factory.cleanup();
                factory = null;
             }
          }
+
+         /* (non-Javadoc)
+          * @see java.lang.Object#toString()
+          */
+         @Override
+         public String toString()
+         {
+            return "Connector [initialConnector=" + initialConnector + "]";
+         }
+
       }
    }
 }
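
For reference, the new StaticConnector.connect() above boils down to a bounded round-robin over the configured connectors: try each one in turn, sleep retryInterval between passes, and give up once initialConnectAttempts passes have been made (a negative value means retry forever). A minimal standalone sketch of that loop, using java.util.concurrent.Callable as a stand-in for the real Connector/ClientSessionFactory types (the names here are illustrative, not the HornetQ API):

import java.util.List;
import java.util.concurrent.Callable;

public class RetryLoopSketch
{
   /** Tries each connector in order, retrying whole passes until one succeeds or the attempt budget runs out. */
   public static <T> T connectWithRetries(final List<Callable<T>> connectors,
                                          final int initialConnectAttempts,
                                          final long retryIntervalMillis) throws Exception
   {
      int retryNumber = 0;
      while (true)
      {
         retryNumber++;
         for (Callable<T> connector : connectors)
         {
            T result = connector.call();   // returns null on failure, like tryConnect()
            if (result != null)
            {
               return result;              // first connector that answers wins
            }
         }
         // a negative value means retry forever; otherwise stop after the configured number of passes
         if (initialConnectAttempts >= 0 && retryNumber > initialConnectAttempts)
         {
            return null;                   // the real code throws NOT_CONNECTED at this point
         }
         Thread.sleep(retryIntervalMillis);  // back off before the next pass
      }
   }
}

The real code additionally registers a FailureListener on the first successful factory so the locator restarts itself if that node later disconnects.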

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorInternal.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorInternal.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/ServerLocatorInternal.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,14 +13,13 @@
 
 package org.hornetq.core.client.impl;
 
-import org.hornetq.api.core.HornetQException;
+import java.util.concurrent.Executor;
+
 import org.hornetq.api.core.Pair;
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.core.client.ClientSessionFactory;
 import org.hornetq.api.core.client.ServerLocator;
 
-import java.util.concurrent.Executor;
-
 /**
  * A ServerLocatorInternal
  *
@@ -33,6 +32,9 @@
    void start(Executor executor) throws Exception;
    
    void factoryClosed(final ClientSessionFactory factory);
+   
+   /** Used to better identify Cluster Connection Locators in logs while debugging. */
+   void setIdentity(String identity);
 
    void setNodeID(String nodeID);
 

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/Topology.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/Topology.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/client/impl/Topology.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,10 +13,15 @@
 package org.hornetq.core.client.impl;
 
 import java.io.Serializable;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executor;
 
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.core.client.ClusterTopologyListener;
@@ -28,96 +33,262 @@
  */
 public class Topology implements Serializable
 {
-   
-   /**
-    * 
-    */
+
    private static final long serialVersionUID = -9037171688692471371L;
 
-   
+   private final Set<ClusterTopologyListener> topologyListeners = new HashSet<ClusterTopologyListener>();
 
    private static final Logger log = Logger.getLogger(Topology.class);
 
+   /** Used to debug operations.
+    * 
+    *  Someone may argue this is not needed. But it's impossible to debug anything related to topology without knowing what node
+    *  or what object missed a Topology update.
+    *  
+    *  Hence I added some extra information here to help locate debugging issues. 
+    *  */
+   private volatile Object owner;
+
+   private volatile Executor executor;
+
+   public Topology(final Object owner)
+   {
+      this.owner = owner;
+      Topology.log.debug("Topology@" + Integer.toHexString(System.identityHashCode(this)) + " CREATE",
+                         new Exception("trace")); // Delete this line
+   }
+
    /*
     * topology describes the other cluster nodes that this server knows about:
     *
     * keys are node IDs
     * values are a pair of live/backup transport configurations
     */
-   private Map<String, TopologyMember> topology = new HashMap<String, TopologyMember>();
+   private final Map<String, TopologyMember> topology = new ConcurrentHashMap<String, TopologyMember>();
 
-   private boolean debug = log.isDebugEnabled();
+   public void setExecutor(Executor executor)
+   {
+      this.executor = executor;
+   }
 
-   public synchronized boolean addMember(String nodeId, TopologyMember member)
+   public void addClusterTopologyListener(final ClusterTopologyListener listener)
    {
-      boolean replaced = false;
-      TopologyMember currentMember = topology.get(nodeId);
-      if (debug)
+      if (log.isDebugEnabled())
       {
-         log.debug("adding = " + nodeId + ":" + member.getConnector());
-         log.debug("before----------------------------------");
-         log.debug(describe());
+         log.debug(this + "::PPP Adding topology listener " + listener, new Exception("Trace"));
       }
-      if(currentMember == null)
+      synchronized (topologyListeners)
       {
-         topology.put(nodeId, member);
-         replaced = true;
+         topologyListeners.add(listener);
       }
-      else
+   }
+
+   public void removeClusterTopologyListener(final ClusterTopologyListener listener)
+   {
+      if (log.isDebugEnabled())
       {
-         if(hasChanged(currentMember.getConnector().a, member.getConnector().a) && member.getConnector().a != null)
+         log.debug(this + "::PPP Removing topology listener " + listener, new Exception("Trace"));
+      }
+      synchronized (topologyListeners)
+      {
+         topologyListeners.remove(listener);
+      }
+   }
+
+   public boolean addMember(final String nodeId, final TopologyMember member, final boolean last)
+   {
+      boolean replaced = false;
+
+      synchronized (this)
+      {
+         TopologyMember currentMember = topology.get(nodeId);
+
+         if (Topology.log.isDebugEnabled())
          {
-            currentMember.getConnector().a =  member.getConnector().a;
-            replaced = true;
+            Topology.log.debug(this + "::adding = " + nodeId + ":" + member.getConnector(), new Exception("trace"));
+            Topology.log.debug(describe("Before:"));
          }
-         if(hasChanged(currentMember.getConnector().b, member.getConnector().b) && member.getConnector().b != null)
+
+         if (currentMember == null)
          {
-            currentMember.getConnector().b =  member.getConnector().b;
             replaced = true;
+            if (Topology.log.isDebugEnabled())
+            {
+               Topology.log.debug("Add " + this +
+                                  " MEMBER WAS NULL, Add member nodeId=" +
+                                  nodeId +
+                                  " member = " +
+                                  member +
+                                  " replaced = " +
+                                  replaced +
+                                  " size = " +
+                                  topology.size(), new Exception("trace"));
+            }
+            topology.put(nodeId, member);
          }
+         else
+         {
+            if (hasChanged(currentMember.getConnector().a, member.getConnector().a) && member.getConnector().a != null)
+            {
+               currentMember.getConnector().a = member.getConnector().a;
+               replaced = true;
+            }
+            if (hasChanged(currentMember.getConnector().b, member.getConnector().b) && member.getConnector().b != null)
+            {
+               currentMember.getConnector().b = member.getConnector().b;
+               replaced = true;
+            }
 
-         if(member.getConnector().a == null)
+            if (member.getConnector().a == null)
+            {
+               member.getConnector().a = currentMember.getConnector().a;
+            }
+            if (member.getConnector().b == null)
+            {
+               member.getConnector().b = currentMember.getConnector().b;
+            }
+         }
+
+         if (Topology.log.isDebugEnabled())
          {
-            member.getConnector().a = currentMember.getConnector().a;
+            Topology.log.debug(this + "::Topology updated=" + replaced);
+            Topology.log.debug(describe(this + "::After:"));
          }
-         if(member.getConnector().b == null)
+
+         if (Topology.log.isDebugEnabled())
          {
-            member.getConnector().b = currentMember.getConnector().b;
+            Topology.log.debug(this +
+                               " Add member nodeId=" +
+                               nodeId +
+                               " member = " +
+                               member +
+                               " replaced = " +
+                               replaced +
+                               " size = " +
+                               topology.size(), new Exception("trace"));
          }
+
       }
-      if(debug)
+      
+      if (replaced)
       {
-         log.debug("Topology updated=" + replaced);
-         log.debug(describe());
+         ArrayList<ClusterTopologyListener> copy = copyListeners();
+         for (ClusterTopologyListener listener : copy)
+         {
+            if (Topology.log.isTraceEnabled())
+            {
+               Topology.log.trace(this + " informing " + listener + " about node up = " + nodeId);
+            }
+
+            try
+            {
+               listener.nodeUP(nodeId, member.getConnector(), last);
+            }
+            catch (Throwable e)
+            {
+               log.warn (e.getMessage(), e);
+            }
+         }
       }
+
       return replaced;
    }
 
-   public synchronized boolean removeMember(String nodeId)
+   /**
+    * @return
+    */
+   private ArrayList<ClusterTopologyListener> copyListeners()
    {
-      TopologyMember member = topology.remove(nodeId);
-      if (debug)
+      ArrayList<ClusterTopologyListener> listenersCopy;
+      synchronized (topologyListeners)
       {
-         log.debug("Removing member " + member);
+         listenersCopy = new ArrayList<ClusterTopologyListener>(topologyListeners);
       }
-      return (member != null);
+      return listenersCopy;
    }
 
-   public void sendTopology(ClusterTopologyListener listener)
+   public boolean removeMember(final String nodeId)
    {
+      TopologyMember member;
+      
+      synchronized (this)
+      {
+         member = topology.remove(nodeId);
+      }
+      
+
+      if (Topology.log.isDebugEnabled())
+      {
+         Topology.log.debug("ZZZ removeMember " + this +
+                            " removing nodeID=" +
+                            nodeId +
+                            ", result=" +
+                            member +
+                            ", size = " +
+                            topology.size(), new Exception("trace"));
+      }
+
+      if (member != null)
+      {
+         ArrayList<ClusterTopologyListener> copy = copyListeners();
+
+         for (ClusterTopologyListener listener : copy)
+         {
+            if (Topology.log.isTraceEnabled())
+            {
+               Topology.log.trace(this + " informing " + listener + " about node down = " + nodeId);
+            }
+            listener.nodeDown(nodeId);
+         }
+      }
+      return member != null;
+   }
+
+   /**
+    * Sends the member update to all listeners, regardless of whether it has changed.
+    * @param nodeID
+    * @param member
+    */
+   public void sendMemberToListeners(String nodeID, TopologyMember member)
+   {
+      // To make sure it was updated
+      addMember(nodeID, member, false);
+      
+      ArrayList<ClusterTopologyListener> copy = copyListeners();
+
+      // Now force sending it
+      for (ClusterTopologyListener listener : copy)
+      {
+         if (log.isDebugEnabled())
+         {
+            log.debug("Informing client listener " + listener +
+                      " about itself node " +
+                      nodeID +
+                      " with connector=" +
+                      member.getConnector());
+         }
+         listener.nodeUP(nodeID, member.getConnector(), false);
+      }
+   }
+
+   public synchronized void sendTopology(final ClusterTopologyListener listener)
+   {
       int count = 0;
+
       Map<String, TopologyMember> copy;
+
       synchronized (this)
       {
          copy = new HashMap<String, TopologyMember>(topology);
       }
+
       for (Map.Entry<String, TopologyMember> entry : copy.entrySet())
       {
          listener.nodeUP(entry.getKey(), entry.getValue().getConnector(), ++count == copy.size());
       }
    }
 
-   public TopologyMember getMember(String nodeID)
+   public TopologyMember getMember(final String nodeID)
    {
       return topology.get(nodeID);
    }
@@ -129,10 +300,15 @@
 
    public Collection<TopologyMember> getMembers()
    {
-      return topology.values();
+      ArrayList<TopologyMember> members;
+      synchronized (this)
+      {
+         members = new ArrayList<TopologyMember>(topology.values());
+      }
+      return members;
    }
 
-   public int nodes()
+   public synchronized int nodes()
    {
       int count = 0;
       for (TopologyMember member : topology.values())
@@ -149,10 +325,15 @@
       return count;
    }
 
-   public String describe()
+   public synchronized String describe()
    {
+      return describe("");
+   }
 
-      String desc = "";
+   public synchronized String describe(final String text)
+   {
+
+      String desc = text + "\n";
       for (Entry<String, TopologyMember> entry : new HashMap<String, TopologyMember>(topology).entrySet())
       {
          desc += "\t" + entry.getKey() + " => " + entry.getValue() + "\n";
@@ -163,6 +344,10 @@
 
    public void clear()
    {
+      if (Topology.log.isDebugEnabled())
+      {
+         Topology.log.debug(this + "::clear", new Exception("trace"));
+      }
       topology.clear();
    }
 
@@ -171,25 +356,44 @@
       return topology.size();
    }
 
-   private boolean hasChanged(TransportConfiguration currentConnector, TransportConfiguration connector)
+   public void setOwner(final Object owner)
    {
-      return (currentConnector == null && connector != null) || (currentConnector != null && !currentConnector.equals(connector));
+      this.owner = owner;
    }
 
-   public TransportConfiguration getBackupForConnector(TransportConfiguration connectorConfiguration)
+   private boolean hasChanged(final TransportConfiguration currentConnector, final TransportConfiguration connector)
    {
+      return currentConnector == null && connector != null ||
+             currentConnector != null &&
+             !currentConnector.equals(connector);
+   }
+
+   public TransportConfiguration getBackupForConnector(final TransportConfiguration connectorConfiguration)
+   {
       for (TopologyMember member : topology.values())
       {
-         if(member.getConnector().a != null && member.getConnector().a.equals(connectorConfiguration))
+         if (member.getConnector().a != null && member.getConnector().a.equals(connectorConfiguration))
          {
-            return member.getConnector().b;  
+            return member.getConnector().b;
          }
       }
       return null;
    }
 
-   public void setDebug(boolean b)
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
    {
-      debug = b;
+      if (owner == null)
+      {
+         return super.toString();
+      }
+      else
+      {
+         return "Topology@" + Integer.toHexString(System.identityHashCode(this)) + "[owner=" + owner + "]";
+      }
    }
+
 }
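
The listener handling in Topology now follows the usual snapshot idiom: mutate the map under the lock, copy the listener set (copyListeners()), and fire the callbacks outside the lock so a slow or re-entrant listener can never block addMember()/removeMember(). A small generic sketch of that idiom (the NodeListener type below is a placeholder, not ClusterTopologyListener):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;

public class ListenerSnapshotSketch
{
   // Placeholder for ClusterTopologyListener.
   public interface NodeListener
   {
      void nodeUp(String nodeId);
   }

   private final Set<NodeListener> listeners = new HashSet<NodeListener>();

   public void addListener(final NodeListener listener)
   {
      synchronized (listeners)
      {
         listeners.add(listener);
      }
   }

   public void notifyNodeUp(final String nodeId)
   {
      ArrayList<NodeListener> copy;
      synchronized (listeners)
      {
         copy = new ArrayList<NodeListener>(listeners);   // snapshot taken under the lock
      }
      for (NodeListener listener : copy)                  // callbacks fire outside the lock
      {
         try
         {
            listener.nodeUp(nodeId);
         }
         catch (Throwable t)
         {
            // a misbehaving listener must not prevent the others from being notified
         }
      }
   }
}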

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/journal/impl/JournalImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/journal/impl/JournalImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/journal/impl/JournalImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -97,7 +97,7 @@
 
    // This is useful at debug time...
    // if you set it to true, all the appends, deletes, rollbacks, commits, etc.. are sent to System.out
-   private static final boolean TRACE_RECORDS = false;
+   private static final boolean TRACE_RECORDS = trace;
 
    // This method exists just to make debug easier.
    // I could replace log.trace by log.info temporarily while I was debugging

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageCursorProviderImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageCursorProviderImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageCursorProviderImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -89,6 +89,10 @@
 
    public synchronized PageSubscription createSubscription(long cursorID, Filter filter, boolean persistent)
    {
+      if (log.isDebugEnabled())
+      {
+         log.debug(this.pagingStore.getAddress() + " creating subscription " + cursorID + " with filter " + filter, new Exception ("trace"));
+      }
       PageSubscription activeCursor = activeCursors.get(cursorID);
       if (activeCursor != null)
       {
@@ -330,6 +334,11 @@
             {
                return;
             }
+            
+            if (log.isDebugEnabled())
+            {
+               log.debug("Asserting cleanup for address " + this.pagingStore.getAddress());
+            }
 
             ArrayList<PageSubscription> cursorList = new ArrayList<PageSubscription>();
             cursorList.addAll(activeCursors.values());
@@ -344,9 +353,21 @@
                {
                   if (!cursor.isComplete(minPage))
                   {
+                     if (log.isDebugEnabled())
+                     {
+                        log.debug("Cursor " + cursor + " was considered incomplete at page " + minPage);
+                     }
+                     
                      complete = false;
                      break;
                   }
+                  else
+                  {
+                     if (log.isDebugEnabled())
+                     {
+                        log.debug("Cursor " + cursor + "was considered **complete** at page " + minPage);
+                     }
+                  }
                }
 
                if (complete)
@@ -516,12 +537,21 @@
       for (PageSubscription cursor : cursorList)
       {
          long firstPage = cursor.getFirstPage();
+         if (log.isDebugEnabled())
+         {
+            log.debug(this.pagingStore.getAddress() + " has a cursor " + cursor + " with first page=" + firstPage);
+         }
          if (firstPage < minPage)
          {
             minPage = firstPage;
          }
       }
 
+      if (log.isDebugEnabled())
+      {
+         log.debug(this.pagingStore.getAddress() + " has minPage=" + minPage);
+      }
+
       return minPage;
 
    }
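
The extra debug output in the last hunk traces a simple calculation: the provider takes the smallest first page still referenced by any subscription, and only pages below that point are candidates for cleanup. As a plain sketch of that minimum (Subscription here is an illustrative stand-in for PageSubscription, not the real interface):

import java.util.List;

public class MinPageSketch
{
   // Illustrative stand-in for the part of PageSubscription used here.
   public interface Subscription
   {
      long getFirstPage();
   }

   /** Returns the smallest first page still needed by any subscription (pages below it can be cleaned up). */
   public static long minPage(final List<Subscription> cursors, final long currentMinPage)
   {
      long min = currentMinPage;
      for (Subscription cursor : cursors)
      {
         long firstPage = cursor.getFirstPage();
         if (firstPage < min)
         {
            min = firstPage;
         }
      }
      return min;
   }
}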

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageSubscriptionImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageSubscriptionImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/paging/cursor/impl/PageSubscriptionImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -308,7 +308,7 @@
    @Override
    public String toString()
    {
-      return "PageSubscriptionImpl [cursorId=" + cursorId + ", queue=" + queue + "]";
+      return "PageSubscriptionImpl [cursorId=" + cursorId + ", queue=" + queue + ", filter = " + filter + "]";
    }
 
 
@@ -648,22 +648,42 @@
          Collections.sort(recoveredACK);
 
          boolean first = true;
+         
+         long txDeleteCursorOnReload = -1;
 
          for (PagePosition pos : recoveredACK)
          {
             lastAckedPosition = pos;
-            PageCursorInfo positions = getPageInfo(pos);
-            if (first)
+            PageCursorInfo pageInfo = getPageInfo(pos);
+            
+            if (pageInfo == null)
             {
-               first = false;
-               if (pos.getMessageNr() > 0)
+               log.warn("Couldn't find page cache for page " + pos + ", removing it from the journal");
+               if (txDeleteCursorOnReload == -1)
                {
-                  positions.confirmed.addAndGet(pos.getMessageNr());
+                  txDeleteCursorOnReload = store.generateUniqueID();
                }
+               store.deleteCursorAcknowledgeTransactional(txDeleteCursorOnReload, pos.getRecordID());
+             }
+            else
+            {
+               if (first)
+               {
+                  first = false;
+                  if (pos.getMessageNr() > 0)
+                  {
+                     pageInfo.confirmed.addAndGet(pos.getMessageNr());
+                  }
+               }
+   
+               pageInfo.addACK(pos);
             }
-
-            positions.addACK(pos);
          }
+         
+         if (txDeleteCursorOnReload >= 0)
+         {
+            store.commit(txDeleteCursorOnReload);
+         }
 
          recoveredACK.clear();
          recoveredACK = null;
@@ -723,6 +743,10 @@
       if (create && pageInfo == null)
       {
          PageCache cache = cursorProvider.getPageCache(pos);
+         if (cache == null)
+         {
+            return null;
+         }
          pageInfo = new PageCursorInfo(pos.getPageNr(), cache.getNumberOfMessages(), cache);
          consumedPages.put(pos.getPageNr(), pageInfo);
       }
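
The reload fix above uses a lazily created journal transaction: a transaction id is generated only when the first orphaned ACK (one whose page cache no longer exists) is found, and the transaction is committed only if something was actually staged for deletion. A sketch of that shape against a hypothetical store interface (not the real StorageManager API):

import java.util.List;

public class LazyCleanupSketch
{
   // Hypothetical subset of the storage API, for illustration only.
   public interface Store
   {
      long generateUniqueID();

      void deleteRecordTransactional(long txID, long recordID) throws Exception;

      void commit(long txID) throws Exception;
   }

   /** Deletes the given orphaned record ids, creating a journal transaction only if one is needed. */
   public static void cleanupOrphans(final Store store, final List<Long> orphanedRecordIds) throws Exception
   {
      long txID = -1;
      for (long recordID : orphanedRecordIds)
      {
         if (txID == -1)
         {
            txID = store.generateUniqueID();   // create the transaction lazily, on first use
         }
         store.deleteRecordTransactional(txID, recordID);
      }
      if (txID >= 0)
      {
         store.commit(txID);                   // commit only if something was actually staged
      }
   }
}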

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -1748,13 +1748,15 @@
 
                   // TODO - this involves a scan - we should find a quicker way of doing it
                   MessageReference removed = queue.removeReferenceWithID(messageID);
-
-                  referencesToAck.add(removed);
-
+                  
                   if (removed == null)
                   {
                      log.warn("Failed to remove reference for " + messageID);
                   }
+                  else
+                  {
+                     referencesToAck.add(removed);
+                  }
 
                   break;
                }
@@ -2558,6 +2560,13 @@
       @Override
       public String toString()
       {
+         // This can be useful when testing. Most tests in the testsuite use a SimpleString for the duplicate ID,
+         // and printing it that way makes it easier to validate the journal in those tests.
+         // In that case you may uncomment the next two lines and use them to replace the toString used by PrintData.
+
+         // SimpleString simpleStr = new SimpleString(duplID);
+         // return "DuplicateIDEncoding [address=" + address + ", duplID=" + simpleStr + "]";
+         
          return "DuplicateIDEncoding [address=" + address + ", duplID=" + Arrays.toString(duplID) + "]";
       }
 

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/QueueInfo.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/QueueInfo.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/QueueInfo.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -129,4 +129,30 @@
    {
       numberOfConsumers--;
    }
+
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "QueueInfo [routingName=" + routingName +
+             ", clusterName=" +
+             clusterName +
+             ", address=" +
+             address +
+             ", filterString=" +
+             filterString +
+             ", id=" +
+             id +
+             ", filterStrings=" +
+             filterStrings +
+             ", numberOfConsumers=" +
+             numberOfConsumers +
+             ", distance=" +
+             distance +
+             "]";
+   }
+   
+   
 }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/LocalQueueBinding.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/LocalQueueBinding.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/LocalQueueBinding.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -135,6 +135,11 @@
    {
       return BindingType.LOCAL_QUEUE;
    }
+   
+   public void close() throws Exception
+   {
+      queue.close();
+   }
 
    /* (non-Javadoc)
     * @see java.lang.Object#toString()
@@ -142,12 +147,16 @@
    @Override
    public String toString()
    {
-      return "LocalQueueBinding [address=" + address + ", name=" + name + ", filter=" + filter + "]";
+      return "LocalQueueBinding [address=" + address +
+             ", queue=" +
+             queue +
+             ", filter=" +
+             filter +
+             ", name=" +
+             name +
+             ", clusterName=" +
+             clusterName +
+             "]";
    }
-   
-   public void close() throws Exception
-   {
-      queue.close();
-   }
 
 }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -213,7 +213,7 @@
    {
       if (isTrace)
       {
-         log.trace("Receiving notification : " + notification);
+         log.trace("Receiving notification : " + notification + " on server " + this.server);
       }
       synchronized (notificationLock)
       {
@@ -470,6 +470,11 @@
       }
 
       String uid = UUIDGenerator.getInstance().generateStringUUID();
+      
+      if (isTrace)
+      {
+         log.trace("Sending notification for addBinding " + binding + " from server " + server);
+      }
 
       managementService.sendNotification(new Notification(uid, NotificationType.BINDING_ADDED, props));
    }
@@ -747,6 +752,11 @@
       {
          throw new IllegalStateException("Cannot find queue " + queueName);
       }
+      
+      if (log.isDebugEnabled())
+      {
+         log.debug("PostOffice.sendQueueInfoToQueue on server=" + this.server + ", queueName=" + queueName + " and address=" + address);
+      }
 
       Queue queue = (Queue)binding.getBindable();
 
@@ -763,6 +773,10 @@
 
          for (QueueInfo info : queueInfos.values())
          {
+            if (log.isTraceEnabled())
+            {
+               log.trace("QueueInfo on sendQueueInfoToQueue = " + info);
+            }
             if (info.getAddress().startsWith(address))
             {
                message = createQueueInfoMessage(NotificationType.BINDING_ADDED, queueName);
@@ -783,7 +797,7 @@
                   message = createQueueInfoMessage(NotificationType.CONSUMER_CREATED, queueName);
 
                   message.putStringProperty(ManagementHelper.HDR_ADDRESS, info.getAddress());
-                  message.putStringProperty(ManagementHelper.HDR_CLUSTER_NAME, info.getClusterName());
+                  message.putStringProperty(ManagementHelper.HDR_CLUSTER_NAME, info.getClusterName());   
                   message.putStringProperty(ManagementHelper.HDR_ROUTING_NAME, info.getRoutingName());
                   message.putIntProperty(ManagementHelper.HDR_DISTANCE, info.getDistance());
 
@@ -811,6 +825,15 @@
 
    }
 
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "PostOfficeImpl [server=" + server + "]";
+   }
+
    // Private -----------------------------------------------------------------
 
    /**
@@ -1106,14 +1129,12 @@
 
          if (rejectDuplicates && isDuplicate)
          {
-            StringBuffer warnMessage = new StringBuffer();
-            warnMessage.append("Duplicate message detected - message will not be routed. Message information:\n");
-            warnMessage.append(message.toString());
-            PostOfficeImpl.log.warn(warnMessage.toString());
+            String warnMessage = "Duplicate message detected - message will not be routed. Message information:" + message.toString();
+            PostOfficeImpl.log.warn(warnMessage);
 
             if (context.getTransaction() != null)
             {
-               context.getTransaction().markAsRollbackOnly(new HornetQException(HornetQException.DUPLICATE_ID_REJECTED, warnMessage.toString()));
+               context.getTransaction().markAsRollbackOnly(new HornetQException(HornetQException.DUPLICATE_ID_REJECTED, warnMessage));
             }
 
             return false;
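
The duplicate-detection branch touched above keeps the same contract: when duplicates are rejected, the message is not routed, a warning is logged, and any enclosing transaction is poisoned by marking it rollback-only. A rough sketch of that control flow with simplified stand-in types:

public class DuplicateRejectSketch
{
   // Simplified stand-in for the transaction used while routing.
   public interface Tx
   {
      void markAsRollbackOnly(Exception reason);
   }

   /** Returns false (do not route) when a duplicate is detected and duplicates are being rejected. */
   public static boolean checkDuplicate(final boolean rejectDuplicates, final boolean isDuplicate,
                                        final Tx tx, final String messageDescription)
   {
      if (rejectDuplicates && isDuplicate)
      {
         String warn = "Duplicate message detected - message will not be routed. Message information: " + messageDescription;
         System.err.println(warn);                     // the real code logs a warning instead
         if (tx != null)
         {
            tx.markAsRollbackOnly(new Exception(warn)); // the real code uses HornetQException.DUPLICATE_ID_REJECTED
         }
         return false;
      }
      return true;
   }
}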

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/CoreProtocolManager.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/CoreProtocolManager.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/CoreProtocolManager.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -24,6 +24,7 @@
 import org.hornetq.api.core.client.ClusterTopologyListener;
 import org.hornetq.api.core.client.HornetQClient;
 import org.hornetq.core.config.Configuration;
+import org.hornetq.core.logging.Logger;
 import org.hornetq.core.protocol.core.Channel;
 import org.hornetq.core.protocol.core.ChannelHandler;
 import org.hornetq.core.protocol.core.CoreRemotingConnection;
@@ -49,6 +50,10 @@
  */
 public class CoreProtocolManager implements ProtocolManager
 {
+   private static final Logger log = Logger.getLogger(CoreProtocolManager.class);
+   
+   private static final boolean isTrace = log.isTraceEnabled();
+   
    private final HornetQServer server;
 
    private final List<Interceptor> interceptors;
@@ -113,13 +118,18 @@
                {
                   public void nodeUP(String nodeID, Pair<TransportConfiguration, TransportConfiguration> connectorPair, boolean last)
                   {
-                     channel0.send(new ClusterTopologyChangeMessage(nodeID, connectorPair, last));
+                      channel0.send(new ClusterTopologyChangeMessage(nodeID, connectorPair, last));
                   }
                   
                   public void nodeDown(String nodeID)
                   {
-                     channel0.send(new ClusterTopologyChangeMessage(nodeID));
+                      channel0.send(new ClusterTopologyChangeMessage(nodeID));
                   }
+                  
+                  public String toString()
+                  {
+                     return "Remote Proxy on channel " + Integer.toHexString(System.identityHashCode(this));
+                  }
                };
                
                final boolean isCC = msg.isClusterConnection();
@@ -147,6 +157,10 @@
                {
                   pair = new Pair<TransportConfiguration, TransportConfiguration>(msg.getConnector(), null);
                }
+               if (isTrace)
+               {
+                  log.trace("Server " + server + " receiving nodeUp from NodeID=" + msg.getNodeID() + ", pair=" + pair);
+               }
                server.getClusterManager().notifyNodeUp(msg.getNodeID(), pair, false, true);
             }
          }
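
The anonymous ClusterTopologyListener registered on channel 0 above acts as a remote proxy: every nodeUP/nodeDown callback is turned into a ClusterTopologyChangeMessage and pushed down the channel to the subscribed client. A minimal sketch of that proxy idea, with placeholder Channel and listener types instead of the real ones:

public class TopologyProxySketch
{
   // Placeholder types standing in for the real Channel and ClusterTopologyChangeMessage.
   public interface Channel
   {
      void send(Object packet);
   }

   public interface TopologyListener
   {
      void nodeUp(String nodeID, boolean last);

      void nodeDown(String nodeID);
   }

   /** Wraps a wire channel so that topology callbacks are forwarded as packets. */
   public static TopologyListener remoteProxy(final Channel channel)
   {
      return new TopologyListener()
      {
         public void nodeUp(String nodeID, boolean last)
         {
            channel.send("ClusterTopologyChangeMessage[up, " + nodeID + ", last=" + last + "]");
         }

         public void nodeDown(String nodeID)
         {
            channel.send("ClusterTopologyChangeMessage[down, " + nodeID + "]");
         }

         @Override
         public String toString()
         {
            return "Remote proxy on channel " + Integer.toHexString(System.identityHashCode(this));
         }
      };
   }
}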

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/PacketImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/PacketImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/PacketImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -305,7 +305,7 @@
 
    protected String getParentString()
    {
-      return "PACKET[type=" + type + ", channelID=" + channelID;
+      return "PACKET("  + this.getClass().getSimpleName() + ")[type=" + type + ", channelID=" + channelID + ", packetObject=" + this.getClass().getSimpleName();
    }
 
    // Protected -----------------------------------------------------

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/RemotingConnectionImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/RemotingConnectionImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/RemotingConnectionImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -48,6 +48,8 @@
    // ------------------------------------------------------------------------------------
 
    private static final Logger log = Logger.getLogger(RemotingConnectionImpl.class);
+   
+   private static final boolean isTrace = log.isTraceEnabled();
 
    // Static
    // ---------------------------------------------------------------------------------------
@@ -145,9 +147,26 @@
       this.creationTime = System.currentTimeMillis();
    }
 
+   
+   
+   
    // RemotingConnection implementation
    // ------------------------------------------------------------
 
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "RemotingConnectionImpl [clientID=" + clientID +
+             ", nodeID=" +
+             nodeID +
+             ", transportConnection=" +
+             transportConnection +
+             "]";
+   }
+
    public Connection getTransportConnection()
    {
       return transportConnection;
@@ -422,6 +441,11 @@
       try
       {
          final Packet packet = decoder.decode(buffer);
+         
+         if (isTrace)
+         {
+            log.trace("handling packet " + packet);
+         }
             
          if (packet.isAsyncExec() && executor != null)
          {

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/NodeAnnounceMessage.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/NodeAnnounceMessage.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/NodeAnnounceMessage.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -92,7 +92,23 @@
       connector.decode(buffer);
    }
 
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "NodeAnnounceMessage [backup=" + backup +
+             ", connector=" +
+             connector +
+             ", nodeID=" +
+             nodeID +
+             ", toString()=" +
+             super.toString() +
+             "]";
+   }
 
+
    // Package protected ---------------------------------------------
 
    // Protected -----------------------------------------------------

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/SubscribeClusterTopologyUpdatesMessage.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/SubscribeClusterTopologyUpdatesMessage.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/protocol/core/impl/wireformat/SubscribeClusterTopologyUpdatesMessage.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -66,6 +66,17 @@
       clusterConnection = buffer.readBoolean();
    }
 
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "SubscribeClusterTopologyUpdatesMessage [clusterConnection=" + clusterConnection +
+             ", toString()=" +
+             super.toString() +
+             "]";
+   }
 
    // Package protected ---------------------------------------------
 

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMAcceptor.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMAcceptor.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMAcceptor.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -228,20 +228,22 @@
 
       public void connectionDestroyed(final Object connectionID)
       {
-         if (connections.remove(connectionID) != null)
+         InVMConnection connection = (InVMConnection)connections.remove(connectionID);
+         
+         if (connection != null)
          {
+ 
             listener.connectionDestroyed(connectionID);
 
-            // Execute on different thread to avoid deadlocks
-            new Thread()
+            // Execute on a different thread after all the packets are sent, to avoid deadlocks
+            connection.getExecutor().execute(new Runnable()
             {
-               @Override
                public void run()
                {
-                  // Remove on the other side too
-                  connector.disconnect((String)connectionID);
+                  // Remove on the other side too
+                  connector.disconnect((String)connectionID);
                }
-            }.start();
+            });
          }
       }
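
The change above replaces the ad-hoc new Thread() with the connection's own executor, so the disconnect of the other side only runs once everything already queued for that connection has been delivered. A sketch of the idea, assuming the executor processes tasks in submission order (as a single-threaded or ordered executor does); the callback name is made up for illustration:

import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class OrderedDisconnectSketch
{
   /** Schedules the remote-side disconnect behind any work already queued for the connection. */
   public static void disconnectAfterPendingWork(final Executor connectionExecutor, final Runnable disconnectOtherSide)
   {
      connectionExecutor.execute(new Runnable()
      {
         public void run()
         {
            // Runs only after earlier tasks on this (ordered) executor have completed,
            // which is what lets the disconnect wait for the pending packets.
            disconnectOtherSide.run();
         }
      });
   }

   public static void main(String[] args)
   {
      ExecutorService executor = Executors.newSingleThreadExecutor();
      disconnectAfterPendingWork(executor, new Runnable()
      {
         public void run()
         {
            System.out.println("disconnecting the other side");
         }
      });
      executor.shutdown();
   }
}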
 

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnection.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnection.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnection.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -12,8 +12,10 @@
  */
 package org.hornetq.core.remoting.impl.invm;
 
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Executor;
 import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
 
 import org.hornetq.api.core.HornetQBuffer;
 import org.hornetq.api.core.HornetQBuffers;
@@ -35,6 +37,8 @@
 {
 
    private static final Logger log = Logger.getLogger(InVMConnection.class);
+   
+   private static final boolean isTrace = log.isTraceEnabled();
 
    private final BufferHandler handler;
 
@@ -43,6 +47,9 @@
    private final String id;
 
    private boolean closed;
+   
+   // Used by tests to disable the blocking flush below
+   public static boolean flushEnabled = true;
 
    private final int serverID;
 
@@ -133,23 +140,57 @@
                   if (!closed)
                   {
                      copied.readInt(); // read and discard
-
+                     if (isTrace)
+                     {
+                        log.trace(InVMConnection.this + "::Sending inVM packet");
+                     }
                      handler.bufferReceived(id, copied);
                   }
                }
                catch (Exception e)
                {
-                  final String msg = "Failed to write to handler";
+                  final String msg = "Failed to write to handler on connector " + this;
                   InVMConnection.log.error(msg, e);
                   throw new IllegalStateException(msg, e);
                }
+               finally
+               {
+                  if (isTrace)
+                  {
+                     log.trace(InVMConnection.this + "::packet sent done");
+                  }
+               }
             }
          });
+         
+         if (flush && flushEnabled)
+         {
+            final CountDownLatch latch = new CountDownLatch(1);
+            executor.execute(new Runnable(){
+               public void run()
+               {
+                  latch.countDown();
+               }
+            });
+            
+            try
+            {
+               if (!latch.await(10, TimeUnit.SECONDS))
+               {
+                  log.warn("Timed out flushing channel on InVMConnection");
+               }
+            }
+            catch (InterruptedException e)
+            {
+               log.debug(e.getMessage(), e);
+            }
+         }
       }
       catch (RejectedExecutionException e)
       {
          // Ignore - this can happen if server/client is shutdown and another request comes in
       }
+      
    }
 
    public String getRemoteAddress()
@@ -169,4 +210,25 @@
    public void removeReadyListener(ReadyListener listener)
    {
    }
+   
+   public void disableFlush()
+   {
+      flushEnabled = false;
+   }
+   
+   public Executor getExecutor()
+   {
+      return executor;
+   }
+
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "InVMConnection [serverID=" + serverID + ", id=" + id + "]";
+   }
+   
+   
 }
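
The flush added above relies on the executor's ordering guarantee: submitting a latch-countdown task to the connection's executor and waiting on the latch means every packet task submitted before it has already run (or the 10 second timeout fires). A standalone sketch of that idiom:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecutorFlushSketch
{
   /** Waits until every task submitted to the (ordered) executor before this call has completed. */
   public static boolean flush(ExecutorService executor, long timeout, TimeUnit unit) throws InterruptedException
   {
      final CountDownLatch latch = new CountDownLatch(1);
      executor.execute(new Runnable()
      {
         public void run()
         {
            latch.countDown();   // runs only after all earlier tasks on this executor
         }
      });
      return latch.await(timeout, unit);
   }

   public static void main(String[] args) throws InterruptedException
   {
      ExecutorService executor = Executors.newSingleThreadExecutor();
      executor.execute(new Runnable()
      {
         public void run()
         {
            System.out.println("pending work");
         }
      });
      System.out.println("flushed=" + flush(executor, 10, TimeUnit.SECONDS));
      executor.shutdown();
   }
}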

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnector.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnector.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/invm/InVMConnector.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -128,6 +128,8 @@
       if (InVMConnector.failOnCreateConnection)
       {
          InVMConnector.incFailures();
+         
+         log.debug("Returning null on InVMConnector for tests");
          // For testing only
          return null;
       }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/HttpAcceptorHandler.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/HttpAcceptorHandler.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/HttpAcceptorHandler.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,7 +13,7 @@
 package org.hornetq.core.remoting.impl.netty;
 
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -45,7 +45,7 @@
 
    private final BlockingQueue<Runnable> delayedResponses = new LinkedBlockingQueue<Runnable>();
 
-   private final Executor executor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, delayedResponses);
+   private final ExecutorService executor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, delayedResponses);
 
    private final HttpKeepAliveRunnable httpKeepAliveTask;
 
@@ -211,6 +211,19 @@
       }
 
    }
+   
+   
+   public void shutdown()
+   {
+      executor.shutdown();
+      try
+      {
+         executor.awaitTermination(10, TimeUnit.SECONDS);
+      }
+      catch (Exception e)
+      {
+      }
+   }
 
    /**
     * a holder class so we know what time  the request first arrived
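
The new shutdown() above is the usual graceful ExecutorService shutdown: stop accepting new tasks, then wait a bounded time for in-flight ones. A small generic sketch of that pattern (this version also falls back to shutdownNow(), which the code above does not do):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public class GracefulShutdownSketch
{
   /** Shuts an executor down, waiting up to the given timeout for already queued work to finish. */
   public static void shutdownGracefully(final ExecutorService executor, final long timeoutSeconds)
   {
      executor.shutdown();                                   // stop accepting new tasks
      try
      {
         if (!executor.awaitTermination(timeoutSeconds, TimeUnit.SECONDS))
         {
            executor.shutdownNow();                          // give up and interrupt stragglers
         }
      }
      catch (InterruptedException e)
      {
         Thread.currentThread().interrupt();
         executor.shutdownNow();
      }
   }
}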

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyAcceptor.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyAcceptor.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyAcceptor.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -136,6 +136,8 @@
    private final int nioRemotingThreads;
 
    private final HttpKeepAliveRunnable httpKeepAliveRunnable;
+   
+   private HttpAcceptorHandler httpHandler = null;
 
    private final ConcurrentMap<Object, NettyConnection> connections = new ConcurrentHashMap<Object, NettyConnection>();
 
@@ -352,7 +354,8 @@
 
                handlers.put("http-encoder", new HttpResponseEncoder());
 
-               handlers.put("http-handler", new HttpAcceptorHandler(httpKeepAliveRunnable, httpResponseTime));
+               httpHandler = new HttpAcceptorHandler(httpKeepAliveRunnable, httpResponseTime);
+               handlers.put("http-handler", httpHandler);
             }
 
             if (protocol == ProtocolType.CORE)
@@ -555,6 +558,11 @@
             e.printStackTrace();
          }
       }
+      
+      if (httpHandler != null)
+      {
+         httpHandler.shutdown();
+      }
 
       paused = false;
    }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyConnector.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyConnector.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/impl/netty/NettyConnector.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -256,6 +256,28 @@
       this.scheduledThreadPool = scheduledThreadPool;
    }
 
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
+   {
+      return "NettyConnector [host=" + host +
+             ", port=" +
+             port +
+             ", httpEnabled=" +
+             httpEnabled +
+             ", useServlet=" +
+             useServlet +
+             ", servletPath=" +
+             servletPath +
+             ", sslEnabled=" +
+             sslEnabled +
+             ", useNio=" +
+             useNio +
+             "]";
+   }
+
    public synchronized void start()
    {
       if (channelFactory != null)

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -64,6 +64,8 @@
    // Constants -----------------------------------------------------
 
    private static final Logger log = Logger.getLogger(RemotingServiceImpl.class);
+   
+   private static final boolean isTrace = log.isTraceEnabled();
 
    public static final long CONNECTION_TTL_CHECK_INTERVAL = 2000;
 
@@ -265,18 +267,33 @@
       }
 
       failureCheckAndFlushThread.close();
+      
 
       // We need to stop them accepting first so no new connections are accepted after we send the disconnect message
       for (Acceptor acceptor : acceptors)
       {
+         if (log.isDebugEnabled())
+         {
+            log.debug("Pausing acceptor " + acceptor);
+         }
          acceptor.pause();
       }
 
+      if (log.isDebugEnabled())
+      {
+         log.debug("Sending disconnect on live connections");
+      }
+
       // Now we ensure that no connections will process any more packets after this method is complete
       // then send a disconnect packet
       for (ConnectionEntry entry : connections.values())
       {
          RemotingConnection conn = entry.connection;
+         
+         if (log.isTraceEnabled())
+         {
+            log.trace("Sending connection.disconnection packet to " + conn);
+         }
 
          conn.disconnect();
       }
@@ -369,6 +386,11 @@
 
       ConnectionEntry entry = pmgr.createConnectionEntry(connection);
 
+      if (isTrace)
+      {
+         log.trace("Connection created " + connection);
+      }
+      
       connections.put(connection.getID(), entry);
 
       if (config.isBackup())
@@ -379,6 +401,12 @@
    
    public void connectionDestroyed(final Object connectionID)
    {
+
+      if (isTrace)
+      {
+         log.trace("Connection removed " + connectionID + " from server " + this.server, new Exception ("trace"));
+      }
+      
       ConnectionEntry conn = connections.get(connectionID);
 
       if (conn != null)
@@ -458,6 +486,13 @@
          {
             conn.connection.bufferReceived(connectionID, buffer);
          }
+         else
+         {
+            if (log.isTraceEnabled())
+            {
+               log.trace("ConnectionID = " + connectionID + " was already closed, so ignoring packet");
+            }
+         }
       }
    }
 
@@ -474,7 +509,7 @@
          this.pauseInterval = pauseInterval;
       }
 
-      public synchronized void close()
+      public void close()
       {
          closed = true;
 

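The RemotingServiceImpl changes above cache log.isTraceEnabled() in a static final field and guard each trace call, so the message string is never concatenated when tracing is off. A minimal sketch of that guarded-logging pattern, using a hypothetical class name:

   import org.hornetq.core.logging.Logger;

   // Hypothetical example class; only the logging pattern mirrors the change above.
   public class GuardedLoggingExample
   {
      private static final Logger log = Logger.getLogger(GuardedLoggingExample.class);

      // Trace state rarely changes at runtime, so it is checked once per class load.
      private static final boolean isTrace = log.isTraceEnabled();

      public void onConnectionCreated(final Object connection)
      {
         if (isTrace)
         {
            // The concatenation below only happens when trace is actually enabled.
            log.trace("Connection created " + connection);
         }
      }
   }
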
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/HornetQServer.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/HornetQServer.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/HornetQServer.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -15,6 +15,7 @@
 
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ScheduledExecutorService;
 
 import javax.management.MBeanServer;
@@ -56,10 +57,15 @@
 public interface HornetQServer extends HornetQComponent
 {
    
+   /** This method was created mainly for testing, but it may also be used in scenarios where
+    *  you need to have more than one Server inside the same VM.
+    *  The identity will be exposed in the logs, which may help you debug issues in the log traces and debug output. */
    void setIdentity(String identity);
    
    String getIdentity();
    
+   String describe();
+   
    Configuration getConfiguration();
 
    RemotingService getRemotingService();
@@ -150,6 +156,8 @@
 
    ScheduledExecutorService getScheduledPool();
    
+   ExecutorService getThreadPool();
+   
    ExecutorFactory getExecutorFactory();
 
    void setGroupingHandler(GroupingHandler groupingHandler);

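Per the new javadoc on setIdentity(), the identity string exists mainly for tests that run more than one HornetQServer in the same VM, so their log output can be told apart. A hedged sketch of how such a test might label its servers (the helper class and identity strings are invented for illustration; only setIdentity()/getIdentity() come from the interface above):

   import org.hornetq.core.server.HornetQServer;

   // Hypothetical test helper: give each server in the VM a distinct identity
   // so log traces and debug output can be attributed to the right server.
   public final class ServerIdentityExample
   {
      public static void labelServers(final HornetQServer live, final HornetQServer backup)
      {
         live.setIdentity("live-server");
         backup.setIdentity("backup-server");

         System.out.println("live identity: " + live.getIdentity());
         System.out.println("backup identity: " + backup.getIdentity());
      }
   }
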
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterConnection.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterConnection.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterConnection.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -15,11 +15,11 @@
 
 import java.util.Map;
 
-import org.hornetq.api.core.Pair;
 import org.hornetq.api.core.SimpleString;
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.core.client.ClusterTopologyListener;
 import org.hornetq.core.server.HornetQComponent;
+import org.hornetq.core.server.HornetQServer;
 
 /**
  * A ClusterConnection
@@ -35,26 +35,18 @@
    SimpleString getName();
 
    String getNodeID();
+   
+   HornetQServer getServer();
 
    /**
     * @return a Map of node ID and addresses
     */
    Map<String, String> getNodes();
 
-   void handleReplicatedAddBinding(SimpleString address,
-                                   SimpleString uniqueName,
-                                   SimpleString routingName,
-                                   long queueID,
-                                   SimpleString filterString,
-                                   SimpleString queueName,
-                                   int distance) throws Exception;
-
    void activate() throws Exception;
    
    TransportConfiguration getConnector();
 
    // for debug
-   String description();
-
-   void nodeAnnounced(String nodeID, Pair<TransportConfiguration,TransportConfiguration> connectorPair);
+   String describe();
 }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterManager.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterManager.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/ClusterManager.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -59,4 +59,9 @@
    void deployBridge(BridgeConfiguration config) throws Exception;
 
    void destroyBridge(String name) throws Exception;
+
+   /**
+    * @return a description of this ClusterManager's state, for debugging purposes
+    */
+   String describe();
 }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/MessageFlowRecord.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/MessageFlowRecord.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/MessageFlowRecord.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -33,14 +33,8 @@
    Bridge getBridge();
 
    void close() throws Exception;
-   
-   public void resume() throws Exception;
-   
+
    boolean isClosed();
 
    void reset() throws Exception;
-
-   void pause() throws Exception;
-
-    boolean isPaused();
 }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/BridgeImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/BridgeImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/BridgeImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -18,6 +18,8 @@
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.Executor;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
 
 import org.hornetq.api.core.HornetQException;
 import org.hornetq.api.core.Message;
@@ -26,10 +28,11 @@
 import org.hornetq.api.core.client.ClientSession.BindingQuery;
 import org.hornetq.api.core.client.ClientSessionFactory;
 import org.hornetq.api.core.client.SendAcknowledgementHandler;
+import org.hornetq.api.core.client.ServerLocator;
 import org.hornetq.api.core.client.SessionFailureListener;
 import org.hornetq.api.core.management.NotificationType;
+import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
 import org.hornetq.core.client.impl.ClientSessionInternal;
-import org.hornetq.core.client.impl.ServerLocatorInternal;
 import org.hornetq.core.filter.Filter;
 import org.hornetq.core.filter.impl.FilterImpl;
 import org.hornetq.core.logging.Logger;
@@ -53,6 +56,7 @@
  *
  * @author <a href="mailto:tim.fox at jboss.com">Tim Fox</a>
  * @author <a href="mailto:jmesnil at redhat.com">Jeff Mesnil</a>
+ * @author Clebert Suconic
  *
  * Created 12 Nov 2008 11:37:35
  *
@@ -64,16 +68,16 @@
    // Constants -----------------------------------------------------
 
    private static final Logger log = Logger.getLogger(BridgeImpl.class);
-   
+
    private static final boolean isTrace = log.isTraceEnabled();
 
    // Attributes ----------------------------------------------------
-   
+
    private static final SimpleString JMS_QUEUE_ADDRESS_PREFIX = new SimpleString("jms.queue.");
-   
+
    private static final SimpleString JMS_TOPIC_ADDRESS_PREFIX = new SimpleString("jms.topic.");
 
-   protected final ServerLocatorInternal serverLocator;
+   protected final ServerLocator serverLocator;
 
    private final UUID nodeUUID;
 
@@ -83,6 +87,11 @@
 
    protected final Executor executor;
 
+   protected final ScheduledExecutorService scheduledExecutor;
+
+   /** Used when there's a scheduled reconnection */
+   protected ScheduledFuture<?> futureScheduledReconnection;
+
    private final Filter filter;
 
    private final SimpleString forwardingAddress;
@@ -102,8 +111,6 @@
    private final boolean useDuplicateDetection;
 
    private volatile boolean active;
-   
-   private volatile boolean stopping;
 
    private final String user;
 
@@ -111,6 +118,18 @@
 
    private boolean activated;
 
+   private final int reconnectAttempts;
+
+   private int reconnectAttemptsInUse;
+
+   private final long retryInterval;
+
+   private final double retryMultiplier;
+
+   private final long maxRetryInterval;
+
+   private int retryCount = 0;
+
    private NotificationService notificationService;
 
    // Static --------------------------------------------------------
@@ -119,7 +138,11 @@
 
    // Public --------------------------------------------------------
 
-   public BridgeImpl(final ServerLocatorInternal serverLocator,
+   public BridgeImpl(final ServerLocator serverLocator,
+                     final int reconnectAttempts,
+                     final long retryInterval,
+                     final double retryMultiplier,
+                     final long maxRetryInterval,
                      final UUID nodeUUID,
                      final SimpleString name,
                      final Queue queue,
@@ -134,6 +157,17 @@
                      final boolean activated,
                      final StorageManager storageManager) throws Exception
    {
+
+      this.reconnectAttempts = reconnectAttempts;
+      
+      this.reconnectAttemptsInUse = -1;
+
+      this.retryInterval = retryInterval;
+
+      this.retryMultiplier = retryMultiplier;
+
+      this.maxRetryInterval = maxRetryInterval;
+
       this.serverLocator = serverLocator;
 
       this.nodeUUID = nodeUUID;
@@ -144,6 +178,8 @@
 
       this.executor = executor;
 
+      this.scheduledExecutor = scheduledExecutor;
+
       filter = FilterImpl.createFilter(filterString);
 
       this.forwardingAddress = forwardingAddress;
@@ -187,7 +223,7 @@
       }
    }
 
-   private void cancelRefs() throws Exception
+   private void cancelRefs()
    {
       MessageReference ref;
 
@@ -201,25 +237,32 @@
          }
          list.addFirst(ref);
       }
-      
+
       if (isTrace && list.isEmpty())
       {
-         log.trace("didn't have any references to cancel on bridge "  + this);
+         log.trace("didn't have any references to cancel on bridge " + this);
       }
 
-      Queue queue = null;
-      
+      Queue refqueue = null;
+
       long timeBase = System.currentTimeMillis();
 
       for (MessageReference ref2 : list)
       {
-         queue = ref2.getQueue();
+         refqueue = ref2.getQueue();
 
-         queue.cancel(ref2, timeBase);
+         try
+         {
+            refqueue.cancel(ref2, timeBase);
+         }
+         catch (Exception e)
+         {
+            // There isn't much we can do besides log an error
+            log.error("Couldn't cancel reference " + ref2, e);
+         }
       }
+   }
 
-   }
-   
    public void flushExecutor()
    {
       // Wait for any create objects runnable to complete
@@ -235,18 +278,20 @@
       }
    }
 
-
    public void stop() throws Exception
    {
-	  if (log.isDebugEnabled())
-	  {
-	     log.debug("Bridge " + this.name + " being stopped");
-	  }
+      if (log.isDebugEnabled())
+      {
+         log.debug("Bridge " + this.name + " being stopped");
+      }
       
-      stopping = true;
+      if (futureScheduledReconnection != null)
+      {
+         futureScheduledReconnection.cancel(true);
+      }
 
       executor.execute(new StopRunnable());
-      
+
       if (notificationService != null)
       {
          TypedProperties props = new TypedProperties();
@@ -265,10 +310,10 @@
 
    public void pause() throws Exception
    {
-	  if (log.isDebugEnabled())
-	  {
-	     log.debug("Bridge " + this.name + " being paused");
-	  }
+      if (log.isDebugEnabled())
+      {
+         log.debug("Bridge " + this.name + " being paused");
+      }
 
       executor.execute(new PauseRunnable());
 
@@ -288,13 +333,13 @@
       }
    }
 
-    public void resume() throws Exception
-    {
-        queue.addConsumer(BridgeImpl.this);
-        queue.deliverAsync();
-    }
+   public void resume() throws Exception
+   {
+      queue.addConsumer(BridgeImpl.this);
+      queue.deliverAsync();
+   }
 
-    public boolean isStarted()
+   public boolean isStarted()
    {
       return started;
    }
@@ -303,7 +348,7 @@
    {
       activated = true;
 
-      executor.execute(new CreateObjectsRunnable());
+      executor.execute(new ConnectRunnable());
    }
 
    public SimpleString getName()
@@ -359,6 +404,10 @@
 
          if (ref != null)
          {
+            if (isTrace)
+            {
+               log.trace(this + " Acking " + ref + " on queue " + ref.getQueue());
+            }
             ref.getQueue().acknowledge(ref);
          }
       }
@@ -377,7 +426,7 @@
       {
          // We keep our own DuplicateID for the Bridge, so bouncing back and forths will work fine
          byte[] bytes = getDuplicateBytes(nodeUUID, message.getMessageID());
-   
+
          message.putBytesProperty(MessageImpl.HDR_BRIDGE_DUPLICATE_ID, bytes);
       }
 
@@ -396,13 +445,13 @@
    public static byte[] getDuplicateBytes(final UUID nodeUUID, final long messageID)
    {
       byte[] bytes = new byte[24];
-  
+
       ByteBuffer bb = ByteBuffer.wrap(bytes);
-  
+
       bb.put(nodeUUID.asBytes());
-  
+
       bb.putLong(messageID);
-      
+
       return bytes;
    }
 
@@ -417,20 +466,23 @@
       {
          if (!active)
          {
-            log.debug(name + "::Ignoring reference on bridge as it is set to iniactive ref=" + ref);
+            if (log.isDebugEnabled())
+            {
+               log.debug(this + "::Ignoring reference on bridge as it is set to inactive, ref=" + ref);
+            }
             return HandleStatus.BUSY;
          }
 
-		   if (isTrace)
-		   {
-		      log.trace("Bridge " + name + " is handling reference=" + ref); 
-		   }
+         if (isTrace)
+         {
+            log.trace("Bridge " + this + " is handling reference=" + ref);
+         }
          ref.handled();
 
          ServerMessage message = ref.getMessage();
 
          refs.add(ref);
-         
+
          message = beforeForward(message);
 
          SimpleString dest;
@@ -444,17 +496,17 @@
             // Preserve the original address
             dest = message.getAddress();
          }
-         //if we failover during send then there is a chance that the
-         //that this will throw a disconnect, we need to remove the message
-         //from the acks so it will get resent, duplicate detection will cope
-         //with any messages resent
+         // if we failover during send then there is a chance that the
+         // that this will throw a disconnect, we need to remove the message
+         // from the acks so it will get resent, duplicate detection will cope
+         // with any messages resent
          try
          {
             producer.send(dest, message);
          }
          catch (HornetQException e)
          {
-            log.warn("Unable to send message, will try again once bridge reconnects", e);
+            log.warn("Unable to send message " + ref + ", will try again once bridge reconnects", e);
 
             refs.remove(ref);
 
@@ -469,21 +521,40 @@
 
    public void connectionFailed(final HornetQException me, boolean failedOver)
    {
-      log.warn(name + "::Connection failed with failedOver=" + failedOver, me);
-      if (isTrace)
+      
+      log.warn(this + "::Connection failed with failedOver=" + failedOver + "-" + me, me);
+      
+      try
       {
-         log.trace("Calling BridgeImpl::connectionFailed(HOrnetQException me=" + me + ", boolean failedOver=" + failedOver);
+         csf.cleanup();
       }
-      fail(false);
+      catch (Throwable dontCare)
+      {
+      }
+
+      try
+      {
+         session.cleanUp(false);
+      }
+      catch (Throwable dontCare)
+      {
+      }
+      
+      fail(me.getCode() == HornetQException.DISCONNECTED);
+
+      tryScheduleRetryReconnect(me.getCode());
    }
+   
+   protected void tryScheduleRetryReconnect(final int code)
+   {
+      scheduleRetryConnect();
+   }
 
    public void beforeReconnect(final HornetQException exception)
    {
-      log.warn(name + "::Connection failed before reconnect ", exception);
-      fail(true);
+//      log.warn(name + "::Connection failed before reconnect ", exception);
+//      fail(false);
    }
-   
-   
 
    // Package protected ---------------------------------------------
 
@@ -497,231 +568,209 @@
    @Override
    public String toString()
    {
-      return this.getClass().getName() +
-             " [name=" + name +
-             ", nodeUUID=" +
-             nodeUUID +
-             ", queue=" +
-             queue +
-             ", filter=" +
-             filter +
-             ", forwardingAddress=" +
-             forwardingAddress +
-             ", useDuplicateDetection=" +
-             useDuplicateDetection +
-             ", active=" +
-             active +
-             ", stopping=" +
-             stopping +
-             "]";
+      return this.getClass().getSimpleName() + "@" + Integer.toHexString(System.identityHashCode(this)) + " [name=" + name + ", queue=" + queue + " targetConnector=" + this.serverLocator + "]";
    }
 
-   private void fail(final boolean beforeReconnect)
+   protected void fail(final boolean permanently)
    {
-      // This will get called even after the bridge reconnects - in this case
-      // we want to cancel all unacked refs so they get resent
-      // duplicate detection will ensure no dups are routed on the other side
+      log.debug(this + "::fail being called, permanently=" + permanently);
 
-      log.debug(name + "::BridgeImpl::fail being called, beforeReconnect=" + beforeReconnect);
-      
-      if (session.getConnection().isDestroyed())
+      if (queue != null)
       {
-         log.debug(name + "::Connection is destroyed, active = false now");
-         active = false;
-      }
-
-
-         if (!session.getConnection().isDestroyed())
+         try
          {
-            if (beforeReconnect)
+            if (isTrace)
             {
-               try {
-            	  log.debug(name + "::Connection is destroyed, active = false now");
-
-                  cancelRefs();
-               }
-               catch (Exception e)
-               {
-                   BridgeImpl.log.error("Failed to cancel refs", e);
-               }
+               log.trace("Removing consumer on fail " + this + " from queue " + queue);
             }
-            else
-            {
-               try
-               {
-                  afterConnect();
-
-                  log.debug(name + "::After reconnect, setting active=true now");
-                  active = true;
-
-                  if (queue != null)
-                  {
-                     queue.deliverAsync();
-                  }
-               }
-               catch (Exception e)
-               {
-                  BridgeImpl.log.error("Failed to call after connect", e);
-               }
-            }
+            queue.removeConsumer(this);
          }
+         catch (Exception dontcare)
+         {
+            log.debug(dontcare);
+         }
+      }
+      
+      cancelRefs();
+      if (queue != null)
+      {
+         queue.deliverAsync();
+      }
    }
 
    /* Hook for doing extra stuff after connection */
    protected void afterConnect() throws Exception
    {
-      //NOOP
+      retryCount = 0;
+      reconnectAttemptsInUse = reconnectAttempts;
+      if (futureScheduledReconnection != null)
+      {
+         futureScheduledReconnection.cancel(true);
+         futureScheduledReconnection = null;
+      }
    }
 
    /* Hook for creating session factory */
-   protected ClientSessionFactory createSessionFactory() throws Exception
+   protected ClientSessionFactoryInternal createSessionFactory() throws Exception
    {
-      return serverLocator.createSessionFactory();
+      ClientSessionFactoryInternal csf = (ClientSessionFactoryInternal)serverLocator.createSessionFactory();
+      csf.setReconnectAttempts(0);
+      //csf.setInitialReconnectAttempts(1);
+      return csf;
    }
 
    /* This is called only when the bridge is activated */
-   protected synchronized boolean createObjects()
+   protected void connect()
    {
-      if (!started)
-      {
-         return false;
-      }
+      BridgeImpl.log.debug("Connecting  " + this + " to its destination [" + nodeUUID.toString() + "], csf=" + this.csf);
 
-      boolean retry = false;
-      int retryCount = 0;
-
-      do
+      retryCount++;
+      
+      try
       {
-         BridgeImpl.log.info("Connecting bridge " + name + " to its destination [" + nodeUUID.toString() + "]");
+         if (csf == null || csf.isClosed())
+         {
+            csf = createSessionFactory();
+            // Session is pre-acknowledge
+            session = (ClientSessionInternal)csf.createSession(user, password, false, true, true, true, 1);
+         }
 
-         try
+         if (forwardingAddress != null)
          {
-            if (csf == null || csf.isClosed())
+            BindingQuery query = null;
+
+            try
             {
-                csf = createSessionFactory();
-                // Session is pre-acknowledge
-                session = (ClientSessionInternal)csf.createSession(user, password, false, true, true, true, 1);
-                try
-                {
-                   session.addMetaData("Session-for-bridge", name.toString());
-                   session.addMetaData("nodeUUID", nodeUUID.toString());
-                }
-                catch (Throwable dontCare)
-                {
-                   // addMetaData here is just for debug purposes
-                }
+               query = session.bindingQuery(forwardingAddress);
             }
+            catch (Throwable e)
+            {
+               log.warn("Error on querying binding on bridge " + this.name + ". Retrying in 100 milliseconds", e);
+               // This was an issue during startup, we will not count this retry
+               retryCount--;
 
-            if (forwardingAddress != null)
+               scheduleRetryConnectFixedTimeout(100);
+               return;
+            }
+
+            if (forwardingAddress.startsWith(BridgeImpl.JMS_QUEUE_ADDRESS_PREFIX) || forwardingAddress.startsWith(BridgeImpl.JMS_TOPIC_ADDRESS_PREFIX))
             {
-               BindingQuery query = null;
-               
-               try
+               if (!query.isExists())
                {
-                  query = session.bindingQuery(forwardingAddress);
+                  log.warn("Address " + forwardingAddress +
+                           " doesn't have any bindings yet, retry #(" +
+                           retryCount +
+                           ")");
+                  scheduleRetryConnect();
+                  return;
                }
-               catch (Throwable e)
+            }
+            else
+            {
+               if (!query.isExists())
                {
-                  log.warn("Error on querying binding. Retrying", e);
-                  retry = true;
-                  Thread.sleep(100);
-                  continue;
+                  log.info("Bridge " + this.getName() +
+                           " connected to forwardingAddress=" +
+                           this.getForwardingAddress() +
+                           ". " +
+                           getForwardingAddress() +
+                           " doesn't have any bindings yet, which means messages will be ignored until a binding is created.");
                }
-   
-               if (forwardingAddress.startsWith(BridgeImpl.JMS_QUEUE_ADDRESS_PREFIX) || forwardingAddress.startsWith(BridgeImpl.JMS_TOPIC_ADDRESS_PREFIX))
-               {
-                  if (!query.isExists())
-                  {
-                     retryCount ++;
-                     if (serverLocator.getReconnectAttempts() > 0)
-                     {
-                        if (retryCount > serverLocator.getReconnectAttempts())
-                        {
-                           log.warn("Retried " + forwardingAddress + " up to the configured reconnectAttempts(" + serverLocator.getReconnectAttempts() + "). Giving up now. The bridge " + this.getName() + " will not be activated");
-                           return false;
-                        }
-                     }
-   
-                     log.warn("Address " + forwardingAddress + " doesn't have any bindings yet, retry #(" + retryCount + ")");
-                     Thread.sleep(serverLocator.getRetryInterval());
-                     retry = true;
-                     csf.close();
-                     session.close();
-                     continue;
-                  }
-               }
-               else
-               {
-                  if (!query.isExists())
-                  {
-                     log.info("Bridge " + this.getName() + " connected to fowardingAddress=" + this.getForwardingAddress() + ". " + getForwardingAddress() + " doesn't have any bindings what means messages will be ignored until a binding is created.");
-                  }
-               }
             }
+         }
 
-            if (session == null)
-            {
-               // This can happen if the bridge is shutdown
-               return false;
-            }
+         producer = session.createProducer();
+         session.addFailureListener(BridgeImpl.this);
+         session.setSendAcknowledgementHandler(BridgeImpl.this);
 
-            producer = session.createProducer();
-            session.addFailureListener(BridgeImpl.this);
-            session.setSendAcknowledgementHandler(BridgeImpl.this);
+         afterConnect();
 
-            afterConnect();
+         active = true;
 
-            active = true;
+         queue.addConsumer(BridgeImpl.this);
+         queue.deliverAsync();
 
-            queue.addConsumer(BridgeImpl.this);
-            queue.deliverAsync();
+         BridgeImpl.log.info("Bridge " + this + " is connected");
 
-            BridgeImpl.log.info("Bridge " + name + " is connected [" + nodeUUID + "-> " +  name +"]");
+         return;
+      }
+      catch (HornetQException e)
+      {
+         // the session was created while its server was starting, retry it:
+         if (e.getCode() == HornetQException.SESSION_CREATION_REJECTED)
+         {
+            BridgeImpl.log.warn("Server is starting, retry to create the session for bridge " + name);
 
-            return true;
+            // We are not going to count this one as a retry
+            retryCount--;
+            scheduleRetryConnectFixedTimeout(this.retryInterval);
+            return;
          }
-         catch (HornetQException e)
+         else
          {
-            if (csf != null)
+            if (log.isDebugEnabled())
             {
-               csf.close();
+               log.debug("Bridge " + this + " is unable to connect to destination. Retrying", e);
             }
+         }
+      }
+      catch (Exception e)
+      {
+         BridgeImpl.log.warn("Bridge " + this + " is unable to connect to destination. It will be disabled.", e);
+      }
 
-            // the session was created while its server was starting, retry it:
-            if (e.getCode() == HornetQException.SESSION_CREATION_REJECTED)
-            {
-               BridgeImpl.log.warn("Server is starting, retry to create the session for bridge " + name);
+      scheduleRetryConnect();
 
-               // Sleep a little to prevent spinning too much
-               try
-               {
-                  Thread.sleep(10);
-               }
-               catch (InterruptedException ignore)
-               {
-               }
+   }
 
-               retry = true;
+   protected void scheduleRetryConnect()
+   {
+      if (reconnectAttemptsInUse >= 0 && retryCount > reconnectAttempts)
+      {
+         log.warn("Bridge " + this.name +
+                  " reached " +
+                  retryCount +
+                  " retries, exceeding maxAttempts=" +
+                  reconnectAttempts +
+                  "; it will stop retrying to reconnect");
+         fail(true);
+         return;
+      }
 
-               continue;
-            }
-            else
-            {
-               BridgeImpl.log.warn("Bridge " + name + " is unable to connect to destination. It will be disabled.", e);
+      long timeout = (long)(this.retryInterval * Math.pow(this.retryMultiplier, retryCount));
+      if (timeout == 0)
+      {
+         timeout = this.retryInterval;
+      }
+      if (timeout > maxRetryInterval)
+      {
+         timeout = maxRetryInterval;
+      }
+      
+      log.debug("Bridge " + this + " retrying connection #" + retryCount + ", maxRetry=" + reconnectAttemptsInUse + ", timeout=" + timeout);
 
-               return false;
-            }
+      scheduleRetryConnectFixedTimeout(timeout);
+   }
+
+   protected void scheduleRetryConnectFixedTimeout(final long milliseconds)
+   {
+      if (csf != null)
+      {
+         try
+         {
+            csf.cleanup();
          }
-         catch (Exception e)
+         catch (Throwable ignored)
          {
-            BridgeImpl.log.warn("Bridge " + name + " is unable to connect to destination. It will be disabled.", e);
-
-            return false;
          }
       }
-      while (retry && !stopping);
+      
+      if (log.isDebugEnabled())
+      {
+         log.debug("Scheduling retry for bridge " + this.name + " in " + milliseconds + " milliseconds");
+      }
 
-      return false;
+      futureScheduledReconnection = scheduledExecutor.schedule(new FutureConnectRunnable(), milliseconds, TimeUnit.MILLISECONDS);
    }
 
    // Inner classes -------------------------------------------------
@@ -732,14 +781,8 @@
       {
          try
          {
-            // We need to close the session outside of the lock,
-            // so any pending operation will be canceled right away
+            log.debug("stopping bridge " + BridgeImpl.this);
             
-            // TODO: Why closing the CSF will make a few clustering and failover tests to 
-            //       either deadlock or take forever on waiting 
-            //       locks
-            csf.close();
-            csf = null;
             if (session != null)
             {
                log.debug("Cleaning up session " + session);
@@ -747,6 +790,11 @@
                session.removeFailureListener(BridgeImpl.this);
             }
 
+            if (csf != null)
+            {
+               csf.cleanup();
+            }
+
             synchronized (BridgeImpl.this)
             {
                log.debug("Closing Session for bridge " + BridgeImpl.this.name);
@@ -757,15 +805,14 @@
 
             }
 
+            if (isTrace)
+            {
+               log.trace("Removing consumer on stopRunnable " + this + " from queue " + queue);
+            }
             queue.removeConsumer(BridgeImpl.this);
 
-            cancelRefs();
+            internalCancelReferences();
 
-            if (queue != null)
-            {
-               queue.deliverAsync();
-            }
-
             log.info("stopped bridge " + name);
          }
          catch (Exception e)
@@ -783,23 +830,15 @@
          {
             synchronized (BridgeImpl.this)
             {
-               log.debug("Closing Session for bridge " + BridgeImpl.this.name);
-
                started = false;
 
                active = false;
-
             }
 
             queue.removeConsumer(BridgeImpl.this);
 
-            cancelRefs();
+            internalCancelReferences();
 
-            if (queue != null)
-            {
-               queue.deliverAsync();
-            }
-
             log.info("paused bridge " + name);
          }
          catch (Exception e)
@@ -807,18 +846,33 @@
             BridgeImpl.log.error("Failed to pause bridge", e);
          }
       }
+
    }
 
-   private class CreateObjectsRunnable implements Runnable
+   private void internalCancelReferences()
    {
-      public synchronized void run()
+      cancelRefs();
+
+      if (queue != null)
       {
-         if (!createObjects())
-         {
-            active = false;
+         queue.deliverAsync();
+      }
+   }
 
-            started = false;
-         }
+   // The scheduling will still use the main executor here
+   private class FutureConnectRunnable implements Runnable
+   {
+      public void run()
+      {
+         executor.execute(new ConnectRunnable());
       }
    }
+
+   private class ConnectRunnable implements Runnable
+   {
+      public synchronized void run()
+      {
+         connect();
+      }
+   }
 }

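The core of the reworked BridgeImpl above is the backoff computation in scheduleRetryConnect(): the delay is retryInterval * retryMultiplier ^ retryCount, falling back to retryInterval when the product rounds to zero and capped at maxRetryInterval, and the reconnect is then scheduled on the scheduledExecutor rather than retried in a blocking loop. A small stand-alone sketch of that arithmetic and scheduling (the parameter values are examples only, not HornetQ defaults):

   import java.util.concurrent.Executors;
   import java.util.concurrent.ScheduledExecutorService;
   import java.util.concurrent.TimeUnit;

   // Stand-alone sketch of the retry backoff used by BridgeImpl.scheduleRetryConnect().
   public final class BridgeRetryBackoffExample
   {
      static long computeTimeout(final long retryInterval,
                                 final double retryMultiplier,
                                 final long maxRetryInterval,
                                 final int retryCount)
      {
         long timeout = (long)(retryInterval * Math.pow(retryMultiplier, retryCount));
         if (timeout == 0)
         {
            timeout = retryInterval;
         }
         if (timeout > maxRetryInterval)
         {
            timeout = maxRetryInterval;
         }
         return timeout;
      }

      public static void main(String[] args) throws Exception
      {
         // With retryInterval=500ms and multiplier=2.0 the computed delays grow
         // 1000, 2000, 4000, ... and are capped at maxRetryInterval=5000.
         for (int retryCount = 1; retryCount <= 5; retryCount++)
         {
            System.out.println("retry #" + retryCount + " would be scheduled in " +
                               computeTimeout(500, 2.0, 5000, retryCount) + " ms");
         }

         // The bridge schedules the actual reconnect instead of sleeping in a loop.
         final ScheduledExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
         scheduledExecutor.schedule(new Runnable()
         {
            public void run()
            {
               System.out.println("reconnect attempt would run here");
            }
         }, computeTimeout(500, 2.0, 5000, 1), TimeUnit.MILLISECONDS);

         scheduledExecutor.shutdown();
         scheduledExecutor.awaitTermination(10, TimeUnit.SECONDS);
      }
   }
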
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionBridge.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionBridge.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionBridge.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -24,7 +24,7 @@
 import org.hornetq.api.core.client.ClientConsumer;
 import org.hornetq.api.core.client.ClientMessage;
 import org.hornetq.api.core.client.ClientProducer;
-import org.hornetq.api.core.client.ClientSessionFactory;
+import org.hornetq.api.core.client.ServerLocator;
 import org.hornetq.api.core.management.ManagementHelper;
 import org.hornetq.api.core.management.NotificationType;
 import org.hornetq.api.core.management.ResourceNames;
@@ -35,6 +35,7 @@
 import org.hornetq.core.postoffice.BindingType;
 import org.hornetq.core.server.Queue;
 import org.hornetq.core.server.ServerMessage;
+import org.hornetq.core.server.cluster.ClusterConnection;
 import org.hornetq.core.server.cluster.MessageFlowRecord;
 import org.hornetq.core.server.cluster.Transformer;
 import org.hornetq.utils.UUID;
@@ -44,15 +45,18 @@
  * A ClusterConnectionBridge
  *
  * @author tim
+ * @author Clebert Suconic
  *
  *
  */
 public class ClusterConnectionBridge extends BridgeImpl
 {
    private static final Logger log = Logger.getLogger(ClusterConnectionBridge.class);
-   
+
    private static final boolean isTrace = log.isTraceEnabled();
 
+   private final ClusterConnection clusterConnection;
+
    private final MessageFlowRecord flowRecord;
 
    private final SimpleString managementAddress;
@@ -63,11 +67,19 @@
 
    private final SimpleString idsHeaderName;
 
+   private final String targetNodeID;
+
    private final TransportConfiguration connector;
 
-   private final String targetNodeID;
+   private final ServerLocatorInternal discoveryLocator;
 
-   public ClusterConnectionBridge(final ServerLocatorInternal serverLocator,
+   public ClusterConnectionBridge(final ClusterConnection clusterConnection,
+                                  final ServerLocator targetLocator,
+                                  final ServerLocatorInternal discoveryLocator,
+                                  final int reconnectAttempts,
+                                  final long retryInterval,
+                                  final double retryMultiplier,
+                                  final long maxRetryInterval,
                                   final UUID nodeUUID,
                                   final String targetNodeID,
                                   final SimpleString name,
@@ -87,7 +99,11 @@
                                   final MessageFlowRecord flowRecord,
                                   final TransportConfiguration connector) throws Exception
    {
-      super(serverLocator,
+      super(targetLocator,
+            reconnectAttempts,
+            retryInterval,
+            retryMultiplier,
+            maxRetryInterval,
             nodeUUID,
             name,
             queue,
@@ -102,18 +118,27 @@
             activated,
             storageManager);
 
+      this.discoveryLocator = discoveryLocator;
+
       idsHeaderName = MessageImpl.HDR_ROUTE_TO_IDS.concat(name);
 
+      this.clusterConnection = clusterConnection;
+
       this.targetNodeID = targetNodeID;
       this.managementAddress = managementAddress;
       this.managementNotificationAddress = managementNotificationAddress;
       this.flowRecord = flowRecord;
       this.connector = connector;
-      
+
       // we need to disable DLQ check on the clustered bridges
       queue.setInternalQueue(true);
+      
+      if (log.isDebugEnabled())
+      {
+         log.debug("Setting up bridge between " + clusterConnection.getConnector() + " and " + targetLocator, new Exception ("trace"));
+      }
    }
-
+   
    @Override
    protected ServerMessage beforeForward(ServerMessage message)
    {
@@ -147,6 +172,16 @@
 
    private void setupNotificationConsumer() throws Exception
    {
+      if (log.isDebugEnabled())
+      {
+         log.debug("Setting up notificationConsumer between " + this.clusterConnection.getConnector() +
+                   " and " +
+                   flowRecord.getBridge().getForwardingConnection() +
+                   " clusterConnection = " +
+                   this.clusterConnection.getName() +
+                   " on server " +
+                   clusterConnection.getServer());
+      }
       if (flowRecord != null)
       {
          flowRecord.reset();
@@ -155,6 +190,9 @@
          {
             try
             {
+               log.debug("Closing notification Consumer for reopening " + notifConsumer +
+                         " on bridge " +
+                         this.getName());
                notifConsumer.close();
 
                notifConsumer = null;
@@ -167,7 +205,9 @@
 
          // Get the queue data
 
-         String qName = "notif." + UUIDGenerator.getInstance().generateStringUUID();
+         String qName = "notif." + UUIDGenerator.getInstance().generateStringUUID() +
+                        "." +
+                        clusterConnection.getServer();
 
          SimpleString notifQueueName = new SimpleString(qName);
 
@@ -207,6 +247,7 @@
 
          ClientMessage message = session.createMessage(false);
 
+         log.debug("Requesting sendQueueInfoToQueue through " + this, new Exception ("trace"));
          ManagementHelper.putOperationInvocation(message,
                                                  ResourceNames.CORE_SERVER,
                                                  "sendQueueInfoToQueue",
@@ -215,6 +256,11 @@
 
          ClientProducer prod = session.createProducer(managementAddress);
 
+         if (log.isDebugEnabled())
+         {
+            log.debug("Cluster connection bridge on " + clusterConnection + " requesting information on queues");
+         }
+
          prod.send(message);
       }
    }
@@ -222,43 +268,35 @@
    @Override
    protected void afterConnect() throws Exception
    {
+      super.afterConnect();
       setupNotificationConsumer();
    }
-   
+
    @Override
    public void stop() throws Exception
    {
       super.stop();
    }
-   
-   @Override
-   protected ClientSessionFactory createSessionFactory() throws Exception
+
+   protected void tryScheduleRetryReconnect(final int code)
    {
-      //We create the session factory using the specified connector
-      
-      return serverLocator.createSessionFactory(connector);      
+      if (code != HornetQException.DISCONNECTED)
+      {
+         scheduleRetryConnect();
+      }
    }
-   
-   @Override
-   public void connectionFailed(HornetQException me, boolean failedOver)
+
+
+   protected void fail(final boolean permanently)
    {
-	  if (isTrace)
-	  {
-	     log.trace("Connection Failed on ClusterConnectionBridge, failedOver = " + failedOver + ", sessionClosed = " + session.isClosed(), new Exception ("trace"));
-	  }
+      log.debug("Cluster Bridge " + this.getName() + " failed, permanently=" + permanently);
+      super.fail(permanently);
 
-      if (!failedOver && !session.isClosed())
+      if (permanently)
       {
-         try
-         {
-            session.cleanUp(true);
-         }
-         catch (Exception e)
-         {
-            log.warn("Unable to clean up the session after a connection failure", e);
-         }
-         serverLocator.notifyNodeDown(targetNodeID);
+         log.debug("cluster node for bridge " + this.getName() + " is permanently down");
+         discoveryLocator.notifyNodeDown(targetNodeID);
       }
-      super.connectionFailed(me, failedOver);
+
    }
 }

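ClusterConnectionBridge above treats a clean DISCONNECTED from the remote node differently from other failures: tryScheduleRetryReconnect() skips the retry in that case, and a permanent failure is propagated through discoveryLocator.notifyNodeDown(). A minimal sketch of that decision, with a hypothetical callback interface standing in for the real scheduling and notification calls:

   import org.hornetq.api.core.HornetQException;

   // Hypothetical sketch of the "retry unless cleanly disconnected" decision
   // made by the cluster bridge; RetryActions is invented for illustration.
   public final class ClusterRetryDecisionExample
   {
      interface RetryActions
      {
         void scheduleRetryConnect();      // stands in for BridgeImpl.scheduleRetryConnect()
         void notifyNodeDown(String node); // stands in for discoveryLocator.notifyNodeDown(...)
      }

      static void onConnectionFailed(final int errorCode, final String targetNodeID, final RetryActions actions)
      {
         if (errorCode == HornetQException.DISCONNECTED)
         {
            // The remote node shut down cleanly; treat it as permanently gone.
            actions.notifyNodeDown(targetNodeID);
         }
         else
         {
            // Transient failure: keep retrying with the bridge's backoff schedule.
            actions.scheduleRetryConnect();
         }
      }
   }
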
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterConnectionImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -16,12 +16,16 @@
 import static org.hornetq.api.core.management.NotificationType.CONSUMER_CLOSED;
 import static org.hornetq.api.core.management.NotificationType.CONSUMER_CREATED;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executor;
 import java.util.concurrent.ScheduledExecutorService;
 
 import org.hornetq.api.core.DiscoveryGroupConfiguration;
@@ -29,10 +33,11 @@
 import org.hornetq.api.core.SimpleString;
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.core.client.ClientMessage;
-import org.hornetq.api.core.client.HornetQClient;
 import org.hornetq.api.core.management.ManagementHelper;
 import org.hornetq.api.core.management.NotificationType;
+import org.hornetq.core.client.impl.ServerLocatorImpl;
 import org.hornetq.core.client.impl.ServerLocatorInternal;
+import org.hornetq.core.client.impl.Topology;
 import org.hornetq.core.logging.Logger;
 import org.hornetq.core.postoffice.Binding;
 import org.hornetq.core.postoffice.Bindings;
@@ -65,11 +70,15 @@
 public class ClusterConnectionImpl implements ClusterConnection
 {
    private static final Logger log = Logger.getLogger(ClusterConnectionImpl.class);
-   
+
    private static final boolean isTrace = log.isTraceEnabled();
 
-   private final org.hornetq.utils.ExecutorFactory executorFactory;
+   private final ExecutorFactory executorFactory;
 
+   private final Topology clusterManagerTopology;
+
+   private final Executor executor;
+
    private final HornetQServer server;
 
    private final PostOffice postOffice;
@@ -81,22 +90,22 @@
    private final SimpleString address;
 
    private final long clientFailureCheckPeriod;
-   
+
    private final long connectionTTL;
-   
+
    private final long retryInterval;
-   
+
    private final double retryIntervalMultiplier;
-   
+
    private final long maxRetryInterval;
-   
+
    private final int reconnectAttempts;
 
    private final boolean useDuplicateDetection;
 
    private final boolean routeWhenNoConsumers;
 
-   private final Map<String, MessageFlowRecord> records = new HashMap<String, MessageFlowRecord>();
+   private final Map<String, MessageFlowRecord> records = new ConcurrentHashMap<String, MessageFlowRecord>();
 
    private final ScheduledExecutorService scheduledExecutor;
 
@@ -115,14 +124,18 @@
    private final ClusterConnector clusterConnector;
 
    private ServerLocatorInternal serverLocator;
-   
+
    private final TransportConfiguration connector;
 
    private final boolean allowDirectConnectionsOnly;
 
    private final Set<TransportConfiguration> allowableConnections = new HashSet<TransportConfiguration>();
-   
-   public ClusterConnectionImpl(final TransportConfiguration[] tcConfigs,
+
+   private final ClusterManagerImpl manager;
+
+   public ClusterConnectionImpl(final ClusterManagerImpl manager,
+                                final Topology clusterManagerTopology,
+                                final TransportConfiguration[] tcConfigs,
                                 final TransportConfiguration connector,
                                 final SimpleString name,
                                 final SimpleString address,
@@ -162,15 +175,15 @@
       this.address = address;
 
       this.clientFailureCheckPeriod = clientFailureCheckPeriod;
-      
+
       this.connectionTTL = connectionTTL;
-      
+
       this.retryInterval = retryInterval;
-      
+
       this.retryIntervalMultiplier = retryIntervalMultiplier;
-      
+
       this.maxRetryInterval = maxRetryInterval;
-      
+
       this.reconnectAttempts = reconnectAttempts;
 
       this.useDuplicateDetection = useDuplicateDetection;
@@ -179,6 +192,8 @@
 
       this.executorFactory = executorFactory;
 
+      this.executor = executorFactory.getExecutor();
+
       this.server = server;
 
       this.postOffice = postOffice;
@@ -197,13 +212,17 @@
 
       this.allowDirectConnectionsOnly = allowDirectConnectionsOnly;
 
+      this.manager = manager;
+
+      this.clusterManagerTopology = clusterManagerTopology;
+
       clusterConnector = new StaticClusterConnector(tcConfigs);
 
       if (tcConfigs != null && tcConfigs.length > 0)
       {
          // a cluster connection will connect to other nodes only if they are directly connected
          // through a static list of connectors or broadcasting using UDP.
-         if(allowDirectConnectionsOnly)
+         if (allowDirectConnectionsOnly)
          {
             allowableConnections.addAll(Arrays.asList(tcConfigs));
          }
@@ -211,16 +230,18 @@
 
    }
 
-   public ClusterConnectionImpl(DiscoveryGroupConfiguration dg,
+   public ClusterConnectionImpl(final ClusterManagerImpl manager,
+                                final Topology clusterManagerTopology,
+                                DiscoveryGroupConfiguration dg,
                                 final TransportConfiguration connector,
                                 final SimpleString name,
                                 final SimpleString address,
-								        final long clientFailureCheckPeriod,
-								        final long connectionTTL,
-								        final long retryInterval,
-								        final double retryIntervalMultiplier,
-								        final long maxRetryInterval,
-								        final int reconnectAttempts,
+                                final long clientFailureCheckPeriod,
+                                final long connectionTTL,
+                                final long retryInterval,
+                                final double retryIntervalMultiplier,
+                                final long maxRetryInterval,
+                                final int reconnectAttempts,
                                 final boolean useDuplicateDetection,
                                 final boolean routeWhenNoConsumers,
                                 final int confirmationWindowSize,
@@ -251,15 +272,15 @@
       this.address = address;
 
       this.clientFailureCheckPeriod = clientFailureCheckPeriod;
-      
+
       this.connectionTTL = connectionTTL;
-      
+
       this.retryInterval = retryInterval;
-      
+
       this.retryIntervalMultiplier = retryIntervalMultiplier;
-      
+
       this.maxRetryInterval = maxRetryInterval;
-      
+
       this.reconnectAttempts = reconnectAttempts;
 
       this.useDuplicateDetection = useDuplicateDetection;
@@ -268,6 +289,8 @@
 
       this.executorFactory = executorFactory;
 
+      this.executor = executorFactory.getExecutor();
+
       this.server = server;
 
       this.postOffice = postOffice;
@@ -287,6 +310,10 @@
       this.allowDirectConnectionsOnly = allowDirectConnectionsOnly;
 
       clusterConnector = new DiscoveryClusterConnector(dg);
+
+      this.manager = manager;
+
+      this.clusterManagerTopology = clusterManagerTopology;
    }
 
    public synchronized void start() throws Exception
@@ -297,13 +324,12 @@
       }
 
       started = true;
-      
-      if(!backup)
+
+      if (!backup)
       {
          activate();
       }
 
-
    }
 
    public void stop() throws Exception
@@ -312,14 +338,23 @@
       {
          return;
       }
+      
+      if (log.isDebugEnabled())
+      {
+         log.debug(this + "::stopping ClusterConnection");
+      }
 
       if (serverLocator != null)
       {
          serverLocator.removeClusterTopologyListener(this);
       }
-      
-      log.debug("Cluster connection being stopped for node" + nodeUUID);
 
+      log.debug("Cluster connection being stopped for node " + nodeUUID +
+                ", server = " +
+                this.server +
+                ", serverLocator = " +
+                serverLocator);
+
       synchronized (this)
       {
          for (MessageFlowRecord record : records.values())
@@ -343,12 +378,19 @@
             managementService.sendNotification(notification);
          }
 
-         if(serverLocator != null)
+         executor.execute(new Runnable()
          {
-            serverLocator.close();
-            serverLocator = null;
-         }
+            public void run()
+            {
+               if (serverLocator != null)
+               {
+                  serverLocator.close();
+                  serverLocator = null;
+               }
 
+            }
+         });
+
          started = false;
       }
    }
@@ -368,17 +410,25 @@
       return nodeUUID.toString();
    }
 
+   public HornetQServer getServer()
+   {
+      return server;
+   }
+
    public synchronized Map<String, String> getNodes()
    {
-      Map<String, String> nodes = new HashMap<String, String>();
-      for (Entry<String, MessageFlowRecord> record : records.entrySet())
+      synchronized (records)
       {
-         if (record.getValue().getBridge().getForwardingConnection() != null)
+         Map<String, String> nodes = new HashMap<String, String>();
+         for (Entry<String, MessageFlowRecord> record : records.entrySet())
          {
-            nodes.put(record.getKey(), record.getValue().getBridge().getForwardingConnection().getRemoteAddress());
+            if (record.getValue().getBridge().getForwardingConnection() != null)
+            {
+               nodes.put(record.getKey(), record.getValue().getBridge().getForwardingConnection().getRemoteAddress());
+            }
          }
+         return nodes;
       }
-      return nodes;
    }
 
    public synchronized void activate() throws Exception
@@ -388,28 +438,37 @@
          return;
       }
 
+      if (log.isDebugEnabled())
+      {
+         log.debug("Activating cluster connection nodeID=" + nodeUUID + " for server=" + this.server);
+      }
+
       backup = false;
 
       serverLocator = clusterConnector.createServerLocator();
 
-
       if (serverLocator != null)
       {
          serverLocator.setNodeID(nodeUUID.toString());
+         serverLocator.setIdentity("(main-ClusterConnection::" + server.toString() + ")");
 
-         serverLocator.setReconnectAttempts(reconnectAttempts);
+         serverLocator.setReconnectAttempts(0);
 
          serverLocator.setClusterConnection(true);
          serverLocator.setClusterTransportConfiguration(connector);
          serverLocator.setBackup(server.getConfiguration().isBackup());
          serverLocator.setInitialConnectAttempts(-1);
+
+         serverLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod);
+         serverLocator.setConnectionTTL(connectionTTL);
+
          if (serverLocator.getConfirmationWindowSize() < 0)
          {
-        	// We can't have confirmationSize = -1 on the cluster Bridge
-        	// Otherwise we won't have confirmation working
+            // We can't have confirmationSize = -1 on the cluster Bridge
+            // Otherwise we won't have confirmation working
             serverLocator.setConfirmationWindowSize(0);
          }
-         
+
          if (!useDuplicateDetection)
          {
             log.debug("DuplicateDetection is disabled, sending clustered messages blocked");
@@ -418,7 +477,7 @@
          serverLocator.setBlockOnDurableSend(!useDuplicateDetection);
          serverLocator.setBlockOnNonDurableSend(!useDuplicateDetection);
 
-         if(retryInterval > 0)
+         if (retryInterval > 0)
          {
             this.serverLocator.setRetryInterval(retryInterval);
          }
@@ -435,10 +494,11 @@
          Notification notification = new Notification(nodeUUID.toString(),
                                                       NotificationType.CLUSTER_CONNECTION_STARTED,
                                                       props);
+         log.debug("sending notification: " + notification);
          managementService.sendNotification(notification);
       }
    }
-   
+
    public TransportConfiguration getConnector()
    {
       return connector;
@@ -446,18 +506,21 @@
 
    // ClusterTopologyListener implementation ------------------------------------------------------------------
 
-   public synchronized void nodeDown(final String nodeID)
+   public void nodeDown(final String nodeID)
    {
-      log.debug("node " + nodeID + " being considered down on cluster connection for nodeID=" + nodeUUID, new Exception ("trace"));
+      if (log.isDebugEnabled())
+      {
+         log.debug(this + " receiving nodeDown for nodeID=" + nodeID, new Exception("trace"));
+      }
       if (nodeID.equals(nodeUUID.toString()))
       {
          return;
       }
-      
-      //Remove the flow record for that node
-      
-      MessageFlowRecord record = records.get(nodeID);
 
+      // Remove the flow record for that node
+
+      MessageFlowRecord record = records.remove(nodeID);
+
       if (record != null)
       {
          try
@@ -466,26 +529,31 @@
             {
                log.trace("Closing clustering record " + record);
             }
-            record.pause();
+            record.close();
          }
          catch (Exception e)
          {
             log.error("Failed to close flow record", e);
          }
+
+         server.getClusterManager().notifyNodeDown(nodeID);
       }
-      
-      server.getClusterManager().notifyNodeDown(nodeID);
    }
 
-   public synchronized void nodeUP(final String nodeID,
-                                   final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
-                                   final boolean last)
+   public void nodeUP(final String nodeID,
+                      final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
+                      final boolean last)
    {
+      if (log.isDebugEnabled())
+      {
+         log.debug(this + " receiving nodeUP for nodeID=" + nodeID + ", connectorPair=" + connectorPair);
+      }
       // discard notifications about ourselves unless its from our backup
 
       if (nodeID.equals(nodeUUID.toString()))
       {
-         if(connectorPair.b != null)
+         if (connectorPair.b != null)
          {
             server.getClusterManager().notifyNodeUp(nodeID, connectorPair, last, false);
          }
@@ -501,15 +569,24 @@
          return;
       }
 
-      // FIXME required to prevent cluster connections w/o discovery group 
+      // FIXME required to prevent cluster connections w/o discovery group
       // and empty static connectors to create bridges... ulgy!
       if (serverLocator == null)
       {
          return;
       }
       /*we dont create bridges to backups*/
-      if(connectorPair.a == null)
+      if (connectorPair.a == null)
       {
+         if (isTrace)
+         {
+            log.trace(this + " ignoring call with nodeID=" +
+                      nodeID +
+                      ", connectorPair=" +
+                      connectorPair +
+                      ", last=" +
+                      last);
+         }
          return;
       }
 
@@ -521,98 +598,11 @@
 
             if (record == null)
             {
-               // New node - create a new flow record
-
-               final SimpleString queueName = new SimpleString("sf." + name + "." + nodeID);
-
-               Binding queueBinding = postOffice.getBinding(queueName);
-
-               Queue queue;
-
-               if (queueBinding != null)
+               if (log.isDebugEnabled())
                {
-                  queue = (Queue)queueBinding.getBindable();
+                  log.debug(this + "::Creating record for nodeID=" + nodeID + ", connectorPair=" + connectorPair);
                }
-               else
-               {
-                  // Add binding in storage so the queue will get reloaded on startup and we can find it - it's never
-                  // actually routed to at that address though
-                  queue = server.createQueue(queueName, queueName, null, true, false);
-               }
 
-               createNewRecord(nodeID, connectorPair.a, queueName, queue, true);
-            }
-            else
-            {
-               log.info("Reattaching nodeID=" + nodeID);
-               if (record.isPaused())
-               {
-                  record.resume();
-               }
-            }
-         }
-         catch (Exception e)
-         {
-            log.error("Failed to update topology", e);
-         }
-      }
-   }
-
-   public void nodeAnnounced(final String nodeID,
-                                   final Pair<TransportConfiguration, TransportConfiguration> connectorPair)
-   {
-      if (isTrace)
-      {
-         log.trace("nodeAnnouncedUp:" + nodeID);
-      }
-      
-      if (nodeID.equals(nodeUUID.toString()))
-      {
-         return;
-      }
-
-      // if the node is more than 1 hop away, we do not create a bridge for direct cluster connection
-      if (allowDirectConnectionsOnly && !allowableConnections.contains(connectorPair.a))
-      {
-         if (isTrace)
-         {
-            log.trace("Ignoring nodeUp message as it only allows direct connections");
-         }
-         return;
-      }
-
-      // FIXME required to prevent cluster connections w/o discovery group
-      // and empty static connectors to create bridges... ulgy!
-      if (serverLocator == null)
-      {
-         if (isTrace)
-         {
-            log.trace("Ignoring nodeUp as serverLocator==null");
-         }
-         return;
-      }
-      /*we dont create bridges to backups*/
-      if(connectorPair.a == null)
-      {
-         if (isTrace)
-         {
-            log.trace("Igoring nodeup as connectorPair.a==null (backup)");
-         }
-         return;
-      }
-
-      synchronized (records)
-      {
-         if (isTrace)
-         {
-            log.trace("Adding record for nodeID=" + nodeID);
-         }
-         try
-         {
-            MessageFlowRecord record = records.get(nodeID);
-            
-            if (record == null)
-            {
                // New node - create a new flow record
 
                final SimpleString queueName = new SimpleString("sf." + name + "." + nodeID);
@@ -638,7 +628,12 @@
             {
                if (isTrace)
                {
-                  log.trace("It already had a node created before, ignoring the nodeUp message");
+                  log.trace(this +
+                            " ignored nodeUp record for " +
+                            connectorPair +
+                            " on nodeID=" +
+                            nodeID +
+                            " as the record already existed");
                }
             }
          }
@@ -648,21 +643,26 @@
          }
       }
    }
-   
-   private void createNewRecord(final String nodeID,
+
+   private void createNewRecord(final String targetNodeID,
                                 final TransportConfiguration connector,
                                 final SimpleString queueName,
                                 final Queue queue,
                                 final boolean start) throws Exception
    {
-      MessageFlowRecordImpl record = new MessageFlowRecordImpl(nodeID, connector, queueName, queue);
+      MessageFlowRecordImpl record = new MessageFlowRecordImpl(targetNodeID, connector, queueName, queue);
 
-      records.put(nodeID, record);
+      Bridge bridge = createClusteredBridge(record);
 
-      Bridge bridge = createBridge(record);
-      
+      if (log.isDebugEnabled())
+      {
+         log.debug("creating record between " + this.connector + " and " + connector + bridge);
+      }
+
       record.setBridge(bridge);
 
+      records.put(targetNodeID, record);
+
       if (start)
       {
          bridge.start();
@@ -674,29 +674,71 @@
     * @return
     * @throws Exception
     */
-   protected Bridge createBridge(MessageFlowRecordImpl record) throws Exception
+   protected Bridge createClusteredBridge(MessageFlowRecordImpl record) throws Exception
    {
-      ClusterConnectionBridge bridge = new ClusterConnectionBridge(serverLocator,
-                                                  nodeUUID,
-                                                  record.getNodeID(),
-                                                  record.getQueueName(),
-                                                  record.getQueue(),
-                                                  executorFactory.getExecutor(),
-                                                  null,
-                                                  null,
-                                                  scheduledExecutor,
-                                                  null,
-                                                  useDuplicateDetection,
-                                                  clusterUser,
-                                                  clusterPassword,
-                                                  !backup,
-                                                  server.getStorageManager(),
-                                                  managementService.getManagementAddress(),
-                                                  managementService.getManagementNotificationAddress(),
-                                                  record,
-                                                  record.getConnector());
 
-       return bridge;
+      final ServerLocatorInternal targetLocator = new ServerLocatorImpl(this.clusterManagerTopology,
+                                                                        false,
+                                                                        server.getThreadPool(),
+                                                                        server.getScheduledPool(),
+                                                                        record.getConnector());
+
+      targetLocator.setReconnectAttempts(0);
+
+      targetLocator.setInitialConnectAttempts(0);
+      targetLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod);
+      targetLocator.setConnectionTTL(connectionTTL);
+
+      targetLocator.setConfirmationWindowSize(serverLocator.getConfirmationWindowSize());
+      targetLocator.setBlockOnDurableSend(!useDuplicateDetection);
+      targetLocator.setBlockOnNonDurableSend(!useDuplicateDetection);
+      targetLocator.setClusterConnection(true);
+
+      targetLocator.setRetryInterval(retryInterval);
+      targetLocator.setMaxRetryInterval(maxRetryInterval);
+      targetLocator.setRetryIntervalMultiplier(retryIntervalMultiplier);
+
+      targetLocator.setNodeID(serverLocator.getNodeID());
+
+      targetLocator.setClusterTransportConfiguration(serverLocator.getClusterTransportConfiguration());
+
+      manager.addClusterLocator(targetLocator);
+
+      ClusterConnectionBridge bridge = new ClusterConnectionBridge(this,
+                                                                   targetLocator,
+                                                                   serverLocator,
+                                                                   reconnectAttempts,
+                                                                   retryInterval,
+                                                                   retryIntervalMultiplier,
+                                                                   maxRetryInterval,
+                                                                   nodeUUID,
+                                                                   record.getTargetNodeID(),
+                                                                   record.getQueueName(),
+                                                                   record.getQueue(),
+                                                                   executorFactory.getExecutor(),
+                                                                   null,
+                                                                   null,
+                                                                   scheduledExecutor,
+                                                                   null,
+                                                                   useDuplicateDetection,
+                                                                   clusterUser,
+                                                                   clusterPassword,
+                                                                   !backup,
+                                                                   server.getStorageManager(),
+                                                                   managementService.getManagementAddress(),
+                                                                   managementService.getManagementNotificationAddress(),
+                                                                   record,
+                                                                   record.getConnector());
+
+      targetLocator.setIdentity("(Cluster-connection-bridge::" + bridge.toString() + "::" + this.toString() + ")");
+
+      return bridge;
    }
 
    // Inner classes -----------------------------------------------------------------------------------
@@ -705,30 +747,51 @@
    {
       private Bridge bridge;
 
-      private final String nodeID;
+      private final String targetNodeID;
+
       private final TransportConfiguration connector;
+
       private final SimpleString queueName;
+
       private final Queue queue;
 
       private final Map<SimpleString, RemoteQueueBinding> bindings = new HashMap<SimpleString, RemoteQueueBinding>();
-      
+
       private volatile boolean isClosed = false;
 
-      private volatile boolean paused = false;
-
       private volatile boolean firstReset = false;
 
-      public MessageFlowRecordImpl(final String nodeID,
+      public MessageFlowRecordImpl(final String targetNodeID,
                                    final TransportConfiguration connector,
                                    final SimpleString queueName,
                                    final Queue queue)
       {
          this.queue = queue;
-         this.nodeID = nodeID;
+         this.targetNodeID = targetNodeID;
          this.connector = connector;
          this.queueName = queueName;
       }
 
+      /* (non-Javadoc)
+       * @see java.lang.Object#toString()
+       */
+      @Override
+      public String toString()
+      {
+         return "MessageFlowRecordImpl [nodeID=" + targetNodeID +
+                ", connector=" +
+                connector +
+                ", queueName=" +
+                queueName +
+                ", queue=" +
+                queue +
+                ", isClosed=" +
+                isClosed +
+                ", firstReset=" +
+                firstReset +
+                "]";
+      }
+
       public String getAddress()
       {
          return address.toString();
@@ -737,9 +800,9 @@
       /**
        * @return the nodeID
        */
-      public String getNodeID()
+      public String getTargetNodeID()
       {
-         return nodeID;
+         return targetNodeID;
       }
 
       /**
@@ -777,31 +840,13 @@
          {
             log.trace("Stopping bridge " + bridge);
          }
-         
+
          isClosed = true;
          clearBindings();
-         
+
          bridge.stop();
       }
 
-      public void pause() throws Exception
-      {
-           paused = true;
-           clearBindings();
-           bridge.pause();
-      }
-
-       public boolean isPaused()
-       {
-           return paused;
-       }
-
-       public void resume() throws Exception
-      {
-         paused = false;
-         bridge.resume();
-      }
-      
       public boolean isClosed()
       {
          return isClosed;
@@ -812,8 +857,7 @@
          clearBindings();
       }
 
-
-       public void setBridge(final Bridge bridge)
+      public void setBridge(final Bridge bridge)
       {
          this.bridge = bridge;
       }
@@ -827,7 +871,7 @@
       {
          if (isTrace)
          {
-            log.trace("Receiving message "  + message);
+            log.trace("Flow record on " + clusterConnector + " Receiving message " + message);
          }
          try
          {
@@ -948,6 +992,7 @@
 
       private synchronized void clearBindings() throws Exception
       {
+         log.debug(ClusterConnectionImpl.this + " clearing bindings");
          for (RemoteQueueBinding binding : new HashSet<RemoteQueueBinding>(bindings.values()))
          {
             removeBinding(binding.getClusterName());
@@ -956,6 +1001,10 @@
 
       private synchronized void doBindingAdded(final ClientMessage message) throws Exception
       {
+         if (log.isTraceEnabled())
+         {
+            log.trace(ClusterConnectionImpl.this + " Adding binding " + message);
+         }
          if (!message.containsProperty(ManagementHelper.HDR_DISTANCE))
          {
             throw new IllegalStateException("distance is null");
@@ -1016,6 +1065,11 @@
             return;
          }
 
+         if (isTrace)
+         {
+            log.trace("Adding binding " + clusterName + " into " + ClusterConnectionImpl.this);
+         }
+
          bindings.put(clusterName, binding);
 
          try
@@ -1034,6 +1088,10 @@
 
       private void doBindingRemoved(final ClientMessage message) throws Exception
       {
+         if (log.isTraceEnabled())
+         {
+            log.trace(ClusterConnectionImpl.this + " Removing binding " + message);
+         }
          if (!message.containsProperty(ManagementHelper.HDR_CLUSTER_NAME))
          {
             throw new IllegalStateException("clusterName is null");
@@ -1041,6 +1099,8 @@
 
          SimpleString clusterName = message.getSimpleStringProperty(ManagementHelper.HDR_CLUSTER_NAME);
 
+         System.out.println("Removing clusterName=" + clusterName + " on " + ClusterConnectionImpl.this);
+
          removeBinding(clusterName);
       }
 
@@ -1058,6 +1118,10 @@
 
       private synchronized void doConsumerCreated(final ClientMessage message) throws Exception
       {
+         if (log.isTraceEnabled())
+         {
+            log.trace(ClusterConnectionImpl.this + " Consumer created " + message);
+         }
          if (!message.containsProperty(ManagementHelper.HDR_DISTANCE))
          {
             throw new IllegalStateException("distance is null");
@@ -1080,7 +1144,9 @@
 
          if (binding == null)
          {
-            throw new IllegalStateException("Cannot find binding for " + clusterName);
+            throw new IllegalStateException("Cannot find binding for " + clusterName +
+                                            " on " +
+                                            ClusterConnectionImpl.this);
          }
 
          binding.addConsumer(filterString);
@@ -1112,6 +1178,10 @@
 
       private synchronized void doConsumerClosed(final ClientMessage message) throws Exception
       {
+         if (log.isTraceEnabled())
+         {
+            log.trace(ClusterConnectionImpl.this + " Consumer closed " + message);
+         }
          if (!message.containsProperty(ManagementHelper.HDR_DISTANCE))
          {
             throw new IllegalStateException("distance is null");
@@ -1165,67 +1235,44 @@
 
    }
 
-   public void handleReplicatedAddBinding(final SimpleString address,
-                                          final SimpleString uniqueName,
-                                          final SimpleString routingName,
-                                          final long queueID,
-                                          final SimpleString filterString,
-                                          final SimpleString queueName,
-                                          final int distance) throws Exception
-   {
-      Binding queueBinding = postOffice.getBinding(queueName);
-
-      if (queueBinding == null)
-      {
-         throw new IllegalStateException("Cannot find s & f queue " + queueName);
-      }
-
-      Queue queue = (Queue)queueBinding.getBindable();
-
-      RemoteQueueBinding binding = new RemoteQueueBindingImpl(server.getStorageManager().generateUniqueID(),
-                                                              address,
-                                                              uniqueName,
-                                                              routingName,
-                                                              queueID,
-                                                              filterString,
-                                                              queue,
-                                                              queueName,
-                                                              distance);
-
-      if (postOffice.getBinding(uniqueName) != null)
-      {
-         ClusterConnectionImpl.log.warn("Remoting queue binding " + uniqueName +
-                                        " has already been bound in the post office. Most likely cause for this is you have a loop " +
-                                        "in your cluster due to cluster max-hops being too large or you have multiple cluster connections to the same nodes using overlapping addresses");
-
-         return;
-      }
-
-      postOffice.addBinding(binding);
-
-      Bindings theBindings = postOffice.getBindingsForAddress(address);
-
-      theBindings.setRouteWhenNoConsumers(routeWhenNoConsumers);
-   }
-
    // for testing only
    public Map<String, MessageFlowRecord> getRecords()
    {
       return records;
    }
-   
-   public String description()
+
+   /* (non-Javadoc)
+    * @see java.lang.Object#toString()
+    */
+   @Override
+   public String toString()
    {
-      String out = name + " connected to\n";
+      return "ClusterConnectionImpl [nodeUUID=" + nodeUUID +
+             ", connector=" +
+             connector +
+             ", address=" +
+             address +
+             ", server=" +
+             server +
+             "]";
+   }
+
+   public String describe()
+   {
+      StringWriter str = new StringWriter();
+      PrintWriter out = new PrintWriter(str);
+
+      out.println(this);
+      out.println("***************************************");
+      out.println(name + " connected to");
       for (Entry<String, MessageFlowRecord> messageFlow : records.entrySet())
       {
-         String nodeID = messageFlow.getKey();
-         Bridge bridge = messageFlow.getValue().getBridge();
-         
-         out += "\t" + nodeID + " -- " + bridge.isStarted() + "\n";
+         out.println("\t Bridge = " + messageFlow.getValue().getBridge());
+         out.println("\t Flow Record = " + messageFlow.getValue());
       }
-      
-      return out;
+      out.println("***************************************");
+
+      return str.toString();
    }
 
    interface ClusterConnector
@@ -1244,15 +1291,33 @@
 
       public ServerLocatorInternal createServerLocator()
       {
-         if(tcConfigs != null && tcConfigs.length > 0)
+         if (tcConfigs != null && tcConfigs.length > 0)
          {
-            return (ServerLocatorInternal) HornetQClient.createServerLocatorWithHA(tcConfigs);
+            if (log.isDebugEnabled())
+            {
+               log.debug(ClusterConnectionImpl.this + "Creating a serverLocator for " + Arrays.toString(tcConfigs));
+            }
+            return new ServerLocatorImpl(clusterManagerTopology,
+                                         true,
+                                         server.getThreadPool(),
+                                         server.getScheduledPool(),
+                                         tcConfigs);
          }
          else
          {
             return null;
          }
       }
+
+      /* (non-Javadoc)
+       * @see java.lang.Object#toString()
+       */
+      @Override
+      public String toString()
+      {
+         return "StaticClusterConnector [tcConfigs=" + Arrays.toString(tcConfigs) + "]";
+      }
+
    }
 
    private class DiscoveryClusterConnector implements ClusterConnector
@@ -1266,7 +1331,11 @@
 
       public ServerLocatorInternal createServerLocator()
       {
-         return (ServerLocatorInternal) HornetQClient.createServerLocatorWithHA(dg);
+         return new ServerLocatorImpl(clusterManagerTopology,
+                                      true,
+                                      server.getThreadPool(),
+                                      server.getScheduledPool(),
+                                      dg);
       }
    }
 }

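As a side note on the getNodes() change above: the method now builds its result while holding the records lock and returns an independent copy instead of iterating the shared map unguarded. A minimal, self-contained sketch of that copy-under-lock pattern, using plain JDK types and illustrative names only (this is not HornetQ API):

import java.util.HashMap;
import java.util.Map;

public class RecordSnapshot
{
   private final Map<String, String> records = new HashMap<String, String>();

   public void put(String nodeID, String remoteAddress)
   {
      synchronized (records)
      {
         records.put(nodeID, remoteAddress);
      }
   }

   // Returns an independent copy, so callers can iterate without holding the lock
   // and without risking ConcurrentModificationException.
   public Map<String, String> snapshot()
   {
      synchronized (records)
      {
         return new HashMap<String, String>(records);
      }
   }
}

Returning a copy keeps the lock hold time short: callers pay for one map copy up front and can then inspect the snapshot at leisure.
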
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterManagerImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterManagerImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/cluster/impl/ClusterManagerImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -15,9 +15,11 @@
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.lang.reflect.Array;
 import java.net.InetAddress;
-import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -34,6 +36,7 @@
 import org.hornetq.api.core.client.ClientSessionFactory;
 import org.hornetq.api.core.client.ClusterTopologyListener;
 import org.hornetq.api.core.client.HornetQClient;
+import org.hornetq.api.core.client.ServerLocator;
 import org.hornetq.core.client.impl.ServerLocatorInternal;
 import org.hornetq.core.client.impl.Topology;
 import org.hornetq.core.client.impl.TopologyMember;
@@ -90,28 +93,20 @@
 
    private volatile boolean started;
 
-   private boolean backup;
+   private volatile boolean backup;
 
    private final boolean clustered;
 
    // the cluster connections which links this node to other cluster nodes
    private final Map<String, ClusterConnection> clusterConnections = new HashMap<String, ClusterConnection>();
 
-   // regular client listeners to be notified of cluster topology changes.
-   // they correspond to regular clients using a HA ServerLocator
-   private Set<ClusterTopologyListener> clientListeners = new ConcurrentHashSet<ClusterTopologyListener>();
+   private final Topology topology = new Topology(this);
 
-   // cluster connections listeners to be notified of cluster topology changes
-   // they correspond to cluster connections on *other nodes connected to this one*
-   private Set<ClusterTopologyListener> clusterConnectionListeners = new ConcurrentHashSet<ClusterTopologyListener>();
-
-   private Topology topology = new Topology();
-
    private volatile ServerLocatorInternal backupServerLocator;
 
-   private final List<ServerLocatorInternal> clusterLocators = new ArrayList<ServerLocatorInternal>();
+   private final Set<ServerLocator> clusterLocators = new ConcurrentHashSet<ServerLocator>();
 
-   private Executor executor;
+   private final Executor executor;
 
    public ClusterManagerImpl(final ExecutorFactory executorFactory,
                              final HornetQServer server,
@@ -148,7 +143,31 @@
 
       this.clustered = clustered;
    }
+   
+   public String describe()
+   {
+      StringWriter str = new StringWriter();
+      PrintWriter out = new PrintWriter(str);
+      
+      out.println("Information on " + this);
+      out.println("*******************************************************");
+      out.println("Topology: " + topology.describe("Toopology on " + this));
+      
+      for (ClusterConnection conn : this.clusterConnections.values())
+      {
+         out.println(conn.describe());
+      }
+      
+      out.println("*******************************************************");
 
+      return str.toString();
+   }
+
+   public String toString()
+   {
+      return "ClusterManagerImpl[server=" + server + "]@" + System.identityHashCode(this);
+   }
+   
    public synchronized void start() throws Exception
    {
       if (started)
@@ -208,11 +227,6 @@
             managementService.unregisterCluster(clusterConnection.getName().toString());
          }
 
-         clusterConnectionListeners.clear();
-         clientListeners.clear();
-         clusterConnections.clear();
-         topology.clear();
-
       }
 
       for (Bridge bridge : bridges.values())
@@ -229,12 +243,20 @@
          backupServerLocator = null;
       }
 
-      for (ServerLocatorInternal clusterLocator : clusterLocators)
+      executor.execute(new Runnable()
       {
-         clusterLocator.close();
-      }
-      clusterLocators.clear();
+         public void run()
+         {
+            for (ServerLocator clusterLocator : clusterLocators)
+            {
+               clusterLocator.close();
+            }
+            clusterLocators.clear();
+         }
+      });
       started = false;
+
+      clusterConnections.clear();
    }
 
    public void notifyNodeDown(String nodeID)
@@ -243,22 +265,11 @@
       {
          return;
       }
+      
+      log.debug(this + "::removing nodeID=" + nodeID, new Exception ("trace"));
 
-      boolean removed = topology.removeMember(nodeID);
+      topology.removeMember(nodeID);
 
-      if (removed)
-      {
-
-         for (ClusterTopologyListener listener : clientListeners)
-         {
-            listener.nodeDown(nodeID);
-         }
-
-         for (ClusterTopologyListener listener : clusterConnectionListeners)
-         {
-            listener.nodeDown(nodeID);
-         }
-      }
    }
 
    public void notifyNodeUp(final String nodeID,
@@ -266,31 +277,45 @@
                             final boolean last,
                             final boolean nodeAnnounce)
    {
+      if (log.isDebugEnabled())
+      {
+         log.debug(this + "::NodeUp " + nodeID + connectorPair + ", nodeAnnounce=" + nodeAnnounce);
+      }
+
       TopologyMember member = new TopologyMember(connectorPair);
-      boolean updated = topology.addMember(nodeID, member);
-
+      boolean updated = topology.addMember(nodeID, member, last);
+      
       if (!updated)
       {
+         if (log.isDebugEnabled())
+         {
+            log.debug(this + " ignored notifyNodeUp on nodeID=" + nodeID + " pair=" + connectorPair + " as the topology already knew about it");
+         }
          return;
       }
 
-      for (ClusterTopologyListener listener : clientListeners)
+      if (log.isDebugEnabled())
       {
-         listener.nodeUP(nodeID, member.getConnector(), last);
+         log.debug(this + " received notifyNodeUp nodeID=" + nodeID + " connectorPair=" + connectorPair + 
+                   ", nodeAnnounce=" + nodeAnnounce + ", last=" + last);
       }
-
-      for (ClusterTopologyListener listener : clusterConnectionListeners)
-      {
-         listener.nodeUP(nodeID, member.getConnector(), last);
-      }
-
+      
       // if this is a node being announced we are hearing it direct from the nodes CM so need to inform our cluster
       // connections.
       if (nodeAnnounce)
       {
+         if (log.isDebugEnabled())
+         {
+            log.debug("Informing " + nodeID + " to " + clusterConnections.toString());
+         }
          for (ClusterConnection clusterConnection : clusterConnections.values())
          {
-            clusterConnection.nodeAnnounced(nodeID, connectorPair);
+            if (log.isTraceEnabled())
+            {
+               log.trace(this + " information clusterConnection=" + clusterConnection + 
+                         " nodeID=" + nodeID + " connectorPair=" + connectorPair + " last=" + last);
+            }
+            clusterConnection.nodeUP(nodeID, connectorPair, last);
          }
       }
    }
@@ -322,33 +347,22 @@
 
    public void addClusterTopologyListener(final ClusterTopologyListener listener, final boolean clusterConnection)
    {
-      synchronized (this)
-      {
-         if (clusterConnection)
+      topology.addClusterTopologyListener(listener);
+
+      // We now need to send the current topology to the client
+      executor.execute(new Runnable(){
+         public void run()
          {
-            this.clusterConnectionListeners.add(listener);
+            topology.sendTopology(listener);
+            
          }
-         else
-         {
-            this.clientListeners.add(listener);
-         }
-      }
-
-      // We now need to send the current topology to the client
-      topology.sendTopology(listener);
+      });
    }
 
-   public synchronized void removeClusterTopologyListener(final ClusterTopologyListener listener,
+   public void removeClusterTopologyListener(final ClusterTopologyListener listener,
                                                           final boolean clusterConnection)
    {
-      if (clusterConnection)
-      {
-         this.clusterConnectionListeners.remove(listener);
-      }
-      else
-      {
-         this.clientListeners.remove(listener);
-      }
+      topology.removeClusterTopologyListener(listener);
    }
 
    public Topology getTopology()
@@ -366,14 +380,10 @@
          String nodeID = server.getNodeID().toString();
 
          TopologyMember member = topology.getMember(nodeID);
-         // we swap the topology backup now = live
-         if (member != null)
-         {
-            member.getConnector().a = member.getConnector().b;
+         // swap the backup connector in as the live connector and send it to everybody
+         member = new TopologyMember(new Pair<TransportConfiguration, TransportConfiguration>(member.getConnector().b, null));
+         topology.addMember(nodeID, member, false);
 
-            member.getConnector().b = null;
-         }
-
          if (backupServerLocator != null)
          {
             // todo we could use the topology of this to preempt it arriving from the cc
@@ -424,16 +434,8 @@
                log.warn("unable to start bridge " + bridge.getName(), e);
             }
          }
-
-         for (ClusterTopologyListener listener : clientListeners)
-         {
-            listener.nodeUP(nodeID, member.getConnector(), false);
-         }
-
-         for (ClusterTopologyListener listener : clusterConnectionListeners)
-         {
-            listener.nodeUP(nodeID, member.getConnector(), false);
-         }
+         
+         topology.sendMemberToListeners(nodeID, member);
       }
    }
 
@@ -458,6 +460,11 @@
          log.warn("no cluster connections defined, unable to announce backup");
       }
    }
+   
+   void addClusterLocator(final ServerLocatorInternal serverLocator)
+   {
+      this.clusterLocators.add(serverLocator);
+   }
 
    private synchronized void announceNode()
    {
@@ -483,7 +490,7 @@
                                                                                                  null));
          }
 
-         topology.addMember(nodeID, member);
+         topology.addMember(nodeID, member, false);
       }
       else
       {
@@ -496,19 +503,6 @@
             // pair.a = cc.getConnector();
          }
       }
-
-      // Propagate the announcement
-
-      for (ClusterTopologyListener listener : clientListeners)
-      {
-         listener.nodeUP(nodeID, member.getConnector(), false);
-      }
-
-      for (ClusterTopologyListener listener : clusterConnectionListeners)
-      {
-         listener.nodeUP(nodeID, member.getConnector(), false);
-      }
-
    }
 
    private synchronized void deployBroadcastGroup(final BroadcastGroupConfiguration config) throws Exception
@@ -687,12 +681,14 @@
       }
 
       serverLocator.setConfirmationWindowSize(config.getConfirmationWindowSize());
-      serverLocator.setReconnectAttempts(config.getReconnectAttempts());
+      
+      // We are going to manually retry on the bridge in case of failure
+      serverLocator.setReconnectAttempts(0);
+      serverLocator.setInitialConnectAttempts(-1);
       serverLocator.setRetryInterval(config.getRetryInterval());
+      serverLocator.setMaxRetryInterval(config.getMaxRetryInterval());
       serverLocator.setRetryIntervalMultiplier(config.getRetryIntervalMultiplier());
-      serverLocator.setMaxRetryInterval(config.getMaxRetryInterval());
       serverLocator.setClientFailureCheckPeriod(config.getClientFailureCheckPeriod());
-      serverLocator.setInitialConnectAttempts(config.getReconnectAttempts());
       serverLocator.setBlockOnDurableSend(!config.isUseDuplicateDetection());
       serverLocator.setBlockOnNonDurableSend(!config.isUseDuplicateDetection());
       if (!config.isUseDuplicateDetection())
@@ -700,8 +696,14 @@
          log.debug("Bridge " + config.getName() + 
                    " is configured to not use duplicate detecion, it will send messages synchronously");
       }
+      
       clusterLocators.add(serverLocator);
+      
       Bridge bridge = new BridgeImpl(serverLocator,
+                                     config.getReconnectAttempts(),
+                                     config.getRetryInterval(),
+                                     config.getRetryIntervalMultiplier(),
+                                     config.getMaxRetryInterval(),
                                      nodeUUID,
                                      new SimpleString(config.getName()),
                                      queue,
@@ -786,9 +788,17 @@
          {
             ClusterManagerImpl.log.warn("No discovery group with name '" + config.getDiscoveryGroupName() +
                                         "'. The cluster connection will not be deployed.");
+            return;
          }
+         
+         if (log.isDebugEnabled())
+         {
+            log.debug(this + " Starting a Discovery Group Cluster Connection, name=" + config.getDiscoveryGroupName() + ", dg=" + dg);
+         }
 
-         clusterConnection = new ClusterConnectionImpl(dg,
+         clusterConnection = new ClusterConnectionImpl(this,
+                                                       topology,
+                                                       dg,
                                                        connector,
                                                        new SimpleString(config.getName()),
                                                        new SimpleString(config.getAddress()),
@@ -817,8 +827,15 @@
       {
          TransportConfiguration[] tcConfigs = config.getStaticConnectors() != null ? connectorNameListToArray(config.getStaticConnectors())
                                                                                   : null;
+         
+         if (log.isDebugEnabled())
+         {
+            log.debug(this + " defining cluster connection towards " + Arrays.toString(tcConfigs));
+         }
 
-         clusterConnection = new ClusterConnectionImpl(tcConfigs,
+         clusterConnection = new ClusterConnectionImpl(this,
+                                                       topology,
+                                                       tcConfigs,
                                                        connector,
                                                        new SimpleString(config.getName()),
                                                        new SimpleString(config.getAddress()),
@@ -848,6 +865,10 @@
 
       clusterConnections.put(config.getName(), clusterConnection);
 
+      if (log.isDebugEnabled())
+      {
+         log.debug("ClusterConnection.start at " + clusterConnection, new Exception ("trace"));
+      }
       clusterConnection.start();
 
       if (backup)
@@ -864,6 +885,7 @@
 
          backupServerLocator = (ServerLocatorInternal)HornetQClient.createServerLocatorWithoutHA(tcConfigs);
          backupServerLocator.setReconnectAttempts(-1);
+         backupServerLocator.setInitialConnectAttempts(-1);
       }
       else if (config.getDiscoveryGroupName() != null)
       {
@@ -878,6 +900,7 @@
 
          backupServerLocator = (ServerLocatorInternal)HornetQClient.createServerLocatorWithoutHA(dg);
          backupServerLocator.setReconnectAttempts(-1);
+         backupServerLocator.setInitialConnectAttempts(-1);
       }
       else
       {
@@ -890,6 +913,10 @@
          {
             try
             {
+               if (log.isDebugEnabled())
+               {
+                  log.debug(ClusterManagerImpl.this + ":: announcing " + connector + " to " + backupServerLocator);
+               }
                ClientSessionFactory backupSessionFactory = backupServerLocator.connect();
                if (backupSessionFactory != null)
                {
@@ -901,7 +928,7 @@
             }
             catch (Exception e)
             {
-               log.warn("Unable to announce backup", e);
+               log.warn("Unable to announce backup, retrying", e);
             }
          }
       });
@@ -931,6 +958,17 @@
    // for testing
    public void clear()
    {
+      for (Bridge bridge : bridges.values())
+      {
+         try
+         {
+            bridge.stop();
+         }
+         catch (Exception e)
+         {
+            log.warn(e.getMessage(), e);
+         }
+      }
       bridges.clear();
       for (ClusterConnection clusterConnection : clusterConnections.values())
       {

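In the ClusterManagerImpl changes above, the separate clientListeners and clusterConnectionListeners sets are replaced by a single Topology, and addClusterTopologyListener() now pushes the current topology to the new listener on the executor so registration never blocks the caller. A rough sketch of that registration pattern, assuming a generic listener interface and plain JDK collections (illustrative only, not the HornetQ API):

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.Executor;

public class TopologyRegistry
{
   public interface TopologyListener
   {
      void nodeUP(String nodeID, String connector);
   }

   private final Map<String, String> members = new ConcurrentHashMap<String, String>();

   private final Set<TopologyListener> listeners = new CopyOnWriteArraySet<TopologyListener>();

   private final Executor executor;

   public TopologyRegistry(Executor executor)
   {
      this.executor = executor;
   }

   public void addListener(final TopologyListener listener)
   {
      listeners.add(listener);

      // Send the current snapshot off the caller's thread so registration never blocks.
      executor.execute(new Runnable()
      {
         public void run()
         {
            for (Map.Entry<String, String> entry : members.entrySet())
            {
               listener.nodeUP(entry.getKey(), entry.getValue());
            }
         }
      });
   }

   public void memberUp(String nodeID, String connector)
   {
      members.put(nodeID, connector);
      for (TopologyListener listener : listeners)
      {
         listener.nodeUP(nodeID, connector);
      }
   }
}

Doing the initial push on the executor mirrors the intent of the patch: a slow or blocked listener must not stall the thread that registers it.
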
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -14,6 +14,8 @@
 package org.hornetq.core.server.impl;
 
 import java.io.File;
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.lang.management.ManagementFactory;
 import java.nio.channels.ClosedChannelException;
 import java.security.AccessController;
@@ -317,7 +319,14 @@
 
             initialisePart2();
 
-            log.info("Server is now live");
+            if (identity != null)
+            {
+               log.info("Server " + identity + " is now live");
+            }
+            else
+            {
+               log.info("Server is now live");
+            }
          }
          catch (Exception e)
          {
@@ -436,13 +445,18 @@
                               {
                                  try
                                  {
+                                    log.debug(HornetQServerImpl.this + "::Stopping live node in favor of failback");
                                     stop(true);
+                                    // We need to wait some time before we start the backup again
+                                    // otherwise we may eventually start before the live had a chance to get it
+                                    Thread.sleep(configuration.getFailbackDelay());
                                     configuration.setBackup(true);
+                                    log.debug(HornetQServerImpl.this + "::Starting backup node now after failback");
                                     start();
                                  }
                                  catch (Exception e)
                                  {
-                                    log.info("unable to restart server, please kill and restart manually", e);
+                                    log.warn("unable to restart server, please kill and restart manually", e);
                                  }
                               }
                            });
@@ -451,6 +465,7 @@
                      }
                      catch (Exception e)
                      {
+                        log.debug(e.getMessage(), e);
                         //hopefully it will work next call
                      }
                   }
@@ -492,9 +507,9 @@
                nodeManager.interrupt();
 
                backupActivationThread.interrupt();
+               
+               backupActivationThread.join(1000);
 
-               // TODO: do we really need this?
-               Thread.sleep(1000);
             }
 
             if (System.currentTimeMillis() - start >= timeout)
@@ -593,7 +608,7 @@
          }
          started = true;
 
-         HornetQServerImpl.log.info("HornetQ Server version " + getVersion().getFullVersion() + " [" + nodeManager.getNodeId() + "] started");
+         HornetQServerImpl.log.info("HornetQ Server version " + getVersion().getFullVersion() + " [" + nodeManager.getNodeId() + "]" + (this.identity != null ? " (" + identity : ")") + " started");
       }
 
 
@@ -741,7 +756,7 @@
 
          for (Runnable task : tasks)
          {
-            HornetQServerImpl.log.debug("Waiting for " + task);
+            HornetQServerImpl.log.debug(this + "::Waiting for " + task);
          }
 
          if (memoryManager != null)
@@ -824,6 +839,16 @@
    // HornetQServer implementation
    // -----------------------------------------------------------
 
+   public String describe()
+   {
+      StringWriter str = new StringWriter();
+      PrintWriter out = new PrintWriter(str);
+      
+      out.println("Information about server " + this.identity);
+      out.println("Cluster Connection:" + this.getClusterManager().describe());
+      
+      return str.toString();
+   }
    
    public void setIdentity(String identity)
    {
@@ -840,6 +865,11 @@
       return scheduledPool;
    }
    
+   public ExecutorService getThreadPool()
+   {
+      return threadPool;
+   }
+   
    public Configuration getConfiguration()
    {
       return configuration;
@@ -905,7 +935,7 @@
       return version;
    }
 
-   public synchronized boolean isStarted()
+   public boolean isStarted()
    {
       return started;
    }
@@ -1135,7 +1165,7 @@
       activateCallbacks.remove(callback);
    }
 
-   public synchronized ExecutorFactory getExecutorFactory()
+   public ExecutorFactory getExecutorFactory()
    {
       return executorFactory;
    }
@@ -1325,7 +1355,7 @@
    {
       // Create the pools - we have two pools - one for non scheduled - and another for scheduled
 
-      ThreadFactory tFactory = new HornetQThreadFactory("HornetQ-server-threads" + System.identityHashCode(this),
+      ThreadFactory tFactory = new HornetQThreadFactory("HornetQ-server-" + this.toString(),
                                                         false,
                                                         getThisClassLoader());
 
@@ -1603,28 +1633,31 @@
       {
          queueBindingInfosMap.put(queueBindingInfo.getId(), queueBindingInfo);
          
-         Filter filter = FilterImpl.createFilter(queueBindingInfo.getFilterString());
-
-         PageSubscription subscription = pagingManager.getPageStore(queueBindingInfo.getAddress()).getCursorProvier().createSubscription(queueBindingInfo.getId(), filter, true);
+         if (queueBindingInfo.getFilterString() == null || !queueBindingInfo.getFilterString().toString().equals(GENERIC_IGNORED_FILTER))
+         {
+            Filter filter = FilterImpl.createFilter(queueBindingInfo.getFilterString());
+   
+            PageSubscription subscription = pagingManager.getPageStore(queueBindingInfo.getAddress()).getCursorProvier().createSubscription(queueBindingInfo.getId(), filter, true);
+            
+            Queue queue = queueFactory.createQueue(queueBindingInfo.getId(),
+                                                   queueBindingInfo.getAddress(),
+                                                   queueBindingInfo.getQueueName(),
+                                                   filter,
+                                                   subscription,
+                                                   true,
+                                                   false);
+   
+            Binding binding = new LocalQueueBinding(queueBindingInfo.getAddress(), queue, nodeManager.getNodeId());
+   
+            queues.put(queueBindingInfo.getId(), queue);
+   
+            postOffice.addBinding(binding);
+   
+            managementService.registerAddress(queueBindingInfo.getAddress());
+            managementService.registerQueue(queue, queueBindingInfo.getAddress(), storageManager);
+         }
          
-         Queue queue = queueFactory.createQueue(queueBindingInfo.getId(),
-                                                queueBindingInfo.getAddress(),
-                                                queueBindingInfo.getQueueName(),
-                                                filter,
-                                                subscription,
-                                                true,
-                                                false);
-
-         Binding binding = new LocalQueueBinding(queueBindingInfo.getAddress(), queue, nodeManager.getNodeId());
-
-         queues.put(queueBindingInfo.getId(), queue);
-
-         postOffice.addBinding(binding);
-
-         managementService.registerAddress(queueBindingInfo.getAddress());
-         managementService.registerQueue(queue, queueBindingInfo.getAddress(), storageManager);
          
-         
       }
 
       for (GroupingInfo groupingInfo : groupingInfos)

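The failback path added to HornetQServerImpl above stops the node that had become live, waits for the configured failback delay so the returning live server can take over, switches the configuration to backup and starts again. A simplified sketch of that sequence against a placeholder Server interface (assumed here for illustration; it is not the HornetQ API):

public class FailbackRunner implements Runnable
{
   public interface Server
   {
      void stop() throws Exception;

      void setBackup(boolean backup);

      void start() throws Exception;
   }

   private final Server server;

   private final long failbackDelayMillis;

   public FailbackRunner(Server server, long failbackDelayMillis)
   {
      this.server = server;
      this.failbackDelayMillis = failbackDelayMillis;
   }

   public void run()
   {
      try
      {
         server.stop();
         // Give the returning live node a chance to take over before restarting as backup.
         Thread.sleep(failbackDelayMillis);
         server.setBackup(true);
         server.start();
      }
      catch (Exception e)
      {
         // In the real code this is logged with a warning that the server
         // must be killed and restarted manually.
         e.printStackTrace();
      }
   }
}
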
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/QueueImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/QueueImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/QueueImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -22,11 +22,14 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Executor;
+import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.hornetq.api.core.Message;
 import org.hornetq.api.core.SimpleString;
@@ -400,7 +403,7 @@
 
       directDeliver = false;
 
-      executor.execute(concurrentPoller);
+      getExecutor().execute(concurrentPoller);
    }
 
    public void forceDelivery()
@@ -424,7 +427,13 @@
    
    public void deliverAsync()
    {
-      getExecutor().execute(deliverRunner);
+      try
+      {
+         getExecutor().execute(deliverRunner);
+      }
+      catch (RejectedExecutionException ignored)
+      {
+      }
    }
 
    public void close() throws Exception
@@ -726,9 +735,31 @@
 
    public long getMessageCount()
    {
-      blockOnExecutorFuture();
+      final CountDownLatch latch = new CountDownLatch(1);
+      final AtomicLong count = new AtomicLong(0);
       
-      return getInstantMessageCount();
+      getExecutor().execute(new Runnable()
+      {
+         public void run()
+         {
+            count.set(getInstantMessageCount());
+            latch.countDown();
+         }
+      });
+      
+      try
+      {
+         if (!latch.await(10, TimeUnit.SECONDS))
+         {
+            throw new IllegalStateException("Timed out on waiting for MessageCount");
+         }
+      }
+      catch (Exception e)
+      {
+         log.warn(e.getMessage(), e);
+      }
+      
+      return count.get();
    }
    
    public long getInstantMessageCount()
@@ -897,9 +928,31 @@
 
    public long getMessagesAdded()
    {
-      blockOnExecutorFuture();
+      final CountDownLatch latch = new CountDownLatch(1);
+      final AtomicLong count = new AtomicLong(0);
       
-      return getInstantMessagesAdded();
+      getExecutor().execute(new Runnable()
+      {
+         public void run()
+         {
+            count.set(getInstantMessagesAdded());
+            latch.countDown();
+         }
+      });
+      
+      try
+      {
+         if (!latch.await(10, TimeUnit.SECONDS))
+         {
+            throw new IllegalStateException("Timed out on waiting for MessagesAdded");
+         }
+      }
+      catch (Exception e)
+      {
+         log.warn(e.getMessage(), e);
+      }
+      
+      return count.get();
   }
    
    public long getInstantMessagesAdded()
@@ -1433,7 +1486,7 @@
    @Override
    public String toString()
    {
-      return "QueueImpl[name=" + name.toString() + "]@" + Integer.toHexString(System.identityHashCode(this));
+      return "QueueImpl[name=" + name.toString() + ", postOffice=" + this.postOffice + "]@" + Integer.toHexString(System.identityHashCode(this));
    }
 
    // Private

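The QueueImpl changes above make getMessageCount() and getMessagesAdded() dispatch the read to the queue's executor and wait on a CountDownLatch with a 10-second timeout, instead of blocking on an executor future. A compact sketch of that pattern with illustrative names (simplified: the timeout is propagated here rather than just logged as in the patch):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class ExecutorCounter
{
   public interface CountSource
   {
      long instantCount();
   }

   private final Executor executor;

   public ExecutorCounter(Executor executor)
   {
      this.executor = executor;
   }

   public long count(final CountSource source) throws InterruptedException
   {
      final CountDownLatch latch = new CountDownLatch(1);
      final AtomicLong count = new AtomicLong(0);

      // Read the value on the owning executor so it is consistent with pending work.
      executor.execute(new Runnable()
      {
         public void run()
         {
            count.set(source.instantCount());
            latch.countDown();
         }
      });

      if (!latch.await(10, TimeUnit.SECONDS))
      {
         throw new IllegalStateException("Timed out waiting for count");
      }

      return count.get();
   }
}

The bounded wait trades strict accuracy for liveness: a wedged executor yields a timeout instead of hanging management calls indefinitely.
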
Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerConsumerImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerConsumerImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerConsumerImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -232,7 +232,7 @@
          {
             return HandleStatus.BUSY;
          }
-
+         
          // If there is a pendingLargeMessage we can't take another message
          // This has to be checked inside the lock as the set to null is done inside the lock
          if (largeMessageInDelivery)
@@ -240,6 +240,11 @@
             return HandleStatus.BUSY;
          }
 
+         if (log.isTraceEnabled())
+         {
+            log.trace("Handling reference " + ref);
+         }
+
          final ServerMessage message = ref.getMessage();
 
          if (filter != null && !filter.match(message))
@@ -260,9 +265,9 @@
 
             // If updateDeliveries = false (set by strict-update),
             // the updateDeliveryCount would still be updated after c
-            if (strictUpdateDeliveryCount)
+            if (strictUpdateDeliveryCount && !ref.isPaged())
             {
-               if (ref.getMessage().isDurable() && ref.getQueue().isDurable() && !ref.getQueue().isInternalQueue())
+               if (ref.getMessage().isDurable() && ref.getQueue().isDurable() && !ref.getQueue().isInternalQueue() && !ref.isPaged())
                {
                   storageManager.updateDeliveryCount(ref);
                }

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -443,6 +443,10 @@
       {
          try
          {
+           if (log.isDebugEnabled())
+           {
+              log.debug("deleting temporary queue " + bindingName);
+           }
             if (postOffice.getBinding(bindingName) != null)
             {
                postOffice.removeBinding(bindingName);

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/management/impl/ManagementServiceImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/management/impl/ManagementServiceImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/server/management/impl/ManagementServiceImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -641,7 +641,7 @@
       {
          log.trace("Sending Notification = "  + notification + 
                    ", notificationEnabled=" + notificationsEnabled + 
-                   " messagingServerControl=" + messagingServerControl, new Exception ("trace"));
+                   " messagingServerControl=" + messagingServerControl);
       }
       if (messagingServerControl != null && notificationsEnabled)
       {
@@ -673,6 +673,10 @@
                // https://jira.jboss.org/jira/browse/HORNETQ-317
                if (messagingServer == null || !messagingServer.isInitialised())
                {
+                  if (log.isDebugEnabled())
+                  {
+                     log.debug("ignoring notification " + notification + " as the server is not initialized");
+                  }
                   return;
                }
 

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -424,6 +424,10 @@
 
    public void markAsRollbackOnly(final HornetQException exception)
    {
+      if (log.isDebugEnabled())
+      {
+         log.debug("Marking Transaction " + this.id + " as rollback only");
+      }
       state = State.ROLLBACK_ONLY;
 
       this.exception = exception;

Modified: branches/Branch_2_2_EAP/src/main/org/hornetq/utils/HornetQThreadFactory.java
===================================================================
--- branches/Branch_2_2_EAP/src/main/org/hornetq/utils/HornetQThreadFactory.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/src/main/org/hornetq/utils/HornetQThreadFactory.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -63,7 +63,7 @@
       // when sandboxed, the code does not have the RuntimePermission modifyThreadGroup
       if (System.getSecurityManager() == null)
       {
-         t = new Thread(group, command, "Thread-" + threadCount.getAndIncrement() + " (group:" + group.getName() + ")");
+         t = new Thread(group, command, "Thread-" + threadCount.getAndIncrement() + " (" + group.getName() + ")");
       }
       else
       {

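The HornetQThreadFactory change above only shortens the thread-name format, but the surrounding branch is the sandbox-aware thread creation pattern: build the thread directly when no SecurityManager is installed, otherwise go through a privileged block so the modifyThreadGroup permission check succeeds. A rough standalone sketch of that pattern (SandboxAwareThreadFactory and its fields are illustrative, not the HornetQ implementation):

import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class SandboxAwareThreadFactory implements ThreadFactory
{
   private final ThreadGroup group = new ThreadGroup("example-group");

   private final AtomicInteger threadCount = new AtomicInteger(0);

   public Thread newThread(final Runnable command)
   {
      final String name = "Thread-" + threadCount.getAndIncrement() + " (" + group.getName() + ")";

      // Without a SecurityManager the thread can be created directly.
      if (System.getSecurityManager() == null)
      {
         return new Thread(group, command, name);
      }

      // When sandboxed, the caller may lack RuntimePermission("modifyThreadGroup"),
      // so the creation is wrapped in a privileged action.
      return AccessController.doPrivileged(new PrivilegedAction<Thread>()
      {
         public Thread run()
         {
            return new Thread(group, command, name);
         }
      });
   }
}
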
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSMessageCounterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSMessageCounterTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSMessageCounterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,6 +13,7 @@
 package org.hornetq.tests.integration.client;
 
 import javax.jms.Connection;
+import javax.jms.DeliveryMode;
 import javax.jms.MessageProducer;
 import javax.jms.Queue;
 import javax.jms.Session;
@@ -42,6 +43,16 @@
    }
 
 
+   public void testLoop() throws Exception
+   {
+      for (int i = 0 ; i < 100; i++)
+      {
+         log.info("#test " + i);
+         testMessageCounter();
+         tearDown();
+         setUp();
+      }
+   }
 
    public void testMessageCounter() throws Exception
    {
@@ -52,6 +63,7 @@
       Queue queue = createQueue(true, "Test");
       
       MessageProducer producer = sess.createProducer(queue);
+      producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT);
 
       final int numMessages = 100;
 

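The producer change above switches the counter test to non-persistent delivery, which avoids journal writes for messages the test is only going to count. A minimal standalone JMS snippet showing the same setting; the ConnectionFactory and Queue are assumed to be supplied by the caller (for example the test base class), and NonPersistentSendExample is just an illustrative name:

import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.DeliveryMode;
import javax.jms.MessageProducer;
import javax.jms.Queue;
import javax.jms.Session;

public class NonPersistentSendExample
{
   // Sends a batch of non-persistent text messages to the given queue.
   public static void send(ConnectionFactory cf, Queue queue, int numMessages) throws Exception
   {
      Connection conn = cf.createConnection();
      try
      {
         Session sess = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
         MessageProducer producer = sess.createProducer(queue);

         // Non-persistent messages are not written to the journal.
         producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT);

         for (int i = 0; i < numMessages; i++)
         {
            producer.send(sess.createTextMessage("message " + i));
         }
      }
      finally
      {
         conn.close();
      }
   }
}
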
Added: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSPagingFileDeleteTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSPagingFileDeleteTest.java	                        (rev 0)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/JMSPagingFileDeleteTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2010 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.  See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.client;
+
+import javax.jms.BytesMessage;
+import javax.jms.Connection;
+import javax.jms.Message;
+import javax.jms.MessageConsumer;
+import javax.jms.MessageProducer;
+import javax.jms.Session;
+import javax.jms.Topic;
+
+import org.hornetq.api.core.SimpleString;
+import org.hornetq.core.logging.Logger;
+import org.hornetq.core.paging.PagingStore;
+import org.hornetq.core.settings.impl.AddressSettings;
+import org.hornetq.tests.util.JMSTestBase;
+
+public class JMSPagingFileDeleteTest extends JMSTestBase
+{
+   static Logger log = Logger.getLogger(JMSPagingFileDeleteTest.class);
+   
+   Topic topic1;
+
+   Connection connection;
+
+   Session session;
+
+   MessageConsumer subscriber1;
+
+   MessageConsumer subscriber2;
+
+   PagingStore pagingStore;
+
+   private static final int MESSAGE_SIZE = 1024;
+
+   private static final int PAGE_SIZE = 10 * 1024;
+
+   private static final int PAGE_MAX = 20 * 1024;
+
+   private static final int RECEIVE_TIMEOUT = 500;
+
+   private static final int MESSAGE_NUM = 50;
+
+   @Override
+   protected boolean usePersistence()
+   {
+      return true;
+   }
+
+   @Override
+   protected void setUp() throws Exception
+   {
+      clearData();
+      super.setUp();
+ 
+      topic1 = createTopic("topic1");
+
+      // Paging Setting
+      AddressSettings setting = new AddressSettings();
+      setting.setPageSizeBytes(JMSPagingFileDeleteTest.PAGE_SIZE);
+      setting.setMaxSizeBytes(JMSPagingFileDeleteTest.PAGE_MAX);
+      server.getAddressSettingsRepository().addMatch("#", setting);
+   }
+
+   @Override
+   protected void tearDown() throws Exception
+   {
+      log.info("#tearDown");
+      topic1 = null;
+      super.tearDown();
+   }
+   
+   public void testTopics() throws Exception
+   {
+      connection = null;
+
+      try
+      {
+         connection = cf.createConnection();
+         connection.setClientID("cid");
+
+         session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+         MessageProducer producer = session.createProducer(topic1);
+         subscriber1 = session.createDurableSubscriber(topic1, "subscriber-1");
+         subscriber2 = session.createDurableSubscriber(topic1, "subscriber-2");
+
+         // -----------------(Step1) Publish Messages to make Paging Files. --------------------
+         System.out.println("---------- Send messages. ----------");
+         BytesMessage bytesMessage = session.createBytesMessage();
+         bytesMessage.writeBytes(new byte[JMSPagingFileDeleteTest.MESSAGE_SIZE]);
+         for (int i = 0; i < JMSPagingFileDeleteTest.MESSAGE_NUM; i++)
+         {
+            producer.send(bytesMessage);
+         }
+         System.out.println("Sent " + JMSPagingFileDeleteTest.MESSAGE_NUM + " messages.");
+
+         pagingStore = server.getPagingManager().getPageStore(new SimpleString("jms.topic.topic1"));
+         printPageStoreInfo(pagingStore);
+
+         assertTrue(pagingStore.isPaging());
+
+         connection.start();
+
+         // -----------------(Step2) Restart the server. --------------------------------------
+         stopAndStartServer(); // To run this test without restarting the server, comment out this line.
+
+         // -----------------(Step3) Subscribe to all the messages from the topic.--------------
+         System.out.println("---------- Receive all messages. ----------");
+         for (int i = 0; i < JMSPagingFileDeleteTest.MESSAGE_NUM; i++)
+         {
+            Message message1 = subscriber1.receive(JMSPagingFileDeleteTest.RECEIVE_TIMEOUT);
+            assertNotNull(message1);
+            Message message2 = subscriber2.receive(JMSPagingFileDeleteTest.RECEIVE_TIMEOUT);
+            assertNotNull(message2);
+         }
+
+         pagingStore = server.getPagingManager().getPageStore(new SimpleString("jms.topic.topic1"));
+         long timeout = System.currentTimeMillis() + 5000;
+         while (timeout > System.currentTimeMillis() && pagingStore.isPaging())
+         {
+            Thread.sleep(100);
+         }
+         assertFalse(pagingStore.isPaging());
+         
+         printPageStoreInfo(pagingStore);
+
+         assertEquals(0, pagingStore.getAddressSize());
+         // assertEquals(1, pagingStore.getNumberOfPages()); // The number of pages was expected to be 1, but it was not.
+         assertFalse(pagingStore.isPaging()); // isPaging() was expected to be false, but it was true.
+         // If the server is not restarted, this test passes.
+
+         // -----------------(Step4) Publish a message. the message is stored in the paging file.
+         producer = session.createProducer(topic1);
+         bytesMessage = session.createBytesMessage();
+         bytesMessage.writeBytes(new byte[JMSPagingFileDeleteTest.MESSAGE_SIZE]);
+         producer.send(bytesMessage);
+
+         printPageStoreInfo(pagingStore);
+
+         assertEquals(1, pagingStore.getNumberOfPages()); // The number of pages was expected to be 1, but it was not.
+      }
+      finally
+      {
+         if (connection != null)
+         {
+            connection.close();
+         }
+      }
+   }
+
+   private void stopAndStartServer() throws Exception
+   {
+      System.out.println("---------- Restart server. ----------");
+      connection.close();
+
+      jmsServer.stop();
+
+      jmsServer.start();
+      jmsServer.activated();
+      registerConnectionFactory();
+
+      printPageStoreInfo(pagingStore);
+      reconnect();
+   }
+
+   private void reconnect() throws Exception
+   {
+      connection = cf.createConnection();
+      connection.setClientID("cid");
+      session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
+      subscriber1 = session.createDurableSubscriber(topic1, "subscriber-1");
+      subscriber2 = session.createDurableSubscriber(topic1, "subscriber-2");
+      connection.start();
+   }
+
+   private void printPageStoreInfo(PagingStore pagingStore) throws Exception
+   {
+      System.out.println("---------- Paging Store Info ----------");
+      System.out.println(" CurrentPage = " + pagingStore.getCurrentPage());
+      System.out.println(" FirstPage = " + pagingStore.getFirstPage());
+      System.out.println(" Number of Pages = " + pagingStore.getNumberOfPages());
+      System.out.println(" Address Size = " + pagingStore.getAddressSize());
+      System.out.println(" Is Paging = " + pagingStore.isPaging());
+   }
+}
\ No newline at end of file

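The new test above, like several other tests touched by this patch, waits for paging to stop by polling isPaging() inside a deadline loop before asserting. A small generic helper in the same spirit, written here as an illustrative utility rather than something that exists in the code base:

import java.util.concurrent.Callable;

public class WaitUtil
{
   // Polls the condition every 100ms until it returns true or the timeout elapses.
   // Returns the last observed value of the condition.
   public static boolean waitForCondition(Callable<Boolean> condition, long timeoutMillis) throws Exception
   {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      while (System.currentTimeMillis() < deadline)
      {
         if (condition.call())
         {
            return true;
         }
         Thread.sleep(100);
      }
      return condition.call();
   }
}

In the test above this would read, for instance, waitForCondition(new Callable<Boolean>() { public Boolean call() { return !pagingStore.isPaging(); } }, 5000) before the assertFalse on isPaging().
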
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/NIOvsOIOTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/NIOvsOIOTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/NIOvsOIOTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -49,45 +49,35 @@
 
    // Public --------------------------------------------------------
 
-   public static TestSuite suite()
+   public void testNIOPerf() throws Exception
    {
-      return new TestSuite();
+      testPerf(true);
    }
    
-//   public void testNIOPerf() throws Exception
-//   {
-//      log.info("************* Testing NIO");
-//      testPerf(true);
-//   }
-//   
-//   public void testOIOPerf() throws Exception
-//   {
-//      log.info("************ Testing OIO");
-//      testPerf(false);
-//   }
+   public void testOIOPerf() throws Exception
+   {
+      testPerf(false);
+   }
    
    private void doTest(String dest) throws Exception
    {
-      System.getProperties().put("hq.batchHQ", "true");
       
-      String connectorFactoryClassName = "org.hornetq.core.remoting.impl.netty.NettyConnectorFactory";
-      
-      
       final int numSenders = 1;
 
       final int numReceivers = 1;
       
-      final int numMessages = 200000;
+      final int numMessages = 20000;
       
       Receiver[] receivers = new Receiver[numReceivers];
       
       Sender[] senders = new Sender[numSenders];
       
       List<ClientSessionFactory> factories = new ArrayList<ClientSessionFactory>();
+
+      ServerLocator locator = HornetQClient.createServerLocatorWithoutHA(new TransportConfiguration(UnitTestCase.INVM_CONNECTOR_FACTORY));
       
       for (int i = 0; i < numReceivers; i++)
       {
-         ServerLocator locator = HornetQClient.createServerLocatorWithoutHA(new TransportConfiguration(UnitTestCase.INVM_CONNECTOR_FACTORY));
 
          ClientSessionFactory sf = locator.createSessionFactory();
          
@@ -102,8 +92,6 @@
       
       for (int i = 0; i < numSenders; i++)
       {
-         ServerLocator locator = HornetQClient.createServerLocatorWithoutHA(new TransportConfiguration(UnitTestCase.INVM_CONNECTOR_FACTORY));
-
          ClientSessionFactory sf = locator.createSessionFactory();
 
          factories.add(sf);
@@ -134,7 +122,7 @@
       
       double rate = 1000 * (double)(numMessages * numSenders) / (end - start);
       
-      log.info("Rate is " + rate + " msgs sec");
+      logAndSystemOut("Rate is " + rate + " msgs sec");
       
       for (int i = 0; i < numSenders; i++)
       {         
@@ -150,6 +138,8 @@
       {      
          sf.close();
       }
+      
+      locator.close();
    }
 
    private void testPerf(boolean nio) throws Exception
@@ -217,7 +207,7 @@
 
       void prepare() throws Exception
       {
-         session = sf.createSession();
+         session = sf.createSession(true, true);
 
          producer = session.createProducer(dest);
       }
@@ -277,7 +267,7 @@
 
       void prepare() throws Exception
       {
-         session = sf.createSession();
+         session = sf.createSession(true, true, 0);
 
          queueName = UUIDGenerator.getInstance().generateStringUUID();
 

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/PagingTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/PagingTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/client/PagingTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,6 +13,7 @@
 
 package org.hornetq.tests.integration.client;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
@@ -23,6 +24,7 @@
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.transaction.xa.XAResource;
@@ -497,6 +499,248 @@
 
    }
 
+   /**
+    * This test removes all the page directories during a restart, simulating a crash scenario. The server should still start afterwards.
+    */
+   public void testDeletePhisicalPages() throws Exception
+   {
+      clearData();
+
+      Configuration config = createDefaultConfig();
+      config.setPersistDeliveryCountBeforeDelivery(true);
+
+      config.setJournalSyncNonTransactional(false);
+
+      HornetQServer server = createServer(true,
+                                          config,
+                                          PagingTest.PAGE_SIZE,
+                                          PagingTest.PAGE_MAX,
+                                          new HashMap<String, AddressSettings>());
+
+      server.start();
+
+      final int messageSize = 1024;
+
+      final int numberOfMessages = 1000;
+
+      try
+      {
+         ServerLocator locator = createInVMNonHALocator();
+
+         locator.setBlockOnNonDurableSend(true);
+         locator.setBlockOnDurableSend(true);
+         locator.setBlockOnAcknowledge(true);
+
+         ClientSessionFactory sf = locator.createSessionFactory();
+
+         ClientSession session = sf.createSession(false, false, false);
+
+         session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
+
+         ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
+
+         ClientMessage message = null;
+
+         byte[] body = new byte[messageSize];
+
+         ByteBuffer bb = ByteBuffer.wrap(body);
+
+         for (int j = 1; j <= messageSize; j++)
+         {
+            bb.put(getSamplebyte(j));
+         }
+
+         for (int i = 0; i < numberOfMessages; i++)
+         {
+            message = session.createMessage(true);
+
+            HornetQBuffer bodyLocal = message.getBodyBuffer();
+
+            bodyLocal.writeBytes(body);
+
+            message.putIntProperty(new SimpleString("id"), i);
+
+            producer.send(message);
+            if (i % 1000 == 0)
+            {
+               session.commit();
+            }
+         }
+         session.commit();
+         session.close();
+
+         session = null;
+
+         sf.close();
+         locator.close();
+
+         server.stop();
+
+         server = createServer(true,
+                               config,
+                               PagingTest.PAGE_SIZE,
+                               PagingTest.PAGE_MAX,
+                               new HashMap<String, AddressSettings>());
+         server.start();
+
+         locator = createInVMNonHALocator();
+         sf = locator.createSessionFactory();
+
+         Queue queue = server.locateQueue(ADDRESS);
+
+         assertEquals(numberOfMessages, queue.getMessageCount());
+
+         LinkedList<Xid> xids = new LinkedList<Xid>();
+
+         int msgReceived = 0;
+         ClientSession sessionConsumer = sf.createSession(false, false, false);
+         sessionConsumer.start();
+         ClientConsumer consumer = sessionConsumer.createConsumer(PagingTest.ADDRESS);
+         for (int msgCount = 0; msgCount < numberOfMessages; msgCount++)
+         {
+            log.info("Received " + msgCount);
+            msgReceived++;
+            ClientMessage msg = consumer.receiveImmediate();
+            if (msg == null)
+            {
+               log.info("It's null. leaving now");
+               sessionConsumer.commit();
+               fail("Didn't receive a message");
+            }
+            msg.acknowledge();
+
+            if (msgCount % 5 == 0)
+            {
+               log.info("commit");
+               sessionConsumer.commit();
+            }
+         }
+
+         sessionConsumer.commit();
+
+         sessionConsumer.close();
+
+         sf.close();
+
+         locator.close();
+
+         assertEquals(0, queue.getMessageCount());
+
+         long timeout = System.currentTimeMillis() + 5000;
+         while (timeout > System.currentTimeMillis() && queue.getPageSubscription().getPagingStore().isPaging())
+         {
+            Thread.sleep(100);
+         }
+         assertFalse(queue.getPageSubscription().getPagingStore().isPaging());
+
+         server.stop();
+
+         // Deleting the paging data, simulating a failure,
+         // a careless user, or anything else that would remove the data
+         deleteDirectory(new File(getPageDir()));
+
+         server = createServer(true,
+                               config,
+                               PagingTest.PAGE_SIZE,
+                               PagingTest.PAGE_MAX,
+                               new HashMap<String, AddressSettings>());
+         server.start();
+         
+         
+         locator = createInVMNonHALocator();
+         locator.setBlockOnNonDurableSend(true);
+         locator.setBlockOnDurableSend(true);
+         locator.setBlockOnAcknowledge(true);
+
+         sf = locator.createSessionFactory();
+
+         queue = server.locateQueue(ADDRESS);
+
+         sf = locator.createSessionFactory();
+         session = sf.createSession(false, false, false);
+
+         producer = session.createProducer(PagingTest.ADDRESS);
+         
+         for (int i = 0; i < numberOfMessages; i++)
+         {
+            message = session.createMessage(true);
+
+            HornetQBuffer bodyLocal = message.getBodyBuffer();
+
+            bodyLocal.writeBytes(body);
+
+            message.putIntProperty(new SimpleString("id"), i);
+
+            producer.send(message);
+            if (i % 1000 == 0)
+            {
+               session.commit();
+            }
+         }
+         
+         session.commit();
+         
+         server.stop();
+
+         server = createServer(true,
+                               config,
+                               PagingTest.PAGE_SIZE,
+                               PagingTest.PAGE_MAX,
+                               new HashMap<String, AddressSettings>());
+         server.start();
+
+         locator = createInVMNonHALocator();
+         sf = locator.createSessionFactory();
+
+         queue = server.locateQueue(ADDRESS);
+
+        // assertEquals(numberOfMessages, queue.getMessageCount());
+
+         xids = new LinkedList<Xid>();
+
+         msgReceived = 0;
+         sessionConsumer = sf.createSession(false, false, false);
+         sessionConsumer.start();
+         consumer = sessionConsumer.createConsumer(PagingTest.ADDRESS);
+         for (int msgCount = 0; msgCount < numberOfMessages; msgCount++)
+         {
+            log.info("Received " + msgCount);
+            msgReceived++;
+            ClientMessage msg = consumer.receiveImmediate();
+            if (msg == null)
+            {
+               log.info("It's null. leaving now");
+               sessionConsumer.commit();
+               fail("Didn't receive a message");
+            }
+            msg.acknowledge();
+
+            if (msgCount % 5 == 0)
+            {
+               log.info("commit");
+               sessionConsumer.commit();
+            }
+         }
+
+         sessionConsumer.commit();
+
+         sessionConsumer.close();
+
+
+      }
+      finally
+      {
+         try
+         {
+            server.stop();
+         }
+         catch (Throwable ignored)
+         {
+         }
+      }
+
+   }
+
    public void testMissingTXEverythingAcked() throws Exception
    {
       clearData();
@@ -1031,6 +1275,40 @@
       {
          bb.put(getSamplebyte(j));
       }
+      
+      final AtomicBoolean running = new AtomicBoolean(true);
+      
+      class TCount extends Thread
+      {
+         Queue queue;
+         
+         TCount(Queue queue)
+         {
+            this.queue = queue;
+         }
+         public void run()
+         {
+            try
+            {
+               while (running.get())
+               {
+                 // log.info("Message count = " + queue.getMessageCount() + " on queue " + queue.getName());
+                  queue.getMessagesAdded();
+                  queue.getMessageCount();
+                  //log.info("Message added = " + queue.getMessagesAdded() + " on queue " + queue.getName());
+                  Thread.sleep(10);
+               }
+            }
+            catch (InterruptedException e)
+            {
+               log.info("Thread interrupted");
+            }
+         }
+      };
+      
+      TCount tcount1 = null;
+      TCount tcount2 = null;
+      
 
       try
       {
@@ -1057,7 +1335,8 @@
 
                session.createQueue(PagingTest.ADDRESS.toString(), PagingTest.ADDRESS + "-2", null, true);
             }
-
+            
+            
             ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
 
             ClientMessage message = null;
@@ -1066,6 +1345,7 @@
             {
                if (i % 500 == 0)
                {
+                  log.info("Sent " + i + " messages");
                   session.commit();
                }
                message = session.createMessage(true);
@@ -1095,7 +1375,24 @@
                                PagingTest.PAGE_MAX,
                                new HashMap<String, AddressSettings>());
          server.start();
+         
+         Queue queue1 = server.locateQueue(PagingTest.ADDRESS.concat("-1"));
+         
+         Queue queue2 = server.locateQueue(PagingTest.ADDRESS.concat("-2"));
+         
+         assertNotNull(queue1);
+         
+         assertNotNull(queue2);
+         
+         assertNotSame(queue1, queue2);
 
+         tcount1 = new TCount(queue1);
+         
+         tcount2 = new TCount(queue2);
+         
+         tcount1.start();
+         tcount2.start();
+
          ServerLocator locator = createInVMNonHALocator();
          final ClientSessionFactory sf2 = locator.createSessionFactory();
 
@@ -1132,8 +1429,14 @@
 
                         Assert.assertNotNull(message2);
 
-                        if (i % 1000 == 0)
+                        if (i % 100 == 0)
+                        {
+                           if (i % 5000 == 0)
+                           {
+                              log.info(addressToSubscribe + " consumed " + i + " messages");
+                           }
                            session.commit();
+                        }
 
                         try
                         {
@@ -1194,6 +1497,20 @@
       }
       finally
       {
+         running.set(false);
+         
+         if (tcount1 != null)
+         {
+            tcount1.interrupt();
+            tcount1.join();
+         }
+         
+         if (tcount2 != null)
+         {
+            tcount2.interrupt();
+            tcount2.join();
+         }
+         
          try
          {
             server.stop();
@@ -1281,7 +1598,7 @@
             sf.close();
             locator.close();
          }
-         
+
          server = createServer(true,
                                config,
                                PagingTest.PAGE_SIZE,
@@ -1353,7 +1670,6 @@
          t.start();
          t.join();
 
-         
          assertEquals(0, errors.get());
 
          for (int i = 0; i < 20 && server.getPostOffice().getPagingManager().getPageStore(ADDRESS).isPaging(); i++)
@@ -1361,9 +1677,8 @@
            // The delete may be asynchronous; give it some time in case it happens asynchronously
             Thread.sleep(500);
          }
-         
-         assertFalse (server.getPostOffice().getPagingManager().getPageStore(ADDRESS).isPaging());
 
+         assertFalse(server.getPostOffice().getPagingManager().getPageStore(ADDRESS).isPaging());
 
          for (int i = 0; i < 20 && server.getPostOffice().getPagingManager().getTransactions().size() != 0; i++)
          {
@@ -1372,7 +1687,6 @@
          }
 
          assertEquals(0, server.getPostOffice().getPagingManager().getTransactions().size());
-         
 
       }
       finally

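The TCount helper added to PagingTest keeps polling the queue counters from a background thread while the main flow runs, and is then shut down in the finally block through a shared flag plus interrupt/join. A standalone sketch of that stop-flag pattern (BackgroundPollerExample and the AtomicLong stand-in are made up for the example):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

public class BackgroundPollerExample
{
   public static void main(String[] args) throws Exception
   {
      final AtomicBoolean running = new AtomicBoolean(true);
      final AtomicLong counter = new AtomicLong(); // stand-in for queue.getMessageCount()

      Thread poller = new Thread(new Runnable()
      {
         public void run()
         {
            try
            {
               while (running.get())
               {
                  counter.get(); // poll the counter, as TCount polls the queue
                  Thread.sleep(10);
               }
            }
            catch (InterruptedException e)
            {
               // Interrupted during shutdown; just leave the loop.
            }
         }
      });

      poller.start();

      // ... the main test logic would run here ...
      Thread.sleep(100);

      // Shutdown: clear the flag, interrupt a possible sleep, and wait for the thread.
      running.set(false);
      poller.interrupt();
      poller.join();
   }
}
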
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeReconnectTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeReconnectTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeReconnectTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -57,6 +57,7 @@
 {
    private static final Logger log = Logger.getLogger(BridgeReconnectTest.class);
 
+   private final int NUM_MESSAGES = 100;
    protected boolean isNetty()
    {
       return false;
@@ -129,7 +130,7 @@
                                                                         HornetQClient.DEFAULT_MAX_RETRY_INTERVAL,
                                                                         retryIntervalMultiplier,
                                                                         reconnectAttempts,
-                                                                        false,
+                                                                        true,
                                                                         confirmationWindowSize,
                                                                         staticConnectors,
                                                                         false,
@@ -181,7 +182,7 @@
 
          session2.start();
 
-         final int numMessages = 10;
+         final int numMessages = NUM_MESSAGES;
 
          SimpleString propKey = new SimpleString("propkey");
 
@@ -273,7 +274,7 @@
                                                                         HornetQClient.DEFAULT_MAX_RETRY_INTERVAL,
                                                                         retryIntervalMultiplier,
                                                                         reconnectAttempts,
-                                                                        false,
+                                                                        true,
                                                                         confirmationWindowSize,
                                                                         staticConnectors,
                                                                         false,
@@ -318,7 +319,7 @@
 
          session2.start();
 
-         final int numMessages = 10;
+         final int numMessages = NUM_MESSAGES;
 
          SimpleString propKey = new SimpleString("propkey");
 
@@ -401,7 +402,7 @@
                                                                         HornetQClient.DEFAULT_MAX_RETRY_INTERVAL,
                                                                         retryIntervalMultiplier,
                                                                         reconnectAttempts,
-                                                                        false,
+                                                                        true,
                                                                         confirmationWindowSize,
                                                                         staticConnectors,
                                                                         false,
@@ -451,7 +452,7 @@
          forwardingConnection = getForwardingConnection(bridge);
          forwardingConnection.fail(new HornetQException(HornetQException.NOT_CONNECTED));
 
-         final int numMessages = 10;
+         final int numMessages = NUM_MESSAGES;
 
          SimpleString propKey = new SimpleString("propkey");
 
@@ -544,7 +545,7 @@
                                                                         HornetQClient.DEFAULT_MAX_RETRY_INTERVAL,
                                                                         retryIntervalMultiplier,
                                                                         reconnectAttempts,
-                                                                        false,
+                                                                        true,
                                                                         confirmationWindowSize,
                                                                         staticConnectors,
                                                                         false,
@@ -596,7 +597,7 @@
 
          session1.start();
 
-         final int numMessages = 10;
+         final int numMessages = NUM_MESSAGES;
 
          SimpleString propKey = new SimpleString("propkey");
 
@@ -637,7 +638,7 @@
       Assert.assertEquals(0, server0.getRemotingService().getConnections().size());
       Assert.assertEquals(0, server1.getRemotingService().getConnections().size());
    }
-
+   
    public void testFailoverThenFailAgainAndReconnect() throws Exception
    {
       Map<String, Object> server0Params = new HashMap<String, Object>();
@@ -680,7 +681,7 @@
                                                                         HornetQClient.DEFAULT_MAX_RETRY_INTERVAL,
                                                                         retryIntervalMultiplier,
                                                                         reconnectAttempts,
-                                                                        false,
+                                                                        true,
                                                                         confirmationWindowSize,
                                                                         staticConnectors,
                                                                         false,
@@ -725,9 +726,9 @@
          InVMConnector.failOnCreateConnection = true;
          InVMConnector.numberOfFailures = reconnectAttempts - 1;
          forwardingConnection.fail(new HornetQException(HornetQException.NOT_CONNECTED));
+         
+         final int numMessages = NUM_MESSAGES;
 
-         final int numMessages = 10;
-
          SimpleString propKey = new SimpleString("propkey");
 
          for (int i = 0; i < numMessages; i++)
@@ -737,19 +738,32 @@
 
             prod0.send(message);
          }
+         int outOfOrder = -1;
+         int supposed = -1;
 
          for (int i = 0; i < numMessages; i++)
          {
             ClientMessage r1 = cons1.receive(1500);
             Assert.assertNotNull(r1);
-            Assert.assertEquals(i, r1.getObjectProperty(propKey));
+            if (outOfOrder == -1 && i != r1.getIntProperty(propKey).intValue())
+            {
+               outOfOrder = r1.getIntProperty(propKey).intValue();
+               supposed = i;
+            }
          }
+         if (outOfOrder != -1)
+         {
+            fail("Message " + outOfOrder + " was received out of order, it was supposed to be " + supposed);
+         }
 
+         log.info("=========== second failure, sending message");
+
+
          // Fail again - should reconnect
          forwardingConnection = ((BridgeImpl)bridge).getForwardingConnection();
          InVMConnector.failOnCreateConnection = true;
          InVMConnector.numberOfFailures = reconnectAttempts - 1;
-         forwardingConnection.fail(new HornetQException(HornetQException.NOT_CONNECTED));
+         forwardingConnection.fail(new HornetQException(5));
 
          for (int i = 0; i < numMessages; i++)
          {
@@ -762,12 +776,22 @@
          for (int i = 0; i < numMessages; i++)
          {
             ClientMessage r1 = cons1.receive(1500);
-            Assert.assertNotNull(r1);
-            Assert.assertEquals(i, r1.getObjectProperty(propKey));
+            Assert.assertNotNull("Didn't receive message", r1);
+            if (outOfOrder == -1 && i != r1.getIntProperty(propKey).intValue())
+            {
+               outOfOrder = r1.getIntProperty(propKey).intValue();
+               supposed = i;
+            }
          }
+         
 
          session0.close();
          session1.close();
+
+         if (outOfOrder != -1)
+         {
+            fail("Message " + outOfOrder + " was received out of order, it was supposed to be " + supposed);
+         }
       }
       finally
       {

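The BridgeReconnectTest changes above stop failing on the first out-of-order message; instead they record the first mismatch, keep draining the consumer, and fail at the end with a description of what went wrong. A minimal sketch of that check, with a plain int array standing in for the received property values:

public class OrderCheckExample
{
   // Returns a description of the first out-of-order element, or null if the sequence is in order.
   static String firstOutOfOrder(int[] received)
   {
      for (int i = 0; i < received.length; i++)
      {
         if (received[i] != i)
         {
            return "Message " + received[i] + " was received out of order, it was supposed to be " + i;
         }
      }
      return null;
   }

   public static void main(String[] args)
   {
      String problem = firstOutOfOrder(new int[] { 0, 1, 3, 2 });
      if (problem != null)
      {
         // In the test this would be Assert.fail(problem), issued only after all messages were consumed.
         System.out.println(problem);
      }
   }
}
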
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/bridge/BridgeTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -1043,7 +1043,7 @@
          server1.start();
          server0.start();
 
-         final int numMessages = 1000;
+         final int numMessages = 300;
 
          final int totalrepeats = 3;
 

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,7 +13,10 @@
 
 package org.hornetq.tests.integration.cluster.distribution;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -35,6 +38,7 @@
 import org.hornetq.api.core.client.ClientSessionFactory;
 import org.hornetq.api.core.client.HornetQClient;
 import org.hornetq.api.core.client.ServerLocator;
+import org.hornetq.core.client.impl.Topology;
 import org.hornetq.core.config.BroadcastGroupConfiguration;
 import org.hornetq.core.config.ClusterConnectionConfiguration;
 import org.hornetq.core.config.Configuration;
@@ -82,7 +86,7 @@
                                        TransportConstants.DEFAULT_PORT + 8,
                                        TransportConstants.DEFAULT_PORT + 9, };
 
-   private static final long WAIT_TIMEOUT = 5000;
+   private static final long WAIT_TIMEOUT = 10000;
 
    @Override
    protected void setUp() throws Exception
@@ -107,12 +111,16 @@
       }
 
       locators = new ServerLocator[ClusterTestBase.MAX_SERVERS];
+      
+      // To make sure the test will start with a clean VM
+      forceGC();
 
    }
 
    @Override
    protected void tearDown() throws Exception
    {
+      log.info("#test tearDown");
       for (ServerLocator locator : locators)
       {
          try
@@ -143,8 +151,6 @@
       nodeManagers = null;
 
       super.tearDown();
-
-    //  ServerLocatorImpl.shutdown();
    }
 
    // Private -------------------------------------------------------------------------------------------------------
@@ -241,22 +247,55 @@
 
       throw new IllegalStateException(msg);
    }
+   
+   protected void waitForTopology(final HornetQServer server, final int nodes) throws Exception
+   {
+      waitForTopology(server, nodes, WAIT_TIMEOUT);
+   }
+   
+   protected void waitForTopology(final HornetQServer server, final int nodes, final long timeout) throws Exception
+   {
+      log.debug("waiting for " + nodes + " on the topology for server = " + server);
 
+
+      long start = System.currentTimeMillis();
+      
+      Topology topology = server.getClusterManager().getTopology();
+
+      do
+      {
+         if (nodes == topology.getMembers().size())
+         {
+            return;
+         }
+
+         Thread.sleep(10);
+      }
+      while (System.currentTimeMillis() - start < timeout);
+      
+      String msg = "Timed out waiting for cluster topology of " + nodes + " nodes (received " + topology.getMembers().size() + "), topology = " + topology;
+
+      ClusterTestBase.log.error(msg);
+      
+      throw new Exception (msg);
+   }
+
    protected void waitForBindings(final int node,
                                   final String address,
-                                  final int count,
-                                  final int consumerCount,
+                                  final int expectedBindingCount,
+                                  final int expectedConsumerCount,
                                   final boolean local) throws Exception
    {
-      // System.out.println("waiting for bindings on node " + node +
-      // " address " +
-      // address +
-      // " count " +
-      // count +
-      // " consumerCount " +
-      // consumerCount +
-      // " local " +
-      // local);
+      log.debug("waiting for bindings on node " + node +
+                " address " +
+                address +
+                " expectedBindingCount " +
+                expectedBindingCount +
+                " consumerCount " +
+                expectedConsumerCount +
+                " local " +
+                local);
+
       HornetQServer server = servers[node];
 
       if (server == null)
@@ -292,7 +331,7 @@
             }
          }
 
-         if (bindingCount == count && totConsumers == consumerCount)
+         if (bindingCount == expectedBindingCount && totConsumers == expectedConsumerCount)
          {
             return;
          }
@@ -301,19 +340,13 @@
       }
       while (System.currentTimeMillis() - start < ClusterTestBase.WAIT_TIMEOUT);
 
-      // System.out.println(threadDump(" - fired by ClusterTestBase::waitForBindings"));
-
-      String msg = "Timed out waiting for bindings (bindingCount = " + bindingCount +
+      String msg = "Timed out waiting for bindings (bindingCount = " + bindingCount + " (expecting " + expectedBindingCount + ") "+
                    ", totConsumers = " +
-                   totConsumers +
+                   totConsumers + " (expecting " + expectedConsumerCount + ")" + 
                    ")";
 
       ClusterTestBase.log.error(msg);
 
-      // Sending thread dump into junit report.. trying to get some information about the server case the binding didn't
-      // arrive
-      System.out.println(UnitTestCase.threadDump(msg));
-
       Bindings bindings = po.getBindingsForAddress(new SimpleString(address));
 
       System.out.println("=======================================================================");
@@ -328,18 +361,73 @@
             System.out.println("Binding = " + qBinding + ", queue=" + qBinding.getQueue());
          }
       }
-      System.out.println("=======================================================================");
 
-      for (HornetQServer hornetQServer : servers)
+      StringWriter writer = new StringWriter();
+      PrintWriter out = new PrintWriter(writer);
+      
+      try
       {
-         if (hornetQServer != null)
+         for (HornetQServer hornetQServer : servers)
          {
-            System.out.println(clusterDescription(hornetQServer));
+            if (hornetQServer != null)
+            {
+               out.println(clusterDescription(hornetQServer));
+               out.println(debugBindings(hornetQServer, hornetQServer.getConfiguration().getManagementNotificationAddress().toString()));
+            }
          }
+         
+         for (HornetQServer hornetQServer : servers)
+         {
+            out.println("Management bindings on " + hornetQServer);
+            if (hornetQServer != null)
+            {
+               out.println(debugBindings(hornetQServer, hornetQServer.getConfiguration().getManagementNotificationAddress().toString()));
+            }
+         }
       }
+      catch (Throwable dontCare)
+      {
+      }
+      
+      logAndSystemOut(writer.toString());
+      
       throw new IllegalStateException(msg);
    }
+   
+   
+   protected String debugBindings(final HornetQServer server, final String address) throws Exception
+   {
+      
+      StringWriter str = new StringWriter();
+      PrintWriter out = new PrintWriter(str);
+      
+      if (server == null)
+      {
+         return "server is shutdown";
+      }
+      PostOffice po = server.getPostOffice();
 
+      if (po == null)
+      {
+         return "server is shutdown";
+      }
+      Bindings bindings = po.getBindingsForAddress(new SimpleString(address));
+
+      out.println("=======================================================================");
+      out.println("Binding information for address = " + address + " on "  + server);
+
+      for (Binding binding : bindings.getBindings())
+      {
+         QueueBinding qBinding = (QueueBinding)binding;
+
+         out.println("Binding = " + qBinding + ", queue=" + qBinding.getQueue());
+      }
+      out.println("=======================================================================");
+      
+      return str.toString();
+
+   }
+
    protected void createQueue(final int node,
                               final String address,
                               final String queueName,
@@ -361,6 +449,8 @@
       {
          filterString = ClusterTestBase.FILTER_PROP.toString() + "='" + filterVal + "'";
       }
+      
+      log.info("Creating " + queueName + " , address " + address + " on " + servers[node]);
 
       session.createQueue(address, queueName, filterString, durable);
 
@@ -749,6 +839,7 @@
                                                    final int... consumerIDs) throws Exception
    {
       boolean outOfOrder = false;
+      String firstOutOfOrderMessage = null;
       for (int consumerID : consumerIDs)
       {
          ConsumerHolder holder = consumers[consumerID];
@@ -770,11 +861,11 @@
 
                dumpConsumers();
 
-               Assert.assertNotNull("consumer " + consumerID + " did not receive message " + j, message);
+               Assert.fail("consumer " + consumerID + " did not receive message " + j);
             }
+            
+            log.info("msg on ClusterTestBase = " + message);            
 
-
-
             if (ack)
             {
                message.acknowledge();
@@ -787,15 +878,22 @@
 
             if (j != (Integer)message.getObjectProperty(ClusterTestBase.COUNT_PROP))
             {
+               if (firstOutOfOrderMessage == null)
+               {
+                  firstOutOfOrderMessage = "expected " + j + " received " + message.getObjectProperty(ClusterTestBase.COUNT_PROP);
+               }
                outOfOrder = true;
                System.out.println("Message j=" + j +
                                   " was received out of order = " +
                                   message.getObjectProperty(ClusterTestBase.COUNT_PROP));
+               log.info("Message j=" + j +
+                                  " was received out of order = " +
+                                  message.getObjectProperty(ClusterTestBase.COUNT_PROP));
             }
          }
       }
 
-      Assert.assertFalse("Messages were consumed out of order, look at System.out for more information", outOfOrder);
+      Assert.assertFalse("Messages were consumed out of order::" + firstOutOfOrderMessage, outOfOrder);
    }
 
    private void dumpConsumers() throws Exception
@@ -815,7 +913,7 @@
    {
       String br = "-------------------------\n";
       String out = br;
-      out += "HornetQ server " + server.getNodeID() + "\n";
+      out += "HornetQ server " + server + "\n";
       ClusterManager clusterManager = server.getClusterManager();
       if (clusterManager == null)
       {
@@ -825,7 +923,7 @@
       {
          for (ClusterConnection cc : clusterManager.getClusterConnections())
          {
-            out += cc.description() + "\n";
+            out += cc.describe() + "\n";
          }
       }
       out += "\n\nfull topology:";
@@ -888,21 +986,32 @@
 
       for (int i = 0; i < numMessages; i++)
       {
-         ConsumerHolder holder = consumers[consumerIDs[count]];
+         // We may use a negative number in some tests to ignore the consumer, in case we know the server is down
+         if (consumerIDs[count] >= 0)
+         {
+            ConsumerHolder holder = consumers[consumerIDs[count]];
+   
+            if (holder == null)
+            {
+               throw new IllegalArgumentException("No consumer at " + consumerIDs[i]);
+            }
+   
+            ClientMessage message = holder.consumer.receive(WAIT_TIMEOUT);
+            
+            message.acknowledge();
+            
+            consumers[consumerIDs[count]].session.commit();
+            
+            System.out.println("Msg: " + message);
+   
+            Assert.assertNotNull("consumer " + consumerIDs[count] + " did not receive message " + i, message);
+   
+            Assert.assertEquals("consumer " + consumerIDs[count] + " message " + i,
+                                i,
+                                message.getObjectProperty(ClusterTestBase.COUNT_PROP));
 
-         if (holder == null)
-         {
-            throw new IllegalArgumentException("No consumer at " + consumerIDs[i]);
          }
-
-         ClientMessage message = holder.consumer.receive(WAIT_TIMEOUT);
-
-         Assert.assertNotNull("consumer " + consumerIDs[count] + " did not receive message " + i, message);
-
-         Assert.assertEquals("consumer " + consumerIDs[count] + " message " + i,
-                             i,
-                             message.getObjectProperty(ClusterTestBase.COUNT_PROP));
-
+         
          count++;
 
          if (count == consumerIDs.length)
@@ -1202,7 +1311,7 @@
    {
       if (sfs[node] != null)
       {
-         throw new IllegalArgumentException("Already a server at " + node);
+         throw new IllegalArgumentException("Already a factory at " + node);
       }
 
       Map<String, Object> params = generateParams(node, netty);
@@ -1231,6 +1340,7 @@
       locators[node].setBlockOnDurableSend(true);
       ClientSessionFactory sf = locators[node].createSessionFactory();
 
+      sf.createSession().close();
       sfs[node] = sf;
    }
 
@@ -1363,24 +1473,28 @@
          {
             if (sharedStorage)
             {
-               server = createInVMFailoverServer(true, configuration, nodeManagers[node]);
+               server = createInVMFailoverServer(true, configuration, nodeManagers[node], node);
             }
             else
             {
                server = HornetQServers.newHornetQServer(configuration);
+               server.setIdentity("Server " + node);
             }
          }
          else
          {
             if (sharedStorage)
             {
-               server = createInVMFailoverServer(false, configuration,  nodeManagers[node]);
+               server = createInVMFailoverServer(false, configuration,  nodeManagers[node], node);
             }
             else
             {
                server = HornetQServers.newHornetQServer(configuration, false);
+               server.setIdentity("Server " + node);
             }
          }
+         
+         server.setIdentity(this.getClass().getSimpleName() + "/Live(" + node + ")");
          servers[node] = server;
       }
 
@@ -1438,24 +1552,27 @@
       {
          if (sharedStorage)
          {
-            server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode]);
+            server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode], liveNode);
          }
          else
          {
             server = HornetQServers.newHornetQServer(configuration);
+            server.setIdentity("Server " + liveNode);
          }
       }
       else
       {
          if (sharedStorage)
          {
-            server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode]);
+            server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode], liveNode);
          }
          else
          {
             server = HornetQServers.newHornetQServer(configuration, false);
+            server.setIdentity("Server " + liveNode);
          }
       }
+      server.setIdentity(this.getClass().getSimpleName() + "/Backup(" + node + " of live " + liveNode + ")");
       servers[node] = server;
    }
 
@@ -1516,22 +1633,24 @@
         {
            if (sharedStorage)
            {
-              server = createInVMFailoverServer(true, configuration, nodeManagers[node]);
+              server = createInVMFailoverServer(true, configuration, nodeManagers[node], node);
            }
            else
            {
               server = HornetQServers.newHornetQServer(configuration);
+              server.setIdentity("Server " + node);
            }
         }
         else
         {
            if (sharedStorage)
            {
-              server = createInVMFailoverServer(false, configuration, nodeManagers[node]);
+              server = createInVMFailoverServer(false, configuration, nodeManagers[node], node);
            }
            else
            {
               server = HornetQServers.newHornetQServer(configuration, false);
+              server.setIdentity("Server " + node);
            }
         }
         servers[node] = server;
@@ -1603,18 +1722,19 @@
         {
            if (sharedStorage)
            {
-              server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode]);
+              server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode], liveNode);
            }
            else
            {
               server = HornetQServers.newHornetQServer(configuration);
+              server.setIdentity("Server " + liveNode);
            }
         }
         else
         {
            if (sharedStorage)
            {
-              server = createInVMFailoverServer(false, configuration, nodeManagers[liveNode]);
+              server = createInVMFailoverServer(false, configuration, nodeManagers[liveNode], liveNode);
            }
            else
            {
@@ -1714,19 +1834,90 @@
          pairs.add(serverTotc.getName());
       }
 
+      ClusterConnectionConfiguration clusterConf = createClusterConfig(name,
+                                                                       address,
+                                                                       forwardWhenNoConsumers,
+                                                                       maxHops,
+                                                                       connectorFrom,
+                                                                       pairs);
+
+      serverFrom.getConfiguration().getClusterConfigurations().add(clusterConf);
+   }
+
+   protected void setupClusterConnection(final String name,
+                                         final String address,
+                                         final boolean forwardWhenNoConsumers,
+                                         final int maxHops,
+                                         final int reconnectAttempts,
+                                         final long retryInterval,
+                                         final boolean netty,
+                                         final int nodeFrom,
+                                         final int... nodesTo)
+   {
+      HornetQServer serverFrom = servers[nodeFrom];
+
+      if (serverFrom == null)
+      {
+         throw new IllegalStateException("No server at node " + nodeFrom);
+      }
+
+      TransportConfiguration connectorFrom = createTransportConfiguration(netty, false, generateParams(nodeFrom, netty));
+      serverFrom.getConfiguration().getConnectorConfigurations().put(connectorFrom.getName(), connectorFrom);
+      
+      List<String> pairs = new ArrayList<String>();
+      for (int element : nodesTo)
+      {
+         TransportConfiguration serverTotc = createTransportConfiguration(netty, false, generateParams(element, netty));
+         serverFrom.getConfiguration().getConnectorConfigurations().put(serverTotc.getName(), serverTotc);
+         pairs.add(serverTotc.getName());
+      }
       ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration(name,
-                                                                                      address,
-                                                                                      connectorFrom.getName(),
-                                                                                      250,
-                                                                                      true,
-                                                                                      forwardWhenNoConsumers,
-                                                                                      maxHops,
-                                                                                      1024,
-                                                                                      pairs, false);
+           address,
+           connectorFrom.getName(),
+           ConfigurationImpl.DEFAULT_CLUSTER_FAILURE_CHECK_PERIOD,
+           ConfigurationImpl.DEFAULT_CLUSTER_CONNECTION_TTL,
+           retryInterval,
+           ConfigurationImpl.DEFAULT_CLUSTER_RETRY_INTERVAL_MULTIPLIER,
+           ConfigurationImpl.DEFAULT_CLUSTER_MAX_RETRY_INTERVAL,
+           reconnectAttempts,
+           true,
+           forwardWhenNoConsumers,
+           maxHops,
+           1024,
+           pairs,
+           false);
 
       serverFrom.getConfiguration().getClusterConfigurations().add(clusterConf);
    }
 
+   /**
+    * @param name
+    * @param address
+    * @param forwardWhenNoConsumers
+    * @param maxHops
+    * @param connectorFrom
+    * @param pairs
+    * @return
+    */
+   protected ClusterConnectionConfiguration createClusterConfig(final String name,
+                                                                final String address,
+                                                                final boolean forwardWhenNoConsumers,
+                                                                final int maxHops,
+                                                                TransportConfiguration connectorFrom,
+                                                                List<String> pairs)
+     {
+        ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration(name,
+                                                                                        address,
+                                                                                        connectorFrom.getName(),
+                                                                                        250,
+                                                                                        true,
+                                                                                        forwardWhenNoConsumers,
+                                                                                        maxHops,
+                                                                                        1024,
+                                                                                        pairs, false);
+        return clusterConf;
+     }
+
    protected void setupClusterConnectionWithBackups(final String name,
                                                     final String address,
                                                     final boolean forwardWhenNoConsumers,
@@ -1802,27 +1993,28 @@
    {
       for (int node : nodes)
       {
+         log.info("#test start node " + node);
          servers[node].setIdentity("server " + node);
          ClusterTestBase.log.info("starting server " + servers[node]);
          servers[node].start();
+
          ClusterTestBase.log.info("started server " + servers[node]);
 
          ClusterTestBase.log.info("started server " + node);
+
+         waitForServer(servers[node]);
+
          /*
-         * we need to wait a lil while between server start up to allow the server to communicate in some order.
-         * This is to avoid split brain on startup
-         * */
-         // TODO: Do we really need this?
+          * We need to wait a little while between server startups to allow the servers to communicate in an orderly fashion.
+          * This is to avoid split brain on startup.
+          */
          Thread.sleep(500);
       }
-      for (int node : nodes)
-      {
-         //wait for each server to start, it may be a backup and started in a separate thread
-         waitForServer(servers[node]);
-      }
+      
+      
    }
 
-   private void waitForServer(HornetQServer server)
+   protected void waitForServer(HornetQServer server)
          throws InterruptedException
    {
       long timetowait =System.currentTimeMillis() + 5000;
@@ -1856,15 +2048,17 @@
 
    protected void stopServers(final int... nodes) throws Exception
    {
+      log.info("Stopping nodes "  + Arrays.toString(nodes));
       for (int node : nodes)
       {
-         if (servers[node].isStarted())
+         log.info("#test stop server " + node);
+         if (servers[node] != null && servers[node].isStarted())
          {
             try
             {
                ClusterTestBase.log.info("stopping server " + node);
                servers[node].stop();
-               ClusterTestBase.log.info("server stopped");
+               ClusterTestBase.log.info("server " + node + " stopped");
             }
             catch (Exception e)
             {

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterWithBackupTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterWithBackupTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/ClusterWithBackupTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -58,38 +58,47 @@
    {
       return false;
    }
-
-   public void testBasicRoundRobin() throws Exception
+   
+   public void testBasicRoundRobin() throws Throwable
    {
-      setupCluster();
-
-      startServers(0, 1, 2, 3, 4, 5);
-
-      setupSessionFactory(3, isNetty());
-      setupSessionFactory(4, isNetty());
-      setupSessionFactory(5, isNetty());
-
-      createQueue(3, "queues.testaddress", "queue0", null, false);
-      createQueue(4, "queues.testaddress", "queue0", null, false);
-      createQueue(5, "queues.testaddress", "queue0", null, false);
-
-      addConsumer(0, 3, "queue0", null);
-      addConsumer(1, 4, "queue0", null);
-      addConsumer(2, 5, "queue0", null);
-
-      waitForBindings(3, "queues.testaddress", 1, 1, true);
-      waitForBindings(4, "queues.testaddress", 1, 1, true);
-      waitForBindings(5, "queues.testaddress", 1, 1, true);
-
-      waitForBindings(3, "queues.testaddress", 2, 2, false);
-      waitForBindings(4, "queues.testaddress", 2, 2, false);
-      waitForBindings(5, "queues.testaddress", 2, 2, false);
-
-      send(3, "queues.testaddress", 100, false, null);
-
-      verifyReceiveRoundRobinInSomeOrder(100, 0, 1, 2);
-
-      verifyNotReceive(0, 0, 1, 2);
+      try
+      {
+         setupCluster();
+   
+         startServers(0, 1, 2, 3, 4, 5);
+   
+         setupSessionFactory(3, isNetty());
+         setupSessionFactory(4, isNetty());
+         setupSessionFactory(5, isNetty());
+   
+         createQueue(3, "queues.testaddress", "queue0", null, false);
+         createQueue(4, "queues.testaddress", "queue0", null, false);
+         createQueue(5, "queues.testaddress", "queue0", null, false);
+   
+         addConsumer(0, 3, "queue0", null);
+         addConsumer(1, 4, "queue0", null);
+         addConsumer(2, 5, "queue0", null);
+   
+         waitForBindings(3, "queues.testaddress", 1, 1, true);
+         waitForBindings(4, "queues.testaddress", 1, 1, true);
+         waitForBindings(5, "queues.testaddress", 1, 1, true);
+   
+         waitForBindings(3, "queues.testaddress", 2, 2, false);
+         waitForBindings(4, "queues.testaddress", 2, 2, false);
+         waitForBindings(5, "queues.testaddress", 2, 2, false);
+   
+         send(3, "queues.testaddress", 100, false, null);
+   
+         verifyReceiveRoundRobinInSomeOrder(100, 0, 1, 2);
+   
+         verifyNotReceive(0, 0, 1, 2);
+      }
+      catch (Throwable e)
+      {
+         e.printStackTrace();
+         log.error(e.getMessage(), e);
+         throw e;
+      }
    }
 
    protected void setupCluster() throws Exception

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -27,4 +27,12 @@
    {
       return true;
    }
+   
+   
+   protected boolean isFileStorage()
+   {
+      return true;
+   }
+
+
 }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithBackupTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithBackupTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithBackupTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -36,5 +36,11 @@
    {
       return true;
    }
+   
+   protected boolean isFileStorage()
+   {
+      return true;
+   }
 
+
 }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithDiscoveryTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithDiscoveryTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettyFileStorageSymmetricClusterWithDiscoveryTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,6 +13,8 @@
 
 package org.hornetq.tests.integration.cluster.distribution;
 
+import org.hornetq.core.logging.Logger;
+
 /**
  * A NettyFileStorageSymmetricClusterWithDiscoveryTest
  *
@@ -22,10 +24,18 @@
  */
 public class NettyFileStorageSymmetricClusterWithDiscoveryTest extends SymmetricClusterWithDiscoveryTest
 {
+   Logger log = Logger.getLogger(NettyFileStorageSymmetricClusterWithDiscoveryTest.class);
+   
    @Override
    protected boolean isNetty()
    {
       return true;
    }
+   
+   protected boolean isFileStorage()
+   {
+      return true;
+   }
+   
 
 }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettySymmetricClusterWithBackupTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettySymmetricClusterWithBackupTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/NettySymmetricClusterWithBackupTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -22,6 +22,8 @@
 
 package org.hornetq.tests.integration.cluster.distribution;
 
+import org.hornetq.core.logging.Logger;
+
 /**
  * A NettySymmetricClusterWithBackupTest
  *
@@ -31,20 +33,12 @@
  */
 public class NettySymmetricClusterWithBackupTest extends SymmetricClusterWithBackupTest
 {
+   private Logger log = Logger.getLogger(NettySymmetricClusterWithBackupTest.class);
+   
    @Override
    protected boolean isNetty()
    {
       return true;
    }
 
-   public void _test() throws Exception
-   {
-      for (int i = 0; i < 50; i++)
-      {
-         System.out.println("\n\n" + i + "\n\n");
-         _testStartStopServers();
-         tearDown();
-         setUp();
-      }
-   }
 }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OneWayChainClusterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OneWayChainClusterTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OneWayChainClusterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -314,29 +314,33 @@
       verifyReceiveRoundRobin(10, 0, 1);
       verifyNotReceive(0, 1);
 
-      System.out.println(clusterDescription(servers[0]));
-      System.out.println(clusterDescription(servers[1]));
-      System.out.println(clusterDescription(servers[2]));
-      System.out.println(clusterDescription(servers[3]));
-      System.out.println(clusterDescription(servers[4]));
+      log.info("============================================ before restart");
+      log.info(clusterDescription(servers[0]));
+      log.info(clusterDescription(servers[1]));
+      log.info(clusterDescription(servers[2]));
+      log.info(clusterDescription(servers[3]));
+      log.info(clusterDescription(servers[4]));
 
       stopServers(2);
 
       Thread.sleep(2000);
-      System.out.println(clusterDescription(servers[0]));
-      System.out.println(clusterDescription(servers[1]));
-      System.out.println(clusterDescription(servers[3]));
-      System.out.println(clusterDescription(servers[4]));
 
+      log.info("============================================ after stop");
+      log.info(clusterDescription(servers[0]));
+      log.info(clusterDescription(servers[1]));
+      log.info(clusterDescription(servers[3]));
+      log.info(clusterDescription(servers[4]));
+
       startServers(2);
 
       Thread.sleep(2000);
 
-      System.out.println(clusterDescription(servers[0]));
-      System.out.println(clusterDescription(servers[1]));
-      System.out.println(clusterDescription(servers[2]));
-      System.out.println(clusterDescription(servers[3]));
-      System.out.println(clusterDescription(servers[4]));
+      log.info("============================================ after start");
+      log.info(clusterDescription(servers[0]));
+      log.info(clusterDescription(servers[1]));
+      log.info(clusterDescription(servers[2]));
+      log.info(clusterDescription(servers[3]));
+      log.info(clusterDescription(servers[4]));
 
       
       send(0, "queues.testaddress", 10, false, null);

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OnewayTwoNodeClusterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OnewayTwoNodeClusterTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/OnewayTwoNodeClusterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -47,6 +47,7 @@
    @Override
    protected void tearDown() throws Exception
    {
+      log.info("#test Tear down");
       closeAllConsumers();
 
       closeAllSessionFactories();

Added: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SimpleSymmetricClusterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SimpleSymmetricClusterTest.java	                        (rev 0)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SimpleSymmetricClusterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2010 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.  See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.distribution;
+
+import java.util.List;
+
+import org.hornetq.api.core.TransportConfiguration;
+import org.hornetq.core.config.ClusterConnectionConfiguration;
+import org.hornetq.core.logging.Logger;
+
+/**
+ * A SimpleSymmetricClusterTest
+ *
+ * @author clebert
+ *
+ *
+ */
+public class SimpleSymmetricClusterTest extends ClusterTestBase
+{
+
+   // Constants -----------------------------------------------------
+
+   static final Logger log = Logger.getLogger(SimpleSymmetricClusterTest.class);
+
+   // Attributes ----------------------------------------------------
+
+   // Static --------------------------------------------------------
+
+   // Constructors --------------------------------------------------
+
+   // Public --------------------------------------------------------
+
+   public void setUp() throws Exception
+   {
+      super.setUp();
+   }
+
+   /**
+    * @param name the name of the cluster connection
+    * @param address the address this cluster connection applies to
+    * @param forwardWhenNoConsumers whether messages are forwarded to nodes with no local consumers
+    * @param maxHops the maximum number of hops a message may take across the cluster
+    * @param connectorFrom the connector of the node originating the cluster connection
+    * @param pairs the names of the static connectors used to reach the other nodes
+    * @return the resulting cluster connection configuration
+    */
+   protected ClusterConnectionConfiguration createClusterConfig(final String name,
+                                                                final String address,
+                                                                final boolean forwardWhenNoConsumers,
+                                                                final int maxHops,
+                                                                TransportConfiguration connectorFrom,
+                                                                List<String> pairs)
+   {
+      ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration(name,
+                                                                                      address,
+                                                                                      connectorFrom.getName(),
+                                                                                      2000,
+                                                                                      true,
+                                                                                      forwardWhenNoConsumers,
+                                                                                      maxHops,
+                                                                                      1024,
+                                                                                      pairs,
+                                                                                      false);
+      return clusterConf;
+   }
+
+   public void tearDown() throws Exception
+   {
+      log.info("#test tearDown " + loopNumber);
+      stopServers(0, 1, 2, 3, 4);
+      super.tearDown();
+   }
+
+   public boolean isNetty()
+   {
+      return false;
+   }
+
+   public void testSimpleWithBackup() throws Exception
+   {
+      // The backups
+      setupBackupServer(0, 3, isFileStorage(), true, isNetty());
+      setupBackupServer(1, 4, isFileStorage(), true, isNetty());
+      setupBackupServer(2, 5, isFileStorage(), true, isNetty());
+
+      // The live servers
+      setupLiveServer(3, isFileStorage(), true, isNetty());
+      setupLiveServer(4, isFileStorage(), true, isNetty());
+      setupLiveServer(5, isFileStorage(), true, isNetty());
+
+      setupClusterConnection("cluster0", "queues", false, 1, isNetty(), 3, 4, 5);
+
+      setupClusterConnection("cluster1", "queues", false, 1, isNetty(), 4, 3, 5);
+
+      setupClusterConnection("cluster2", "queues", false, 1, isNetty(), 5, 3, 4);
+
+      setupClusterConnection("cluster0", "queues", false, 1, isNetty(), 0, 4, 5);
+
+      setupClusterConnection("cluster1", "queues", false, 1, isNetty(), 1, 3, 5);
+
+      setupClusterConnection("cluster2", "queues", false, 1, isNetty(), 2, 3, 4);
+
+
+      // startServers(3, 4, 5, 0, 1, 2);
+      startServers(0, 1, 2, 3, 4, 5);
+
+      log.info("");
+      for (int i = 0; i <= 5; i++)
+      {
+         log.info(servers[i].describe());
+         log.info(debugBindings(servers[i], servers[i].getConfiguration().getManagementNotificationAddress().toString()));
+      }
+      log.info("");
+      
+      //stopServers(3);
+      
+      Thread.sleep(1000);
+
+      log.info("");
+      for (int i = 0; i <= 5; i++)
+      {
+         log.info(servers[i].describe());
+         log.info(debugBindings(servers[i], servers[i].getConfiguration().getManagementNotificationAddress().toString()));
+      }
+      log.info("");
+      
+
+      
+      stopServers(0, 1, 2, 3, 4, 5);
+
+   }
+   
+   
+   public void testSimple() throws Exception
+   {
+      setupServer(0, true, isNetty());
+      setupServer(1, true, isNetty());
+      setupServer(2, true, isNetty());
+
+      setupClusterConnection("cluster0", "queues", false, 1, isNetty(), 0, 1, 2);
+      setupClusterConnection("cluster1", "queues", false, 1, isNetty(), 1, 2, 0);
+      setupClusterConnection("cluster2", "queues", false, 1, isNetty(), 2, 0, 1);
+
+      startServers(0, 1, 2);
+
+      for (int i = 0; i < 10; i++)
+         log.info("****************************");
+      for (int i = 0; i <= 2; i++)
+      {
+         log.info("*************************************\n " + servers[i] +
+                  " topology:\n" +
+                  servers[i].getClusterManager().getTopology().describe());
+      }
+      for (int i = 0; i < 10; i++)
+         log.info("****************************");
+      setupSessionFactory(0, isNetty());
+      setupSessionFactory(1, isNetty());
+      setupSessionFactory(2, isNetty());
+
+      // Thread.sleep(1500);
+
+      createQueue(0, "queues.testaddress", "queue0", null, false);
+      // Thread.sleep(1500);
+      createQueue(1, "queues.testaddress", "queue0", null, false);
+      // Thread.sleep(1500);
+      createQueue(2, "queues.testaddress", "queue0", null, false);
+      // Thread.sleep(1500);
+
+      addConsumer(0, 0, "queue0", null);
+      // Thread.sleep(1500);
+      addConsumer(1, 1, "queue0", null);
+      // Thread.sleep(1500);
+      addConsumer(2, 2, "queue0", null);
+      // Thread.sleep(1500);
+
+      waitForBindings(0, "queues.testaddress", 1, 1, true);
+      waitForBindings(1, "queues.testaddress", 1, 1, true);
+      waitForBindings(2, "queues.testaddress", 1, 1, true);
+
+      waitForBindings(0, "queues.testaddress", 2, 2, false);
+      waitForBindings(1, "queues.testaddress", 2, 2, false);
+      waitForBindings(2, "queues.testaddress", 2, 2, false);
+
+   }
+   static int loopNumber;
+   public void _testLoop() throws Throwable
+   {
+      for (int i = 0 ; i < 1000; i++)
+      {
+         loopNumber = i;
+         log.info("#test " + i);
+         testSimple2();
+         if (i + 1  < 1000)
+         {
+            tearDown();
+            setUp();
+         }
+      }
+   }
+
+
+   
+
+   public void testSimple2() throws Exception
+   {
+      setupServer(0, true, isNetty());
+      setupServer(1, true, isNetty());
+      setupServer(2, true, isNetty());
+      setupServer(3, true, isNetty());
+      setupServer(4, true, isNetty());
+
+      setupClusterConnection("cluster0", "queues", false, 1, isNetty(), 0, 1, 2, 3, 4);
+
+      setupClusterConnection("cluster1", "queues", false, 1, isNetty(), 1, 0, 2, 3, 4);
+
+      setupClusterConnection("cluster2", "queues", false, 1, isNetty(), 2, 0, 1, 3, 4);
+
+      setupClusterConnection("cluster3", "queues", false, 1, isNetty(), 3, 0, 1, 2, 4);
+
+      setupClusterConnection("cluster4", "queues", false, 1, isNetty(), 4, 0, 1, 2, 3);
+
+      startServers(0, 1, 2, 3, 4);
+      
+      for (int i = 0 ; i <= 4; i++)
+      {
+         waitForTopology(servers[i], 5);
+      }
+      
+      log.info("All the servers have been started already!");
+
+      for (int i = 0; i <= 4; i++)
+      {
+         log.info("*************************************\n " + servers[i] +
+                  " topology:\n" +
+                  servers[i].getClusterManager().getTopology().describe());
+      }
+      
+      for (int i = 0; i <= 4; i++)
+      {
+         setupSessionFactory(i, isNetty());
+      }
+
+      for (int i = 0 ; i <= 4; i++)
+      {
+         createQueue(i, "queues.testaddress", "queue0", null, false);
+      }
+
+      for (int i = 0 ; i <= 4; i++)
+      {
+         addConsumer(i, i, "queue0", null);
+      }
+
+      waitForBindings(0, "queues.testaddress", 1, 1, true);
+      waitForBindings(1, "queues.testaddress", 1, 1, true);
+      waitForBindings(2, "queues.testaddress", 1, 1, true);
+
+      waitForBindings(0, "queues.testaddress", 4, 4, false);
+      waitForBindings(1, "queues.testaddress", 4, 4, false);
+      waitForBindings(2, "queues.testaddress", 4, 4, false);
+
+   }
+   
+   public void testSimpleRoundRobbin() throws Exception
+   {
+      
+      // TODO: make this test crash a node
+      setupServer(0, true, isNetty());
+      setupServer(1, true, isNetty());
+      setupServer(2, true, isNetty());
+
+      setupClusterConnection("cluster0", "queues", false, 1, 10, 100, isNetty(), 0, 1, 2);
+      setupClusterConnection("cluster1", "queues", false, 1, 10, 100, isNetty(), 1, 2, 0);
+      setupClusterConnection("cluster1", "queues", false, 1, 10, 100, isNetty(), 2, 0, 1);
+
+      startServers(0, 1, 2);
+
+      setupSessionFactory(0, isNetty());
+      setupSessionFactory(1, isNetty());
+      setupSessionFactory(2, isNetty());
+
+      // Need to wait some time so the bridges and
+      // connectors have time to connect properly between the nodes
+      Thread.sleep(1000);
+
+      createQueue(0, "queues.testaddress", "queue0", null, true);
+      // Thread.sleep(1500);
+      createQueue(1, "queues.testaddress", "queue0", null, true);
+      // Thread.sleep(1500);
+      createQueue(2, "queues.testaddress", "queue0", null, true);
+      // Thread.sleep(1500);
+
+      addConsumer(0, 0, "queue0", null);
+      // Thread.sleep(1500);
+      addConsumer(1, 1, "queue0", null);
+      // Thread.sleep(1500);
+      addConsumer(2, 2, "queue0", null);
+      // Thread.sleep(1500);
+
+      waitForBindings(0, "queues.testaddress", 1, 1, true);
+      waitForBindings(1, "queues.testaddress", 1, 1, true);
+      waitForBindings(2, "queues.testaddress", 1, 1, true);
+
+      waitForBindings(0, "queues.testaddress", 2, 2, false);
+      waitForBindings(1, "queues.testaddress", 2, 2, false);
+      waitForBindings(2, "queues.testaddress", 2, 2, false);
+
+      send(0, "queues.testaddress", 33, true, null);
+
+      verifyReceiveRoundRobin(33, 0, 1, 2);
+      
+      stopServers(2);
+
+
+      waitForBindings(0, "queues.testaddress", 1, 1, false);
+      waitForBindings(1, "queues.testaddress", 1, 1, false);
+
+
+      send(0, "queues.testaddress", 100, true, null);
+
+      verifyReceiveRoundRobin(100, 0, 1);
+      
+      sfs[2] = null;
+      consumers[2] = null;
+      
+      
+      startServers(2);
+      
+      setupSessionFactory(2, isNetty());
+
+      addConsumer(2, 2, "queue0", null);
+      
+      waitForBindings(0, "queues.testaddress", 1, 1, true);
+      waitForBindings(1, "queues.testaddress", 1, 1, true);
+      waitForBindings(2, "queues.testaddress", 1, 1, true);
+
+      waitForBindings(0, "queues.testaddress", 2, 2, false);
+      waitForBindings(1, "queues.testaddress", 2, 2, false);
+      waitForBindings(2, "queues.testaddress", 2, 2, false);
+
+      send(0, "queues.testaddress", 33, true, null);
+
+      verifyReceiveRoundRobin(33, 0, 1, 2);
+
+      
+      
+
+   }
+
+   
+   public void _testSimpleRoundRobbinNoFailure() throws Exception
+   {
+      // TODO: make this test crash a node
+      setupServer(0, true, isNetty());
+      setupServer(1, true, isNetty());
+      setupServer(2, true, isNetty());
+
+      setupClusterConnection("cluster0", "queues", false, 1, -1, 1000, isNetty(), 0, 1, 2);
+      setupClusterConnection("cluster1", "queues", false, 1, -1, 1000, isNetty(), 1, 2, 0);
+      setupClusterConnection("cluster1", "queues", false, 1, -1, 1000, isNetty(), 2, 0, 1);
+
+      startServers(0, 1, 2);
+
+      setupSessionFactory(0, isNetty());
+      setupSessionFactory(1, isNetty());
+      setupSessionFactory(2, isNetty());
+
+      // Thread.sleep(1500);
+
+      createQueue(0, "queues.testaddress", "queue0", null, true);
+      // Thread.sleep(1500);
+      createQueue(1, "queues.testaddress", "queue0", null, true);
+      // Thread.sleep(1500);
+      createQueue(2, "queues.testaddress", "queue0", null, true);
+      // Thread.sleep(1500);
+
+      addConsumer(0, 0, "queue0", null);
+      // Thread.sleep(1500);
+      addConsumer(1, 1, "queue0", null);
+      // Thread.sleep(1500);
+      addConsumer(2, 2, "queue0", null);
+      // Thread.sleep(1500);
+
+      waitForBindings(0, "queues.testaddress", 1, 1, true);
+      waitForBindings(1, "queues.testaddress", 1, 1, true);
+      waitForBindings(2, "queues.testaddress", 1, 1, true);
+
+      waitForBindings(0, "queues.testaddress", 2, 2, false);
+      waitForBindings(1, "queues.testaddress", 2, 2, false);
+      waitForBindings(2, "queues.testaddress", 2, 2, false);
+
+      send(0, "queues.testaddress", 33, true, null);
+
+      verifyReceiveRoundRobin(33, 0, 1, 2);
+
+      Thread.sleep(1000);
+      
+      // TODO: need to make sure the shutdown won't be sent, which would affect the test
+      stopServers(2);
+//      
+//      Thread.sleep(5000);
+//
+//      waitForBindings(0, "queues.testaddress", 2, 2, false);
+//      waitForBindings(1, "queues.testaddress", 2, 2, false);
+
+
+      send(0, "queues.testaddress", 100, true, null);
+      
+      verifyReceiveRoundRobin(100, 0, 1, -1);
+      
+      sfs[2] = null;
+      consumers[2] = null;
+      
+      startServers(2);
+      
+      setupSessionFactory(2, isNetty());
+
+      addConsumer(2, 2, "queue0", null);
+      
+      waitForBindings(0, "queues.testaddress", 1, 1, true);
+      waitForBindings(1, "queues.testaddress", 1, 1, true);
+      waitForBindings(2, "queues.testaddress", 1, 1, true);
+
+      waitForBindings(0, "queues.testaddress", 2, 2, false);
+      waitForBindings(1, "queues.testaddress", 2, 2, false);
+      waitForBindings(2, "queues.testaddress", 2, 2, false);
+
+      verifyReceiveRoundRobin(100, -1, -1, 2);
+
+      
+      
+
+   }
+
+   // Package protected ---------------------------------------------
+
+   // Protected -----------------------------------------------------
+
+   // Private -------------------------------------------------------
+
+   // Inner classes -------------------------------------------------
+
+}

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -42,6 +42,7 @@
    @Override
    protected void tearDown() throws Exception
    {
+      log.info("#test tearDown");
       stopServers();
 
       super.tearDown();
@@ -1827,5 +1828,12 @@
 
       stopServers(0, 1, 2, 3, 4);
    }
+   
 
+   protected boolean isFileStorage()
+   {
+      return false;
+   }
+
+
 }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterWithBackupTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterWithBackupTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/SymmetricClusterWithBackupTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -149,7 +149,6 @@
       setupCluster();
 
       startServers(5, 0);
-      servers[0].getClusterManager().getTopology().setDebug(true);
       setupSessionFactory(0, isNetty());
 
       createQueue(0, "queues.testaddress", "queue0", null, false);

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/TwoWayTwoNodeClusterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/TwoWayTwoNodeClusterTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/distribution/TwoWayTwoNodeClusterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -36,11 +36,11 @@
       setupServers();
       setupClusters();
    }
-   
+
    protected void setupServers()
    {
       setupServer(0, isFileStorage(), isNetty());
-      setupServer(1, isFileStorage(), isNetty());      
+      setupServer(1, isFileStorage(), isNetty());
    }
 
    protected void setupClusters()
@@ -52,6 +52,7 @@
    @Override
    protected void tearDown() throws Exception
    {
+      log.info("#test tearDown");
       closeAllConsumers();
 
       closeAllSessionFactories();
@@ -94,7 +95,7 @@
 
       stopServers(0, 1);
    }
-   
+
    public void testStartPauseStartOther() throws Exception
    {
 
@@ -103,11 +104,11 @@
       setupSessionFactory(0, isNetty());
       createQueue(0, "queues", "queue0", null, false);
       addConsumer(0, 0, "queue0", null);
-      
-      // we let the discovery initial timeout expire, 
+
+      // we let the initial discovery timeout expire,
       // #0 will be alone in the cluster
       Thread.sleep(12000);
-      
+
       startServers(1);
       setupSessionFactory(1, isNetty());
       createQueue(1, "queues", "queue0", null, false);
@@ -127,6 +128,35 @@
       stopServers(0, 1);
    }
 
+   public void testRestartTest() throws Throwable
+   {
+      String name = Thread.currentThread().getName();
+      try
+      {
+         Thread.currentThread().setName("ThreadOnTestRestartTest");
+         startServers(0, 1);
+         waitForTopology(servers[0], 2);
+         waitForTopology(servers[1], 2);
+
+         for (int i = 0; i < 5; i++)
+         {
+            log.info("Sleep #test " + i);
+            log.info("#stop #test #" + i);
+            stopServers(1);
+            waitForTopology(servers[0], 1, 2000);
+            log.info("#start #test #" + i);
+            startServers(1);
+            waitForTopology(servers[0], 2, 2000);
+            waitForTopology(servers[1], 2, 2000);
+         }
+      }
+      finally
+      {
+         Thread.currentThread().setName(name);
+      }
+
+   }
+
    public void testStopStart() throws Exception
    {
       startServers(0, 1);
@@ -183,6 +213,6 @@
       verifyReceiveRoundRobin(10, 0, 1);
       verifyNotReceive(0, 1);
 
-       stopServers(0, 1);
+      stopServers(0, 1);
    }
 }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -170,65 +170,74 @@
             locator.setBlockOnNonDurableSend(true);
             locator.setBlockOnDurableSend(true);
             locator.setReconnectAttempts(-1);
-            sf = (ClientSessionFactoryInternal) createSessionFactoryAndWaitForTopology(locator, 2);
+            locator.setConfirmationWindowSize(10 * 1024 * 1024);
+            sf = (ClientSessionFactoryInternal)createSessionFactoryAndWaitForTopology(locator, 2);
+            try
+            {
 
+               ClientSession createSession = sf.createSession(true, true);
 
-            ClientSession createSession = sf.createSession(true, true);
+               createSession.createQueue(FailoverTestBase.ADDRESS, FailoverTestBase.ADDRESS, null, true);
 
-            createSession.createQueue(FailoverTestBase.ADDRESS, FailoverTestBase.ADDRESS, null, true);
+               RemotingConnection conn = ((ClientSessionInternal)createSession).getConnection();
 
-            RemotingConnection conn = ((ClientSessionInternal)createSession).getConnection();
+               Thread t = new Thread(runnable);
 
-            Thread t = new Thread(runnable);
+               t.setName("MainTEST");
 
-            t.setName("MainTEST");
+               t.start();
 
-            t.start();
+               long randomDelay = (long)(2000 * Math.random());
 
-            long randomDelay = (long)(2000 * Math.random());
+               AsynchronousFailoverTest.log.info("Sleeping " + randomDelay);
 
-            AsynchronousFailoverTest.log.info("Sleeping " + randomDelay);
+               Thread.sleep(randomDelay);
 
-            Thread.sleep(randomDelay);
+               AsynchronousFailoverTest.log.info("Failing asynchronously");
 
-            AsynchronousFailoverTest.log.info("Failing asynchronously");
+               // Simulate failure on connection
+               synchronized (lockFail)
+               {
+                  if (log.isDebugEnabled())
+                  {
+                     log.debug("#test crashing test");
+                  }
+                  crash((ClientSession)createSession);
+               }
 
-            MyListener listener = this.listener;
+               /*if (listener != null)
+               {
+                  boolean ok = listener.latch.await(10000, TimeUnit.MILLISECONDS);
 
-            // Simulate failure on connection
-            synchronized (lockFail)
-            {
-               crash((ClientSession) createSession);
-            }
+                  Assert.assertTrue(ok);
+               }*/
 
-            /*if (listener != null)
-            {
-               boolean ok = listener.latch.await(10000, TimeUnit.MILLISECONDS);
+               runnable.setFailed();
 
-               Assert.assertTrue(ok);
-            }*/
+               AsynchronousFailoverTest.log.info("Fail complete");
 
-            runnable.setFailed();
+               t.join();
 
-            AsynchronousFailoverTest.log.info("Fail complete");
+               runnable.checkForExceptions();
 
-            t.join();
+               createSession.close();
 
-            runnable.checkForExceptions();
+               if (sf.numSessions() != 0)
+               {
+                  DelegatingSession.dumpSessionCreationStacks();
+               }
 
-            createSession.close();
+               Assert.assertEquals(0, sf.numSessions());
 
-            if (sf.numSessions() != 0)
+               locator.close();
+            }
+            finally
             {
-               DelegatingSession.dumpSessionCreationStacks();
+               locator.close();
+
+               Assert.assertEquals(0, sf.numConnections());
             }
 
-            Assert.assertEquals(0, sf.numSessions());
-
-            locator.close();
-            
-            Assert.assertEquals(0, sf.numConnections());
-
             if (i != numIts - 1)
             {
                tearDown();
@@ -243,7 +252,7 @@
          DelegatingSession.debug = false;
       }
    }
-   
+
    protected void addPayload(ClientMessage msg)
    {
    }
@@ -278,7 +287,7 @@
                   message.getBodyBuffer().writeString("message" + i);
 
                   message.putIntProperty("counter", i);
-                  
+
                   addPayload(message);
 
                   producer.send(message);
@@ -288,7 +297,7 @@
                catch (HornetQException e)
                {
                   AsynchronousFailoverTest.log.info("exception when sending message with counter " + i);
-                  if(e.getCode() != HornetQException.UNBLOCKED)
+                  if (e.getCode() != HornetQException.UNBLOCKED)
                   {
                      e.printStackTrace();
                   }
@@ -366,7 +375,7 @@
       }
    }
 
-   private void doTestTransactional(final TestRunner runner) throws Exception
+   private void doTestTransactional(final TestRunner runner) throws Throwable
    {
       // For duplication detection
       int executionId = 0;
@@ -377,6 +386,8 @@
 
          executionId++;
 
+         log.info("#test doTestTransactional starting now. Execution " + executionId);
+
          try
          {
 
@@ -408,13 +419,18 @@
                      message.putStringProperty(Message.HDR_DUPLICATE_DETECTION_ID, new SimpleString("id:" + i +
                                                                                                     ",exec:" +
                                                                                                     executionId));
-                     
+
                      addPayload(message);
 
+                     if (log.isDebugEnabled())
+                     {
+                        log.debug("Sending message " + message);
+                     }
 
                      producer.send(message);
                   }
 
+                  log.debug("Sending commit");
                   session.commit();
 
                   retry = false;
@@ -423,32 +439,35 @@
                {
                   if (e.getCode() == HornetQException.DUPLICATE_ID_REJECTED)
                   {
+                     logAndSystemOut("#test duplicate id rejected on sending");
                      break;
                   }
-                  else
-                  if (e.getCode() == HornetQException.TRANSACTION_ROLLED_BACK || e.getCode() == HornetQException.UNBLOCKED)
+                  else if (e.getCode() == HornetQException.TRANSACTION_ROLLED_BACK || e.getCode() == HornetQException.UNBLOCKED)
                   {
+                     log.info("#test transaction rollback retrying on sending");
                      // OK
                      retry = true;
                   }
                   else
                   {
+                     log.info("#test Exception " + e, e);
                      throw e;
                   }
                }
             }
             while (retry);
 
-            
-            
+            logAndSystemOut("#test Finished sending, starting consumption now");
+
             boolean blocked = false;
 
             retry = false;
-            
-            ClientConsumer consumer = null; 
+            ArrayList<Integer> msgs = new ArrayList<Integer>();
+
+            ClientConsumer consumer = null;
             do
             {
-               ArrayList<Integer> msgs = new ArrayList<Integer>();
+               msgs.clear();
                try
                {
                   if (consumer == null)
@@ -459,28 +478,58 @@
 
                   for (int i = 0; i < numMessages; i++)
                   {
-                     ClientMessage message = consumer.receive(500);
+                     if (log.isDebugEnabled())
+                     {
+                        log.debug("Consumer receiving message " + i);
+                     }
+                     ClientMessage message = consumer.receive(10000);
                      if (message == null)
                      {
                         break;
                      }
 
+                     if (log.isDebugEnabled())
+                     {
+                        log.debug("Received message " + message);
+                     }
+
                      int count = message.getIntProperty("counter");
 
+                     if (count != i)
+                     {
+                        log.warn("count was received out of order, " + count + "!=" + i);
+                     }
+
                      msgs.add(count);
 
                      message.acknowledge();
                   }
 
+                  log.info("#test commit");
                   session.commit();
-                  
-                  if (blocked)
+
+                  try
                   {
-                     assertTrue("msgs.size is expected to be 0 or "  + numMessages + " but it was " + msgs.size(), msgs.size() == 0 || msgs.size() == numMessages);
+                     if (blocked)
+                     {
+                        assertTrue("msgs.size is expected to be 0 or " + numMessages + " but it was " + msgs.size(),
+                                   msgs.size() == 0 || msgs.size() == numMessages);
+                     }
+                     else
+                     {
+                        assertTrue("msgs.size is expected to be " + numMessages + " but it was " + msgs.size(),
+                                   msgs.size() == numMessages);
+                     }
                   }
-                  else
+                  catch (Throwable e)
                   {
-                     assertTrue("msgs.size is expected to be "  + numMessages  + " but it was " + msgs.size(), msgs.size() == numMessages);
+                     log.info(threadDump("Thread dump, messagesReceived = " + msgs.size()));
+                     logAndSystemOut(e.getMessage() + " messages received");
+                     for (Integer msg : msgs)
+                     {
+                        logAndSystemOut(msg.toString());
+                     }
+                     throw e;
                   }
 
                   int i = 0;
@@ -496,6 +545,7 @@
                {
                   if (e.getCode() == HornetQException.TRANSACTION_ROLLED_BACK)
                   {
+                     logAndSystemOut("Transaction rolled back with " + msgs.size(), e);
                      // TODO: https://jira.jboss.org/jira/browse/HORNETQ-369
                      // ATM RolledBack exception is being called with the transaction is committed.
                      // the test will fail if you remove this next line
@@ -503,6 +553,7 @@
                   }
                   else if (e.getCode() == HornetQException.UNBLOCKED)
                   {
+                     logAndSystemOut("Unblocked with " + msgs.size(), e);
                      // TODO: https://jira.jboss.org/jira/browse/HORNETQ-369
                      // This part of the test is never being called.
                      blocked = true;
@@ -514,6 +565,7 @@
                   }
                   else
                   {
+                     logAndSystemOut(e.getMessage(), e);
                      throw e;
                   }
                }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailBackAutoTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailBackAutoTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailBackAutoTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,23 +13,29 @@
 
 package org.hornetq.tests.integration.cluster.failover;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
 import junit.framework.Assert;
+
 import org.hornetq.api.core.HornetQException;
 import org.hornetq.api.core.SimpleString;
 import org.hornetq.api.core.TransportConfiguration;
-import org.hornetq.api.core.client.*;
+import org.hornetq.api.core.client.ClientConsumer;
+import org.hornetq.api.core.client.ClientMessage;
+import org.hornetq.api.core.client.ClientProducer;
+import org.hornetq.api.core.client.ClientSession;
+import org.hornetq.api.core.client.ClientSessionFactory;
+import org.hornetq.api.core.client.SessionFailureListener;
 import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
 import org.hornetq.core.client.impl.ServerLocatorInternal;
 import org.hornetq.core.config.ClusterConnectionConfiguration;
+import org.hornetq.core.logging.Logger;
 import org.hornetq.core.server.impl.InVMNodeManager;
 import org.hornetq.jms.client.HornetQTextMessage;
-import org.hornetq.utils.ReusableLatch;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
 /**
  * @author <a href="mailto:andy.taylor at jboss.com">Andy Taylor</a>
  *         Date: Dec 21, 2010
@@ -37,6 +43,7 @@
  */
 public class FailBackAutoTest extends FailoverTestBase
 {
+   Logger log = Logger.getLogger(FailBackAutoTest.class);
    private ServerLocatorInternal locator;
 
    @Override
@@ -62,7 +69,7 @@
       }
       super.tearDown();
    }
-
+ 
    public void testAutoFailback() throws Exception
    {
       locator.setBlockOnNonDurableSend(true);
@@ -78,12 +85,8 @@
 
       session.addFailureListener(listener);
 
-      backupServer.stop();
-
       liveServer.crash();
 
-      backupServer.start();
-
       assertTrue(latch.await(5, TimeUnit.SECONDS));
 
       ClientProducer producer = session.createProducer(FailoverTestBase.ADDRESS);
@@ -102,6 +105,7 @@
 
       session.addFailureListener(listener);
 
+      log.info("******* starting live server back");
       liveServer.start();
 
       assertTrue(latch2.await(5, TimeUnit.SECONDS));
@@ -136,12 +140,8 @@
 
       session.addFailureListener(listener);
 
-      backupServer.stop();
-
       liveServer.crash();
 
-      backupServer.start();
-
       assertTrue(latch.await(5, TimeUnit.SECONDS));
 
       ClientProducer producer = session.createProducer(FailoverTestBase.ADDRESS);
@@ -160,8 +160,9 @@
 
       session.addFailureListener(listener);
 
+      log.info("restarting live node now");
       liveServer.start();
-
+      
       assertTrue(latch2.await(5, TimeUnit.SECONDS));
 
       message = session.createMessage(true);
@@ -178,7 +179,7 @@
 
       session.addFailureListener(listener);
 
-      waitForBackup(sf, 5);
+      waitForBackup(sf, 10);
 
       liveServer.crash();
 
@@ -204,6 +205,7 @@
       backupConfig.setSharedStore(true);
       backupConfig.setBackup(true);
       backupConfig.setClustered(true);
+      backupConfig.setFailbackDelay(1000);
       TransportConfiguration liveConnector = getConnectorTransportConfiguration(true);
       TransportConfiguration backupConnector = getConnectorTransportConfiguration(false);
       backupConfig.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
@@ -220,6 +222,7 @@
       liveConfig.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
       liveConfig.setSecurityEnabled(false);
       liveConfig.setSharedStore(true);
+      liveConfig.setFailbackDelay(1000);
       liveConfig.setClustered(true);
       List<String> pairs = new ArrayList<String>();
       pairs.add(backupConnector.getName());

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverOnFlowControlTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverOnFlowControlTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverOnFlowControlTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -25,8 +25,10 @@
 import org.hornetq.api.core.client.ServerLocator;
 import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
 import org.hornetq.core.client.impl.ServerLocatorInternal;
+import org.hornetq.core.logging.Logger;
 import org.hornetq.core.protocol.core.Packet;
 import org.hornetq.core.protocol.core.impl.wireformat.SessionProducerCreditsMessage;
+import org.hornetq.core.remoting.impl.invm.InVMConnection;
 import org.hornetq.spi.core.protocol.RemotingConnection;
 
 /**
@@ -39,6 +41,8 @@
 public class FailoverOnFlowControlTest extends FailoverTestBase
 {
 
+   
+   private static Logger log = Logger.getLogger(FailoverOnFlowControlTest.class);
 
    // Constants -----------------------------------------------------
 
@@ -58,24 +62,35 @@
       locator.setBlockOnDurableSend(true);
       locator.setReconnectAttempts(-1);
       locator.setProducerWindowSize(1000);
+      locator.setRetryInterval(123);
       final ArrayList<ClientSession> sessionList = new ArrayList<ClientSession>();
       Interceptor interceptorClient = new Interceptor()
       {
          AtomicInteger count = new AtomicInteger(0);
          public boolean intercept(Packet packet, RemotingConnection connection) throws HornetQException
          {
-            System.out.println("Intercept..." + packet.getClass().getName());
+            log.debug("Intercept..." + packet.getClass().getName());
             
             if (packet instanceof SessionProducerCreditsMessage )
             {
                SessionProducerCreditsMessage credit = (SessionProducerCreditsMessage)packet;
                
-               System.out.println("Credits: " + credit.getCredits());
+               log.debug("Credits: " + credit.getCredits());
                if (count.incrementAndGet() == 2)
                {
                   try
                   {
-                     crash(sessionList.get(0));
+                     log.debug("### crashing server");
+                      
+                     InVMConnection.flushEnabled = false;
+                     try
+                     {
+                        crash(false, sessionList.get(0));
+                     }
+                     finally
+                     {
+                        InVMConnection.flushEnabled = true;
+                     }
                   }
                   catch (Exception e)
                   {

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -97,23 +97,28 @@
       super.setUp();
       clearData();
       createConfigs();
+      
+      
+      
+      liveServer.setIdentity(this.getClass().getSimpleName() + "/liveServer");
 
       liveServer.start();
 
       if (backupServer != null)
       {
+         backupServer.setIdentity(this.getClass().getSimpleName() + "/backupServer");
          backupServer.start();
       }
    }
 
    protected TestableServer createLiveServer()
    {
-      return new SameProcessHornetQServer(createInVMFailoverServer(true, liveConfig, nodeManager));
+      return new SameProcessHornetQServer(createInVMFailoverServer(true, liveConfig, nodeManager, 1));
    }
 
    protected TestableServer createBackupServer()
    {
-      return new SameProcessHornetQServer(createInVMFailoverServer(true, backupConfig, nodeManager));
+      return new SameProcessHornetQServer(createInVMFailoverServer(true, backupConfig, nodeManager, 2));
    }
 
    /**
@@ -187,6 +192,7 @@
    @Override
    protected void tearDown() throws Exception
    {
+      logAndSystemOut("#test tearDown");
       backupServer.stop();
 
       liveServer.stop();
@@ -209,8 +215,7 @@
       }
       catch (IOException e)
       {
-         e.printStackTrace();
-         System.exit(9);
+         throw e; 
       }
       try
       {
@@ -219,8 +224,7 @@
       }
       catch (IOException e)
       {
-         e.printStackTrace();
-         System.exit(9);
+         throw e;
       }
    }
 
@@ -400,6 +404,11 @@
       liveServer.crash(sessions);
    }
 
+   protected void crash(final boolean waitFailure, final ClientSession... sessions) throws Exception
+   {
+      liveServer.crash(waitFailure, sessions);
+   }
+
    // Private -------------------------------------------------------
 
    // Inner classes -------------------------------------------------

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverSharedServerTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverSharedServerTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverSharedServerTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -12,16 +12,7 @@
  */
 package org.hornetq.tests.integration.cluster.failover;
 
-import java.util.Map;
 
-import org.hornetq.api.core.TransportConfiguration;
-import org.hornetq.core.config.Configuration;
-import org.hornetq.core.config.impl.ConfigurationImpl;
-import org.hornetq.core.server.HornetQServer;
-import org.hornetq.core.server.HornetQServers;
-import org.hornetq.core.server.impl.InVMNodeManager;
-import org.hornetq.tests.util.ServiceTestBase;
-
 /**
  * @author <a href="mailto:andy.taylor at jboss.org">Andy Taylor</a>
  *         Created Oct 26, 2009
@@ -34,4 +25,4 @@
    {
       return true;
    }
-}
+}  

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/GroupingFailoverTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -12,7 +12,6 @@
  */
 package org.hornetq.tests.integration.cluster.failover;
 
-import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
@@ -21,11 +20,6 @@
 import org.hornetq.api.core.HornetQException;
 import org.hornetq.api.core.Message;
 import org.hornetq.api.core.SimpleString;
-import org.hornetq.core.client.impl.Topology;
-import org.hornetq.core.remoting.FailureListener;
-import org.hornetq.core.server.HornetQServer;
-import org.hornetq.core.server.cluster.MessageFlowRecord;
-import org.hornetq.core.server.cluster.impl.ClusterConnectionImpl;
 import org.hornetq.core.server.group.impl.GroupingHandlerConfiguration;
 import org.hornetq.spi.core.protocol.RemotingConnection;
 import org.hornetq.tests.integration.cluster.distribution.ClusterTestBase;
@@ -39,7 +33,7 @@
 
    public void testGroupingLocalHandlerFails() throws Exception
    {
-     setupBackupServer(2, 0, isFileStorage(), isSharedServer(), isNetty());
+      setupBackupServer(2, 0, isFileStorage(), isSharedServer(), isNetty());
 
       setupLiveServer(0, isFileStorage(), isSharedServer(), isNetty());
 
@@ -57,7 +51,6 @@
 
       setUpGroupHandler(GroupingHandlerConfiguration.TYPE.LOCAL, 2);
 
-
       try
       {
          startServers(2, 0, 1);
@@ -79,7 +72,7 @@
          waitForBindings(0, "queues.testaddress", 1, 1, true);
          waitForBindings(1, "queues.testaddress", 1, 1, true);
 
-         waitForServerTopology(servers[1], 3, 5);
+         waitForTopology(servers[1], 2);
 
          sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id1"));
 
@@ -88,7 +81,7 @@
          closeSessionFactory(0);
 
          servers[0].stop(true);
-         
+
          waitForServerRestart(2);
 
          setupSessionFactory(2, isNetty());
@@ -129,14 +122,12 @@
 
       setupClusterConnection("cluster0", "queues", false, 1, isNetty(), 2, 1);
 
-
       setUpGroupHandler(GroupingHandlerConfiguration.TYPE.LOCAL, 0);
 
       setUpGroupHandler(GroupingHandlerConfiguration.TYPE.REMOTE, 1);
 
       setUpGroupHandler(GroupingHandlerConfiguration.TYPE.LOCAL, 2);
 
-
       try
       {
          startServers(2, 0, 1);
@@ -162,9 +153,8 @@
          waitForBindings(0, "queues.testaddress", 1, 1, true);
          waitForBindings(1, "queues.testaddress", 1, 1, true);
 
-         waitForServerTopology(servers[1], 3, 5);
+         waitForTopology(servers[1], 2);
 
-
          sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id1"));
          sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id2"));
          sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id3"));
@@ -215,21 +205,6 @@
       }
    }
 
-   private void waitForServerTopology(HornetQServer server, int nodes, int seconds)
-         throws InterruptedException
-   {
-      Topology topology = server.getClusterManager().getTopology();
-      long timeToWait = System.currentTimeMillis() + (seconds * 1000);
-      while(topology.nodes()!= nodes)
-      {
-         Thread.sleep(100);
-         if(System.currentTimeMillis() > timeToWait)
-         {
-            fail("timed out waiting for server topology");
-         }
-      }
-   }
-
    public boolean isNetty()
    {
       return true;

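For reference, the removed waitForServerTopology helper polled the cluster topology until the expected node count was reached; the shared waitForTopology(server, nodes) call that replaces it follows the same idea. A minimal stand-alone sketch of that polling pattern, reusing only the API calls visible in the removed code (the timeout handling below is an assumption):

    import org.hornetq.core.client.impl.Topology;
    import org.hornetq.core.server.HornetQServer;

    // Poll the server's cluster topology until it reports the expected number of
    // nodes, or give up after timeoutMillis. Same shape as the removed helper.
    static void waitForTopologyNodes(HornetQServer server, int expectedNodes, long timeoutMillis)
       throws InterruptedException
    {
       Topology topology = server.getClusterManager().getTopology();
       long deadline = System.currentTimeMillis() + timeoutMillis;
       while (topology.nodes() != expectedNodes)
       {
          if (System.currentTimeMillis() > deadline)
          {
             throw new IllegalStateException("timed out waiting for " + expectedNodes + " topology nodes");
          }
          Thread.sleep(100);
       }
    }
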
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -25,6 +25,7 @@
 import org.hornetq.core.client.impl.ServerLocatorInternal;
 import org.hornetq.core.config.ClusterConnectionConfiguration;
 import org.hornetq.core.config.Configuration;
+import org.hornetq.core.logging.Logger;
 import org.hornetq.core.server.NodeManager;
 import org.hornetq.core.server.impl.InVMNodeManager;
 import org.hornetq.tests.integration.cluster.util.SameProcessHornetQServer;
@@ -55,7 +56,7 @@
       }
       super.tearDown();
    }
-
+   
    public void testMultipleFailovers2LiveServers() throws Exception
    {
       NodeManager nodeManager1 = new InVMNodeManager();
@@ -157,7 +158,7 @@
       config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
       config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);
 
-      servers.put(nodeid, new SameProcessHornetQServer(createInVMFailoverServer(true, config1, nodeManager)));
+      servers.put(nodeid, new SameProcessHornetQServer(createInVMFailoverServer(true, config1, nodeManager, liveNode)));
    }
 
    protected void createLiveConfig(NodeManager nodeManager, int liveNode, int ... otherLiveNodes)
@@ -187,7 +188,7 @@
       config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
       config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);
 
-      servers.put(liveNode, new SameProcessHornetQServer(createInVMFailoverServer(true, config0, nodeManager)));
+      servers.put(liveNode, new SameProcessHornetQServer(createInVMFailoverServer(true, config0, nodeManager, liveNode)));
    }
 
    protected boolean isNetty()

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousReattachTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousReattachTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousReattachTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -16,6 +16,7 @@
 import org.hornetq.api.core.HornetQException;
 import org.hornetq.api.core.client.ClientSession;
 import org.hornetq.core.client.impl.ClientSessionInternal;
+import org.hornetq.core.logging.Logger;
 
 /**
  * A NettyAsynchronousReattachTest
@@ -40,12 +41,14 @@
    // Package protected ---------------------------------------------
 
    // Protected -----------------------------------------------------
-   
 
+   private final Logger log = Logger.getLogger(NettyAsynchronousReattachTest.class);
+
    protected void crash(final ClientSession... sessions) throws Exception
    {
       for (ClientSession session : sessions)
       {
+         log.debug("Crashing session " + session);
          ClientSessionInternal internalSession = (ClientSessionInternal) session;
          internalSession.getConnection().fail(new HornetQException(HornetQException.NOT_CONNECTED, "oops"));
       }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -243,7 +243,8 @@
                                       PagingFailoverTest.PAGE_SIZE,
                                       PagingFailoverTest.PAGE_MAX,
                                       new HashMap<String, AddressSettings>(),
-                                      nodeManager);
+                                      nodeManager,
+                                      2);
    }
 
    @Override

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -125,7 +125,7 @@
       config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
       config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);
 
-      servers.put(nodeid, new SameProcessHornetQServer(createInVMFailoverServer(true, config1, nodeManager)));
+      servers.put(nodeid, new SameProcessHornetQServer(createInVMFailoverServer(true, config1, nodeManager, nodeid)));
    }
 
    protected void createLiveConfig(int liveNode, int ... otherLiveNodes)
@@ -155,7 +155,7 @@
       config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
       config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);
 
-      servers.put(liveNode, new SameProcessHornetQServer(createInVMFailoverServer(true, config0, nodeManager)));
+      servers.put(liveNode, new SameProcessHornetQServer(createInVMFailoverServer(true, config0, nodeManager, liveNode)));
    }
 
    protected boolean isNetty()

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/reattach/OrderReattachTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/reattach/OrderReattachTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/reattach/OrderReattachTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -41,15 +41,6 @@
  */
 public class OrderReattachTest extends ServiceTestBase
 {
-
-   // Disabled for now... under investigation (Clebert)
-   public static TestSuite suite()
-   {
-      TestSuite suite = new TestSuite();
-
-      return suite;
-   }
-
    // Constants -----------------------------------------------------
 
    final SimpleString ADDRESS = new SimpleString("address");
@@ -67,13 +58,7 @@
 
    public void testOrderOnSendInVM() throws Throwable
    {
-      for (int i = 0; i < 500; i++)
-      {
-         log.info("#" + getName() + " # " + i);
-         doTestOrderOnSend(false);
-         tearDown();
-         setUp();
-      }
+      doTestOrderOnSend(false);
    }
 
    public void doTestOrderOnSend(final boolean isNetty) throws Throwable
@@ -83,7 +68,7 @@
       server.start();
       ServerLocator locator = createFactory(isNetty);
       locator.setReconnectAttempts(-1);
-      locator.setConfirmationWindowSize(100 * 1024 * 1024);
+      locator.setConfirmationWindowSize(1024 * 1024);
       locator.setBlockOnNonDurableSend(false);
       locator.setBlockOnAcknowledge(false);
       ClientSessionFactory sf = locator.createSessionFactory();
@@ -192,7 +177,7 @@
 
       final int numMessages = 500;
 
-      final int numSessions = 100;
+      final int numSessions = 10;
 
       Set<ClientConsumer> consumers = new HashSet<ClientConsumer>();
       Set<ClientSession> sessions = new HashSet<ClientSession>();

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/restart/ClusterRestartTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/restart/ClusterRestartTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/restart/ClusterRestartTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -15,6 +15,7 @@
 import java.util.Collection;
 
 import org.hornetq.api.core.SimpleString;
+import org.hornetq.core.logging.Logger;
 import org.hornetq.core.postoffice.Binding;
 import org.hornetq.tests.integration.cluster.distribution.ClusterTestBase;
 
@@ -24,6 +25,8 @@
  */
 public class ClusterRestartTest extends ClusterTestBase
 {
+   Logger log = Logger.getLogger(ClusterRestartTest.class);
+     
    public void testRestartWithQueuesCreateInDiffOrder() throws Exception
    {
       setupServer(0, isFileStorage(), isNetty());
@@ -71,12 +74,11 @@
 
          sendInRange(1, "queues.testaddress", 0, 10, true, null);
 
-         System.out.println("stopping******************************************************");
+         log.info("stopping******************************************************");
          stopServers(0);
+         // Wait some time after the server has stopped
          Thread.sleep(2000);
-         System.out.println("stopped******************************************************");
          startServers(0);
-
          
          waitForBindings(0, "queues.testaddress", 1, 1, true);
          waitForBindings(1, "queues.testaddress", 1, 0, true);

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/topology/TopologyClusterTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/topology/TopologyClusterTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/topology/TopologyClusterTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -26,6 +26,7 @@
 import org.hornetq.api.core.client.ClientSessionFactory;
 import org.hornetq.api.core.client.ClusterTopologyListener;
 import org.hornetq.api.core.client.ServerLocator;
+import org.hornetq.core.client.impl.ServerLocatorImpl;
 import org.hornetq.core.logging.Logger;
 import org.hornetq.core.server.HornetQServer;
 import org.hornetq.core.server.cluster.ClusterConnection;
@@ -194,6 +195,8 @@
       startServers(0);
 
       ServerLocator locator = createHAServerLocator();
+      
+      ((ServerLocatorImpl)locator).getTopology().setOwner("testReceive");
 
       final List<String> nodes = new ArrayList<String>();
       final CountDownLatch upLatch = new CountDownLatch(5);
@@ -207,18 +210,32 @@
          {
             if(!nodes.contains(nodeID))
             {
+               System.out.println("Node UP " + nodeID + " added");
+               log.info("Node UP " + nodeID + " added");
                nodes.add(nodeID);
                upLatch.countDown();
             }
+            else
+            {
+               System.out.println("Node UP " + nodeID + " was already here");
+               log.info("Node UP " + nodeID + " was already here");
+            }
          }
 
          public void nodeDown(String nodeID)
          {
             if (nodes.contains(nodeID))
             {
+               log.info("Node down " + nodeID + " accepted");
+               System.out.println("Node down " + nodeID + " accepted");
                nodes.remove(nodeID);
                downLatch.countDown();
             }
+            else
+            {
+               log.info("Node down " + nodeID + " already removed");
+               System.out.println("Node down " + nodeID + " already removed");
+            }
          }
       });
 

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -116,6 +116,11 @@
 
    public void crash(ClientSession... sessions) throws Exception
    {
+      crash(true, sessions);
+   }
+   
+   public void crash(final boolean waitFailure, ClientSession... sessions) throws Exception
+   {
       final CountDownLatch latch = new CountDownLatch(sessions.length);
 
       class MyListener implements SessionFailureListener
@@ -140,10 +145,13 @@
          serverProcess = null;
       }
       
-      // Wait to be informed of failure
-      boolean ok = latch.await(10000, TimeUnit.MILLISECONDS);
-
-      Assert.assertTrue(ok);
+      if (waitFailure)
+      {
+         // Wait to be informed of failure
+         boolean ok = latch.await(10000, TimeUnit.MILLISECONDS);
+   
+         Assert.assertTrue(ok);
+      }
    }
 
 
@@ -178,4 +186,11 @@
    {
       return null;
    }
+
+   /* (non-Javadoc)
+    * @see org.hornetq.tests.integration.cluster.util.TestableServer#setIdentity(java.lang.String)
+    */
+   public void setIdentity(String identity)
+   {
+   }
 }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/SameProcessHornetQServer.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/SameProcessHornetQServer.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/SameProcessHornetQServer.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -22,6 +22,7 @@
 import org.hornetq.api.core.Interceptor;
 import org.hornetq.api.core.client.ClientSession;
 import org.hornetq.api.core.client.SessionFailureListener;
+import org.hornetq.core.logging.Logger;
 import org.hornetq.core.server.HornetQServer;
 import org.hornetq.core.server.cluster.impl.ClusterManagerImpl;
 
@@ -34,6 +35,7 @@
  */
 public class SameProcessHornetQServer implements TestableServer
 {
+   private static Logger log = Logger.getLogger(SameProcessHornetQServer.class);
    
    private HornetQServer server;
 
@@ -52,6 +54,11 @@
       //To change body of implemented methods use File | Settings | File Templates.
    }
 
+   public void setIdentity(String identity)
+   {
+      server.setIdentity(identity);
+   }
+   
    public boolean isStarted()
    {
       return server.isStarted();
@@ -79,18 +86,24 @@
 
    public void crash(ClientSession... sessions) throws Exception
    {
+      crash(true, sessions);
+   }
+
+   public void crash(boolean waitFailure, ClientSession... sessions) throws Exception
+   {
       final CountDownLatch latch = new CountDownLatch(sessions.length);
 
       class MyListener implements SessionFailureListener
       {
          public void connectionFailed(final HornetQException me, boolean failedOver)
          {
+            log.debug("MyListener.connectionFailed failedOver=" + failedOver, me);
             latch.countDown();
          }
 
          public void beforeReconnect(HornetQException exception)
          {
-            System.out.println("MyListener.beforeReconnect");
+            log.debug("MyListener.beforeReconnect", exception);
          }
       }
       for (ClientSession session : sessions)
@@ -102,11 +115,12 @@
       clusterManager.clear();
       server.stop(true);
 
-
-      // Wait to be informed of failure
-      boolean ok = latch.await(10000, TimeUnit.MILLISECONDS);
-
-      Assert.assertTrue(ok);
+      if (waitFailure)
+      {
+         // Wait to be informed of failure
+         boolean ok = latch.await(10000, TimeUnit.MILLISECONDS);
+         Assert.assertTrue(ok);
+      }
    }
 
    /* (non-Javadoc)

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/TestableServer.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/TestableServer.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/cluster/util/TestableServer.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -28,12 +28,16 @@
 {
 
    HornetQServer getServer();
+   
+   public void setIdentity(String identity);
 
    public void start() throws Exception;
 
    public void stop() throws Exception;
 
    public void crash(ClientSession... sessions) throws Exception;
+   
+   public void crash(boolean waitFailure, ClientSession... sessions) throws Exception;
 
    public boolean isInitialised();
 

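The new two-argument crash overload lets a test trigger a server crash without blocking until every session's failure listener has fired. A caller-side sketch (liveServer and session are illustrative variables, assumed to be a TestableServer and an already-open ClientSession):

    // Crash and wait up to 10 seconds for each session to be notified of the
    // failure (same behaviour as the original single-argument method):
    liveServer.crash(session);

    // Crash without waiting for the failure callbacks, as the new overload allows:
    liveServer.crash(false, session);
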
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/http/CoreClientOverHttpTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/http/CoreClientOverHttpTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/http/CoreClientOverHttpTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -19,9 +19,14 @@
 
 import org.hornetq.api.core.SimpleString;
 import org.hornetq.api.core.TransportConfiguration;
-import org.hornetq.api.core.client.*;
+import org.hornetq.api.core.client.ClientConsumer;
+import org.hornetq.api.core.client.ClientMessage;
+import org.hornetq.api.core.client.ClientProducer;
+import org.hornetq.api.core.client.ClientSession;
+import org.hornetq.api.core.client.ClientSessionFactory;
+import org.hornetq.api.core.client.HornetQClient;
+import org.hornetq.api.core.client.ServerLocator;
 import org.hornetq.core.config.Configuration;
-import org.hornetq.core.config.impl.ConfigurationImpl;
 import org.hornetq.core.remoting.impl.netty.NettyAcceptorFactory;
 import org.hornetq.core.remoting.impl.netty.NettyConnectorFactory;
 import org.hornetq.core.remoting.impl.netty.TransportConstants;

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/bridge/BridgeTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/bridge/BridgeTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/bridge/BridgeTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -33,6 +33,8 @@
 
 import junit.framework.Assert;
 
+import com.arjuna.ats.arjuna.coordinator.TransactionReaper;
+import com.arjuna.ats.arjuna.coordinator.TxControl;
 import com.arjuna.ats.internal.jta.transaction.arjunacore.TransactionManagerImple;
 
 import org.hornetq.api.core.TransportConfiguration;
@@ -42,7 +44,6 @@
 import org.hornetq.api.jms.management.JMSQueueControl;
 import org.hornetq.api.jms.management.TopicControl;
 import org.hornetq.core.config.Configuration;
-import org.hornetq.core.config.impl.ConfigurationImpl;
 import org.hornetq.core.logging.Logger;
 import org.hornetq.core.remoting.impl.invm.InVMConnectorFactory;
 import org.hornetq.core.remoting.impl.invm.TransportConstants;
@@ -209,6 +210,11 @@
       context0 = null;
 
       context1 = null;
+      
+      // Shutting down Arjuna threads
+      TxControl.disable(true);
+      
+      TransactionReaper.terminate(false);
 
       super.tearDown();
    }

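The added tearDown calls shut down the background Arjuna transaction-manager threads so they do not linger between tests. A minimal sketch of the same shutdown step (the meaning of the boolean arguments is inferred from this commit and should be treated as an assumption):

    import com.arjuna.ats.arjuna.coordinator.TransactionReaper;
    import com.arjuna.ats.arjuna.coordinator.TxControl;

    // Called from tearDown after the bridge and JMS resources are released.
    private void stopArjunaThreads()
    {
       TxControl.disable(true);              // stop accepting new transactions (assumed semantics)
       TransactionReaper.terminate(false);   // stop the reaper thread without waiting (assumed semantics)
    }
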
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/AutoGroupingTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/AutoGroupingTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/AutoGroupingTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -19,7 +19,6 @@
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.jms.HornetQJMSClient;
 import org.hornetq.api.jms.JMSFactoryType;
-import org.hornetq.core.remoting.impl.netty.NettyConnectorFactory;
 import org.hornetq.jms.client.HornetQJMSConnectionFactory;
 
 /**
@@ -35,7 +34,7 @@
    @Override
    protected ConnectionFactory getCF() throws Exception
    {
-      HornetQJMSConnectionFactory cf = (HornetQJMSConnectionFactory)HornetQJMSClient.createConnectionFactoryWithoutHA(JMSFactoryType.CF, new TransportConfiguration(NettyConnectorFactory.class.getName()));
+      HornetQJMSConnectionFactory cf = (HornetQJMSConnectionFactory)HornetQJMSClient.createConnectionFactoryWithoutHA(JMSFactoryType.CF, new TransportConfiguration(INVM_CONNECTOR_FACTORY));
       
       cf.setAutoGroup(true);
       

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/GroupIDTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/GroupIDTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/GroupIDTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -19,7 +19,6 @@
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.jms.HornetQJMSClient;
 import org.hornetq.api.jms.JMSFactoryType;
-import org.hornetq.core.remoting.impl.netty.NettyConnectorFactory;
 import org.hornetq.jms.client.HornetQJMSConnectionFactory;
 
 /**
@@ -35,7 +34,7 @@
    @Override
    protected ConnectionFactory getCF() throws Exception
    {
-      HornetQJMSConnectionFactory cf = (HornetQJMSConnectionFactory)HornetQJMSClient.createConnectionFactoryWithoutHA(JMSFactoryType.CF, new TransportConfiguration(NettyConnectorFactory.class.getName()));
+      HornetQJMSConnectionFactory cf = (HornetQJMSConnectionFactory)HornetQJMSClient.createConnectionFactoryWithoutHA(JMSFactoryType.CF, new TransportConfiguration(INVM_CONNECTOR_FACTORY));
       
       cf.setGroupID("wibble");
       

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/SessionClosedOnRemotingConnectionFailureTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/SessionClosedOnRemotingConnectionFailureTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/jms/client/SessionClosedOnRemotingConnectionFailureTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -27,13 +27,11 @@
 import junit.framework.Assert;
 
 import org.hornetq.api.core.HornetQException;
-import org.hornetq.api.core.Pair;
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.core.client.HornetQClient;
 import org.hornetq.api.jms.JMSFactoryType;
 import org.hornetq.core.client.impl.ClientSessionInternal;
 import org.hornetq.core.logging.Logger;
-import org.hornetq.core.remoting.impl.netty.NettyConnectorFactory;
 import org.hornetq.jms.client.HornetQSession;
 import org.hornetq.spi.core.protocol.RemotingConnection;
 import org.hornetq.tests.util.JMSTestBase;
@@ -63,7 +61,7 @@
    public void testSessionClosedOnRemotingConnectionFailure() throws Exception
    {
       List<TransportConfiguration> connectorConfigs = new ArrayList<TransportConfiguration>();
-      connectorConfigs.add(new TransportConfiguration(NettyConnectorFactory.class.getName()));
+      connectorConfigs.add(new TransportConfiguration(INVM_CONNECTOR_FACTORY));
 
 
       jmsServer.createConnectionFactory("cffoo",

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/management/QueueControlTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/management/QueueControlTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/management/QueueControlTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -13,6 +13,7 @@
 
 package org.hornetq.tests.integration.management;
 
+import java.util.LinkedList;
 import java.util.Map;
 
 import junit.framework.Assert;
@@ -888,6 +889,55 @@
       session.deleteQueue(queue);
    }
 
+   public void testRemoveMessage2() throws Exception
+   {
+      SimpleString address = RandomUtil.randomSimpleString();
+      SimpleString queue = RandomUtil.randomSimpleString();
+
+      session.createQueue(address, queue, null, false);
+      ClientProducer producer = session.createProducer(address);
+
+      // send messages on queue
+      
+      for (int i = 0 ; i < 100; i++)
+      {
+         
+         ClientMessage msg = session.createMessage(false);
+         msg.putIntProperty("count", i);
+         producer.send(msg);
+      }
+      
+      ClientConsumer cons = session.createConsumer(queue);
+      session.start();
+      LinkedList<ClientMessage> msgs = new LinkedList<ClientMessage>();
+      for (int i = 0; i < 50; i++)
+      {
+         ClientMessage msg = cons.receive(1000);
+         msgs.add(msg);
+      }
+
+      QueueControl queueControl = createManagementControl(address, queue);
+      Assert.assertEquals(100, queueControl.getMessageCount());
+
+      // the message IDs are set on the server
+      Map<String, Object>[] messages = queueControl.listMessages(null);
+      Assert.assertEquals(50, messages.length);
+      assertEquals(50, ((Number)messages[0].get("count")).intValue());
+      long messageID = (Long)messages[0].get("messageID");
+
+      // delete 1st message
+      boolean deleted = queueControl.removeMessage(messageID);
+      Assert.assertTrue(deleted);
+      Assert.assertEquals(99, queueControl.getMessageCount());
+      
+      cons.close();
+
+      // check there is a single message to consume from queue
+      ManagementTestBase.consumeMessages(99, session, queue);
+
+      session.deleteQueue(queue);
+   }
+
    public void testCountMessagesWithFilter() throws Exception
    {
       SimpleString key = new SimpleString("key");
@@ -1477,6 +1527,7 @@
       locator = HornetQClient.createServerLocatorWithoutHA(new TransportConfiguration(UnitTestCase.INVM_CONNECTOR_FACTORY));
       locator.setBlockOnNonDurableSend(true);
       locator.setBlockOnNonDurableSend(true);
+      locator.setConsumerWindowSize(0);
       ClientSessionFactory sf = locator.createSessionFactory();
       session = sf.createSession(false, true, false);
       session.start();

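The new testRemoveMessage2 drives message removal through the management API while a consumer still holds part of the queue. The management calls it relies on, shown in isolation (queueControl is the QueueControl proxy obtained from createManagementControl, as in the test):

    // List all messages (null filter), read the server-assigned ID of the first one,
    // then remove that single message by ID.
    Map<String, Object>[] messages = queueControl.listMessages(null);
    long messageID = (Long) messages[0].get("messageID");
    boolean deleted = queueControl.removeMessage(messageID);
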
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/persistence/JMSDynamicConfigTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/persistence/JMSDynamicConfigTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/persistence/JMSDynamicConfigTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -52,7 +52,7 @@
    {
       ArrayList<String> connectors = new ArrayList<String>();
 
-      connectors.add("netty");
+      connectors.add("invm");
 
       ConnectionFactoryConfiguration cfg = new ConnectionFactoryConfigurationImpl("tst", false, connectors, "tt");
       jmsServer.createConnectionFactory(true, cfg, "tst");

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/MessageSender.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/MessageSender.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/MessageSender.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -47,6 +47,7 @@
          MessageProducer producer = session.createProducer(destination);
          TextMessage message = session.createTextMessage(msg);
          producer.send(message);
+         conn.close();
       }
       catch (Exception ex)
       {

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/SpringIntegrationTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/SpringIntegrationTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/spring/SpringIntegrationTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -2,10 +2,12 @@
 
 import junit.framework.Assert;
 
+import org.hornetq.jms.client.HornetQConnectionFactory;
 import org.hornetq.jms.server.embedded.EmbeddedJMS;
 import org.hornetq.tests.util.UnitTestCase;
 import org.springframework.context.ApplicationContext;
 import org.springframework.context.support.ClassPathXmlApplicationContext;
+import org.springframework.jms.listener.DefaultMessageListenerContainer;
 
 /**
  * @author <a href="mailto:bill at burkecentral.com">Bill Burke</a>
@@ -24,11 +26,21 @@
          sender.send("Hello world");
          Thread.sleep(100);
          Assert.assertEquals(ExampleListener.lastMessage, "Hello world");
+         ((HornetQConnectionFactory)sender.getConnectionFactory()).close();
       }
       finally
       {
          try
          {
+            DefaultMessageListenerContainer container = (DefaultMessageListenerContainer)context.getBean("listenerContainer");
+            container.stop();
+         }
+         catch (Throwable ignored)
+         {
+            ignored.printStackTrace();
+         }
+         try
+         {
             EmbeddedJMS jms = (EmbeddedJMS)context.getBean("EmbeddedJms");
             jms.stop();
          }

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/stomp/StompTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/stomp/StompTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/stomp/StompTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -87,6 +87,8 @@
    protected void setUp() throws Exception
    {
       super.setUp();
+      
+      forceGC();
 
       server = createServer();
       server.start();

Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/twitter/TwitterTest.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/twitter/TwitterTest.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/integration/twitter/TwitterTest.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -18,6 +18,7 @@
 import java.util.Set;
 
 import junit.framework.Assert;
+import junit.framework.TestSuite;
 
 import org.hornetq.api.core.TransportConfiguration;
 import org.hornetq.api.core.client.ClientConsumer;
@@ -63,18 +64,35 @@
    private static final String TWITTER_ACCESS_TOKEN = System.getProperty("twitter.accessToken");
    private static final String TWITTER_ACCESS_TOKEN_SECRET = System.getProperty("twitter.accessTokenSecret");
 
-   @Override
-   protected void setUp() throws Exception
+   // incoming
+   
+   public void setUp() throws Exception
    {
-      if(TWITTER_CONSUMER_KEY == null || TWITTER_CONSUMER_SECRET == null || TWITTER_ACCESS_TOKEN == null || TWITTER_ACCESS_TOKEN_SECRET == null)
+      super.setUp();
+   }
+   
+   
+
+   public static TestSuite suite()
+   {
+      TestSuite suite = new TestSuite(TwitterTest.class.getName() + " testsuite");
+
+      if (TWITTER_CONSUMER_KEY != null && !TWITTER_CONSUMER_KEY.equals("null"))
       {
-         throw new Exception("* * *  Please set twitter.consumerKey, twitter.consumerSecret, twitter.accessToken and twitter.accessTokenSecuret in system property  * * *");
+         suite.addTestSuite(TwitterTest.class);
       }
-      super.setUp();
+      else
+      {
+         // System.out goes to the JUnit report
+         String errorMsg = "Test " + TwitterTest.class.getName() +
+                           " ignored as twitter.consumerKey, twitter.consumerSecret, twitter.accessToken and twitter.accessTokenSecret are not set as system properties";
+         System.out.println(errorMsg);
+         log.warn(errorMsg);
+      }
+
+      return suite;
    }
 
-   // incoming
-   
    public void testSimpleIncoming() throws Exception
    {
       internalTestIncoming(true,false);

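Rather than throwing from setUp, the test now exposes a suite() method that simply omits the class when the Twitter credentials are missing, so the run is skipped instead of failed. A generic, hedged variant of the same JUnit 3 pattern (the helper name and property handling below are illustrative, not part of the commit):

    import junit.framework.TestCase;
    import junit.framework.TestSuite;

    // Include a test class in the suite only when a required system property is set.
    public static TestSuite conditionalSuite(Class<? extends TestCase> testClass, String requiredProperty)
    {
       TestSuite suite = new TestSuite(testClass.getName() + " testsuite");
       if (System.getProperty(requiredProperty) != null)
       {
          suite.addTestSuite(testClass);
       }
       else
       {
          // goes to the console as well as the JUnit report
          System.out.println(testClass.getName() + " skipped: " + requiredProperty + " is not set");
       }
       return suite;
    }
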
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/JMSTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/JMSTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/JMSTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -27,8 +27,6 @@
 import org.hornetq.api.core.client.HornetQClient;
 import org.hornetq.api.jms.JMSFactoryType;
 import org.hornetq.core.config.Configuration;
-import org.hornetq.core.remoting.impl.netty.NettyAcceptorFactory;
-import org.hornetq.core.remoting.impl.netty.NettyConnectorFactory;
 import org.hornetq.core.server.HornetQServer;
 import org.hornetq.core.server.HornetQServers;
 import org.hornetq.jms.server.impl.JMSServerManagerImpl;
@@ -124,8 +122,8 @@
 
       Configuration conf = createDefaultConfig(false);
 
-      conf.getAcceptorConfigurations().add(new TransportConfiguration(NettyAcceptorFactory.class.getName()));
-      conf.getConnectorConfigurations().put("netty", new TransportConfiguration(NettyConnectorFactory.class.getName()));
+      conf.getAcceptorConfigurations().add(new TransportConfiguration(INVM_ACCEPTOR_FACTORY));
+      conf.getConnectorConfigurations().put("invm", new TransportConfiguration(INVM_CONNECTOR_FACTORY));
 
       server = HornetQServers.newHornetQServer(conf, mbeanServer, usePersistence());
 
@@ -195,7 +193,7 @@
    protected void registerConnectionFactory() throws Exception
    {
       List<TransportConfiguration> connectorConfigs = new ArrayList<TransportConfiguration>();
-      connectorConfigs.add(new TransportConfiguration(NettyConnectorFactory.class.getName()));
+      connectorConfigs.add(new TransportConfiguration(INVM_CONNECTOR_FACTORY));
 
       createCF(connectorConfigs, "/cf");
 

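JMSTestBase now wires the embedded server and the registered connection factory over the in-VM transport instead of Netty, presumably so no network acceptor threads survive into the new thread-leak check. The relevant configuration, in isolation (INVM_ACCEPTOR_FACTORY and INVM_CONNECTOR_FACTORY are the constants defined in UnitTestCase):

    // Server side: in-VM acceptor plus an "invm" connector entry.
    conf.getAcceptorConfigurations().add(new TransportConfiguration(INVM_ACCEPTOR_FACTORY));
    conf.getConnectorConfigurations().put("invm", new TransportConfiguration(INVM_CONNECTOR_FACTORY));

    // Client side: the connection factory is registered against the same in-VM connector.
    List<TransportConfiguration> connectorConfigs = new ArrayList<TransportConfiguration>();
    connectorConfigs.add(new TransportConfiguration(INVM_CONNECTOR_FACTORY));
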
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/ServiceTestBase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/ServiceTestBase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/ServiceTestBase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -284,14 +284,16 @@
 
    protected HornetQServer createInVMFailoverServer(final boolean realFiles,
                                                     final Configuration configuration,
-                                                    NodeManager nodeManager)
+                                                    final NodeManager nodeManager,
+                                                    final int id)
    {
       return createInVMFailoverServer(realFiles,
                                       configuration,
                                       -1,
                                       -1,
                                       new HashMap<String, AddressSettings>(),
-                                      nodeManager);
+                                      nodeManager,
+                                      id);
    }
 
    protected HornetQServer createInVMFailoverServer(final boolean realFiles,
@@ -299,7 +301,8 @@
                                                     final int pageSize,
                                                     final int maxAddressSize,
                                                     final Map<String, AddressSettings> settings,
-                                                    NodeManager nodeManager)
+                                                    NodeManager nodeManager,
+                                                    final int id)
    {
       HornetQServer server;
       HornetQSecurityManager securityManager = new HornetQSecurityManagerImpl();
@@ -308,6 +311,8 @@
                                          ManagementFactory.getPlatformMBeanServer(),
                                          securityManager,
                                          nodeManager);
+      
+      server.setIdentity("Server " + id);
 
       for (Map.Entry<String, AddressSettings> setting : settings.entrySet())
       {

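createInVMFailoverServer now requires the node id and tags the server with setIdentity("Server " + id), which makes per-node log output distinguishable. On the caller side, as updated elsewhere in this commit, the node id is simply passed through:

    servers.put(liveNode, new SameProcessHornetQServer(
          createInVMFailoverServer(true, config0, nodeManager, liveNode)));
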
Modified: branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/UnitTestCase.java
===================================================================
--- branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/UnitTestCase.java	2011-08-08 17:51:00 UTC (rev 11149)
+++ branches/Branch_2_2_EAP/tests/src/org/hornetq/tests/util/UnitTestCase.java	2011-08-08 21:17:04 UTC (rev 11150)
@@ -259,6 +259,7 @@
 
    public static void forceGC()
    {
+      log.info("#test forceGC");
       WeakReference<Object> dumbReference = new WeakReference<Object>(new Object());
       // A loop that will wait GC, using the minimal time as possible
       while (dumbReference.get() != null)
@@ -266,12 +267,13 @@
          System.gc();
          try
          {
-            Thread.sleep(500);
+            Thread.sleep(100);
          }
          catch (InterruptedException e)
          {
          }
       }
+      log.info("#test forceGC Done");
    }
 
    public static void forceGC(Reference<?> ref, long timeout)
@@ -363,7 +365,24 @@
 
       return str.toString();
    }
+   
+   /** Sends the message to both logger and System.out (for unit report) */
+   public void logAndSystemOut(String message, Exception e)
+   {
+      Logger log = Logger.getLogger(this.getClass());
+      log.info(message, e);
+      System.out.println(message);
+      e.printStackTrace(System.out);
+   }
 
+   /** Sends the message to both logger and System.out (for unit report) */
+   public void logAndSystemOut(String message)
+   {
+      Logger log = Logger.getLogger(this.getClass());
+      log.info(message);
+      System.out.println(this.getClass().getName() + "::" + message);
+   }
+
    protected static TestSuite createAIOTestSuite(final Class<?> clazz)
    {
       TestSuite suite = new TestSuite(clazz.getName() + " testsuite");
@@ -870,7 +889,7 @@
 
       previousThreads = Thread.getAllStackTraces();
 
-      UnitTestCase.log.info("###### starting test " + this.getClass().getName() + "." + getName());
+      logAndSystemOut("#test " + getName());
    }
 
    @Override
@@ -908,18 +927,53 @@
                                              this.getName() +
                                              " on this following dump"));
                fail("test left broadcastgroupimpl running, this could effect other tests");
-               // System.exit(0);
             }
          }
       }
 
+      
+      
+      StringBuffer buffer = null;
+      
+      boolean failed =  true;
+      
+      long timeout = System.currentTimeMillis() + 10000;
+      while (failed && timeout > System.currentTimeMillis())
+      {
+         buffer = new StringBuffer();
+         
+         failed = checkThread(buffer);
+         
+         if (failed)
+         {
+            forceGC();
+            Thread.sleep(500);
+            log.info("There are still threads running, trying again");
+         }
+      }
+      
+      if (failed)
+      {
+         logAndSystemOut("Thread leaked on test " + this.getClass().getName() + "::" +
+                         this.getName() + "\n" + buffer.toString());
+         fail("Thread leakage");
+      }
+
+      super.tearDown();
+   }
+
+   /**
+    * @param buffer
+    * @return
+    */
+   private boolean checkThread(StringBuffer buffer)
+   {
+      boolean failedThread = false;
+
       Map<Thread, StackTraceElement[]> postThreads = Thread.getAllStackTraces();
 
-      boolean failedThread = false;
       if (postThreads.size() > previousThreads.size())
       {
-         StringBuffer buffer = new StringBuffer();
-
          
          buffer.append("*********************************************************************************\n");
          buffer.append("LEAKING THREADS\n");
@@ -941,13 +995,8 @@
          }
          buffer.append("*********************************************************************************\n");
 
-         System.out.println(buffer.toString());
-
       }
-      
-      //assertFalse("Thread Failed", failedThread);
-
-      super.tearDown();
+      return failedThread;
    }
 
    /**
@@ -961,6 +1010,7 @@
       if (invmSize > 0)
       {
          InVMRegistry.instance.clear();
+         log.info(threadDump("Thread dump"));
          fail("invm registry still had acceptors registered");
       }
 
@@ -978,6 +1028,7 @@
       catch (Throwable e)
       {
          log.info(threadDump(e.getMessage()));
+         System.err.println(threadDump(e.getMessage()));
          throw new RuntimeException (e.getMessage(), e);
       }
    }

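The reworked tearDown no longer just prints leaking threads; it retries the check for up to ten seconds, forcing a GC between attempts, and fails the test if extra threads remain. The before/after comparison at its core, reduced to a hedged stand-alone sketch (simplified: it compares thread counts only, as checkThread does above):

    import java.util.Map;

    // Snapshot taken in setUp (stored as 'previousThreads' in UnitTestCase):
    Map<Thread, StackTraceElement[]> before = Thread.getAllStackTraces();

    // ... test body runs ...

    // In tearDown (which declares 'throws Exception'): retry for up to 10s.
    long deadline = System.currentTimeMillis() + 10000;
    boolean leaking;
    do
    {
       leaking = Thread.getAllStackTraces().size() > before.size();
       if (leaking)
       {
          System.gc();        // stands in for forceGC() in the real code
          Thread.sleep(500);
       }
    }
    while (leaking && System.currentTimeMillis() < deadline);

    if (leaking)
    {
       junit.framework.Assert.fail("Thread leakage");
    }
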

