JBoss hornetq SVN: r9679 - in trunk: hornetq-rest/hornetq-rest and 2 other directories.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-13 17:33:55 -0400 (Mon, 13 Sep 2010)
New Revision: 9679
Added:
trunk/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java
Modified:
trunk/examples/soak/tx-restarts/
trunk/hornetq-rest/hornetq-rest/
trunk/src/main/org/hornetq/jms/client/HornetQConnection.java
Log:
HORNETQ-510 - Letting getClientID be called before a setClientID
Property changes on: trunk/examples/soak/tx-restarts
___________________________________________________________________
Name: svn:ignore
+ build
Property changes on: trunk/hornetq-rest/hornetq-rest
___________________________________________________________________
Name: svn:ignore
+ target
Modified: trunk/src/main/org/hornetq/jms/client/HornetQConnection.java
===================================================================
--- trunk/src/main/org/hornetq/jms/client/HornetQConnection.java 2010-09-13 21:30:19 UTC (rev 9678)
+++ trunk/src/main/org/hornetq/jms/client/HornetQConnection.java 2010-09-13 21:33:55 UTC (rev 9679)
@@ -163,8 +163,6 @@
{
checkClosed();
- justCreated = false;
-
return clientID;
}
Added: trunk/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java 2010-09-13 21:33:55 UTC (rev 9679)
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2010 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.jms.client;
+
+import javax.jms.Connection;
+
+import org.hornetq.tests.util.JMSTestBase;
+
+/**
+ * A ConnectionTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ConnectionTest extends JMSTestBase
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+
+ public void testGetSetConnectionFactory() throws Exception
+ {
+ Connection conn = cf.createConnection();
+
+ conn.getClientID();
+
+ conn.setClientID("somethingElse");
+ }
+
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
13 years, 9 months
JBoss hornetq SVN: r9678 - in branches/Branch_2_1: tests/src/org/hornetq/tests/integration/jms/client and 1 other directory.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-13 17:30:19 -0400 (Mon, 13 Sep 2010)
New Revision: 9678
Added:
branches/Branch_2_1/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java
Modified:
branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQConnection.java
Log:
HORNETQ-510 - Allowing getClientID calls without changing the new flag
Modified: branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQConnection.java
===================================================================
--- branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQConnection.java 2010-09-13 18:08:17 UTC (rev 9677)
+++ branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQConnection.java 2010-09-13 21:30:19 UTC (rev 9678)
@@ -163,8 +163,6 @@
{
checkClosed();
- justCreated = false;
-
return clientID;
}
Added: branches/Branch_2_1/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java
===================================================================
--- branches/Branch_2_1/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java (rev 0)
+++ branches/Branch_2_1/tests/src/org/hornetq/tests/integration/jms/client/ConnectionTest.java 2010-09-13 21:30:19 UTC (rev 9678)
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2010 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.jms.client;
+
+import javax.jms.Connection;
+
+import org.hornetq.tests.util.JMSTestBase;
+
+/**
+ * A ConnectionTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ConnectionTest extends JMSTestBase
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+
+ public void testGetSetConnectionFactory() throws Exception
+ {
+ Connection conn = cf.createConnection();
+
+ conn.getClientID();
+
+ conn.setClientID("somethingElse");
+ }
+
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
13 years, 9 months
JBoss hornetq SVN: r9677 - trunk/src/main/org/hornetq/jms/client.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-13 14:08:17 -0400 (Mon, 13 Sep 2010)
New Revision: 9677
Modified:
trunk/src/main/org/hornetq/jms/client/HornetQDestination.java
Log:
Making HornetQSession transient
Modified: trunk/src/main/org/hornetq/jms/client/HornetQDestination.java
===================================================================
--- trunk/src/main/org/hornetq/jms/client/HornetQDestination.java 2010-09-13 18:06:11 UTC (rev 9676)
+++ trunk/src/main/org/hornetq/jms/client/HornetQDestination.java 2010-09-13 18:08:17 UTC (rev 9677)
@@ -225,7 +225,7 @@
private final boolean queue;
- private final HornetQSession session;
+ private transient final HornetQSession session;
// Constructors --------------------------------------------------
13 years, 9 months
JBoss hornetq SVN: r9676 - branches/Branch_2_1/src/main/org/hornetq/jms/client.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-13 14:06:11 -0400 (Mon, 13 Sep 2010)
New Revision: 9676
Modified:
branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQDestination.java
Log:
HornetQSession should be transient
Modified: branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQDestination.java
===================================================================
--- branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQDestination.java 2010-09-13 13:35:19 UTC (rev 9675)
+++ branches/Branch_2_1/src/main/org/hornetq/jms/client/HornetQDestination.java 2010-09-13 18:06:11 UTC (rev 9676)
@@ -225,7 +225,7 @@
private final boolean queue;
- private final HornetQSession session;
+ private transient final HornetQSession session;
// Constructors --------------------------------------------------
13 years, 9 months
JBoss hornetq SVN: r9675 - in branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster: util and 1 other directory.
by do-not-reply@jboss.org
Author: jmesnil
Date: 2010-09-13 09:35:19 -0400 (Mon, 13 Sep 2010)
New Revision: 9675
Added:
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupsFailoverTestBase.java
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteMultipleLivesMultipleBackupsFailoverTest.java
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteSingleLiveMultipleBackupsFailoverTest.java
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java
Removed:
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupFailoverTest.java
Modified:
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java
branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServerSupport.java
Log:
add tests for multiple backups on single/multiple live nodes (using either invm or remote processes)
Modified: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java 2010-09-13 07:44:26 UTC (rev 9674)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -97,13 +97,12 @@
FakeLockFile.clearLocks();
createConfigs();
+ liveServer.start();
+
if (backupServer != null)
{
backupServer.start();
}
-
- liveServer.start();
-
}
protected TestableServer createLiveServer()
Deleted: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupFailoverTest.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupFailoverTest.java 2010-09-13 07:44:26 UTC (rev 9674)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupFailoverTest.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -1,370 +0,0 @@
-/*
- * Copyright 2009 Red Hat, Inc.
- * Red Hat licenses this file to you under the Apache License, version
- * 2.0 (the "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package org.hornetq.tests.integration.cluster.failover;
-
-import junit.framework.Assert;
-import org.hornetq.api.core.HornetQException;
-import org.hornetq.api.core.Pair;
-import org.hornetq.api.core.SimpleString;
-import org.hornetq.api.core.TransportConfiguration;
-import org.hornetq.api.core.client.*;
-import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
-import org.hornetq.core.client.impl.ServerLocatorImpl;
-import org.hornetq.core.config.BackupConnectorConfiguration;
-import org.hornetq.core.config.ClusterConnectionConfiguration;
-import org.hornetq.core.config.Configuration;
-import org.hornetq.core.remoting.impl.invm.TransportConstants;
-import org.hornetq.core.server.ActivateCallback;
-import org.hornetq.core.server.HornetQServer;
-import org.hornetq.core.server.cluster.impl.FakeLockFile;
-import org.hornetq.jms.client.HornetQTextMessage;
-import org.hornetq.tests.integration.cluster.util.SameProcessHornetQServer;
-import org.hornetq.tests.integration.cluster.util.TestableServer;
-import org.hornetq.tests.util.ServiceTestBase;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-/**
- */
-public class MultipleBackupFailoverTest extends ServiceTestBase
-{
- private ArrayList<TestableServer> servers = new ArrayList<TestableServer>(5);
-
- @Override
- protected void setUp() throws Exception
- {
- super.setUp();
- clearData();
- FakeLockFile.clearLocks();
- }
-
- public void testMultipleFailovers() throws Exception
- {
- createLiveConfig(0);
- createBackupConfig(0, 1,false, 0, 2, 3, 4, 5);
- createBackupConfig(0, 2,false, 0, 1, 3, 4, 5);
- createBackupConfig(0, 3,false, 0, 1, 2, 4, 5);
- createBackupConfig(0, 4, false, 0, 1, 2, 3, 4);
- createBackupConfig(0, 5, false, 0, 1, 2, 3, 4);
- servers.get(1).start();
- servers.get(2).start();
- servers.get(3).start();
- servers.get(4).start();
- servers.get(5).start();
- servers.get(0).start();
-
- ServerLocator locator = getServerLocator(0);
-
- locator.setBlockOnNonDurableSend(true);
- locator.setBlockOnDurableSend(true);
- locator.setBlockOnAcknowledge(true);
- locator.setReconnectAttempts(-1);
- ClientSessionFactoryInternal sf = createSessionFactoryAndWaitForTopology(locator, 2);
- int backupNode;
- ClientSession session = sendAndConsume(sf, true);
- System.out.println("failing node 0");
- fail(0, session);
- session.close();
- backupNode = waitForBackup(5);
- session = sendAndConsume(sf, false);
- System.out.println("failing node " + backupNode);
- fail(backupNode, session);
- session.close();
- backupNode = waitForBackup(5);
- session = sendAndConsume(sf, false);
- System.out.println("failing node " + backupNode);
- fail(backupNode, session);
- session.close();
- backupNode = waitForBackup(5);
- session = sendAndConsume(sf, false);
- System.out.println("failing node " + backupNode);
- fail(backupNode, session);
- session.close();
- backupNode = waitForBackup(5);
- session = sendAndConsume(sf, false);
- System.out.println("failing node " + backupNode);
- fail(backupNode, session);
- session.close();
- backupNode = waitForBackup(5);
- session = sendAndConsume(sf, false);
- session.close();
- servers.get(backupNode).stop();
- System.out.println("MultipleBackupFailoverTest.test");
- }
-
- public void testMultipleFailovers2liveservers() throws Exception
- {
- createLiveConfig(0, 3);
- createBackupConfig(0, 1, true, 0, 3);
- createBackupConfig(0, 2,true, 0, 3);
- createLiveConfig(3, 0);
- createBackupConfig(3, 4, true,0, 3);
- createBackupConfig(3, 5, true,0, 3);
- servers.get(1).start();
- servers.get(2).start();
- servers.get(0).start();
- servers.get(4).start();
- servers.get(5).start();
- servers.get(3).start();
- ServerLocator locator = getServerLocator(0);
-
- locator.setBlockOnNonDurableSend(true);
- locator.setBlockOnDurableSend(true);
- locator.setBlockOnAcknowledge(true);
- locator.setReconnectAttempts(-1);
- ClientSessionFactoryInternal sf = createSessionFactoryAndWaitForTopology(locator, 4);
- ClientSession session = sendAndConsume(sf, true);
-
- fail(0, session);
-
- ServerLocator locator2 = getServerLocator(3);
- locator2.setBlockOnNonDurableSend(true);
- locator2.setBlockOnDurableSend(true);
- locator2.setBlockOnAcknowledge(true);
- locator2.setReconnectAttempts(-1);
- ClientSessionFactoryInternal sf2 = createSessionFactoryAndWaitForTopology(locator2, 4);
- ClientSession session2 = sendAndConsume(sf2, true);
- fail(3, session2);
- servers.get(2).stop();
- servers.get(4).stop();
- servers.get(1).stop();
- servers.get(3).stop();
- }
-
- protected void fail(int node, final ClientSession... sessions) throws Exception
- {
- servers.get(node).crash(sessions);
- }
-
- protected int waitForBackup(long seconds)
- {
- long time = System.currentTimeMillis();
- long toWait = seconds * 1000;
- while (true)
- {
- for (int i = 0, serversSize = servers.size(); i < serversSize; i++)
- {
- TestableServer backupServer = servers.get(i);
- if (backupServer.isInitialised())
- {
- return i;
- }
- }
- try
- {
- Thread.sleep(100);
- }
- catch (InterruptedException e)
- {
- //ignore
- }
- if (System.currentTimeMillis() > (time + toWait))
- {
- fail("backup server never started");
- }
- }
- }
-
-
- private void createBackupConfig(int liveNode, int nodeid, boolean createClusterConnections, int... nodes)
- {
- Configuration config1 = super.createDefaultConfig();
- config1.getAcceptorConfigurations().clear();
- config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(nodeid));
- config1.setSecurityEnabled(false);
- config1.setSharedStore(true);
- config1.setBackup(true);
- config1.setClustered(true);
- List<String> staticConnectors = new ArrayList<String>();
-
- for (int node : nodes)
- {
- TransportConfiguration liveConnector = getConnectorTransportConfiguration(node);
- config1.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
- staticConnectors.add(liveConnector.getName());
- }
- TransportConfiguration backupConnector = getConnectorTransportConfiguration(nodeid);
- List<String> pairs = null;
- ClusterConnectionConfiguration ccc1 = new ClusterConnectionConfiguration("cluster1", "jms", backupConnector.getName(), -1, false, false, 1, 1,
- createClusterConnections? staticConnectors:pairs);
- config1.getClusterConfigurations().add(ccc1);
- BackupConnectorConfiguration connectorConfiguration = new BackupConnectorConfiguration(staticConnectors, backupConnector.getName());
- config1.setBackupConnectorConfiguration(connectorConfiguration);
- config1.getConnectorConfigurations().put(backupConnector.getName(), backupConnector);
-
-
- config1.setBindingsDirectory(config1.getBindingsDirectory() + "_" + liveNode);
- config1.setJournalDirectory(config1.getJournalDirectory() + "_" + liveNode);
- config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
- config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);
-
- servers.add(new SameProcessHornetQServer(createFakeLockServer(true, config1)));
- }
-
- public void createLiveConfig(int liveNode, int ... otherLiveNodes)
- {
- TransportConfiguration liveConnector = getConnectorTransportConfiguration(liveNode);
- Configuration config0 = super.createDefaultConfig();
- config0.getAcceptorConfigurations().clear();
- config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(liveNode));
- config0.setSecurityEnabled(false);
- config0.setSharedStore(true);
- config0.setClustered(true);
- List<String> pairs = new ArrayList<String>();
- for (int node : otherLiveNodes)
- {
- TransportConfiguration otherLiveConnector = getConnectorTransportConfiguration(node);
- config0.getConnectorConfigurations().put(otherLiveConnector.getName(), otherLiveConnector);
- pairs.add(otherLiveConnector.getName());
-
- }
- ClusterConnectionConfiguration ccc0 = new ClusterConnectionConfiguration("cluster1", "jms", liveConnector.getName(), -1, false, false, 1, 1,
- pairs);
- config0.getClusterConfigurations().add(ccc0);
- config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
-
- config0.setBindingsDirectory(config0.getBindingsDirectory() + "_" + liveNode);
- config0.setJournalDirectory(config0.getJournalDirectory() + "_" + liveNode);
- config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
- config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);
-
- servers.add(new SameProcessHornetQServer(createFakeLockServer(true, config0)));
- }
-
- private TransportConfiguration getConnectorTransportConfiguration(int node)
- {
- HashMap<String, Object> map = new HashMap<String, Object>();
- map.put(TransportConstants.SERVER_ID_PROP_NAME, node);
- return new TransportConfiguration(INVM_CONNECTOR_FACTORY, map);
- }
-
- private TransportConfiguration getAcceptorTransportConfiguration(int node)
- {
- HashMap<String, Object> map = new HashMap<String, Object>();
- map.put(TransportConstants.SERVER_ID_PROP_NAME, node);
- return new TransportConfiguration(INVM_ACCEPTOR_FACTORY, map);
- }
-
- protected ClientSessionFactoryInternal createSessionFactoryAndWaitForTopology(ServerLocator locator, int topologyMembers)
- throws Exception
- {
- ClientSessionFactoryInternal sf;
- CountDownLatch countDownLatch = new CountDownLatch(topologyMembers);
-
- locator.addClusterTopologyListener(new LatchClusterTopologyListener(countDownLatch));
-
- sf = (ClientSessionFactoryInternal) locator.createSessionFactory();
-
- boolean ok = countDownLatch.await(5, TimeUnit.SECONDS);
- assertTrue(ok);
- return sf;
- }
-
- public ServerLocator getServerLocator(int... nodes)
- {
- TransportConfiguration[] configs = new TransportConfiguration[nodes.length];
- for (int i = 0, configsLength = configs.length; i < configsLength; i++)
- {
- HashMap<String, Object> map = new HashMap<String, Object>();
- map.put(TransportConstants.SERVER_ID_PROP_NAME, nodes[i]);
- configs[i] = new TransportConfiguration(INVM_CONNECTOR_FACTORY, map);
-
- }
- return new ServerLocatorImpl(true, configs);
- }
-
- private ClientSession sendAndConsume(final ClientSessionFactory sf, final boolean createQueue) throws Exception
- {
- ClientSession session = sf.createSession(false, true, true);
-
- if (createQueue)
- {
- session.createQueue(FailoverTestBase.ADDRESS, FailoverTestBase.ADDRESS, null, false);
- }
-
- ClientProducer producer = session.createProducer(FailoverTestBase.ADDRESS);
-
- final int numMessages = 1000;
-
- for (int i = 0; i < numMessages; i++)
- {
- ClientMessage message = session.createMessage(HornetQTextMessage.TYPE,
- false,
- 0,
- System.currentTimeMillis(),
- (byte) 1);
- message.putIntProperty(new SimpleString("count"), i);
- message.getBodyBuffer().writeString("aardvarks");
- producer.send(message);
- }
-
- ClientConsumer consumer = session.createConsumer(FailoverTestBase.ADDRESS);
-
- session.start();
-
- for (int i = 0; i < numMessages; i++)
- {
- ClientMessage message2 = consumer.receive();
-
- Assert.assertEquals("aardvarks", message2.getBodyBuffer().readString());
-
- Assert.assertEquals(i, message2.getObjectProperty(new SimpleString("count")));
-
- message2.acknowledge();
- }
-
- ClientMessage message3 = consumer.receiveImmediate();
-
- Assert.assertNull(message3);
-
- return session;
- }
-
- class LatchClusterTopologyListener implements ClusterTopologyListener
- {
- final CountDownLatch latch;
- int liveNodes = 0;
- int backUpNodes = 0;
- List<String> liveNode = new ArrayList<String>();
- List<String> backupNode = new ArrayList<String>();
-
- public LatchClusterTopologyListener(CountDownLatch latch)
- {
- this.latch = latch;
- }
-
- public void nodeUP(String nodeID, Pair<TransportConfiguration, TransportConfiguration> connectorPair, boolean last, int distance)
- {
- if (connectorPair.a != null && !liveNode.contains(connectorPair.a.getName()))
- {
- liveNode.add(connectorPair.a.getName());
- latch.countDown();
- }
- if (connectorPair.b != null && !backupNode.contains(connectorPair.b.getName()))
- {
- backupNode.add(connectorPair.b.getName());
- latch.countDown();
- }
- }
-
- public void nodeDown(String nodeID)
- {
- //To change body of implemented methods use File | Settings | File Templates.
- }
- }
-}
Added: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupsFailoverTestBase.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupsFailoverTestBase.java (rev 0)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupsFailoverTestBase.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2010 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.Assert;
+
+import org.hornetq.api.core.Pair;
+import org.hornetq.api.core.SimpleString;
+import org.hornetq.api.core.TransportConfiguration;
+import org.hornetq.api.core.client.ClientConsumer;
+import org.hornetq.api.core.client.ClientMessage;
+import org.hornetq.api.core.client.ClientProducer;
+import org.hornetq.api.core.client.ClientSession;
+import org.hornetq.api.core.client.ClientSessionFactory;
+import org.hornetq.api.core.client.ClusterTopologyListener;
+import org.hornetq.api.core.client.ServerLocator;
+import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
+import org.hornetq.core.client.impl.ServerLocatorImpl;
+import org.hornetq.core.server.cluster.impl.FakeLockFile;
+import org.hornetq.jms.client.HornetQTextMessage;
+import org.hornetq.tests.integration.cluster.util.TestableServer;
+import org.hornetq.tests.util.ServiceTestBase;
+
+/**
+ * A MultipleBackupsFailoverTestBase
+ *
+ * @author jmesnil
+ *
+ *
+ */
+public abstract class MultipleBackupsFailoverTestBase extends ServiceTestBase
+{
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static -------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ @Override
+ protected void setUp() throws Exception
+ {
+ super.setUp();
+ clearData();
+ FakeLockFile.clearLocks();
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ protected abstract boolean isNetty();
+
+ protected int waitForBackup(long seconds, List<TestableServer> servers, int... nodes)
+ {
+ long time = System.currentTimeMillis();
+ long toWait = seconds * 1000;
+ while (true)
+ {
+ for (int node : nodes)
+ {
+ TestableServer backupServer = servers.get(node);
+ if (backupServer.isInitialised())
+ {
+ return node;
+ }
+ }
+ try
+ {
+ Thread.sleep(100);
+ }
+ catch (InterruptedException e)
+ {
+ // ignore
+ }
+ if (System.currentTimeMillis() > (time + toWait))
+ {
+ fail("backup server never started");
+ }
+ }
+ }
+
+ protected ClientSession sendAndConsume(final ClientSessionFactory sf, final boolean createQueue) throws Exception
+ {
+ ClientSession session = sf.createSession(false, true, true);
+
+ if (createQueue)
+ {
+ session.createQueue(FailoverTestBase.ADDRESS, FailoverTestBase.ADDRESS, null, false);
+ }
+
+ ClientProducer producer = session.createProducer(FailoverTestBase.ADDRESS);
+
+ final int numMessages = 1000;
+
+ for (int i = 0; i < numMessages; i++)
+ {
+ ClientMessage message = session.createMessage(HornetQTextMessage.TYPE,
+ false,
+ 0,
+ System.currentTimeMillis(),
+ (byte)1);
+ message.putIntProperty(new SimpleString("count"), i);
+ message.getBodyBuffer().writeString("aardvarks");
+ producer.send(message);
+ }
+
+ ClientConsumer consumer = session.createConsumer(FailoverTestBase.ADDRESS);
+
+ session.start();
+
+ for (int i = 0; i < numMessages; i++)
+ {
+ ClientMessage message2 = consumer.receive();
+
+ Assert.assertEquals("aardvarks", message2.getBodyBuffer().readString());
+
+ Assert.assertEquals(i, message2.getObjectProperty(new SimpleString("count")));
+
+ message2.acknowledge();
+ }
+
+ ClientMessage message3 = consumer.receiveImmediate();
+
+ Assert.assertNull(message3);
+
+ return session;
+ }
+
+ protected ClientSessionFactoryInternal createSessionFactoryAndWaitForTopology(ServerLocator locator,
+ int topologyMembers) throws Exception
+ {
+ ClientSessionFactoryInternal sf;
+ CountDownLatch countDownLatch = new CountDownLatch(topologyMembers);
+
+ locator.addClusterTopologyListener(new LatchClusterTopologyListener(countDownLatch));
+
+ sf = (ClientSessionFactoryInternal)locator.createSessionFactory();
+
+ boolean ok = countDownLatch.await(5, TimeUnit.SECONDS);
+ assertTrue(ok);
+ return sf;
+ }
+
+ public ServerLocator getServerLocator(int... nodes)
+ {
+ TransportConfiguration[] configs = new TransportConfiguration[nodes.length];
+ for (int i = 0, configsLength = configs.length; i < configsLength; i++)
+ {
+ configs[i] = createTransportConfiguration(isNetty(), false, generateParams(nodes[i], isNetty()));
+ }
+ return new ServerLocatorImpl(true, configs);
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+ class LatchClusterTopologyListener implements ClusterTopologyListener
+ {
+ final CountDownLatch latch;
+
+ int liveNodes = 0;
+
+ int backUpNodes = 0;
+
+ List<String> liveNode = new ArrayList<String>();
+
+ List<String> backupNode = new ArrayList<String>();
+
+ public LatchClusterTopologyListener(CountDownLatch latch)
+ {
+ this.latch = latch;
+ }
+
+ public void nodeUP(String nodeID,
+ Pair<TransportConfiguration, TransportConfiguration> connectorPair,
+ boolean last,
+ int distance)
+ {
+ if (connectorPair.a != null && !liveNode.contains(connectorPair.a.getName()))
+ {
+ liveNode.add(connectorPair.a.getName());
+ latch.countDown();
+ }
+ if (connectorPair.b != null && !backupNode.contains(connectorPair.b.getName()))
+ {
+ backupNode.add(connectorPair.b.getName());
+ latch.countDown();
+ }
+ }
+
+ public void nodeDown(String nodeID)
+ {
+ }
+ }
+}
Added: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java (rev 0)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleLivesMultipleBackupsFailoverTest.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.hornetq.api.core.TransportConfiguration;
+import org.hornetq.api.core.client.ClientSession;
+import org.hornetq.api.core.client.ServerLocator;
+import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
+import org.hornetq.core.config.BackupConnectorConfiguration;
+import org.hornetq.core.config.ClusterConnectionConfiguration;
+import org.hornetq.core.config.Configuration;
+import org.hornetq.tests.integration.cluster.util.SameProcessHornetQServer;
+import org.hornetq.tests.integration.cluster.util.TestableServer;
+
+/**
+ */
+public class MultipleLivesMultipleBackupsFailoverTest extends MultipleBackupsFailoverTestBase
+{
+ protected ArrayList<TestableServer> servers = new ArrayList<TestableServer>(5);
+
+ public void testMultipleFailovers2LiveServers() throws Exception
+ {
+ createLiveConfig(0, 3);
+ createBackupConfig(0, 1, true, 0, 3);
+ createBackupConfig(0, 2, true, 0, 3);
+ createLiveConfig(3, 0);
+ createBackupConfig(3, 4, true, 0, 3);
+ createBackupConfig(3, 5, true, 0, 3);
+ servers.get(0).start();
+ servers.get(3).start();
+ servers.get(1).start();
+ servers.get(2).start();
+ servers.get(4).start();
+ servers.get(5).start();
+ ServerLocator locator = getServerLocator(0);
+
+ locator.setBlockOnNonDurableSend(true);
+ locator.setBlockOnDurableSend(true);
+ locator.setBlockOnAcknowledge(true);
+ locator.setReconnectAttempts(-1);
+ ClientSessionFactoryInternal sf = createSessionFactoryAndWaitForTopology(locator, 4);
+ ClientSession session = sendAndConsume(sf, true);
+
+ servers.get(0).crash(session);
+
+ int liveAfter0 = waitForBackup(10000, servers, 1, 2);
+
+ ServerLocator locator2 = getServerLocator(3);
+ locator2.setBlockOnNonDurableSend(true);
+ locator2.setBlockOnDurableSend(true);
+ locator2.setBlockOnAcknowledge(true);
+ locator2.setReconnectAttempts(-1);
+ ClientSessionFactoryInternal sf2 = createSessionFactoryAndWaitForTopology(locator2, 4);
+ ClientSession session2 = sendAndConsume(sf2, true);
+ servers.get(3).crash(session2);
+ int liveAfter3 = waitForBackup(10000, servers, 4, 5);
+
+ if (liveAfter0 == 2)
+ {
+ servers.get(1).stop();
+ servers.get(2).stop();
+ }
+ else
+ {
+ servers.get(2).stop();
+ servers.get(1).stop();
+ }
+
+ if (liveAfter3 == 4)
+ {
+ servers.get(5).stop();
+ servers.get(4).stop();
+ }
+ else
+ {
+ servers.get(4).stop();
+ servers.get(5).stop();
+ }
+ }
+
+ protected void createBackupConfig(int liveNode, int nodeid, boolean createClusterConnections, int... nodes)
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(createTransportConfiguration(isNetty(), true, generateParams(nodeid, isNetty())));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(true);
+ config1.setBackup(true);
+ config1.setClustered(true);
+ List<String> staticConnectors = new ArrayList<String>();
+
+ for (int node : nodes)
+ {
+ TransportConfiguration liveConnector = createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
+ config1.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+ staticConnectors.add(liveConnector.getName());
+ }
+ TransportConfiguration backupConnector = createTransportConfiguration(isNetty(), false, generateParams(nodeid, isNetty()));
+ List<String> pairs = null;
+ ClusterConnectionConfiguration ccc1 = new ClusterConnectionConfiguration("cluster1", "jms", backupConnector.getName(), -1, false, false, 1, 1,
+ createClusterConnections? staticConnectors:pairs);
+ config1.getClusterConfigurations().add(ccc1);
+ BackupConnectorConfiguration connectorConfiguration = new BackupConnectorConfiguration(staticConnectors, backupConnector.getName());
+ config1.setBackupConnectorConfiguration(connectorConfiguration);
+ config1.getConnectorConfigurations().put(backupConnector.getName(), backupConnector);
+
+
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_" + liveNode);
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_" + liveNode);
+ config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
+ config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);
+
+ servers.add(new SameProcessHornetQServer(createFakeLockServer(true, config1)));
+ }
+
+ protected void createLiveConfig(int liveNode, int ... otherLiveNodes)
+ {
+ TransportConfiguration liveConnector = createTransportConfiguration(isNetty(), false, generateParams(liveNode, isNetty()));
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(createTransportConfiguration(isNetty(), true, generateParams(liveNode, isNetty())));
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(true);
+ config0.setClustered(true);
+ List<String> pairs = new ArrayList<String>();
+ for (int node : otherLiveNodes)
+ {
+ TransportConfiguration otherLiveConnector = createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
+ config0.getConnectorConfigurations().put(otherLiveConnector.getName(), otherLiveConnector);
+ pairs.add(otherLiveConnector.getName());
+
+ }
+ ClusterConnectionConfiguration ccc0 = new ClusterConnectionConfiguration("cluster1", "jms", liveConnector.getName(), -1, false, false, 1, 1,
+ pairs);
+ config0.getClusterConfigurations().add(ccc0);
+ config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+
+ config0.setBindingsDirectory(config0.getBindingsDirectory() + "_" + liveNode);
+ config0.setJournalDirectory(config0.getJournalDirectory() + "_" + liveNode);
+ config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
+ config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);
+
+ servers.add(new SameProcessHornetQServer(createFakeLockServer(true, config0)));
+ }
+
+ protected boolean isNetty()
+ {
+ return false;
+ }
+}
Added: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteMultipleLivesMultipleBackupsFailoverTest.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteMultipleLivesMultipleBackupsFailoverTest.java (rev 0)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteMultipleLivesMultipleBackupsFailoverTest.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2010 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.hornetq.api.core.TransportConfiguration;
+import org.hornetq.core.config.BackupConnectorConfiguration;
+import org.hornetq.core.config.ClusterConnectionConfiguration;
+import org.hornetq.core.config.Configuration;
+import org.hornetq.core.config.impl.ConfigurationImpl;
+import org.hornetq.core.server.JournalType;
+import org.hornetq.tests.integration.cluster.util.RemoteProcessHornetQServer;
+import org.hornetq.tests.integration.cluster.util.RemoteServerConfiguration;
+
+/**
+ * A RemoteMultipleLivesMultipleBackupsFailoverTest
+ *
+ * @author jmesnil
+ *
+ *
+ */
+public class RemoteMultipleLivesMultipleBackupsFailoverTest extends MultipleLivesMultipleBackupsFailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private static Map<Integer, String> lives = new HashMap<Integer, String>();
+ private static Map<Integer, String> backups = new HashMap<Integer, String>();
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ @Override
+ protected void setUp() throws Exception
+ {
+ super.setUp();
+
+ lives.put(0, LiveServerConfiguration0.class.getName());
+ lives.put(3, LiveServerConfiguration3.class.getName());
+
+ backups.put(1, SharedBackupServerConfiguration1.class.getName());
+ backups.put(2, SharedBackupServerConfiguration2.class.getName());
+ backups.put(4, SharedBackupServerConfiguration4.class.getName());
+ backups.put(5, SharedBackupServerConfiguration5.class.getName());
+ }
+
+ protected boolean isNetty()
+ {
+ return true;
+ }
+
+ @Override
+ protected void createLiveConfig(int liveNode, int... otherLiveNodes)
+ {
+ servers.add(new RemoteProcessHornetQServer(lives.get(liveNode)));
+ }
+
+ @Override
+ protected void createBackupConfig(int liveNode, int nodeid, boolean createClusterConnections, int... nodes)
+ {
+ servers.add(new RemoteProcessHornetQServer(backups.get(nodeid)));
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+ public static class LiveServerConfiguration0 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createLiveConfiguration(0, 3);
+ }
+ }
+
+ public static class LiveServerConfiguration3 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createLiveConfiguration(3, 0);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration1 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(0, 1, true, 0, 3);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration2 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(0, 2, true, 0, 3);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration4 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(3, 4, true, 0, 3);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration5 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(3, 5, true, 0, 3);
+ }
+ }
+
+ protected static Configuration createBackupConfiguration(int liveNode, int nodeid, boolean createClusterConnections, int... nodes)
+ {
+ Configuration config1 = new ConfigurationImpl();
+ config1.getAcceptorConfigurations().add(createTransportConfiguration(true, true, generateParams(nodeid, true)));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(true);
+ config1.setBackup(true);
+ config1.setJournalType(JournalType.NIO);
+ config1.setClustered(true);
+ List<String> staticConnectors = new ArrayList<String>();
+
+ for (int node : nodes)
+ {
+ TransportConfiguration liveConnector = createTransportConfiguration(true, false, generateParams(node, true));
+ config1.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+ staticConnectors.add(liveConnector.getName());
+ }
+ TransportConfiguration backupConnector = createTransportConfiguration(true, false, generateParams(nodeid, true));
+ List<String> pairs = null;
+ ClusterConnectionConfiguration ccc1 = new ClusterConnectionConfiguration("cluster1", "jms", backupConnector.getName(), -1, false, false, 1, 1,
+ createClusterConnections? staticConnectors:pairs);
+ config1.getClusterConfigurations().add(ccc1);
+ BackupConnectorConfiguration connectorConfiguration = new BackupConnectorConfiguration(staticConnectors, backupConnector.getName());
+ config1.setBackupConnectorConfiguration(connectorConfiguration);
+ config1.getConnectorConfigurations().put(backupConnector.getName(), backupConnector);
+
+
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_" + liveNode);
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_" + liveNode);
+ config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
+ config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);
+
+ return config1;
+ }
+
+ protected static Configuration createLiveConfiguration(int liveNode, int... otherLiveNodes)
+ {
+ Configuration config0 = new ConfigurationImpl();
+ TransportConfiguration liveConnector = createTransportConfiguration(true, false, generateParams(liveNode, true));
+ config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+ config0.getAcceptorConfigurations().add(createTransportConfiguration(true, true, generateParams(liveNode, true)));
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(true);
+ config0.setJournalType(JournalType.NIO);
+ config0.setClustered(true);
+ List<String> pairs = new ArrayList<String>();
+ for (int node : otherLiveNodes)
+ {
+ TransportConfiguration otherLiveConnector = createTransportConfiguration(true, false, generateParams(node, true));
+ config0.getConnectorConfigurations().put(otherLiveConnector.getName(), otherLiveConnector);
+ pairs.add(otherLiveConnector.getName());
+
+ }
+ ClusterConnectionConfiguration ccc0 = new ClusterConnectionConfiguration("cluster1", "jms", liveConnector.getName(), -1, false, false, 1, 1,
+ pairs);
+ config0.getClusterConfigurations().add(ccc0);
+ config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+
+ config0.setBindingsDirectory(config0.getBindingsDirectory() + "_" + liveNode);
+ config0.setJournalDirectory(config0.getJournalDirectory() + "_" + liveNode);
+ config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
+ config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);
+
+ return config0;
+ }
+}
Added: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteSingleLiveMultipleBackupsFailoverTest.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteSingleLiveMultipleBackupsFailoverTest.java (rev 0)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/RemoteSingleLiveMultipleBackupsFailoverTest.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2010 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.hornetq.api.core.TransportConfiguration;
+import org.hornetq.core.config.BackupConnectorConfiguration;
+import org.hornetq.core.config.ClusterConnectionConfiguration;
+import org.hornetq.core.config.Configuration;
+import org.hornetq.core.config.impl.ConfigurationImpl;
+import org.hornetq.core.server.JournalType;
+import org.hornetq.tests.integration.cluster.util.RemoteProcessHornetQServer;
+import org.hornetq.tests.integration.cluster.util.RemoteServerConfiguration;
+
+public class RemoteSingleLiveMultipleBackupsFailoverTest extends SingleLiveMultipleBackupsFailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private static Map<Integer, String> backups = new HashMap<Integer, String>();
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ @Override
+ protected void setUp() throws Exception
+ {
+ super.setUp();
+
+ backups.put(1, SharedBackupServerConfiguration1.class.getName());
+ backups.put(2, SharedBackupServerConfiguration2.class.getName());
+ backups.put(3, SharedBackupServerConfiguration3.class.getName());
+ backups.put(4, SharedBackupServerConfiguration4.class.getName());
+ backups.put(5, SharedBackupServerConfiguration5.class.getName());
+ }
+
+ protected boolean isNetty()
+ {
+ return true;
+ }
+
+ @Override
+ protected void createLiveConfig(int liveNode, int... otherLiveNodes)
+ {
+ servers.add(new RemoteProcessHornetQServer(SharedLiveServerConfiguration.class.getName()));
+ }
+
+ @Override
+ protected void createBackupConfig(int liveNode, int nodeid, boolean createClusterConnections, int... nodes)
+ {
+ servers.add(new RemoteProcessHornetQServer(backups.get(nodeid)));
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+ public static class SharedLiveServerConfiguration extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ int liveNode = 0;
+ int[] otherLiveNodes = new int[0];
+
+ Configuration config0 = new ConfigurationImpl();
+ TransportConfiguration liveConnector = createTransportConfiguration(true, false, generateParams(liveNode, true));
+ config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+ config0.getAcceptorConfigurations().add(createTransportConfiguration(true, true, generateParams(liveNode, true)));
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(true);
+ config0.setJournalType(JournalType.NIO);
+ config0.setClustered(true);
+ List<String> pairs = new ArrayList<String>();
+ for (int node : otherLiveNodes)
+ {
+ TransportConfiguration otherLiveConnector = createTransportConfiguration(true, false, generateParams(node, true));
+ config0.getConnectorConfigurations().put(otherLiveConnector.getName(), otherLiveConnector);
+ pairs.add(otherLiveConnector.getName());
+
+ }
+ ClusterConnectionConfiguration ccc0 = new ClusterConnectionConfiguration("cluster1", "jms", liveConnector.getName(), -1, false, false, 1, 1,
+ pairs);
+ config0.getClusterConfigurations().add(ccc0);
+ config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+
+ config0.setBindingsDirectory(config0.getBindingsDirectory() + "_" + liveNode);
+ config0.setJournalDirectory(config0.getJournalDirectory() + "_" + liveNode);
+ config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
+ config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);
+
+ return config0;
+ }
+ }
+
+ public static class SharedBackupServerConfiguration1 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(0, 1, false, 0, 2, 3, 4, 5);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration2 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(0, 2, false, 0, 1, 3, 4, 5);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration3 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(0, 3, false, 0, 1, 2, 4, 5);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration4 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(0, 4, false, 0, 1, 2, 3, 5);
+ }
+ }
+
+ public static class SharedBackupServerConfiguration5 extends RemoteServerConfiguration
+ {
+ @Override
+ public Configuration getConfiguration()
+ {
+ return createBackupConfiguration(0, 5, false, 0, 1, 2, 3, 4);
+ }
+ }
+
+ protected static Configuration createBackupConfiguration(int liveNode, int nodeid, boolean createClusterConnections, int... nodes)
+ {
+ Configuration config1 = new ConfigurationImpl();
+ config1.getAcceptorConfigurations().add(createTransportConfiguration(true, true, generateParams(nodeid, true)));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(true);
+ config1.setBackup(true);
+ config1.setJournalType(JournalType.NIO);
+ config1.setClustered(true);
+ List<String> staticConnectors = new ArrayList<String>();
+
+ for (int node : nodes)
+ {
+ TransportConfiguration liveConnector = createTransportConfiguration(true, false, generateParams(node, true));
+ config1.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+ staticConnectors.add(liveConnector.getName());
+ }
+ TransportConfiguration backupConnector = createTransportConfiguration(true, false, generateParams(nodeid, true));
+ List<String> pairs = null;
+ ClusterConnectionConfiguration ccc1 = new ClusterConnectionConfiguration("cluster1", "jms", backupConnector.getName(), -1, false, false, 1, 1,
+ createClusterConnections? staticConnectors:pairs);
+ config1.getClusterConfigurations().add(ccc1);
+ BackupConnectorConfiguration connectorConfiguration = new BackupConnectorConfiguration(staticConnectors, backupConnector.getName());
+ config1.setBackupConnectorConfiguration(connectorConfiguration);
+ config1.getConnectorConfigurations().put(backupConnector.getName(), backupConnector);
+
+
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_" + liveNode);
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_" + liveNode);
+ config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
+ config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);
+
+ return config1;
+ }
+}
Copied: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java (from rev 9654, branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/MultipleBackupFailoverTest.java)
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java (rev 0)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/failover/SingleLiveMultipleBackupsFailoverTest.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.hornetq.api.core.TransportConfiguration;
+import org.hornetq.api.core.client.ClientSession;
+import org.hornetq.api.core.client.ServerLocator;
+import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
+import org.hornetq.core.config.BackupConnectorConfiguration;
+import org.hornetq.core.config.ClusterConnectionConfiguration;
+import org.hornetq.core.config.Configuration;
+import org.hornetq.tests.integration.cluster.util.SameProcessHornetQServer;
+import org.hornetq.tests.integration.cluster.util.TestableServer;
+
+/**
+ */
+public class SingleLiveMultipleBackupsFailoverTest extends MultipleBackupsFailoverTestBase
+{
+
+ protected ArrayList<TestableServer> servers = new ArrayList<TestableServer>(5);
+
+ public void testMultipleFailovers() throws Exception
+ {
+ createLiveConfig(0);
+ createBackupConfig(0, 1, false, 0, 2, 3, 4, 5);
+ createBackupConfig(0, 2, false, 0, 1, 3, 4, 5);
+ createBackupConfig(0, 3, false, 0, 1, 2, 4, 5);
+ createBackupConfig(0, 4, false, 0, 1, 2, 3, 5);
+ createBackupConfig(0, 5, false, 0, 1, 2, 3, 4);
+ servers.get(0).start();
+ servers.get(1).start();
+ servers.get(2).start();
+ servers.get(3).start();
+ servers.get(4).start();
+ servers.get(5).start();
+
+ ServerLocator locator = getServerLocator(0);
+
+ locator.setBlockOnNonDurableSend(true);
+ locator.setBlockOnDurableSend(true);
+ locator.setBlockOnAcknowledge(true);
+ locator.setReconnectAttempts(-1);
+ ClientSessionFactoryInternal sf = createSessionFactoryAndWaitForTopology(locator, 2);
+ int backupNode;
+ ClientSession session = sendAndConsume(sf, true);
+ System.out.println("failing node 0");
+ servers.get(0).crash(session);
+
+ session.close();
+ backupNode = waitForBackup(5, servers, 1, 2, 3, 4, 5);
+ session = sendAndConsume(sf, false);
+ System.out.println("failing node " + backupNode);
+ servers.get(backupNode).crash(session);
+
+ session.close();
+ backupNode = waitForBackup(5, servers, 1, 2, 3, 4, 5);
+ session = sendAndConsume(sf, false);
+ System.out.println("failing node " + backupNode);
+ servers.get(backupNode).crash(session);
+
+ session.close();
+ backupNode = waitForBackup(5, servers, 1, 2, 3, 4, 5);
+ session = sendAndConsume(sf, false);
+ System.out.println("failing node " + backupNode);
+ servers.get(backupNode).crash(session);
+
+ session.close();
+ backupNode = waitForBackup(5, servers, 1, 2, 3, 4, 5);
+ session = sendAndConsume(sf, false);
+ System.out.println("failing node " + backupNode);
+ servers.get(backupNode).crash(session);
+
+ session.close();
+ backupNode = waitForBackup(5, servers, 1, 2, 3, 4, 5);
+ session = sendAndConsume(sf, false);
+ session.close();
+ servers.get(backupNode).stop();
+ }
+
+ protected void createBackupConfig(int liveNode, int nodeid, boolean createClusterConnections, int... nodes)
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(createTransportConfiguration(isNetty(), true, generateParams(nodeid, isNetty())));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(true);
+ config1.setBackup(true);
+ config1.setClustered(true);
+ List<String> staticConnectors = new ArrayList<String>();
+
+ for (int node : nodes)
+ {
+ TransportConfiguration liveConnector = createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
+ config1.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+ staticConnectors.add(liveConnector.getName());
+ }
+ TransportConfiguration backupConnector = createTransportConfiguration(isNetty(), false, generateParams(nodeid, isNetty()));
+ List<String> pairs = null;
+ ClusterConnectionConfiguration ccc1 = new ClusterConnectionConfiguration("cluster1", "jms", backupConnector.getName(), -1, false, false, 1, 1,
+ createClusterConnections? staticConnectors:pairs);
+ config1.getClusterConfigurations().add(ccc1);
+ BackupConnectorConfiguration connectorConfiguration = new BackupConnectorConfiguration(staticConnectors, backupConnector.getName());
+ config1.setBackupConnectorConfiguration(connectorConfiguration);
+ config1.getConnectorConfigurations().put(backupConnector.getName(), backupConnector);
+
+
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_" + liveNode);
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_" + liveNode);
+ config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
+ config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);
+
+ servers.add(new SameProcessHornetQServer(createFakeLockServer(true, config1)));
+ }
+
+ protected void createLiveConfig(int liveNode, int ... otherLiveNodes)
+ {
+ TransportConfiguration liveConnector = createTransportConfiguration(isNetty(), false, generateParams(liveNode, isNetty()));
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(createTransportConfiguration(isNetty(), true, generateParams(liveNode, isNetty())));
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(true);
+ config0.setClustered(true);
+ List<String> pairs = new ArrayList<String>();
+ for (int node : otherLiveNodes)
+ {
+ TransportConfiguration otherLiveConnector = createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
+ config0.getConnectorConfigurations().put(otherLiveConnector.getName(), otherLiveConnector);
+ pairs.add(otherLiveConnector.getName());
+
+ }
+ ClusterConnectionConfiguration ccc0 = new ClusterConnectionConfiguration("cluster1", "jms", liveConnector.getName(), -1, false, false, 1, 1,
+ pairs);
+ config0.getClusterConfigurations().add(ccc0);
+ config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
+
+ config0.setBindingsDirectory(config0.getBindingsDirectory() + "_" + liveNode);
+ config0.setJournalDirectory(config0.getJournalDirectory() + "_" + liveNode);
+ config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
+ config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);
+
+ servers.add(new SameProcessHornetQServer(createFakeLockServer(true, config0)));
+ }
+
+ protected boolean isNetty()
+ {
+ return false;
+ }
+
+
+
+
+}
Modified: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java 2010-09-13 07:44:26 UTC (rev 9674)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServer.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -34,7 +34,9 @@
private String configurationClassName;
private Process serverProcess;
-
+ private boolean initialised = false;
+ private CountDownLatch initLatch;
+
public RemoteProcessHornetQServer(String configurationClassName)
{
this.configurationClassName = configurationClassName;
@@ -42,12 +44,37 @@
public boolean isInitialised()
{
- return (serverProcess != null);
+ if (serverProcess == null)
+ {
+ return false;
+ }
+ try
+ {
+ initLatch = new CountDownLatch(1);
+ RemoteProcessHornetQServerSupport.isInitialised(serverProcess);
+ boolean ok = initLatch.await(10, TimeUnit.SECONDS);
+ if (ok)
+ {
+ return initialised;
+ }
+ }
+ catch (Exception e)
+ {
+ e.printStackTrace();
+ return false;
+ }
+ return false;
}
+
+ public void setInitialised(boolean initialised)
+ {
+ this.initialised = initialised;
+ initLatch.countDown();
+ }
public void start() throws Exception
{
- serverProcess = RemoteProcessHornetQServerSupport.start(configurationClassName);
+ serverProcess = RemoteProcessHornetQServerSupport.start(configurationClassName, this);
Thread.sleep(2000);
}
Modified: branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServerSupport.java
===================================================================
--- branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServerSupport.java 2010-09-13 07:44:26 UTC (rev 9674)
+++ branches/2_2_0_HA_Improvements/tests/src/org/hornetq/tests/integration/cluster/util/RemoteProcessHornetQServerSupport.java 2010-09-13 13:35:19 UTC (rev 9675)
@@ -60,8 +60,12 @@
String line = null;
while ((line = br.readLine()) != null)
{
- if ("STOP".equals(line.trim()))
+ if ("INIT?".equals(line.trim()))
{
+ System.out.println("INIT:" + server.isInitialised());
+ }
+ else if ("STOP".equals(line.trim()))
+ {
server.stop();
System.out.println("Server stopped");
System.exit(0);
@@ -90,7 +94,7 @@
}
- public static Process start(String serverClassName) throws Exception
+ public static Process start(String serverClassName, final RemoteProcessHornetQServer remoteProcessHornetQServer) throws Exception
{
String[] vmArgs = new String[] { "-Dorg.hornetq.logger-delegate-factory-class-name=org.hornetq.jms.SysoutLoggerDelegateFactory" };
Process serverProcess = SpawnedVMSupport.spawnVM(RemoteProcessHornetQServerSupport.class.getName(), vmArgs, false, serverClassName);
@@ -115,6 +119,11 @@
while ((line = br.readLine()) != null)
{
System.out.println("SERVER: " + line);
+ if (line.startsWith("INIT:"))
+ {
+ boolean init = Boolean.parseBoolean(line.substring("INIT:".length(), line.length()));
+ remoteProcessHornetQServer.setInitialised(init);
+ }
}
}
catch (Exception e)
@@ -146,6 +155,13 @@
serverProcess.destroy();
}
}
+
+ public static void isInitialised(Process serverProcess) throws Exception
+ {
+ OutputStreamWriter osw = new OutputStreamWriter(serverProcess.getOutputStream());
+ osw.write("INIT?\n");
+ osw.flush();
+ }
public static void crash(Process serverProcess) throws Exception
{
13 years, 9 months
JBoss hornetq SVN: r9674 - trunk/examples/jms/non-transaction-failover.
by do-not-reply@jboss.org
Author: gaohoward
Date: 2010-09-13 03:44:26 -0400 (Mon, 13 Sep 2010)
New Revision: 9674
Modified:
trunk/examples/jms/non-transaction-failover/readme.html
Log:
typo
Modified: trunk/examples/jms/non-transaction-failover/readme.html
===================================================================
--- trunk/examples/jms/non-transaction-failover/readme.html 2010-09-11 22:16:35 UTC (rev 9673)
+++ trunk/examples/jms/non-transaction-failover/readme.html 2010-09-13 07:44:26 UTC (rev 9674)
@@ -10,7 +10,7 @@
<p>This example demonstrates two servers coupled as a live-backup pair for high availability (HA), and a client
connection failing over from live to backup when the live server is crashed.</p>
- <p>Failover behavior differs wether the JMS session is transacter or not.</p>
+ <p>Failover behavior differs wether the JMS session is transacted or not.</p>
<p>When a <em>non-transacted</em> JMS session is used, once and only once delivery is not guaranteed
and it is possible some messages will be lost or delivered twice, depending when the failover to the backup server occurs.</p>
<p>It is up to the client to deal with such cases. To ensure once and only once delivery, the client must
@@ -133,4 +133,4 @@
</ol>
</body>
-</html>
\ No newline at end of file
+</html>
13 years, 9 months
JBoss hornetq SVN: r9673 - trunk/native/src.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-11 18:16:35 -0400 (Sat, 11 Sep 2010)
New Revision: 9673
Modified:
trunk/native/src/AsyncFile.cpp
Log:
tweak on native code
Modified: trunk/native/src/AsyncFile.cpp
===================================================================
--- trunk/native/src/AsyncFile.cpp 2010-09-11 22:10:16 UTC (rev 9672)
+++ trunk/native/src/AsyncFile.cpp 2010-09-11 22:16:35 UTC (rev 9673)
@@ -312,9 +312,9 @@
long AsyncFile::getSize()
{
- struct stat64 statBuffer;
+ struct stat statBuffer;
- if (fstat64(fileHandle, &statBuffer) < 0)
+ if (fstat(fileHandle, &statBuffer) < 0)
{
return -1l;
}
13 years, 9 months
JBoss hornetq SVN: r9672 - branches/Branch_2_1/native/src.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-11 18:10:16 -0400 (Sat, 11 Sep 2010)
New Revision: 9672
Modified:
branches/Branch_2_1/native/src/AsyncFile.cpp
Log:
small tweak on native code
Modified: branches/Branch_2_1/native/src/AsyncFile.cpp
===================================================================
--- branches/Branch_2_1/native/src/AsyncFile.cpp 2010-09-10 20:41:46 UTC (rev 9671)
+++ branches/Branch_2_1/native/src/AsyncFile.cpp 2010-09-11 22:10:16 UTC (rev 9672)
@@ -312,9 +312,9 @@
long AsyncFile::getSize()
{
- struct stat64 statBuffer;
+ struct stat statBuffer;
- if (fstat64(fileHandle, &statBuffer) < 0)
+ if (fstat(fileHandle, &statBuffer) < 0)
{
return -1l;
}
13 years, 9 months
JBoss hornetq SVN: r9670 - trunk/tests/src/org/hornetq/tests/util.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-10 13:52:53 -0400 (Fri, 10 Sep 2010)
New Revision: 9670
Modified:
trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java
Log:
Changing the UDP IP (having each branch on its own UDP)
Modified: trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java 2010-09-10 17:49:00 UTC (rev 9669)
+++ trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java 2010-09-10 17:52:53 UTC (rev 9670)
@@ -100,7 +100,7 @@
protected static String getUDPDiscoveryAddress()
{
- return System.getProperty("TEST-UDP-ADDRESS", "230.10.20.1");
+ return System.getProperty("TEST-UDP-ADDRESS", "230.1.2.3");
}
protected static String getUDPDiscoveryAddress(int variant)
13 years, 9 months
JBoss hornetq SVN: r9669 - branches/Branch_2_1.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2010-09-10 13:49:00 -0400 (Fri, 10 Sep 2010)
New Revision: 9669
Modified:
branches/Branch_2_1/merge-activity.txt
Log:
update on merge-activity.txt
Modified: branches/Branch_2_1/merge-activity.txt
===================================================================
--- branches/Branch_2_1/merge-activity.txt 2010-09-10 17:44:13 UTC (rev 9668)
+++ branches/Branch_2_1/merge-activity.txt 2010-09-10 17:49:00 UTC (rev 9669)
@@ -11,3 +11,8 @@
- 30-aug-2010 - clebert - merge from trunk -r9590:9598
- 31-aug-2010 - jmesnil - merge from trunk -r9615:9616
+
+- 10-sep-2010 - clebert - merge from this branch to trunk, r9591-9608, r9612-9659, r9660-9662
+ trunk is aligned with all the changes from Branch_2_1 up to r9668
+
+TODO: Bring changes from HORNETQ-469 as soon as it's stable (remove this line as soon as it's done)
13 years, 9 months