[jboss-cvs] JBoss Messaging SVN: r2808 - in trunk: src/etc/xmdesc and 12 other directories.
jboss-cvs-commits at lists.jboss.org
jboss-cvs-commits at lists.jboss.org
Wed Jun 27 16:23:20 EDT 2007
Author: timfox
Date: 2007-06-27 16:23:20 -0400 (Wed, 27 Jun 2007)
New Revision: 2808
Added:
trunk/src/main/org/jboss/messaging/util/ConcurrentHashSet.java
trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedQueueTest.java
trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedRequestResponseTest.java
trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedTopicTest.java
Removed:
trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedDestinationsTest.java
trunk/tests/src/org/jboss/test/messaging/jms/clustering/RequestResponseWithPullTest.java
Modified:
trunk/src/etc/server/default/deploy/db2-persistence-service.xml
trunk/src/etc/server/default/deploy/mssql-persistence-service.xml
trunk/src/etc/server/default/deploy/mysql-persistence-service.xml
trunk/src/etc/server/default/deploy/oracle-persistence-service.xml
trunk/src/etc/server/default/deploy/postgresql-persistence-service.xml
trunk/src/etc/server/default/deploy/sybase-persistence-service.xml
trunk/src/etc/xmdesc/MessagingPostOffice-xmbean.xml
trunk/src/main/org/jboss/jms/server/connectionmanager/SimpleConnectionManager.java
trunk/src/main/org/jboss/jms/server/endpoint/ServerConsumerEndpoint.java
trunk/src/main/org/jboss/jms/server/endpoint/ServerSessionEndpoint.java
trunk/src/main/org/jboss/messaging/core/contract/Binding.java
trunk/src/main/org/jboss/messaging/core/contract/JChannelFactory.java
trunk/src/main/org/jboss/messaging/core/impl/ChannelSupport.java
trunk/src/main/org/jboss/messaging/core/impl/ClusterRoundRobinDistributor.java
trunk/src/main/org/jboss/messaging/core/impl/MessagingQueue.java
trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/ClusterConnectionManager.java
trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/MessageSucker.java
trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/MultiplexerJChannelFactory.java
trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/XMLJChannelFactory.java
trunk/src/main/org/jboss/messaging/core/impl/postoffice/GroupMember.java
trunk/src/main/org/jboss/messaging/core/impl/postoffice/MessagingPostOffice.java
trunk/src/main/org/jboss/messaging/core/impl/postoffice/PostOfficeAddressInfo.java
trunk/src/main/org/jboss/messaging/core/jmx/MessagingPostOfficeService.java
trunk/tests/build.xml
trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/ClusteredPersistenceServiceConfigFileJChannelFactory.java
trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/SimpleJChannelFactory.java
trunk/tests/src/org/jboss/test/messaging/jms/clustering/HATest.java
trunk/tests/src/org/jboss/test/messaging/jms/clustering/MergeQueueTest.java
trunk/tests/src/org/jboss/test/messaging/jms/clustering/TemporaryDestinationTest.java
Log:
Clustering pull interim commit
Modified: trunk/src/etc/server/default/deploy/db2-persistence-service.xml
===================================================================
--- trunk/src/etc/server/default/deploy/db2-persistence-service.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/etc/server/default/deploy/db2-persistence-service.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -105,10 +105,10 @@
<attribute name="CreateTablesOnStartup">true</attribute>
<attribute name="SqlProperties"><![CDATA[
-CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID BIGINT, CLUSTERED CHAR(1))
-INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED) VALUES (?, ?, ?, ?, ?, ?, ?)
+CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID BIGINT, CLUSTERED CHAR(1), ALL_NODES CHAR(1))
+INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
DELETE_BINDING=DELETE FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=? AND QUEUE_NAME=?
-LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
+LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
]]></attribute>
<!-- This post office is clustered. If you don't want a clustered post office then set to false -->
@@ -132,14 +132,14 @@
<!-- Enable this when the JGroups multiplexer comes of age
<attribute name="ChannelFactoryName">jgroups.mux:name=Multiplexer</attribute>
- <attribute name="SyncChannelName">udp-sync</attribute>
- <attribute name="AsyncChannelName">udp</attribute>
+ <attribute name="ControlChannelName">udp-sync</attribute>
+ <attribute name="DataChannelName">udp</attribute>
<attribute name="ChannelPartitionName">${jboss.partition.name:DefaultPartition}-JMS</attribute>
-->
<!-- JGroups stack configuration for the data channel - used when casting messages across the cluster -->
- <attribute name="AsyncChannelConfig">
+ <attribute name="DataChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45567" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
@@ -162,7 +162,7 @@
<!-- JGroups stack configuration to use for the control channel - used for bind/unbind requests amongst others -->
- <attribute name="SyncChannelConfig">
+ <attribute name="ControlChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45568" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
Modified: trunk/src/etc/server/default/deploy/mssql-persistence-service.xml
===================================================================
--- trunk/src/etc/server/default/deploy/mssql-persistence-service.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/etc/server/default/deploy/mssql-persistence-service.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -108,10 +108,10 @@
<attribute name="CreateTablesOnStartup">true</attribute>
<attribute name="SqlProperties"><![CDATA[
-CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID SMALLINT, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID INTEGER, CLSTERED CHAR(1))
-INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED) VALUES (?, ?, ?, ?, ?, ?, ?)
+CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID SMALLINT, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID INTEGER, CLSTERED CHAR(1), ALL_NODES CHAR(1))
+INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED, ALL_NODES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
DELETE_BINDING=DELETE FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=? AND QUEUE_NAME=?
-LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
+LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED, ALL_NODES FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
]]></attribute>
<!-- This post office is clustered. If you don't want a clustered post office then set to false -->
@@ -135,14 +135,14 @@
<!-- Enable this when the JGroups multiplexer comes of age
<attribute name="ChannelFactoryName">jgroups.mux:name=Multiplexer</attribute>
- <attribute name="SyncChannelName">udp-sync</attribute>
- <attribute name="AsyncChannelName">udp</attribute>
+ <attribute name="ControlChannelName">udp-sync</attribute>
+ <attribute name="DataChannelName">udp</attribute>
<attribute name="ChannelPartitionName">${jboss.partition.name:DefaultPartition}-JMS</attribute>
-->
<!-- JGroups stack configuration for the data channel - used when casting messages across the cluster -->
- <attribute name="AsyncChannelConfig">
+ <attribute name="DataChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45567" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
@@ -165,7 +165,7 @@
<!-- JGroups stack configuration to use for the control channel - used for bind/unbind requests amongst others -->
- <attribute name="SyncChannelConfig">
+ <attribute name="ControlChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45568" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
Modified: trunk/src/etc/server/default/deploy/mysql-persistence-service.xml
===================================================================
--- trunk/src/etc/server/default/deploy/mysql-persistence-service.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/etc/server/default/deploy/mysql-persistence-service.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -108,10 +108,10 @@
<attribute name="CreateTablesOnStartup">true</attribute>
<attribute name="SqlProperties"><![CDATA[
-CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID BIGINT, CLUSTERED CHAR(1))
-INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED) VALUES (?, ?, ?, ?, ?, ?, ?)
+CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID BIGINT, CLUSTERED CHAR(1), ALL_NODES CHAR(1))
+INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
DELETE_BINDING=DELETE FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=? AND QUEUE_NAME=?
-LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
+LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
]]></attribute>
<!-- This post office is clustered. If you don't want a clustered post office then set to false -->
@@ -131,18 +131,18 @@
<!-- Max time to wait for a synchronous call to node members using the MessageDispatcher -->
- <attribute name="CastTimeout">5000</attribute>
+ <attribute name="CastTimeout">50000</attribute>
<!-- Enable this when the JGroups multiplexer comes of age
<attribute name="ChannelFactoryName">jgroups.mux:name=Multiplexer</attribute>
- <attribute name="SyncChannelName">udp-sync</attribute>
- <attribute name="AsyncChannelName">udp</attribute>
+ <attribute name="ControlChannelName">udp-sync</attribute>
+ <attribute name="DataChannelName">udp</attribute>
<attribute name="ChannelPartitionName">${jboss.partition.name:DefaultPartition}-JMS</attribute>
-->
<!-- JGroups stack configuration for the data channel - used when casting messages across the cluster -->
- <attribute name="AsyncChannelConfig">
+ <attribute name="DataChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45567" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
@@ -165,7 +165,7 @@
<!-- JGroups stack configuration to use for the control channel - used for bind/unbind requests amongst others -->
- <attribute name="SyncChannelConfig">
+ <attribute name="ControlChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45568" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
Modified: trunk/src/etc/server/default/deploy/oracle-persistence-service.xml
===================================================================
--- trunk/src/etc/server/default/deploy/oracle-persistence-service.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/etc/server/default/deploy/oracle-persistence-service.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -108,10 +108,10 @@
<attribute name="CreateTablesOnStartup">true</attribute>
<attribute name="SqlProperties"><![CDATA[
-CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR2(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR2(1023), COND VARCHAR2(1023), SELECTOR VARCHAR2(1023), CHANNEL_ID INTEGER, CLUSTERED CHAR(1))
-INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED) VALUES (?, ?, ?, ?, ?, ?, ?)
+CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR2(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR2(1023), COND VARCHAR2(1023), SELECTOR VARCHAR2(1023), CHANNEL_ID INTEGER, CLUSTERED CHAR(1), ALL_NODES CHAR(1))
+INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
DELETE_BINDING=DELETE FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=? AND QUEUE_NAME=?
-LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
+LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
]]></attribute>
<!-- This post office is clustered. If you don't want a clustered post office then set to false -->
@@ -135,14 +135,14 @@
<!-- Enable this when the JGroups multiplexer comes of age
<attribute name="ChannelFactoryName">jgroups.mux:name=Multiplexer</attribute>
- <attribute name="SyncChannelName">udp-sync</attribute>
- <attribute name="AsyncChannelName">udp</attribute>
+ <attribute name="ControlChannelName">udp-sync</attribute>
+ <attribute name="DataChannelName">udp</attribute>
<attribute name="ChannelPartitionName">${jboss.partition.name:DefaultPartition}-JMS</attribute>
-->
<!-- JGroups stack configuration for the data channel - used when casting messages across the cluster -->
- <attribute name="AsyncChannelConfig">
+ <attribute name="DataChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45567" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
@@ -165,7 +165,7 @@
<!-- JGroups stack configuration to use for the control channel - used for bind/unbind requests amongst others -->
- <attribute name="SyncChannelConfig">
+ <attribute name="ControlChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45568" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
Modified: trunk/src/etc/server/default/deploy/postgresql-persistence-service.xml
===================================================================
--- trunk/src/etc/server/default/deploy/postgresql-persistence-service.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/etc/server/default/deploy/postgresql-persistence-service.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -108,10 +108,10 @@
<attribute name="CreateTablesOnStartup">true</attribute>
<attribute name="SqlProperties"><![CDATA[
-CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID BIGINT, CLUSTERED CHAR(1))
-INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED) VALUES (?, ?, ?, ?, ?, ?, ?)
+CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID INTEGER, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023), CHANNEL_ID BIGINT, CLUSTERED CHAR(1), ALL_NODES CHAR(1))
+INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
DELETE_BINDING=DELETE FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=? AND QUEUE_NAME=?
-LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
+LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLUSTERED, ALL_NODES FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
]]></attribute>
<!-- This post office is clustered. If you don't want a clustered post office then set to false -->
@@ -135,14 +135,14 @@
<!-- Enable this when the JGroups multiplexer comes of age
<attribute name="ChannelFactoryName">jgroups.mux:name=Multiplexer</attribute>
- <attribute name="SyncChannelName">udp-sync</attribute>
- <attribute name="AsyncChannelName">udp</attribute>
+ <attribute name="ControlChannelName">udp-sync</attribute>
+ <attribute name="DataChannelName">udp</attribute>
<attribute name="ChannelPartitionName">${jboss.partition.name:DefaultPartition}-JMS</attribute>
-->
<!-- JGroups stack configuration for the data channel - used when casting messages across the cluster -->
- <attribute name="AsyncChannelConfig">
+ <attribute name="DataChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45567" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
@@ -165,7 +165,7 @@
<!-- JGroups stack configuration to use for the control channel - used for bind/unbind requests amongst others -->
- <attribute name="SyncChannelConfig">
+ <attribute name="ControlChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45568" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
Modified: trunk/src/etc/server/default/deploy/sybase-persistence-service.xml
===================================================================
--- trunk/src/etc/server/default/deploy/sybase-persistence-service.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/etc/server/default/deploy/sybase-persistence-service.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -113,10 +113,10 @@
<attribute name="CreateTablesOnStartup">true</attribute>
<attribute name="SqlProperties"><![CDATA[
-CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID SMALLINT, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023) NULL, CHANNEL_ID INTEGER, CLSTERED CHAR(1))
-INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED) VALUES (?, ?, ?, ?, ?, ?, ?)
+CREATE_POSTOFFICE_TABLE=CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID SMALLINT, QUEUE_NAME VARCHAR(1023), COND VARCHAR(1023), SELECTOR VARCHAR(1023) NULL, CHANNEL_ID INTEGER, CLSTERED CHAR(1), ALL_NODES CHAR(1))
+INSERT_BINDING=INSERT INTO JBM_POSTOFFICE (POSTOFFICE_NAME, NODE_ID, QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED, ALL_NODES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
DELETE_BINDING=DELETE FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=? AND QUEUE_NAME=?
-LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
+LOAD_BINDINGS=SELECT QUEUE_NAME, COND, SELECTOR, CHANNEL_ID, CLSTERED, ALL_NODES FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?
]]></attribute>
<!-- This post office is clustered. If you don't want a clustered post office then set to false -->
@@ -140,14 +140,14 @@
<!-- Enable this when the JGroups multiplexer comes of age
<attribute name="ChannelFactoryName">jgroups.mux:name=Multiplexer</attribute>
- <attribute name="SyncChannelName">udp-sync</attribute>
- <attribute name="AsyncChannelName">udp</attribute>
+ <attribute name="ControlChannelName">udp-sync</attribute>
+ <attribute name="DataChannelName">udp</attribute>
<attribute name="ChannelPartitionName">${jboss.partition.name:DefaultPartition}-JMS</attribute>
-->
<!-- JGroups stack configuration for the data channel - used when casting messages across the cluster -->
- <attribute name="AsyncChannelConfig">
+ <attribute name="DataChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45567" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
@@ -170,7 +170,7 @@
<!-- JGroups stack configuration to use for the control channel - used for bind/unbind requests amongst others -->
- <attribute name="SyncChannelConfig">
+ <attribute name="ControlChannelConfig">
<config>
<UDP mcast_recv_buf_size="500000" down_thread="false" ip_mcast="true" mcast_send_buf_size="32000"
mcast_port="45568" ucast_recv_buf_size="500000" use_incoming_packet_handler="false"
Modified: trunk/src/etc/xmdesc/MessagingPostOffice-xmbean.xml
===================================================================
--- trunk/src/etc/xmdesc/MessagingPostOffice-xmbean.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/etc/xmdesc/MessagingPostOffice-xmbean.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -77,15 +77,15 @@
<type>boolean</type>
</attribute>
- <attribute access="read-write" getMethod="getSyncChannelConfig" setMethod="setSyncChannelConfig">
- <description>The JGroups stack configuration for the synchronous channel</description>
- <name>SyncChannelConfig</name>
+ <attribute access="read-write" getMethod="getControlChannelConfig" setMethod="setControlChannelConfig">
+ <description>The JGroups stack configuration for the control channel</description>
+ <name>ControlChannelConfig</name>
<type>org.w3c.dom.Element</type>
</attribute>
- <attribute access="read-write" getMethod="getAsyncChannelConfig" setMethod="setAsyncChannelConfig">
- <description>The JGroups stack configuration for the asynchronous channel</description>
- <name>AsyncChannelConfig</name>
+ <attribute access="read-write" getMethod="getDataChannelConfig" setMethod="setDataChannelConfig">
+ <description>The JGroups stack configuration for the data channel</description>
+ <name>DataChannelConfig</name>
<type>org.w3c.dom.Element</type>
</attribute>
@@ -101,15 +101,15 @@
<type>javax.management.ObjectName</type>
</attribute>
- <attribute access="read-write" getMethod="getSyncChannelName" setMethod="setSyncChannelName">
- <description>The name of the stack used on multiplexer for Sync Channels</description>
- <name>SyncChannelName</name>
+ <attribute access="read-write" getMethod="getControlChannelName" setMethod="setControlChannelName">
+ <description>The name of the stack used on multiplexer for Control Channels</description>
+ <name>ControlChannelName</name>
<type>java.lang.String</type>
</attribute>
- <attribute access="read-write" getMethod="getAsyncChannelName" setMethod="setAsyncChannelName">
- <description>The name of the stack used on multiplexer for Async Channels</description>
- <name>AsyncChannelName</name>
+ <attribute access="read-write" getMethod="getDataChannelName" setMethod="setDataChannelName">
+ <description>The name of the stack used on multiplexer for Data Channels</description>
+ <name>DataChannelName</name>
<type>java.lang.String</type>
</attribute>
Modified: trunk/src/main/org/jboss/jms/server/connectionmanager/SimpleConnectionManager.java
===================================================================
--- trunk/src/main/org/jboss/jms/server/connectionmanager/SimpleConnectionManager.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/jms/server/connectionmanager/SimpleConnectionManager.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -186,19 +186,37 @@
{
ConnectionEndpoint sce = (ConnectionEndpoint)i.next();
- try
- {
- sce.closing();
- sce.close();
- log.debug("cleared up state for connection " + sce);
- }
- catch (JMSException e)
- {
- log.error("Failed to close connection", e);
- }
+ //Close on own thread
+
+ new Thread(new Closer(sce)).start();
}
}
}
+
+ class Closer implements Runnable
+ {
+ private ConnectionEndpoint ce;
+
+ Closer(ConnectionEndpoint ce)
+ {
+ this.ce = ce;
+ }
+
+ public void run()
+ {
+ try
+ {
+ log.debug("clearing up state for connection " + ce);
+ ce.closing();
+ ce.close();
+ log.debug("cleared up state for connection " + ce);
+ }
+ catch (JMSException e)
+ {
+ log.error("Failed to close connection", e);
+ }
+ }
+ }
// ConnectionListener implementation ------------------------------------------------------------
Modified: trunk/src/main/org/jboss/jms/server/endpoint/ServerConsumerEndpoint.java
===================================================================
--- trunk/src/main/org/jboss/jms/server/endpoint/ServerConsumerEndpoint.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/jms/server/endpoint/ServerConsumerEndpoint.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -43,6 +43,7 @@
import org.jboss.messaging.core.contract.PostOffice;
import org.jboss.messaging.core.contract.Queue;
import org.jboss.messaging.core.contract.Receiver;
+import org.jboss.messaging.core.contract.Replicator;
import org.jboss.messaging.core.impl.SimpleDelivery;
import org.jboss.messaging.core.impl.tx.Transaction;
import org.jboss.messaging.util.ExceptionUtil;
@@ -507,7 +508,7 @@
ServerPeer sp = sessionEndpoint.getConnectionEndpoint().getServerPeer();
- Queue queue = sp.getPostOfficeInstance().getBindingForQueueName(queueName).queue;
+ Queue queue = postOffice.getBindingForQueueName(queueName).queue;
ManagedDestination mDest = sp.getDestinationManager().getDestination(destination.getName(), false);
@@ -527,6 +528,19 @@
}
}
}
+ else
+ {
+ //Durable sub consumer
+
+ if (queue.isClustered() && postOffice.isClustered())
+ {
+ //Clustered durable sub consumer created - we need to remove this info from the replicator
+
+ Replicator rep = (Replicator)postOffice;
+
+ rep.put(queue.getName(), ServerSessionEndpoint.DUR_SUB_STATE_NO_CONSUMERS);
+ }
+ }
}
}
Modified: trunk/src/main/org/jboss/jms/server/endpoint/ServerSessionEndpoint.java
===================================================================
--- trunk/src/main/org/jboss/jms/server/endpoint/ServerSessionEndpoint.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/jms/server/endpoint/ServerSessionEndpoint.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -72,6 +72,7 @@
import org.jboss.messaging.core.contract.PersistenceManager;
import org.jboss.messaging.core.contract.PostOffice;
import org.jboss.messaging.core.contract.Queue;
+import org.jboss.messaging.core.contract.Replicator;
import org.jboss.messaging.core.impl.IDManager;
import org.jboss.messaging.core.impl.MessagingQueue;
import org.jboss.messaging.core.impl.tx.Transaction;
@@ -114,6 +115,10 @@
private static final Logger log = Logger.getLogger(ServerSessionEndpoint.class);
+ static final String DUR_SUB_STATE_CONSUMERS = "C";
+
+ static final String DUR_SUB_STATE_NO_CONSUMERS = "N";
+
static final String TEMP_QUEUE_MESSAGECOUNTER_PREFIX = "TempQueue.";
// Static ---------------------------------------------------------------------------------------
@@ -684,10 +689,21 @@
subscriptionName + " since it has active subscribers");
}
- //Unbind it
+ //Also if it is clustered we must disallow unsubscribing if it has active consumers on other nodes
- // Durable subs must be unbound on ALL nodes of the cluster
-
+ if (sub.isClustered() && postOffice.isClustered())
+ {
+ Replicator rep = (Replicator)postOffice;
+
+ Map map = rep.get(sub.getName());
+
+ if (!map.isEmpty())
+ {
+ throw new IllegalStateException("Cannot unsubscribe durable subscription " +
+ subscriptionName + " since it has active subscribers on other nodes");
+ }
+ }
+
postOffice.removeBinding(sub.getName(), true);
String counterName = TopicService.SUBSCRIPTION_MESSAGECOUNTER_PREFIX + sub.getName();
@@ -1351,6 +1367,18 @@
{
throw new IllegalStateException("Cannot create a subscriber on the durable subscription since it already has subscriber(s)");
}
+
+ // If the durable sub exists because it is clustered and was created on this node due to a bind on another node
+ // then it will have no message counter
+
+ String counterName = TopicService.SUBSCRIPTION_MESSAGECOUNTER_PREFIX + queue.getName();
+
+ boolean createCounter = false;
+
+ if (sp.getMessageCounterManager().getMessageCounter(counterName) == null)
+ {
+ createCounter = true;
+ }
// From javax.jms.Session Javadoc (and also JMS 1.1 6.11.1):
// A client can change an existing durable subscription by creating a durable
@@ -1397,18 +1425,21 @@
// Durable subs must be bound on ALL nodes of the cluster
postOffice.addBinding(new Binding(new JMSCondition(false, jmsDestination.getName()), queue), true);
-
- String counterName = TopicService.SUBSCRIPTION_MESSAGECOUNTER_PREFIX + queue.getName();
-
+
if (!mDest.isTemporary())
{
- MessageCounter counter =
- new MessageCounter(counterName, subscriptionName, queue, true, true,
- mDest.getMessageCounterHistoryDayLimit());
-
- sp.getMessageCounterManager().registerMessageCounter(counterName, counter);
+ createCounter = true;
}
}
+
+ if (createCounter)
+ {
+ MessageCounter counter =
+ new MessageCounter(counterName, subscriptionName, queue, true, true,
+ mDest.getMessageCounterHistoryDayLimit());
+
+ sp.getMessageCounterManager().registerMessageCounter(counterName, counter);
+ }
}
}
}
@@ -1446,6 +1477,20 @@
queue.getName(), this, selectorString, noLocal,
jmsDestination, dlqToUse, expiryQueueToUse, redeliveryDelay, maxDeliveryAttemptsToUse, false);
+ if (queue.isClustered() && postOffice.isClustered() && jmsDestination.isTopic() && subscriptionName != null)
+ {
+ //Clustered durable sub consumer created - we need to add this info in the replicator - it is needed by other nodes
+
+ //This is also used to prevent a possible race condition where a clustered durable sub is bound on all nodes
+ //but then unsubscribed before the bind is complete on all nodes, leaving it bound on some nodes and not on others
+ //The bind all is synchronous so by the time we add the x to the replicator we know it is bound on all nodes
+ //and same to unsubscribe
+
+ Replicator rep = (Replicator)postOffice;
+
+ rep.put(queue.getName(), DUR_SUB_STATE_CONSUMERS);
+ }
+
ConsumerAdvised advised;
// Need to synchronized to prevent a deadlock
Modified: trunk/src/main/org/jboss/messaging/core/contract/Binding.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/contract/Binding.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/contract/Binding.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -42,4 +42,9 @@
public Condition condition;
public Queue queue;
+
+ public String toString()
+ {
+ return "Binding:" + System.identityHashCode(this) + " condition: " + condition + " queue: " + queue;
+ }
}
\ No newline at end of file
Modified: trunk/src/main/org/jboss/messaging/core/contract/JChannelFactory.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/contract/JChannelFactory.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/contract/JChannelFactory.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -10,7 +10,7 @@
*/
public interface JChannelFactory
{
- JChannel createSyncChannel() throws Exception;
+ JChannel createControlChannel() throws Exception;
- JChannel createASyncChannel() throws Exception;
+ JChannel createDataChannel() throws Exception;
}
Modified: trunk/src/main/org/jboss/messaging/core/impl/ChannelSupport.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/ChannelSupport.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/ChannelSupport.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -38,7 +38,6 @@
import org.jboss.messaging.core.contract.MessageReference;
import org.jboss.messaging.core.contract.MessageStore;
import org.jboss.messaging.core.contract.PersistenceManager;
-import org.jboss.messaging.core.contract.Receiver;
import org.jboss.messaging.core.impl.tx.Transaction;
import org.jboss.messaging.core.impl.tx.TransactionException;
import org.jboss.messaging.core.impl.tx.TxCallback;
@@ -222,7 +221,9 @@
synchronized (lock)
{
if (distributor != null && distributor.getNumberOfReceivers() > 0)
- {
+ {
+ log.info("Deliver was called");
+
setReceiversReady(true);
deliverInternal();
Modified: trunk/src/main/org/jboss/messaging/core/impl/ClusterRoundRobinDistributor.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/ClusterRoundRobinDistributor.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/ClusterRoundRobinDistributor.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -105,6 +105,14 @@
log.info("*** sending to remote distributor");
+ String wib = (String)ref.getMessage().getHeader("wib");
+ if (wib == null)
+ {
+ wib = "nodes:";
+ }
+ wib += ((MessagingQueue)observer).getNodeID() + "-";
+ ref.getMessage().putHeader("wib", wib);
+
del = remoteDistributor.handle(observer, ref, tx);
log.info("** remote distributor returned " + del);
Modified: trunk/src/main/org/jboss/messaging/core/impl/MessagingQueue.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/MessagingQueue.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/MessagingQueue.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -215,12 +215,16 @@
handleFlowControlForConsumers = true;
- if (getReceiversReady())
+ if (getReceiversReady() && localDistributor.getNumberOfReceivers() > 0)
{
if (trace) { log.trace(this + " receivers ready so setting consumer to true"); }
sucker.setConsuming(true);
}
+ else
+ {
+ log.info("No receivers ready set setting consuming to false");
+ }
}
}
}
@@ -352,13 +356,13 @@
if (trace) { log.trace(this + " attempting to add receiver " + receiver); }
synchronized (lock)
- {
+ {
boolean added = distributor.add(receiver);
-
+
if (trace) { log.trace("receiver " + receiver + (added ? "" : " NOT") + " added"); }
-
+
setReceiversReady(true);
-
+
return added;
}
}
Modified: trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/ClusterConnectionManager.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/ClusterConnectionManager.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/ClusterConnectionManager.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -132,7 +132,7 @@
if (trace) { log.trace(this + " stopped"); }
}
-
+
/*
* We respond to two types of events -
*
@@ -233,9 +233,13 @@
{
Integer nid = (Integer)iter.next();
+ log.info("*********** CLOSING CLUSTER CONNECTION FOR NODE " + nid);
+
ConnectionInfo info = (ConnectionInfo)connections.remove(nid);
-
+
info.close();
+
+ log.info("******* CLOSED");
}
}
}
@@ -250,7 +254,7 @@
{
//Local bind
- if (trace) { log.trace("Local bind"); }
+ if (trace) { log.trace(this + " Local bind"); }
ensureAllConnectionsCreated();
@@ -260,14 +264,20 @@
Iterator iter = bindings.iterator();
+ if (trace) { log.trace(this + " Looking for remote bindings"); }
+
while (iter.hasNext())
{
Binding binding = (Binding)iter.next();
+ if (trace) { log.trace(this + " Remote binding is " + binding); }
+
//This will only create it if it doesn't already exist
if (binding.queue.getNodeID() != this.nodeID)
- {
+ {
+ if (trace) { log.trace(this + " Creating sucker"); }
+
createSucker(queueName, binding.queue.getNodeID());
}
}
@@ -276,7 +286,7 @@
{
//Remote bind
- if (trace) { log.trace("Remote bind"); }
+ if (trace) { log.trace(this + " Remote bind"); }
ensureAllConnectionsCreated();
@@ -287,11 +297,14 @@
if (localBinding == null)
{
//This is ok - the queue was deployed on the remote node before being deployed on the local node - do nothing for now
+ if (trace) { log.trace(this + " There's no local binding"); }
}
else
{
//The queue has already been deployed on the local node so create a sucker
+ if (trace) { log.trace(this + " Creating sucker"); }
+
createSucker(queueName, notification.nodeID);
}
}
@@ -426,10 +439,12 @@
if (sucker == null)
{
- throw new IllegalStateException("Cannot find sucker to remove " + sucker);
+ //This is OK too
}
-
- sucker.stop();
+ else
+ {
+ sucker.stop();
+ }
}
private void removeAllSuckers(String queueName)
@@ -589,7 +604,7 @@
}
catch (Throwable t)
{
- if (trace) { log.trace("Failure in closing source connection", t); }
+ //Ignore - the other server might have closed so this is ok
}
connection = null;
Modified: trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/MessageSucker.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/MessageSucker.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/clusterconnection/MessageSucker.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -77,6 +77,11 @@
private boolean consuming;
private ConsumerDelegate consumer;
+
+ public String toString()
+ {
+ return "MessageSucker:" + System.identityHashCode(this) + " queue:" + localQueue.getName();
+ }
MessageSucker(Queue localQueue, JBossConnection sourceConnection, JBossConnection localConnection, boolean xa)
{
@@ -147,6 +152,8 @@
if (trace) { log.trace(this + " Registering sucker"); }
+ log.info("**** starting sucker sucking from queue " + this.getQueueName());
+
localQueue.registerSucker(this);
if (trace) { log.trace(this + " Registered sucker"); }
@@ -161,8 +168,6 @@
setConsuming(false);
- //FIXME - need to do the stopping properly
-
localQueue.unregisterSucker(this);
try
@@ -171,7 +176,7 @@
}
catch (Throwable t)
{
- if (trace) { log.trace("Failure in closing source session", t); }
+ //Ignore
}
try
@@ -180,7 +185,7 @@
}
catch (Throwable t)
{
- if (trace) { log.trace("Failure in closing local session", t); }
+ //Ignore
}
}
@@ -254,8 +259,7 @@
if (trace) { log.trace(this + " forwarded message to queue"); }
if (startTx)
- {
-
+ {
if (trace) { log.trace("Committing JTA transaction"); }
tx.delistResource(sourceSession.getXAResource(), XAResource.TMSUCCESS);
Modified: trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/MultiplexerJChannelFactory.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/MultiplexerJChannelFactory.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/MultiplexerJChannelFactory.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -120,13 +120,13 @@
this.uniqueID = uniqueID;
}
- public JChannel createSyncChannel() throws Exception
+ public JChannel createControlChannel() throws Exception
{
return (JChannel) server.invoke(this.channelFactory, MUX_OPERATION,
new Object[]{syncStack, uniqueID, Boolean.TRUE, uniqueID}, MUX_SIGNATURE);
}
- public JChannel createASyncChannel() throws Exception
+ public JChannel createDataChannel() throws Exception
{
return (JChannel) server.invoke(this.channelFactory, MUX_OPERATION,
new Object[]{asyncStack, uniqueID, Boolean.TRUE, uniqueID}, MUX_SIGNATURE);
Modified: trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/XMLJChannelFactory.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/XMLJChannelFactory.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/jchannelfactory/XMLJChannelFactory.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -74,12 +74,12 @@
}
// implementation of JChannelFactory ------------------------------------------------------------
- public JChannel createSyncChannel() throws Exception
+ public JChannel createControlChannel() throws Exception
{
return new JChannel(syncConfig);
}
- public JChannel createASyncChannel() throws Exception
+ public JChannel createDataChannel() throws Exception
{
return new JChannel(asyncConfig);
}
Modified: trunk/src/main/org/jboss/messaging/core/impl/postoffice/GroupMember.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/postoffice/GroupMember.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/postoffice/GroupMember.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -25,10 +25,12 @@
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
+import java.util.Collection;
import java.util.Iterator;
import org.jboss.logging.Logger;
import org.jboss.messaging.core.contract.JChannelFactory;
+import org.jboss.messaging.util.Future;
import org.jgroups.Address;
import org.jgroups.Channel;
import org.jgroups.MembershipListener;
@@ -39,9 +41,13 @@
import org.jgroups.blocks.GroupRequest;
import org.jgroups.blocks.MessageDispatcher;
import org.jgroups.blocks.RequestHandler;
+import org.jgroups.util.Rsp;
+import org.jgroups.util.RspList;
import EDU.oswego.cs.dl.util.concurrent.LinkedQueue;
import EDU.oswego.cs.dl.util.concurrent.QueuedExecutor;
+import EDU.oswego.cs.dl.util.concurrent.ReadWriteLock;
+import EDU.oswego.cs.dl.util.concurrent.ReentrantWriterPreferenceReadWriteLock;
/**
*
@@ -67,9 +73,9 @@
private JChannelFactory jChannelFactory;
- private Channel syncChannel;
+ private Channel controlChannel;
- private Channel asyncChannel;
+ private Channel dataChannel;
private RequestTarget requestTarget;
@@ -77,16 +83,17 @@
private MessageDispatcher dispatcher;
- private volatile boolean stopping;
-
private View currentView;
private QueuedExecutor viewExecutor;
private Object setStateLock = new Object();
- private boolean stateSet;
+ //Still needs to be volatile since the ReadWriteLock won't synchronize between threads
+ private volatile boolean started;
+ private ReadWriteLock lock;
+
public GroupMember(String groupName, long stateTimeout, long castTimeout,
JChannelFactory jChannelFactory, RequestTarget requestTarget,
GroupListener groupListener)
@@ -104,104 +111,280 @@
this.groupListener = groupListener;
this.viewExecutor = new QueuedExecutor(new LinkedQueue());
+
+ this.lock = new ReentrantWriterPreferenceReadWriteLock();
}
public void start() throws Exception
{
- this.syncChannel = jChannelFactory.createSyncChannel();
+ lock.writeLock().acquire();
- this.asyncChannel = jChannelFactory.createASyncChannel();
-
- // We don't want to receive local messages on any of the channels
- syncChannel.setOpt(Channel.LOCAL, Boolean.FALSE);
-
- asyncChannel.setOpt(Channel.LOCAL, Boolean.FALSE);
-
- MessageListener messageListener = new ControlMessageListener();
-
- MembershipListener membershipListener = new ControlMembershipListener();
-
- RequestHandler requestHandler = new PostOfficeRequestHandler();
-
- dispatcher = new MessageDispatcher(syncChannel, messageListener, membershipListener, requestHandler, true);
-
- Receiver dataReceiver = new DataReceiver();
-
- asyncChannel.setReceiver(dataReceiver);
-
- syncChannel.connect(groupName);
-
- asyncChannel.connect(groupName);
+ try
+ {
+ this.controlChannel = jChannelFactory.createControlChannel();
+
+ this.dataChannel = jChannelFactory.createDataChannel();
+
+ // We don't want to receive local messages on any of the channels
+ controlChannel.setOpt(Channel.LOCAL, Boolean.FALSE);
+
+ dataChannel.setOpt(Channel.LOCAL, Boolean.FALSE);
+
+ MessageListener messageListener = new ControlMessageListener();
+
+ MembershipListener membershipListener = new ControlMembershipListener();
+
+ RequestHandler requestHandler = new ControlRequestHandler();
+
+ dispatcher = new MessageDispatcher(controlChannel, messageListener, membershipListener, requestHandler, true);
+
+ //Receiver controlReceiver = new ControlReceiver();
+
+ //controlChannel.setReceiver(controlReceiver);
+
+ Receiver dataReceiver = new DataReceiver();
+
+ dataChannel.setReceiver(dataReceiver);
+
+ controlChannel.connect(groupName);
+
+ dataChannel.connect(groupName);
+
+ //Note we're not started until getState() is successfully called
+
+ if (!getState())
+ {
+ if (trace) { log.trace(this + " is the first member of group"); }
+ }
+ else
+ {
+ if (trace) { log.trace(this + " is not the first member of group"); }
+ }
+
+ //Now we can be considered started
+ started = true;
+ }
+ finally
+ {
+ lock.writeLock().release();
+ }
}
- public void stop()
+ public void stop() throws Exception
{
- //FIXME - we should use a ReadWriteLock here
+ lock.writeLock().acquire();
- stopping = true;
-
try
+ {
+ viewExecutor.shutdownAfterProcessingCurrentTask();
+
+ controlChannel.close();
+
+ dataChannel.close();
+
+ started = false;
+ }
+ finally
{
- Thread.sleep(500);
+ lock.writeLock().release();
}
- catch (Exception ignore)
- {
- }
-
- syncChannel.close();
-
- asyncChannel.close();
}
public Address getSyncAddress()
{
- return syncChannel.getLocalAddress();
+ return controlChannel.getLocalAddress();
}
public Address getAsyncAddress()
{
- return asyncChannel.getLocalAddress();
+ return dataChannel.getLocalAddress();
}
- public void multicastRequest(ClusterRequest request) throws Exception
+ public void multicastControl(ClusterRequest request, boolean sync) throws Exception
{
- if (!stopping)
- {
- if (trace) { log.trace(this + " multicasting " + request + " to group"); }
-
- byte[] bytes = writeRequest(request);
-
- asyncChannel.send(new Message(null, null, bytes));
+ lock.readLock().acquire();
+
+ try
+ {
+ if (started)
+ {
+ if (trace) { log.trace(this + " multicasting " + request + " to control channel, sync=" + sync); }
+
+ byte[] bytes = writeRequest(request);
+
+ controlChannel.send(new Message(null, null, bytes));
+
+ Message message = new Message(null, null, writeRequest(request));
+
+ RspList rspList =
+ dispatcher.castMessage(null, message, sync ? GroupRequest.GET_ALL: GroupRequest.GET_NONE, castTimeout);
+
+// Future future = new Future();
+//
+// new Thread(new CastRunner(request, sync, future)).start();
+//
+// Object result = future.getResult();
+//
+// if (result instanceof Exception)
+// {
+// throw (Exception)result;
+// }
+// else if (result instanceof Error)
+// {
+// throw (Error)result;
+// }
+//
+// RspList list = (RspList)result;
+
+ if (sync)
+ {
+ Iterator iter = rspList.values().iterator();
+
+ while (iter.hasNext())
+ {
+ Rsp rsp = (Rsp)iter.next();
+
+ if (!rsp.wasReceived())
+ {
+ throw new IllegalStateException(this + " response not received from " + rsp.getSender() + " - there may be others");
+ }
+ }
+ }
+ }
}
+ finally
+ {
+ lock.readLock().release();
+ }
}
- public void unicastRequest(ClusterRequest request, Address address) throws Exception
+// class CastRunner implements Runnable
+// {
+// private ClusterRequest request;
+//
+// private boolean sync;
+//
+// private Future future;
+//
+// CastRunner(ClusterRequest request, boolean sync, Future future)
+// {
+// this.request = request;
+//
+// this.sync = sync;
+//
+// this.future = future;
+// }
+//
+// public void run()
+// {
+// try
+// {
+// Message message = new Message(null, null, writeRequest(request));
+//
+// RspList rspList =
+// dispatcher.castMessage(null, message, sync ? GroupRequest.GET_ALL: GroupRequest.GET_NONE, castTimeout);
+//
+// future.setResult(rspList);
+// }
+// catch (Throwable t)
+// {
+// future.setException(t);
+// }
+// }
+// }
+
+ public void multicastData(ClusterRequest request) throws Exception
{
- if (!stopping)
+ lock.readLock().acquire();
+
+ try
+ {
+ if (started)
+ {
+ if (trace) { log.trace(this + " multicasting " + request + " to data channel"); }
+
+ byte[] bytes = writeRequest(request);
+
+ dataChannel.send(new Message(null, null, bytes));
+ }
+ }
+ finally
{
- if (trace) { log.trace(this + " unicasting " + request + " to address " + address); }
-
- byte[] bytes = writeRequest(request);
-
- asyncChannel.send(new Message(address, null, bytes));
+ lock.readLock().release();
}
}
- public void sendSyncRequest(ClusterRequest request) throws Exception
+ public void unicastData(ClusterRequest request, Address address) throws Exception
{
- if (!stopping)
+ lock.readLock().acquire();
+
+ try
+ {
+ if (started)
+ {
+ if (trace) { log.trace(this + " unicasting " + request + " to address " + address); }
+
+ byte[] bytes = writeRequest(request);
+
+ dataChannel.send(new Message(address, null, bytes));
+ }
+ }
+ finally
{
- if (trace) { log.trace(this + " Sending sync request " + request); }
-
- Message message = new Message(null, null, writeRequest(request));
-
- dispatcher.castMessage(null, message, GroupRequest.GET_ALL, castTimeout);
+ lock.readLock().release();
}
}
+
+// public void sendSyncRequest(ClusterRequest request) throws Exception
+// {
+// lock.readLock().acquire();
+//
+// try
+// {
+// if (started)
+// {
+// if (trace) { log.trace(this + " Sending sync request " + request); }
+//
+// Message message = new Message(null, null, writeRequest(request));
+//
+// RspList rspList = dispatcher.castMessage(null, message, GroupRequest.GET_ALL, castTimeout);
+// }
+// }
+// finally
+// {
+// lock.readLock().release();
+// }
+// }
+//
+// //These methods need renaming
+// public void sendAsyncRequest(ClusterRequest request) throws Exception
+// {
+// lock.readLock().acquire();
+//
+// try
+// {
+// if (started)
+// {
+// if (trace) { log.trace(this + " Sending async request " + request); }
+//
+// byte[] bytes = writeRequest(request);
+//
+// controlChannel.send(new Message(null, null, bytes));
+// }
+// }
+// finally
+// {
+// lock.readLock().release();
+// }
+// }
public boolean getState() throws Exception
{
- if (syncChannel.getState(null, stateTimeout))
+ boolean retrievedState = false;
+
+ log.info("***** waiting for state to arrive");
+
+ if (controlChannel.getState(null, stateTimeout))
{
//We are not the first member of the group, so let's wait for state to be got and processed
@@ -211,30 +394,28 @@
long start = System.currentTimeMillis();
- while (!stateSet && timeRemaining > 0)
+ while (!started && timeRemaining > 0)
{
setStateLock.wait(stateTimeout);
- if (!stateSet)
+ if (!started)
{
long waited = System.currentTimeMillis() - start;
timeRemaining -= waited;
}
}
+
+ if (!started)
+ {
+ throw new IllegalStateException("Timed out waiting for state to arrive");
+ }
}
- if (!stateSet)
- {
- throw new IllegalStateException("Timed out waiting for state to arrive");
- }
-
- return true;
+ retrievedState = true;
}
- else
- {
- return false;
- }
+
+ return retrievedState;
}
@@ -272,19 +453,34 @@
{
public byte[] getState()
{
- if (stopping)
- {
- return null;
- }
-
- if (trace) { log.trace(this + ".ControlMessageListener got state"); }
-
+ log.info("*** getting state");
+
try
{
- byte[] state = groupListener.getState();
-
- return state;
- }
+ lock.readLock().acquire();
+
+ try
+ {
+ if (!started)
+ {
+ //Ignore if received after stopped
+
+ return null;
+ }
+
+ if (trace) { log.trace(this + ".ControlMessageListener got state"); }
+
+ byte[] state = groupListener.getState();
+
+ log.info("**** got state " + state);
+
+ return state;
+ }
+ finally
+ {
+ lock.readLock().release();
+ }
+ }
catch (Exception e)
{
log.error("Failed to get state", e);
@@ -295,32 +491,25 @@
public void receive(Message message)
{
- if (stopping)
- {
- return;
- }
}
public void setState(byte[] bytes)
{
- if (stopping)
- {
- return;
- }
-
+ log.info("************* setting state");
synchronized (setStateLock)
{
try
{
groupListener.setState(bytes);
+ log.info("* set it");
}
catch (Exception e)
{
log.error("Failed to set state", e);
}
- stateSet = true;
-
+ started = true;
+
setStateLock.notify();
}
}
@@ -343,11 +532,6 @@
public void viewAccepted(View newView)
{
- if (stopping)
- {
- return;
- }
-
try
{
// We queue up changes and execute them asynchronously.
@@ -402,10 +586,27 @@
try
{
- byte[] bytes = message.getBuffer();
-
- ClusterRequest request = readRequest(bytes);
- request.execute(requestTarget);
+ lock.readLock().acquire();
+
+ try
+ {
+ if (!started)
+ {
+ //Ignore messages received when not started
+
+ return;
+ }
+
+ byte[] bytes = message.getBuffer();
+
+ ClusterRequest request = readRequest(bytes);
+
+ request.execute(requestTarget);
+ }
+ finally
+ {
+ lock.readLock().release();
+ }
}
catch (Throwable e)
{
@@ -421,28 +622,180 @@
//NOOP
}
}
+
+// private class ControlReceiver implements Receiver
+// {
+// public void block()
+// {
+// //NOOP
+// }
+//
+// public void suspect(Address address)
+// {
+// //NOOP
+// }
+//
+// public void viewAccepted(View newView)
+// {
+// try
+// {
+// lock.readLock().acquire();
+//
+// try
+// {
+// if (!started)
+// {
+// //Ignore any views received after stopped
+// return;
+// }
+//
+// // We queue up changes and execute them asynchronously.
+// // This is because JGroups will not let us do stuff like send synch messages using the
+// // same thread that delivered the view change and this is what we need to do in
+// // failover, for example.
+//
+// viewExecutor.execute(new HandleViewAcceptedRunnable(newView));
+// }
+// finally
+// {
+// lock.readLock().release();
+// }
+// }
+// catch (InterruptedException e)
+// {
+// log.warn("Caught InterruptedException", e);
+// }
+// }
+//
+// public byte[] getState()
+// {
+// log.info("*** getting state");
+//
+// try
+// {
+// lock.readLock().acquire();
+//
+// try
+// {
+// if (!started)
+// {
+// //Ignore if received after stopped
+//
+// return null;
+// }
+//
+// if (trace) { log.trace(this + ".ControlMessageListener got state"); }
+//
+// byte[] state = groupListener.getState();
+//
+// log.info("**** got state " + state);
+//
+// return state;
+// }
+// finally
+// {
+// lock.readLock().release();
+// }
+// }
+// catch (Exception e)
+// {
+// log.error("Failed to get state", e);
+//
+// return null;
+// }
+// }
+//
+// public void receive(Message message)
+// {
+// if (trace) { log.trace(this + " received " + message + " on the ASYNC channel"); }
+//
+// try
+// {
+// lock.readLock().acquire();
+//
+// try
+// {
+// if (!started)
+// {
+// //Ignore messages received when not started
+//
+// return;
+// }
+//
+// byte[] bytes = message.getBuffer();
+//
+// ClusterRequest request = readRequest(bytes);
+//
+// request.execute(requestTarget);
+// }
+// finally
+// {
+// lock.readLock().release();
+// }
+// }
+// catch (Throwable e)
+// {
+// log.error("Caught Exception in Receiver", e);
+// IllegalStateException e2 = new IllegalStateException(e.getMessage());
+// e2.setStackTrace(e.getStackTrace());
+// throw e2;
+// }
+// }
+//
+// public void setState(byte[] bytes)
+// {
+// log.info("************* setting state");
+// synchronized (setStateLock)
+// {
+// try
+// {
+// groupListener.setState(bytes);
+// log.info("* set it");
+// }
+// catch (Exception e)
+// {
+// log.error("Failed to set state", e);
+// }
+//
+// started = true;
+//
+// setStateLock.notify();
+// }
+// }
+// }
/*
- * This class is used to handle synchronous requests
+ * This class is used to handle control channel requests
*/
- private class PostOfficeRequestHandler implements RequestHandler
+ private class ControlRequestHandler implements RequestHandler
{
public Object handle(Message message)
{
- if (stopping)
- {
- return null;
- }
+ if (trace) { log.trace(this + ".RequestHandler received " + message + " on the control channel"); }
- if (trace) { log.trace(this + ".RequestHandler received " + message + " on the SYNC channel"); }
-
try
{
- byte[] bytes = message.getBuffer();
-
- ClusterRequest request = readRequest(bytes);
-
- return request.execute(requestTarget);
+ lock.readLock().acquire();
+
+ try
+ {
+ if (!started)
+ {
+ //Ignore messages received when stopped
+
+ return null;
+ }
+
+ byte[] bytes = message.getBuffer();
+
+ ClusterRequest request = readRequest(bytes);
+
+ return request.execute(requestTarget);
+ }
+ finally
+ {
+ lock.readLock().release();
+ }
}
catch (Throwable e)
{
@@ -465,11 +818,12 @@
public void run()
{
- log.debug(this + " got new view " + newView);
+ log.debug(this + " got new view " + newView + ", old view is " + currentView);
// JGroups will make sure this method is never called by more than one thread concurrently
View oldView = currentView;
+
currentView = newView;
try
Modified: trunk/src/main/org/jboss/messaging/core/impl/postoffice/MessagingPostOffice.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/postoffice/MessagingPostOffice.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/postoffice/MessagingPostOffice.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -71,6 +71,7 @@
import org.jboss.messaging.core.impl.MessagingQueue;
import org.jboss.messaging.core.impl.tx.Transaction;
import org.jboss.messaging.core.impl.tx.TransactionRepository;
+import org.jboss.messaging.util.ConcurrentHashSet;
import org.jboss.messaging.util.StreamUtils;
import org.jgroups.Address;
@@ -159,31 +160,24 @@
// Map <Condition, List <Queue>> - for ALL nodes
private Map mappings;
- // Map <queue name, Binding> - only for the current node
- private Map nameMap;
+ // Map <node, Map < queue name, binding> >
+ private Map nameMaps;
+ //We cache a reference to the local name map for fast lookup
+ private Map localNameMap;
+
// Map <channel id, Binding> - only for the current node
private Map channelIDMap;
- // this lock protects the bindings
- private ReadWriteLock bindingsLock;
+ private ReadWriteLock lock;
private String officeName;
private boolean clustered;
+ //Started still needs to be volatile since the ReadWriteLock won't synchronize between threads
private volatile boolean started;
-
- //FIXME using a stopping flag is not a good approach and introduces a race condition
- //http://jira.jboss.org/jira/browse/JBMESSAGING-819
- //the code can check stopping and find it to be false, then the service can stop, setting stopping to true
- //then actually stopping the post office, then the same thread that checked stopping continues and performs
- //its action only to find the service stopped
- //Should use a read-write lock instead
- //One way to minimise the chance of the race happening is to sleep for a little while after setting stopping to true
- //before actually stopping the service (see below)
- private volatile boolean stopping;
-
+
private GroupMember groupMember;
private Map replicatedData;
@@ -246,11 +240,11 @@
this.clusterNotifier = clusterNotifier;
- bindingsLock = new ReentrantWriterPreferenceReadWriteLock();
+ lock = new ReentrantWriterPreferenceReadWriteLock();
mappings = new HashMap();
- nameMap = new HashMap();
+ nameMaps = new HashMap();
channelIDMap = new HashMap();
@@ -290,7 +284,7 @@
failoverMap = new LinkedHashMap();
- leftSet = new HashSet();
+ leftSet = new ConcurrentHashSet();
groupMember = new GroupMember(groupName, stateTimeout, castTimeout, jChannelFactory, this, this);
@@ -299,13 +293,8 @@
// MessagingComponent overrides -----------------------------------------------------------------
- public synchronized void start() throws Exception
+ public void start() throws Exception
{
- if (started)
- {
- log.warn("Attempt to start() but " + this + " is already started");
- }
-
if (trace) { log.trace(this + " starting"); }
super.start();
@@ -314,17 +303,6 @@
{
groupMember.start();
- //First get the shared state from the cluster if appropriate
-
- if (!groupMember.getState())
- {
- if (trace) { log.trace(this + " is the first member of group"); }
- }
- else
- {
- if (trace) { log.trace(this + " is not the first member of group"); }
- }
-
//Sanity check - we check there aren't any other nodes already in the cluster with the same node id
if (knowAboutNodeId(thisNodeID))
{
@@ -340,22 +318,22 @@
//calculate the failover map
calculateFailoverMap();
- syncSendRequest(new JoinClusterRequest(thisNodeID, info));
+ groupMember.multicastControl(new JoinClusterRequest(thisNodeID, info), true);
}
-
+
//Now load the bindings for this node
loadBindingsFromStorage();
started = true;
-
- log.debug(this + " started");
+
+ log.debug(this + " started");
}
public synchronized void stop() throws Exception
{
if (trace) { log.trace(this + " stopping"); }
-
+
if (!started)
{
log.warn("Attempt to stop() but " + this + " is not started");
@@ -365,23 +343,11 @@
super.stop();
if (clustered)
- {
- try
- {
- //Need to send this *before* stopping
- syncSendRequest(new LeaveClusterRequest(thisNodeID));
-
- stopping = true;
-
- //FIXME http://jira.jboss.org/jira/browse/JBMESSAGING-819 this is a temporary kludge for now
- Thread.sleep(999);
-
- groupMember.stop();
- }
- catch (Throwable t)
- {
- if (trace) { log.trace("Failure in stopping post office", t); }
- }
+ {
+ //Need to send this *before* stopping
+ groupMember.multicastControl(new LeaveClusterRequest(thisNodeID), true);
+
+ groupMember.stop();
}
started = false;
@@ -418,6 +384,11 @@
public void addBinding(Binding binding, boolean allNodes) throws Exception
{
+ internalAddBinding(binding, allNodes, true);
+ }
+
+ public void internalAddBinding(Binding binding, boolean allNodes, boolean sync) throws Exception
+ {
if (trace) { log.trace(this.thisNodeID + " binding " + binding.queue + " with condition " + binding.condition + " all nodes " + allNodes); }
if (binding == null)
@@ -444,21 +415,12 @@
throw new IllegalArgumentException("Condition is null");
}
- bindingsLock.writeLock().acquire();
-
- try
- {
- addBindingInMemory(binding);
- }
- finally
- {
- bindingsLock.writeLock().release();
- }
+ addBindingInMemory(binding);
if (queue.isRecoverable())
{
// Need to write the mapping to the database
- insertBindingInStorage(condition, queue);
+ insertBindingInStorage(condition, queue, allNodes);
}
if (clustered && queue.isClustered())
@@ -473,12 +435,27 @@
ClusterRequest request = new BindRequest(info, allNodes);
- syncSendRequest(request);
+// if (sync)
+// {
+// syncSendRequest(request);
+// }
+// else
+// {
+ //When sending as a result of an all binding being received from the cluster we send the bind on asynchronously
+ //To avoid a deadlock which happens when you have one sync request hitting a node which then tries to send another back
+ //to the original node
+ groupMember.multicastControl(request, sync);
+ // }
}
}
public void removeBinding(String queueName, boolean allNodes) throws Throwable
{
+ this.internalRemoveBinding(queueName, allNodes, true);
+ }
+
+ private void internalRemoveBinding(String queueName, boolean allNodes, boolean sync) throws Throwable
+ {
if (trace) { log.trace(this.thisNodeID + " unbind queue: " + queueName + " all nodes " + allNodes); }
if (queueName == null)
@@ -486,26 +463,8 @@
throw new IllegalArgumentException("Queue name is null");
}
- bindingsLock.writeLock().acquire();
+ Binding removed = removeBindingInMemory(thisNodeID, queueName);
- Binding removed;
-
- try
- {
- removed = (Binding)nameMap.get(queueName);
-
- if (removed == null)
- {
- throw new IllegalStateException("Cannot find queue to unbind " + queueName);
- }
-
- removeBindingInMemory(removed);
- }
- finally
- {
- bindingsLock.writeLock().release();
- }
-
Queue queue = removed.queue;
Condition condition = removed.condition;
@@ -528,7 +487,7 @@
UnbindRequest request = new UnbindRequest(info, allNodes);
- syncSendRequest(request);
+ groupMember.multicastControl(request, sync);
}
}
@@ -559,7 +518,7 @@
throw new IllegalArgumentException("Cannot request clustered queues on non clustered post office");
}
- bindingsLock.readLock().acquire();
+ lock.readLock().acquire();
try
{
@@ -592,7 +551,7 @@
}
finally
{
- bindingsLock.readLock().release();
+ lock.readLock().release();
}
}
@@ -603,23 +562,30 @@
throw new IllegalArgumentException("Queue name is null");
}
- bindingsLock.readLock().acquire();
+ lock.readLock().acquire();
try
{
- Binding binding = (Binding)nameMap.get(queueName);
-
- return binding;
+ if (localNameMap != null)
+ {
+ Binding binding = (Binding)localNameMap.get(queueName);
+
+ return binding;
+ }
+ else
+ {
+ return null;
+ }
}
finally
{
- bindingsLock.readLock().release();
+ lock.readLock().release();
}
}
public Binding getBindingForChannelID(long channelID) throws Exception
{
- bindingsLock.readLock().acquire();
+ lock.readLock().acquire();
try
{
@@ -629,7 +595,7 @@
}
finally
{
- bindingsLock.readLock().release();
+ lock.readLock().release();
}
}
@@ -709,14 +675,14 @@
replicatedData = copyReplicatedData(state.getReplicatedData());
}
- nodeIDAddressMap = new ConcurrentHashMap(state.getNodeIDAddressMap());
+ nodeIDAddressMap = new HashMap(state.getNodeIDAddressMap());
}
public byte[] getState() throws Exception
{
List list = new ArrayList();
- bindingsLock.readLock().acquire();
+ lock.readLock().acquire();
try
{
@@ -751,7 +717,7 @@
}
finally
{
- bindingsLock.readLock().release();
+ lock.readLock().release();
}
//Need to copy
@@ -763,7 +729,7 @@
copy = copyReplicatedData(replicatedData);
}
- SharedState state = new SharedState(list, copy, new HashMap(nodeIDAddressMap));
+ SharedState state = new SharedState(list, copy, new ConcurrentHashMap(nodeIDAddressMap));
return StreamUtils.toBytes(state);
}
@@ -799,18 +765,25 @@
if (crashed)
{
// Need to evaluate this before we regenerate the failover map
- Integer failoverNode = (Integer)failoverMap.get(leftNodeID);
+ Integer failoverNode;
+
+ synchronized (failoverMap)
+ {
+ failoverNode = (Integer)failoverMap.get(leftNodeID);
+ }
if (failoverNode == null)
{
throw new IllegalStateException(this + " cannot find failover node for node " + leftNodeID);
}
+
+ log.debug(this + " the failover node for the crashed node is " + failoverNode);
if (thisNodeID == failoverNode.intValue())
{
// The node crashed and we are the failover node so let's perform failover
- log.info(this + ": I am the failover node for node " + leftNodeID + " that crashed");
+ log.debug(this + ": I am the failover node for node " + leftNodeID + " that crashed");
performFailover(leftNodeID);
}
@@ -855,36 +828,51 @@
Condition condition = conditionFactory.createCondition(mapping.getConditionText());
+ addBindingInMemory(new Binding(condition, queue));
- bindingsLock.writeLock().acquire();
-
- try
- {
- addBindingInMemory(new Binding(condition, queue));
- }
- finally
- {
- bindingsLock.writeLock().release();
- }
-
if (allNodes)
{
if (trace) { log.trace("allNodes is true, so also forcing a local bind"); }
- //Also bind locally
-
- long channelID = channelIDManager.getID();
+ //There is the possibility that two nodes send a bind all with the same name simultaneously OR
+ //a node starts and sends a bind "ALL" and the other nodes already have a queue with that name
+ //This is ok - but we must check for this and not create the local binding in this case
- Queue queue2 = new MessagingQueue(thisNodeID, mapping.getQueueName(), channelID, ms, pm,
+ lock.readLock().acquire();
+
+ Queue queue2 = null;
+
+ try
+ {
+ if (localNameMap != null && localNameMap.get(mapping.getQueueName()) != null)
+ {
+ //Already exists - don't create it again!
+ }
+ else
+ {
+ //Bind locally
+
+ long channelID = channelIDManager.getID();
+
+ queue2 = new MessagingQueue(thisNodeID, mapping.getQueueName(), channelID, ms, pm,
mapping.isRecoverable(), mapping.getMaxSize(), filter,
mapping.getFullSize(), mapping.getPageSize(), mapping.getDownCacheSize(), true,
mapping.isPreserveOrdering());
-
- addBinding(new Binding(condition, queue2), false);
+
+ internalAddBinding(new Binding(condition, queue2), false, false);
+ }
+ }
+ finally
+ {
+ lock.readLock().release();
+ }
- queue2.load();
-
- queue2.activate();
+ if (queue2 != null)
+ {
+ queue2.load();
+
+ queue2.activate();
+ }
}
}
@@ -901,29 +889,7 @@
throw new IllegalStateException("Don't know about node id: " + mapping.getNodeId());
}
- Filter filter = null;
-
- if (mapping.getFilterString() != null)
- {
- filter = filterFactory.createFilter(mapping.getFilterString());
- }
-
- Queue queue = new MessagingQueue(mapping.getNodeId(), mapping.getQueueName(), mapping.getChannelId(),
- mapping.isRecoverable(), filter, mapping.isClustered());
-
- Condition condition = conditionFactory.createCondition(mapping.getConditionText());
-
-
- bindingsLock.writeLock().acquire();
-
- try
- {
- removeBindingInMemory(new Binding(condition, queue));
- }
- finally
- {
- bindingsLock.writeLock().release();
- }
+ removeBindingInMemory(mapping.getNodeId(), mapping.getQueueName());
if (allNodes)
{
@@ -939,10 +905,7 @@
{
//No need to remove the nodeid-address map info, this will be removed when data cleaned for node
- synchronized (leftSet)
- {
- leftSet.add(new Integer(nodeId));
- }
+ leftSet.add(new Integer(nodeId));
//We don't update the failover map here since this doesn't get called if the node crashed
}
@@ -951,7 +914,7 @@
{
nodeIDAddressMap.put(new Integer(nodeId), info);
- log.info(this + " handleNodeJoined: " + nodeId + " size: " + nodeIDAddressMap.size());
+ log.debug(this + " handleNodeJoined: " + nodeId + " size: " + nodeIDAddressMap.size());
calculateFailoverMap();
@@ -1065,7 +1028,7 @@
PutReplicantRequest request = new PutReplicantRequest(thisNodeID, key, replicant);
- syncSendRequest(request);
+ groupMember.multicastControl(request, true);
}
public Map get(Serializable key) throws Exception
@@ -1084,7 +1047,7 @@
{
RemoveReplicantRequest request = new RemoveReplicantRequest(this.thisNodeID, key);
- syncSendRequest(request);
+ groupMember.multicastControl(request, true);
return true;
}
@@ -1094,11 +1057,6 @@
}
}
- public FailoverMapper getFailoverMapper()
- {
- return failoverMapper;
- }
-
// JDBCSupport overrides ------------------------------------------------------------------------
protected Map getDefaultDMLStatements()
@@ -1113,7 +1071,8 @@
"CONDITION, " +
"SELECTOR, " +
"CHANNEL_ID, " +
- "CLUSTERED) " +
+ "CLUSTERED, " +
+ "ALL_NODES) " +
"VALUES (?, ?, ?, ?, ?, ?, ?)");
map.put("DELETE_BINDING",
@@ -1125,7 +1084,8 @@
"CONDITION, " +
"SELECTOR, " +
"CHANNEL_ID, " +
- "CLUSTERED " +
+ "CLUSTERED, " +
+ "ALL_NODES " +
"FROM JBM_POSTOFFICE WHERE POSTOFFICE_NAME=? AND NODE_ID=?");
return map;
@@ -1138,7 +1098,7 @@
"CREATE TABLE JBM_POSTOFFICE (POSTOFFICE_NAME VARCHAR(255), NODE_ID INTEGER," +
"QUEUE_NAME VARCHAR(1023), CONDITION VARCHAR(1023), " +
"SELECTOR VARCHAR(1023), CHANNEL_ID BIGINT, " +
- "CLUSTERED CHAR(1))");
+ "CLUSTERED CHAR(1), ALL_NODES CHAR(1))");
return map;
}
@@ -1263,32 +1223,23 @@
private Collection getBindings(String queueName) throws Exception
{
- bindingsLock.readLock().acquire();
+ lock.readLock().acquire();
try
{
- Iterator iter = this.mappings.entrySet().iterator();
+ Iterator iter = this.nameMaps.values().iterator();
List bindings = new ArrayList();
while (iter.hasNext())
{
- Map.Entry entry = (Map.Entry)iter.next();
+ Map nameMap = (Map)iter.next();
- Condition condition = (Condition)entry.getKey();
+ Binding binding = (Binding)nameMap.get(queueName);
- List queues = (List)entry.getValue();
-
- Iterator iter2 = queues.iterator();
-
- while (iter2.hasNext())
+ if (binding != null)
{
- Queue queue = (Queue)iter2.next();
-
- if (queueName == null || queue.getName().equals(queueName))
- {
- bindings.add(new Binding(condition, queue));
- }
+ bindings.add(binding);
}
}
@@ -1296,10 +1247,9 @@
}
finally
{
- bindingsLock.readLock().release();
+ lock.readLock().release();
}
}
-
private boolean routeInternal(MessageReference ref, Condition condition, Transaction tx, boolean fromCluster) throws Exception
{
@@ -1309,7 +1259,7 @@
boolean routed = false;
- bindingsLock.readLock().acquire();
+ lock.readLock().acquire();
try
{
@@ -1331,10 +1281,14 @@
{
Queue queue = (Queue)iter.next();
+ if (trace) { log.trace(this + " considering queue " + queue); }
+
//TODO optimise this
if (queue.getNodeID() == thisNodeID)
{
+ if (trace) { log.trace(this + " is a local queue"); }
+
//Local queue
//TODO - There is a slight kludge here -
@@ -1365,6 +1319,8 @@
{
//Remote queue
+ if (trace) { log.trace(this + " is a remote queue"); }
+
if (!queue.isRecoverable())
{
//When we send to the cluster we never send to reliable queues
@@ -1379,8 +1335,14 @@
}
remoteSet.add(new Integer(queue.getNodeID()));
+
+ if (trace) { log.trace(this + " added it to the remote set for casting"); }
}
}
+ else
+ {
+ if (trace) { log.trace(this + " is recoverable so not casting"); }
+ }
}
}
@@ -1393,11 +1355,13 @@
ClusterRequest request = new MessageRequest(condition.toText(), ref.getMessage());
+ if (trace) { log.trace(this + " casting message to other node(s)"); }
+
if (remoteSet.size() == 1)
{
//Only one node requires the message, so we can unicast
- unicastRequest(request, thisNodeID);
+ unicastRequest(request, ((Integer)remoteSet.iterator().next()).intValue());
}
else
{
@@ -1456,96 +1420,149 @@
}
finally
{
- bindingsLock.readLock().release();
+ lock.readLock().release();
}
return routed;
}
- private void removeBindingInMemory(Binding binding)
+ private Binding removeBindingInMemory(int nodeID, String queueName) throws Exception
{
- Queue queue = binding.queue;
+ lock.writeLock().acquire();
- Condition condition = binding.condition;
-
- if (queue.getNodeID() == this.thisNodeID)
+ try
{
- Binding b = (Binding)nameMap.remove(queue.getName());
+ Integer nid = new Integer(nodeID);
- if (b == null)
- {
- throw new IllegalStateException("Cannot find binding in name map for queue " + queue.getName());
- }
-
- b = (Binding)channelIDMap.remove(new Long(queue.getChannelID()));
-
- if (b == null)
- {
- throw new IllegalStateException("Cannot find binding in channel id map for queue " + queue.getName());
- }
- }
-
- List queues = (List)mappings.get(condition);
+ Map nameMap = (Map)this.nameMaps.get(nid);
+
+ if (nameMap == null)
+ {
+ throw new IllegalArgumentException("Cannot find name maps for node " + nodeID);
+ }
+
+ Binding binding = (Binding)nameMap.remove(queueName);
+
+ if (binding == null)
+ {
+ throw new IllegalArgumentException("Cannot find binding for queue name " + queueName);
+ }
+
+ if (nameMap.isEmpty())
+ {
+ nameMaps.remove(nid);
+
+ if (nodeID == thisNodeID)
+ {
+ localNameMap = null;
+ }
+ }
+
+ binding = (Binding)channelIDMap.remove(new Long(binding.queue.getChannelID()));
+
+ if (binding == null)
+ {
+ throw new IllegalStateException("Cannot find binding in channel id map for queue " + queueName);
+ }
+
+ List queues = (List)mappings.get(binding.condition);
+
+ if (queues == null)
+ {
+ throw new IllegalStateException("Cannot find queues in condition map for condition " + binding.condition);
+ }
- if (queues == null)
- {
- throw new IllegalStateException("Cannot find queues in condition map for condition " + condition);
- }
-
- boolean removed = queues.remove(queue);
-
- if (!removed)
- {
- throw new IllegalStateException("Cannot find queue in list for queue " + queue.getName());
- }
-
- if (queues.isEmpty())
- {
- mappings.remove(condition);
- }
-
- // Send a notification
- ClusterNotification notification = new ClusterNotification(ClusterNotification.TYPE_UNBIND, queue.getNodeID(), queue.getName());
-
- clusterNotifier.sendNotification(notification);
+ boolean removed = queues.remove(binding.queue);
+
+ if (!removed)
+ {
+ throw new IllegalStateException("Cannot find queue in list for queue " + queueName);
+ }
+
+ if (queues.isEmpty())
+ {
+ mappings.remove(binding.condition);
+ }
+
+ // Send a notification
+ ClusterNotification notification = new ClusterNotification(ClusterNotification.TYPE_UNBIND, nodeID, queueName);
+
+ clusterNotifier.sendNotification(notification);
+
+ return binding;
+ }
+ finally
+ {
+ lock.writeLock().release();
+ }
}
- private void addBindingInMemory(Binding binding)
+ private void addBindingInMemory(Binding binding) throws Exception
{
Queue queue = binding.queue;
+
+ lock.writeLock().acquire();
- //The name map and the channel id map only hold the bindings for the *current* node
+ if (trace) { log.trace(this + " Adding binding in memory " + binding); }
- if (queue.getNodeID() == this.thisNodeID)
- {
- if (nameMap.get(queue.getName()) != null)
+ try
+ {
+ Integer nid = new Integer(queue.getNodeID());
+
+ Map nameMap = (Map)this.nameMaps.get(nid);
+
+ if (nameMap != null && nameMap.containsKey(queue.getName()))
+ {
+ throw new IllegalArgumentException("Name map for node " + nid + " already contains binding for queue " + queue.getName());
+ }
+
+ Long cid = new Long(queue.getChannelID());
+
+ if (channelIDMap.containsKey(cid))
+ {
+ throw new IllegalArgumentException("Channel id map for node " + nid + " already contains binding for queue " + cid);
+ }
+
+ if (nameMap == null)
+ {
+ nameMap = new HashMap();
+
+ nameMaps.put(nid, nameMap);
+
+ if (queue.getNodeID() == thisNodeID)
+ {
+ localNameMap = nameMap;
+ }
+ }
+
+ nameMap.put(queue.getName(), binding);
+
+ channelIDMap.put(cid, binding);
+
+ Condition condition = binding.condition;
+
+ List queues = (List)mappings.get(condition);
+
+ if (queues == null)
{
- throw new IllegalArgumentException("Cannot bind queue, it is already bound " + queue.getName());
+ queues = new ArrayList();
+
+ if (queues.contains(queue))
+ {
+ throw new IllegalArgumentException("Queue is already bound with condition " + condition);
+ }
+
+ mappings.put(condition, queues);
}
- log.info("*********** putting queue name " + queue.getName() + " in map");
- nameMap.put(queue.getName(), binding);
-
- channelIDMap.put(new Long(queue.getChannelID()), binding);
+ queues.add(queue);
}
-
- Condition condition = binding.condition;
-
- List queues = (List)mappings.get(condition);
-
- if (queues == null)
+ finally
{
- queues = new ArrayList();
-
- if (queues.contains(queue))
- {
- throw new IllegalArgumentException("Queue is already bound with condition " + condition);
- }
-
- mappings.put(condition, queues);
+ lock.writeLock().release();
}
- queues.add(queue);
+ if (trace) { log.trace(this + " Sending cluster notification"); }
//Send a notification
ClusterNotification notification = new ClusterNotification(ClusterNotification.TYPE_BIND, queue.getNodeID(), queue.getName());
@@ -1554,30 +1571,20 @@
}
/*
- * Multicast a message to all members of the group
+ * Multicast a message on the data channel to all members of the group
*/
private void multicastRequest(ClusterRequest request) throws Exception
{
- if (stopping)
- {
- return;
- }
-
if (trace) { log.trace(this + " Unicasting request " + request); }
- groupMember.multicastRequest(request);
+ groupMember.multicastData(request);
}
/*
- * Unicast a message to one member of the group
+ * Unicast a message on the data channel to one member of the group
*/
private void unicastRequest(ClusterRequest request, int nodeId) throws Exception
{
- if (stopping)
- {
- return;
- }
-
Address address = this.getAddressForNodeId(nodeId, false);
if (address == null)
@@ -1587,12 +1594,12 @@
if (trace) { log.trace(this + "Unicasting request " + request + " to node " + nodeId); }
- groupMember.unicastRequest(request, address);
+ groupMember.unicastData(request, address);
}
private void loadBindingsFromStorage() throws Exception
{
- bindingsLock.writeLock().acquire();
+ lock.writeLock().acquire();
Connection conn = null;
PreparedStatement ps = null;
@@ -1626,6 +1633,8 @@
boolean bindingClustered = rs.getString(5).equals("Y");
+ boolean allNodes = rs.getString(6).equals("Y");
+
//If the node is not clustered then we load the bindings as non clustered
Filter filter = null;
@@ -1655,9 +1664,9 @@
queue.getMaxSize(),
queue.isPreserveOrdering());
- ClusterRequest request = new BindRequest(info, false);
+ ClusterRequest request = new BindRequest(info, allNodes);
- syncSendRequest(request);
+ groupMember.multicastControl(request, false);
}
}
}
@@ -1668,7 +1677,7 @@
}
finally
{
- bindingsLock.writeLock().release();
+ lock.writeLock().release();
closeResultSet(rs);
@@ -1680,7 +1689,7 @@
}
}
- private void insertBindingInStorage(Condition condition, Queue queue) throws Exception
+ private void insertBindingInStorage(Condition condition, Queue queue, boolean allNodes) throws Exception
{
Connection conn = null;
PreparedStatement ps = null;
@@ -1714,6 +1723,14 @@
{
ps.setString(7, "N");
}
+ if (allNodes)
+ {
+ ps.setString(8, "Y");
+ }
+ else
+ {
+ ps.setString(8, "N");
+ }
ps.executeUpdate();
}
@@ -1765,10 +1782,7 @@
private boolean leaveMessageReceived(Integer nodeId) throws Exception
{
- synchronized (leftSet)
- {
- return leftSet.remove(nodeId);
- }
+ return leftSet.remove(nodeId);
}
/*
@@ -1778,7 +1792,7 @@
{
log.debug(this + " cleaning data for node " + nodeToRemove);
- bindingsLock.writeLock().acquire();
+ lock.writeLock().acquire();
log.info("** cleaning data for node " + nodeToRemove);
@@ -1821,12 +1835,12 @@
{
Binding binding = (Binding)iter.next();
- removeBindingInMemory(binding);
+ removeBindingInMemory(nodeToRemove.intValue(), binding.queue.getName());
}
}
finally
{
- bindingsLock.writeLock().release();
+ lock.writeLock().release();
}
Map toNotify = new HashMap();
@@ -1872,21 +1886,6 @@
}
}
- /*
- * Multicast a sync request
- */
- private void syncSendRequest(ClusterRequest request) throws Exception
- {
- if (stopping)
- {
- return;
- }
-
- if (trace) { log.trace(this + " sending sync request " + request); }
-
- groupMember.sendSyncRequest(request);
- }
-
//TODO - can optimise this with a reverse map
private Integer getNodeIDForSyncAddress(Address address) throws Exception
{
@@ -1900,7 +1899,7 @@
PostOfficeAddressInfo info = (PostOfficeAddressInfo)entry.getValue();
- if (info.getSyncChannelAddress().equals(address))
+ if (info.getControlChannelAddress().equals(address))
{
nodeID = (Integer)entry.getKey();
@@ -1950,11 +1949,11 @@
}
else if (sync)
{
- return info.getSyncChannelAddress();
+ return info.getControlChannelAddress();
}
else
{
- return info.getAsyncChannelAddress();
+ return info.getDataChannelAddress();
}
}
@@ -1978,7 +1977,7 @@
log.debug(this + " announced it is starting failover procedure");
// Need to lock
- bindingsLock.writeLock().acquire();
+ lock.writeLock().acquire();
try
{
@@ -2032,7 +2031,7 @@
}
//Remove from the in-memory map
- removeBindingInMemory(binding);
+ removeBindingInMemory(binding.queue.getNodeID(), binding.queue.getName());
//Delete from storage
deleteBindingFromStorage(queue);
@@ -2096,13 +2095,11 @@
}
}
- log.debug(this + " finished failing over destinations");
-
- log.info(this + ": server side fail over is now complete");
+ log.debug(this + ": server side fail over is now complete");
}
finally
{
- bindingsLock.writeLock().release();
+ lock.writeLock().release();
}
log.debug(this + " announcing that failover procedure is complete");
Modified: trunk/src/main/org/jboss/messaging/core/impl/postoffice/PostOfficeAddressInfo.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/impl/postoffice/PostOfficeAddressInfo.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/impl/postoffice/PostOfficeAddressInfo.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -51,8 +51,8 @@
// Attributes ----------------------------------------------------
- private Address syncChannelAddress;
- private Address asyncChannelAddress;
+ private Address controlChannelAddress;
+ private Address dataChannelAddress;
// Constructors --------------------------------------------------
@@ -60,40 +60,40 @@
{
}
- PostOfficeAddressInfo(Address syncChannelAddress, Address asyncChannelAddress)
+ PostOfficeAddressInfo(Address controlChannelAddress, Address dataChannelAddress)
{
- this.syncChannelAddress = syncChannelAddress;
- this.asyncChannelAddress = asyncChannelAddress;
+ this.controlChannelAddress = controlChannelAddress;
+ this.dataChannelAddress = dataChannelAddress;
}
// Streamable implementation -------------------------------------
public void read(DataInputStream in) throws Exception
{
- syncChannelAddress = new IpAddress();
+ controlChannelAddress = new IpAddress();
- syncChannelAddress.readFrom(in);
+ controlChannelAddress.readFrom(in);
- asyncChannelAddress = new IpAddress();
+ dataChannelAddress = new IpAddress();
- asyncChannelAddress.readFrom(in);
+ dataChannelAddress.readFrom(in);
}
public void write(DataOutputStream out) throws Exception
{
- if (!(syncChannelAddress instanceof IpAddress))
+ if (!(controlChannelAddress instanceof IpAddress))
{
throw new IllegalStateException("Address must be IpAddress");
}
- if (!(asyncChannelAddress instanceof IpAddress))
+ if (!(dataChannelAddress instanceof IpAddress))
{
throw new IllegalStateException("Address must be IpAddress");
}
- syncChannelAddress.writeTo(out);
+ controlChannelAddress.writeTo(out);
- asyncChannelAddress.writeTo(out);
+ dataChannelAddress.writeTo(out);
}
// Public --------------------------------------------------------
@@ -101,8 +101,8 @@
public String toString()
{
StringBuffer sb = new StringBuffer("[");
- sb.append("synch addr ").append(syncChannelAddress);
- sb.append(", asynch addr ").append(asyncChannelAddress);
+ sb.append("synch addr ").append(controlChannelAddress);
+ sb.append(", asynch addr ").append(dataChannelAddress);
sb.append("]");
return sb.toString();
@@ -111,14 +111,14 @@
// Package protected ---------------------------------------------
- Address getSyncChannelAddress()
+ Address getControlChannelAddress()
{
- return syncChannelAddress;
+ return controlChannelAddress;
}
- Address getAsyncChannelAddress()
+ Address getDataChannelAddress()
{
- return asyncChannelAddress;
+ return dataChannelAddress;
}
// Protected -----------------------------------------------------
Modified: trunk/src/main/org/jboss/messaging/core/jmx/MessagingPostOfficeService.java
===================================================================
--- trunk/src/main/org/jboss/messaging/core/jmx/MessagingPostOfficeService.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/src/main/org/jboss/messaging/core/jmx/MessagingPostOfficeService.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -73,15 +73,15 @@
private boolean started;
// This group of properties is used on JGroups Channel configuration
- private Element syncChannelConfig;
+ private Element controlChannelConfig;
- private Element asyncChannelConfig;
+ private Element dataChannelConfig;
private ObjectName channelFactoryName;
- private String syncChannelName;
+ private String controlChannelName;
- private String asyncChannelName;
+ private String dataChannelName;
private String channelPartitionName;
@@ -176,34 +176,34 @@
this.channelFactoryName = channelFactoryName;
}
- public String getSyncChannelName()
+ public String getControlChannelName()
{
- return syncChannelName;
+ return controlChannelName;
}
- public void setSyncChannelName(String syncChannelName)
+ public void setControlChannelName(String controlChannelName)
{
if (started)
{
log.warn("Cannot set attribute when service is started");
return;
}
- this.syncChannelName = syncChannelName;
+ this.controlChannelName = controlChannelName;
}
- public String getAsyncChannelName()
+ public String getDataChannelName()
{
- return asyncChannelName;
+ return dataChannelName;
}
- public void setAsyncChannelName(String asyncChannelName)
+ public void setDataChannelName(String dataChannelName)
{
if (started)
{
log.warn("Cannot set attribute when service is started");
return;
}
- this.asyncChannelName = asyncChannelName;
+ this.dataChannelName = dataChannelName;
}
public String getChannelPartitionName()
@@ -221,34 +221,34 @@
this.channelPartitionName = channelPartitionName;
}
- public void setSyncChannelConfig(Element config) throws Exception
+ public void setControlChannelConfig(Element config) throws Exception
{
if (started)
{
log.warn("Cannot set attribute when service is started");
return;
}
- syncChannelConfig = config;
+ controlChannelConfig = config;
}
- public Element getSyncChannelConfig()
+ public Element getControlChannelConfig()
{
- return syncChannelConfig;
+ return controlChannelConfig;
}
- public void setAsyncChannelConfig(Element config) throws Exception
+ public void setDataChannelConfig(Element config) throws Exception
{
if (started)
{
log.warn("Cannot set attribute when service is started");
return;
}
- asyncChannelConfig = config;
+ dataChannelConfig = config;
}
- public Element getAsyncChannelConfig()
+ public Element getDataChannelConfig()
{
- return asyncChannelConfig;
+ return dataChannelConfig;
}
public void setStateTimeout(long timeout)
@@ -377,18 +377,18 @@
jChannelFactory =
new MultiplexerJChannelFactory(server, channelFactoryName, channelPartitionName,
- syncChannelName, asyncChannelName);
+ controlChannelName, dataChannelName);
}
else
{
log.debug(this + " uses XMLJChannelFactory");
- jChannelFactory = new XMLJChannelFactory(syncChannelConfig, asyncChannelConfig);
+ jChannelFactory = new XMLJChannelFactory(controlChannelConfig, dataChannelConfig);
}
}
else
{
log.debug(this + " uses XMLJChannelFactory");
- jChannelFactory = new XMLJChannelFactory(syncChannelConfig, asyncChannelConfig);
+ jChannelFactory = new XMLJChannelFactory(controlChannelConfig, dataChannelConfig);
}
if (clustered)
Added: trunk/src/main/org/jboss/messaging/util/ConcurrentHashSet.java
===================================================================
--- trunk/src/main/org/jboss/messaging/util/ConcurrentHashSet.java (rev 0)
+++ trunk/src/main/org/jboss/messaging/util/ConcurrentHashSet.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -0,0 +1,87 @@
+/*
+ * JBoss, Home of Professional Open Source
+ * Copyright 2005, JBoss Inc., and individual contributors as indicated
+ * by the @authors tag. See the copyright.txt in the distribution for a
+ * full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.messaging.util;
+
+import java.util.AbstractSet;
+import java.util.Iterator;
+import java.util.Map;
+
+import EDU.oswego.cs.dl.util.concurrent.ConcurrentHashMap;
+
+/**
+ *
+ * A ConcurrentHashSet.
+ *
+ * Offers same concurrency as ConcurrentHashMap but for a Set
+ *
+ * @author <a href="tim.fox at jboss.com">Tim Fox</a>
+ * @version <tt>$Revision: 1935 $</tt>
+ *
+ * $Id: ConcurrentReaderHashSet.java 1935 2007-01-09 23:29:20Z clebert.suconic at jboss.com $
+ */
public class ConcurrentHashSet extends AbstractSet
{
   // Backing map: every element of the set is stored as a key, all mapped
   // to the single shared "dummy" marker value.
   private final Map theMap;

   // Shared marker value. Since this is the only value ever put into the map,
   // a previous-value of dummy from put/remove means "the key was present".
   private static final Object dummy = new Object();

   public ConcurrentHashSet()
   {
      theMap = new ConcurrentHashMap();
   }

   public int size()
   {
      return theMap.size();
   }

   public Iterator iterator()
   {
      // Iterates the keys, i.e. the set's elements.
      return theMap.keySet().iterator();
   }

   public boolean isEmpty()
   {
      return theMap.isEmpty();
   }

   /**
    * Adds the element to the set.
    *
    * @return <code>true</code> if the set did not already contain <code>o</code>,
    *         as required by the <code>Set.add</code> contract.
    */
   public boolean add(Object o)
   {
      // Map.put returns the PREVIOUS value, which is null when the key was
      // absent. (Comparing against dummy here would invert the result:
      // false for a new element, true for a duplicate.)
      return theMap.put(o, dummy) == null;
   }

   public boolean contains(Object o)
   {
      return theMap.containsKey(o);
   }

   public void clear()
   {
      theMap.clear();
   }

   /**
    * Removes the element from the set.
    *
    * @return <code>true</code> if the set contained <code>o</code>.
    */
   public boolean remove(Object o)
   {
      // remove returns the previous value - dummy iff the key was present,
      // since dummy is the only value ever stored.
      return theMap.remove(o) == dummy;
   }

}
Modified: trunk/tests/build.xml
===================================================================
--- trunk/tests/build.xml 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/build.xml 2007-06-27 20:23:20 UTC (rev 2808)
@@ -128,7 +128,7 @@
JDBC Drivers.
-->
<path id="any.jdbc.driver.classpath">
- <fileset dir="${tests.root}/lib/jdbc-drivers" includes="*.jar"/>
+ <fileset dir="${tests.root}/lib/jdbc-drivers" includes="*.jar"/>
</path>
Modified: trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/ClusteredPersistenceServiceConfigFileJChannelFactory.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/ClusteredPersistenceServiceConfigFileJChannelFactory.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/ClusteredPersistenceServiceConfigFileJChannelFactory.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -47,8 +47,8 @@
private JChannelFactory multiplexorDelegate;
// ... or just plain XML configuration.
- private Element syncConfig;
- private Element asyncConfig;
+ private Element controlConfig;
+ private Element dataConfig;
// Constructors ---------------------------------------------------------------------------------
@@ -73,27 +73,27 @@
// JChannelFactory ------------------------------------------------------------------------------
- public JChannel createSyncChannel() throws Exception
+ public JChannel createControlChannel() throws Exception
{
if (multiplexorDelegate != null)
{
- return multiplexorDelegate.createSyncChannel();
+ return multiplexorDelegate.createControlChannel();
}
else
{
- return new JChannel(syncConfig);
+ return new JChannel(controlConfig);
}
}
- public JChannel createASyncChannel() throws Exception
+ public JChannel createDataChannel() throws Exception
{
if (multiplexorDelegate != null)
{
- return multiplexorDelegate.createASyncChannel();
+ return multiplexorDelegate.createDataChannel();
}
else
{
- return new JChannel(asyncConfig);
+ return new JChannel(dataConfig);
}
}
@@ -134,18 +134,18 @@
throw new IllegalStateException("Cannot find ChannelPartitionName");
}
- String syncChannelName = (String)postOfficeConfig.getAttributeValue("SyncChannelName");
+ String controlChannelName = (String)postOfficeConfig.getAttributeValue("ControlChannelName");
- if (syncChannelName == null)
+ if (controlChannelName == null)
{
- throw new IllegalStateException("Cannot find SyncChannelName");
+ throw new IllegalStateException("Cannot find ControlChannelName");
}
- String asyncChannelName = (String)postOfficeConfig.getAttributeValue("AsyncChannelName");
+ String dataChannelName = (String)postOfficeConfig.getAttributeValue("DataChannelName");
- if (asyncChannelName == null)
+ if (dataChannelName == null)
{
- throw new IllegalStateException("Cannot find AsyncChannelName");
+ throw new IllegalStateException("Cannot find DataChannelName");
}
try
@@ -154,8 +154,8 @@
{
multiplexorDelegate =
new MultiplexerJChannelFactory(mbeanServer, channelFactoryName,
- channelPartitionName, syncChannelName,
- asyncChannelName);
+ channelPartitionName, controlChannelName,
+ dataChannelName);
// initialization ends here, we've found what we were looking for
return;
@@ -170,23 +170,23 @@
// the only chance now is to use the XML configurations
- s = (String)postOfficeConfig.getAttributeValue("SyncChannelConfig");
+ s = (String)postOfficeConfig.getAttributeValue("ControlChannelConfig");
if (s == null)
{
- throw new IllegalStateException("Cannot find SyncChannelConfig");
+ throw new IllegalStateException("Cannot find ControlChannelConfig");
}
- syncConfig = XMLUtil.stringToElement(s);
+ controlConfig = XMLUtil.stringToElement(s);
- s = (String)postOfficeConfig.getAttributeValue("AsyncChannelConfig");
+ s = (String)postOfficeConfig.getAttributeValue("DataChannelConfig");
if (s == null)
{
- throw new IllegalStateException("Cannot find AsyncChannelConfig");
+ throw new IllegalStateException("Cannot find DataChannelConfig");
}
- asyncConfig = XMLUtil.stringToElement(s);
+ dataConfig = XMLUtil.stringToElement(s);
}
// Inner classes --------------------------------------------------------------------------------
Modified: trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/SimpleJChannelFactory.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/SimpleJChannelFactory.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/src/org/jboss/test/messaging/core/plugin/postoffice/SimpleJChannelFactory.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -54,12 +54,12 @@
// JChannelFactory ------------------------------------------------------------------------------
- public JChannel createSyncChannel() throws Exception
+ public JChannel createControlChannel() throws Exception
{
return new JChannel(syncConfig);
}
- public JChannel createASyncChannel() throws Exception
+ public JChannel createDataChannel() throws Exception
{
return new JChannel(asyncConfig);
}
Deleted: trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedDestinationsTest.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedDestinationsTest.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedDestinationsTest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -1,1419 +0,0 @@
-/*
- * JBoss, Home of Professional Open Source
- * Copyright 2005, JBoss Inc., and individual contributors as indicated
- * by the @authors tag. See the copyright.txt in the distribution for a
- * full listing of individual contributors.
- *
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * This software is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.test.messaging.jms.clustering;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import javax.jms.Connection;
-import javax.jms.DeliveryMode;
-import javax.jms.Message;
-import javax.jms.MessageConsumer;
-import javax.jms.MessageListener;
-import javax.jms.MessageProducer;
-import javax.jms.Session;
-import javax.jms.TextMessage;
-
-import org.jboss.test.messaging.jms.clustering.base.ClusteringTestBase;
-
-/**
- *
- * A DistributedDestinationsTest
- *
- * @author <a href="mailto:tim.fox at jboss.com">Tim Fox</a>
- * @version <tt>$Revision$</tt>
- *
- * $Id$
- *
- */
-public class DistributedDestinationsTest extends ClusteringTestBase
-{
-
- // Constants -----------------------------------------------------
-
- // Static --------------------------------------------------------
-
- // Attributes ----------------------------------------------------
-
- // Constructors --------------------------------------------------
-
- public DistributedDestinationsTest(String name)
- {
- super(name);
- }
-
- // Public --------------------------------------------------------
-
- public void testClusteredQueueNonPersistent() throws Exception
- {
- clusteredQueue(false);
- }
-
- public void testClusteredQueuePersistent() throws Exception
- {
- clusteredQueue(true);
- }
-
- public void testClusteredTopicNonDurableNonPersistent() throws Exception
- {
- clusteredTopicNonDurable(false);
- }
-
- public void testClusteredTopicNonDurablePersistent() throws Exception
- {
- clusteredTopicNonDurable(true);
- }
-
- public void testClusteredTopicNonDurableWithSelectorsNonPersistent() throws Exception
- {
- clusteredTopicNonDurableWithSelectors(false);
- }
-
- public void testClusteredTopicNonDurableWithSelectorsPersistent() throws Exception
- {
- clusteredTopicNonDurableWithSelectors(true);
- }
-
- public void testClusteredTopicDurableNonPersistent() throws Exception
- {
- clusteredTopicDurable(false);
- }
-
- public void testClusteredTopicDurablePersistent() throws Exception
- {
- clusteredTopicDurable(true);
- }
-
- public void testClusteredTopicSharedDurableLocalConsumerNonPersistent() throws Exception
- {
- clusteredTopicSharedDurableLocalConsumer(false);
- }
-
- public void testClusteredTopicSharedDurableLocalConsumerPersistent() throws Exception
- {
- clusteredTopicSharedDurableLocalConsumer(true);
- }
-
- public void testClusteredTopicSharedDurableNoLocalSubNonPersistent() throws Exception
- {
- clusteredTopicSharedDurableNoLocalSub(false);
- }
-
- public void testClusteredTopicSharedDurableNoLocalSubPersistent() throws Exception
- {
- clusteredTopicSharedDurableNoLocalSub(true);
- }
-
- // Package protected ---------------------------------------------
-
- // Protected -----------------------------------------------------
-
- protected void setUp() throws Exception
- {
- nodeCount = 3;
-
- super.setUp();
-
- log.debug("setup done");
- }
-
- protected void tearDown() throws Exception
- {
- super.tearDown();
- }
-
- protected void clusteredQueue(boolean persistent) throws Exception
- {
- Connection conn0 = null;
- Connection conn1 = null;
- Connection conn2 = null;
-
- try
- {
- //This will create 3 different connection on 3 different nodes, since
- //the cf is clustered
- conn0 = cf.createConnection();
- conn1 = cf.createConnection();
- conn2 = cf.createConnection();
-
- log.info("Created connections");
-
- checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
-
- Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- log.info("Created sessions");
-
- MessageConsumer cons0 = sess0.createConsumer(queue[0]);
- MessageConsumer cons1 = sess1.createConsumer(queue[1]);
- MessageConsumer cons2 = sess2.createConsumer(queue[2]);
-
- log.info("Created consumers");
-
- conn0.start();
- conn1.start();
- conn2.start();
-
- // Send at node 0
-
- MessageProducer prod0 = sess0.createProducer(queue[0]);
-
- prod0.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- final int NUM_MESSAGES = 100;
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess0.createTextMessage("message" + i);
-
- prod0.send(tm);
- }
-
- log.info("Sent messages");
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons0.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- Message m = cons0.receive(2000);
-
- assertNull(m);
-
- m = cons1.receive(2000);
-
- assertNull(m);
-
- m = cons2.receive(2000);
-
- assertNull(m);
-
- // Send at node 1
-
- MessageProducer prod1 = sess1.createProducer(queue[1]);
-
- prod1.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess1.createTextMessage("message" + i);
-
- prod1.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons1.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- m = cons0.receive(2000);
-
- assertNull(m);
-
- m = cons1.receive(2000);
-
- assertNull(m);
-
- m = cons2.receive(2000);
-
- assertNull(m);
-
- // Send at node 2
-
- MessageProducer prod2 = sess2.createProducer(queue[2]);
-
- prod2.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess2.createTextMessage("message" + i);
-
- prod2.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons2.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- m = cons0.receive(2000);
-
- assertNull(m);
-
- m = cons1.receive(2000);
-
- assertNull(m);
-
- m = cons2.receive(2000);
-
- assertNull(m);
-
-
- //Now close the consumers at node 0 and node 1
-
- cons0.close();
-
- cons1.close();
-
- //Send more messages at node 0
-
- log.info("Sending more at node 0");
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess0.createTextMessage("message2-" + i);
-
- prod0.send(tm);
- }
-
- log.info("Sent messages");
-
- // consume them on node2
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons2.receive(1000);
-
- log.info("*** got message " + tm.getText());
-
- assertNotNull(tm);
-
- assertEquals("message2-" + i, tm.getText());
- }
-
- m = cons2.receive(2000);
-
- assertNull(m);
-
- //Send more messages at node 0 and node 1
-
- for (int i = 0; i < NUM_MESSAGES / 2; i++)
- {
- TextMessage tm = sess0.createTextMessage("message3-" + i);
-
- prod0.send(tm);
- }
-
- for (int i = NUM_MESSAGES / 2; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess1.createTextMessage("message3-" + i);
-
- prod2.send(tm);
- }
-
- //consume them on node 2 - we will get messages from both nodes so the order is undefined
-
- Set msgs = new HashSet();
-
- TextMessage tm = null;
-
- do
- {
- tm = (TextMessage)cons2.receive(1000);
-
- if (tm != null)
- {
- log.info("*** got message " + tm.getText());
-
- assertNotNull(tm);
-
- msgs.add(tm.getText());
- }
- }
- while (tm != null);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- assertTrue(msgs.contains("message3-" + i));
- }
-
- // Now repeat but this time creating the consumer after send
-
- cons2.close();
-
- // Send more messages at node 0 and node 1
-
- for (int i = 0; i < NUM_MESSAGES / 2; i++)
- {
- tm = sess0.createTextMessage("message3-" + i);
-
- prod0.send(tm);
- }
-
- for (int i = NUM_MESSAGES / 2; i < NUM_MESSAGES; i++)
- {
- tm = sess1.createTextMessage("message3-" + i);
-
- prod2.send(tm);
- }
-
- cons2 = sess2.createConsumer(queue[2]);
-
- //consume them on node 2 - we will get messages from both nodes so the order is undefined
-
- msgs = new HashSet();
-
- do
- {
- tm = (TextMessage)cons2.receive(1000);
-
- if (tm != null)
- {
- log.info("*** got message " + tm.getText());
-
- assertNotNull(tm);
-
- msgs.add(tm.getText());
- }
- }
- while (tm != null);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- assertTrue(msgs.contains("message3-" + i));
- }
-
-
- //Now send messages at node 0 - but consume from node 1 AND node 2
-
- //order is undefined
-
- cons2.close();
-
- cons1 = sess1.createConsumer(queue[1]);
-
- cons2 = sess2.createConsumer(queue[2]);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- tm = sess0.createTextMessage("message4-" + i);
-
- prod0.send(tm);
- }
-
- msgs = new HashSet();
-
- int count = 0;
-
- do
- {
- tm = (TextMessage)cons1.receive(1000);
-
- if (tm != null)
- {
- log.info("*** got message " + tm.getText());
-
- msgs.add(tm.getText());
-
- count++;
- }
- }
- while (tm != null);
-
- do
- {
- tm = (TextMessage)cons2.receive(1000);
-
- if (tm != null)
- {
- log.info("*** got message " + tm.getText());
-
-
- msgs.add(tm.getText());
-
- count++;
- }
- }
- while (tm != null);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- assertTrue(msgs.contains("message4-" + i));
- }
-
- assertEquals(NUM_MESSAGES, count);
-
- //as above but start consumers AFTER sending
-
- cons1.close();
-
- cons2.close();
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- tm = sess0.createTextMessage("message4-" + i);
-
- prod0.send(tm);
- }
-
- cons1 = sess1.createConsumer(queue[1]);
-
- cons2 = sess2.createConsumer(queue[2]);
-
-
- msgs = new HashSet();
-
- count = 0;
-
- do
- {
- tm = (TextMessage)cons1.receive(1000);
-
- if (tm != null)
- {
- log.info("*** got message " + tm.getText());
-
- msgs.add(tm.getText());
-
- count++;
- }
- }
- while (tm != null);
-
- do
- {
- tm = (TextMessage)cons2.receive(1000);
-
- if (tm != null)
- {
-
- log.info("*** got message " + tm.getText());
-
- msgs.add(tm.getText());
-
- count++;
- }
- }
- while (tm != null);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- assertTrue(msgs.contains("message4-" + i));
- }
-
- assertEquals(NUM_MESSAGES, count);
-
-
- // Now send message on node 0, consume on node2, then cancel, consume on node1, cancel, consume on node 0
-
- cons1.close();
-
- cons2.close();
-
- sess2.close();
-
- sess2 = conn2.createSession(false, Session.CLIENT_ACKNOWLEDGE);
-
- cons2 = sess2.createConsumer(queue[2]);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- tm = sess0.createTextMessage("message5-" + i);
-
- prod0.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- tm = (TextMessage)cons2.receive(1000);
-
- log.info("*** got message " + tm.getText());
-
- assertNotNull(tm);
-
- assertEquals("message5-" + i, tm.getText());
- }
-
- sess2.close(); // messages should go back on queue
-
- //Now try on node 1
-
- sess1.close();
-
- sess1 = conn1.createSession(false, Session.CLIENT_ACKNOWLEDGE);
-
- cons1 = sess1.createConsumer(queue[1]);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- tm = (TextMessage)cons1.receive(1000);
-
- log.info("*** got message " + tm.getText());
-
- assertNotNull(tm);
-
- assertEquals("message5-" + i, tm.getText());
- }
-
- sess1.close(); // messages should go back on queue
-
- //Now try on node 0
-
- cons0 = sess0.createConsumer(queue[0]);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- tm = (TextMessage)cons0.receive(1000);
-
- log.info("*** got message " + tm.getText());
-
- assertNotNull(tm);
-
- assertEquals("message5-" + i, tm.getText());
- }
-
-
- }
- finally
- {
- if (conn0 != null)
- {
- conn0.close();
- }
-
- if (conn1 != null)
- {
- conn1.close();
- }
-
- if (conn2 != null)
- {
- conn2.close();
- }
- }
- }
-
- // Private -------------------------------------------------------
-
- /*
- * Create non durable subscriptions on all nodes of the cluster.
- * Ensure all messages are receive as appropriate
- */
- private void clusteredTopicNonDurable(boolean persistent) throws Exception
- {
- Connection conn0 = null;
- Connection conn1 = null;
- Connection conn2 = null;
- try
- {
- //This will create 3 different connection on 3 different nodes, since
- //the cf is clustered
- conn0 = cf.createConnection();
- conn1 = cf.createConnection();
- conn2 = cf.createConnection();
-
- log.info("Created connections");
-
- checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
-
- Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- MessageConsumer cons0 = sess0.createConsumer(topic[0]);
- MessageConsumer cons1 = sess1.createConsumer(topic[1]);
- MessageConsumer cons2 = sess2.createConsumer(topic[2]);
- MessageConsumer cons3 = sess0.createConsumer(topic[0]);
- MessageConsumer cons4 = sess1.createConsumer(topic[1]);
-
- conn0.start();
- conn1.start();
- conn2.start();
-
- // Send at node 0
-
- MessageProducer prod = sess0.createProducer(topic[0]);
-
- prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- final int NUM_MESSAGES = 100;
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess0.createTextMessage("message" + i);
-
- prod.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons0.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- Message msg = cons0.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons1.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- msg = cons1.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons2.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- msg = cons2.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons3.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- msg = cons3.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons4.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- msg = cons4.receive(1000);
-
- assertNull(msg);
- }
- finally
- {
- if (conn0 != null)
- {
- conn0.close();
- }
-
- if (conn1 != null)
- {
- conn1.close();
- }
-
- if (conn2 != null)
- {
- conn2.close();
- }
- }
- }
-
- /*
- * Create non durable subscriptions on all nodes of the cluster.
- * Include some with selectors
- * Ensure all messages are receive as appropriate
- */
- private void clusteredTopicNonDurableWithSelectors(boolean persistent) throws Exception
- {
- Connection conn0 = null;
- Connection conn1 = null;
- Connection conn2 = null;
-
- try
- {
- //This will create 3 different connection on 3 different nodes, since
- //the cf is clustered
- conn0 = cf.createConnection();
- conn1 = cf.createConnection();
- conn2 = cf.createConnection();
-
- log.info("Created connections");
-
- checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
-
- Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- MessageConsumer cons0 = sess0.createConsumer(topic[0]);
- MessageConsumer cons1 = sess1.createConsumer(topic[1]);
- MessageConsumer cons2 = sess2.createConsumer(topic[2]);
- MessageConsumer cons3 = sess0.createConsumer(topic[0], "COLOUR='red'");
- MessageConsumer cons4 = sess1.createConsumer(topic[1], "COLOUR='blue'");
-
- conn0.start();
- conn1.start();
- conn2.start();
-
- // Send at node 0
-
- MessageProducer prod = sess0.createProducer(topic[0]);
-
- prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- final int NUM_MESSAGES = 100;
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess0.createTextMessage("message" + i);
-
- int c = i % 3;
- if (c == 0)
- {
- tm.setStringProperty("COLOUR", "red");
- }
- else if (c == 1)
- {
- tm.setStringProperty("COLOUR", "blue");
- }
-
- prod.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons0.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- Message msg = cons0.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons1.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- msg = cons1.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons2.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- msg = cons2.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- int c = i % 3;
-
- if (c == 0)
- {
- TextMessage tm = (TextMessage)cons3.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
- }
-
- msg = cons3.receive(1000);
-
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- int c = i % 3;
-
- if (c == 1)
- {
- TextMessage tm = (TextMessage)cons4.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
- }
-
- msg = cons4.receive(1000);
-
- assertNull(msg);
- }
- finally
- {
- if (conn0 != null)
- {
- conn0.close();
- }
-
- if (conn1 != null)
- {
- conn1.close();
- }
-
- if (conn2 != null)
- {
- conn2.close();
- }
- }
- }
-
- /**
- * Create durable subscriptions on all nodes of the cluster. Include a couple with selectors.
- * Ensure all messages are receive as appropriate. None of the durable subs are shared.
- */
- private void clusteredTopicDurable(boolean persistent) throws Exception
- {
- Connection conn0 = null;
- Connection conn1 = null;
- Connection conn2 = null;
-
- try
- {
- // This will create 3 different connection on 3 different nodes, since the cf is clustered
- conn0 = cf.createConnection();
- conn1 = cf.createConnection();
- conn2 = cf.createConnection();
-
- log.info("Created connections");
-
- checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
-
- conn0.setClientID("wib1");
- conn1.setClientID("wib1");
- conn2.setClientID("wib1");
-
- Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- try
- {
- sess0.unsubscribe("alpha");
- }
- catch (Exception ignore) {}
- try
- {
- sess1.unsubscribe("beta");
- }
- catch (Exception ignore) {}
- try
- {
- sess2.unsubscribe("gamma");
- }
- catch (Exception ignore) {}
- try
- {
- sess0.unsubscribe("delta");
- }
- catch (Exception ignore) {}
- try
- {
- sess1.unsubscribe("epsilon");
- }
- catch (Exception ignore) {}
-
- log.info("creating subs");
-
- MessageConsumer alpha = sess0.createDurableSubscriber(topic[0], "alpha");
-
- log.info("created 0");
-
- MessageConsumer beta = sess1.createDurableSubscriber(topic[1], "beta");
-
- log.info("created 1");
-
- MessageConsumer gamma = sess2.createDurableSubscriber(topic[2], "gamma");
-
- log.info("created 2");
- MessageConsumer delta = sess0.createDurableSubscriber(topic[0], "delta");
-
- log.info("created 3");
-
- MessageConsumer epsilon = sess1.createDurableSubscriber(topic[1], "epsilon");
-
- log.info("created 4");
-
- conn0.start();
- conn1.start();
- conn2.start();
-
- log.info("started");
-
- // Send at node 0
-
- MessageProducer prod = sess0.createProducer(topic[0]);
-
- prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- final int NUM_MESSAGES = 1;
-
- log.info("sending messages");
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- prod.send(sess0.createTextMessage("message" + i));
- }
-
- log.info("Sent messages");
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)alpha.receive(1000);
- assertNotNull(tm);
- assertEquals("message" + i, tm.getText());
- }
-
- log.info("got 1");
-
- Message msg = alpha.receive(1000);
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)beta.receive(1000);
- assertNotNull(tm);
- assertEquals("message" + i, tm.getText());
- }
-
- log.info("got 2");
-
- msg = beta.receive(1000);
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)gamma.receive(1000);
- assertNotNull(tm);
- assertEquals("message" + i, tm.getText());
- }
-
- log.info("got 3");
-
- msg = gamma.receive(1000);
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)delta.receive(1000);
- assertNotNull(tm);
- assertEquals("message" + i, tm.getText());
- }
-
- log.info("got 4");
-
- msg = delta.receive(1000);
- assertNull(msg);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)epsilon.receive(1000);
- assertNotNull(tm);
- assertEquals("message" + i, tm.getText());
- }
-
- log.info("got 5");
-
- msg = epsilon.receive(1000);
- assertNull(msg);
-
- alpha.close();
- beta.close();
- gamma.close();
- delta.close();
- epsilon.close();
-
- log.info("got 6");
-
- sess0.unsubscribe("alpha");
- sess1.unsubscribe("beta");
- sess2.unsubscribe("gamma");
- sess0.unsubscribe("delta");
- sess1.unsubscribe("epsilon");
-
- log.info("got 7");
-
- }
- finally
- {
- if (conn0 != null)
- {
- conn0.close();
- }
-
- if (conn1 != null)
- {
- conn1.close();
- }
-
- if (conn2 != null)
- {
- conn2.close();
- }
- }
- }
-
- /*
- * Create shared durable subs on multiple nodes, the local instance should always get the message
- */
- private void clusteredTopicSharedDurableLocalConsumer(boolean persistent) throws Exception
- {
- Connection conn1 = null;
- Connection conn2 = null;
- Connection conn3 = null;
- try
-
- {
- //This will create 3 different connection on 3 different nodes, since
- //the cf is clustered
- conn1 = cf.createConnection();
- conn2 = cf.createConnection();
- conn3 = cf.createConnection();
-
- log.info("Created connections");
-
- checkConnectionsDifferentServers(new Connection[] {conn1, conn2, conn3});
- conn1.setClientID("wib1");
- conn2.setClientID("wib1");
- conn3.setClientID("wib1");
-
- Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess3 = conn3.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- try
- {
- sess1.unsubscribe("sub");
- }
- catch (Exception ignore) {}
- try
- {
- sess2.unsubscribe("sub");
- }
- catch (Exception ignore) {}
- try
- {
- sess3.unsubscribe("sub");
- }
- catch (Exception ignore) {}
-
- MessageConsumer cons1 = sess1.createDurableSubscriber(topic[0], "sub");
- MessageConsumer cons2 = sess2.createDurableSubscriber(topic[1], "sub");
- MessageConsumer cons3 = sess3.createDurableSubscriber(topic[2], "sub");
-
- conn1.start();
- conn2.start();
- conn3.start();
-
- // Send at node 0
-
- MessageProducer prod = sess1.createProducer(topic[0]);
-
- prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- final int NUM_MESSAGES = 100;
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess1.createTextMessage("message" + i);
-
- prod.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons1.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- Message m = cons2.receive(2000);
-
- assertNull(m);
-
- m = cons3.receive(2000);
-
- assertNull(m);
-
- // Send at node 1
-
- MessageProducer prod1 = sess2.createProducer(topic[1]);
-
- prod1.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess3.createTextMessage("message" + i);
-
- prod1.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons2.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- m = cons1.receive(2000);
-
- assertNull(m);
-
- m = cons3.receive(2000);
-
- assertNull(m);
-
- // Send at node 2
-
- MessageProducer prod2 = sess3.createProducer(topic[2]);
-
- prod2.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess3.createTextMessage("message" + i);
-
- prod2.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = (TextMessage)cons3.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i, tm.getText());
- }
-
- m = cons1.receive(2000);
-
- assertNull(m);
-
- m = cons2.receive(2000);
-
- assertNull(m);
-
- cons1.close();
- cons2.close();
- cons3.close();
-
- // Need to unsubscribe on any node that the durable sub was created on
-
- sess1.unsubscribe("sub");
- sess2.unsubscribe("sub");
- sess3.unsubscribe("sub");
- }
- finally
- {
- if (conn1 != null)
- {
- conn1.close();
- }
-
- if (conn2 != null)
- {
- conn2.close();
- }
-
- if (conn3 != null)
- {
- conn3.close();
- }
- }
- }
-
-
-
- /*
- * Create shared durable subs on multiple nodes, but without sub on local node
- * should round robin
- * note that this test assumes round robin
- */
- private void clusteredTopicSharedDurableNoLocalSub(boolean persistent) throws Exception
- {
- Connection conn1 = null;
- Connection conn2 = null;
- Connection conn3 = null;
-
- try
- {
- //This will create 3 different connection on 3 different nodes, since
- //the cf is clustered
- conn1 = cf.createConnection();
- conn2 = cf.createConnection();
- conn3 = cf.createConnection();
-
- log.info("Created connections");
-
- checkConnectionsDifferentServers(new Connection[] {conn1, conn2, conn3});
-
- conn2.setClientID("wib1");
- conn3.setClientID("wib1");
-
- Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
- Session sess3 = conn3.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- try
- {
- sess2.unsubscribe("sub");
- }
- catch (Exception ignore) {}
- try
- {
- sess3.unsubscribe("sub");
- }
- catch (Exception ignore) {}
-
- MessageConsumer cons1 = sess2.createDurableSubscriber(topic[1], "sub");
- MessageConsumer cons2 = sess3.createDurableSubscriber(topic[2], "sub");
-
- conn2.start();
- conn3.start();
-
- // Send at node 0
-
- //Should round robin between the other 2 since there is no active consumer on sub on node 0
-
- MessageProducer prod = sess1.createProducer(topic[0]);
-
- prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
-
- final int NUM_MESSAGES = 100;
-
- for (int i = 0; i < NUM_MESSAGES; i++)
- {
- TextMessage tm = sess1.createTextMessage("message" + i);
-
- prod.send(tm);
- }
-
- for (int i = 0; i < NUM_MESSAGES / 2; i++)
- {
- TextMessage tm = (TextMessage)cons1.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + i * 2, tm.getText());
- }
-
- for (int i = 0; i < NUM_MESSAGES / 2; i++)
- {
- TextMessage tm = (TextMessage)cons2.receive(1000);
-
- assertNotNull(tm);
-
- assertEquals("message" + (i * 2 + 1), tm.getText());
- }
-
- cons1.close();
- cons2.close();
-
- sess2.unsubscribe("sub");
- sess3.unsubscribe("sub");
-
- }
- finally
- {
- if (conn1 != null)
- {
- conn1.close();
- }
-
- if (conn2 != null)
- {
- conn2.close();
- }
-
- if (conn3 != null)
- {
- conn3.close();
- }
- }
- }
-
- class MyListener implements MessageListener
- {
- private int i;
-
- MyListener(int i)
- {
- this.i = i;
- }
-
- public void onMessage(Message m)
- {
- try
- {
- int count = m.getIntProperty("count");
-
- log.info("Listener " + i + " received message " + count);
- }
- catch (Exception e)
- {
- e.printStackTrace();
- }
- }
-
- }
-
-
- // Inner classes -------------------------------------------------
-
-
-
-}
Added: trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedQueueTest.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedQueueTest.java (rev 0)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedQueueTest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -0,0 +1,572 @@
+/*
+ * JBoss, Home of Professional Open Source
+ * Copyright 2005, JBoss Inc., and individual contributors as indicated
+ * by the @authors tag. See the copyright.txt in the distribution for a
+ * full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.test.messaging.jms.clustering;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.jms.Connection;
+import javax.jms.DeliveryMode;
+import javax.jms.Message;
+import javax.jms.MessageConsumer;
+import javax.jms.MessageProducer;
+import javax.jms.Session;
+import javax.jms.TextMessage;
+
+import org.jboss.test.messaging.jms.clustering.base.ClusteringTestBase;
+
+/**
+ *
+ * A DistributedQueueTest
+ *
+ * @author <a href="mailto:tim.fox at jboss.com">Tim Fox</a>
+ * @version <tt>$Revision: 2796 $</tt>
+ *
+ * $Id: DistributedQueueTest.java 2796 2007-06-25 22:24:41Z timfox $
+ *
+ */
+public class DistributedQueueTest extends ClusteringTestBase
+{
+
+ // Constants -----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+   /**
+    * @param name the test name, passed through to the JUnit TestCase constructor
+    */
+   public DistributedQueueTest(String name)
+   {
+      super(name);
+   }
+
+ // Public --------------------------------------------------------
+
+   // Runs the full distributed-queue scenario with NON_PERSISTENT messages
+   public void testClusteredQueueNonPersistent() throws Exception
+   {
+      clusteredQueue(false);
+   }
+
+   // Runs the full distributed-queue scenario with PERSISTENT messages
+   public void testClusteredQueuePersistent() throws Exception
+   {
+      clusteredQueue(true);
+   }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+   // Sets the cluster size before delegating to the base class, which starts the
+   // servers (nodeCount is presumably read by ClusteringTestBase.setUp() - verify)
+   protected void setUp() throws Exception
+   {
+      nodeCount = 3;
+
+      super.setUp();
+
+      log.debug("setup done");
+   }
+
+   protected void tearDown() throws Exception
+   {
+      super.tearDown();
+   }
+
+   /**
+    * Core distributed-queue scenario, run against a 3 node cluster:
+    * <ul>
+    * <li>messages sent at a node with a local consumer are consumed locally, in order;</li>
+    * <li>when local consumers are closed, messages are pulled to a remote consumer;</li>
+    * <li>messages sent concurrently from two nodes all arrive (order undefined);</li>
+    * <li>messages split between two remote consumers are delivered exactly once;</li>
+    * <li>closing a CLIENT_ACKNOWLEDGE session without acking puts the messages
+    *     back on the queue, where another node can consume them.</li>
+    * </ul>
+    *
+    * @param persistent whether messages are sent with DeliveryMode.PERSISTENT
+    */
+   protected void clusteredQueue(boolean persistent) throws Exception
+   {
+      Connection conn0 = null;
+      Connection conn1 = null;
+      Connection conn2 = null;
+
+      try
+      {
+         // This creates 3 different connections on 3 different nodes, since the
+         // connection factory is clustered
+         conn0 = cf.createConnection();
+         conn1 = cf.createConnection();
+         conn2 = cf.createConnection();
+
+         log.info("Created connections");
+
+         checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
+
+         Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
+         Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
+         Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+         MessageConsumer cons0 = sess0.createConsumer(queue[0]);
+         MessageConsumer cons1 = sess1.createConsumer(queue[1]);
+         MessageConsumer cons2 = sess2.createConsumer(queue[2]);
+
+         conn0.start();
+         conn1.start();
+         conn2.start();
+
+         final int NUM_MESSAGES = 100;
+
+         // Send at node 0 - with a local consumer present the messages should all
+         // be consumed locally and in order, and nothing should reach the other nodes
+
+         MessageProducer prod0 = sess0.createProducer(queue[0]);
+         prod0.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+         sendMessages(sess0, prod0, "message", 0, NUM_MESSAGES);
+         consumeInOrder(cons0, "message", 0, NUM_MESSAGES);
+         assertNoMoreMessages(new MessageConsumer[] {cons0, cons1, cons2});
+
+         // Send at node 1
+
+         MessageProducer prod1 = sess1.createProducer(queue[1]);
+         prod1.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+         sendMessages(sess1, prod1, "message", 0, NUM_MESSAGES);
+         consumeInOrder(cons1, "message", 0, NUM_MESSAGES);
+         assertNoMoreMessages(new MessageConsumer[] {cons0, cons1, cons2});
+
+         // Send at node 2
+
+         MessageProducer prod2 = sess2.createProducer(queue[2]);
+         prod2.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+         sendMessages(sess2, prod2, "message", 0, NUM_MESSAGES);
+         consumeInOrder(cons2, "message", 0, NUM_MESSAGES);
+         assertNoMoreMessages(new MessageConsumer[] {cons0, cons1, cons2});
+
+         // Now close the consumers at node 0 and node 1; messages sent at node 0
+         // should be pulled to the only remaining consumer, on node 2
+
+         cons0.close();
+         cons1.close();
+
+         log.info("Sending more at node 0");
+
+         sendMessages(sess0, prod0, "message2-", 0, NUM_MESSAGES);
+         consumeInOrder(cons2, "message2-", 0, NUM_MESSAGES);
+         assertNoMoreMessages(new MessageConsumer[] {cons2});
+
+         // Send more messages at node 0 AND node 1.
+         // FIX: the second batch is now sent with prod1 (node 1) - previously it was
+         // created on sess1 but sent with prod2, so it never originated at node 1.
+
+         sendMessages(sess0, prod0, "message3-", 0, NUM_MESSAGES / 2);
+         sendMessages(sess1, prod1, "message3-", NUM_MESSAGES / 2, NUM_MESSAGES - NUM_MESSAGES / 2);
+
+         // Consume them all on node 2 - they come from two nodes so order is undefined
+
+         Set msgs = new HashSet();
+         drainInto(cons2, msgs);
+         assertContainsAll(msgs, "message3-", 0, NUM_MESSAGES);
+
+         // Now repeat, but this time create the consumer AFTER sending
+
+         cons2.close();
+
+         sendMessages(sess0, prod0, "message3-", 0, NUM_MESSAGES / 2);
+         sendMessages(sess1, prod1, "message3-", NUM_MESSAGES / 2, NUM_MESSAGES - NUM_MESSAGES / 2);
+
+         cons2 = sess2.createConsumer(queue[2]);
+
+         msgs = new HashSet();
+         drainInto(cons2, msgs);
+         assertContainsAll(msgs, "message3-", 0, NUM_MESSAGES);
+
+         // Now send at node 0 but consume from node 1 AND node 2; the messages are
+         // split between the two consumers (order undefined), each delivered once
+
+         cons2.close();
+
+         cons1 = sess1.createConsumer(queue[1]);
+         cons2 = sess2.createConsumer(queue[2]);
+
+         sendMessages(sess0, prod0, "message4-", 0, NUM_MESSAGES);
+
+         msgs = new HashSet();
+         int count = drainInto(cons1, msgs);
+         count += drainInto(cons2, msgs);
+
+         assertContainsAll(msgs, "message4-", 0, NUM_MESSAGES);
+         assertEquals(NUM_MESSAGES, count);
+
+         // As above, but start the consumers AFTER sending
+
+         cons1.close();
+         cons2.close();
+
+         sendMessages(sess0, prod0, "message4-", 0, NUM_MESSAGES);
+
+         cons1 = sess1.createConsumer(queue[1]);
+         cons2 = sess2.createConsumer(queue[2]);
+
+         msgs = new HashSet();
+         count = drainInto(cons1, msgs);
+         count += drainInto(cons2, msgs);
+
+         assertContainsAll(msgs, "message4-", 0, NUM_MESSAGES);
+         assertEquals(NUM_MESSAGES, count);
+
+         // Now send at node 0, consume on node 2 without acking, close the session
+         // (cancelling the deliveries), consume on node 1, cancel again, and finally
+         // consume for good on node 0
+
+         cons1.close();
+         cons2.close();
+
+         sess2.close();
+         sess2 = conn2.createSession(false, Session.CLIENT_ACKNOWLEDGE);
+         cons2 = sess2.createConsumer(queue[2]);
+
+         sendMessages(sess0, prod0, "message5-", 0, NUM_MESSAGES);
+         consumeInOrder(cons2, "message5-", 0, NUM_MESSAGES);
+
+         sess2.close(); // unacked messages should go back on the queue
+
+         // Now try on node 1
+
+         sess1.close();
+         sess1 = conn1.createSession(false, Session.CLIENT_ACKNOWLEDGE);
+         cons1 = sess1.createConsumer(queue[1]);
+
+         consumeInOrder(cons1, "message5-", 0, NUM_MESSAGES);
+
+         sess1.close(); // unacked messages should go back on the queue
+
+         // Now try on node 0 - sess0 is AUTO_ACKNOWLEDGE, so this consumes them for good
+
+         cons0 = sess0.createConsumer(queue[0]);
+
+         consumeInOrder(cons0, "message5-", 0, NUM_MESSAGES);
+      }
+      finally
+      {
+         if (conn0 != null)
+         {
+            conn0.close();
+         }
+
+         if (conn1 != null)
+         {
+            conn1.close();
+         }
+
+         if (conn2 != null)
+         {
+            conn2.close();
+         }
+      }
+   }
+
+   // Sends num text messages prefix + start ... prefix + (start + num - 1) with the given producer
+   private void sendMessages(Session sess, MessageProducer prod, String prefix, int start, int num) throws Exception
+   {
+      for (int i = start; i < start + num; i++)
+      {
+         prod.send(sess.createTextMessage(prefix + i));
+      }
+   }
+
+   // Receives num messages and asserts they are prefix + start ... in order.
+   // Note: assertNotNull runs BEFORE getText(), so a receive timeout fails the
+   // assertion instead of throwing a NullPointerException.
+   private void consumeInOrder(MessageConsumer cons, String prefix, int start, int num) throws Exception
+   {
+      for (int i = start; i < start + num; i++)
+      {
+         TextMessage tm = (TextMessage)cons.receive(1000);
+
+         assertNotNull(tm);
+
+         assertEquals(prefix + i, tm.getText());
+      }
+   }
+
+   // Asserts that none of the given consumers has any message left
+   private void assertNoMoreMessages(MessageConsumer[] consumers) throws Exception
+   {
+      for (int i = 0; i < consumers.length; i++)
+      {
+         assertNull(consumers[i].receive(2000));
+      }
+   }
+
+   // Drains all remaining messages from the consumer into the set, returning how many arrived
+   private int drainInto(MessageConsumer cons, Set msgs) throws Exception
+   {
+      int count = 0;
+
+      TextMessage tm = (TextMessage)cons.receive(1000);
+
+      while (tm != null)
+      {
+         log.info("*** got message " + tm.getText());
+
+         msgs.add(tm.getText());
+
+         count++;
+
+         tm = (TextMessage)cons.receive(1000);
+      }
+
+      return count;
+   }
+
+   // Asserts that the set contains every expected message text
+   private void assertContainsAll(Set msgs, String prefix, int start, int num)
+   {
+      for (int i = start; i < start + num; i++)
+      {
+         assertTrue(msgs.contains(prefix + i));
+      }
+   }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Copied: trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedRequestResponseTest.java (from rev 2797, trunk/tests/src/org/jboss/test/messaging/jms/clustering/RequestResponseWithPullTest.java)
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedRequestResponseTest.java (rev 0)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedRequestResponseTest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -0,0 +1,258 @@
+/*
+* JBoss, Home of Professional Open Source
+* Copyright 2005, JBoss Inc., and individual contributors as indicated
+* by the @authors tag. See the copyright.txt in the distribution for a
+* full listing of individual contributors.
+*
+* This is free software; you can redistribute it and/or modify it
+* under the terms of the GNU Lesser General Public License as
+* published by the Free Software Foundation; either version 2.1 of
+* the License, or (at your option) any later version.
+*
+* This software is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this software; if not, write to the Free
+* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+*/
+package org.jboss.test.messaging.jms.clustering;
+
+import javax.jms.Connection;
+import javax.jms.ConnectionFactory;
+import javax.jms.DeliveryMode;
+import javax.jms.Destination;
+import javax.jms.JMSException;
+import javax.jms.Message;
+import javax.jms.MessageConsumer;
+import javax.jms.MessageListener;
+import javax.jms.MessageProducer;
+import javax.jms.Queue;
+import javax.jms.Session;
+import javax.jms.TextMessage;
+import javax.management.ObjectName;
+import javax.naming.InitialContext;
+
+import org.jboss.jms.client.JBossConnection;
+import org.jboss.test.messaging.MessagingTestCase;
+import org.jboss.test.messaging.tools.ServerManagement;
+
+/**
+ * A test for distributed request-response pattern
+ *
+ * @author <a href="mailto:tim.fox at jboss.com">Tim Fox</a>
+ * @version <tt>$Revision: 2701 $</tt>
+ *
+ * $Id: DistributedRequestResponseTest.java 2701 2007-05-17 16:01:05Z timfox $
+ */
+public class DistributedRequestResponseTest extends MessagingTestCase
+{
+ // Constants ------------------------------------------------------------------------------------
+
+ // Static ---------------------------------------------------------------------------------------
+
+ // Attributes -----------------------------------------------------------------------------------
+
+ // Constructors ---------------------------------------------------------------------------------
+
+   /**
+    * @param name the test name, passed through to the JUnit TestCase constructor
+    */
+   public DistributedRequestResponseTest(String name)
+   {
+      super(name);
+   }
+
+ // Public ---------------------------------------------------------------------------------------
+
+   // Each variant runs the same request/response scenario.  The first argument
+   // selects the reply destination type - temporary queue (true) or temporary
+   // topic (false) - and the second selects persistent delivery.
+   public void testDistributedRequestResponseWithTempTopicP() throws Exception
+   {
+      distributedRequestResponse(false, true);
+   }
+
+   public void testDistributedRequestResponseWithTempTopicNP() throws Exception
+   {
+      distributedRequestResponse(false, false);
+   }
+
+   public void testDistributedRequestResponseWithTempQueueP() throws Exception
+   {
+      distributedRequestResponse(true, true);
+   }
+
+   public void testDistributedRequestResponseWithTempQueueNP() throws Exception
+   {
+      distributedRequestResponse(true, false);
+   }
+
+ // Package protected ----------------------------------------------------------------------------
+
+ // Protected ------------------------------------------------------------------------------------
+
+   // Starts a 2 node cluster, deploys the same queue name on both nodes, and
+   // clears any messages left behind by previous runs
+   protected void setUp() throws Exception
+   {
+      super.setUp();
+
+      ServerManagement.start(0, "all", null, true);
+      ServerManagement.start(1, "all", null, false);
+
+      ServerManagement.deployQueue("testDistributedQueue", 0);
+      ServerManagement.deployQueue("testDistributedQueue", 1);
+
+      // Make sure both nodes start with an empty queue
+      removeAllMessages("testDistributedQueue", true, 0);
+      removeAllMessages("testDistributedQueue", true, 1);
+
+
+
+      log.debug("setup done");
+   }
+
+   protected void tearDown() throws Exception
+   {
+      super.tearDown();
+
+      ServerManagement.undeployQueue("testDistributedQueue", 0);
+
+      ServerManagement.undeployQueue("testDistributedQueue", 1);
+   }
+
+ // Private --------------------------------------------------------------------------------------
+
+
+   /**
+    * Sends 20 requests from node 0 to a clustered queue, has a listener on node 1
+    * reply to each via the JMSReplyTo temporary destination created on node 0, and
+    * asserts each reply arrives back at node 0 with the expected text.
+    *
+    * @param tempQueue  reply via a temporary queue (true) or temporary topic (false)
+    * @param persistent whether both request and reply use DeliveryMode.PERSISTENT
+    */
+   private void distributedRequestResponse(boolean tempQueue, final boolean persistent) throws Exception
+   {
+      Connection conn0 = null;
+      Connection conn1 = null;
+
+      try
+      {
+         InitialContext ic0 = new InitialContext(ServerManagement.getJNDIEnvironment(0));
+         InitialContext ic1 = new InitialContext(ServerManagement.getJNDIEnvironment(1));
+
+         ConnectionFactory cf = (ConnectionFactory)ic0.lookup("/ClusteredConnectionFactory");
+
+         // Same queue name, looked up on each node so each side uses its local binding
+         Queue queue0 = (Queue)ic0.lookup("/queue/testDistributedQueue");
+         Queue queue1 = (Queue)ic1.lookup("/queue/testDistributedQueue");
+
+         conn0 = cf.createConnection();
+
+         conn1 = cf.createConnection();
+
+         assertEquals(0, ((JBossConnection)conn0).getServerID());
+
+         assertEquals(1, ((JBossConnection)conn1).getServerID());
+
+         // Make sure the connections are on different servers
+
+         Session session0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+         // The reply-to destination lives on node 0
+         Destination tempDest;
+
+         if (tempQueue)
+         {
+            tempDest = session0.createTemporaryQueue();
+         }
+         else
+         {
+            tempDest = session0.createTemporaryTopic();
+         }
+
+         MessageConsumer cons0 = session0.createConsumer(tempDest);
+
+         // NOTE(review): presumably waits for the temporary destination to become
+         // visible cluster-wide before node 1 replies to it - confirm; a fixed 2s
+         // sleep may be flaky on slow machines
+         Thread.sleep(2000);
+
+         conn0.start();
+
+         // Replies to each message on the destination named in its JMSReplyTo header
+         class MyListener implements MessageListener
+         {
+            Session sess;
+
+            MyListener(Session sess)
+            {
+               this.sess = sess;
+            }
+
+            public void onMessage(Message msg)
+            {
+               try
+               {
+                  log.info("Received message in listener!");
+                  Destination dest = msg.getJMSReplyTo();
+                  MessageProducer prod = sess.createProducer(dest);
+                  prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+                  TextMessage tm = (TextMessage)msg;
+                  String text = tm.getText();
+                  tm.clearBody();
+                  tm.setText(text + "reply");
+                  log.info("Sending response");
+                  // Replies with the (mutated) request message itself
+                  prod.send(msg);
+               }
+               catch (JMSException e)
+               {
+                  log.error("Failed to reply to message", e);
+                  // NOTE(review): fail() here runs on the JMS delivery thread, not
+                  // the JUnit thread, so it cannot fail the test directly; the test
+                  // surfaces the problem as a receive() timeout below
+                  fail();
+               }
+            }
+
+         }
+
+
+         Session session1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+         MessageConsumer cons1 = session1.createConsumer(queue1);
+
+         MyListener listener = new MyListener(session1);
+
+         cons1.setMessageListener(listener);
+
+         conn1.start();
+
+
+         MessageProducer prod = session0.createProducer(queue0);
+
+         prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+         // Send each request and wait (generously) for its reply before the next
+         for (int i = 0; i < 20; i++)
+         {
+            TextMessage sm = session0.createTextMessage("hoo ja ma flip" + i);
+
+            sm.setJMSReplyTo(tempDest);
+
+            log.info("Sending message!");
+
+            prod.send(sm);
+
+            TextMessage tm = (TextMessage)cons0.receive(60000);
+
+            assertNotNull(tm);
+
+            assertEquals(sm.getText() + "reply", tm.getText());
+
+            log.info("Received reply!");
+         }
+      }
+      finally
+      {
+
+         if (conn0 != null)
+         {
+            conn0.close();
+         }
+
+         if (conn1 != null)
+         {
+            conn1.close();
+         }
+      }
+   }
+
+   // Invokes the removeAllMessages JMX operation on the destination's MBean at the given server
+   private void removeAllMessages(String destName, boolean isQueue, int server) throws Exception
+   {
+      String serviceType;
+
+      if (isQueue)
+      {
+         serviceType = "Queue";
+      }
+      else
+      {
+         serviceType = "Topic";
+      }
+
+      ObjectName objectName =
+         new ObjectName("jboss.messaging.destination:service=" + serviceType + ",name=" + destName);
+
+      ServerManagement.getServer(server).invoke(objectName, "removeAllMessages", null, null);
+   }
+
+ // Inner classes --------------------------------------------------------------------------------
+
+}
Copied: trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedTopicTest.java (from rev 2797, trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedDestinationsTest.java)
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedTopicTest.java (rev 0)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/DistributedTopicTest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -0,0 +1,1043 @@
+/*
+ * JBoss, Home of Professional Open Source
+ * Copyright 2005, JBoss Inc., and individual contributors as indicated
+ * by the @authors tag. See the copyright.txt in the distribution for a
+ * full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.test.messaging.jms.clustering;
+
+import javax.jms.Connection;
+import javax.jms.DeliveryMode;
+import javax.jms.InvalidDestinationException;
+import javax.jms.Message;
+import javax.jms.MessageConsumer;
+import javax.jms.MessageListener;
+import javax.jms.MessageProducer;
+import javax.jms.Session;
+import javax.jms.TextMessage;
+
+import org.jboss.test.messaging.jms.clustering.base.ClusteringTestBase;
+
+/**
+ *
+ * A DistributedTopicTest
+ *
+ * @author <a href="mailto:tim.fox at jboss.com">Tim Fox</a>
+ * @version <tt>$Revision$</tt>
+ *
+ * $Id$
+ *
+ */
+public class DistributedTopicTest extends ClusteringTestBase
+{
+
+ // Constants -----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+   /**
+    * @param name the test name, passed through to the JUnit TestCase constructor
+    */
+   public DistributedTopicTest(String name)
+   {
+      super(name);
+   }
+
+ // Public --------------------------------------------------------
+
+   // Each test runs one topic scenario twice; the boolean selects NON_PERSISTENT
+   // (false) or PERSISTENT (true) delivery
+   public void testClusteredTopicNonDurableNonPersistent() throws Exception
+   {
+      clusteredTopicNonDurable(false);
+   }
+
+   public void testClusteredTopicNonDurablePersistent() throws Exception
+   {
+      clusteredTopicNonDurable(true);
+   }
+
+   public void testClusteredTopicNonDurableWithSelectorsNonPersistent() throws Exception
+   {
+      clusteredTopicNonDurableWithSelectors(false);
+   }
+
+   public void testClusteredTopicNonDurableWithSelectorsPersistent() throws Exception
+   {
+      clusteredTopicNonDurableWithSelectors(true);
+   }
+
+   public void testClusteredTopicDurableNonPersistent() throws Exception
+   {
+      clusteredTopicDurable(false);
+   }
+
+   public void testClusteredTopicDurablePersistent() throws Exception
+   {
+      clusteredTopicDurable(true);
+   }
+
+   public void testClusteredTopicSharedDurableLocalConsumerNonPersistent() throws Exception
+   {
+      clusteredTopicSharedDurableLocalConsumer(false);
+   }
+
+   public void testClusteredTopicSharedDurableLocalConsumerPersistent() throws Exception
+   {
+      clusteredTopicSharedDurableLocalConsumer(true);
+   }
+
+   public void testClusteredTopicSharedDurableNoLocalSubNonPersistent() throws Exception
+   {
+      clusteredTopicSharedDurableNoLocalSub(false);
+   }
+
+   public void testClusteredTopicSharedDurableNoLocalSubPersistent() throws Exception
+   {
+      clusteredTopicSharedDurableNoLocalSub(true);
+   }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+   // Sets the cluster size before delegating to the base class, which starts the
+   // servers (nodeCount is presumably read by ClusteringTestBase.setUp() - verify)
+   protected void setUp() throws Exception
+   {
+      nodeCount = 3;
+
+      super.setUp();
+
+      log.debug("setup done");
+   }
+
+   protected void tearDown() throws Exception
+   {
+      super.tearDown();
+   }
+
+ // Private -------------------------------------------------------
+
+   /*
+    * Create non durable subscriptions on all nodes of the cluster.
+    * Ensure all messages are received as appropriate
+    */
+   private void clusteredTopicNonDurable(boolean persistent) throws Exception
+   {
+      Connection conn0 = null;
+      Connection conn1 = null;
+      Connection conn2 = null;
+      try
+      {
+         // The clustered connection factory assigns the three connections to
+         // three different nodes
+         conn0 = cf.createConnection();
+         conn1 = cf.createConnection();
+         conn2 = cf.createConnection();
+
+         log.info("Created connections");
+
+         checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
+
+         Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
+         Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
+         Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+         // Two subscriptions each on node 0 and node 1, one on node 2
+         MessageConsumer[] subscribers = new MessageConsumer[] {
+            sess0.createConsumer(topic[0]),
+            sess1.createConsumer(topic[1]),
+            sess2.createConsumer(topic[2]),
+            sess0.createConsumer(topic[0]),
+            sess1.createConsumer(topic[1]) };
+
+         conn0.start();
+         conn1.start();
+         conn2.start();
+
+         // Publish at node 0 - every subscription on every node should receive
+         // every message, in publish order
+
+         MessageProducer prod = sess0.createProducer(topic[0]);
+
+         prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+         final int NUM_MESSAGES = 100;
+
+         for (int i = 0; i < NUM_MESSAGES; i++)
+         {
+            prod.send(sess0.createTextMessage("message" + i));
+         }
+
+         for (int c = 0; c < subscribers.length; c++)
+         {
+            for (int i = 0; i < NUM_MESSAGES; i++)
+            {
+               TextMessage tm = (TextMessage)subscribers[c].receive(1000);
+
+               assertNotNull(tm);
+
+               assertEquals("message" + i, tm.getText());
+            }
+
+            // And nothing more on this subscription
+            assertNull(subscribers[c].receive(1000));
+         }
+      }
+      finally
+      {
+         if (conn0 != null)
+         {
+            conn0.close();
+         }
+
+         if (conn1 != null)
+         {
+            conn1.close();
+         }
+
+         if (conn2 != null)
+         {
+            conn2.close();
+         }
+      }
+   }
+
+   /*
+    * Create non durable subscriptions on all nodes of the cluster.
+    * Include some with selectors
+    * Ensure all messages are received as appropriate
+    */
+   private void clusteredTopicNonDurableWithSelectors(boolean persistent) throws Exception
+   {
+      Connection conn0 = null;
+      Connection conn1 = null;
+      Connection conn2 = null;
+
+      try
+      {
+         // The clustered connection factory assigns the three connections to
+         // three different nodes
+         conn0 = cf.createConnection();
+         conn1 = cf.createConnection();
+         conn2 = cf.createConnection();
+
+         log.info("Created connections");
+
+         checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
+
+         Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
+         Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
+         Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+         MessageConsumer cons0 = sess0.createConsumer(topic[0]);
+         MessageConsumer cons1 = sess1.createConsumer(topic[1]);
+         MessageConsumer cons2 = sess2.createConsumer(topic[2]);
+         MessageConsumer cons3 = sess0.createConsumer(topic[0], "COLOUR='red'");
+         MessageConsumer cons4 = sess1.createConsumer(topic[1], "COLOUR='blue'");
+
+         conn0.start();
+         conn1.start();
+         conn2.start();
+
+         // Publish at node 0: messages with i % 3 == 0 are red, i % 3 == 1 are
+         // blue, the rest carry no COLOUR property at all
+
+         MessageProducer prod = sess0.createProducer(topic[0]);
+
+         prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+         final int NUM_MESSAGES = 100;
+
+         for (int i = 0; i < NUM_MESSAGES; i++)
+         {
+            TextMessage tm = sess0.createTextMessage("message" + i);
+
+            switch (i % 3)
+            {
+               case 0:
+                  tm.setStringProperty("COLOUR", "red");
+                  break;
+               case 1:
+                  tm.setStringProperty("COLOUR", "blue");
+                  break;
+            }
+
+            prod.send(tm);
+         }
+
+         // The unfiltered subscriptions see every message, in order
+
+         MessageConsumer[] unfiltered = new MessageConsumer[] {cons0, cons1, cons2};
+
+         for (int c = 0; c < unfiltered.length; c++)
+         {
+            for (int i = 0; i < NUM_MESSAGES; i++)
+            {
+               TextMessage tm = (TextMessage)unfiltered[c].receive(1000);
+
+               assertNotNull(tm);
+
+               assertEquals("message" + i, tm.getText());
+            }
+
+            assertNull(unfiltered[c].receive(1000));
+         }
+
+         // The selector subscriptions see only their own colour
+
+         for (int i = 0; i < NUM_MESSAGES; i++)
+         {
+            if (i % 3 == 0)
+            {
+               TextMessage tm = (TextMessage)cons3.receive(1000);
+
+               assertNotNull(tm);
+
+               assertEquals("message" + i, tm.getText());
+            }
+         }
+
+         assertNull(cons3.receive(1000));
+
+         for (int i = 0; i < NUM_MESSAGES; i++)
+         {
+            if (i % 3 == 1)
+            {
+               TextMessage tm = (TextMessage)cons4.receive(1000);
+
+               assertNotNull(tm);
+
+               assertEquals("message" + i, tm.getText());
+            }
+         }
+
+         assertNull(cons4.receive(1000));
+      }
+      finally
+      {
+         if (conn0 != null)
+         {
+            conn0.close();
+         }
+
+         if (conn1 != null)
+         {
+            conn1.close();
+         }
+
+         if (conn2 != null)
+         {
+            conn2.close();
+         }
+      }
+   }
+
+ private void clusteredTopicDurable(boolean persistent) throws Exception
+ {
+ Connection conn0 = null;
+ Connection conn1 = null;
+ Connection conn2 = null;
+
+ try
+ {
+ // This will create 3 different connection on 3 different nodes, since the cf is clustered
+ conn0 = cf.createConnection();
+ conn1 = cf.createConnection();
+ conn2 = cf.createConnection();
+
+ log.info("Created connections");
+
+ checkConnectionsDifferentServers(new Connection[] {conn0, conn1, conn2});
+
+ conn0.setClientID("wib1");
+ conn1.setClientID("wib1");
+ conn2.setClientID("wib1");
+
+ Session sess0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
+ Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
+ Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+ try
+ {
+ sess0.unsubscribe("alpha");
+ }
+ catch (Exception ignore) {}
+ try
+ {
+ sess1.unsubscribe("beta");
+ }
+ catch (Exception ignore) {}
+ try
+ {
+ sess2.unsubscribe("gamma");
+ }
+ catch (Exception ignore) {}
+ try
+ {
+ sess0.unsubscribe("delta");
+ }
+ catch (Exception ignore) {}
+ try
+ {
+ sess1.unsubscribe("epsilon");
+ }
+ catch (Exception ignore) {}
+
+ log.info("creating subs");
+
+ MessageConsumer alpha = sess0.createDurableSubscriber(topic[0], "alpha");
+
+ log.info("created 0");
+
+ MessageConsumer beta = sess1.createDurableSubscriber(topic[1], "beta");
+
+ log.info("created 1");
+
+ MessageConsumer gamma = sess2.createDurableSubscriber(topic[2], "gamma");
+
+ log.info("created 2");
+ MessageConsumer delta = sess0.createDurableSubscriber(topic[0], "delta");
+
+ log.info("created 3");
+
+ MessageConsumer epsilon = sess1.createDurableSubscriber(topic[1], "epsilon");
+
+ log.info("created 4");
+
+ conn0.start();
+ conn1.start();
+ conn2.start();
+
+ Thread.sleep(5000);
+
+ log.info("started");
+
+ // Send at node 0 - and make sure the messages are consumable from all the durable subs
+
+ MessageProducer prod = sess0.createProducer(topic[0]);
+
+ prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+ final int NUM_MESSAGES = 100;
+
+ log.info("sending messages");
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ prod.send(sess0.createTextMessage("message" + i));
+ }
+
+ log.info("Sent messages");
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)alpha.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+ assertEquals("message" + i, tm.getText());
+ }
+
+ log.info("got 1");
+
+ Message msg = alpha.receive(1000);
+ assertNull(msg);
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)beta.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+ assertEquals("message" + i, tm.getText());
+ }
+
+ log.info("got 2");
+
+ msg = beta.receive(1000);
+ assertNull(msg);
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)gamma.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+ assertEquals("message" + i, tm.getText());
+ }
+
+ log.info("got 3");
+
+ msg = gamma.receive(1000);
+ assertNull(msg);
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)delta.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+ assertEquals("message" + i, tm.getText());
+ }
+
+ log.info("got 4");
+
+ msg = delta.receive(1000);
+ assertNull(msg);
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)epsilon.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+ assertEquals("message" + i, tm.getText());
+ }
+
+
+ log.info("got 5");
+
+ msg = epsilon.receive(1000);
+ assertNull(msg);
+
+
+ //close beta
+ beta.close();
+
+
+ log.info("Sent messages");
+
+ // Create another beta - this one node 0
+ MessageConsumer beta0 = sess0.createDurableSubscriber(topic[0], "beta");
+
+ //And one on node 1
+ MessageConsumer beta1 = sess1.createDurableSubscriber(topic[1], "beta");
+
+ //Now send some more messages at node 2
+
+ MessageProducer prod2 = sess2.createProducer(topic[2]);
+
+ prod2.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+ log.info("sending more messages");
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ prod2.send(sess1.createTextMessage("message2-" + i));
+ }
+
+ //They should be round-robined - but we don't know which one will get them first
+
+ int offset = 0;
+
+ for (int i = 0; i < NUM_MESSAGES / 2; i++)
+ {
+ TextMessage tm = (TextMessage)beta0.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+
+ if (tm.getText().substring("message2-".length()).equals("1"))
+ {
+ offset = 1;
+ }
+
+ assertEquals("message2-" + (i * 2 + offset), tm.getText());
+ }
+
+ msg = beta0.receive(2000);
+ assertNull(msg);
+
+ if (offset == 1)
+ {
+ offset = 0;
+ }
+ else
+ {
+ offset = 1;
+ }
+
+ for (int i = 0; i < NUM_MESSAGES / 2; i++)
+ {
+ TextMessage tm = (TextMessage)beta1.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+ assertEquals("message2-" + (i * 2 + offset), tm.getText());
+ }
+
+ msg = beta1.receive(2000);
+ assertNull(msg);
+
+ //Send some more at node 0
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ prod.send(sess1.createTextMessage("message3-" + i));
+ }
+
+ //This should go straight to the local queue
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)beta0.receive(1000);
+ assertNotNull(tm);
+ log.info("**** got message" + tm.getText());
+ assertEquals("message3-" + i, tm.getText());
+ }
+
+ msg = beta0.receive(2000);
+ assertNull(msg);
+
+ //So now we have a beta on node 1 and a beta on node 0 and the messages are on node2
+
+ beta0.close();
+ beta1.close();
+
+ alpha.close();
+ beta.close();
+ gamma.close();
+ delta.close();
+ epsilon.close();
+
+ log.info("got 6");
+
+ sess0.unsubscribe("alpha");
+ sess1.unsubscribe("beta");
+ sess2.unsubscribe("gamma");
+ sess0.unsubscribe("delta");
+ sess1.unsubscribe("epsilon");
+
+ log.info("got 7");
+
+ }
+ finally
+ {
+ if (conn0 != null)
+ {
+ conn0.close();
+ }
+
+ if (conn1 != null)
+ {
+ conn1.close();
+ }
+
+ if (conn2 != null)
+ {
+ conn2.close();
+ }
+ }
+ }
+
+
+
+ /*
+ * Create shared durable subs on multiple nodes, the local instance should always get the message
+ */
+ private void clusteredTopicSharedDurableLocalConsumer(boolean persistent) throws Exception
+ {
+ Connection conn1 = null;
+ Connection conn2 = null;
+ Connection conn3 = null;
+ try
+
+ {
+ //This will create 3 different connection on 3 different nodes, since
+ //the cf is clustered
+ conn1 = cf.createConnection();
+ conn2 = cf.createConnection();
+ conn3 = cf.createConnection();
+
+ log.info("Created connections");
+
+ checkConnectionsDifferentServers(new Connection[] {conn1, conn2, conn3});
+ conn1.setClientID("wib1");
+ conn2.setClientID("wib1");
+ conn3.setClientID("wib1");
+
+ Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
+ Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
+ Session sess3 = conn3.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+ try
+ {
+ sess1.unsubscribe("sub");
+ }
+ catch (Exception ignore) {}
+ try
+ {
+ sess2.unsubscribe("sub");
+ }
+ catch (Exception ignore) {}
+ try
+ {
+ sess3.unsubscribe("sub");
+ }
+ catch (Exception ignore) {}
+
+ log.info("** creating 1");
+ MessageConsumer cons1 = sess1.createDurableSubscriber(topic[0], "sub");
+ log.info("** creating 2");
+ MessageConsumer cons2 = sess2.createDurableSubscriber(topic[1], "sub");
+ log.info("** creating 3");
+ MessageConsumer cons3 = sess3.createDurableSubscriber(topic[2], "sub");
+ log.info("** creating 4");
+
+ conn1.start();
+ conn2.start();
+ conn3.start();
+
+ // Send at node 0
+
+ MessageProducer prod = sess1.createProducer(topic[0]);
+
+ prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+ final int NUM_MESSAGES = 100;
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = sess1.createTextMessage("message" + i);
+
+ prod.send(tm);
+ }
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)cons1.receive(1000);
+
+ assertNotNull(tm);
+
+ assertEquals("message" + i, tm.getText());
+ }
+
+ Message m = cons2.receive(2000);
+
+ assertNull(m);
+
+ m = cons3.receive(2000);
+
+ assertNull(m);
+
+ // Send at node 1
+
+ MessageProducer prod1 = sess2.createProducer(topic[1]);
+
+ prod1.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = sess3.createTextMessage("message" + i);
+
+ prod1.send(tm);
+ }
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)cons2.receive(1000);
+
+ assertNotNull(tm);
+
+ assertEquals("message" + i, tm.getText());
+ }
+
+ m = cons1.receive(2000);
+
+ assertNull(m);
+
+ m = cons3.receive(2000);
+
+ assertNull(m);
+
+ // Send at node 2
+
+ MessageProducer prod2 = sess3.createProducer(topic[2]);
+
+ prod2.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = sess3.createTextMessage("message" + i);
+
+ prod2.send(tm);
+ }
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = (TextMessage)cons3.receive(1000);
+
+ assertNotNull(tm);
+
+ assertEquals("message" + i, tm.getText());
+ }
+
+ m = cons1.receive(2000);
+
+ assertNull(m);
+
+ m = cons2.receive(2000);
+
+ assertNull(m);
+
+ cons1.close();
+ cons2.close();
+
+ //Try and unsubscribe now - this should fail since there is still a consumer open on another node
+
+ try
+ {
+ sess1.unsubscribe("sub");
+
+ fail("Did not throw IllegalStateException");
+ }
+ catch (javax.jms.IllegalStateException e)
+ {
+ //Ok
+ }
+
+ cons3.close();
+
+ // Need to unsubscribe on any node that the durable sub was created on
+
+ sess1.unsubscribe("sub");
+
+ //Next unsubscribe should fail since it's already unsubscribed from a different node of the cluster
+ try
+ {
+ sess2.unsubscribe("sub");
+
+ fail("Did not throw InvalidDestinationException");
+ }
+ catch (InvalidDestinationException e)
+ {
+ //Ok
+ }
+
+ try
+ {
+ sess3.unsubscribe("sub");
+
+ fail("Did not throw InvalidDestinationException");
+ }
+ catch (InvalidDestinationException e)
+ {
+ //Ok
+ }
+ }
+ finally
+ {
+ if (conn1 != null)
+ {
+ conn1.close();
+ }
+
+ if (conn2 != null)
+ {
+ conn2.close();
+ }
+
+ if (conn3 != null)
+ {
+ conn3.close();
+ }
+ }
+ }
+
+
+
+ /*
+ * Create shared durable subs on multiple nodes, but with no sub on the
+ * local (publishing) node: delivery should round robin between the two
+ * remote subs.  Note that this test assumes strict round robin delivery,
+ * starting with the node 1 consumer.
+ */
+ private void clusteredTopicSharedDurableNoLocalSub(boolean persistent) throws Exception
+ {
+ Connection conn1 = null;
+ Connection conn2 = null;
+ Connection conn3 = null;
+
+ try
+ {
+ //This will create 3 different connections on 3 different nodes, since
+ //the cf is clustered
+ conn1 = cf.createConnection();
+ conn2 = cf.createConnection();
+ conn3 = cf.createConnection();
+
+ log.info("Created connections");
+
+ checkConnectionsDifferentServers(new Connection[] {conn1, conn2, conn3});
+
+ // Same client ID on nodes 1 and 2 only - "sub" is shared between them,
+ // and deliberately has no instance on node 0, where we will publish
+ conn2.setClientID("wib1");
+ conn3.setClientID("wib1");
+
+ Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
+ Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
+ Session sess3 = conn3.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+ // Best-effort removal of any subscription left over from a previous run
+ try
+ {
+ sess2.unsubscribe("sub");
+ }
+ catch (Exception ignore) {}
+ try
+ {
+ sess3.unsubscribe("sub");
+ }
+ catch (Exception ignore) {}
+
+ MessageConsumer cons1 = sess2.createDurableSubscriber(topic[1], "sub");
+ MessageConsumer cons2 = sess3.createDurableSubscriber(topic[2], "sub");
+
+ conn2.start();
+ conn3.start();
+
+ // Send at node 0
+
+ //Should round robin between the other 2 since there is no active consumer on sub on node 0
+
+ MessageProducer prod = sess1.createProducer(topic[0]);
+
+ prod.setDeliveryMode(persistent ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT);
+
+ final int NUM_MESSAGES = 100;
+
+ for (int i = 0; i < NUM_MESSAGES; i++)
+ {
+ TextMessage tm = sess1.createTextMessage("message" + i);
+
+ prod.send(tm);
+ }
+
+ // cons1 (node 1) is assumed to receive the even-numbered messages,
+ // in order - see the round robin assumption noted in the header
+ for (int i = 0; i < NUM_MESSAGES / 2; i++)
+ {
+ TextMessage tm = (TextMessage)cons1.receive(1000);
+
+ assertNotNull(tm);
+
+ assertEquals("message" + i * 2, tm.getText());
+ }
+
+ // ... and cons2 (node 2) the odd-numbered messages
+ for (int i = 0; i < NUM_MESSAGES / 2; i++)
+ {
+ TextMessage tm = (TextMessage)cons2.receive(1000);
+
+ assertNotNull(tm);
+
+ assertEquals("message" + (i * 2 + 1), tm.getText());
+ }
+
+ cons1.close();
+ cons2.close();
+
+ // Unsubscribe on each node the durable sub was created on
+ sess2.unsubscribe("sub");
+ sess3.unsubscribe("sub");
+
+ }
+ finally
+ {
+ if (conn1 != null)
+ {
+ conn1.close();
+ }
+
+ if (conn2 != null)
+ {
+ conn2.close();
+ }
+
+ if (conn3 != null)
+ {
+ conn3.close();
+ }
+ }
+ }
+
+
+ // Inner classes -------------------------------------------------
+
+
+}
Modified: trunk/tests/src/org/jboss/test/messaging/jms/clustering/HATest.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/HATest.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/HATest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -584,7 +584,7 @@
ServerManagement.kill(1);
- long sleepTime = 60;
+ long sleepTime = 30;
log.info("killed server, now waiting for " + sleepTime + " seconds");
@@ -747,7 +747,7 @@
killed = true;
- long sleepTime = 60;
+ long sleepTime = 30;
log.info("killed server, now waiting for " + sleepTime + " seconds");
@@ -1072,7 +1072,7 @@
ServerManagement.kill(1);
- Thread.sleep(60000);
+ Thread.sleep(30000);
// if failover happened, this object was replaced
assertNotSame(originalRemoting, delegate.getRemotingConnection());
Modified: trunk/tests/src/org/jboss/test/messaging/jms/clustering/MergeQueueTest.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/MergeQueueTest.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/MergeQueueTest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -156,7 +156,7 @@
MessageConsumer consumer0 = session0.createConsumer(queue[0]);
- for (int i=0; i<10; i++)
+ for (int i=0; i < 10; i++)
{
producer0.send(session0.createTextMessage("message " + i));
}
@@ -176,6 +176,7 @@
session0.commit();
consumer0.close();
+ log.info("** sent first five on node0");
// Objects Server1
conn1 = cf.createConnection();
@@ -190,22 +191,32 @@
producer1.setDeliveryMode(DeliveryMode.PERSISTENT);
- for (int i=10; i<20; i++)
+ for (int i = 10; i < 20; i++)
{
producer1.send(session0.createTextMessage("message " + i));
}
session1.commit();
+
+ log.info("Sent next 15 on node 1");
// creates another consumer... before killing the server
MessageConsumer consumer1 = session1.createConsumer(queue[1]);
+ log.info("Killing node1");
ServerManagement.killAndWait(1);
+ log.info("Killed node1");
// close the consumer .. .and this should cause failover to kick in
+ log.info("closing the consumer");
consumer1.close();
+
+ log.info("closed the consumer");
consumer0 = session0.createConsumer(queue[0]);
+
+ log.info("creating new consumer");
+
for (int i = 5; i < 20; i++)
{
msg = (TextMessage)consumer0.receive(5000);
@@ -217,6 +228,8 @@
assertNull(consumer0.receive(5000));
session0.commit();
+
+ log.info("end");
}
finally
{
Deleted: trunk/tests/src/org/jboss/test/messaging/jms/clustering/RequestResponseWithPullTest.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/RequestResponseWithPullTest.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/RequestResponseWithPullTest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -1,227 +0,0 @@
-/*
-* JBoss, Home of Professional Open Source
-* Copyright 2005, JBoss Inc., and individual contributors as indicated
-* by the @authors tag. See the copyright.txt in the distribution for a
-* full listing of individual contributors.
-*
-* This is free software; you can redistribute it and/or modify it
-* under the terms of the GNU Lesser General Public License as
-* published by the Free Software Foundation; either version 2.1 of
-* the License, or (at your option) any later version.
-*
-* This software is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* Lesser General Public License for more details.
-*
-* You should have received a copy of the GNU Lesser General Public
-* License along with this software; if not, write to the Free
-* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
-*/
-package org.jboss.test.messaging.jms.clustering;
-
-import javax.jms.Connection;
-import javax.jms.ConnectionFactory;
-import javax.jms.Destination;
-import javax.jms.JMSException;
-import javax.jms.Message;
-import javax.jms.MessageConsumer;
-import javax.jms.MessageListener;
-import javax.jms.MessageProducer;
-import javax.jms.Queue;
-import javax.jms.Session;
-import javax.jms.TextMessage;
-import javax.management.ObjectName;
-import javax.naming.InitialContext;
-
-import org.jboss.jms.client.JBossConnection;
-import org.jboss.test.messaging.MessagingTestCase;
-import org.jboss.test.messaging.tools.ServerManagement;
-import org.jboss.test.messaging.tools.jmx.ServiceAttributeOverrides;
-
-/**
- * A test for distributed request-response pattern with message pulling
- *
- * @author <a href="mailto:tim.fox at jboss.com">Tim Fox</a>
- * @version <tt>$Revision: 2701 $</tt>
- *
- * $Id: TemporaryDestinationTest.java 2701 2007-05-17 16:01:05Z timfox $
- */
-public class RequestResponseWithPullTest extends MessagingTestCase
-{
- // Constants ------------------------------------------------------------------------------------
-
- // Static ---------------------------------------------------------------------------------------
-
- // Attributes -----------------------------------------------------------------------------------
-
- // Constructors ---------------------------------------------------------------------------------
-
- public RequestResponseWithPullTest(String name)
- {
- super(name);
- }
-
- // Public ---------------------------------------------------------------------------------------
-
- // Purges all messages from the named destination on the given server via
- // its JMX "removeAllMessages" operation
- private void removeAllMessages(String destName, boolean isQueue, int server) throws Exception
- {
- String on = "jboss.messaging.destination:service=" + (isQueue ? "Queue" : "Topic") + ",name=" + destName;
-
- ServerManagement.getServer(server).invoke(new ObjectName(on), "removeAllMessages", null, null);
- }
-
- public void testDistributedRequestResponse() throws Exception
- {
- // start servers with redistribution policies that actually do something
- ServiceAttributeOverrides attrOverrides = new ServiceAttributeOverrides();
-
- ObjectName postOfficeObjectName = new ObjectName("jboss.messaging:service=PostOffice");
-
- attrOverrides.
- put(postOfficeObjectName, "MessagePullPolicy",
- "org.jboss.messaging.core.plugin.postoffice.cluster.DefaultMessagePullPolicy");
-
- ServerManagement.start(0, "all", attrOverrides, true);
- ServerManagement.start(1, "all", attrOverrides, false);
-
- ServerManagement.deployQueue("testDistributedQueue", 0);
- ServerManagement.deployQueue("testDistributedQueue", 1);
-
- removeAllMessages("testDistributedQueue", true, 0);
- removeAllMessages("testDistributedQueue", true, 1);
-
- InitialContext ic0 = new InitialContext(ServerManagement.getJNDIEnvironment(0));
- InitialContext ic1 = new InitialContext(ServerManagement.getJNDIEnvironment(1));
-
- ConnectionFactory cf = (ConnectionFactory)ic0.lookup("/ClusteredConnectionFactory");
-
- Queue queue0 = (Queue)ic0.lookup("/queue/testDistributedQueue");
- Queue queue1 = (Queue)ic1.lookup("/queue/testDistributedQueue");
-
- Connection conn0 = null;
-
- Connection conn1 = null;
-
- try
- {
- conn0 = cf.createConnection();
-
- conn1 = cf.createConnection();
-
- assertEquals(0, ((JBossConnection)conn0).getServerID());
-
- assertEquals(1, ((JBossConnection)conn1).getServerID());
-
- // Make sure the connections are on different servers
-
- Session session0 = conn0.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- Queue tempQueue = session0.createTemporaryQueue();
-
- MessageConsumer cons0 = session0.createConsumer(tempQueue);
-
- conn0.start();
-
- // Echoes each request back on its JMSReplyTo destination with the
- // original text plus "reply" appended
- class MyListener implements MessageListener
- {
- Session sess;
-
- MyListener(Session sess)
- {
- this.sess = sess;
- }
-
- public void onMessage(Message msg)
- {
- try
- {
- log.info("Received message in listener!");
- Destination dest = msg.getJMSReplyTo();
- MessageProducer prod = sess.createProducer(dest);
- TextMessage tm = (TextMessage)msg;
- String text = tm.getText();
- tm.clearBody();
- tm.setText(text + "reply");
- prod.send(msg);
- }
- catch (JMSException e)
- {
- log.error("Failed to reply to message", e);
- fail();
- }
- }
-
- }
-
-
- Session session1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
- MessageConsumer cons1 = session1.createConsumer(queue1);
-
- MyListener listener = new MyListener(session1);
-
- cons1.setMessageListener(listener);
-
- conn1.start();
-
- // Request-response round trips: each request is sent to the distributed
- // queue at node 0; with the pull policy configured above it should reach
- // the listener on node 1, whose reply arrives back on the temp queue
- MessageProducer prod = session0.createProducer(queue0);
-
- for (int i = 0; i < 20; i++)
- {
- TextMessage sm = session0.createTextMessage("hoo ja ma flip" + i);
-
- sm.setJMSReplyTo(tempQueue);
-
- log.info("Sending message!");
-
- prod.send(sm);
-
- TextMessage tm = (TextMessage)cons0.receive(60000);
-
- assertNotNull(tm);
-
- assertEquals(sm.getText() + "reply", tm.getText());
-
- log.info("Received reply!");
-
- //Thread.sleep(2000);
- }
- }
- finally
- {
- if (conn0 != null)
- {
- conn0.close();
- }
-
- if (conn1 != null)
- {
- conn1.close();
- }
- }
- }
-
- // Package protected ----------------------------------------------------------------------------
-
- // Protected ------------------------------------------------------------------------------------
-
- protected void setUp() throws Exception
- {
- super.setUp();
-
- log.debug("setup done");
- }
-
- protected void tearDown() throws Exception
- {
- super.tearDown();
- }
-
- // Private --------------------------------------------------------------------------------------
-
- // Inner classes --------------------------------------------------------------------------------
-
-}
Modified: trunk/tests/src/org/jboss/test/messaging/jms/clustering/TemporaryDestinationTest.java
===================================================================
--- trunk/tests/src/org/jboss/test/messaging/jms/clustering/TemporaryDestinationTest.java 2007-06-27 15:21:58 UTC (rev 2807)
+++ trunk/tests/src/org/jboss/test/messaging/jms/clustering/TemporaryDestinationTest.java 2007-06-27 20:23:20 UTC (rev 2808)
@@ -22,11 +22,7 @@
package org.jboss.test.messaging.jms.clustering;
import javax.jms.Connection;
-import javax.jms.Destination;
-import javax.jms.JMSException;
-import javax.jms.Message;
import javax.jms.MessageConsumer;
-import javax.jms.MessageListener;
import javax.jms.MessageProducer;
import javax.jms.Queue;
import javax.jms.Session;
@@ -176,6 +172,8 @@
Topic topic = session0.createTemporaryTopic();
+ Thread.sleep(1000);
+
MessageConsumer cons0 = session0.createConsumer(topic);
conn0.start();
More information about the jboss-cvs-commits
mailing list