JBoss hornetq SVN: r8125 - in trunk: src/config/common/schema and 4 other directories.
by do-not-reply@jboss.org
Author: timfox
Date: 2009-10-17 08:38:46 -0400 (Sat, 17 Oct 2009)
New Revision: 8125
Added:
trunk/src/main/org/hornetq/core/journal/impl/SyncSpeedTest.java
Modified:
trunk/examples/core/perf/server0/hornetq-configuration.xml
trunk/src/config/common/schema/hornetq-configuration.xsd
trunk/src/main/org/hornetq/core/config/Configuration.java
trunk/src/main/org/hornetq/core/config/impl/ConfigurationImpl.java
trunk/src/main/org/hornetq/core/config/impl/FileConfiguration.java
trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
Log:
built sync speed test into server
Modified: trunk/examples/core/perf/server0/hornetq-configuration.xml
===================================================================
--- trunk/examples/core/perf/server0/hornetq-configuration.xml 2009-10-17 12:15:09 UTC (rev 8124)
+++ trunk/examples/core/perf/server0/hornetq-configuration.xml 2009-10-17 12:38:46 UTC (rev 8125)
@@ -17,14 +17,16 @@
<persistence-enabled>true</persistence-enabled>
- <journal-sync-non-transactional>false</journal-sync-non-transactional>
+ <journal-sync-non-transactional>true</journal-sync-non-transactional>
+ <journal-sync-transactional>true</journal-sync-transactional>
<journal-type>ASYNCIO</journal-type>
<journal-min-files>20</journal-min-files>
<journal-aio-buffer-timeout>20000</journal-aio-buffer-timeout>
<log-journal-write-rate>true</log-journal-write-rate>
+ <run-sync-speed-test>true</run-sync-speed-test>
+
<!-- <perf-blast-pages>5000</perf-blast-pages> -->
-
-
+
<queues>
<queue name="perfQueue">
<address>perfAddress</address>
Modified: trunk/src/config/common/schema/hornetq-configuration.xsd
===================================================================
--- trunk/src/config/common/schema/hornetq-configuration.xsd 2009-10-17 12:15:09 UTC (rev 8124)
+++ trunk/src/config/common/schema/hornetq-configuration.xsd 2009-10-17 12:38:46 UTC (rev 8125)
@@ -168,6 +168,8 @@
</xsd:element>
<xsd:element maxOccurs="1" minOccurs="0" name="perf-blast-pages" type="xsd:int">
</xsd:element>
+ <xsd:element maxOccurs="1" minOccurs="0" name="run-sync-speed-test" type="xsd:boolean">
+ </xsd:element>
<xsd:element maxOccurs="1" minOccurs="0" name="server-dump-interval" type="xsd:long">
</xsd:element>
<xsd:element maxOccurs="1" minOccurs="0" name="memory-warning-threshold" type="xsd:int">
Modified: trunk/src/main/org/hornetq/core/config/Configuration.java
===================================================================
--- trunk/src/main/org/hornetq/core/config/Configuration.java 2009-10-17 12:15:09 UTC (rev 8124)
+++ trunk/src/main/org/hornetq/core/config/Configuration.java 2009-10-17 12:38:46 UTC (rev 8125)
@@ -243,6 +243,8 @@
void setLogJournalWriteRate(boolean rate);
+ //Undocumented attributes
+
int getJournalPerfBlastPages();
void setJournalPerfBlastPages(int pages);
@@ -259,6 +261,10 @@
void setMemoryMeasureInterval(long memoryMeasureInterval);
+ boolean isRunSyncSpeedTest();
+
+ void setRunSyncSpeedTest(boolean run);
+
// Paging Properties --------------------------------------------------------------------
String getPagingDirectory();
Modified: trunk/src/main/org/hornetq/core/config/impl/ConfigurationImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/config/impl/ConfigurationImpl.java 2009-10-17 12:15:09 UTC (rev 8124)
+++ trunk/src/main/org/hornetq/core/config/impl/ConfigurationImpl.java 2009-10-17 12:38:46 UTC (rev 8125)
@@ -110,6 +110,8 @@
public static final boolean DEFAULT_JOURNAL_LOG_WRITE_RATE = false;
public static final int DEFAULT_JOURNAL_PERF_BLAST_PAGES = -1;
+
+ public static final boolean DEFAULT_RUN_SYNC_SPEED_TEST = false;
public static final boolean DEFAULT_WILDCARD_ROUTING_ENABLED = true;
@@ -270,6 +272,8 @@
protected boolean logJournalWriteRate = DEFAULT_JOURNAL_LOG_WRITE_RATE;
protected int journalPerfBlastPages = DEFAULT_JOURNAL_PERF_BLAST_PAGES;
+
+ protected boolean runSyncSpeedTest = DEFAULT_RUN_SYNC_SPEED_TEST;
protected boolean wildcardRoutingEnabled = DEFAULT_WILDCARD_ROUTING_ENABLED;
@@ -670,7 +674,17 @@
{
this.journalPerfBlastPages = journalPerfBlastPages;
}
+
+ public boolean isRunSyncSpeedTest()
+ {
+ return runSyncSpeedTest;
+ }
+ public void setRunSyncSpeedTest(boolean run)
+ {
+ this.runSyncSpeedTest = run;
+ }
+
public boolean isCreateBindingsDir()
{
return createBindingsDir;
@@ -1109,4 +1123,5 @@
{
this.logDelegateFactoryClassName = className;
}
+
}
Modified: trunk/src/main/org/hornetq/core/config/impl/FileConfiguration.java
===================================================================
--- trunk/src/main/org/hornetq/core/config/impl/FileConfiguration.java 2009-10-17 12:15:09 UTC (rev 8124)
+++ trunk/src/main/org/hornetq/core/config/impl/FileConfiguration.java 2009-10-17 12:38:46 UTC (rev 8125)
@@ -319,6 +319,8 @@
logJournalWriteRate = getBoolean(e, "log-journal-write-rate", DEFAULT_JOURNAL_LOG_WRITE_RATE);
journalPerfBlastPages = getInteger(e, "perf-blast-pages", DEFAULT_JOURNAL_PERF_BLAST_PAGES, MINUS_ONE_OR_GT_ZERO);
+
+ runSyncSpeedTest = getBoolean(e, "run-sync-speed-test", runSyncSpeedTest);
wildcardRoutingEnabled = getBoolean(e, "wild-card-routing-enabled", wildcardRoutingEnabled);
Added: trunk/src/main/org/hornetq/core/journal/impl/SyncSpeedTest.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/SyncSpeedTest.java (rev 0)
+++ trunk/src/main/org/hornetq/core/journal/impl/SyncSpeedTest.java 2009-10-17 12:38:46 UTC (rev 8125)
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.journal.impl;
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import org.hornetq.core.logging.Logger;
+
+/**
+ * A SyncSpeedTest
+ *
+ * This class just provides some diagnostics on how fast your disk can sync
+ * Useful when determining performance issues
+ *
+ * @author tim fox
+ *
+ *
+ */
+public class SyncSpeedTest
+{
+ private static final Logger log = Logger.getLogger(SyncSpeedTest.class);
+
+ public static void main(final String[] args)
+ {
+ try
+ {
+ new SyncSpeedTest().run();
+ }
+ catch (Exception e)
+ {
+ e.printStackTrace();
+ }
+ }
+
+ public void run() throws Exception
+ {
+ log.info("******* Starting file sync speed test *******");
+
+ int fileSize = 1024 * 1024 * 10;
+
+ int recordSize = 1024;
+
+ int its = 10 * 1024;
+
+ File file = new File("sync-speed-test.dat");
+
+ if (file.exists())
+ {
+ file.delete();
+ }
+
+ RandomAccessFile rfile = new RandomAccessFile(file, "rw");
+
+ FileChannel channel = rfile.getChannel();
+
+ ByteBuffer bb = generateBuffer(fileSize, (byte)'x');
+
+ write(bb, channel, fileSize);
+
+ channel.force(false);
+
+ channel.position(0);
+
+ ByteBuffer bb1 = generateBuffer(recordSize, (byte)'h');
+
+ log.info("Measuring");
+
+ long start = System.currentTimeMillis();
+
+ for (int i = 0; i < its; i++)
+ {
+ write(bb1, channel, recordSize);
+
+ channel.force(false);
+ }
+
+ long end = System.currentTimeMillis();
+
+ double rate = 1000 * ((double)its) / (end - start);
+
+ log.info("Rate of " + rate + " syncs per sec");
+
+ rfile.close();
+
+ file.delete();
+
+ log.info("****** test complete *****");
+ }
+
+ private void write(final ByteBuffer buffer, final FileChannel channel, final int size) throws Exception
+ {
+ buffer.flip();
+
+ channel.write(buffer);
+ }
+
+ private ByteBuffer generateBuffer(final int size, final byte ch)
+ {
+ ByteBuffer bb = ByteBuffer.allocateDirect(size);
+
+ for (int i = 0; i < size; i++)
+ {
+ bb.put(ch);
+ }
+
+ return bb;
+ }
+}
Modified: trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java 2009-10-17 12:15:09 UTC (rev 8124)
+++ trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java 2009-10-17 12:38:46 UTC (rev 8125)
@@ -50,6 +50,7 @@
import org.hornetq.core.exception.HornetQException;
import org.hornetq.core.filter.Filter;
import org.hornetq.core.filter.impl.FilterImpl;
+import org.hornetq.core.journal.impl.SyncSpeedTest;
import org.hornetq.core.logging.LogDelegateFactory;
import org.hornetq.core.logging.Logger;
import org.hornetq.core.management.ManagementService;
@@ -69,7 +70,6 @@
import org.hornetq.core.postoffice.impl.LocalQueueBinding;
import org.hornetq.core.postoffice.impl.PostOfficeImpl;
import org.hornetq.core.remoting.Channel;
-import org.hornetq.core.remoting.Interceptor;
import org.hornetq.core.remoting.RemotingConnection;
import org.hornetq.core.remoting.impl.wireformat.CreateSessionResponseMessage;
import org.hornetq.core.remoting.impl.wireformat.ReattachSessionResponseMessage;
@@ -267,6 +267,13 @@
{
return;
}
+
+ if (configuration.isRunSyncSpeedTest())
+ {
+ SyncSpeedTest test = new SyncSpeedTest();
+
+ test.run();
+ }
initialisePart1();
@@ -1020,7 +1027,7 @@
securityDeployer.start();
}
}
-
+
// Load the journal and populate queues, transactions and caches in memory
loadJournal();
14 years, 7 months
JBoss hornetq SVN: r8124 - in trunk: examples/core/embedded-remote and 76 other directories.
by do-not-reply@jboss.org
Author: timfox
Date: 2009-10-17 08:15:09 -0400 (Sat, 17 Oct 2009)
New Revision: 8124
Modified:
trunk/examples/core/embedded-remote/build.sh
trunk/examples/core/embedded/build.sh
trunk/examples/core/microcontainer/build.sh
trunk/examples/core/perf/build.sh
trunk/examples/javaee/ejb-jms-transaction/build.sh
trunk/examples/javaee/hajndi/build.sh
trunk/examples/javaee/jca-config/build.sh
trunk/examples/javaee/jms-bridge/build.sh
trunk/examples/javaee/mdb-bmt/build.sh
trunk/examples/javaee/mdb-cmt-setrollbackonly/build.sh
trunk/examples/javaee/mdb-cmt-tx-local/build.sh
trunk/examples/javaee/mdb-cmt-tx-not-supported/build.sh
trunk/examples/javaee/mdb-cmt-tx-required/build.sh
trunk/examples/javaee/mdb-message-selector/build.sh
trunk/examples/javaee/mdb-tx-send/build.sh
trunk/examples/javaee/servlet-ssl/build.sh
trunk/examples/javaee/servlet-transport/build.sh
trunk/examples/javaee/xarecovery/build.sh
trunk/examples/jms/applet/build.sh
trunk/examples/jms/application-layer-failover/build.sh
trunk/examples/jms/automatic-failover/build.sh
trunk/examples/jms/bridge/build.sh
trunk/examples/jms/browser/build.sh
trunk/examples/jms/client-kickoff/build.sh
trunk/examples/jms/client-side-load-balancing/build.sh
trunk/examples/jms/clustered-durable-subscription/build.sh
trunk/examples/jms/clustered-queue/build.sh
trunk/examples/jms/clustered-standalone/build.sh
trunk/examples/jms/clustered-topic/build.sh
trunk/examples/jms/consumer-rate-limit/build.sh
trunk/examples/jms/dead-letter/build.sh
trunk/examples/jms/delayed-redelivery/build.sh
trunk/examples/jms/divert/build.sh
trunk/examples/jms/durable-subscription/build.sh
trunk/examples/jms/embedded/build.sh
trunk/examples/jms/expiry/build.sh
trunk/examples/jms/http-transport/build.sh
trunk/examples/jms/instantiate-connection-factory/build.sh
trunk/examples/jms/interceptor/build.sh
trunk/examples/jms/jaas/build.sh
trunk/examples/jms/jms-bridge/build.sh
trunk/examples/jms/jmx/build.sh
trunk/examples/jms/large-message/build.sh
trunk/examples/jms/last-value-queue/build.sh
trunk/examples/jms/management-notifications/build.sh
trunk/examples/jms/management/build.sh
trunk/examples/jms/message-counters/build.sh
trunk/examples/jms/message-group/build.sh
trunk/examples/jms/message-priority/build.sh
trunk/examples/jms/no-consumer-buffering/build.sh
trunk/examples/jms/paging/build.sh
trunk/examples/jms/perf/build.sh
trunk/examples/jms/pre-acknowledge/build.sh
trunk/examples/jms/producer-rate-limit/build.sh
trunk/examples/jms/queue-message-redistribution/build.sh
trunk/examples/jms/queue-requestor/build.sh
trunk/examples/jms/queue-selector/build.sh
trunk/examples/jms/queue/build.sh
trunk/examples/jms/reconnect-same-node/build.sh
trunk/examples/jms/request-reply/build.sh
trunk/examples/jms/scheduled-message/build.sh
trunk/examples/jms/security/build.sh
trunk/examples/jms/send-acknowledgements/build.sh
trunk/examples/jms/ssl-enabled/build.sh
trunk/examples/jms/static-selector-jms/build.sh
trunk/examples/jms/static-selector/build.sh
trunk/examples/jms/symmetric-cluster/build.sh
trunk/examples/jms/temp-queue/build.sh
trunk/examples/jms/topic-hierarchies/build.sh
trunk/examples/jms/topic-selector-example1/build.sh
trunk/examples/jms/topic-selector-example2/build.sh
trunk/examples/jms/topic/build.sh
trunk/examples/jms/transactional/build.sh
trunk/examples/jms/xa-heuristic/build.sh
trunk/examples/jms/xa-receive/build.sh
trunk/examples/jms/xa-send/build.sh
trunk/examples/jms/xa-with-jta/build.sh
trunk/src/bin/build.sh
Log:
make shell scripts unix bourne shell compatible
Modified: trunk/examples/core/embedded/build.sh
===================================================================
--- trunk/examples/core/embedded/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/core/embedded/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/core/embedded-remote/build.sh
===================================================================
--- trunk/examples/core/embedded-remote/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/core/embedded-remote/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/core/microcontainer/build.sh
===================================================================
--- trunk/examples/core/microcontainer/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/core/microcontainer/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/core/perf/build.sh
===================================================================
--- trunk/examples/core/perf/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/core/perf/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/ejb-jms-transaction/build.sh
===================================================================
--- trunk/examples/javaee/ejb-jms-transaction/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/ejb-jms-transaction/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/hajndi/build.sh
===================================================================
--- trunk/examples/javaee/hajndi/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/hajndi/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/jca-config/build.sh
===================================================================
--- trunk/examples/javaee/jca-config/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/jca-config/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/jms-bridge/build.sh
===================================================================
--- trunk/examples/javaee/jms-bridge/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/jms-bridge/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/mdb-bmt/build.sh
===================================================================
--- trunk/examples/javaee/mdb-bmt/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/mdb-bmt/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/mdb-cmt-setrollbackonly/build.sh
===================================================================
--- trunk/examples/javaee/mdb-cmt-setrollbackonly/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/mdb-cmt-setrollbackonly/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/mdb-cmt-tx-local/build.sh
===================================================================
--- trunk/examples/javaee/mdb-cmt-tx-local/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/mdb-cmt-tx-local/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/mdb-cmt-tx-not-supported/build.sh
===================================================================
--- trunk/examples/javaee/mdb-cmt-tx-not-supported/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/mdb-cmt-tx-not-supported/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/mdb-cmt-tx-required/build.sh
===================================================================
--- trunk/examples/javaee/mdb-cmt-tx-required/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/mdb-cmt-tx-required/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/mdb-message-selector/build.sh
===================================================================
--- trunk/examples/javaee/mdb-message-selector/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/mdb-message-selector/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/mdb-tx-send/build.sh
===================================================================
--- trunk/examples/javaee/mdb-tx-send/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/mdb-tx-send/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/servlet-ssl/build.sh
===================================================================
--- trunk/examples/javaee/servlet-ssl/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/servlet-ssl/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/servlet-transport/build.sh
===================================================================
--- trunk/examples/javaee/servlet-transport/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/servlet-transport/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/javaee/xarecovery/build.sh
===================================================================
--- trunk/examples/javaee/xarecovery/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/javaee/xarecovery/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/applet/build.sh
===================================================================
--- trunk/examples/jms/applet/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/applet/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/application-layer-failover/build.sh
===================================================================
--- trunk/examples/jms/application-layer-failover/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/application-layer-failover/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/automatic-failover/build.sh
===================================================================
--- trunk/examples/jms/automatic-failover/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/automatic-failover/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/bridge/build.sh
===================================================================
--- trunk/examples/jms/bridge/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/bridge/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/browser/build.sh
===================================================================
--- trunk/examples/jms/browser/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/browser/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/client-kickoff/build.sh
===================================================================
--- trunk/examples/jms/client-kickoff/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/client-kickoff/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/client-side-load-balancing/build.sh
===================================================================
--- trunk/examples/jms/client-side-load-balancing/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/client-side-load-balancing/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/clustered-durable-subscription/build.sh
===================================================================
--- trunk/examples/jms/clustered-durable-subscription/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/clustered-durable-subscription/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/clustered-queue/build.sh
===================================================================
--- trunk/examples/jms/clustered-queue/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/clustered-queue/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/clustered-standalone/build.sh
===================================================================
--- trunk/examples/jms/clustered-standalone/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/clustered-standalone/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/clustered-topic/build.sh
===================================================================
--- trunk/examples/jms/clustered-topic/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/clustered-topic/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/consumer-rate-limit/build.sh
===================================================================
--- trunk/examples/jms/consumer-rate-limit/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/consumer-rate-limit/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/dead-letter/build.sh
===================================================================
--- trunk/examples/jms/dead-letter/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/dead-letter/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/delayed-redelivery/build.sh
===================================================================
--- trunk/examples/jms/delayed-redelivery/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/delayed-redelivery/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/divert/build.sh
===================================================================
--- trunk/examples/jms/divert/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/divert/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/durable-subscription/build.sh
===================================================================
--- trunk/examples/jms/durable-subscription/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/durable-subscription/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/embedded/build.sh
===================================================================
--- trunk/examples/jms/embedded/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/embedded/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/expiry/build.sh
===================================================================
--- trunk/examples/jms/expiry/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/expiry/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/http-transport/build.sh
===================================================================
--- trunk/examples/jms/http-transport/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/http-transport/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/instantiate-connection-factory/build.sh
===================================================================
--- trunk/examples/jms/instantiate-connection-factory/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/instantiate-connection-factory/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/interceptor/build.sh
===================================================================
--- trunk/examples/jms/interceptor/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/interceptor/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/jaas/build.sh
===================================================================
--- trunk/examples/jms/jaas/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/jaas/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/jms-bridge/build.sh
===================================================================
--- trunk/examples/jms/jms-bridge/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/jms-bridge/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/jmx/build.sh
===================================================================
--- trunk/examples/jms/jmx/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/jmx/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/large-message/build.sh
===================================================================
--- trunk/examples/jms/large-message/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/large-message/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/last-value-queue/build.sh
===================================================================
--- trunk/examples/jms/last-value-queue/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/last-value-queue/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/management/build.sh
===================================================================
--- trunk/examples/jms/management/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/management/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/management-notifications/build.sh
===================================================================
--- trunk/examples/jms/management-notifications/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/management-notifications/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/message-counters/build.sh
===================================================================
--- trunk/examples/jms/message-counters/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/message-counters/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/message-group/build.sh
===================================================================
--- trunk/examples/jms/message-group/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/message-group/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/message-priority/build.sh
===================================================================
--- trunk/examples/jms/message-priority/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/message-priority/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/no-consumer-buffering/build.sh
===================================================================
--- trunk/examples/jms/no-consumer-buffering/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/no-consumer-buffering/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/paging/build.sh
===================================================================
--- trunk/examples/jms/paging/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/paging/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/perf/build.sh
===================================================================
--- trunk/examples/jms/perf/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/perf/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/pre-acknowledge/build.sh
===================================================================
--- trunk/examples/jms/pre-acknowledge/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/pre-acknowledge/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/producer-rate-limit/build.sh
===================================================================
--- trunk/examples/jms/producer-rate-limit/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/producer-rate-limit/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/queue/build.sh
===================================================================
--- trunk/examples/jms/queue/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/queue/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/queue-message-redistribution/build.sh
===================================================================
--- trunk/examples/jms/queue-message-redistribution/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/queue-message-redistribution/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/queue-requestor/build.sh
===================================================================
--- trunk/examples/jms/queue-requestor/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/queue-requestor/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/queue-selector/build.sh
===================================================================
--- trunk/examples/jms/queue-selector/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/queue-selector/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/reconnect-same-node/build.sh
===================================================================
--- trunk/examples/jms/reconnect-same-node/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/reconnect-same-node/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/request-reply/build.sh
===================================================================
--- trunk/examples/jms/request-reply/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/request-reply/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/scheduled-message/build.sh
===================================================================
--- trunk/examples/jms/scheduled-message/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/scheduled-message/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/security/build.sh
===================================================================
--- trunk/examples/jms/security/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/security/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/send-acknowledgements/build.sh
===================================================================
--- trunk/examples/jms/send-acknowledgements/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/send-acknowledgements/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/ssl-enabled/build.sh
===================================================================
--- trunk/examples/jms/ssl-enabled/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/ssl-enabled/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/static-selector/build.sh
===================================================================
--- trunk/examples/jms/static-selector/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/static-selector/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/static-selector-jms/build.sh
===================================================================
--- trunk/examples/jms/static-selector-jms/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/static-selector-jms/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/symmetric-cluster/build.sh
===================================================================
--- trunk/examples/jms/symmetric-cluster/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/symmetric-cluster/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/temp-queue/build.sh
===================================================================
--- trunk/examples/jms/temp-queue/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/temp-queue/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/topic/build.sh
===================================================================
--- trunk/examples/jms/topic/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/topic/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/topic-hierarchies/build.sh
===================================================================
--- trunk/examples/jms/topic-hierarchies/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/topic-hierarchies/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/topic-selector-example1/build.sh
===================================================================
--- trunk/examples/jms/topic-selector-example1/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/topic-selector-example1/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/topic-selector-example2/build.sh
===================================================================
--- trunk/examples/jms/topic-selector-example2/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/topic-selector-example2/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/transactional/build.sh
===================================================================
--- trunk/examples/jms/transactional/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/transactional/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/xa-heuristic/build.sh
===================================================================
--- trunk/examples/jms/xa-heuristic/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/xa-heuristic/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/xa-receive/build.sh
===================================================================
--- trunk/examples/jms/xa-receive/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/xa-receive/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/xa-send/build.sh
===================================================================
--- trunk/examples/jms/xa-send/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/xa-send/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/examples/jms/xa-with-jta/build.sh
===================================================================
--- trunk/examples/jms/xa-with-jta/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/examples/jms/xa-with-jta/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -1,6 +1,7 @@
#!/bin/sh
-export OVERRIDE_ANT_HOME=../../../tools/ant
+OVERRIDE_ANT_HOME=../../../tools/ant
+export OVERRIDE_ANT_HOME
if [ -f "../../../src/bin/build.sh" ]; then
# running from TRUNK
Modified: trunk/src/bin/build.sh
===================================================================
--- trunk/src/bin/build.sh 2009-10-17 11:57:49 UTC (rev 8123)
+++ trunk/src/bin/build.sh 2009-10-17 12:15:09 UTC (rev 8124)
@@ -30,7 +30,8 @@
}
# Save off the original ANT_HOME value
-export ORIG_ANT_HOME=$ANT_HOME
+ORIG_ANT_HOME=$ANT_HOME
+export ORIG_ANT_HOME
# Set the temporary ANT_HOME
@@ -38,11 +39,15 @@
echo "ANT_HOME is ${OVERRIDE_ANT_HOME}"
- export ANT_HOME=$OVERRIDE_ANT_HOME
+ ANT_HOME=$OVERRIDE_ANT_HOME
+
+ export ANT_HOME
else
- export ANT_HOME=tools/ant
+ ANT_HOME=tools/ant
+
+ export ANT_HOME
fi
@@ -71,4 +76,6 @@
# Restore the original path
-export ANT_HOME=$ORIG_ANT_HOME
+ANT_HOME=$ORIG_ANT_HOME
+
+export ANT_HOME
14 years, 7 months
JBoss hornetq SVN: r8123 - in trunk: src/main/org/hornetq/core/client/impl and 7 other directories.
by do-not-reply@jboss.org
Author: timfox
Date: 2009-10-17 07:57:49 -0400 (Sat, 17 Oct 2009)
New Revision: 8123
Modified:
trunk/docs/user-manual/en/client-reconnection.xml
trunk/src/main/org/hornetq/core/client/impl/ClientSessionInternal.java
trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
trunk/src/main/org/hornetq/core/remoting/Interceptor.java
trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java
trunk/src/main/org/hornetq/core/server/impl/RoutingContextImpl.java
trunk/tests/src/org/hornetq/tests/integration/client/NewDeadLetterAddressTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/jms/client/MessageTest.java
Log:
a few tweaks
Modified: trunk/docs/user-manual/en/client-reconnection.xml
===================================================================
--- trunk/docs/user-manual/en/client-reconnection.xml 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/docs/user-manual/en/client-reconnection.xml 2009-10-17 11:57:49 UTC (rev 8123)
@@ -80,9 +80,6 @@
shutting down. A value of <literal>-1</literal> signifies an unlimited number of
attempts. The default value is <literal>-1</literal>.</para>
</listitem>
- <listitem>
- <para><literal>use-reattach</literal>. </para>
- </listitem>
</itemizedlist>
<para>If you're using JMS, and you're using the JMS Service on the server to load your JMS
connection factory instances directly into JNDI, then you can specify these parameters in
Modified: trunk/src/main/org/hornetq/core/client/impl/ClientSessionInternal.java
===================================================================
--- trunk/src/main/org/hornetq/core/client/impl/ClientSessionInternal.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/src/main/org/hornetq/core/client/impl/ClientSessionInternal.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -19,6 +19,7 @@
import org.hornetq.core.remoting.impl.wireformat.SessionReceiveContinuationMessage;
import org.hornetq.core.remoting.impl.wireformat.SessionReceiveMessage;
import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.SimpleString;
/**
* A ClientSessionInternal
@@ -55,8 +56,6 @@
void handleFailover(RemotingConnection backupConnection);
- // boolean handleReattach(RemotingConnection backupConnection);
-
RemotingConnection getConnection();
void cleanUp() throws Exception;
@@ -70,4 +69,6 @@
void workDone();
void forceDelivery(long consumerID, long sequence) throws HornetQException;
+
+ //void sendProducerCreditsMessage(int credits, SimpleString destination);
}
Modified: trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -931,13 +931,13 @@
{
public void run()
{
- deliverReferences(refs);
+ addReferences(refs);
}
});
}
else
{
- deliverReferences(refs);
+ addReferences(refs);
}
}
}
@@ -945,11 +945,10 @@
/**
* @param refs
*/
- private void deliverReferences(final List<MessageReference> refs)
+ private void addReferences(final List<MessageReference> refs)
{
for (MessageReference ref : refs)
- {
-
+ {
ref.getQueue().addLast(ref);
}
}
Modified: trunk/src/main/org/hornetq/core/remoting/Interceptor.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/Interceptor.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/src/main/org/hornetq/core/remoting/Interceptor.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -22,7 +22,7 @@
* To Add this interceptor, you have to modify hornetq-configuration.xml
*
* @author clebert.suconic(a)jboss.com
- * @author tim.fox(a)jboss.com
+ * @author <a href="mailto:tim.fox@jboss.com">Tim Fox</a>
*/
public interface Interceptor
{
Modified: trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -468,8 +468,7 @@
ReplicationContext tokenPolled = pendingTokens.poll();
if (tokenPolled == null)
{
- // We should debug the logs if this happens
- log.warn("Missing replication token on the stack. There is a bug on the ReplicatoinManager since this was not supposed to happen");
+ throw new IllegalStateException("Missing replication token on the queue.");
}
else
{
Modified: trunk/src/main/org/hornetq/core/server/impl/RoutingContextImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/impl/RoutingContextImpl.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/src/main/org/hornetq/core/server/impl/RoutingContextImpl.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -23,7 +23,7 @@
/**
* A RoutingContextImpl
*
- * @author tim
+ * @author <a href="mailto:tim.fox@jboss.com">Tim Fox</a>
*
*
*/
Modified: trunk/tests/src/org/hornetq/tests/integration/client/NewDeadLetterAddressTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/client/NewDeadLetterAddressTest.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/tests/src/org/hornetq/tests/integration/client/NewDeadLetterAddressTest.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -12,14 +12,6 @@
*/
package org.hornetq.tests.integration.client;
-import static org.hornetq.tests.util.RandomUtil.randomSimpleString;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.transaction.xa.XAResource;
-import javax.transaction.xa.Xid;
-
import org.hornetq.core.client.ClientConsumer;
import org.hornetq.core.client.ClientMessage;
import org.hornetq.core.client.ClientProducer;
@@ -29,12 +21,9 @@
import org.hornetq.core.config.TransportConfiguration;
import org.hornetq.core.config.impl.ConfigurationImpl;
import org.hornetq.core.exception.HornetQException;
-import org.hornetq.core.message.impl.MessageImpl;
import org.hornetq.core.server.HornetQ;
import org.hornetq.core.server.HornetQServer;
-import org.hornetq.core.server.Queue;
import org.hornetq.core.settings.impl.AddressSettings;
-import org.hornetq.core.transaction.impl.XidImpl;
import org.hornetq.tests.util.UnitTestCase;
import org.hornetq.utils.SimpleString;
@@ -42,7 +31,7 @@
*
* A NewDeadLetterAddressTest
*
- * @author tim fox
+ * @author <a href="mailto:tim.fox@jboss.com">Tim Fox</a>
*
*
*/
Modified: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -30,7 +30,7 @@
/**
* A FailoverTestBase
*
- * @author tim
+ * @author <a href="mailto:tim.fox@jboss.com">Tim Fox</a>
*
*
*/
Modified: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -13,16 +13,12 @@
package org.hornetq.tests.integration.cluster.failover;
-import java.util.HashMap;
-import java.util.Map;
-
import org.hornetq.core.config.TransportConfiguration;
-import org.hornetq.integration.transports.netty.TransportConstants;
/**
* A NettyAsynchronousFailoverTest
*
- * @author tim
+ * @author <a href="mailto:tim.fox@jboss.com">Tim Fox</a>
*
*
*/
Modified: trunk/tests/src/org/hornetq/tests/integration/jms/client/MessageTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/jms/client/MessageTest.java 2009-10-17 11:46:23 UTC (rev 8122)
+++ trunk/tests/src/org/hornetq/tests/integration/jms/client/MessageTest.java 2009-10-17 11:57:49 UTC (rev 8123)
@@ -27,7 +27,7 @@
*
* A MessageTest
*
- * @author tim
+ * @author <a href="mailto:tim.fox@jboss.com">Tim Fox</a>
*
*
*/
14 years, 7 months
JBoss hornetq SVN: r8122 - trunk/examples/core/perf.
by do-not-reply@jboss.org
Author: timfox
Date: 2009-10-17 07:46:23 -0400 (Sat, 17 Oct 2009)
New Revision: 8122
Modified:
trunk/examples/core/perf/build.xml
trunk/examples/core/perf/perf.properties
Log:
fixed build for core perf example and tweaked params
Modified: trunk/examples/core/perf/build.xml
===================================================================
--- trunk/examples/core/perf/build.xml 2009-10-17 02:25:10 UTC (rev 8121)
+++ trunk/examples/core/perf/build.xml 2009-10-17 11:46:23 UTC (rev 8122)
@@ -28,7 +28,7 @@
<property name="perf.properties.file.name" value="${file.name}" />
<target name="runSender" depends="compile">
- <java classname="org.hornetq.jms.example.PerfSender" fork="true" resultproperty="example-result">
+ <java classname="org.hornetq.core.example.PerfSender" fork="true" resultproperty="example-result">
<jvmarg value="-Xms512M"/>
<jvmarg value="-Xmx512M"/>
<jvmarg value="-XX:+UseParallelGC"/>
@@ -41,7 +41,7 @@
</target>
<target name="runListener" depends="compile">
- <java classname="org.hornetq.jms.example.PerfListener" fork="true" resultproperty="example-result">
+ <java classname="org.hornetq.core.example.PerfListener" fork="true" resultproperty="example-result">
<jvmarg value="-Xms512M"/>
<jvmarg value="-Xmx512M"/>
<jvmarg value="-XX:+UseParallelGC"/>
Modified: trunk/examples/core/perf/perf.properties
===================================================================
--- trunk/examples/core/perf/perf.properties 2009-10-17 02:25:10 UTC (rev 8121)
+++ trunk/examples/core/perf/perf.properties 2009-10-17 11:46:23 UTC (rev 8122)
@@ -1,18 +1,18 @@
-num-messages=1000000
-num-warmup-messages=20000
+num-messages=10000
+num-warmup-messages=100
message-size=1024
durable=true
transacted=false
batch-size=1000
-drain-queue=false
+drain-queue=true
throttle-rate=-1
address=perfAddress
queue-name=perfQueue
host=localhost
port=5445
tcp-buffer=1048576
-tcp-no-delay=false
+tcp-no-delay=true
send-window=1048576
pre-ack=true
block-ack=false
-block-persistent=false
+block-persistent=true
14 years, 7 months
JBoss hornetq SVN: r8121 - in trunk: src/main/org/hornetq/core/persistence/impl/journal and 6 other directories.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-10-16 22:25:10 -0400 (Fri, 16 Oct 2009)
New Revision: 8121
Modified:
trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java
trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java
trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
trunk/src/main/org/hornetq/core/replication/ReplicationContext.java
trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java
trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java
trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java
trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
trunk/tests/src/org/hornetq/tests/
trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java
Log:
https://jira.jboss.org/jira/browse/HORNETQ-125 - Replication stop on backup failure
Modified: trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -54,10 +54,6 @@
return 1;
}
- public void flush()
- {
- }
-
public int calculateBlockStart(final int position) throws Exception
{
return position;
Modified: trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java
===================================================================
--- trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -1764,7 +1764,7 @@
private class FinishPageMessageOperation implements TransactionOperation
{
- public void afterCommit(final Transaction tx) throws Exception
+ public void afterCommit(final Transaction tx)
{
// If part of the transaction goes to the queue, and part goes to paging, we can't let depage start for the
// transaction until all the messages were added to the queue
Modified: trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -1100,7 +1100,7 @@
return Collections.emptySet();
}
- public void afterCommit(final Transaction tx) throws Exception
+ public void afterCommit(final Transaction tx)
{
// If part of the transaction goes to the queue, and part goes to paging, we can't let depage start for the
// transaction until all the messages were added to the queue
@@ -1214,7 +1214,7 @@
this.refs = refs;
}
- public void afterCommit(Transaction tx) throws Exception
+ public void afterCommit(Transaction tx)
{
for (MessageReference ref : refs)
{
Modified: trunk/src/main/org/hornetq/core/replication/ReplicationContext.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/ReplicationContext.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/replication/ReplicationContext.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -34,5 +34,8 @@
/** To be called when there are no more operations pending */
void complete();
+
+ /** Flush all pending callbacks on the Context */
+ void flush();
}
Modified: trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -48,17 +48,17 @@
private final ReplicationManager replicationManager;
- private final Journal replicatedJournal;
+ private final Journal localJournal;
private final byte journalID;
public ReplicatedJournal(final byte journaID,
- final Journal replicatedJournal,
+ final Journal localJournal,
final ReplicationManager replicationManager)
{
super();
journalID = journaID;
- this.replicatedJournal = replicatedJournal;
+ this.localJournal = localJournal;
this.replicationManager = replicationManager;
}
@@ -100,7 +100,7 @@
trace("Append record id = " + id + " recordType = " + recordType);
}
replicationManager.appendAddRecord(journalID, id, recordType, record);
- replicatedJournal.appendAddRecord(id, recordType, record, sync);
+ localJournal.appendAddRecord(id, recordType, record, sync);
}
/**
@@ -134,7 +134,7 @@
trace("Append record TXid = " + id + " recordType = " + recordType);
}
replicationManager.appendAddRecordTransactional(journalID, txID, id, recordType, record);
- replicatedJournal.appendAddRecordTransactional(txID, id, recordType, record);
+ localJournal.appendAddRecordTransactional(txID, id, recordType, record);
}
/**
@@ -150,7 +150,7 @@
trace("AppendCommit " + txID);
}
replicationManager.appendCommitRecord(journalID, txID);
- replicatedJournal.appendCommitRecord(txID, sync);
+ localJournal.appendCommitRecord(txID, sync);
}
/**
@@ -166,7 +166,7 @@
trace("AppendDelete " + id);
}
replicationManager.appendDeleteRecord(journalID, id);
- replicatedJournal.appendDeleteRecord(id, sync);
+ localJournal.appendDeleteRecord(id, sync);
}
/**
@@ -195,7 +195,7 @@
trace("AppendDelete txID=" + txID + " id=" + id);
}
replicationManager.appendDeleteRecordTransactional(journalID, txID, id, record);
- replicatedJournal.appendDeleteRecordTransactional(txID, id, record);
+ localJournal.appendDeleteRecordTransactional(txID, id, record);
}
/**
@@ -211,7 +211,7 @@
trace("AppendDelete (noencoding) txID=" + txID + " id=" + id);
}
replicationManager.appendDeleteRecordTransactional(journalID, txID, id);
- replicatedJournal.appendDeleteRecordTransactional(txID, id);
+ localJournal.appendDeleteRecordTransactional(txID, id);
}
/**
@@ -240,7 +240,7 @@
trace("AppendPrepare txID=" + txID);
}
replicationManager.appendPrepareRecord(journalID, txID, transactionData);
- replicatedJournal.appendPrepareRecord(txID, transactionData, sync);
+ localJournal.appendPrepareRecord(txID, transactionData, sync);
}
/**
@@ -256,7 +256,7 @@
trace("AppendRollback " + txID);
}
replicationManager.appendRollbackRecord(journalID, txID);
- replicatedJournal.appendRollbackRecord(txID, sync);
+ localJournal.appendRollbackRecord(txID, sync);
}
/**
@@ -287,7 +287,7 @@
trace("AppendUpdateRecord id = " + id + " , recordType = " + recordType);
}
replicationManager.appendUpdateRecord(journalID, id, recordType, record);
- replicatedJournal.appendUpdateRecord(id, recordType, record, sync);
+ localJournal.appendUpdateRecord(id, recordType, record, sync);
}
/**
@@ -324,7 +324,7 @@
trace("AppendUpdateRecord txid=" + txID + " id = " + id + " , recordType = " + recordType);
}
replicationManager.appendUpdateRecordTransactional(journalID, txID, id, recordType, record);
- replicatedJournal.appendUpdateRecordTransactional(txID, id, recordType, record);
+ localJournal.appendUpdateRecordTransactional(txID, id, recordType, record);
}
/**
@@ -339,7 +339,7 @@
final List<PreparedTransactionInfo> preparedTransactions,
final TransactionFailureCallback transactionFailure) throws Exception
{
- return replicatedJournal.load(committedRecords, preparedTransactions, transactionFailure);
+ return localJournal.load(committedRecords, preparedTransactions, transactionFailure);
}
/**
@@ -350,7 +350,7 @@
*/
public long load(final LoaderCallback reloadManager) throws Exception
{
- return replicatedJournal.load(reloadManager);
+ return localJournal.load(reloadManager);
}
/**
@@ -360,7 +360,7 @@
*/
public void perfBlast(final int pages) throws Exception
{
- replicatedJournal.perfBlast(pages);
+ localJournal.perfBlast(pages);
}
/**
@@ -369,7 +369,7 @@
*/
public void start() throws Exception
{
- replicatedJournal.start();
+ localJournal.start();
}
/**
@@ -378,7 +378,7 @@
*/
public void stop() throws Exception
{
- replicatedJournal.stop();
+ localJournal.stop();
}
/* (non-Javadoc)
@@ -386,7 +386,7 @@
*/
public int getAlignment() throws Exception
{
- return replicatedJournal.getAlignment();
+ return localJournal.getAlignment();
}
/* (non-Javadoc)
@@ -394,7 +394,7 @@
*/
public boolean isStarted()
{
- return replicatedJournal.isStarted();
+ return localJournal.isStarted();
}
// Package protected ---------------------------------------------
Modified: trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -53,14 +53,22 @@
{
if (--pendings == 0)
{
- if (tasks != null)
+ flush();
+ }
+ }
+
+ /**
+ *
+ */
+ public void flush()
+ {
+ if (tasks != null)
+ {
+ for (Runnable run : tasks)
{
- for (Runnable run : tasks)
- {
- executor.execute(run);
- }
- tasks.clear();
+ executor.execute(run);
}
+ tasks.clear();
}
}
Modified: trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -19,11 +19,13 @@
import java.util.concurrent.Executor;
import org.hornetq.core.client.impl.FailoverManager;
+import org.hornetq.core.exception.HornetQException;
import org.hornetq.core.journal.EncodingSupport;
import org.hornetq.core.logging.Logger;
import org.hornetq.core.paging.PagedMessage;
import org.hornetq.core.remoting.Channel;
import org.hornetq.core.remoting.ChannelHandler;
+import org.hornetq.core.remoting.FailureListener;
import org.hornetq.core.remoting.Packet;
import org.hornetq.core.remoting.RemotingConnection;
import org.hornetq.core.remoting.impl.wireformat.CreateReplicationSessionMessage;
@@ -40,8 +42,8 @@
import org.hornetq.core.remoting.impl.wireformat.ReplicationPageWriteMessage;
import org.hornetq.core.remoting.impl.wireformat.ReplicationPrepareMessage;
import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.core.replication.ReplicationContext;
import org.hornetq.core.replication.ReplicationManager;
-import org.hornetq.core.replication.ReplicationContext;
import org.hornetq.utils.ConcurrentHashSet;
import org.hornetq.utils.SimpleString;
@@ -79,11 +81,11 @@
private final Executor executor;
- private final ThreadLocal<ReplicationContext> repliToken = new ThreadLocal<ReplicationContext>();
+ private final ThreadLocal<ReplicationContext> tlReplicationContext = new ThreadLocal<ReplicationContext>();
private final Queue<ReplicationContext> pendingTokens = new ConcurrentLinkedQueue<ReplicationContext>();
- private final ConcurrentHashSet<ReplicationContext> activeTokens = new ConcurrentHashSet<ReplicationContext>();
+ private final ConcurrentHashSet<ReplicationContext> activeContexts = new ConcurrentHashSet<ReplicationContext>();
// Static --------------------------------------------------------
@@ -255,7 +257,7 @@
sendReplicatePacket(new ReplicationPageWriteMessage(message, pageNumber));
}
}
-
+
/* (non-Javadoc)
* @see org.hornetq.core.replication.ReplicationManager#largeMessageBegin(byte[])
*/
@@ -300,8 +302,6 @@
}
}
-
-
/* (non-Javadoc)
* @see org.hornetq.core.server.HornetQComponent#isStarted()
*/
@@ -330,6 +330,22 @@
mainChannel.sendBlocking(replicationStartPackage);
+ failoverManager.addFailureListener(new FailureListener()
+ {
+ public void connectionFailed(HornetQException me)
+ {
+ log.warn("Connection to the backup node failed, removing replication now");
+ try
+ {
+ stop();
+ }
+ catch (Exception e)
+ {
+ log.warn(e.getMessage(), e);
+ }
+ }
+ });
+
started = true;
enabled = true;
@@ -340,6 +356,16 @@
*/
public void stop() throws Exception
{
+ enabled = false;
+
+ for (ReplicationContext ctx : activeContexts)
+ {
+ ctx.complete();
+ ctx.flush();
+ }
+
+ activeContexts.clear();
+
if (replicatingChannel != null)
{
replicatingChannel.close();
@@ -353,16 +379,18 @@
}
connection = null;
+
+ started = false;
}
public ReplicationContext getContext()
{
- ReplicationContext token = repliToken.get();
+ ReplicationContext token = tlReplicationContext.get();
if (token == null)
{
token = new ReplicationContextImpl(executor);
- activeTokens.add(token);
- repliToken.set(token);
+ activeContexts.add(token);
+ tlReplicationContext.set(token);
}
return token;
}
@@ -380,17 +408,17 @@
*/
public void closeContext()
{
- final ReplicationContext token = repliToken.get();
+ final ReplicationContext token = tlReplicationContext.get();
if (token != null)
{
// Disassociate thread local
- repliToken.set(null);
+ tlReplicationContext.set(null);
// Remove from pending tokens as soon as this is complete
token.addReplicationAction(new Runnable()
{
public void run()
{
- activeTokens.remove(token);
+ activeContexts.remove(token);
}
});
}
@@ -401,7 +429,7 @@
*/
public Set<ReplicationContext> getActiveTokens()
{
- return activeTokens;
+ return activeContexts;
}
private void sendReplicatePacket(final Packet packet)
Modified: trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -192,9 +192,9 @@
private boolean initialised;
private FailoverManager replicationFailoverManager;
-
+
private ReplicationManager replicationManager;
-
+
private ReplicationEndpoint replicationEndpoint;
private final Set<ActivateCallback> activateCallbacks = new HashSet<ActivateCallback>();
@@ -251,7 +251,7 @@
addressSettingsRepository.setDefault(new AddressSettings());
- // this.managementConnectorID = managementConnectorSequence.decrementAndGet();
+ // this.managementConnectorID = managementConnectorSequence.decrementAndGet();
}
// lifecycle methods
@@ -351,7 +351,7 @@
{
storageManager.stop();
}
-
+
if (replicationEndpoint != null)
{
replicationEndpoint.stop();
@@ -407,7 +407,7 @@
{
memoryManager.stop();
}
-
+
pagingManager = null;
securityStore = null;
resourceManager = null;
@@ -603,22 +603,21 @@
return new CreateSessionResponseMessage(version.getIncrementingVersion());
}
-
+
public synchronized ReplicationEndpoint createReplicationEndpoint(final Channel channel) throws Exception
{
if (!configuration.isBackup())
{
throw new HornetQException(HornetQException.ILLEGAL_STATE, "Connected server is not a backup server");
}
-
+
if (replicationEndpoint == null)
{
replicationEndpoint = new ReplicationEndpointImpl(this);
replicationEndpoint.setChannel(channel);
replicationEndpoint.start();
}
-
-
+
return replicationEndpoint;
}
@@ -660,81 +659,6 @@
}
}
- // public void initialiseBackup(final UUID theUUID, final long liveUniqueID) throws Exception
- // {
- // if (theUUID == null)
- // {
- // throw new IllegalArgumentException("node id is null");
- // }
- //
- // synchronized (initialiseLock)
- // {
- // if (initialised)
- // {
- // throw new IllegalStateException("Server is already initialised");
- // }
- //
- // this.uuid = theUUID;
- //
- // this.nodeID = new SimpleString(uuid.toString());
- //
- // initialisePart2();
- //
- // long backupID = storageManager.getCurrentUniqueID();
- //
- // if (liveUniqueID != backupID)
- // {
- // initialised = false;
- //
- // throw new IllegalStateException("Live and backup unique ids different (" + liveUniqueID +
- // ":" +
- // backupID +
- // "). You're probably trying to restart a live backup pair after a crash");
- // }
- //
- // log.info("Backup server is now operational");
- // }
- // }
-
- private boolean startReplication() throws Exception
- {
- String backupConnectorName = configuration.getBackupConnectorName();
-
- if (backupConnectorName != null)
- {
- TransportConfiguration backupConnector = configuration.getConnectorConfigurations().get(backupConnectorName);
-
- if (backupConnector == null)
- {
- log.warn("connector with name '" + backupConnectorName + "' is not defined in the configuration.");
- }
- else
- {
-
- replicationFailoverManager = new FailoverManagerImpl((ClientSessionFactory)null,
- backupConnector,
- null,
- false,
- ClientSessionFactoryImpl.DEFAULT_CALL_TIMEOUT,
- ClientSessionFactoryImpl.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD,
- ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL,
- 0,
- 1.0d,
- 0,
- 1,
- threadPool,
- scheduledPool,
- null);
-
-
- this.replicationManager = new ReplicationManagerImpl(replicationFailoverManager, this.executorFactory.getExecutor());
- replicationManager.start();
- }
- }
-
- return true;
- }
-
public HornetQServerControlImpl getHornetQServerControl()
{
return messagingServerControl;
@@ -841,6 +765,30 @@
// Protected
// ------------------------------------------------------------------------------------
+ /**
+ * Protected so tests can change this behaviour
+ * @param backupConnector
+ */
+ protected FailoverManagerImpl createBackupConnection(final TransportConfiguration backupConnector,
+ final ExecutorService threadPool,
+ final ScheduledExecutorService scheduledPool)
+ {
+ return new FailoverManagerImpl((ClientSessionFactory)null,
+ backupConnector,
+ null,
+ false,
+ ClientSessionFactoryImpl.DEFAULT_CALL_TIMEOUT,
+ ClientSessionFactoryImpl.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD,
+ ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL,
+ 0,
+ 1.0d,
+ 0,
+ 1,
+ threadPool,
+ scheduledPool,
+ null);
+ }
+
protected PagingManager createPagingManager()
{
return new PagingManagerImpl(new PagingStoreFactoryNIO(configuration.getPagingDirectory(), executorFactory),
@@ -873,7 +821,34 @@
// Private
// --------------------------------------------------------------------------------------
+
+ private boolean startReplication() throws Exception
+ {
+ String backupConnectorName = configuration.getBackupConnectorName();
+ if (backupConnectorName != null)
+ {
+ TransportConfiguration backupConnector = configuration.getConnectorConfigurations().get(backupConnectorName);
+
+ if (backupConnector == null)
+ {
+ log.warn("connector with name '" + backupConnectorName + "' is not defined in the configuration.");
+ }
+ else
+ {
+
+ replicationFailoverManager = createBackupConnection(backupConnector, threadPool, scheduledPool);
+
+ this.replicationManager = new ReplicationManagerImpl(replicationFailoverManager,
+ this.executorFactory.getExecutor());
+ replicationManager.start();
+ }
+ }
+
+ return true;
+ }
+
+
private synchronized void callActivateCallbacks()
{
for (ActivateCallback callback : activateCallbacks)
@@ -895,10 +870,10 @@
log.warn("There is no replication endpoint, can't activate this backup server");
throw new HornetQException(HornetQException.INTERNAL_ERROR, "Can't activate the server");
}
-
+
replicationEndpoint.stop();
}
-
+
// Complete the startup procedure
log.info("Activating server");
@@ -933,11 +908,7 @@
managementService = new ManagementServiceImpl(mbeanServer, configuration);
- remotingService = new RemotingServiceImpl(configuration,
- this,
- managementService,
- threadPool,
- scheduledPool);
+ remotingService = new RemotingServiceImpl(configuration, this, managementService, threadPool, scheduledPool);
if (configuration.getMemoryMeasureInterval() != -1)
{
@@ -964,7 +935,6 @@
deploymentManager = new FileDeploymentManager(configuration.getFileDeployerScanPeriod());
}
-
startReplication();
this.storageManager = createStorageManager();
@@ -1009,7 +979,7 @@
this,
queueFactory,
scheduledPool,
- pagingManager,
+ pagingManager,
configuration.isBackup());
// Address settings need to deployed initially, since they're require on paging manager.start()
Property changes on: trunk/tests/src/org/hornetq/tests
___________________________________________________________________
Name: svn:ignore
+ svnignored
Modified: trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java 2009-10-16 12:01:15 UTC (rev 8120)
+++ trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java 2009-10-17 02:25:10 UTC (rev 8121)
@@ -15,6 +15,7 @@
import static org.hornetq.tests.util.RandomUtil.randomString;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.CountDownLatch;
@@ -24,6 +25,7 @@
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import org.hornetq.core.buffers.ChannelBuffers;
import org.hornetq.core.client.ClientSessionFactory;
@@ -46,6 +48,9 @@
import org.hornetq.core.paging.impl.PagingManagerImpl;
import org.hornetq.core.paging.impl.PagingStoreFactoryNIO;
import org.hornetq.core.persistence.StorageManager;
+import org.hornetq.core.remoting.Interceptor;
+import org.hornetq.core.remoting.Packet;
+import org.hornetq.core.remoting.RemotingConnection;
import org.hornetq.core.remoting.impl.invm.InVMConnectorFactory;
import org.hornetq.core.remoting.spi.HornetQBuffer;
import org.hornetq.core.replication.impl.ReplicatedJournal;
@@ -79,8 +84,6 @@
private ExecutorService executor;
- private FailoverManager connectionManager;
-
private ScheduledExecutorService scheduledExecutor;
// Static --------------------------------------------------------
@@ -98,11 +101,13 @@
HornetQServer server = new HornetQServerImpl(config);
+ FailoverManager failoverManager = createFailoverManager();
+
server.start();
try
{
- ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(failoverManager, executor);
manager.start();
manager.stop();
}
@@ -123,9 +128,12 @@
server.start();
+ FailoverManager failoverManager = createFailoverManager();
+
try
{
- ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(failoverManager, executor);
+
try
{
manager.start();
@@ -154,9 +162,11 @@
server.start();
+ FailoverManager failoverManager = createFailoverManager();
+
try
{
- ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(failoverManager, executor);
manager.start();
Journal replicatedJournal = new ReplicatedJournal((byte)1, new FakeJournal(), manager);
@@ -212,12 +222,12 @@
server.getConfiguration(),
server.getExecutorFactory(),
server.getAddressSettingsRepository());
-
+
PagingStore store = pagingManager.getPageStore(dummy);
store.start();
assertEquals(5, store.getNumberOfPages());
store.stop();
-
+
manager.pageDeleted(dummy, 1);
manager.pageDeleted(dummy, 2);
manager.pageDeleted(dummy, 3);
@@ -226,25 +236,24 @@
manager.pageDeleted(dummy, 6);
blockOnReplication(manager);
-
+
ServerMessageImpl serverMsg = new ServerMessageImpl();
serverMsg.setMessageID(500);
serverMsg.setDestination(new SimpleString("tttt"));
-
-
+
HornetQBuffer buffer = ChannelBuffers.dynamicBuffer(100);
serverMsg.encodeProperties(buffer);
-
+
manager.largeMessageBegin(500);
manager.largeMessageWrite(500, new byte[1024]);
-
+
manager.largeMessageEnd(500);
-
+
blockOnReplication(manager);
-
+
store.start();
-
+
assertEquals(0, store.getNumberOfPages());
manager.stop();
@@ -255,26 +264,49 @@
}
}
-
public void testSendPacketsWithFailure() throws Exception
{
Configuration config = createDefaultConfig(false);
config.setBackup(true);
+
+ final AtomicBoolean returnIntercept = new AtomicBoolean(true);
+ final Interceptor intercept = new Interceptor()
+ {
+
+ public boolean intercept(Packet packet, RemotingConnection connection) throws HornetQException
+ {
+ if (returnIntercept.get())
+ {
+ System.out.println("Returning true");
+ }
+ return returnIntercept.get();
+ }
+
+ };
+
HornetQServer server = new HornetQServerImpl(config);
server.start();
+ final ArrayList<Interceptor> listInterceptor = new ArrayList<Interceptor>();
+ listInterceptor.add(intercept);
+
+ FailoverManager failoverManager = createFailoverManager(listInterceptor);
+
try
{
- ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(failoverManager, executor);
manager.start();
Journal replicatedJournal = new ReplicatedJournal((byte)1, new FakeJournal(), manager);
- for (int i = 0 ; i < 500; i++)
+ Thread.sleep(100);
+ returnIntercept.set(false);
+
+ for (int i = 0; i < 500; i++)
{
replicatedJournal.appendAddRecord(i, (byte)1, new FakeData(), false);
}
@@ -287,10 +319,12 @@
latch.countDown();
}
});
-
+
manager.closeContext();
- assertTrue(latch.await(10, TimeUnit.SECONDS));
+ server.stop();
+
+ assertTrue(latch.await(50, TimeUnit.SECONDS));
}
finally
{
@@ -314,7 +348,7 @@
}
});
-
+
assertTrue(latch.await(30, TimeUnit.SECONDS));
}
@@ -329,9 +363,11 @@
server.start();
+ FailoverManager failoverManager = createFailoverManager();
+
try
{
- ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(failoverManager, executor);
manager.start();
Journal replicatedJournal = new ReplicatedJournal((byte)1, new FakeJournal(), manager);
@@ -395,25 +431,7 @@
}
// Package protected ---------------------------------------------
- /*class LocalRemotingServiceImpl extends RemotingServiceImpl
- {
-
- public LocalRemotingServiceImpl(final Configuration config,
- final HornetQServer server,
- final ManagementService managementService,
- final Executor threadPool,
- final ScheduledExecutorService scheduledThreadPool)
- {
- super(config, server, managementService, threadPool, scheduledThreadPool);
- }
- protected ChannelHandler createHandler(RemotingConnection conn, Channel channel)
- {
- return super.createHandler(conn, channel);
- }
-
- }*/
-
// Protected -----------------------------------------------------
protected void setUp() throws Exception
@@ -426,25 +444,33 @@
scheduledExecutor = new ScheduledThreadPoolExecutor(10, tFactory);
+ }
+
+ private FailoverManagerImpl createFailoverManager()
+ {
+ return createFailoverManager(null);
+ }
+
+ private FailoverManagerImpl createFailoverManager(List<Interceptor> interceptors)
+ {
TransportConfiguration connectorConfig = new TransportConfiguration(InVMConnectorFactory.class.getName(),
new HashMap<String, Object>(),
randomString());
- connectionManager = new FailoverManagerImpl((ClientSessionFactory)null,
- connectorConfig,
- null,
- false,
- ClientSessionFactoryImpl.DEFAULT_CALL_TIMEOUT,
- ClientSessionFactoryImpl.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD,
- ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL,
- 0,
- 1.0d,
- 0,
- 1,
- executor,
- scheduledExecutor,
- null);
-
+ return new FailoverManagerImpl((ClientSessionFactory)null,
+ connectorConfig,
+ null,
+ false,
+ ClientSessionFactoryImpl.DEFAULT_CALL_TIMEOUT,
+ ClientSessionFactoryImpl.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD,
+ ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL,
+ 0,
+ 1.0d,
+ 0,
+ 1,
+ executor,
+ scheduledExecutor,
+ interceptors);
}
protected void tearDown() throws Exception
@@ -456,8 +482,6 @@
tFactory = null;
- connectionManager = null;
-
scheduledExecutor = null;
super.tearDown();
14 years, 7 months
JBoss hornetq SVN: r8120 - trunk/docs/user-manual/en.
by do-not-reply@jboss.org
Author: jmesnil
Date: 2009-10-16 08:01:15 -0400 (Fri, 16 Oct 2009)
New Revision: 8120
Modified:
trunk/docs/user-manual/en/configuring-transports.xml
Log:
transport documentation
* removed warning about 0.0.0.0 being invalid for an acceptor address
Modified: trunk/docs/user-manual/en/configuring-transports.xml
===================================================================
--- trunk/docs/user-manual/en/configuring-transports.xml 2009-10-16 10:24:31 UTC (rev 8119)
+++ trunk/docs/user-manual/en/configuring-transports.xml 2009-10-16 12:01:15 UTC (rev 8120)
@@ -204,7 +204,9 @@
name or IP address to connect to (when configuring a connector) or to listen
on (when configuring an acceptor). The default value for this property is
<literal>localhost</literal>. When configuring acceptors, multiple hosts
- or IP addresses can be specified by separating them with commas. It's not
+ or IP addresses can be specified by separating them with commas. It is also
+ possible to specify <code>0.0.0.0</code> to accept connections from all
+ of the host's network interfaces. It's not
valid to specify multiple addresses when specifying the host for a
connector; a connector makes a connection to one specific address.</para>
<note>
@@ -214,11 +216,6 @@
incoming connections. The default is localhost which of course is not
accessible from remote nodes!</para>
</note>
- <note>
- <para>Although an address 0.0.0.0 is sometimes used by other systems to mean
- "bind to all available addresses", this is not a valid address for a
- Netty acceptor to bind to.</para>
- </note>
</listitem>
<listitem>
<para><literal>hornetq.remoting.netty.port</literal>. This specifies the port to
14 years, 7 months
JBoss hornetq SVN: r8119 - in trunk: src/main/org/hornetq/jms/server/management and 2 other directories.
by do-not-reply@jboss.org
Author: jmesnil
Date: 2009-10-16 06:24:31 -0400 (Fri, 16 Oct 2009)
New Revision: 8119
Modified:
trunk/src/main/org/hornetq/core/management/QueueControl.java
trunk/src/main/org/hornetq/jms/server/management/JMSQueueControl.java
trunk/src/main/org/hornetq/jms/server/management/impl/JMSQueueControlImpl.java
trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlTest.java
trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlUsingJMSTest.java
Log:
added missing methods to JMS Queue management API
Modified: trunk/src/main/org/hornetq/core/management/QueueControl.java
===================================================================
--- trunk/src/main/org/hornetq/core/management/QueueControl.java 2009-10-16 10:23:47 UTC (rev 8118)
+++ trunk/src/main/org/hornetq/core/management/QueueControl.java 2009-10-16 10:24:31 UTC (rev 8119)
@@ -98,7 +98,8 @@
@Operation(desc = "Send the message corresponding to the given messageID to this queue's Dead Letter Address", impact = ACTION)
boolean sendMessageToDeadLetterAddress(@Parameter(name = "messageID", desc = "A message ID") long messageID) throws Exception;
- int sendMessagesToDeadLetterAddress(String filterStr) throws Exception;
+ @Operation(desc = "Send the messages corresponding to the given filter to this queue's Dead Letter Address", impact = ACTION)
+ int sendMessagesToDeadLetterAddress(@Parameter(name = "filter", desc = "A message filter (can be empty)") String filterStr) throws Exception;
@Operation(desc = "Change the priority of the message corresponding to the given messageID", impact = ACTION)
boolean changeMessagePriority(@Parameter(name = "messageID", desc = "A message ID") long messageID,
Modified: trunk/src/main/org/hornetq/jms/server/management/JMSQueueControl.java
===================================================================
--- trunk/src/main/org/hornetq/jms/server/management/JMSQueueControl.java 2009-10-16 10:23:47 UTC (rev 8118)
+++ trunk/src/main/org/hornetq/jms/server/management/JMSQueueControl.java 2009-10-16 10:24:31 UTC (rev 8119)
@@ -81,10 +81,17 @@
@Operation(desc = "Send the message corresponding to the given messageID to the queue's Dead Letter Queue", impact = ACTION)
boolean sendMessageToDeadLetterAddress(@Parameter(name = "messageID", desc = "A message ID") String messageID) throws Exception;
+ @Operation(desc = "Send the messages corresponding to the given filter to this queue's Dead Letter Address", impact = ACTION)
+ int sendMessagesToDeadLetterAddress(@Parameter(name = "filter", desc = "A message filter (can be empty)") String filterStr) throws Exception;
+
@Operation(desc = "Change the priority of the message corresponding to the given messageID", impact = ACTION)
boolean changeMessagePriority(@Parameter(name = "messageID", desc = "A message ID") String messageID,
@Parameter(name = "newPriority", desc = "the new priority (between 0 and 9)") int newPriority) throws Exception;
+ @Operation(desc = "Change the priority of the messages corresponding to the given filter", impact = ACTION)
+ int changeMessagesPriority(@Parameter(name = "filter", desc = "A message filter") String filter,
+ @Parameter(name = "newPriority", desc = "the new priority (between 0 and 9)") int newPriority) throws Exception;
+
@Operation(desc = "Move the message corresponding to the given messageID to another queue", impact = ACTION)
boolean moveMessage(@Parameter(name = "messageID", desc = "A message ID") String messageID,
@Parameter(name = "otherQueueName", desc = "The name of the queue to move the message to") String otherQueueName) throws Exception;
@@ -96,6 +103,9 @@
@Operation(desc = "List the message counters", impact = INFO)
String listMessageCounter() throws Exception;
+ @Operation(desc = "Reset the message counters", impact = INFO)
+ void resetMessageCounter() throws Exception;
+
@Operation(desc = "List the message counters as HTML", impact = INFO)
String listMessageCounterAsHTML() throws Exception;
Modified: trunk/src/main/org/hornetq/jms/server/management/impl/JMSQueueControlImpl.java
===================================================================
--- trunk/src/main/org/hornetq/jms/server/management/impl/JMSQueueControlImpl.java 2009-10-16 10:23:47 UTC (rev 8118)
+++ trunk/src/main/org/hornetq/jms/server/management/impl/JMSQueueControlImpl.java 2009-10-16 10:24:31 UTC (rev 8119)
@@ -246,6 +246,12 @@
}
return true;
}
+
+ public int sendMessagesToDeadLetterAddress(String filterStr) throws Exception
+ {
+ String filter = createFilterFromJMSSelector(filterStr);
+ return coreQueueControl.sendMessagesToDeadLetterAddress(filter);
+ }
public boolean changeMessagePriority(final String messageID, final int newPriority) throws Exception
{
@@ -257,6 +263,12 @@
}
return true;
}
+
+ public int changeMessagesPriority(String filterStr, int newPriority) throws Exception
+ {
+ String filter = createFilterFromJMSSelector(filterStr);
+ return coreQueueControl.changeMessagesPriority(filter, newPriority);
+ }
public boolean moveMessage(String messageID, String otherQueueName) throws Exception
{
@@ -289,6 +301,11 @@
throw new IllegalStateException(e);
}
}
+
+ public void resetMessageCounter() throws Exception
+ {
+ coreQueueControl.resetMessageCounter();
+ }
public String listMessageCounterAsHTML()
{
Modified: trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlTest.java 2009-10-16 10:23:47 UTC (rev 8118)
+++ trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlTest.java 2009-10-16 10:24:31 UTC (rev 8119)
@@ -344,7 +344,46 @@
{
}
}
+
+ public void testChangeMessagesPriority() throws Exception
+ {
+ String key = "key";
+ long matchingValue = randomLong();
+ long unmatchingValue = matchingValue + 1;
+ String filter = "key = " + matchingValue;
+ int newPriority = 9;
+ Connection connection = JMSUtil.createConnection(InVMConnectorFactory.class.getName());
+ Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
+ // send on queue
+ Message msg_1 = JMSUtil.sendMessageWithProperty(session, queue, key, matchingValue);
+ Message msg_2 = JMSUtil.sendMessageWithProperty(session, queue, key, unmatchingValue);
+
+ JMSQueueControl queueControl = createManagementControl();
+ assertEquals(2, queueControl.getMessageCount());
+
+ int changedMessagesCount = queueControl.changeMessagesPriority(filter, newPriority);
+ assertEquals(1, changedMessagesCount);
+ assertEquals(2, queueControl.getMessageCount());
+
+ connection.start();
+ MessageConsumer consumer = session.createConsumer(queue);
+ Message message = consumer.receive(500);
+ assertNotNull(message);
+ assertEquals(msg_1.getJMSMessageID(), message.getJMSMessageID());
+ assertEquals(9, message.getJMSPriority());
+ assertEquals(matchingValue, message.getLongProperty(key));
+
+ message = consumer.receive(500);
+ assertNotNull(message);
+ assertEquals(msg_2.getJMSMessageID(), message.getJMSMessageID());
+ assertEquals(unmatchingValue, message.getLongProperty(key));
+
+ assertNull(consumer.receive(500));
+
+ connection.close();
+ }
+
public void testGetExpiryAddress() throws Exception
{
final SimpleString expiryAddress = randomSimpleString();
@@ -561,7 +600,53 @@
}
}
+
+ public void testSendMessagesToDeadLetterAddress() throws Exception
+ {
+ String key = "key";
+ long matchingValue = randomLong();
+ long unmatchingValue = matchingValue + 1;
+ String filter = "key = " + matchingValue;
+ String deadLetterQueue = randomString();
+ serverManager.createQueue(deadLetterQueue, deadLetterQueue, null, true);
+ HornetQQueue dlq = new HornetQQueue(deadLetterQueue);
+
+ Connection conn = createConnection();
+ Session sess = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+ // send 2 messages on queue
+ JMSUtil.sendMessageWithProperty(sess, queue, key, matchingValue);
+ JMSUtil.sendMessageWithProperty(sess, queue, key, unmatchingValue);
+
+ JMSQueueControl queueControl = createManagementControl();
+ JMSQueueControl dlqControl = ManagementControlHelper.createJMSQueueControl(dlq, mbeanServer);
+
+ assertEquals(2, queueControl.getMessageCount());
+ assertEquals(0, dlqControl.getMessageCount());
+
+ queueControl.setDeadLetterAddress(dlq.getAddress());
+
+ int deadMessageCount = queueControl.sendMessagesToDeadLetterAddress(filter);
+ assertEquals(1, deadMessageCount);
+ assertEquals(1, queueControl.getMessageCount());
+ assertEquals(1, dlqControl.getMessageCount());
+
+ conn.start();
+ MessageConsumer consumer = sess.createConsumer(queue);
+
+ Message message = consumer.receive(500);
+ assertNotNull(message);
+ assertEquals(unmatchingValue, message.getLongProperty(key));
+
+ // check there is a single message to consume from deadletter queue
+ JMSUtil.consumeMessages(1, dlq);
+
+ conn.close();
+
+ serverManager.destroyQueue(deadLetterQueue);
+ }
+
public void testMoveMessages() throws Exception
{
String otherQueueName = randomString();
Modified: trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlUsingJMSTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlUsingJMSTest.java 2009-10-16 10:23:47 UTC (rev 8118)
+++ trunk/tests/src/org/hornetq/tests/integration/jms/server/management/JMSQueueControlUsingJMSTest.java 2009-10-16 10:24:31 UTC (rev 8119)
@@ -90,6 +90,11 @@
{
return (Boolean)proxy.invokeOperation("changeMessagePriority", messageID, newPriority);
}
+
+ public int changeMessagesPriority(String filter, int newPriority) throws Exception
+ {
+ return (Integer)proxy.invokeOperation("changeMessagesPriority", filter, newPriority);
+ }
public int countMessages(String filter) throws Exception
{
@@ -160,6 +165,11 @@
{
return (String)proxy.invokeOperation("listMessageCounter");
}
+
+ public void resetMessageCounter() throws Exception
+ {
+ proxy.invokeOperation("resetMessageCounter");
+ }
public String listMessageCounterAsHTML() throws Exception
{
@@ -217,6 +227,11 @@
return (Boolean)proxy.invokeOperation("sendMessageToDeadLetterAddress", messageID);
}
+ public int sendMessagesToDeadLetterAddress(String filterStr) throws Exception
+ {
+ return (Integer)proxy.invokeOperation("sendMessagesToDeadLetterAddress", filterStr);
+ }
+
public void setDeadLetterAddress(String deadLetterAddress) throws Exception
{
proxy.invokeOperation("setDeadLetterAddress", deadLetterAddress);
14 years, 7 months
JBoss hornetq SVN: r8118 - trunk/examples/jms/jms-bridge.
by do-not-reply@jboss.org
Author: jmesnil
Date: 2009-10-16 06:23:47 -0400 (Fri, 16 Oct 2009)
New Revision: 8118
Modified:
trunk/examples/jms/jms-bridge/
Log:
added build & data to svn:ignore
Property changes on: trunk/examples/jms/jms-bridge
___________________________________________________________________
Name: svn:ignore
+ build
data
14 years, 7 months
JBoss hornetq SVN: r8117 - trunk/tests/src/org/hornetq/tests/integration/client.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-10-15 13:24:09 -0400 (Thu, 15 Oct 2009)
New Revision: 8117
Modified:
trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java
Log:
just removing some verbose logging from a test
Modified: trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java 2009-10-15 16:41:41 UTC (rev 8116)
+++ trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java 2009-10-15 17:24:09 UTC (rev 8117)
@@ -17,7 +17,6 @@
import java.util.Map;
import junit.framework.AssertionFailedError;
-import junit.framework.TestSuite;
import org.hornetq.core.buffers.ChannelBuffers;
import org.hornetq.core.client.ClientConsumer;
@@ -161,8 +160,6 @@
assertNotNull(message2);
- log.info("got message " + message2.getProperty(new SimpleString("id")));
-
assertEquals(i, ((Integer)message2.getProperty(new SimpleString("id"))).intValue());
message2.acknowledge();
14 years, 7 months
JBoss hornetq SVN: r8116 - in trunk: src/main/org/hornetq/core/client/impl and 26 other directories.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-10-15 12:41:41 -0400 (Thu, 15 Oct 2009)
New Revision: 8116
Added:
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/CreateReplicationSessionMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddTXMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationCommitMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteTXMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageBeingMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageWriteMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargemessageEndMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageEventMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageWriteMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPrepareMessage.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationResponseMessage.java
trunk/src/main/org/hornetq/core/replication/
trunk/src/main/org/hornetq/core/replication/ReplicationContext.java
trunk/src/main/org/hornetq/core/replication/ReplicationEndpoint.java
trunk/src/main/org/hornetq/core/replication/ReplicationManager.java
trunk/src/main/org/hornetq/core/replication/impl/
trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java
trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java
trunk/src/main/org/hornetq/core/replication/impl/ReplicationEndpointImpl.java
trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/LargeMessageFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyReplicatedFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedAsynchronousFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedLargeMessageFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedNettyAsynchronousFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedPagingFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/replication/
trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java
Modified:
trunk/src/main/org/hornetq/core/client/impl/FailoverManager.java
trunk/src/main/org/hornetq/core/journal/Journal.java
trunk/src/main/org/hornetq/core/journal/TestableJournal.java
trunk/src/main/org/hornetq/core/journal/TransactionFailureCallback.java
trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java
trunk/src/main/org/hornetq/core/paging/PagingStore.java
trunk/src/main/org/hornetq/core/paging/impl/PageImpl.java
trunk/src/main/org/hornetq/core/paging/impl/PagingStoreImpl.java
trunk/src/main/org/hornetq/core/persistence/StorageManager.java
trunk/src/main/org/hornetq/core/persistence/impl/journal/BatchingIDGenerator.java
trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalLargeServerMessage.java
trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java
trunk/src/main/org/hornetq/core/persistence/impl/nullpm/NullStorageManager.java
trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
trunk/src/main/org/hornetq/core/remoting/impl/PacketDecoder.java
trunk/src/main/org/hornetq/core/remoting/impl/wireformat/PacketImpl.java
trunk/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java
trunk/src/main/org/hornetq/core/server/HornetQServer.java
trunk/src/main/org/hornetq/core/server/cluster/impl/Redistributor.java
trunk/src/main/org/hornetq/core/server/impl/HornetQPacketHandler.java
trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
trunk/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java
trunk/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java
trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java
trunk/tests/src/org/hornetq/tests/integration/largemessage/mock/MockConnectorFactory.java
trunk/tests/src/org/hornetq/tests/integration/paging/PageCrashTest.java
trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PageImplTest.java
trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PagingStoreImplTest.java
trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/BatchIDGeneratorUnitTest.java
trunk/tests/src/org/hornetq/tests/util/ServiceTestBase.java
trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java
Log:
https://jira.jboss.org/jira/browse/HORNETQ-125 - Adding replicated Journal, paging and large message
Modified: trunk/src/main/org/hornetq/core/client/impl/FailoverManager.java
===================================================================
--- trunk/src/main/org/hornetq/core/client/impl/FailoverManager.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/client/impl/FailoverManager.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -48,7 +48,9 @@
final boolean blockOnPersistentSend) throws HornetQException;
void removeSession(final ClientSessionInternal session);
-
+
+ public RemotingConnection getConnection();
+
int numConnections();
int numSessions();
Modified: trunk/src/main/org/hornetq/core/journal/Journal.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/Journal.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/journal/Journal.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -71,32 +71,20 @@
*/
void appendPrepareRecord(long txID, EncodingSupport transactionData, boolean sync) throws Exception;
+ void appendPrepareRecord(long txID, byte[] transactionData, boolean sync) throws Exception;
+
void appendRollbackRecord(long txID, boolean sync) throws Exception;
// Load
+
+ long load(LoaderCallback reloadManager) throws Exception;
+
long load(List<RecordInfo> committedRecords, List<PreparedTransactionInfo> preparedTransactions, TransactionFailureCallback transactionFailure) throws Exception;
int getAlignment() throws Exception;
void perfBlast(int pages) throws Exception;
- /** This method is called automatically when a new file is opened.
- * @return true if it needs to re-check due to cleanup or other factors */
- boolean checkReclaimStatus() throws Exception;
- /** This method check for the need of compacting based on the minCompactPercentage
- * This method is usually called automatically when new files are opened
- */
- void checkCompact() throws Exception;
-
- /**
- * Eliminate deleted records of the journal.
- * @throws Exception
- */
- void compact() throws Exception;
-
-
- JournalFile[] getDataFiles();
-
}
Modified: trunk/src/main/org/hornetq/core/journal/TestableJournal.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/TestableJournal.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/journal/TestableJournal.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -13,6 +13,7 @@
package org.hornetq.core.journal;
+import org.hornetq.core.journal.impl.JournalFile;
/**
*
@@ -46,17 +47,20 @@
int getMaxAIO();
- /** This method could be promoted to {@link Journal} interface when we decide to use the loadManager
- * instead of load(List,List)
- */
- long load(LoaderCallback reloadManager) throws Exception;
-
void forceMoveNextFile() throws Exception;
void setAutoReclaim(boolean autoReclaim);
boolean isAutoReclaim();
+ void compact() throws Exception;
+ /** This method is called automatically when a new file is opened.
+ * @return true if it needs to re-check due to cleanup or other factors */
+ boolean checkReclaimStatus() throws Exception;
+
+ JournalFile[] getDataFiles();
+
+
}
Modified: trunk/src/main/org/hornetq/core/journal/TransactionFailureCallback.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/TransactionFailureCallback.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/journal/TransactionFailureCallback.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -16,7 +16,7 @@
import java.util.List;
/**
- * A TransactionFailureCallback
+ * A Callback to receive information about bad transactions for extra cleanup required for broken transactions such as large messages.
*
 * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
*
Modified: trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -1163,7 +1163,17 @@
{
appendDeleteRecordTransactional(txID, id, NullEncoding.instance);
}
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendPrepareRecord(long, byte[], boolean)
+ */
+ public void appendPrepareRecord(long txID, byte[] transactionData, boolean sync) throws Exception
+ {
+ appendPrepareRecord(txID, new ByteArrayEncoding(transactionData), sync);
+ }
+
+
/**
*
* <p>If the system crashed after a prepare was called, it should store information that is required to bring the transaction
@@ -2148,7 +2158,7 @@
return (compactMinFiles * compactPercentage);
}
- public synchronized void cleanUp(final JournalFile file) throws Exception
+ private synchronized void cleanUp(final JournalFile file) throws Exception
{
if (state != STATE_LOADED)
{
@@ -2225,7 +2235,7 @@
}
- public void checkCompact() throws Exception
+ private void checkCompact() throws Exception
{
if (compactMinFiles == 0)
{
@@ -3344,7 +3354,12 @@
private static class NullEncoding implements EncodingSupport
{
- static NullEncoding instance = new NullEncoding();
+ private static NullEncoding instance = new NullEncoding();
+
+ public static NullEncoding getInstance()
+ {
+ return instance;
+ }
public void decode(final HornetQBuffer buffer)
{
Modified: trunk/src/main/org/hornetq/core/paging/PagingStore.java
===================================================================
--- trunk/src/main/org/hornetq/core/paging/PagingStore.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/paging/PagingStore.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -54,6 +54,8 @@
public boolean readPage() throws Exception;
Page getCurrentPage();
+
+ Page createPage(final int page) throws Exception;
/**
*
Modified: trunk/src/main/org/hornetq/core/paging/impl/PageImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/paging/impl/PageImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/paging/impl/PageImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -28,6 +28,8 @@
import org.hornetq.core.logging.Logger;
import org.hornetq.core.paging.Page;
import org.hornetq.core.paging.PagedMessage;
+import org.hornetq.core.persistence.StorageManager;
+import org.hornetq.utils.SimpleString;
/**
*
@@ -59,16 +61,22 @@
private final SequentialFileFactory fileFactory;
private final AtomicInteger size = new AtomicInteger(0);
+
+ private final StorageManager storageManager;
+
+ private final SimpleString storeName;
// Static --------------------------------------------------------
// Constructors --------------------------------------------------
- public PageImpl(final SequentialFileFactory factory, final SequentialFile file, final int pageId) throws Exception
+ public PageImpl(final SimpleString storeName, final StorageManager storageManager, final SequentialFileFactory factory, final SequentialFile file, final int pageId) throws Exception
{
this.pageId = pageId;
this.file = file;
- fileFactory = factory;
+ this.fileFactory = factory;
+ this.storageManager = storageManager;
+ this.storeName = storeName;
}
// Public --------------------------------------------------------
@@ -154,9 +162,11 @@
numberOfMessages.incrementAndGet();
size.addAndGet(buffer.limit());
+ storageManager.pageWrite(message, pageId);
+
if (message.getMessage(null).isLargeMessage())
{
- // If we don't sync on large messages we could have the risk of files unnatended files on disk
+ // If we don't sync on large messages we could have the risk of unattended files on disk
sync();
}
}
@@ -175,11 +185,20 @@
public void close() throws Exception
{
+ if (storageManager != null)
+ {
+ storageManager.pageClosed(storeName, pageId);
+ }
file.close();
}
public void delete() throws Exception
{
+ if (storageManager != null)
+ {
+ storageManager.pageDeleted(storeName, pageId);
+ }
+
if (suspiciousRecords)
{
log.warn("File " + file.getFileName() +
Modified: trunk/src/main/org/hornetq/core/paging/impl/PagingStoreImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/paging/impl/PagingStoreImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/paging/impl/PagingStoreImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -610,6 +610,35 @@
return currentPage;
}
+
+ public Page createPage(final int page) throws Exception
+ {
+ String fileName = createFileName(page);
+
+ if (fileFactory == null)
+ {
+ fileFactory = storeFactory.newFileFactory(getStoreName());
+ }
+
+ SequentialFile file = fileFactory.createSequentialFile(fileName, 1000);
+
+ file.open();
+
+ long size = file.size();
+
+ if (fileFactory.isSupportsCallbacks() && size < pageSize)
+ {
+ file.fill((int)size, (int)(pageSize - size), (byte)0);
+ }
+
+ file.position(0);
+
+ file.close();
+
+ return new PageImpl(this.storeName, storageManager, fileFactory, file, page);
+ }
+
+
// TestSupportPageStore ------------------------------------------
public void forceAnotherPage() throws Exception
@@ -703,36 +732,6 @@
// Protected -----------------------------------------------------
- // In order to test failures, we need to be able to extend this class
- // and replace the Page for another Page that will fail before the file is removed
- // That's why createPage is not a private method
- protected Page createPage(final int page) throws Exception
- {
- String fileName = createFileName(page);
-
- if (fileFactory == null)
- {
- fileFactory = storeFactory.newFileFactory(getStoreName());
- }
-
- SequentialFile file = fileFactory.createSequentialFile(fileName, 1000);
-
- file.open();
-
- long size = file.size();
-
- if (fileFactory.isSupportsCallbacks() && size < pageSize)
- {
- file.fill((int)size, (int)(pageSize - size), (byte)0);
- }
-
- file.position(0);
-
- file.close();
-
- return new PageImpl(fileFactory, file, page);
- }
-
// Private -------------------------------------------------------
/**
@@ -753,6 +752,7 @@
// nothing to be done on this case.
return true;
}
+
// Depage has to be done atomically, in case of failure it should be
// back to where it was
@@ -931,8 +931,9 @@
{
currentPage.close();
}
-
+
currentPage = createPage(currentPageId);
+
currentPageSize.set(0);
Modified: trunk/src/main/org/hornetq/core/persistence/StorageManager.java
===================================================================
--- trunk/src/main/org/hornetq/core/persistence/StorageManager.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/persistence/StorageManager.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -19,6 +19,7 @@
import javax.transaction.xa.Xid;
import org.hornetq.core.paging.PageTransactionInfo;
+import org.hornetq.core.paging.PagedMessage;
import org.hornetq.core.paging.PagingManager;
import org.hornetq.core.postoffice.Binding;
import org.hornetq.core.postoffice.PostOffice;
@@ -45,6 +46,18 @@
{
// Message related operations
+ void pageClosed(SimpleString storeName, int pageNumber);
+
+ void pageDeleted(SimpleString storeName, int pageNumber);
+
+ void pageWrite(PagedMessage message, int pageNumber);
+
+ boolean isReplicated();
+
+ void afterReplicated(Runnable run);
+
+ void completeReplication();
+
UUID getPersistentID();
void setPersistentID(UUID id) throws Exception;
@@ -87,6 +100,8 @@
LargeServerMessage createLargeMessage();
+ LargeServerMessage createLargeMessage(long id, byte [] header);
+
void prepare(long txID, Xid xid) throws Exception;
void commit(long txID) throws Exception;
@@ -97,15 +112,21 @@
void deletePageTransactional(long txID, long recordID) throws Exception;
+ /** This method is only useful at the backup side. We only load internal structures making the journals ready for
+ * append mode on the backup side. */
+ void loadInternalOnly() throws Exception;
+
+
+ public void loadMessageJournal(final PostOffice postOffice,
+ final PagingManager pagingManager,
+ final ResourceManager resourceManager,
+ final Map<Long, Queue> queues,
+ final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap) throws Exception;
+
long storeHeuristicCompletion(Xid xid, boolean isCommit) throws Exception;
void deleteHeuristicCompletion(long id) throws Exception;
- void loadMessageJournal(PostOffice postOffice,
- PagingManager pagingManager,
- ResourceManager resourceManager,
- Map<Long, Queue> queues,
- Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap) throws Exception;
// Bindings related operations
Modified: trunk/src/main/org/hornetq/core/persistence/impl/journal/BatchingIDGenerator.java
===================================================================
--- trunk/src/main/org/hornetq/core/persistence/impl/journal/BatchingIDGenerator.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/persistence/impl/journal/BatchingIDGenerator.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -38,8 +38,6 @@
private static final Logger log = Logger.getLogger(BatchingIDGenerator.class);
- public static final byte ID_COUNTER_RECORD = 24;
-
// Attributes ----------------------------------------------------
// Static --------------------------------------------------------
@@ -127,7 +125,7 @@
{
try
{
- journalStorage.appendAddRecord(journalID, ID_COUNTER_RECORD, new IDCounterEncoding(id), true);
+ journalStorage.appendAddRecord(journalID, JournalStorageManager.ID_COUNTER_RECORD, new IDCounterEncoding(id), true);
}
catch (Exception e)
{
Modified: trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalLargeServerMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalLargeServerMessage.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalLargeServerMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -91,11 +91,9 @@
{
file.open();
}
+
+ storageManager.addBytesToLargeMessage(file, this.getMessageID(), bytes);
- file.position(file.size());
-
- file.write(ByteBuffer.wrap(bytes), false);
-
bodySize += bytes.length;
}
@@ -232,6 +230,7 @@
public synchronized void deleteFile() throws Exception
{
validateFile();
+ releaseResources();
storageManager.deleteFile(file);
}
@@ -262,10 +261,11 @@
{
super.setStored();
releaseResources();
+
+
if (file != null && linkMessage == null)
{
- SequentialFile fileToRename = storageManager.createFileForLargeMessage(getMessageID(), isStored());
- file.renameTo(fileToRename.getFileName());
+ storageManager.completeLargeMessage(this);
}
}
@@ -304,6 +304,12 @@
return newMessage;
}
+
+
+ public SequentialFile getFile()
+ {
+ return file;
+ }
// Package protected ---------------------------------------------
Modified: trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java
===================================================================
--- trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/persistence/impl/journal/JournalStorageManager.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -18,6 +18,7 @@
import static org.hornetq.utils.DataConstants.SIZE_LONG;
import java.io.File;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -29,12 +30,14 @@
import javax.transaction.xa.Xid;
+import org.hornetq.core.buffers.ChannelBuffer;
import org.hornetq.core.buffers.ChannelBuffers;
import org.hornetq.core.config.Configuration;
import org.hornetq.core.exception.HornetQException;
import org.hornetq.core.filter.Filter;
import org.hornetq.core.journal.EncodingSupport;
import org.hornetq.core.journal.Journal;
+import org.hornetq.core.journal.LoaderCallback;
import org.hornetq.core.journal.PreparedTransactionInfo;
import org.hornetq.core.journal.RecordInfo;
import org.hornetq.core.journal.SequentialFile;
@@ -46,6 +49,7 @@
import org.hornetq.core.logging.Logger;
import org.hornetq.core.message.impl.MessageImpl;
import org.hornetq.core.paging.PageTransactionInfo;
+import org.hornetq.core.paging.PagedMessage;
import org.hornetq.core.paging.PagingManager;
import org.hornetq.core.paging.impl.PageTransactionInfoImpl;
import org.hornetq.core.persistence.QueueBindingInfo;
@@ -54,6 +58,8 @@
import org.hornetq.core.postoffice.PostOffice;
import org.hornetq.core.remoting.impl.wireformat.XidCodecSupport;
import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.core.replication.ReplicationManager;
+import org.hornetq.core.replication.impl.ReplicatedJournal;
import org.hornetq.core.server.JournalType;
import org.hornetq.core.server.LargeServerMessage;
import org.hornetq.core.server.MessageReference;
@@ -92,6 +98,8 @@
public static final byte PERSISTENT_ID_RECORD = 23;
+ public static final byte ID_COUNTER_RECORD = 24;
+
// type + expiration + timestamp + priority
public static final int SIZE_FIELDS = SIZE_INT + SIZE_LONG + SIZE_LONG + SIZE_BYTE;
@@ -119,6 +127,8 @@
private final BatchingIDGenerator idGenerator;
+ private final ReplicationManager replicator;
+
private final Journal messageJournal;
private final Journal bindingsJournal;
@@ -144,11 +154,18 @@
private final String journalDir;
private final String largeMessagesDirectory;
-
+
public JournalStorageManager(final Configuration config, final Executor executor)
{
+ this(config, executor, null);
+ }
+
+ public JournalStorageManager(final Configuration config, final Executor executor, final ReplicationManager replicator)
+ {
this.executor = executor;
-
+
+ this.replicator = replicator;
+
if (config.getJournalType() != JournalType.NIO && config.getJournalType() != JournalType.ASYNCIO)
{
throw new IllegalArgumentException("Only NIO and AsyncIO are supported journals");
@@ -165,6 +182,26 @@
journalDir = config.getJournalDirectory();
+ SequentialFileFactory bindingsFF = new NIOSequentialFileFactory(bindingsDir);
+
+ Journal localBindings = new JournalImpl(1024 * 1024,
+ 2,
+ config.getJournalCompactMinFiles(),
+ config.getJournalCompactPercentage(),
+ bindingsFF,
+ "hornetq-bindings",
+ "bindings",
+ 1);
+
+ if (replicator != null)
+ {
+ this.bindingsJournal = new ReplicatedJournal((byte)0, localBindings, replicator);
+ }
+ else
+ {
+ this.bindingsJournal = localBindings;
+ }
+
if (journalDir == null)
{
throw new NullPointerException("journal-dir is null");
@@ -172,17 +209,6 @@
createJournalDir = config.isCreateJournalDir();
- SequentialFileFactory bindingsFF = new NIOSequentialFileFactory(bindingsDir);
-
- bindingsJournal = new JournalImpl(1024 * 1024,
- 2,
- config.getJournalCompactMinFiles(),
- config.getJournalCompactPercentage(),
- bindingsFF,
- "hornetq-bindings",
- "bindings",
- 1);
-
syncNonTransactional = config.isJournalSyncNonTransactional();
syncTransactional = config.isJournalSyncTransactional();
@@ -217,17 +243,33 @@
throw new IllegalArgumentException("Unsupported journal type " + config.getJournalType());
}
- this.idGenerator = new BatchingIDGenerator(0, CHECKPOINT_BATCH_SIZE, bindingsJournal);
+ if (config.isBackup())
+ {
+ this.idGenerator = null;
+ }
+ else
+ {
+ this.idGenerator = new BatchingIDGenerator(0, CHECKPOINT_BATCH_SIZE, bindingsJournal);
+ }
- messageJournal = new JournalImpl(config.getJournalFileSize(),
- config.getJournalMinFiles(),
- config.getJournalCompactMinFiles(),
- config.getJournalCompactPercentage(),
- journalFF,
- "hornetq-data",
- "hq",
- config.getJournalMaxAIO());
+ Journal localMessage = new JournalImpl(config.getJournalFileSize(),
+ config.getJournalMinFiles(),
+ config.getJournalCompactMinFiles(),
+ config.getJournalCompactPercentage(),
+ journalFF,
+ "hornetq-data",
+ "hq",
+ config.getJournalMaxAIO());
+ if (replicator != null)
+ {
+ this.messageJournal = new ReplicatedJournal((byte)1, localMessage, replicator);
+ }
+ else
+ {
+ this.messageJournal = localMessage;
+ }
+
largeMessagesDirectory = config.getLargeMessagesDirectory();
largeMessagesFactory = new NIOSequentialFileFactory(largeMessagesDirectory);
@@ -235,6 +277,70 @@
perfBlastPages = config.getJournalPerfBlastPages();
}
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#completeReplication()
+ */
+ public void completeReplication()
+ {
+ if (replicator != null)
+ {
+ replicator.closeContext();
+ }
+ }
+
+ public boolean isReplicated()
+ {
+ return replicator != null;
+ }
+
+ // TODO: shouldn't those page methods be on the PageManager?
+
+ /*
+ *
+ * (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageClosed(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageClosed(SimpleString storeName, int pageNumber)
+ {
+ if (isReplicated())
+ {
+ replicator.pageClosed(storeName, pageNumber);
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageDeleted(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageDeleted(SimpleString storeName, int pageNumber)
+ {
+ if (isReplicated())
+ {
+ replicator.pageDeleted(storeName, pageNumber);
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageWrite(org.hornetq.core.paging.PagedMessage, int)
+ */
+ public void pageWrite(PagedMessage message, int pageNumber)
+ {
+ if (isReplicated())
+ {
+ replicator.pageWrite(message, pageNumber);
+ }
+ }
+
+ // TODO: shouldn't those page methods be on the PageManager? ^^^^
+
+ public void afterReplicated(Runnable run)
+ {
+ if (replicator == null)
+ {
+ throw new IllegalStateException("StorageManager is not replicated");
+ }
+ replicator.afterReplicated(run);
+ }
+
public UUID getPersistentID()
{
return persistentID;
@@ -251,7 +357,7 @@
this.persistentID = id;
}
-
+
public long generateUniqueID()
{
long id = idGenerator.generateID();
@@ -269,6 +375,36 @@
return new JournalLargeServerMessage(this);
}
+ public void addBytesToLargeMessage(SequentialFile file, long messageId, final byte[] bytes) throws Exception
+ {
+ file.position(file.size());
+
+ file.write(ByteBuffer.wrap(bytes), false);
+
+ if (isReplicated())
+ {
+ this.replicator.largeMessageWrite(messageId, bytes);
+ }
+ }
+
+ public LargeServerMessage createLargeMessage(long id, byte[] header)
+ {
+ if (isReplicated())
+ {
+ replicator.largeMessageBegin(id);
+ }
+
+ JournalLargeServerMessage largeMessage = (JournalLargeServerMessage)createLargeMessage();
+
+ HornetQBuffer headerBuffer = ChannelBuffers.wrappedBuffer(header);
+
+ largeMessage.decodeProperties(headerBuffer);
+
+ largeMessage.setMessageID(id);
+
+ return largeMessage;
+ }
+
// Non transactional operations
public void storeMessage(final ServerMessage message) throws Exception
@@ -310,8 +446,8 @@
public void updateScheduledDeliveryTime(final MessageReference ref) throws Exception
{
- ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(),
- ref.getQueue().getID());
+ ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(), ref.getQueue()
+ .getID());
messageJournal.appendUpdateRecord(ref.getMessage().getMessageID(),
SET_SCHEDULED_DELIVERY_TIME,
@@ -387,12 +523,12 @@
messageJournal.appendAddRecord(id, HEURISTIC_COMPLETION, new HeuristicCompletionEncoding(xid, isCommit), true);
return id;
}
-
+
public void deleteHeuristicCompletion(long id) throws Exception
{
messageJournal.appendDeleteRecord(id, true);
}
-
+
public void deletePageTransactional(final long txID, final long recordID) throws Exception
{
messageJournal.appendDeleteRecordTransactional(txID, recordID);
@@ -400,8 +536,8 @@
public void updateScheduledDeliveryTimeTransactional(final long txID, final MessageReference ref) throws Exception
{
- ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(),
- ref.getQueue().getID());
+ ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(), ref.getQueue()
+ .getID());
messageJournal.appendUpdateRecordTransactional(txID,
ref.getMessage().getMessageID(),
@@ -466,6 +602,19 @@
updateInfo,
syncNonTransactional);
}
+ /**
+ * @param message the large message whose backing file is renamed to its final name
+ * @throws Exception
+ */
+ public void completeLargeMessage(JournalLargeServerMessage message) throws Exception
+ {
+ if (isReplicated())
+ {
+ replicator.largeMessageEnd(message.getMessageID());
+ }
+ SequentialFile fileToRename = createFileForLargeMessage(message.getMessageID(), true);
+ message.getFile().renameTo(fileToRename.getFileName());
+ }
private static final class AddMessageRecord
{
@@ -480,20 +629,17 @@
int deliveryCount;
}
-
-
+
private class LargeMessageTXFailureCallback implements TransactionFailureCallback
{
private final Map<Long, ServerMessage> messages;
-
+
public LargeMessageTXFailureCallback(Map<Long, ServerMessage> messages)
{
super();
this.messages = messages;
}
-
-
public void failedTransaction(long transactionID, List<RecordInfo> records, List<RecordInfo> recordsToDelete)
{
for (RecordInfo record : records)
@@ -516,7 +662,7 @@
}
}
}
-
+
}
public void loadMessageJournal(final PostOffice postOffice,
@@ -530,9 +676,9 @@
List<PreparedTransactionInfo> preparedTransactions = new ArrayList<PreparedTransactionInfo>();
Map<Long, ServerMessage> messages = new HashMap<Long, ServerMessage>();
-
+
messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(messages));
-
+
ArrayList<LargeServerMessage> largeMessages = new ArrayList<LargeServerMessage>();
Map<Long, Map<Long, AddMessageRecord>> queueMap = new HashMap<Long, Map<Long, AddMessageRecord>>();
@@ -732,7 +878,7 @@
{
record.message.putLongProperty(MessageImpl.HDR_SCHEDULED_DELIVERY_TIME, scheduledDeliveryTime);
}
-
+
MessageReference ref = postOffice.reroute(record.message, queue, null);
ref.setDeliveryCount(record.deliveryCount);
@@ -750,11 +896,12 @@
{
if (msg.getRefCount() == 0)
{
- log.debug("Large message: " + msg.getMessageID() + " didn't have any associated reference, file will be deleted");
+ log.debug("Large message: " + msg.getMessageID() +
+ " didn't have any associated reference, file will be deleted");
msg.decrementRefCount();
}
}
-
+
if (perfBlastPages != -1)
{
messageJournal.perfBlast(perfBlastPages);
@@ -774,14 +921,14 @@
LargeMessageEncoding messageEncoding = new LargeMessageEncoding(largeMessage);
messageEncoding.decode(buff);
-
+
Long originalMessageID = (Long)largeMessage.getProperties().getProperty(MessageImpl.HDR_ORIG_MESSAGE_ID);
-
+
// Using the linked file by the original file
if (originalMessageID != null)
{
LargeServerMessage originalMessage = (LargeServerMessage)messages.get(originalMessageID);
-
+
if (originalMessage == null)
{
// this could happen if the message was deleted but the file still exists as the file still being used
@@ -790,9 +937,9 @@
originalMessage.setStored();
messages.put(originalMessageID, originalMessage);
}
-
+
originalMessage.incrementRefCount();
-
+
largeMessage.setLinkedMessage(originalMessage);
}
return largeMessage;
@@ -835,7 +982,7 @@
case ADD_LARGE_MESSAGE:
{
messages.put(record.id, parseLargeMessage(messages, buff));
-
+
break;
}
case ADD_MESSAGE:
@@ -1038,7 +1185,7 @@
bindingEncoding.setId(id);
- queueBindingInfos.add(bindingEncoding);
+ queueBindingInfos.add(bindingEncoding);
}
else if (rec == PERSISTENT_ID_RECORD)
{
@@ -1048,7 +1195,7 @@
persistentID = encoding.uuid;
}
- else if (rec == BatchingIDGenerator.ID_COUNTER_RECORD)
+ else if (rec == ID_COUNTER_RECORD)
{
idGenerator.loadState(record.id, buffer);
}
@@ -1092,7 +1239,10 @@
}
// Must call close to make sure last id is persisted
- idGenerator.close();
+ if (idGenerator != null)
+ {
+ idGenerator.close();
+ }
bindingsJournal.stop();
@@ -1108,6 +1258,39 @@
return started;
}
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#loadInternalOnly()
+ */
+ public void loadInternalOnly() throws Exception
+ {
+ LoaderCallback dummyLoader = new LoaderCallback()
+ {
+
+ public void failedTransaction(long transactionID, List<RecordInfo> records, List<RecordInfo> recordsToDelete)
+ {
+ }
+
+ public void updateRecord(RecordInfo info)
+ {
+ }
+
+ public void deleteRecord(long id)
+ {
+ }
+
+ public void addRecord(RecordInfo info)
+ {
+ }
+
+ public void addPreparedTransaction(PreparedTransactionInfo preparedTransaction)
+ {
+ }
+ };
+
+ bindingsJournal.load(dummyLoader);
+ messageJournal.load(dummyLoader);
+ }
+
// Public -----------------------------------------------------------------------------------
public Journal getMessageJournal()
@@ -1125,7 +1308,7 @@
// This should be accessed from this package only
void deleteFile(final SequentialFile file)
{
- executor.execute(new Runnable()
+ Runnable deleteAction = new Runnable()
{
public void run()
{
@@ -1139,7 +1322,16 @@
}
}
- });
+ };
+
+ if (executor == null)
+ {
+ deleteAction.run();
+ }
+ else
+ {
+ executor.execute(deleteAction);
+ }
}
/**
@@ -1228,12 +1420,13 @@
return XidCodecSupport.getXidEncodeLength(xid);
}
}
-
+
private static class HeuristicCompletionEncoding implements EncodingSupport
{
Xid xid;
+
boolean isCommit;
-
+
HeuristicCompletionEncoding(final Xid xid, final boolean isCommit)
{
this.xid = xid;
@@ -1621,4 +1814,5 @@
}
+
}
Modified: trunk/src/main/org/hornetq/core/persistence/impl/nullpm/NullStorageManager.java
===================================================================
--- trunk/src/main/org/hornetq/core/persistence/impl/nullpm/NullStorageManager.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/persistence/impl/nullpm/NullStorageManager.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -19,13 +19,16 @@
import javax.transaction.xa.Xid;
+import org.hornetq.core.buffers.ChannelBuffers;
import org.hornetq.core.logging.Logger;
import org.hornetq.core.paging.PageTransactionInfo;
+import org.hornetq.core.paging.PagedMessage;
import org.hornetq.core.paging.PagingManager;
import org.hornetq.core.persistence.QueueBindingInfo;
import org.hornetq.core.persistence.StorageManager;
import org.hornetq.core.postoffice.Binding;
import org.hornetq.core.postoffice.PostOffice;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
import org.hornetq.core.server.LargeServerMessage;
import org.hornetq.core.server.MessageReference;
import org.hornetq.core.server.Queue;
@@ -183,6 +186,20 @@
return new NullStorageLargeServerMessage();
}
+ public LargeServerMessage createLargeMessage(long id, byte[] header)
+ {
+ NullStorageLargeServerMessage largeMessage = new NullStorageLargeServerMessage();
+
+ HornetQBuffer headerBuffer = ChannelBuffers.wrappedBuffer(header);
+
+ largeMessage.decodeProperties(headerBuffer);
+
+ largeMessage.setMessageID(id);
+
+ return largeMessage;
+ }
+
+
public long generateUniqueID()
{
long id = idSequence.getAndIncrement();
@@ -249,4 +266,56 @@
{
}
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#loadInternalOnly()
+ */
+ public void loadInternalOnly() throws Exception
+ {
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#afterReplicated(java.lang.Runnable)
+ */
+ public void afterReplicated(Runnable run)
+ {
+ run.run();
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#isReplicated()
+ */
+ public boolean isReplicated()
+ {
+ return false;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#completeReplication()
+ */
+ public void completeReplication()
+ {
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageClosed(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageClosed(SimpleString storeName, int pageNumber)
+ {
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageDeleted(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageDeleted(SimpleString storeName, int pageNumber)
+ {
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageWrite(org.hornetq.core.paging.PagedMessage, int)
+ */
+ public void pageWrite(PagedMessage message, int pageNumber)
+ {
+ }
+
+
}
Modified: trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/postoffice/impl/PostOfficeImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -848,7 +848,7 @@
private void processRoute(final ServerMessage message, final RoutingContext context) throws Exception
{
- List<MessageReference> refs = new ArrayList<MessageReference>();
+ final List<MessageReference> refs = new ArrayList<MessageReference>();
Transaction tx = context.getTransaction();
@@ -924,14 +924,35 @@
tx.addOperation(new AddOperation(refs));
}
else
- {
- for (MessageReference ref : refs)
+ {
+ if (storageManager.isReplicated())
{
-
- ref.getQueue().addLast(ref);
+ storageManager.afterReplicated(new Runnable()
+ {
+ public void run()
+ {
+ deliverReferences(refs);
+ }
+ });
}
+ else
+ {
+ deliverReferences(refs);
+ }
}
}
+
+ /**
+ * @param refs
+ */
+ private void deliverReferences(final List<MessageReference> refs)
+ {
+ for (MessageReference ref : refs)
+ {
+
+ ref.getQueue().addLast(ref);
+ }
+ }
private synchronized void startExpiryScanner()
{
Modified: trunk/src/main/org/hornetq/core/remoting/impl/PacketDecoder.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/PacketDecoder.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/remoting/impl/PacketDecoder.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -13,6 +13,19 @@
package org.hornetq.core.remoting.impl;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_LARGE_MESSAGE_BEGIN;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_LARGE_MESSAGE_END;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_LARGE_MESSAGE_WRITE;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_PAGE_EVENT;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_PAGE_WRITE;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_PREPARE;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_DELETE_TX;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_COMMIT_ROLLBACK;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_APPEND_TX;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_DELETE;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_RESPONSE;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_APPEND;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.CREATE_REPLICATION;
import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.CREATESESSION;
import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.CREATESESSION_RESP;
import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.CREATE_QUEUE;
@@ -63,6 +76,7 @@
import org.hornetq.core.remoting.Packet;
import org.hornetq.core.remoting.impl.wireformat.CreateQueueMessage;
+import org.hornetq.core.remoting.impl.wireformat.CreateReplicationSessionMessage;
import org.hornetq.core.remoting.impl.wireformat.CreateSessionMessage;
import org.hornetq.core.remoting.impl.wireformat.CreateSessionResponseMessage;
import org.hornetq.core.remoting.impl.wireformat.HornetQExceptionMessage;
@@ -72,6 +86,18 @@
import org.hornetq.core.remoting.impl.wireformat.Ping;
import org.hornetq.core.remoting.impl.wireformat.ReattachSessionMessage;
import org.hornetq.core.remoting.impl.wireformat.ReattachSessionResponseMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationAddMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationAddTXMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationCommitMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationDeleteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationDeleteTXMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargeMessageBeingMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargeMessageWriteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargemessageEndMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPageEventMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPageWriteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPrepareMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationResponseMessage;
import org.hornetq.core.remoting.impl.wireformat.RollbackMessage;
import org.hornetq.core.remoting.impl.wireformat.SessionAcknowledgeMessage;
import org.hornetq.core.remoting.impl.wireformat.SessionBindingQueryMessage;
@@ -351,7 +377,72 @@
{
packet = new SessionSendContinuationMessage();
break;
- }
+ }
+ case CREATE_REPLICATION:
+ {
+ packet = new CreateReplicationSessionMessage();
+ break;
+ }
+ case REPLICATION_APPEND:
+ {
+ packet = new ReplicationAddMessage();
+ break;
+ }
+ case REPLICATION_APPEND_TX:
+ {
+ packet = new ReplicationAddTXMessage();
+ break;
+ }
+ case REPLICATION_DELETE:
+ {
+ packet = new ReplicationDeleteMessage();
+ break;
+ }
+ case REPLICATION_DELETE_TX:
+ {
+ packet = new ReplicationDeleteTXMessage();
+ break;
+ }
+ case REPLICATION_PREPARE:
+ {
+ packet = new ReplicationPrepareMessage();
+ break;
+ }
+ case REPLICATION_COMMIT_ROLLBACK:
+ {
+ packet = new ReplicationCommitMessage();
+ break;
+ }
+ case REPLICATION_RESPONSE:
+ {
+ packet = new ReplicationResponseMessage();
+ break;
+ }
+ case REPLICATION_PAGE_WRITE:
+ {
+ packet = new ReplicationPageWriteMessage();
+ break;
+ }
+ case REPLICATION_PAGE_EVENT:
+ {
+ packet = new ReplicationPageEventMessage();
+ break;
+ }
+ case REPLICATION_LARGE_MESSAGE_BEGIN:
+ {
+ packet = new ReplicationLargeMessageBeingMessage();
+ break;
+ }
+ case REPLICATION_LARGE_MESSAGE_END:
+ {
+ packet = new ReplicationLargemessageEndMessage();
+ break;
+ }
+ case REPLICATION_LARGE_MESSAGE_WRITE:
+ {
+ packet = new ReplicationLargeMessageWriteMessage();
+ break;
+ }
case SESS_FORCE_CONSUMER_DELIVERY:
{
packet = new SessionForceConsumerDelivery();
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/CreateReplicationSessionMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/CreateReplicationSessionMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/CreateReplicationSessionMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ */
+public class CreateReplicationSessionMessage extends PacketImpl
+{
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private long sessionChannelID;
+
+ private int windowSize;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public CreateReplicationSessionMessage(final long sessionChannelID, final int windowSize)
+ {
+ super(CREATE_REPLICATION);
+
+ this.sessionChannelID = sessionChannelID;
+
+ this.windowSize = windowSize;
+ }
+
+ public CreateReplicationSessionMessage()
+ {
+ super(CREATE_REPLICATION);
+ }
+
+ // Public --------------------------------------------------------
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE +
+ // buffer.writeLong(sessionChannelID);
+ DataConstants.SIZE_LONG +
+ // buffer.writeInt(windowSize);
+ DataConstants.SIZE_INT;
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeLong(sessionChannelID);
+ buffer.writeInt(windowSize);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ sessionChannelID = buffer.readLong();
+ windowSize = buffer.readInt();
+ }
+
+ /**
+ * @return the sessionChannelID
+ */
+ public long getSessionChannelID()
+ {
+ return sessionChannelID;
+ }
+
+ /**
+ * @return the windowSize
+ */
+ public int getWindowSize()
+ {
+ return windowSize;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+}
Modified: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/PacketImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/PacketImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/PacketImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -28,16 +28,17 @@
// Constants -------------------------------------------------------------------------
private static final Logger log = Logger.getLogger(PacketImpl.class);
-
+
// The minimal size for all the packets, Common data for all the packets (look at PacketImpl.encode)
- protected static final int BASIC_PACKET_SIZE = DataConstants.SIZE_INT + DataConstants.SIZE_BYTE + DataConstants.SIZE_LONG;
+ protected static final int BASIC_PACKET_SIZE = DataConstants.SIZE_INT + DataConstants.SIZE_BYTE +
+ DataConstants.SIZE_LONG;
private long channelID;
private final byte type;
-
+
private int size;
-
+
// The packet types
// -----------------------------------------------------------------------------------
@@ -65,12 +66,13 @@
public static final byte DELETE_QUEUE = 35;
+ public static final byte CREATE_REPLICATION = 36;
// Session
public static final byte SESS_CREATECONSUMER = 40;
public static final byte SESS_ACKNOWLEDGE = 41;
-
+
public static final byte SESS_EXPIRED = 42;
public static final byte SESS_COMMIT = 43;
@@ -126,7 +128,7 @@
public static final byte SESS_FLOWTOKEN = 70;
public static final byte SESS_SEND = 71;
-
+
public static final byte SESS_SEND_LARGE = 72;
public static final byte SESS_SEND_CONTINUATION = 73;
@@ -139,6 +141,31 @@
public static final byte SESS_FORCE_CONSUMER_DELIVERY = 77;
+ // Replication
+
+ public static final byte REPLICATION_RESPONSE = 80;
+
+ public static final byte REPLICATION_APPEND = 81;
+
+ public static final byte REPLICATION_APPEND_TX = 82;
+
+ public static final byte REPLICATION_DELETE = 83;
+
+ public static final byte REPLICATION_DELETE_TX = 84;
+
+ public static final byte REPLICATION_PREPARE = 85;
+
+ public static final byte REPLICATION_COMMIT_ROLLBACK = 86;
+
+ public static final byte REPLICATION_PAGE_WRITE = 87;
+
+ public static final byte REPLICATION_PAGE_EVENT = 88;
+
+ public static final byte REPLICATION_LARGE_MESSAGE_BEGIN = 89;
+
+ public static final byte REPLICATION_LARGE_MESSAGE_END = 90;
+
+ public static final byte REPLICATION_LARGE_MESSAGE_WRITE = 91;
// Static --------------------------------------------------------
public PacketImpl(final byte type)
@@ -148,7 +175,6 @@
// Public --------------------------------------------------------
-
public byte getType()
{
return type;
@@ -163,40 +189,40 @@
{
this.channelID = channelID;
}
-
+
public int encode(final HornetQBuffer buffer)
{
// The standard header fields
buffer.writeInt(0); // The length gets filled in at the end
buffer.writeByte(type);
buffer.writeLong(channelID);
-
+
encodeBody(buffer);
size = buffer.writerIndex();
-
+
// The length doesn't include the actual length byte
int len = size - DataConstants.SIZE_INT;
buffer.setInt(0, len);
-
+
return size;
}
public void decode(final HornetQBuffer buffer)
{
channelID = buffer.readLong();
-
+
decodeBody(buffer);
-
+
size = buffer.readerIndex();
}
-
+
public final int getPacketSize()
{
return size;
}
-
+
public int getRequiredBufferSize()
{
return BASIC_PACKET_SIZE;
@@ -252,7 +278,7 @@
{
return DataConstants.SIZE_INT + str.length() * 2;
}
-
+
protected int nullableStringEncodeSize(String str)
{
return DataConstants.SIZE_BOOLEAN + (str != null ? stringEncodeSize(str) : 0);
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationAddMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationAddMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private long id;
+
+ /** 0 - Bindings, 1 - MessagesJournal */
+ private byte journalID;
+
+ private boolean isUpdate;
+
+ private byte recordType;
+
+ private EncodingSupport encodingData;
+
+ private byte[] recordData;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationAddMessage()
+ {
+ super(REPLICATION_APPEND);
+ }
+
+ public ReplicationAddMessage(final byte journalID,
+ final boolean isUpdate,
+ final long id,
+ final byte recordType,
+ final EncodingSupport encodingData)
+ {
+ this();
+ this.journalID = journalID;
+ this.isUpdate = isUpdate;
+ this.id = id;
+ this.recordType = recordType;
+ this.encodingData = encodingData;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_BYTE +
+ DataConstants.SIZE_BOOLEAN +
+ DataConstants.SIZE_LONG +
+ DataConstants.SIZE_BYTE +
+ DataConstants.SIZE_INT +
+ (encodingData != null ? encodingData.getEncodeSize() : recordData.length);
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeByte(journalID);
+ buffer.writeBoolean(isUpdate);
+ buffer.writeLong(id);
+ buffer.writeByte(recordType);
+ buffer.writeInt(encodingData.getEncodeSize());
+ encodingData.encode(buffer);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ journalID = buffer.readByte();
+ isUpdate = buffer.readBoolean();
+ id = buffer.readLong();
+ recordType = buffer.readByte();
+ int size = buffer.readInt();
+ recordData = new byte[size];
+ buffer.readBytes(recordData);
+ }
+
+ /**
+ * @return the id
+ */
+ public long getId()
+ {
+ return id;
+ }
+
+ /**
+ * @return the journalID
+ */
+ public byte getJournalID()
+ {
+ return journalID;
+ }
+
+ public boolean isUpdate()
+ {
+ return isUpdate;
+ }
+
+ /**
+ * @return the recordType
+ */
+ public byte getRecordType()
+ {
+ return recordType;
+ }
+
+ /**
+ * @return the recordData
+ */
+ public byte[] getRecordData()
+ {
+ return recordData;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddTXMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddTXMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationAddTXMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationAddTXMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationAddTXMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private long txId;
+
+ private long id;
+
+ /** 0 - Bindings, 1 - MessagesJournal */
+ private byte journalID;
+
+ private boolean isUpdate;
+
+ private byte recordType;
+
+ private EncodingSupport encodingData;
+
+ private byte[] recordData;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationAddTXMessage()
+ {
+ super(REPLICATION_APPEND_TX);
+ }
+
+ public ReplicationAddTXMessage(final byte journalID,
+ final boolean isUpdate,
+ final long txId,
+ final long id,
+ final byte recordType,
+ final EncodingSupport encodingData)
+ {
+ this();
+ this.journalID = journalID;
+ this.isUpdate = isUpdate;
+ this.txId = txId;
+ this.id = id;
+ this.recordType = recordType;
+ this.encodingData = encodingData;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_BYTE +
+ DataConstants.SIZE_BOOLEAN +
+ DataConstants.SIZE_LONG +
+ DataConstants.SIZE_LONG +
+ DataConstants.SIZE_BYTE +
+ DataConstants.SIZE_INT +
+ (encodingData != null ? encodingData.getEncodeSize() : recordData.length);
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeByte(journalID);
+ buffer.writeBoolean(isUpdate);
+ buffer.writeLong(txId);
+ buffer.writeLong(id);
+ buffer.writeByte(recordType);
+ buffer.writeInt(encodingData.getEncodeSize());
+ encodingData.encode(buffer);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ journalID = buffer.readByte();
+ isUpdate = buffer.readBoolean();
+ txId = buffer.readLong();
+ id = buffer.readLong();
+ recordType = buffer.readByte();
+ int size = buffer.readInt();
+ recordData = new byte[size];
+ buffer.readBytes(recordData);
+ }
+
+ /**
+ * @return the id
+ */
+ public long getId()
+ {
+ return id;
+ }
+
+ public long getTxId()
+ {
+ return txId;
+ }
+
+ /**
+ * @return the journalID
+ */
+ public byte getJournalID()
+ {
+ return journalID;
+ }
+
+ public boolean isUpdate()
+ {
+ return isUpdate;
+ }
+
+ /**
+ * @return the recordType
+ */
+ public byte getRecordType()
+ {
+ return recordType;
+ }
+
+ /**
+ * @return the recordData
+ */
+ public byte[] getRecordData()
+ {
+ return recordData;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationCommitMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationCommitMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationCommitMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationCommitMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationCommitMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ /** 0 - Bindings, 1 - MessagesJournal */
+ private byte journalID;
+
+ private boolean rollback;
+
+ private long txId;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationCommitMessage()
+ {
+ super(REPLICATION_COMMIT_ROLLBACK);
+ }
+
+ public ReplicationCommitMessage(final byte journalID, final boolean rollback, final long txId)
+ {
+ this();
+ this.journalID = journalID;
+ this.rollback = rollback;
+ this.txId = txId;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_BYTE + DataConstants.SIZE_BOOLEAN + DataConstants.SIZE_LONG;
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeByte(journalID);
+ buffer.writeBoolean(rollback);
+ buffer.writeLong(txId);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ journalID = buffer.readByte();
+ rollback = buffer.readBoolean();
+ txId = buffer.readLong();
+ }
+
+ public boolean isRollback()
+ {
+ return rollback;
+ }
+
+ public long getTxId()
+ {
+ return txId;
+ }
+
+ /**
+ * @return the journalID
+ */
+ public byte getJournalID()
+ {
+ return journalID;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationDeleteMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationDeleteMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private long id;
+
+ /** 0 - Bindings, 1 - MessagesJournal */
+ private byte journalID;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationDeleteMessage()
+ {
+ super(REPLICATION_DELETE);
+ }
+
+ public ReplicationDeleteMessage(final byte journalID, final long id)
+ {
+ this();
+ this.journalID = journalID;
+ this.id = id;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_BYTE + DataConstants.SIZE_LONG;
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeByte(journalID);
+ buffer.writeLong(id);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ journalID = buffer.readByte();
+ id = buffer.readLong();
+ }
+
+ /**
+ * @return the id
+ */
+ public long getId()
+ {
+ return id;
+ }
+
+ /**
+ * @return the journalID
+ */
+ public byte getJournalID()
+ {
+ return journalID;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteTXMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteTXMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationDeleteTXMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationDeleteTXMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationDeleteTXMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private long txId;
+
+ private long id;
+
+ /** 0 - Bindings, 1 - MessagesJournal */
+ private byte journalID;
+
+ private EncodingSupport encodingData;
+
+ private byte[] recordData;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationDeleteTXMessage()
+ {
+ super(REPLICATION_DELETE_TX);
+ }
+
+ public ReplicationDeleteTXMessage(final byte journalID,
+ final long txId,
+ final long id,
+ final EncodingSupport encodingData)
+ {
+ this();
+ this.journalID = journalID;
+ this.txId = txId;
+ this.id = id;
+ this.encodingData = encodingData;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_BYTE +
+ DataConstants.SIZE_LONG +
+ DataConstants.SIZE_LONG +
+ DataConstants.SIZE_INT +
+ (encodingData != null ? encodingData.getEncodeSize() : recordData.length);
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeByte(journalID);
+ buffer.writeLong(txId);
+ buffer.writeLong(id);
+ buffer.writeInt(encodingData.getEncodeSize());
+ encodingData.encode(buffer);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ journalID = buffer.readByte();
+ txId = buffer.readLong();
+ id = buffer.readLong();
+ int size = buffer.readInt();
+ recordData = new byte[size];
+ buffer.readBytes(recordData);
+ }
+
+ /**
+ * @return the id
+ */
+ public long getId()
+ {
+ return id;
+ }
+
+ public long getTxId()
+ {
+ return txId;
+ }
+
+ /**
+ * @return the journalID
+ */
+ public byte getJournalID()
+ {
+ return journalID;
+ }
+
+ /**
+ * @return the recordData
+ */
+ public byte[] getRecordData()
+ {
+ return recordData;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageBeingMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageBeingMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageBeingMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationLargeMessageBeingMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationLargeMessageBeingMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ long messageId;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationLargeMessageBeingMessage(final long messageId)
+ {
+ this();
+ this.messageId = messageId;
+ }
+
+ public ReplicationLargeMessageBeingMessage()
+ {
+ super(REPLICATION_LARGE_MESSAGE_BEGIN);
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_LONG;
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeLong(messageId);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ this.messageId = buffer.readLong();
+ }
+
+ /**
+ * @return the messageId
+ */
+ public long getMessageId()
+ {
+ return messageId;
+ }
+
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageWriteMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageWriteMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargeMessageWriteMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationLargeMessageWriteMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationLargeMessageWriteMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private long messageId;
+
+ private byte body[];
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+ public ReplicationLargeMessageWriteMessage()
+ {
+ super(REPLICATION_LARGE_MESSAGE_WRITE);
+ }
+
+ /**
+ * @param messageId
+ * @param body
+ */
+ public ReplicationLargeMessageWriteMessage(final long messageId, final byte[] body)
+ {
+ this();
+
+ this.messageId = messageId;
+ this.body = body;
+ }
+
+ // Public --------------------------------------------------------
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_LONG + DataConstants.SIZE_INT + body.length;
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeLong(messageId);
+ buffer.writeInt(body.length);
+ buffer.writeBytes(body);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ messageId = buffer.readLong();
+ int size = buffer.readInt();
+ body = new byte[size];
+ buffer.readBytes(body);
+ }
+
+ /**
+ * @return the messageId
+ */
+ public long getMessageId()
+ {
+ return messageId;
+ }
+
+ /**
+ * @return the body
+ */
+ public byte[] getBody()
+ {
+ return body;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargemessageEndMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargemessageEndMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationLargemessageEndMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationLargemessageEndMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationLargemessageEndMessage extends PacketImpl
+{
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ long messageId;
+
+ boolean isDelete;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationLargemessageEndMessage()
+ {
+ super(REPLICATION_LARGE_MESSAGE_END);
+ }
+
+ public ReplicationLargemessageEndMessage(final long messageId, final boolean isDelete)
+ {
+ this();
+ this.messageId = messageId;
+ this.isDelete = isDelete;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_LONG + DataConstants.SIZE_BOOLEAN;
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeLong(messageId);
+ buffer.writeBoolean(isDelete);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ messageId = buffer.readLong();
+ isDelete = buffer.readBoolean();
+ }
+
+ /**
+ * @return the messageId
+ */
+ public long getMessageId()
+ {
+ return messageId;
+ }
+
+ /**
+ * @return the isDelete
+ */
+ public boolean isDelete()
+ {
+ return isDelete;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageEventMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageEventMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageEventMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+import org.hornetq.utils.SimpleString;
+
+/**
+ * A ReplicationPageEventMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationPageEventMessage extends PacketImpl
+{
+
+ private int pageNumber;
+
+ private SimpleString storeName;
+
+ /**
+ * True = delete page, False = close page
+ */
+ private boolean isDelete;
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationPageEventMessage()
+ {
+ super(REPLICATION_PAGE_EVENT);
+ }
+
+ public ReplicationPageEventMessage(final SimpleString storeName, final int pageNumber, final boolean isDelete)
+ {
+ this();
+ this.pageNumber = pageNumber;
+ this.isDelete = isDelete;
+ this.storeName = storeName;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_INT + storeName.sizeof() + DataConstants.SIZE_BOOLEAN;
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeSimpleString(storeName);
+ buffer.writeInt(pageNumber);
+ buffer.writeBoolean(isDelete);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ storeName = buffer.readSimpleString();
+ pageNumber = buffer.readInt();
+ isDelete = buffer.readBoolean();
+ }
+
+ /**
+ * @return the pageNumber
+ */
+ public int getPageNumber()
+ {
+ return pageNumber;
+ }
+
+ /**
+ * @return the storeName
+ */
+ public SimpleString getStoreName()
+ {
+ return storeName;
+ }
+
+ /**
+ * @return the isDelete
+ */
+ public boolean isDelete()
+ {
+ return isDelete;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageWriteMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageWriteMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPageWriteMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.paging.PagedMessage;
+import org.hornetq.core.paging.impl.PagedMessageImpl;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationPageWriteMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationPageWriteMessage extends PacketImpl
+{
+
+ int pageNumber;
+
+ PagedMessage pagedMessage;
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationPageWriteMessage()
+ {
+ super(REPLICATION_PAGE_WRITE);
+ }
+
+ public ReplicationPageWriteMessage(final PagedMessage pagedMessage, final int pageNumber)
+ {
+ this();
+ this.pageNumber = pageNumber;
+ this.pagedMessage = pagedMessage;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_INT + pagedMessage.getEncodeSize();
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeInt(pageNumber);
+ pagedMessage.encode(buffer);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ pageNumber = buffer.readInt();
+ pagedMessage = new PagedMessageImpl();
+ pagedMessage.decode(buffer);
+ }
+
+ /**
+ * @return the pageNumber
+ */
+ public int getPageNumber()
+ {
+ return pageNumber;
+ }
+
+ /**
+ * @return the pagedMessage
+ */
+ public PagedMessage getPagedMessage()
+ {
+ return pagedMessage;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPrepareMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPrepareMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationPrepareMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.utils.DataConstants;
+
+/**
+ * A ReplicationPrepareMessage
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationPrepareMessage extends PacketImpl
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private long txId;
+
+ /** 0 - Bindings, 1 - MessagesJournal */
+ private byte journalID;
+
+ private EncodingSupport encodingData;
+
+ private byte[] recordData;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationPrepareMessage()
+ {
+ super(REPLICATION_PREPARE);
+ }
+
+ public ReplicationPrepareMessage(final byte journalID, final long txId, final EncodingSupport encodingData)
+ {
+ this();
+ this.journalID = journalID;
+ this.txId = txId;
+ this.encodingData = encodingData;
+ }
+
+ // Public --------------------------------------------------------
+
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE + DataConstants.SIZE_BYTE +
+ DataConstants.SIZE_LONG +
+ DataConstants.SIZE_INT +
+ (encodingData != null ? encodingData.getEncodeSize() : recordData.length);
+
+ }
+
+ @Override
+ public void encodeBody(final HornetQBuffer buffer)
+ {
+ buffer.writeByte(journalID);
+ buffer.writeLong(txId);
+ buffer.writeInt(encodingData.getEncodeSize());
+ encodingData.encode(buffer);
+ }
+
+ @Override
+ public void decodeBody(final HornetQBuffer buffer)
+ {
+ journalID = buffer.readByte();
+ txId = buffer.readLong();
+ int size = buffer.readInt();
+ recordData = new byte[size];
+ buffer.readBytes(recordData);
+ }
+
+ public long getTxId()
+ {
+ return txId;
+ }
+
+ /**
+ * @return the journalID
+ */
+ public byte getJournalID()
+ {
+ return journalID;
+ }
+
+ /**
+ * @return the recordData
+ */
+ public byte[] getRecordData()
+ {
+ return recordData;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationResponseMessage.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationResponseMessage.java (rev 0)
+++ trunk/src/main/org/hornetq/core/remoting/impl/wireformat/ReplicationResponseMessage.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.remoting.impl.wireformat;
+
+/**
+ * @author <a href="mailto:tim.fox@jboss.com">Tim Fox</a>
+ * @version <tt>$Revision$</tt>
+ */
+public class ReplicationResponseMessage extends PacketImpl
+{
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicationResponseMessage()
+ {
+ super(REPLICATION_RESPONSE);
+ }
+
+ // Public --------------------------------------------------------
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.remoting.Packet#getRequiredBufferSize()
+ */
+ @Override
+ public int getRequiredBufferSize()
+ {
+ return BASIC_PACKET_SIZE;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+}
Modified: trunk/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/remoting/server/impl/RemotingServiceImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -83,7 +83,7 @@
private final Configuration config;
- private volatile HornetQServer server;
+ private final HornetQServer server;
private ManagementService managementService;
@@ -107,6 +107,8 @@
{
transportConfigs = config.getAcceptorConfigurations();
+ this.server = server;
+
ClassLoader loader = Thread.currentThread().getContextClassLoader();
for (String interceptorClass : config.getInterceptorClassNames())
{
@@ -122,7 +124,6 @@
}
this.config = config;
- this.server = server;
this.managementService = managementService;
this.threadPool = threadPool;
this.scheduledThreadPool = scheduledThreadPool;
@@ -240,7 +241,10 @@
connections.clear();
- managementService.unregisterAcceptors();
+ if (managementService != null)
+ {
+ managementService.unregisterAcceptors();
+ }
started = false;
}
@@ -297,11 +301,14 @@
: null);
Channel channel1 = rc.getChannel(1, -1, false);
+
+ ChannelHandler handler = createHandler(rc, channel1);
- ChannelHandler handler = new HornetQPacketHandler(server, channel1, rc);
-
channel1.setHandler(handler);
+
+
+
long ttl = ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL;
if (config.getConnectionTTLOverride() != -1)
{
@@ -386,6 +393,14 @@
// Protected -----------------------------------------------------
+ /**
 + * Subclasses (e.g. in tests) may override this to create a different channel handler.
+ */
+ protected ChannelHandler createHandler(final RemotingConnection rc, Channel channel)
+ {
+ return new HornetQPacketHandler(server, channel, rc);
+ }
+
// Private -------------------------------------------------------
// Inner classes -------------------------------------------------
Added: trunk/src/main/org/hornetq/core/replication/ReplicationContext.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/ReplicationContext.java (rev 0)
+++ trunk/src/main/org/hornetq/core/replication/ReplicationContext.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.replication;
+
+
+/**
+ * This represents a set of operations done as part of replication.
+ * When the entire set is done a group of Runnables can be executed.
+ *
 + * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public interface ReplicationContext
+{
+ /** To be called by the replication manager, when new replication is added to the queue */
+ void linedUp();
+
+ /** To be called by the replication manager, when data is confirmed on the channel */
+ void replicated();
+
+ void addReplicationAction(Runnable runnable);
+
+ /** To be called when there are no more operations pending */
+ void complete();
+
+}
Added: trunk/src/main/org/hornetq/core/replication/ReplicationEndpoint.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/ReplicationEndpoint.java (rev 0)
+++ trunk/src/main/org/hornetq/core/replication/ReplicationEndpoint.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.replication;
+
+import org.hornetq.core.remoting.Channel;
+import org.hornetq.core.remoting.ChannelHandler;
+import org.hornetq.core.server.HornetQComponent;
+
+/**
+ * A ReplicationEndpoint
+ *
 + * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public interface ReplicationEndpoint extends ChannelHandler, HornetQComponent
+{
+
+ void setChannel(Channel channel);
+
+ Channel getChannel();
+
+}
Added: trunk/src/main/org/hornetq/core/replication/ReplicationManager.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/ReplicationManager.java (rev 0)
+++ trunk/src/main/org/hornetq/core/replication/ReplicationManager.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.replication;
+
+import java.util.Set;
+
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.paging.PagedMessage;
+import org.hornetq.core.server.HornetQComponent;
+import org.hornetq.utils.SimpleString;
+
+/**
 + * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public interface ReplicationManager extends HornetQComponent
+{
+ void appendAddRecord(byte journalID, long id, byte recordType, EncodingSupport record) throws Exception;
+
+ void appendUpdateRecord(byte journalID, long id, byte recordType, EncodingSupport record) throws Exception;
+
+ void appendDeleteRecord(byte journalID, long id) throws Exception;
+
+ void appendAddRecordTransactional(byte journalID, long txID, long id, byte recordType, EncodingSupport record) throws Exception;
+
+ void appendUpdateRecordTransactional(byte journalID, long txID, long id, byte recordType, EncodingSupport record) throws Exception;
+
+ void appendDeleteRecordTransactional(byte journalID, long txID, long id, EncodingSupport record) throws Exception;
+
+ void appendDeleteRecordTransactional(byte journalID, long txID, long id) throws Exception;
+
+ void appendCommitRecord(byte journalID, long txID) throws Exception;
+
+ void appendPrepareRecord(byte journalID, long txID, EncodingSupport transactionData) throws Exception;
+
+ void appendRollbackRecord(byte journalID, long txID) throws Exception;
+
+ /** Add an action to be executed after the pending replications */
+ void afterReplicated(Runnable runnable);
+
+ void closeContext();
+
+ /** A list of tokens that are still waiting for replications to be completed */
+ Set<ReplicationContext> getActiveTokens();
+
+ /**
+ * @param storeName
+ * @param pageNumber
+ */
+ void pageClosed(SimpleString storeName, int pageNumber);
+
+ /**
+ * @param storeName
+ * @param pageNumber
+ */
+ void pageDeleted(SimpleString storeName, int pageNumber);
+
+
+ /**
 + * @param message the paged message being written
+ * @param pageNumber
+ */
+ void pageWrite(PagedMessage message, int pageNumber);
+
+ void largeMessageBegin(long messageId);
+
+ void largeMessageWrite(long messageId, byte [] body);
+
+ void largeMessageEnd(long messageId);
+
+ void largeMessageDelete(long messageId);
+
+}
Added: trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java (rev 0)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicatedJournal.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.replication.impl;
+
+import java.util.List;
+
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.journal.Journal;
+import org.hornetq.core.journal.LoaderCallback;
+import org.hornetq.core.journal.PreparedTransactionInfo;
+import org.hornetq.core.journal.RecordInfo;
+import org.hornetq.core.journal.TransactionFailureCallback;
+import org.hornetq.core.journal.impl.JournalImpl.ByteArrayEncoding;
+import org.hornetq.core.logging.Logger;
+import org.hornetq.core.persistence.impl.journal.JournalStorageManager;
+import org.hornetq.core.replication.ReplicationManager;
+
+
+/**
+ * Used by the {@link JournalStorageManager} to replicate journal calls.
+ *
 + * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ * @see JournalStorageManager
+ *
+ */
+public class ReplicatedJournal implements Journal
+{
+
+ // Constants -----------------------------------------------------
+
+ private static final Logger log = Logger.getLogger(ReplicatedJournal.class);
+
+ // Attributes ----------------------------------------------------
+
+ private static final boolean trace = log.isTraceEnabled();
+
+ private final ReplicationManager replicationManager;
+
+ private final Journal replicatedJournal;
+
+ private final byte journalID;
+
+ public ReplicatedJournal(final byte journaID,
+ final Journal replicatedJournal,
+ final ReplicationManager replicationManager)
+ {
+ super();
+ journalID = journaID;
+ this.replicatedJournal = replicatedJournal;
+ this.replicationManager = replicationManager;
+ }
+
+ // Static --------------------------------------------------------
+
+ private static void trace(String message)
+ {
+ log.trace(message);
+ }
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+ /**
+ * @param id
+ * @param recordType
+ * @param record
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendAddRecord(long, byte, byte[], boolean)
+ */
+ public void appendAddRecord(final long id, final byte recordType, final byte[] record, final boolean sync) throws Exception
+ {
+ this.appendAddRecord(id, recordType, new ByteArrayEncoding(record), sync);
+ }
+
+ /**
+ * @param id
+ * @param recordType
+ * @param record
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendAddRecord(long, byte, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendAddRecord(final long id, final byte recordType, final EncodingSupport record, final boolean sync) throws Exception
+ {
+ if (trace)
+ {
+ trace("Append record id = " + id + " recordType = " + recordType);
+ }
+ replicationManager.appendAddRecord(journalID, id, recordType, record);
+ replicatedJournal.appendAddRecord(id, recordType, record, sync);
+ }
+
+ /**
+ * @param txID
+ * @param id
+ * @param recordType
+ * @param record
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendAddRecordTransactional(long, long, byte, byte[])
+ */
+ public void appendAddRecordTransactional(final long txID, final long id, final byte recordType, final byte[] record) throws Exception
+ {
+ this.appendAddRecordTransactional(txID, id, recordType, new ByteArrayEncoding(record));
+ }
+
+ /**
+ * @param txID
+ * @param id
+ * @param recordType
+ * @param record
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendAddRecordTransactional(long, long, byte, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendAddRecordTransactional(final long txID,
+ final long id,
+ final byte recordType,
+ final EncodingSupport record) throws Exception
+ {
+ if (trace)
+ {
+ trace("Append record TXid = " + id + " recordType = " + recordType);
+ }
+ replicationManager.appendAddRecordTransactional(journalID, txID, id, recordType, record);
+ replicatedJournal.appendAddRecordTransactional(txID, id, recordType, record);
+ }
+
+ /**
+ * @param txID
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendCommitRecord(long, boolean)
+ */
+ public void appendCommitRecord(final long txID, final boolean sync) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendCommit " + txID);
+ }
+ replicationManager.appendCommitRecord(journalID, txID);
+ replicatedJournal.appendCommitRecord(txID, sync);
+ }
+
+ /**
+ * @param id
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecord(long, boolean)
+ */
+ public void appendDeleteRecord(final long id, final boolean sync) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendDelete " + id);
+ }
+ replicationManager.appendDeleteRecord(journalID, id);
+ replicatedJournal.appendDeleteRecord(id, sync);
+ }
+
+ /**
+ * @param txID
+ * @param id
+ * @param record
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecordTransactional(long, long, byte[])
+ */
+ public void appendDeleteRecordTransactional(final long txID, final long id, final byte[] record) throws Exception
+ {
+ this.appendDeleteRecordTransactional(txID, id, new ByteArrayEncoding(record));
+ }
+
+ /**
+ * @param txID
+ * @param id
+ * @param record
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecordTransactional(long, long, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendDeleteRecordTransactional(final long txID, final long id, final EncodingSupport record) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendDelete txID=" + txID + " id=" + id);
+ }
+ replicationManager.appendDeleteRecordTransactional(journalID, txID, id, record);
+ replicatedJournal.appendDeleteRecordTransactional(txID, id, record);
+ }
+
+ /**
+ * @param txID
+ * @param id
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecordTransactional(long, long)
+ */
+ public void appendDeleteRecordTransactional(final long txID, final long id) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendDelete (noencoding) txID=" + txID + " id=" + id);
+ }
+ replicationManager.appendDeleteRecordTransactional(journalID, txID, id);
+ replicatedJournal.appendDeleteRecordTransactional(txID, id);
+ }
+
+ /**
+ * @param txID
+ * @param transactionData
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendPrepareRecord(long, byte[], boolean)
+ */
+ public void appendPrepareRecord(final long txID, final byte[] transactionData, final boolean sync) throws Exception
+ {
+ this.appendPrepareRecord(txID, new ByteArrayEncoding(transactionData), sync);
+ }
+
+ /**
+ * @param txID
+ * @param transactionData
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendPrepareRecord(long, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendPrepareRecord(final long txID, final EncodingSupport transactionData, final boolean sync) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendPrepare txID=" + txID);
+ }
+ replicationManager.appendPrepareRecord(journalID, txID, transactionData);
+ replicatedJournal.appendPrepareRecord(txID, transactionData, sync);
+ }
+
+ /**
+ * @param txID
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendRollbackRecord(long, boolean)
+ */
+ public void appendRollbackRecord(final long txID, final boolean sync) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendRollback " + txID);
+ }
+ replicationManager.appendRollbackRecord(journalID, txID);
+ replicatedJournal.appendRollbackRecord(txID, sync);
+ }
+
+ /**
+ * @param id
+ * @param recordType
+ * @param record
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecord(long, byte, byte[], boolean)
+ */
+ public void appendUpdateRecord(final long id, final byte recordType, final byte[] record, final boolean sync) throws Exception
+ {
+ this.appendUpdateRecord(id, recordType, new ByteArrayEncoding(record), sync);
+ }
+
+ /**
+ * @param id
+ * @param recordType
+ * @param record
+ * @param sync
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecord(long, byte, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendUpdateRecord(final long id, final byte recordType, final EncodingSupport record, final boolean sync) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendUpdateRecord id = " + id + " , recordType = " + recordType);
+ }
+ replicationManager.appendUpdateRecord(journalID, id, recordType, record);
+ replicatedJournal.appendUpdateRecord(id, recordType, record, sync);
+ }
+
+ /**
+ * @param txID
+ * @param id
+ * @param recordType
+ * @param record
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecordTransactional(long, long, byte, byte[])
+ */
+ public void appendUpdateRecordTransactional(final long txID,
+ final long id,
+ final byte recordType,
+ final byte[] record) throws Exception
+ {
+ this.appendUpdateRecordTransactional(txID, id, recordType, new ByteArrayEncoding(record));
+ }
+
+ /**
+ * @param txID
+ * @param id
+ * @param recordType
+ * @param record
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecordTransactional(long, long, byte, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendUpdateRecordTransactional(final long txID,
+ final long id,
+ final byte recordType,
+ final EncodingSupport record) throws Exception
+ {
+ if (trace)
+ {
+ trace("AppendUpdateRecord txid=" + txID + " id = " + id + " , recordType = " + recordType);
+ }
+ replicationManager.appendUpdateRecordTransactional(journalID, txID, id, recordType, record);
+ replicatedJournal.appendUpdateRecordTransactional(txID, id, recordType, record);
+ }
+
+ /**
+ * @param committedRecords
+ * @param preparedTransactions
+ * @param transactionFailure
+ * @return
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#load(java.util.List, java.util.List, org.hornetq.core.journal.TransactionFailureCallback)
+ */
+ public long load(final List<RecordInfo> committedRecords,
+ final List<PreparedTransactionInfo> preparedTransactions,
+ final TransactionFailureCallback transactionFailure) throws Exception
+ {
+ return replicatedJournal.load(committedRecords, preparedTransactions, transactionFailure);
+ }
+
+ /**
+ * @param reloadManager
+ * @return
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#load(org.hornetq.core.journal.LoaderCallback)
+ */
+ public long load(final LoaderCallback reloadManager) throws Exception
+ {
+ return replicatedJournal.load(reloadManager);
+ }
+
+ /**
+ * @param pages
+ * @throws Exception
+ * @see org.hornetq.core.journal.Journal#perfBlast(int)
+ */
+ public void perfBlast(final int pages) throws Exception
+ {
+ replicatedJournal.perfBlast(pages);
+ }
+
+ /**
+ * @throws Exception
+ * @see org.hornetq.core.server.HornetQComponent#start()
+ */
+ public void start() throws Exception
+ {
+ replicatedJournal.start();
+ }
+
+ /**
+ * @throws Exception
+ * @see org.hornetq.core.server.HornetQComponent#stop()
+ */
+ public void stop() throws Exception
+ {
+ replicatedJournal.stop();
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#getAlignment()
+ */
+ public int getAlignment() throws Exception
+ {
+ return replicatedJournal.getAlignment();
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#isStarted()
+ */
+ public boolean isStarted()
+ {
+ return replicatedJournal.isStarted();
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java (rev 0)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicationContextImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.replication.impl;
+
+import java.util.ArrayList;
+import java.util.concurrent.Executor;
+
+import org.hornetq.core.replication.ReplicationContext;
+
+/**
 + * A ReplicationContextImpl
+ *
 + * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationContextImpl implements ReplicationContext
+{
+ final Executor executor;
+
+ private ArrayList<Runnable> tasks;
+
+ private volatile int pendings;
+
+ /**
+ * @param executor
+ */
+ public ReplicationContextImpl(Executor executor)
+ {
+ super();
+ this.executor = executor;
+ }
+
+ /** To be called by the replication manager, when new replication is added to the queue */
+ public synchronized void linedUp()
+ {
+ pendings++;
+ }
+
+ /** To be called by the replication manager, when data is confirmed on the channel */
+ public synchronized void replicated()
+ {
+ if (--pendings == 0)
+ {
+ if (tasks != null)
+ {
+ for (Runnable run : tasks)
+ {
+ executor.execute(run);
+ }
+ tasks.clear();
+ }
+ }
+ }
+
+ /** You may have several actions to be done after a replication operation is completed. */
+ public synchronized void addReplicationAction(Runnable runnable)
+ {
+ if (pendings == 0)
+ {
+ executor.execute(runnable);
+ }
+ else
+ {
+ if (tasks == null)
+ {
+ tasks = new ArrayList<Runnable>();
+ }
+
+ tasks.add(runnable);
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationToken#complete()
+ */
+ public void complete()
+ {
+ // TODO Auto-generated method stub
+
+ }
+}
Added: trunk/src/main/org/hornetq/core/replication/impl/ReplicationEndpointImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicationEndpointImpl.java (rev 0)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicationEndpointImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.replication.impl;
+
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_LARGE_MESSAGE_BEGIN;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_LARGE_MESSAGE_END;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REPLICATION_LARGE_MESSAGE_WRITE;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.hornetq.core.config.Configuration;
+import org.hornetq.core.journal.Journal;
+import org.hornetq.core.logging.Logger;
+import org.hornetq.core.paging.Page;
+import org.hornetq.core.paging.PagedMessage;
+import org.hornetq.core.paging.PagingManager;
+import org.hornetq.core.paging.impl.PagingManagerImpl;
+import org.hornetq.core.paging.impl.PagingStoreFactoryNIO;
+import org.hornetq.core.persistence.impl.journal.JournalStorageManager;
+import org.hornetq.core.remoting.Channel;
+import org.hornetq.core.remoting.Packet;
+import org.hornetq.core.remoting.impl.wireformat.PacketImpl;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationAddMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationAddTXMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationCommitMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationDeleteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationDeleteTXMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargeMessageBeingMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargeMessageWriteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargemessageEndMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPageEventMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPageWriteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPrepareMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationResponseMessage;
+import org.hornetq.core.replication.ReplicationEndpoint;
+import org.hornetq.core.server.HornetQServer;
+import org.hornetq.core.server.LargeServerMessage;
+import org.hornetq.core.server.ServerMessage;
+import org.hornetq.utils.SimpleString;
+
+/**
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationEndpointImpl implements ReplicationEndpoint
+{
+
+ // Constants -----------------------------------------------------
+
+ private static final Logger log = Logger.getLogger(ReplicationEndpointImpl.class);
+
+ // Attributes ----------------------------------------------------
+
+ private static final boolean trace = log.isTraceEnabled();
+
+ // Small helper so trace call sites stay one-liners.
+ private static void trace(String msg)
+ {
+ log.trace(msg);
+ }
+
+ // Backup server this endpoint applies replicated data to.
+ private final HornetQServer server;
+
+ // Channel the live server sends replication packets on; set via setChannel().
+ private Channel channel;
+
+ // Local journals the replicated records are appended to (see getJournal()).
+ private Journal bindingsJournal;
+
+ private Journal messagingJournal;
+
+ private JournalStorageManager storage;
+
+ private PagingManager pageManager;
+
+ // Index of open pages on the backup: store name -> (page number -> Page).
+ private final ConcurrentMap<SimpleString, ConcurrentMap<Integer, Page>> pageIndex = new ConcurrentHashMap<SimpleString, ConcurrentMap<Integer, Page>>();
+
+ // Large messages currently being received, keyed by message id.
+ private final ConcurrentMap<Long, LargeServerMessage> largeMessages = new ConcurrentHashMap<Long, LargeServerMessage>();
+
+ // Constructors --------------------------------------------------
+ public ReplicationEndpointImpl(final HornetQServer server)
+ {
+ this.server = server;
+ }
+
+ // Public --------------------------------------------------------
+ /*
+ * (non-Javadoc)
+ * @see org.hornetq.core.remoting.ChannelHandler#handlePacket(org.hornetq.core.remoting.Packet)
+ */
+ public void handlePacket(final Packet packet)
+ {
+ // Dispatch on packet type; a response is always sent back (even after a
+ // failure) so the live server's flow control keeps moving.
+ try
+ {
+ if (packet.getType() == PacketImpl.REPLICATION_APPEND)
+ {
+ handleAppendAddRecord((ReplicationAddMessage)packet);
+ }
+ else if (packet.getType() == PacketImpl.REPLICATION_APPEND_TX)
+ {
+ handleAppendAddTXRecord((ReplicationAddTXMessage)packet);
+ }
+ else if (packet.getType() == PacketImpl.REPLICATION_DELETE)
+ {
+ handleAppendDelete((ReplicationDeleteMessage)packet);
+ }
+ else if (packet.getType() == PacketImpl.REPLICATION_DELETE_TX)
+ {
+ handleAppendDeleteTX((ReplicationDeleteTXMessage)packet);
+ }
+ else if (packet.getType() == PacketImpl.REPLICATION_PREPARE)
+ {
+ handlePrepare((ReplicationPrepareMessage)packet);
+ }
+ else if (packet.getType() == PacketImpl.REPLICATION_COMMIT_ROLLBACK)
+ {
+ handleCommitRollback((ReplicationCommitMessage)packet);
+ }
+ else if (packet.getType() == PacketImpl.REPLICATION_PAGE_WRITE)
+ {
+ handlePageWrite((ReplicationPageWriteMessage)packet);
+ }
+ else if (packet.getType() == PacketImpl.REPLICATION_PAGE_EVENT)
+ {
+ handlePageEvent((ReplicationPageEventMessage)packet);
+ }
+ else if (packet.getType() == REPLICATION_LARGE_MESSAGE_BEGIN)
+ {
+ handleLargeMessageBegin((ReplicationLargeMessageBeingMessage)packet);
+ }
+ else if (packet.getType() == REPLICATION_LARGE_MESSAGE_WRITE)
+ {
+ handleLargeMessageWrite((ReplicationLargeMessageWriteMessage)packet);
+ }
+ else if (packet.getType() == REPLICATION_LARGE_MESSAGE_END)
+ {
+ handleLargeMessageEnd((ReplicationLargemessageEndMessage)packet);
+ }
+ else
+ {
+ log.warn("Packet " + packet + " can't be processed by the ReplicationEndpoint");
+ }
+ }
+ catch (Exception e)
+ {
+ // TODO: what to do when the IO fails on the backup side? should we shutdown the backup?
+ log.warn(e.getMessage(), e);
+ }
+ channel.send(new ReplicationResponseMessage());
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#isStarted()
+ */
+ public boolean isStarted()
+ {
+ return true;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#start()
+ *
+ * Opens the backup's storage and paging layers so replicated records can
+ * be applied locally.
+ */
+ public void start() throws Exception
+ {
+ Configuration config = server.getConfiguration();
+
+ storage = new JournalStorageManager(config, server.getExecutorFactory().getExecutor());
+ storage.start();
+
+ bindingsJournal = storage.getBindingsJournal();
+ messagingJournal = storage.getMessageJournal();
+
+ // We only need to load internal structures on the backup...
+ storage.loadInternalOnly();
+
+ pageManager = new PagingManagerImpl(new PagingStoreFactoryNIO(config.getPagingDirectory(),
+ server.getExecutorFactory()),
+ storage,
+ server.getAddressSettingsRepository(),
+ false);
+
+ pageManager.start();
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#stop()
+ *
+ * Closes the channel and storage, then releases every open page and
+ * partially-received large message held by this endpoint.
+ */
+ public void stop() throws Exception
+ {
+ channel.close();
+ storage.stop();
+
+ for (ConcurrentMap<Integer, Page> map : pageIndex.values())
+ {
+ for (Page page : map.values())
+ {
+ try
+ {
+ page.close();
+ }
+ catch (Exception e)
+ {
+ log.warn("Error while closing the page on backup", e);
+ }
+ }
+ }
+
+ pageIndex.clear();
+
+
+ for (LargeServerMessage largeMessage : largeMessages.values())
+ {
+ largeMessage.releaseResources();
+ }
+
+ largeMessages.clear();
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationEndpoint#getChannel()
+ */
+ public Channel getChannel()
+ {
+ return channel;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationEndpoint#setChannel(org.hornetq.core.remoting.Channel)
+ */
+ public void setChannel(final Channel channel)
+ {
+ this.channel = channel;
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+ /**
+ * Finishes a large message transfer: either deletes its file (isDelete)
+ * or marks it as fully stored.
+ * @param packet
+ */
+ private void handleLargeMessageEnd(ReplicationLargemessageEndMessage packet)
+ {
+ LargeServerMessage message = lookupLargeMessage(packet.getMessageId(), packet.isDelete());
+ if (message != null)
+ {
+ if (packet.isDelete())
+ {
+ try
+ {
+ message.deleteFile();
+ }
+ catch (Exception e)
+ {
+ log.warn("Error deleting large message ID = " + packet.getMessageId(), e);
+ }
+ }
+ else
+ {
+ try
+ {
+ message.setStored();
+ }
+ catch (Exception e)
+ {
+ // FIX: this branch previously logged the delete-branch message verbatim
+ log.warn("Error marking large message as stored, ID = " + packet.getMessageId(), e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Appends a replicated chunk of body bytes to an in-progress large message.
+ * @param packet
+ */
+ private void handleLargeMessageWrite(ReplicationLargeMessageWriteMessage packet) throws Exception
+ {
+ LargeServerMessage message = lookupLargeMessage(packet.getMessageId(), false);
+ if (message != null)
+ {
+ message.addBytes(packet.getBody());
+ }
+ }
+
+
+ // Returns the in-progress large message, removing it from the index when
+ // isDelete is set. Logs and returns null when the id is unknown.
+ private LargeServerMessage lookupLargeMessage(long messageId, boolean isDelete)
+ {
+ LargeServerMessage message;
+
+ if (isDelete)
+ {
+ message = largeMessages.remove(messageId);
+ }
+ else
+ {
+ message = largeMessages.get(messageId);
+ }
+
+ if (message == null)
+ {
+ log.warn("Large MessageID " + messageId + " is not available on backup server. Ignoring replication message");
+ }
+
+ return message;
+
+ }
+
+ /**
+ * Starts receiving a new large message and registers it for subsequent
+ * write/end packets.
+ * @param packet
+ */
+ private void handleLargeMessageBegin(ReplicationLargeMessageBeingMessage packet)
+ {
+ LargeServerMessage largeMessage = storage.createLargeMessage();
+ largeMessage.setMessageID(packet.getMessageId());
+ trace("Receiving Large Message " + largeMessage.getMessageID() + " on backup");
+ this.largeMessages.put(largeMessage.getMessageID(), largeMessage);
+ }
+
+
+ /**
+ * Applies a replicated commit or rollback (chosen by packet.isRollback())
+ * to the proper local journal. sync=false: the backup does not wait.
+ * @param packet
+ */
+ private void handleCommitRollback(final ReplicationCommitMessage packet) throws Exception
+ {
+ Journal journalToUse = getJournal(packet.getJournalID());
+
+ if (packet.isRollback())
+ {
+ journalToUse.appendRollbackRecord(packet.getTxId(), false);
+ }
+ else
+ {
+ journalToUse.appendCommitRecord(packet.getTxId(), false);
+ }
+ }
+
+ /**
+ * Applies a replicated XA prepare record.
+ * @param packet
+ */
+ private void handlePrepare(final ReplicationPrepareMessage packet) throws Exception
+ {
+ Journal journalToUse = getJournal(packet.getJournalID());
+
+ journalToUse.appendPrepareRecord(packet.getTxId(), packet.getRecordData(), false);
+ }
+
+ /**
+ * Applies a replicated transactional delete.
+ * @param packet
+ */
+ private void handleAppendDeleteTX(final ReplicationDeleteTXMessage packet) throws Exception
+ {
+ Journal journalToUse = getJournal(packet.getJournalID());
+
+ journalToUse.appendDeleteRecordTransactional(packet.getTxId(), packet.getId(), packet.getRecordData());
+ }
+
+ /**
+ * Applies a replicated non-transactional delete.
+ * @param packet
+ */
+ private void handleAppendDelete(final ReplicationDeleteMessage packet) throws Exception
+ {
+ Journal journalToUse = getJournal(packet.getJournalID());
+
+ journalToUse.appendDeleteRecord(packet.getId(), false);
+ }
+
+ /**
+ * Applies a replicated transactional add or update (chosen by isUpdate()).
+ * @param packet
+ */
+ private void handleAppendAddTXRecord(final ReplicationAddTXMessage packet) throws Exception
+ {
+ Journal journalToUse = getJournal(packet.getJournalID());
+
+ if (packet.isUpdate())
+ {
+ journalToUse.appendUpdateRecordTransactional(packet.getTxId(),
+ packet.getId(),
+ packet.getRecordType(),
+ packet.getRecordData());
+ }
+ else
+ {
+ journalToUse.appendAddRecordTransactional(packet.getTxId(),
+ packet.getId(),
+ packet.getRecordType(),
+ packet.getRecordData());
+ }
+ }
+
+ /**
+ * Applies a replicated non-transactional add or update (chosen by isUpdate()).
+ * @param packet
+ * @throws Exception
+ */
+ private void handleAppendAddRecord(final ReplicationAddMessage packet) throws Exception
+ {
+ Journal journalToUse = getJournal(packet.getJournalID());
+
+ if (packet.isUpdate())
+ {
+ if (trace)
+ {
+ trace("Endpoint appendUpdate id = " + packet.getId());
+ }
+ journalToUse.appendUpdateRecord(packet.getId(), packet.getRecordType(), packet.getRecordData(), false);
+ }
+ else
+ {
+ if (trace)
+ {
+ trace("Endpoint append id = " + packet.getId());
+ }
+ journalToUse.appendAddRecord(packet.getId(), packet.getRecordType(), packet.getRecordData(), false);
+ }
+ }
+
+ /**
+ * Mirrors a page lifecycle event from the live server: the page is removed
+ * from the local index (opening it on demand if unseen) and then deleted
+ * or closed to match the live server.
+ * @param packet
+ */
+ private void handlePageEvent(final ReplicationPageEventMessage packet) throws Exception
+ {
+ ConcurrentMap<Integer, Page> pages = getPageMap(packet.getStoreName());
+
+ Page page = pages.remove(packet.getPageNumber());
+
+ if (page == null)
+ {
+ page = getPage(packet.getStoreName(), packet.getPageNumber());
+ }
+
+
+ if (page != null)
+ {
+ if (packet.isDelete())
+ {
+ page.delete();
+ }
+ else
+ {
+ page.close();
+ }
+ }
+
+ }
+
+ /**
+ * Writes a replicated paged message into the matching local page.
+ * @param packet
+ */
+ private void handlePageWrite(final ReplicationPageWriteMessage packet) throws Exception
+ {
+ PagedMessage pgdMessage = packet.getPagedMessage();
+ ServerMessage msg = pgdMessage.getMessage(storage);
+ Page page = getPage(msg.getDestination(), packet.getPageNumber());
+ page.write(pgdMessage);
+ }
+
+ // Returns the page map for a store, creating it atomically on first use
+ // (putIfAbsent resolves races between concurrent callers).
+ private ConcurrentMap<Integer, Page> getPageMap(final SimpleString storeName)
+ {
+ ConcurrentMap<Integer, Page> resultIndex = pageIndex.get(storeName);
+
+ if (resultIndex == null)
+ {
+ resultIndex = new ConcurrentHashMap<Integer, Page>();
+ ConcurrentMap<Integer, Page> mapResult = pageIndex.putIfAbsent(storeName, resultIndex);
+ if (mapResult != null)
+ {
+ resultIndex = mapResult;
+ }
+ }
+
+ return resultIndex;
+ }
+
+ // Returns the open page for (storeName, pageId), creating and opening it
+ // if this is the first packet referencing that page.
+ private Page getPage(final SimpleString storeName, final int pageId) throws Exception
+ {
+ ConcurrentMap<Integer, Page> map = getPageMap(storeName);
+
+ Page page = map.get(pageId);
+
+ if (page == null)
+ {
+ page = newPage(pageId, storeName, map);
+ }
+
+ return page;
+ }
+
+ /**
+ * Creates and opens a page; synchronized (with a recheck) so concurrent
+ * callers cannot create the same page twice.
+ * @param pageId
+ * @param map
+ * @return
+ */
+ private synchronized Page newPage(final int pageId,
+ final SimpleString storeName,
+ final ConcurrentMap<Integer, Page> map) throws Exception
+ {
+ Page page = map.get(pageId);
+
+ if (page == null)
+ {
+ page = pageManager.getPageStore(storeName).createPage(pageId);
+ page.open();
+ map.put(pageId, page);
+ }
+
+ return page;
+ }
+
+ /**
+ * Maps a wire journal id to a local journal: 0 = bindings, anything else =
+ * message journal.
+ * @param journalID
+ * @return
+ */
+ private Journal getJournal(final byte journalID)
+ {
+ Journal journalToUse;
+ if (journalID == (byte)0)
+ {
+ journalToUse = bindingsJournal;
+ }
+ else
+ {
+ journalToUse = messagingJournal;
+ }
+ return journalToUse;
+ }
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java (rev 0)
+++ trunk/src/main/org/hornetq/core/replication/impl/ReplicationManagerImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.core.replication.impl;
+
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Executor;
+
+import org.hornetq.core.client.impl.FailoverManager;
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.logging.Logger;
+import org.hornetq.core.paging.PagedMessage;
+import org.hornetq.core.remoting.Channel;
+import org.hornetq.core.remoting.ChannelHandler;
+import org.hornetq.core.remoting.Packet;
+import org.hornetq.core.remoting.RemotingConnection;
+import org.hornetq.core.remoting.impl.wireformat.CreateReplicationSessionMessage;
+import org.hornetq.core.remoting.impl.wireformat.PacketImpl;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationAddMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationAddTXMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationCommitMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationDeleteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationDeleteTXMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargeMessageBeingMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargeMessageWriteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationLargemessageEndMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPageEventMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPageWriteMessage;
+import org.hornetq.core.remoting.impl.wireformat.ReplicationPrepareMessage;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.core.replication.ReplicationManager;
+import org.hornetq.core.replication.ReplicationContext;
+import org.hornetq.utils.ConcurrentHashSet;
+import org.hornetq.utils.SimpleString;
+
+/**
+ * A ReplicationManagerImpl
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationManagerImpl implements ReplicationManager
+{
+
+ // Constants -----------------------------------------------------
+ private static final Logger log = Logger.getLogger(ReplicationManagerImpl.class);
+
+ // Attributes ----------------------------------------------------
+
+ // TODO: where should this be configured?
+ private static final int WINDOW_SIZE = 1024 * 1024;
+
+ // Handles REPLICATION_RESPONSE packets coming back from the backup.
+ private final ResponseHandler responseHandler = new ResponseHandler();
+
+ private final FailoverManager failoverManager;
+
+ private RemotingConnection connection;
+
+ // Dedicated channel replication packets are sent on.
+ private Channel replicatingChannel;
+
+ private boolean started;
+
+ // When false, append* calls become no-ops and queued actions run locally.
+ private volatile boolean enabled;
+
+ private final Object replicationLock = new Object();
+
+ private final Executor executor;
+
+ // Per-thread replication context (one unit of replicated work per thread).
+ private final ThreadLocal<ReplicationContext> repliToken = new ThreadLocal<ReplicationContext>();
+
+ // Contexts awaiting confirmation, in send order (FIFO matches responses).
+ private final Queue<ReplicationContext> pendingTokens = new ConcurrentLinkedQueue<ReplicationContext>();
+
+ private final ConcurrentHashSet<ReplicationContext> activeTokens = new ConcurrentHashSet<ReplicationContext>();
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ /**
+ * @param failoverManager source of the connection to the backup server
+ * @param executor executor used to run after-replication actions
+ */
+ public ReplicationManagerImpl(final FailoverManager failoverManager, final Executor executor)
+ {
+ this.failoverManager = failoverManager;
+ this.executor = executor;
+ }
+
+ // Public --------------------------------------------------------
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#replicate(byte[], org.hornetq.core.replication.ReplicationToken)
+ */
+
+ public void appendAddRecord(final byte journalID, final long id, final byte recordType, final EncodingSupport record)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationAddMessage(journalID, false, id, recordType, record));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendUpdateRecord(byte, long, byte, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendUpdateRecord(final byte journalID,
+ final long id,
+ final byte recordType,
+ final EncodingSupport record) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationAddMessage(journalID, true, id, recordType, record));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendDeleteRecord(byte, long, boolean)
+ */
+ public void appendDeleteRecord(final byte journalID, final long id) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationDeleteMessage(journalID, id));
+ }
+ }
+
+ public void appendAddRecordTransactional(final byte journalID,
+ final long txID,
+ final long id,
+ final byte recordType,
+ final EncodingSupport record) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationAddTXMessage(journalID, false, txID, id, recordType, record));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendUpdateRecordTransactional(byte, long, long, byte, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendUpdateRecordTransactional(final byte journalID,
+ final long txID,
+ final long id,
+ final byte recordType,
+ final EncodingSupport record) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationAddTXMessage(journalID, true, txID, id, recordType, record));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendCommitRecord(byte, long, boolean)
+ */
+ public void appendCommitRecord(final byte journalID, final long txID) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationCommitMessage(journalID, false, txID));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendDeleteRecordTransactional(byte, long, long, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendDeleteRecordTransactional(final byte journalID,
+ final long txID,
+ final long id,
+ final EncodingSupport record) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationDeleteTXMessage(journalID, txID, id, record));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendDeleteRecordTransactional(byte, long, long)
+ */
+ public void appendDeleteRecordTransactional(final byte journalID, final long txID, final long id) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationDeleteTXMessage(journalID, txID, id, NullEncoding.instance));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendPrepareRecord(byte, long, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendPrepareRecord(final byte journalID, final long txID, final EncodingSupport transactionData) throws Exception
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationPrepareMessage(journalID, txID, transactionData));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#appendRollbackRecord(byte, long, boolean)
+ */
+ public void appendRollbackRecord(final byte journalID, final long txID) throws Exception
+ {
+ if (enabled)
+ {
+ // FIX: the rollback flag was false, which replicated a rollback as a
+ // commit on the backup (the endpoint dispatches on isRollback()).
+ sendReplicatePacket(new ReplicationCommitMessage(journalID, true, txID));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#pageClosed(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageClosed(final SimpleString storeName, final int pageNumber)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationPageEventMessage(storeName, pageNumber, false));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#pageDeleted(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageDeleted(final SimpleString storeName, final int pageNumber)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationPageEventMessage(storeName, pageNumber, true));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#pageWrite(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageWrite(final PagedMessage message, final int pageNumber)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationPageWriteMessage(message, pageNumber));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#largeMessageBegin(byte[])
+ */
+ public void largeMessageBegin(long messageId)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationLargeMessageBeingMessage(messageId));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#largeMessageDelete(long)
+ */
+ public void largeMessageDelete(long messageId)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationLargemessageEndMessage(messageId, true));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#largeMessageEnd(long)
+ */
+ public void largeMessageEnd(long messageId)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationLargemessageEndMessage(messageId, false));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#largeMessageWrite(long, byte[])
+ */
+ public void largeMessageWrite(long messageId, byte[] body)
+ {
+ if (enabled)
+ {
+ sendReplicatePacket(new ReplicationLargeMessageWriteMessage(messageId, body));
+ }
+ }
+
+
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#isStarted()
+ */
+ public synchronized boolean isStarted()
+ {
+ return started;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#start()
+ *
+ * Opens the replication channel to the backup and blocks until the backup
+ * acknowledges the CreateReplicationSessionMessage.
+ */
+ public synchronized void start() throws Exception
+ {
+ connection = failoverManager.getConnection();
+
+ long channelID = connection.generateChannelID();
+
+ Channel mainChannel = connection.getChannel(1, -1, false);
+
+ replicatingChannel = connection.getChannel(channelID, WINDOW_SIZE, false);
+
+ replicatingChannel.setHandler(responseHandler);
+
+ CreateReplicationSessionMessage replicationStartPackage = new CreateReplicationSessionMessage(channelID,
+ WINDOW_SIZE);
+
+ mainChannel.sendBlocking(replicationStartPackage);
+
+ started = true;
+
+ enabled = true;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#stop()
+ */
+ public void stop() throws Exception
+ {
+ if (replicatingChannel != null)
+ {
+ replicatingChannel.close();
+ }
+
+ started = false;
+
+ if (connection != null)
+ {
+ connection.destroy();
+ }
+
+ connection = null;
+ }
+
+ /**
+ * Returns the current thread's replication context, creating and
+ * registering one on first use.
+ */
+ public ReplicationContext getContext()
+ {
+ ReplicationContext token = repliToken.get();
+ if (token == null)
+ {
+ token = new ReplicationContextImpl(executor);
+ activeTokens.add(token);
+ repliToken.set(token);
+ }
+ return token;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#addReplicationAction(java.lang.Runnable)
+ */
+ public void afterReplicated(final Runnable runnable)
+ {
+ getContext().addReplicationAction(runnable);
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#completeToken()
+ */
+ public void closeContext()
+ {
+ final ReplicationContext token = repliToken.get();
+ if (token != null)
+ {
+ // Disassociate thread local
+ repliToken.set(null);
+ // Remove from pending tokens as soon as this is complete
+ token.addReplicationAction(new Runnable()
+ {
+ public void run()
+ {
+ activeTokens.remove(token);
+ }
+ });
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.replication.ReplicationManager#getPendingTokens()
+ */
+ public Set<ReplicationContext> getActiveTokens()
+ {
+ return activeTokens;
+ }
+
+ // Lines the context up, then either sends the packet (normal case) or,
+ // when replication has been disabled, confirms the context immediately
+ // outside the lock so the queued actions still run.
+ private void sendReplicatePacket(final Packet packet)
+ {
+ boolean runItNow = false;
+
+ ReplicationContext repliToken = getContext();
+ repliToken.linedUp();
+
+ synchronized (replicationLock)
+ {
+ if (!enabled)
+ {
+ // Already replicating channel failed, so just play the action now
+
+ runItNow = true;
+ }
+ else
+ {
+ pendingTokens.add(repliToken);
+
+ replicatingChannel.send(packet);
+ }
+ }
+
+ // Execute outside lock
+
+ if (runItNow)
+ {
+ repliToken.replicated();
+ }
+ }
+
+ // Confirms the oldest pending context; responses arrive in send order.
+ private void replicated()
+ {
+ ReplicationContext tokenPolled = pendingTokens.poll();
+ if (tokenPolled == null)
+ {
+ // We should debug the logs if this happens
+ log.warn("Missing replication token on the stack. There is a bug on the ReplicationManager since this was not supposed to happen");
+ }
+ else
+ {
+ tokenPolled.replicated();
+ }
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+ protected class ResponseHandler implements ChannelHandler
+ {
+ /* (non-Javadoc)
+ * @see org.hornetq.core.remoting.ChannelHandler#handlePacket(org.hornetq.core.remoting.Packet)
+ */
+ public void handlePacket(final Packet packet)
+ {
+ if (packet.getType() == PacketImpl.REPLICATION_RESPONSE)
+ {
+ replicated();
+ }
+ }
+
+ }
+
+ // Zero-length EncodingSupport used when a delete carries no record data.
+ private static class NullEncoding implements EncodingSupport
+ {
+
+ static NullEncoding instance = new NullEncoding();
+
+ public void decode(final HornetQBuffer buffer)
+ {
+ }
+
+ public void encode(final HornetQBuffer buffer)
+ {
+ }
+
+ public int getEncodeSize()
+ {
+ return 0;
+ }
+
+ }
+
+}
Modified: trunk/src/main/org/hornetq/core/server/HornetQServer.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/HornetQServer.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/server/HornetQServer.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -19,14 +19,17 @@
import javax.management.MBeanServer;
import org.hornetq.core.config.Configuration;
+import org.hornetq.core.exception.HornetQException;
import org.hornetq.core.management.ManagementService;
import org.hornetq.core.management.impl.HornetQServerControlImpl;
import org.hornetq.core.persistence.StorageManager;
import org.hornetq.core.postoffice.PostOffice;
+import org.hornetq.core.remoting.Channel;
import org.hornetq.core.remoting.RemotingConnection;
import org.hornetq.core.remoting.impl.wireformat.CreateSessionResponseMessage;
import org.hornetq.core.remoting.impl.wireformat.ReattachSessionResponseMessage;
import org.hornetq.core.remoting.server.RemotingService;
+import org.hornetq.core.replication.ReplicationEndpoint;
import org.hornetq.core.security.HornetQSecurityManager;
import org.hornetq.core.security.Role;
import org.hornetq.core.server.cluster.ClusterManager;
@@ -36,7 +39,6 @@
import org.hornetq.core.version.Version;
import org.hornetq.utils.ExecutorFactory;
import org.hornetq.utils.SimpleString;
-import org.hornetq.utils.UUID;
/**
* This interface defines the internal interface of the HornetQ Server exposed to other components of the server. The
@@ -69,6 +71,8 @@
void unregisterActivateCallback(ActivateCallback callback);
ReattachSessionResponseMessage reattachSession(RemotingConnection connection, String name, int lastReceivedCommandID) throws Exception;
+
+ ReplicationEndpoint createReplicationEndpoint(Channel channel) throws Exception;
CreateSessionResponseMessage createSession(String name,
long channelID,
Modified: trunk/src/main/org/hornetq/core/server/cluster/impl/Redistributor.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/cluster/impl/Redistributor.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/server/cluster/impl/Redistributor.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -145,18 +145,38 @@
tx.commit();
- count++;
-
- if (count == batchSize)
+
+ Runnable action = new Runnable()
{
- // We continue the next batch on a different thread, so as not to keep the delivery thread busy for a very
- // long time in the case there are many messages in the queue
- active = false;
-
- executor.execute(new Prompter());
-
- count = 0;
+ public void run()
+ {
+
+ count++;
+
+ if (count == batchSize)
+ {
+ // We continue the next batch on a different thread, so as not to keep the delivery thread busy for a very
+ // long time in the case there are many messages in the queue
+ active = false;
+
+
+ executor.execute(new Prompter());
+
+ count = 0;
+ }
+
+ }
+ };
+
+ if (storageManager.isReplicated())
+ {
+ storageManager.afterReplicated(action);
+ storageManager.completeReplication();
}
+ else
+ {
+ action.run();
+ }
}
private class Prompter implements Runnable
Modified: trunk/src/main/org/hornetq/core/server/impl/HornetQPacketHandler.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/impl/HornetQPacketHandler.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/server/impl/HornetQPacketHandler.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -16,6 +16,7 @@
import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.CREATESESSION;
import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.CREATE_QUEUE;
import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.REATTACH_SESSION;
+import static org.hornetq.core.remoting.impl.wireformat.PacketImpl.CREATE_REPLICATION;
import org.hornetq.core.exception.HornetQException;
import org.hornetq.core.logging.Logger;
@@ -24,9 +25,12 @@
import org.hornetq.core.remoting.Packet;
import org.hornetq.core.remoting.RemotingConnection;
import org.hornetq.core.remoting.impl.wireformat.CreateQueueMessage;
+import org.hornetq.core.remoting.impl.wireformat.CreateReplicationSessionMessage;
import org.hornetq.core.remoting.impl.wireformat.CreateSessionMessage;
import org.hornetq.core.remoting.impl.wireformat.HornetQExceptionMessage;
+import org.hornetq.core.remoting.impl.wireformat.NullResponseMessage;
import org.hornetq.core.remoting.impl.wireformat.ReattachSessionMessage;
+import org.hornetq.core.replication.ReplicationEndpoint;
import org.hornetq.core.server.HornetQServer;
/**
@@ -91,6 +95,16 @@
break;
}
+ case CREATE_REPLICATION:
+ {
+ // Create queue can also be fielded here in the case of a replicated store and forward queue creation
+
+ CreateReplicationSessionMessage request = (CreateReplicationSessionMessage)packet;
+
+ handleCreateReplication(request);
+
+ break;
+ }
default:
{
log.error("Invalid packet " + packet);
@@ -174,6 +188,38 @@
log.error("Failed to handle create queue", e);
}
}
+
+ private void handleCreateReplication(CreateReplicationSessionMessage request)
+ {
+ Packet response;
+ try
+ {
+ Channel channel = connection.getChannel(request.getSessionChannelID(), request.getWindowSize(), false);
+ ReplicationEndpoint endpoint = server.createReplicationEndpoint(channel);
+ channel.setHandler(endpoint);
+ response = new NullResponseMessage();
+
+ }
+ catch (Exception e)
+ {
+ log.warn(e.getMessage(), e);
+
+ if (e instanceof HornetQException)
+ {
+ response = new HornetQExceptionMessage((HornetQException)e);
+ }
+ else
+ {
+ response = new HornetQExceptionMessage(new HornetQException(HornetQException.INTERNAL_ERROR));
+ }
+ }
+
+ channel1.send(response);
+ }
+
+
+
+
}
\ No newline at end of file
Modified: trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/server/impl/HornetQServerImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -31,8 +31,12 @@
import javax.management.MBeanServer;
+import org.hornetq.core.client.ClientSessionFactory;
+import org.hornetq.core.client.impl.ClientSessionFactoryImpl;
import org.hornetq.core.client.impl.FailoverManager;
+import org.hornetq.core.client.impl.FailoverManagerImpl;
import org.hornetq.core.config.Configuration;
+import org.hornetq.core.config.TransportConfiguration;
import org.hornetq.core.config.cluster.DivertConfiguration;
import org.hornetq.core.config.cluster.QueueConfiguration;
import org.hornetq.core.config.impl.ConfigurationImpl;
@@ -65,11 +69,16 @@
import org.hornetq.core.postoffice.impl.LocalQueueBinding;
import org.hornetq.core.postoffice.impl.PostOfficeImpl;
import org.hornetq.core.remoting.Channel;
+import org.hornetq.core.remoting.Interceptor;
import org.hornetq.core.remoting.RemotingConnection;
import org.hornetq.core.remoting.impl.wireformat.CreateSessionResponseMessage;
import org.hornetq.core.remoting.impl.wireformat.ReattachSessionResponseMessage;
import org.hornetq.core.remoting.server.RemotingService;
import org.hornetq.core.remoting.server.impl.RemotingServiceImpl;
+import org.hornetq.core.replication.ReplicationEndpoint;
+import org.hornetq.core.replication.ReplicationManager;
+import org.hornetq.core.replication.impl.ReplicationEndpointImpl;
+import org.hornetq.core.replication.impl.ReplicationManagerImpl;
import org.hornetq.core.security.CheckType;
import org.hornetq.core.security.HornetQSecurityManager;
import org.hornetq.core.security.Role;
@@ -182,7 +191,11 @@
private boolean initialised;
- private FailoverManager replicatingFailoverManager;
+ private FailoverManager replicationFailoverManager;
+
+ private ReplicationManager replicationManager;
+
+ private ReplicationEndpoint replicationEndpoint;
private final Set<ActivateCallback> activateCallbacks = new HashSet<ActivateCallback>();
@@ -338,6 +351,12 @@
{
storageManager.stop();
}
+
+ if (replicationEndpoint != null)
+ {
+ replicationEndpoint.stop();
+ replicationEndpoint = null;
+ }
if (securityManager != null)
{
@@ -584,6 +603,24 @@
return new CreateSessionResponseMessage(version.getIncrementingVersion());
}
+
+ public synchronized ReplicationEndpoint createReplicationEndpoint(final Channel channel) throws Exception
+ {
+ if (!configuration.isBackup())
+ {
+ throw new HornetQException(HornetQException.ILLEGAL_STATE, "Connected server is not a backup server");
+ }
+
+ if (replicationEndpoint == null)
+ {
+ replicationEndpoint = new ReplicationEndpointImpl(this);
+ replicationEndpoint.setChannel(channel);
+ replicationEndpoint.start();
+ }
+
+
+ return replicationEndpoint;
+ }
public void removeSession(final String name) throws Exception
{
@@ -659,83 +696,45 @@
// }
// }
- // private boolean setupReplicatingConnection() throws Exception
- // {
- // String backupConnectorName = configuration.getBackupConnectorName();
- //
- // if (backupConnectorName != null)
- // {
- // TransportConfiguration backupConnector = configuration.getConnectorConfigurations().get(backupConnectorName);
- //
- // if (backupConnector == null)
- // {
- // log.warn("connector with name '" + backupConnectorName + "' is not defined in the configuration.");
- // }
- // else
- // {
- // replicatingConnectionManager = new ConnectionManagerImpl(null,
- // backupConnector,
- // null,
- // false,
- // 1,
- // ClientSessionFactoryImpl.DEFAULT_CALL_TIMEOUT,
- // ClientSessionFactoryImpl.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD,
- // ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL,
- // 0,
- // 1.0d,
- // 0,
- // threadPool,
- // scheduledPool);
- //
- // replicatingConnection = replicatingConnectionManager.getConnection(1);
- //
- // if (replicatingConnection != null)
- // {
- // replicatingChannel = replicatingConnection.getChannel(2, -1, false);
- //
- // replicatingConnection.addFailureListener(new FailureListener()
- // {
- // public void connectionFailed(HornetQException me)
- // {
- // replicatingChannel.executeOutstandingDelayedResults();
- // }
- // });
- //
- // // First time we get channel we send a message down it informing the backup of our node id -
- // // backup and live must have the same node id
- //
- // Packet packet = new ReplicateStartupInfoMessage(uuid, storageManager.getCurrentUniqueID());
- //
- // final Future future = new Future();
- //
- // replicatingChannel.replicatePacket(packet, 1, new Runnable()
- // {
- // public void run()
- // {
- // future.run();
- // }
- // });
- //
- // // This may take a while especially if the journal is large
- // boolean ok = future.await(60000);
- //
- // if (!ok)
- // {
- // throw new IllegalStateException("Timed out waiting for response from backup for initialisation");
- // }
- // }
- // else
- // {
- // log.warn("Backup server MUST be started before live server. Initialisation will not proceed.");
- //
- // return false;
- // }
- // }
- // }
- //
- // return true;
- // }
+ private boolean startReplication() throws Exception
+ {
+ String backupConnectorName = configuration.getBackupConnectorName();
+ if (backupConnectorName != null)
+ {
+ TransportConfiguration backupConnector = configuration.getConnectorConfigurations().get(backupConnectorName);
+
+ if (backupConnector == null)
+ {
+ log.warn("connector with name '" + backupConnectorName + "' is not defined in the configuration.");
+ }
+ else
+ {
+
+ replicationFailoverManager = new FailoverManagerImpl((ClientSessionFactory)null,
+ backupConnector,
+ null,
+ false,
+ ClientSessionFactoryImpl.DEFAULT_CALL_TIMEOUT,
+ ClientSessionFactoryImpl.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD,
+ ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL,
+ 0,
+ 1.0d,
+ 0,
+ 1,
+ threadPool,
+ scheduledPool,
+ null);
+
+
+ this.replicationManager = new ReplicationManagerImpl(replicationFailoverManager, this.executorFactory.getExecutor());
+ replicationManager.start();
+ }
+ }
+
+ return true;
+ }
+
public HornetQServerControlImpl getHornetQServerControl()
{
return messagingServerControl;
@@ -864,7 +863,7 @@
{
if (configuration.isPersistenceEnabled())
{
- return new JournalStorageManager(configuration, threadPool);
+ return new JournalStorageManager(configuration, threadPool, replicationManager);
}
else
{
@@ -889,19 +888,24 @@
{
// Handle backup server activation
- if (configuration.isSharedStore())
+ if (!configuration.isSharedStore())
{
- // Complete the startup procedure
+ if (replicationEndpoint == null)
+ {
+ log.warn("There is no replication endpoint, can't activate this backup server");
+ throw new HornetQException(HornetQException.INTERNAL_ERROR, "Can't activate the server");
+ }
+
+ replicationEndpoint.stop();
+ }
+
+ // Complete the startup procedure
- configuration.setBackup(false);
+ log.info("Activating server");
- initialisePart2();
- }
- else
- {
- // TODO
- // just load journal
- }
+ configuration.setBackup(false);
+
+ initialisePart2();
}
return true;
@@ -960,6 +964,9 @@
deploymentManager = new FileDeploymentManager(configuration.getFileDeployerScanPeriod());
}
+
+ startReplication();
+
this.storageManager = createStorageManager();
securityRepository = new HierarchicalObjectRepository<Set<Role>>();
Modified: trunk/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/server/impl/ServerSessionImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -161,7 +161,6 @@
private final SimpleString managementAddress;
// The current currentLargeMessage being processed
- // In case of replication, currentLargeMessage should only be accessed within the replication callbacks
private volatile LargeServerMessage currentLargeMessage;
private ServerSessionPacketHandler handler;
@@ -414,9 +413,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleCreateQueue(final CreateQueueMessage packet)
@@ -489,9 +486,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleDeleteQueue(final SessionDeleteQueueMessage packet)
@@ -527,9 +522,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleExecuteQueueQuery(final SessionQueueQueryMessage packet)
@@ -584,9 +577,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleExecuteBindingQuery(final SessionBindingQueryMessage packet)
@@ -630,9 +621,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleForceConsumerDelivery(SessionForceConsumerDelivery message)
@@ -681,12 +670,7 @@
}
}
- channel.confirm(packet);
-
- if (response != null)
- {
- channel.send(response);
- }
+ sendResponse(packet, response, false, false);
}
public void handleExpired(final SessionExpiredMessage packet)
@@ -705,7 +689,8 @@
log.error("Failed to acknowledge", e);
}
- channel.confirm(packet);
+
+ sendResponse(packet, null, false, false);
}
public void handleCommit(final Packet packet)
@@ -736,9 +721,7 @@
tx = new TransactionImpl(storageManager);
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleRollback(final RollbackMessage packet)
@@ -765,9 +748,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXACommit(final SessionXACommitMessage packet)
@@ -839,9 +820,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXAEnd(final SessionXAEndMessage packet)
@@ -913,9 +892,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXAForget(final SessionXAForgetMessage packet)
@@ -940,9 +917,7 @@
Packet response = new SessionXAResponseMessage((code != XAResource.XA_OK), code, null);
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXAJoin(final SessionXAJoinMessage packet)
@@ -991,9 +966,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXAResume(final SessionXAResumeMessage packet)
@@ -1053,9 +1026,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXARollback(final SessionXARollbackMessage packet)
@@ -1127,9 +1098,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXAStart(final SessionXAStartMessage packet)
@@ -1178,9 +1147,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXASuspend(final Packet packet)
@@ -1227,9 +1194,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleXAPrepare(final SessionXAPrepareMessage packet)
@@ -1287,9 +1252,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleGetInDoubtXids(final Packet packet)
@@ -1300,34 +1263,28 @@
indoubtsXids.addAll(resourceManager.getHeuristicRolledbackTransactions());
Packet response = new SessionXAGetInDoubtXidsResponseMessage(indoubtsXids);
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleGetXATimeout(final Packet packet)
{
Packet response = new SessionXAGetTimeoutResponseMessage(resourceManager.getTimeoutSeconds());
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleSetXATimeout(final SessionXASetTimeoutMessage packet)
{
Packet response = new SessionXASetTimeoutResponseMessage(resourceManager.setTimeoutSeconds(packet.getTimeoutSeconds()));
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleStart(final Packet packet)
{
setStarted(true);
- channel.confirm(packet);
+ sendResponse(packet, null, false, false);
}
public void handleStop(final Packet packet)
@@ -1336,9 +1293,7 @@
setStarted(false);
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleClose(final Packet packet)
@@ -1365,14 +1320,8 @@
}
}
- channel.confirm(packet);
+ sendResponse(packet, response, true, true);
- // We flush the confirmations to make sure any send confirmations get handled on the client side
- channel.flushConfirmations();
-
- channel.send(response);
-
- channel.close();
}
public void handleCloseConsumer(final SessionConsumerCloseMessage packet)
@@ -1408,9 +1357,7 @@
}
}
- channel.confirm(packet);
-
- channel.send(response);
+ sendResponse(packet, response, false, false);
}
public void handleReceiveConsumerCredits(final SessionConsumerFlowCreditMessage packet)
@@ -1432,14 +1379,17 @@
log.error("Failed to receive credits " + this.server.getConfiguration().isBackup(), e);
}
- channel.confirm(packet);
+
+ sendResponse(packet, null, false, false);
}
public void handleSendLargeMessage(final SessionSendLargeMessage packet)
{
// need to create the LargeMessage before continue
- final LargeServerMessage msg = doCreateLargeMessage(packet);
+ long id = storageManager.generateUniqueID();
+ final LargeServerMessage msg = doCreateLargeMessage(id, packet);
+
if (msg == null)
{
// packet logged an error, and played with channel.returns... and nothing needs to be done now
@@ -1456,18 +1406,7 @@
currentLargeMessage = msg;
- try
- {
- long id = storageManager.generateUniqueID();
-
- currentLargeMessage.setMessageID(id);
- }
- catch (Exception e)
- {
- log.error("Failed to send message", e);
- }
-
- channel.confirm(packet);
+ sendResponse(packet, null, false, false);
}
public void handleSend(final SessionSendMessage packet)
@@ -1514,13 +1453,8 @@
}
}
}
-
- channel.confirm(packet);
-
- if (response != null)
- {
- channel.send(response);
- }
+
+ sendResponse(packet, response, false, false);
}
public void handleSendContinuations(final SessionSendContinuationMessage packet)
@@ -1569,12 +1503,7 @@
}
}
- channel.confirm(packet);
-
- if (response != null)
- {
- channel.send(response);
- }
+ sendResponse(packet, response, false, false);
}
public int transferConnection(final RemotingConnection newConnection, final int lastReceivedCommandID)
@@ -1682,6 +1611,63 @@
// Private
// ----------------------------------------------------------------------------
+ /**
+ * Respond to client after replication
+ * @param packet
+ * @param response
+ */
+ private void sendResponse(final Packet confirmPacket, final Packet response, final boolean flush, final boolean closeChannel)
+ {
+ if (storageManager.isReplicated())
+ {
+ storageManager.afterReplicated(new Runnable()
+ {
+ public void run()
+ {
+ doSendResponse(confirmPacket, response, flush, closeChannel);
+ }
+
+ });
+ storageManager.completeReplication();
+ }
+ else
+ {
+ doSendResponse(confirmPacket, response, flush, closeChannel);
+ }
+ }
+
+ /**
+ * @param confirmPacket
+ * @param response
+ * @param flush
+ * @param closeChannel
+ */
+ private void doSendResponse(final Packet confirmPacket,
+ final Packet response,
+ final boolean flush,
+ final boolean closeChannel)
+ {
+ if (confirmPacket != null)
+ {
+ channel.confirm(confirmPacket);
+ if (flush)
+ {
+ channel.flushConfirmations();
+ }
+ }
+
+ if (response != null)
+ {
+ channel.send(response);
+ }
+
+ if (closeChannel)
+ {
+ channel.close();
+ }
+ }
+
+
private void setStarted(final boolean s)
{
Set<ServerConsumer> consumersClone = new HashSet<ServerConsumer>(consumers.values());
@@ -1700,11 +1686,11 @@
* @param packet
* @throws Exception
*/
- private LargeServerMessage doCreateLargeMessage(final SessionSendLargeMessage packet)
+ private LargeServerMessage doCreateLargeMessage(long id, final SessionSendLargeMessage packet)
{
try
{
- return createLargeMessageStorage(packet.getLargeMessageHeader());
+ return createLargeMessageStorage(id, packet.getLargeMessageHeader());
}
catch (Exception e)
{
@@ -1747,15 +1733,9 @@
}
}
- private LargeServerMessage createLargeMessageStorage(final byte[] header) throws Exception
+ private LargeServerMessage createLargeMessageStorage(final long id, final byte[] header) throws Exception
{
- LargeServerMessage largeMessage = storageManager.createLargeMessage();
-
- HornetQBuffer headerBuffer = ChannelBuffers.wrappedBuffer(header);
-
- largeMessage.decodeProperties(headerBuffer);
-
- return largeMessage;
+ return storageManager.createLargeMessage(id, header);
}
private void doRollback(final boolean lastMessageAsDelived, final Transaction theTx) throws Exception
Modified: trunk/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/src/main/org/hornetq/core/transaction/impl/TransactionImpl.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -206,21 +206,51 @@
operation.beforeCommit(this);
}
}
+
+ // TODO: Verify Exception handling here with Tim
+ Runnable execAfterCommit = null;
+
+ if (operations != null)
+ {
+ execAfterCommit = new Runnable()
+ {
+ public void run()
+ {
+ for (TransactionOperation operation : operations)
+ {
+ try
+ {
+ operation.afterCommit(TransactionImpl.this);
+ }
+ catch (Exception e)
+ {
+ log.warn(e.getMessage(), e);
+ }
+ }
+ }
+ };
+ }
if ((getProperty(TransactionPropertyIndexes.CONTAINS_PERSISTENT) != null) || (xid != null && state == State.PREPARED))
{
storageManager.commit(id);
- }
-
- state = State.COMMITTED;
-
- if (operations != null)
- {
- for (TransactionOperation operation : operations)
+ state = State.COMMITTED;
+ if (execAfterCommit != null)
{
- operation.afterCommit(this);
+ if (storageManager.isReplicated())
+ {
+ storageManager.afterReplicated(execAfterCommit);
+ }
+ else
+ {
+ execAfterCommit.run();
+ }
}
}
+ else if (execAfterCommit != null)
+ {
+ execAfterCommit.run();
+ }
}
}
Modified: trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/integration/client/PagingTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -17,6 +17,7 @@
import java.util.Map;
import junit.framework.AssertionFailedError;
+import junit.framework.TestSuite;
import org.hornetq.core.buffers.ChannelBuffers;
import org.hornetq.core.client.ClientConsumer;
@@ -46,7 +47,17 @@
*/
public class PagingTest extends ServiceTestBase
{
-
+
+ public PagingTest(String name)
+ {
+ super(name);
+ }
+
+ public PagingTest()
+ {
+ super();
+ }
+
// Constants -----------------------------------------------------
private static final Logger log = Logger.getLogger(PagingTest.class);
Modified: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/AsynchronousFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -406,35 +406,14 @@
@Override
protected TransportConfiguration getAcceptorTransportConfiguration(boolean live)
{
- if (live)
- {
- return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMAcceptorFactory");
- }
- else
- {
- Map<String, Object> server1Params = new HashMap<String, Object>();
-
- server1Params.put(TransportConstants.SERVER_ID_PROP_NAME, 1);
-
- return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMAcceptorFactory", server1Params);
- }
+ return getInVMTransportAcceptorConfiguration(live);
}
@Override
protected TransportConfiguration getConnectorTransportConfiguration(final boolean live)
{
- if (live)
- {
- return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMConnectorFactory");
- }
- else
- {
- Map<String, Object> server1Params = new HashMap<String, Object>();
-
- server1Params.put(TransportConstants.SERVER_ID_PROP_NAME, 1);
-
- return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMConnectorFactory", server1Params);
- }
+ return getInVMConnectorTransportConfiguration(live);
}
+
}
Modified: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -78,6 +78,18 @@
// Public --------------------------------------------------------
+ /**
+ * @param name
+ */
+ public FailoverTest(String name)
+ {
+ super(name);
+ }
+
+ public FailoverTest()
+ {
+ }
+
public void testNonTransacted() throws Exception
{
ClientSessionFactoryInternal sf = getSessionFactory();
@@ -109,23 +121,14 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
producer.send(message);
}
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
-
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
+ fail(session, latch);
log.info("got here 1");
@@ -143,7 +146,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -159,6 +162,26 @@
assertEquals(0, sf.numConnections());
}
+ /**
+ * @param session
+ * @param latch
+ * @throws InterruptedException
+ */
+ private void fail(ClientSession session, final CountDownLatch latch) throws InterruptedException
+ {
+
+ RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+
+ // Simulate failure on connection
+ conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
+
+ // Wait to be informed of failure
+
+ boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
+
+ assertTrue(ok);
+ }
+
public void testTransactedMessagesSentSoRollback() throws Exception
{
ClientSessionFactoryInternal sf = getSessionFactory();
@@ -190,24 +213,15 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
producer.send(message);
}
+
+ fail(session, latch);
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
-
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
try
{
session.commit();
@@ -265,7 +279,7 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -274,17 +288,8 @@
session.commit();
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
// committing again should work since didn't send anything since last commit
session.commit();
@@ -303,7 +308,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -351,7 +356,7 @@
{
ClientMessage message = session1.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -372,24 +377,15 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
message.acknowledge();
}
- RemotingConnection conn = ((ClientSessionInternal)session2).getConnection();
+ fail(session2, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
try
{
session2.commit();
@@ -441,7 +437,7 @@
{
ClientMessage message = session1.createClientMessage(true);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -462,7 +458,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -473,17 +469,8 @@
consumer.close();
- RemotingConnection conn = ((ClientSessionInternal)session2).getConnection();
+ fail(session2, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
consumer = session2.createConsumer(ADDRESS);
for (int i = numMessages / 2; i < numMessages; i++)
@@ -492,7 +479,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -545,24 +532,15 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
producer.send(message);
}
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
try
{
session.end(xid, XAResource.TMSUCCESS);
@@ -624,7 +602,7 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -633,17 +611,8 @@
session.end(xid, XAResource.TMSUCCESS);
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
try
{
session.prepare(xid);
@@ -706,7 +675,7 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -717,17 +686,8 @@
session.prepare(xid);
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
try
{
session.commit(xid, true);
@@ -789,7 +749,7 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -802,17 +762,8 @@
session.commit(xid, false);
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
ClientConsumer consumer = session.createConsumer(ADDRESS);
session.start();
@@ -831,7 +782,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -883,7 +834,7 @@
{
ClientMessage message = session1.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -908,24 +859,15 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
message.acknowledge();
}
- RemotingConnection conn = ((ClientSessionInternal)session2).getConnection();
+ fail(session2, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
try
{
session2.end(xid, XAResource.TMSUCCESS);
@@ -978,7 +920,7 @@
{
ClientMessage message = session1.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1003,7 +945,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -1079,7 +1021,7 @@
{
ClientMessage message = session1.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1104,7 +1046,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -1115,17 +1057,8 @@
session2.prepare(xid);
- RemotingConnection conn = ((ClientSessionInternal)session2).getConnection();
+ fail(session2, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
try
{
session2.commit(xid, true);
@@ -1243,7 +1176,7 @@
{
ClientMessage message = sendSession.createClientMessage(true);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1278,7 +1211,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -1334,7 +1267,7 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1351,22 +1284,13 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
}
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
for (int i = 0; i < numMessages; i++)
{
// Only the persistent messages will survive
@@ -1377,7 +1301,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -1423,7 +1347,7 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1440,22 +1364,13 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
}
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
// Should get the same ones after failover since we didn't ack
for (int i = 0; i < numMessages; i++)
@@ -1468,7 +1383,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -1515,7 +1430,7 @@
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1532,31 +1447,22 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
message.acknowledge();
}
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
// Send some more
for (int i = numMessages; i < numMessages * 2; i++)
{
ClientMessage message = session.createClientMessage(i % 2 == 0);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1571,7 +1477,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -1642,17 +1548,8 @@
Thread.sleep(500);
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- // Simulate failure on connection
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
sender.join();
assertNotNull(sender.e);
@@ -1706,7 +1603,7 @@
message.putStringProperty(MessageImpl.HDR_DUPLICATE_DETECTION_ID, new SimpleString(txID));
}
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1759,16 +1656,8 @@
Thread.sleep(500);
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
committer.join();
assertFalse(committer.failed);
@@ -1792,7 +1681,7 @@
message.putStringProperty(MessageImpl.HDR_DUPLICATE_DETECTION_ID, new SimpleString(txID));
}
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1811,7 +1700,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -1861,7 +1750,7 @@
{
ClientMessage message = session.createClientMessage(true);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1914,16 +1803,8 @@
Thread.sleep(500);
- RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+ fail(session, latch);
- conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
-
- // Wait to be informed of failure
-
- boolean ok = latch.await(1000, TimeUnit.MILLISECONDS);
-
- assertTrue(ok);
-
committer.join();
assertFalse(committer.failed);
@@ -1940,7 +1821,7 @@
{
ClientMessage message = session2.createClientMessage(true);
- message.getBody().writeString("message" + i);
+ setBody(i, message);
message.putIntProperty("counter", i);
@@ -1959,7 +1840,7 @@
assertNotNull(message);
- assertEquals("message" + i, message.getBody().readString());
+ assertMessageBody(i, message);
assertEquals(i, message.getProperty("counter"));
@@ -2015,6 +1896,26 @@
}
}
+
+ /**
+ * @param i
+ * @param message
+ */
+ protected void assertMessageBody(int i, ClientMessage message)
+ {
+ assertEquals("message" + i, message.getBody().readString());
+ }
+
+ /**
+ * @param i
+ * @param message
+ * @throws Exception
+ */
+ protected void setBody(int i, ClientMessage message) throws Exception
+ {
+ message.getBody().writeString("message" + i);
+ }
+
// Private -------------------------------------------------------
private ClientSession sendAndConsume(final ClientSessionFactory sf) throws Exception
Modified: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/FailoverTestBase.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -13,12 +13,16 @@
package org.hornetq.tests.integration.cluster.failover;
+import java.util.HashMap;
+import java.util.Map;
+
import org.hornetq.core.client.impl.ClientSessionFactoryImpl;
import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
import org.hornetq.core.config.Configuration;
import org.hornetq.core.config.TransportConfiguration;
import org.hornetq.core.remoting.impl.invm.InVMConnector;
import org.hornetq.core.remoting.impl.invm.InVMRegistry;
+import org.hornetq.core.remoting.impl.invm.TransportConstants;
import org.hornetq.core.server.HornetQServer;
import org.hornetq.tests.util.ServiceTestBase;
import org.hornetq.utils.SimpleString;
@@ -33,11 +37,11 @@
public abstract class FailoverTestBase extends ServiceTestBase
{
// Constants -----------------------------------------------------
-
+
protected static final SimpleString ADDRESS = new SimpleString("FailoverTestAddress");
// Attributes ----------------------------------------------------
-
+
protected HornetQServer server0Service;
protected HornetQServer server1Service;
@@ -51,33 +55,83 @@
// Package protected ---------------------------------------------
// Protected -----------------------------------------------------
+
+ /**
+ * @param name
+ */
+ public FailoverTestBase(String name)
+ {
+ super(name);
+ }
+ public FailoverTestBase()
+ {
+ }
+
protected void setUp() throws Exception
{
super.setUp();
+
+ createConfigs();
+
+ if (server1Service != null)
+ {
+ server1Service.start();
+ }
+ server0Service.start();
+ }
+
+ /**
+ * @throws Exception
+ */
+ protected void createConfigs() throws Exception
+ {
Configuration config1 = super.createDefaultConfig();
config1.getAcceptorConfigurations().clear();
- config1.getAcceptorConfigurations()
- .add(getAcceptorTransportConfiguration(false));
+ config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(false));
config1.setSecurityEnabled(false);
config1.setSharedStore(true);
config1.setBackup(true);
+ server1Service = createServer(true, config1);
+
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(true);
+ server0Service = createServer(true, config0);
+
+ }
+
+ protected void createReplicatedConfigs() throws Exception
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_backup");
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_backup");
+ config1.setPagingDirectory(config1.getPagingDirectory() + "_backup");
+ config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_backup");
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(false));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(false);
+ config1.setBackup(true);
server1Service = super.createServer(true, config1);
Configuration config0 = super.createDefaultConfig();
config0.getAcceptorConfigurations().clear();
- config0.getAcceptorConfigurations()
- .add(getAcceptorTransportConfiguration(true));
+ config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
+
+ config0.getConnectorConfigurations().put("toBackup", getConnectorTransportConfiguration(false));
+ config0.setBackupConnectorName("toBackup");
config0.setSecurityEnabled(false);
- config0.setSharedStore(true);
+ config0.setSharedStore(false);
server0Service = super.createServer(true, config0);
server1Service.start();
server0Service.start();
}
-
-
+
protected void tearDown() throws Exception
{
server1Service.stop();
@@ -89,19 +143,88 @@
server1Service = null;
server0Service = null;
-
+
InVMConnector.failOnCreateConnection = false;
super.tearDown();
}
-
+
+ protected TransportConfiguration getInVMConnectorTransportConfiguration(final boolean live)
+ {
+ if (live)
+ {
+ return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMConnectorFactory");
+ }
+ else
+ {
+ Map<String, Object> server1Params = new HashMap<String, Object>();
+
+ server1Params.put(TransportConstants.SERVER_ID_PROP_NAME, 1);
+
+ return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMConnectorFactory", server1Params);
+ }
+ }
+
+ protected TransportConfiguration getInVMTransportAcceptorConfiguration(boolean live)
+ {
+ if (live)
+ {
+ return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMAcceptorFactory");
+ }
+ else
+ {
+ Map<String, Object> server1Params = new HashMap<String, Object>();
+
+ server1Params.put(TransportConstants.SERVER_ID_PROP_NAME, 1);
+
+ return new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMAcceptorFactory", server1Params);
+ }
+ }
+
+ protected TransportConfiguration getNettyAcceptorTransportConfiguration(boolean live)
+ {
+ if (live)
+ {
+ return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyAcceptorFactory");
+ }
+ else
+ {
+ Map<String, Object> server1Params = new HashMap<String, Object>();
+
+ server1Params.put(org.hornetq.integration.transports.netty.TransportConstants.PORT_PROP_NAME,
+ org.hornetq.integration.transports.netty.TransportConstants.DEFAULT_PORT + 1);
+
+ return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyAcceptorFactory",
+ server1Params);
+ }
+ }
+
+ protected TransportConfiguration getNettyConnectorTransportConfiguration(final boolean live)
+ {
+ if (live)
+ {
+ return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyConnectorFactory");
+ }
+ else
+ {
+ Map<String, Object> server1Params = new HashMap<String, Object>();
+
+ server1Params.put(org.hornetq.integration.transports.netty.TransportConstants.PORT_PROP_NAME,
+ org.hornetq.integration.transports.netty.TransportConstants.DEFAULT_PORT + 1);
+
+ return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyConnectorFactory",
+ server1Params);
+ }
+ }
+
protected abstract TransportConfiguration getAcceptorTransportConfiguration(boolean live);
-
+
protected abstract TransportConfiguration getConnectorTransportConfiguration(final boolean live);
-
+
protected ClientSessionFactoryInternal getSessionFactory()
{
- return new ClientSessionFactoryImpl(getConnectorTransportConfiguration(true), getConnectorTransportConfiguration(false));
+ return new ClientSessionFactoryImpl(getConnectorTransportConfiguration(true),
+ getConnectorTransportConfiguration(false));
}
// Private -------------------------------------------------------
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/LargeMessageFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/LargeMessageFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/LargeMessageFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import junit.framework.TestSuite;
+
+import org.hornetq.core.client.ClientMessage;
+import org.hornetq.core.client.impl.ClientSessionFactoryImpl;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+
+/**
+ * A LargeMessageFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class LargeMessageFailoverTest extends FailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ public static TestSuite suite()
+ {
+ TestSuite suite = new TestSuite();
+
+ suite.addTest(new LargeMessageFailoverTest("testNonTransacted"));
+ suite.addTest(new LargeMessageFailoverTest("testTransactedMessagesSentSoRollback"));
+ suite.addTest(new LargeMessageFailoverTest("testTransactedMessagesNotSentSoNoRollback"));
+ suite.addTest(new LargeMessageFailoverTest("testTransactedMessagesConsumedSoRollback"));
+ suite.addTest(new LargeMessageFailoverTest("testTransactedMessagesNotConsumedSoNoRollback"));
+ suite.addTest(new LargeMessageFailoverTest("testXAMessagesSentSoRollbackOnEnd"));
+ suite.addTest(new LargeMessageFailoverTest("testXAMessagesSentSoRollbackOnPrepare"));
+ suite.addTest(new LargeMessageFailoverTest("testXAMessagesSentSoRollbackOnCommit"));
+ suite.addTest(new LargeMessageFailoverTest("testXAMessagesNotSentSoNoRollbackOnCommit"));
+ suite.addTest(new LargeMessageFailoverTest("testXAMessagesConsumedSoRollbackOnEnd"));
+ suite.addTest(new LargeMessageFailoverTest("testXAMessagesConsumedSoRollbackOnPrepare"));
+ suite.addTest(new LargeMessageFailoverTest("testXAMessagesConsumedSoRollbackOnCommit"));
+ suite.addTest(new LargeMessageFailoverTest("testCreateNewFactoryAfterFailover"));
+
+ // Those tests are temporarily disabled for LargeMessage
+ // suite.addTest(new LargeMessageFailoverTest("testFailoverMultipleSessionsWithConsumers"));
+ // suite.addTest(new LargeMessageFailoverTest("testFailWithBrowser"));
+ // suite.addTest(new LargeMessageFailoverTest("testFailThenReceiveMoreMessagesAfterFailover"));
+ // suite.addTest(new LargeMessageFailoverTest("testFailThenReceiveMoreMessagesAfterFailover2"));
+
+ suite.addTest(new LargeMessageFailoverTest("testForceBlockingReturn"));
+ suite.addTest(new LargeMessageFailoverTest("testCommitOccurredUnblockedAndResendNoDuplicates"));
+ suite.addTest(new LargeMessageFailoverTest("testCommitDidNotOccurUnblockedAndResend"));
+ return suite;
+ }
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ /**
+ * @param name
+ */
+ public LargeMessageFailoverTest(String name)
+ {
+ super(name);
+ }
+
+ /**
+ *
+ */
+ public LargeMessageFailoverTest()
+ {
+ super();
+ }
+
+ /**
+ * @param i
+ * @param message
+ */
+ protected void assertMessageBody(int i, ClientMessage message)
+ {
+ HornetQBuffer buffer = message.getBody();
+
+ for (int j = 0; j < ClientSessionFactoryImpl.DEFAULT_MIN_LARGE_MESSAGE_SIZE * 3; j++)
+ {
+ assertEquals(buffer.readByte(), getSamplebyte(j));
+ }
+ }
+
+ /**
+ * @param i
+ * @param message
+ */
+ protected void setBody(int i, ClientMessage message) throws Exception
+ {
+ message.setBodyInputStream(createFakeLargeStream(ClientSessionFactoryImpl.DEFAULT_MIN_LARGE_MESSAGE_SIZE * 3));
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Modified: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyAsynchronousFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -31,34 +31,12 @@
@Override
protected TransportConfiguration getAcceptorTransportConfiguration(boolean live)
{
- if (live)
- {
- return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyAcceptorFactory");
- }
- else
- {
- Map<String, Object> server1Params = new HashMap<String, Object>();
-
- server1Params.put(TransportConstants.PORT_PROP_NAME, TransportConstants.DEFAULT_PORT + 1);
-
- return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyAcceptorFactory", server1Params);
- }
+ return getNettyAcceptorTransportConfiguration(live);
}
@Override
protected TransportConfiguration getConnectorTransportConfiguration(final boolean live)
{
- if (live)
- {
- return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyConnectorFactory");
- }
- else
- {
- Map<String, Object> server1Params = new HashMap<String, Object>();
-
- server1Params.put(TransportConstants.PORT_PROP_NAME, TransportConstants.DEFAULT_PORT + 1);
-
- return new TransportConfiguration("org.hornetq.integration.transports.netty.NettyConnectorFactory", server1Params);
- }
+ return getNettyConnectorTransportConfiguration(live);
}
}
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyReplicatedFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyReplicatedFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/NettyReplicatedFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import org.hornetq.core.config.Configuration;
+
+/**
+ * A NettyReplicatedFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class NettyReplicatedFailoverTest extends NettyFailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ @Override
+ protected void createConfigs() throws Exception
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_backup");
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_backup");
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(false));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(false);
+ config1.setBackup(true);
+ server1Service = super.createServer(true, config1);
+
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
+
+ config0.getConnectorConfigurations().put("toBackup", getConnectorTransportConfiguration(false));
+ config0.setBackupConnectorName("toBackup");
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(false);
+ server0Service = super.createServer(true, config0);
+
+ server1Service.start();
+ server0Service.start();
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/PagingFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import java.util.HashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.hornetq.core.buffers.ChannelBuffers;
+import org.hornetq.core.client.ClientConsumer;
+import org.hornetq.core.client.ClientMessage;
+import org.hornetq.core.client.ClientProducer;
+import org.hornetq.core.client.ClientSession;
+import org.hornetq.core.client.impl.ClientSessionFactoryInternal;
+import org.hornetq.core.client.impl.ClientSessionInternal;
+import org.hornetq.core.config.Configuration;
+import org.hornetq.core.config.TransportConfiguration;
+import org.hornetq.core.exception.HornetQException;
+import org.hornetq.core.remoting.FailureListener;
+import org.hornetq.core.remoting.RemotingConnection;
+import org.hornetq.core.server.HornetQServer;
+import org.hornetq.core.settings.impl.AddressSettings;
+import org.hornetq.utils.SimpleString;
+
+/**
+ * A PagingFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class PagingFailoverTest extends FailoverTestBase
+{
+
+ // Constants -----------------------------------------------------
+
+ private static final int PAGE_MAX = 100 * 1024;
+
+ private static final int PAGE_SIZE = 10 * 1024;
+
+ static final SimpleString ADDRESS = new SimpleString("SimpleAddress");
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ public void testPage() throws Exception
+ {
+ internalTestPagedTransacted(false, false);
+ }
+
+
+ public void testPageTransactioned() throws Exception
+ {
+ internalTestPagedTransacted(true, false);
+ }
+
+ public void testPageTransactionedFailBeforeconsume() throws Exception
+ {
+ internalTestPagedTransacted(true, true);
+ }
+
+ public void internalTestPagedTransacted(final boolean transacted, final boolean failBeforeConsume) throws Exception
+ {
+ ClientSessionFactoryInternal factory = getSessionFactory();
+ factory.setBlockOnPersistentSend(true);
+ ClientSession session = factory.createSession(!transacted, !transacted, 0);
+
+ try
+ {
+
+ session.createQueue(ADDRESS, ADDRESS, true);
+
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ class MyListener implements FailureListener
+ {
+ public void connectionFailed(HornetQException me)
+ {
+ latch.countDown();
+ }
+ }
+
+ session.addFailureListener(new MyListener());
+
+ ClientProducer prod = session.createProducer(ADDRESS);
+
+ final int TOTAL_MESSAGES = 2000;
+
+ for (int i = 0; i < TOTAL_MESSAGES; i++)
+ {
+ if (transacted && i % 10 == 0)
+ {
+ session.commit();
+ }
+ ClientMessage msg = session.createClientMessage(true);
+ msg.setBody(ChannelBuffers.buffer(1024));
+ msg.putIntProperty(new SimpleString("key"), i);
+ prod.send(msg);
+ }
+
+ session.commit();
+
+ if (failBeforeConsume)
+ {
+ failSession(session, latch);
+ }
+
+
+ session.start();
+
+ ClientConsumer cons = session.createConsumer(ADDRESS);
+
+ final int MIDDLE = TOTAL_MESSAGES / 2;
+
+ for (int i = 0; i < MIDDLE; i++)
+ {
+ ClientMessage msg = cons.receive(10000);
+ assertNotNull(msg);
+ msg.acknowledge();
+ if (transacted && i % 10 == 0)
+ {
+ session.commit();
+ }
+ assertEquals((Integer)i, (Integer)msg.getProperty(new SimpleString("key")));
+ }
+
+ session.commit();
+
+ if (!failBeforeConsume)
+ {
+ failSession(session, latch);
+ }
+
+ session.close();
+
+ session = factory.createSession(true, true, 0);
+
+ cons = session.createConsumer(ADDRESS);
+
+ session.start();
+
+ for (int i = MIDDLE; i < TOTAL_MESSAGES; i++)
+ {
+ ClientMessage msg = cons.receive(5000);
+ assertNotNull(msg);
+
+ msg.acknowledge();
+ int result = (Integer)msg.getProperty(new SimpleString("key"));
+ assertEquals(i, result);
+ }
+ }
+ finally
+ {
+ try
+ {
+ session.close();
+ }
+ catch (Exception ignored)
+ {
+ }
+ }
+ }
+
+
+ /**
+ * @param session
+ * @param latch
+ * @throws InterruptedException
+ */
+ private void failSession(ClientSession session, final CountDownLatch latch) throws InterruptedException
+ {
+ RemotingConnection conn = ((ClientSessionInternal)session).getConnection();
+
+ // Simulate failure on connection
+ conn.fail(new HornetQException(HornetQException.NOT_CONNECTED));
+
+ // Wait to be informed of failure
+
+ assertTrue(latch.await(5000, TimeUnit.MILLISECONDS));
+ }
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ /* (non-Javadoc)
+ * @see org.hornetq.tests.integration.cluster.failover.FailoverTestBase#getAcceptorTransportConfiguration(boolean)
+ */
+ @Override
+ protected TransportConfiguration getAcceptorTransportConfiguration(boolean live)
+ {
+ return getInVMTransportAcceptorConfiguration(live);
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.tests.integration.cluster.failover.FailoverTestBase#getConnectorTransportConfiguration(boolean)
+ */
+ @Override
+ protected TransportConfiguration getConnectorTransportConfiguration(boolean live)
+ {
+ return getInVMConnectorTransportConfiguration(live);
+ }
+
+ protected HornetQServer createServer(final boolean realFiles, final Configuration configuration)
+ {
+ return createServer(realFiles, configuration, PAGE_SIZE, PAGE_MAX, new HashMap<String, AddressSettings>());
+ }
+
+ /**
+ * @throws Exception
+ */
+ protected void createConfigs() throws Exception
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(false));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(true);
+ config1.setBackup(true);
+ server1Service = createServer(true, config1);
+
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(true);
+ server0Service = createServer(true, config0);
+
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedAsynchronousFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedAsynchronousFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedAsynchronousFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import org.hornetq.core.config.Configuration;
+
+/**
+ * A ReplicatedAsynchronousFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicatedAsynchronousFailoverTest extends AsynchronousFailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+ @Override
+ protected void createConfigs() throws Exception
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_backup");
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_backup");
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(false));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(false);
+ config1.setBackup(true);
+ server1Service = super.createServer(true, config1);
+
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
+
+ config0.getConnectorConfigurations().put("toBackup", getConnectorTransportConfiguration(false));
+ config0.setBackupConnectorName("toBackup");
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(false);
+ server0Service = super.createServer(true, config0);
+
+ server1Service.start();
+ server0Service.start();
+ }
+
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+
+/**
+ * A ReplicatedFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicatedFailoverTest extends FailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ @Override
+ protected void createConfigs() throws Exception
+ {
+ createReplicatedConfigs();
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedLargeMessageFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedLargeMessageFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedLargeMessageFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+/**
+ * A ReplicatedLargeMessageFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicatedLargeMessageFailoverTest extends LargeMessageFailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ public ReplicatedLargeMessageFailoverTest()
+ {
+ super();
+ }
+
+ public ReplicatedLargeMessageFailoverTest(String name)
+ {
+ super(name);
+ }
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+ @Override
+ protected void createConfigs() throws Exception
+ {
+ createReplicatedConfigs();
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedNettyAsynchronousFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedNettyAsynchronousFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedNettyAsynchronousFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import org.hornetq.core.config.Configuration;
+
+/**
+ * A ReplicatedNettyAsynchronousFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicatedNettyAsynchronousFailoverTest extends NettyAsynchronousFailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ @Override
+ protected void createConfigs() throws Exception
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_backup");
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_backup");
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(false));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(false);
+ config1.setBackup(true);
+ server1Service = super.createServer(true, config1);
+
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
+
+ config0.getConnectorConfigurations().put("toBackup", getConnectorTransportConfiguration(false));
+ config0.setBackupConnectorName("toBackup");
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(false);
+ server0Service = super.createServer(true, config0);
+
+ server1Service.start();
+ server0Service.start();
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Added: trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedPagingFailoverTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedPagingFailoverTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/failover/ReplicatedPagingFailoverTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.cluster.failover;
+
+import org.hornetq.core.config.Configuration;
+
+/**
+ * A ReplicatedPagingFailoverTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicatedPagingFailoverTest extends PagingFailoverTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+
+ @Override
+ protected void createConfigs() throws Exception
+ {
+ Configuration config1 = super.createDefaultConfig();
+ config1.setBindingsDirectory(config1.getBindingsDirectory() + "_backup");
+ config1.setJournalDirectory(config1.getJournalDirectory() + "_backup");
+ config1.setPagingDirectory(config1.getPagingDirectory() + "_backup");
+ config1.getAcceptorConfigurations().clear();
+ config1.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(false));
+ config1.setSecurityEnabled(false);
+ config1.setSharedStore(false);
+ config1.setBackup(true);
+ server1Service = super.createServer(true, config1);
+
+ Configuration config0 = super.createDefaultConfig();
+ config0.getAcceptorConfigurations().clear();
+ config0.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));
+
+ config0.getConnectorConfigurations().put("toBackup", getConnectorTransportConfiguration(false));
+ config0.setBackupConnectorName("toBackup");
+ config0.setSecurityEnabled(false);
+ config0.setSharedStore(false);
+ server0Service = super.createServer(true, config0);
+
+ server1Service.start();
+ server0Service.start();
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Modified: trunk/tests/src/org/hornetq/tests/integration/largemessage/mock/MockConnectorFactory.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/largemessage/mock/MockConnectorFactory.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/integration/largemessage/mock/MockConnectorFactory.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -14,6 +14,7 @@
package org.hornetq.tests.integration.largemessage.mock;
import java.util.HashSet;
+import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
Modified: trunk/tests/src/org/hornetq/tests/integration/paging/PageCrashTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/paging/PageCrashTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/integration/paging/PageCrashTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -310,7 +310,7 @@
}
@Override
- protected Page createPage(final int page) throws Exception
+ public Page createPage(final int page) throws Exception
{
Page originalPage = super.createPage(page);
Added: trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/replication/ReplicationTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -0,0 +1,681 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.replication;
+
+import static org.hornetq.tests.util.RandomUtil.randomString;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.hornetq.core.buffers.ChannelBuffers;
+import org.hornetq.core.client.ClientSessionFactory;
+import org.hornetq.core.client.impl.ClientSessionFactoryImpl;
+import org.hornetq.core.client.impl.FailoverManager;
+import org.hornetq.core.client.impl.FailoverManagerImpl;
+import org.hornetq.core.config.Configuration;
+import org.hornetq.core.config.TransportConfiguration;
+import org.hornetq.core.exception.HornetQException;
+import org.hornetq.core.journal.EncodingSupport;
+import org.hornetq.core.journal.Journal;
+import org.hornetq.core.journal.LoaderCallback;
+import org.hornetq.core.journal.PreparedTransactionInfo;
+import org.hornetq.core.journal.RecordInfo;
+import org.hornetq.core.journal.TransactionFailureCallback;
+import org.hornetq.core.paging.PagedMessage;
+import org.hornetq.core.paging.PagingManager;
+import org.hornetq.core.paging.PagingStore;
+import org.hornetq.core.paging.impl.PagedMessageImpl;
+import org.hornetq.core.paging.impl.PagingManagerImpl;
+import org.hornetq.core.paging.impl.PagingStoreFactoryNIO;
+import org.hornetq.core.persistence.StorageManager;
+import org.hornetq.core.remoting.impl.invm.InVMConnectorFactory;
+import org.hornetq.core.remoting.spi.HornetQBuffer;
+import org.hornetq.core.replication.impl.ReplicatedJournal;
+import org.hornetq.core.replication.impl.ReplicationManagerImpl;
+import org.hornetq.core.server.HornetQServer;
+import org.hornetq.core.server.ServerMessage;
+import org.hornetq.core.server.impl.HornetQServerImpl;
+import org.hornetq.core.server.impl.ServerMessageImpl;
+import org.hornetq.core.settings.HierarchicalRepository;
+import org.hornetq.core.settings.impl.AddressSettings;
+import org.hornetq.tests.util.ServiceTestBase;
+import org.hornetq.utils.ExecutorFactory;
+import org.hornetq.utils.HornetQThreadFactory;
+import org.hornetq.utils.SimpleString;
+
+/**
+ * A ReplicationTest
+ *
+ * @author <a href="mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class ReplicationTest extends ServiceTestBase
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ private ThreadFactory tFactory;
+
+ private ExecutorService executor;
+
+ private FailoverManager connectionManager;
+
+ private ScheduledExecutorService scheduledExecutor;
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ public void testBasicConnection() throws Exception
+ {
+
+ Configuration config = createDefaultConfig(false);
+
+ config.setBackup(true);
+
+ HornetQServer server = new HornetQServerImpl(config);
+
+ server.start();
+
+ try
+ {
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ manager.start();
+ manager.stop();
+ }
+ finally
+ {
+ server.stop();
+ }
+ }
+
+ public void testConnectIntoNonBackup() throws Exception
+ {
+
+ Configuration config = createDefaultConfig(false);
+
+ config.setBackup(false);
+
+ HornetQServer server = new HornetQServerImpl(config);
+
+ server.start();
+
+ try
+ {
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ try
+ {
+ manager.start();
+ fail("Exception was expected");
+ }
+ catch (HornetQException expected)
+ {
+ }
+
+ manager.stop();
+ }
+ finally
+ {
+ server.stop();
+ }
+ }
+
+ public void testSendPackets() throws Exception
+ {
+
+ Configuration config = createDefaultConfig(false);
+
+ config.setBackup(true);
+
+ HornetQServer server = new HornetQServerImpl(config);
+
+ server.start();
+
+ try
+ {
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ manager.start();
+
+ Journal replicatedJournal = new ReplicatedJournal((byte)1, new FakeJournal(), manager);
+
+ replicatedJournal.appendPrepareRecord(1, new FakeData(), false);
+
+ replicatedJournal.appendAddRecord(1, (byte)1, new FakeData(), false);
+ replicatedJournal.appendUpdateRecord(1, (byte)2, new FakeData(), false);
+ replicatedJournal.appendDeleteRecord(1, false);
+ replicatedJournal.appendAddRecordTransactional(2, 2, (byte)1, new FakeData());
+ replicatedJournal.appendUpdateRecordTransactional(2, 2, (byte)2, new FakeData());
+ replicatedJournal.appendCommitRecord(2, false);
+
+ replicatedJournal.appendDeleteRecordTransactional(3, 4, new FakeData());
+ replicatedJournal.appendPrepareRecord(3, new FakeData(), false);
+ replicatedJournal.appendRollbackRecord(3, false);
+
+ blockOnReplication(manager);
+
+ assertEquals(1, manager.getActiveTokens().size());
+
+ manager.closeContext();
+
+ for (int i = 0; i < 100; i++)
+ {
+ // This is asynchronous. Have to wait completion
+ if (manager.getActiveTokens().size() == 0)
+ {
+ break;
+ }
+ Thread.sleep(1);
+ }
+
+ assertEquals(0, manager.getActiveTokens().size());
+
+ ServerMessage msg = new ServerMessageImpl();
+
+ SimpleString dummy = new SimpleString("dummy");
+ msg.setDestination(dummy);
+ msg.setBody(ChannelBuffers.wrappedBuffer(new byte[10]));
+
+ replicatedJournal.appendAddRecordTransactional(23, 24, (byte)1, new FakeData());
+
+ PagedMessage pgmsg = new PagedMessageImpl(msg, -1);
+ manager.pageWrite(pgmsg, 1);
+ manager.pageWrite(pgmsg, 2);
+ manager.pageWrite(pgmsg, 3);
+ manager.pageWrite(pgmsg, 4);
+
+ blockOnReplication(manager);
+
+ PagingManager pagingManager = createPageManager(server.getStorageManager(),
+ server.getConfiguration(),
+ server.getExecutorFactory(),
+ server.getAddressSettingsRepository());
+
+ PagingStore store = pagingManager.getPageStore(dummy);
+ store.start();
+ assertEquals(5, store.getNumberOfPages());
+ store.stop();
+
+ manager.pageDeleted(dummy, 1);
+ manager.pageDeleted(dummy, 2);
+ manager.pageDeleted(dummy, 3);
+ manager.pageDeleted(dummy, 4);
+ manager.pageDeleted(dummy, 5);
+ manager.pageDeleted(dummy, 6);
+
+ blockOnReplication(manager);
+
+ ServerMessageImpl serverMsg = new ServerMessageImpl();
+ serverMsg.setMessageID(500);
+ serverMsg.setDestination(new SimpleString("tttt"));
+
+
+ HornetQBuffer buffer = ChannelBuffers.dynamicBuffer(100);
+ serverMsg.encodeProperties(buffer);
+
+ manager.largeMessageBegin(500);
+
+ manager.largeMessageWrite(500, new byte[1024]);
+
+ manager.largeMessageEnd(500);
+
+ blockOnReplication(manager);
+
+ store.start();
+
+ assertEquals(0, store.getNumberOfPages());
+
+ manager.stop();
+ }
+ finally
+ {
+ server.stop();
+ }
+ }
+
+
+ public void testSendPacketsWithFailure() throws Exception
+ {
+
+ Configuration config = createDefaultConfig(false);
+
+ config.setBackup(true);
+
+ HornetQServer server = new HornetQServerImpl(config);
+
+ server.start();
+
+ try
+ {
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ manager.start();
+
+ Journal replicatedJournal = new ReplicatedJournal((byte)1, new FakeJournal(), manager);
+
+ for (int i = 0 ; i < 500; i++)
+ {
+ replicatedJournal.appendAddRecord(i, (byte)1, new FakeData(), false);
+ }
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ manager.afterReplicated(new Runnable()
+ {
+ public void run()
+ {
+ latch.countDown();
+ }
+ });
+
+ manager.closeContext();
+
+ assertTrue(latch.await(10, TimeUnit.SECONDS));
+ }
+ finally
+ {
+ server.stop();
+ }
+ }
+
+ /**
+ * Blocks (up to 30 seconds) until the replication manager reports completion.
+ * @param manager the replication manager to wait on
+ */
+ private void blockOnReplication(ReplicationManagerImpl manager) throws Exception
+ {
+ final CountDownLatch latch = new CountDownLatch(1);
+ manager.afterReplicated(new Runnable()
+ {
+
+ public void run()
+ {
+ latch.countDown();
+ }
+
+ });
+
+ assertTrue(latch.await(30, TimeUnit.SECONDS));
+ }
+
+ public void testNoActions() throws Exception
+ {
+
+ Configuration config = createDefaultConfig(false);
+
+ config.setBackup(true);
+
+ HornetQServer server = new HornetQServerImpl(config);
+
+ server.start();
+
+ try
+ {
+ ReplicationManagerImpl manager = new ReplicationManagerImpl(connectionManager, executor);
+ manager.start();
+
+ Journal replicatedJournal = new ReplicatedJournal((byte)1, new FakeJournal(), manager);
+
+ replicatedJournal.appendPrepareRecord(1, new FakeData(), false);
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ manager.afterReplicated(new Runnable()
+ {
+
+ public void run()
+ {
+ latch.countDown();
+ }
+
+ });
+ assertTrue(latch.await(1, TimeUnit.SECONDS));
+ assertEquals(1, manager.getActiveTokens().size());
+
+ manager.closeContext();
+
+ for (int i = 0; i < 100; i++)
+ {
+ // This is asynchronous. Have to wait completion
+ if (manager.getActiveTokens().size() == 0)
+ {
+ break;
+ }
+ Thread.sleep(1);
+ }
+
+ assertEquals(0, manager.getActiveTokens().size());
+ manager.stop();
+ }
+ finally
+ {
+ server.stop();
+ }
+ }
+
+ class FakeData implements EncodingSupport
+ {
+
+ public void decode(HornetQBuffer buffer)
+ {
+ }
+
+ public void encode(HornetQBuffer buffer)
+ {
+ buffer.writeBytes(new byte[5]);
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.EncodingSupport#getEncodeSize()
+ */
+ public int getEncodeSize()
+ {
+ return 5;
+ }
+
+ }
+
+ // Package protected ---------------------------------------------
+ /*class LocalRemotingServiceImpl extends RemotingServiceImpl
+ {
+
+ public LocalRemotingServiceImpl(final Configuration config,
+ final HornetQServer server,
+ final ManagementService managementService,
+ final Executor threadPool,
+ final ScheduledExecutorService scheduledThreadPool)
+ {
+ super(config, server, managementService, threadPool, scheduledThreadPool);
+ }
+
+ protected ChannelHandler createHandler(RemotingConnection conn, Channel channel)
+ {
+ return super.createHandler(conn, channel);
+ }
+
+ }*/
+
+ // Protected -----------------------------------------------------
+
+ protected void setUp() throws Exception
+ {
+ super.setUp();
+
+ tFactory = new HornetQThreadFactory("HornetQ-ReplicationTest", false);
+
+ executor = Executors.newCachedThreadPool(tFactory);
+
+ scheduledExecutor = new ScheduledThreadPoolExecutor(10, tFactory);
+
+ TransportConfiguration connectorConfig = new TransportConfiguration(InVMConnectorFactory.class.getName(),
+ new HashMap<String, Object>(),
+ randomString());
+
+ connectionManager = new FailoverManagerImpl((ClientSessionFactory)null,
+ connectorConfig,
+ null,
+ false,
+ ClientSessionFactoryImpl.DEFAULT_CALL_TIMEOUT,
+ ClientSessionFactoryImpl.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD,
+ ClientSessionFactoryImpl.DEFAULT_CONNECTION_TTL,
+ 0,
+ 1.0d,
+ 0,
+ 1,
+ executor,
+ scheduledExecutor,
+ null);
+
+ }
+
+ protected void tearDown() throws Exception
+ {
+
+ executor.shutdown();
+
+ scheduledExecutor.shutdown();
+
+ tFactory = null;
+
+ connectionManager = null;
+
+ scheduledExecutor = null;
+
+ super.tearDown();
+
+ }
+
+ protected PagingManager createPageManager(StorageManager storageManager,
+ Configuration configuration,
+ ExecutorFactory executorFactory,
+ HierarchicalRepository<AddressSettings> addressSettingsRepository) throws Exception
+ {
+
+ PagingManager paging = new PagingManagerImpl(new PagingStoreFactoryNIO(configuration.getPagingDirectory(),
+ executorFactory),
+ storageManager,
+ addressSettingsRepository,
+ false);
+
+ paging.start();
+ return paging;
+ }
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+ static class FakeJournal implements Journal
+ {
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendAddRecord(long, byte, byte[], boolean)
+ */
+ public void appendAddRecord(long id, byte recordType, byte[] record, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendAddRecord(long, byte, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendAddRecord(long id, byte recordType, EncodingSupport record, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendAddRecordTransactional(long, long, byte, byte[])
+ */
+ public void appendAddRecordTransactional(long txID, long id, byte recordType, byte[] record) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendAddRecordTransactional(long, long, byte, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendAddRecordTransactional(long txID, long id, byte recordType, EncodingSupport record) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendCommitRecord(long, boolean)
+ */
+ public void appendCommitRecord(long txID, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecord(long, boolean)
+ */
+ public void appendDeleteRecord(long id, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecordTransactional(long, long, byte[])
+ */
+ public void appendDeleteRecordTransactional(long txID, long id, byte[] record) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecordTransactional(long, long, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendDeleteRecordTransactional(long txID, long id, EncodingSupport record) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendDeleteRecordTransactional(long, long)
+ */
+ public void appendDeleteRecordTransactional(long txID, long id) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendPrepareRecord(long, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendPrepareRecord(long txID, EncodingSupport transactionData, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendPrepareRecord(long, byte[], boolean)
+ */
+ public void appendPrepareRecord(long txID, byte[] transactionData, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendRollbackRecord(long, boolean)
+ */
+ public void appendRollbackRecord(long txID, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecord(long, byte, byte[], boolean)
+ */
+ public void appendUpdateRecord(long id, byte recordType, byte[] record, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecord(long, byte, org.hornetq.core.journal.EncodingSupport, boolean)
+ */
+ public void appendUpdateRecord(long id, byte recordType, EncodingSupport record, boolean sync) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecordTransactional(long, long, byte, byte[])
+ */
+ public void appendUpdateRecordTransactional(long txID, long id, byte recordType, byte[] record) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#appendUpdateRecordTransactional(long, long, byte, org.hornetq.core.journal.EncodingSupport)
+ */
+ public void appendUpdateRecordTransactional(long txID, long id, byte recordType, EncodingSupport record) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#getAlignment()
+ */
+ public int getAlignment() throws Exception
+ {
+
+ return 0;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#load(org.hornetq.core.journal.LoaderCallback)
+ */
+ public long load(LoaderCallback reloadManager) throws Exception
+ {
+
+ return 0;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#load(java.util.List, java.util.List, org.hornetq.core.journal.TransactionFailureCallback)
+ */
+ public long load(List<RecordInfo> committedRecords,
+ List<PreparedTransactionInfo> preparedTransactions,
+ TransactionFailureCallback transactionFailure) throws Exception
+ {
+
+ return 0;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.journal.Journal#perfBlast(int)
+ */
+ public void perfBlast(int pages) throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#isStarted()
+ */
+ public boolean isStarted()
+ {
+
+ return false;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#start()
+ */
+ public void start() throws Exception
+ {
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.server.HornetQComponent#stop()
+ */
+ public void stop() throws Exception
+ {
+
+ }
+
+ }
+}
Modified: trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PageImplTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PageImplTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PageImplTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -24,6 +24,7 @@
import org.hornetq.core.paging.PagedMessage;
import org.hornetq.core.paging.impl.PageImpl;
import org.hornetq.core.paging.impl.PagedMessageImpl;
+import org.hornetq.core.persistence.impl.nullpm.NullStorageManager;
import org.hornetq.core.remoting.spi.HornetQBuffer;
import org.hornetq.core.server.ServerMessage;
import org.hornetq.core.server.impl.ServerMessageImpl;
@@ -77,7 +78,7 @@
SequentialFile file = factory.createSequentialFile("00010.page", 1);
- PageImpl impl = new PageImpl(factory, file, 10);
+ PageImpl impl = new PageImpl(new SimpleString("something"), new NullStorageManager(), factory, file, 10);
assertEquals(10, impl.getPageId());
@@ -94,7 +95,7 @@
file = factory.createSequentialFile("00010.page", 1);
file.open();
- impl = new PageImpl(factory, file, 10);
+ impl = new PageImpl(new SimpleString("something"), new NullStorageManager(), factory, file, 10);
List<PagedMessage> msgs = impl.read();
@@ -124,7 +125,7 @@
SequentialFile file = factory.createSequentialFile("00010.page", 1);
- PageImpl impl = new PageImpl(factory, file, 10);
+ PageImpl impl = new PageImpl(new SimpleString("something"), new NullStorageManager(), factory, file, 10);
assertEquals(10, impl.getPageId());
@@ -167,7 +168,7 @@
file = factory.createSequentialFile("00010.page", 1);
file.open();
- impl = new PageImpl(factory, file, 10);
+ impl = new PageImpl(new SimpleString("something"), new NullStorageManager(), factory, file, 10);
List<PagedMessage> msgs = impl.read();
Modified: trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PagingStoreImplTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PagingStoreImplTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/unit/core/paging/impl/PagingStoreImplTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -1105,6 +1105,78 @@
{
}
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#afterReplicated(java.lang.Runnable)
+ */
+ public void afterReplicated(Runnable run)
+ {
+
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#completeReplication()
+ */
+ public void completeReplication()
+ {
+
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#createLargeMessage(byte[])
+ */
+ public LargeServerMessage createLargeMessage(long messageId, byte[] header)
+ {
+
+ return null;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#isReplicated()
+ */
+ public boolean isReplicated()
+ {
+
+ return false;
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#loadInternalOnly()
+ */
+ public void loadInternalOnly() throws Exception
+ {
+
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageClosed(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageClosed(SimpleString storeName, int pageNumber)
+ {
+
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageDeleted(org.hornetq.utils.SimpleString, int)
+ */
+ public void pageDeleted(SimpleString storeName, int pageNumber)
+ {
+
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.hornetq.core.persistence.StorageManager#pageWrite(org.hornetq.core.paging.PagedMessage, int)
+ */
+ public void pageWrite(PagedMessage message, int pageNumber)
+ {
+
+
+ }
+
}
class FakeStoreFactory implements PagingStoreFactory
Modified: trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/BatchIDGeneratorUnitTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/BatchIDGeneratorUnitTest.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/BatchIDGeneratorUnitTest.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -23,6 +23,7 @@
import org.hornetq.core.journal.impl.JournalImpl;
import org.hornetq.core.journal.impl.NIOSequentialFileFactory;
import org.hornetq.core.persistence.impl.journal.BatchingIDGenerator;
+import org.hornetq.core.persistence.impl.journal.JournalStorageManager;
import org.hornetq.core.remoting.spi.HornetQBuffer;
import org.hornetq.tests.util.UnitTestCase;
@@ -142,7 +143,7 @@
for (RecordInfo record : records)
{
- if (record.userRecordType == BatchingIDGenerator.ID_COUNTER_RECORD)
+ if (record.userRecordType == JournalStorageManager.ID_COUNTER_RECORD)
{
HornetQBuffer buffer = ChannelBuffers.wrappedBuffer(record.data);
batch.loadState(record.id, buffer);
Modified: trunk/tests/src/org/hornetq/tests/util/ServiceTestBase.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/util/ServiceTestBase.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/util/ServiceTestBase.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -67,6 +67,16 @@
// Constructors --------------------------------------------------
+ public ServiceTestBase()
+ {
+ super();
+ }
+
+ public ServiceTestBase(String name)
+ {
+ super(name);
+ }
+
// Public --------------------------------------------------------
// Package protected ---------------------------------------------
Modified: trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java 2009-10-15 16:17:52 UTC (rev 8115)
+++ trunk/tests/src/org/hornetq/tests/util/UnitTestCase.java 2009-10-15 16:41:41 UTC (rev 8116)
@@ -96,6 +96,19 @@
// Static --------------------------------------------------------
+ /**
+ * @param name
+ */
+ public UnitTestCase(String name)
+ {
+ super(name);
+ }
+
+ public UnitTestCase()
+ {
+ super();
+ }
+
public static void forceGC()
{
WeakReference<Object> dumbReference = new WeakReference<Object>(new Object());
14 years, 7 months