JBoss hornetq SVN: r8576 - tags.
by do-not-reply@jboss.org
Author: ataylor
Date: 2009-12-05 03:53:20 -0500 (Sat, 05 Dec 2009)
New Revision: 8576
Added:
tags/hornetq_2_0_0_CR1_pending/
Log:
created pending tag for HornetQ 2.0.0.CR1
Copied: tags/hornetq_2_0_0_CR1_pending (from rev 8575, trunk)
15 years, 1 month
JBoss hornetq SVN: r8575 - trunk/src/main/org/hornetq/core/remoting/impl.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-12-05 01:32:26 -0500 (Sat, 05 Dec 2009)
New Revision: 8575
Modified:
trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java
Log:
Reverting last commit on ordering & failover since it didn't fix the issue
Modified: trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java 2009-12-05 04:41:10 UTC (rev 8574)
+++ trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java 2009-12-05 06:32:26 UTC (rev 8575)
@@ -68,8 +68,6 @@
private final Object sendBlockingLock = new Object();
- private final Object replayLock = new Object();
-
private boolean failingOver;
private final int confWindowSize;
@@ -110,7 +108,7 @@
{
return lock;
}
-
+
public int getConfirmationWindowSize()
{
return confWindowSize;
@@ -147,7 +145,7 @@
// This must never called by more than one thread concurrently
public void send(final Packet packet, final boolean flush)
- {
+ {
synchronized (sendLock)
{
packet.setChannelID(id);
@@ -331,20 +329,15 @@
public void replayCommands(final int otherLastReceivedCommandID, final long newChannelID)
{
- // need to make sure we won't clear any packets while replaying or we could
- // break order eventually
- synchronized (replayLock)
+ if (resendCache != null)
{
- if (resendCache != null)
+ clearUpTo(otherLastReceivedCommandID);
+
+ for (final Packet packet : resendCache)
{
- clearUpTo(otherLastReceivedCommandID);
-
- for (final Packet packet : resendCache)
- {
- packet.setChannelID(newChannelID);
-
- doWrite(packet);
- }
+ packet.setChannelID(newChannelID);
+
+ doWrite(packet);
}
}
}
@@ -394,7 +387,7 @@
{
lastReceivedCommandID++;
- receivedBytes += packet.getPacketSize();
+ receivedBytes += packet.getPacketSize();
if (receivedBytes >= confWindowSize)
{
@@ -474,42 +467,39 @@
private void clearUpTo(final int lastReceivedCommandID)
{
- synchronized (replayLock)
+ final int numberToClear = 1 + lastReceivedCommandID - firstStoredCommandID;
+
+ if (numberToClear == -1)
{
- final int numberToClear = 1 + lastReceivedCommandID - firstStoredCommandID;
+ throw new IllegalArgumentException("Invalid lastReceivedCommandID: " + lastReceivedCommandID);
+ }
- if (numberToClear == -1)
- {
- throw new IllegalArgumentException("Invalid lastReceivedCommandID: " + lastReceivedCommandID);
- }
+ int sizeToFree = 0;
- int sizeToFree = 0;
+ for (int i = 0; i < numberToClear; i++)
+ {
+ final Packet packet = resendCache.poll();
- for (int i = 0; i < numberToClear; i++)
+ if (packet == null)
{
- final Packet packet = resendCache.poll();
+ log.warn("Can't find packet to clear: " + " last received command id " +
+ lastReceivedCommandID +
+ " first stored command id " +
+ firstStoredCommandID);
+ return;
+ }
- if (packet == null)
- {
- log.warn("Can't find packet to clear: " + " last received command id " +
- lastReceivedCommandID +
- " first stored command id " +
- firstStoredCommandID);
- return;
- }
-
- if (packet.getType() != PACKETS_CONFIRMED)
- {
- sizeToFree += packet.getPacketSize();
- }
-
- if (commandConfirmationHandler != null)
- {
- commandConfirmationHandler.commandConfirmed(packet);
- }
+ if (packet.getType() != PACKETS_CONFIRMED)
+ {
+ sizeToFree += packet.getPacketSize();
}
- firstStoredCommandID += numberToClear;
+ if (commandConfirmationHandler != null)
+ {
+ commandConfirmationHandler.commandConfirmed(packet);
+ }
}
+
+ firstStoredCommandID += numberToClear;
}
}
15 years, 1 month
JBoss hornetq SVN: r8574 - in trunk: tests/src/org/hornetq/tests/integration/cluster/reattach and 1 other directory.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-12-04 23:41:10 -0500 (Fri, 04 Dec 2009)
New Revision: 8574
Modified:
trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java
trunk/tests/src/org/hornetq/tests/integration/cluster/reattach/RandomReattachTest.java
Log:
Out of order issue during failover fix.
Modified: trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java 2009-12-05 02:06:10 UTC (rev 8573)
+++ trunk/src/main/org/hornetq/core/remoting/impl/ChannelImpl.java 2009-12-05 04:41:10 UTC (rev 8574)
@@ -68,6 +68,8 @@
private final Object sendBlockingLock = new Object();
+ private final Object replayLock = new Object();
+
private boolean failingOver;
private final int confWindowSize;
@@ -108,7 +110,7 @@
{
return lock;
}
-
+
public int getConfirmationWindowSize()
{
return confWindowSize;
@@ -145,7 +147,7 @@
// This must never called by more than one thread concurrently
public void send(final Packet packet, final boolean flush)
- {
+ {
synchronized (sendLock)
{
packet.setChannelID(id);
@@ -329,15 +331,20 @@
public void replayCommands(final int otherLastReceivedCommandID, final long newChannelID)
{
- if (resendCache != null)
+ // need to make sure we won't clear any packets while replaying or we could
+ // break order eventually
+ synchronized (replayLock)
{
- clearUpTo(otherLastReceivedCommandID);
-
- for (final Packet packet : resendCache)
+ if (resendCache != null)
{
- packet.setChannelID(newChannelID);
-
- doWrite(packet);
+ clearUpTo(otherLastReceivedCommandID);
+
+ for (final Packet packet : resendCache)
+ {
+ packet.setChannelID(newChannelID);
+
+ doWrite(packet);
+ }
}
}
}
@@ -387,7 +394,7 @@
{
lastReceivedCommandID++;
- receivedBytes += packet.getPacketSize();
+ receivedBytes += packet.getPacketSize();
if (receivedBytes >= confWindowSize)
{
@@ -467,39 +474,42 @@
private void clearUpTo(final int lastReceivedCommandID)
{
- final int numberToClear = 1 + lastReceivedCommandID - firstStoredCommandID;
-
- if (numberToClear == -1)
+ synchronized (replayLock)
{
- throw new IllegalArgumentException("Invalid lastReceivedCommandID: " + lastReceivedCommandID);
- }
+ final int numberToClear = 1 + lastReceivedCommandID - firstStoredCommandID;
- int sizeToFree = 0;
+ if (numberToClear == -1)
+ {
+ throw new IllegalArgumentException("Invalid lastReceivedCommandID: " + lastReceivedCommandID);
+ }
- for (int i = 0; i < numberToClear; i++)
- {
- final Packet packet = resendCache.poll();
+ int sizeToFree = 0;
- if (packet == null)
+ for (int i = 0; i < numberToClear; i++)
{
- log.warn("Can't find packet to clear: " + " last received command id " +
- lastReceivedCommandID +
- " first stored command id " +
- firstStoredCommandID);
- return;
- }
+ final Packet packet = resendCache.poll();
- if (packet.getType() != PACKETS_CONFIRMED)
- {
- sizeToFree += packet.getPacketSize();
+ if (packet == null)
+ {
+ log.warn("Can't find packet to clear: " + " last received command id " +
+ lastReceivedCommandID +
+ " first stored command id " +
+ firstStoredCommandID);
+ return;
+ }
+
+ if (packet.getType() != PACKETS_CONFIRMED)
+ {
+ sizeToFree += packet.getPacketSize();
+ }
+
+ if (commandConfirmationHandler != null)
+ {
+ commandConfirmationHandler.commandConfirmed(packet);
+ }
}
- if (commandConfirmationHandler != null)
- {
- commandConfirmationHandler.commandConfirmed(packet);
- }
+ firstStoredCommandID += numberToClear;
}
-
- firstStoredCommandID += numberToClear;
}
}
Modified: trunk/tests/src/org/hornetq/tests/integration/cluster/reattach/RandomReattachTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/cluster/reattach/RandomReattachTest.java 2009-12-05 02:06:10 UTC (rev 8573)
+++ trunk/tests/src/org/hornetq/tests/integration/cluster/reattach/RandomReattachTest.java 2009-12-05 04:41:10 UTC (rev 8574)
@@ -216,6 +216,7 @@
for (int its = 0; its < numIts; its++)
{
+ log.info("####" + this.getName() + " iteration #" + its);
start();
ClientSessionFactoryImpl sf = new ClientSessionFactoryImpl(new TransportConfiguration("org.hornetq.core.remoting.impl.invm.InVMConnectorFactory"));
15 years, 1 month
JBoss hornetq SVN: r8573 - trunk/tests/src/org/hornetq/tests/integration/journal.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-12-04 21:06:10 -0500 (Fri, 04 Dec 2009)
New Revision: 8573
Modified:
trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java
Log:
A few tweaks on persistence
Modified: trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java 2009-12-05 02:05:16 UTC (rev 8572)
+++ trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java 2009-12-05 02:06:10 UTC (rev 8573)
@@ -204,7 +204,7 @@
performNonTransactionalDelete = false;
}
- setup(50, 60 * 1024, true);
+ setup(50, 60 * 1024, false);
ArrayList<Long> liveIDs = new ArrayList<Long>();
15 years, 1 month
JBoss hornetq SVN: r8572 - in trunk: src/main/org/hornetq/core/journal/impl and 2 other directories.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-12-04 21:05:16 -0500 (Fri, 04 Dec 2009)
New Revision: 8572
Added:
trunk/tests/src/org/hornetq/tests/integration/journal/NIOBufferedJournalCompactTest.java
Modified:
trunk/src/main/org/hornetq/core/journal/SequentialFile.java
trunk/src/main/org/hornetq/core/journal/impl/AIOSequentialFile.java
trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java
trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java
trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java
trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java
trunk/tests/src/org/hornetq/tests/unit/core/journal/impl/fakes/FakeSequentialFileFactory.java
Log:
A few tweaks on persistence
Modified: trunk/src/main/org/hornetq/core/journal/SequentialFile.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/SequentialFile.java 2009-12-04 20:54:28 UTC (rev 8571)
+++ trunk/src/main/org/hornetq/core/journal/SequentialFile.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -36,13 +36,13 @@
boolean isOpen();
boolean exists();
-
+
/**
* The maximum number of simultaneous writes accepted
* @param maxIO
* @throws Exception
*/
- void open(int maxIO) throws Exception;
+ void open(int maxIO, boolean useExecutor) throws Exception;
boolean fits(int size);
Modified: trunk/src/main/org/hornetq/core/journal/impl/AIOSequentialFile.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/AIOSequentialFile.java 2009-12-04 20:54:28 UTC (rev 8571)
+++ trunk/src/main/org/hornetq/core/journal/impl/AIOSequentialFile.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -178,14 +178,14 @@
public void open() throws Exception
{
- open(maxIO);
+ open(maxIO, true);
}
- public synchronized void open(final int maxIO) throws Exception
+ public synchronized void open(final int maxIO, final boolean useExecutor) throws Exception
{
opened = true;
- aioFile = new AsynchronousFileImpl(writerExecutor, pollerExecutor);
+ aioFile = new AsynchronousFileImpl(useExecutor ? writerExecutor : null, pollerExecutor);
aioFile.open(getFile().getAbsolutePath(), maxIO);
Modified: trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java 2009-12-04 20:54:28 UTC (rev 8571)
+++ trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -98,7 +98,7 @@
try
{
- controlFile.open(1);
+ controlFile.open(1, false);
HornetQBuffer renameBuffer = HornetQBuffers.dynamicBuffer(1);
@@ -219,7 +219,7 @@
currentFile = journal.getFile(false, false, false, true);
sequentialFile = currentFile.getFile();
- sequentialFile.open(1);
+ sequentialFile.open(1, false);
fileID = nextOrderingID++;
currentFile = new JournalFileImpl(sequentialFile, fileID);
Modified: trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java 2009-12-04 20:54:28 UTC (rev 8571)
+++ trunk/src/main/org/hornetq/core/journal/impl/JournalImpl.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -369,7 +369,7 @@
final JournalFile file,
final JournalReaderCallback reader) throws Exception
{
- file.getFile().open(1);
+ file.getFile().open(1, false);
ByteBuffer wholeFileBuffer = null;
try
{
@@ -2616,7 +2616,7 @@
SequentialFile sf = file.getFile();
- sf.open(1);
+ sf.open(1, false);
sf.position(0);
@@ -2725,7 +2725,7 @@
{
SequentialFile file = fileFactory.createSequentialFile(fileName, maxAIO);
- file.open(1);
+ file.open(1, false);
ByteBuffer bb = fileFactory.newBuffer(SIZE_HEADER);
@@ -2912,7 +2912,7 @@
}
else
{
- sequentialFile.open(1);
+ sequentialFile.open(1, false);
}
if (fill)
@@ -2946,7 +2946,7 @@
}
else
{
- file.getFile().open(1);
+ file.getFile().open(1, false);
}
file.getFile().position(file.getFile().calculateBlockStart(SIZE_HEADER));
Modified: trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java 2009-12-04 20:54:28 UTC (rev 8571)
+++ trunk/src/main/org/hornetq/core/journal/impl/NIOSequentialFile.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -89,10 +89,10 @@
* Some operations while initializing files on the journal may require a different maxIO */
public synchronized void open() throws Exception
{
- open(defaultMaxIO);
+ open(defaultMaxIO, true);
}
- public void open(final int maxIO) throws Exception
+ public void open(final int maxIO, final boolean useExecutor) throws Exception
{
rfile = new RandomAccessFile(getFile(), "rw");
@@ -100,7 +100,7 @@
fileSize = channel.size();
- if (writerExecutor != null)
+ if (writerExecutor != null && useExecutor)
{
maxIOSemaphore = new Semaphore(maxIO);
this.maxIO = maxIO;
@@ -286,7 +286,7 @@
return;
}
- if (writerExecutor == null)
+ if (maxIOSemaphore == null)
{
doInternalWrite(bytes, sync, callback);
}
Added: trunk/tests/src/org/hornetq/tests/integration/journal/NIOBufferedJournalCompactTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/journal/NIOBufferedJournalCompactTest.java (rev 0)
+++ trunk/tests/src/org/hornetq/tests/integration/journal/NIOBufferedJournalCompactTest.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ * Red Hat licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.hornetq.tests.integration.journal;
+
+import java.io.File;
+
+import org.hornetq.core.config.impl.ConfigurationImpl;
+import org.hornetq.core.journal.SequentialFileFactory;
+import org.hornetq.core.journal.impl.AIOSequentialFileFactory;
+import org.hornetq.core.journal.impl.NIOSequentialFileFactory;
+
+/**
+ * A NIOBufferedJournalCompactTest
+ *
+ * @author <mailto:clebert.suconic@jboss.org">Clebert Suconic</a>
+ *
+ *
+ */
+public class NIOBufferedJournalCompactTest extends NIOJournalCompactTest
+{
+
+ // Constants -----------------------------------------------------
+
+ // Attributes ----------------------------------------------------
+
+ // Static --------------------------------------------------------
+
+ // Constructors --------------------------------------------------
+
+ // Public --------------------------------------------------------
+
+ // Package protected ---------------------------------------------
+
+ // Protected -----------------------------------------------------
+ @Override
+ protected SequentialFileFactory getFileFactory() throws Exception
+ {
+ File file = new File(getTestDir());
+
+ deleteDirectory(file);
+
+ file.mkdir();
+
+ return new NIOSequentialFileFactory(getTestDir(), true);
+ }
+
+
+ // Private -------------------------------------------------------
+
+ // Inner classes -------------------------------------------------
+
+}
Modified: trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java 2009-12-04 20:54:28 UTC (rev 8571)
+++ trunk/tests/src/org/hornetq/tests/integration/journal/NIOJournalCompactTest.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -507,7 +507,7 @@
public void testSimpleCompacting() throws Exception
{
- setup(2, 60 * 1024, true);
+ setup(2, 60 * 1024, false);
createJournal();
startJournal();
@@ -756,7 +756,7 @@
protected void setUp() throws Exception
{
super.setUp();
-
+
File file = new File(getTestDir());
deleteDirectory(file);
Modified: trunk/tests/src/org/hornetq/tests/unit/core/journal/impl/fakes/FakeSequentialFileFactory.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/unit/core/journal/impl/fakes/FakeSequentialFileFactory.java 2009-12-04 20:54:28 UTC (rev 8571)
+++ trunk/tests/src/org/hornetq/tests/unit/core/journal/impl/fakes/FakeSequentialFileFactory.java 2009-12-05 02:05:16 UTC (rev 8572)
@@ -363,10 +363,10 @@
public void open() throws Exception
{
- open(0);
+ open(1, true);
}
- public synchronized void open(final int currentMaxIO) throws Exception
+ public synchronized void open(final int currentMaxIO, final boolean useExecutor) throws Exception
{
open = true;
checkAndResize(0);
15 years, 1 month
JBoss hornetq SVN: r8571 - trunk/docs/user-manual/en.
by do-not-reply@jboss.org
Author: jmesnil
Date: 2009-12-04 15:54:28 -0500 (Fri, 04 Dec 2009)
New Revision: 8571
Modified:
trunk/docs/user-manual/en/connection-ttl.xml
trunk/docs/user-manual/en/ha.xml
Log:
documentation update
* typo + fixed title case
Modified: trunk/docs/user-manual/en/connection-ttl.xml
===================================================================
--- trunk/docs/user-manual/en/connection-ttl.xml 2009-12-04 20:34:45 UTC (rev 8570)
+++ trunk/docs/user-manual/en/connection-ttl.xml 2009-12-04 20:54:28 UTC (rev 8571)
@@ -163,7 +163,7 @@
<para>It is possible instead to use a thread from a thread pool to handle the packents so
that the remoting thread is not tied up for too long. However, please note that processing
operations asynchronously on another thread adds a little more
- latency. To enable asynchronous connection executin, set the parameter <literal
+ latency. To enable asynchronous connection execution, set the parameter <literal
>async-connection-execution-enabled</literal> in <literal
>hornetq-configuration.xml</literal> to <literal>true</literal> (default value is
<literal>false</literal>).</para>
Modified: trunk/docs/user-manual/en/ha.xml
===================================================================
--- trunk/docs/user-manual/en/ha.xml 2009-12-04 20:34:45 UTC (rev 8570)
+++ trunk/docs/user-manual/en/ha.xml 2009-12-04 20:54:28 UTC (rev 8571)
@@ -36,7 +36,7 @@
<title>HA modes</title>
<para>HornetQ provides two different modes for high availability, either by
<emphasis>replicating data</emphasis> from the live server journal to the backup
- server or using a <emphasis>shared state</emphasis> for both servers.</para>
+ server or using a <emphasis>shared store</emphasis> for both servers.</para>
<section id="ha.mode.replicated">
<title>Data Replication</title>
<para>In this mode, data stored in the HornetQ journal are replicated from the live
@@ -56,7 +56,7 @@
<para>Data replication introduces some inevitable performance overhead compared to
non replicated operation, but has the advantage in that it requires no expensive
shared file system (e.g. a SAN) for failover, in other words it is a <emphasis
- role="italic">shared nothing</emphasis> approach to high
+ role="italic">shared-nothing</emphasis> approach to high
availability.</para>
<para>Failover with data replication is also faster than failover using shared
storage, since the journal does not have to be reloaded on failover at the
@@ -65,10 +65,10 @@
<section id="configuring.live.backup">
<title>Configuration</title>
<para>First, on the live server, in <literal
- >hornetq-configuration.xml</literal>, configures the live server with
+ >hornetq-configuration.xml</literal>, configure the live server with
knowledge of its backup server. This is done by specifying a <literal
>backup-connector-ref</literal> element. This element references a
- connector, also specified on the live server which contains knowledge of how
+ connector, also specified on the live server which specifies how
to connect to the backup server.</para>
<para>Here's a snippet from live server's <literal
>hornetq-configuration.xml</literal> configured to connect to its backup
@@ -86,7 +86,7 @@
</connector>
</connectors></programlisting>
<para>Secondly, on the backup server, we flag the server as a backup and make
- sure it has an acceptor that the live server can connect to, we also make sure the shared-store paramater is
+ sure it has an acceptor that the live server can connect to. We also make sure the shared-store paramater is
set to false:</para>
<programlisting>
<backup>true</backup>
@@ -104,15 +104,15 @@
<para>For a backup server to function correctly it's also important that it has
the same set of bridges, predefined queues, cluster connections, broadcast
groups and discovery groups as defined on the live node. The easiest way to
- ensure this is just to copy the entire server side configuration from live
+ ensure this is to copy the entire server side configuration from live
to backup and just make the changes as specified above. </para>
</section>
<section>
- <title>Synchronization a backup node to a live node</title>
+ <title>Synchronizing a Backup Node to a Live Node</title>
<para>In order for live - backup pairs to operate properly, they must be
identical replicas. This means you cannot just use any backup server that's
previously been used for other purposes as a backup server, since it will
- have different data in its persistent storage. If you try to do so you will
+ have different data in its persistent storage. If you try to do so, you will
receive an exception in the logs and the server will fail to start.</para>
<para>To create a backup server for a live server that's already been used for
other purposes, it's necessary to copy the <literal>data</literal> directory
@@ -149,7 +149,7 @@
store.</para>
<para>If you require the highest performance during normal operation, have access to
a fast SAN, and can live with a slightly slower failover (depending on amount of
- data) we recommend shared store high availability</para>
+ data), we recommend shared store high availability</para>
<graphic fileref="images/ha-shared-store.png" align="center"/>
<section id="ha/mode.shared.configuration">
<title>Configuration</title>
@@ -168,7 +168,7 @@
linkend="ha.automatic.failover"/>.</para>
</section>
<section>
- <title>Synchronizing a backup node to a live node</title>
+ <title>Synchronizing a Backup Node to a Live Node</title>
<para>As both live and backup servers share the same journal, they do not need
to be synchronized. However until, both live and backup servers are up and
running, high-availability can not be provided with a single server. After
@@ -237,7 +237,7 @@
<para>Using CTRL-C on a HornetQ server or JBoss AS instance causes the server to
<emphasis role="bold">cleanly shut down</emphasis>, so will not trigger
failover on the client. </para>
- <para>If you want the client to failover when it's server is cleanly shutdown
+ <para>If you want the client to failover when its server is cleanly shutdown
then you must set the property <literal>FailoverOnServerShutdown</literal>
to true</para>
</note>
@@ -246,71 +246,71 @@
sessions, please see <xref linkend="examples.transaction-failover"/> and <xref
linkend="examples.non-transaction-failover"/>.</para>
<section id="ha.automatic.failover.noteonreplication">
- <title>A note on server replication</title>
- <para>HornetQ does not replicate full server state betwen live and backup servers,
- so when the new session is automatically recreated on the backup it won't have
+ <title>A Note on Server Replication</title>
+ <para>HornetQ does not replicate full server state betwen live and backup servers.
+ When the new session is automatically recreated on the backup it won't have
any knowledge of messages already sent or acknowledged in that session. Any
inflight sends or acknowledgements at the time of failover might also be
lost.</para>
<para>By replicating full server state, theoretically we could provide a 100%
transparent seamless failover, which would avoid any lost messages or
- acknowledgements, however this comes at a great cost - replicating the full
- server state - that's all the queues, sessions etc, would require replication of
- the entire server state machine - every operation on the live server would have
+ acknowledgements, however this comes at a great cost: replicating the full
+ server state (including the queues, session, etc.). This would require replication of
+ the entire server state machine; every operation on the live server would have
to replicated on the replica server(s) in the exact same global order to ensure
a consistent replica state. This is extremely hard to do in a performant and
scalable way, especially when one considers that multiple threads are changing
the live server state concurrently.</para>
- <para>Some solutions which do provide full state machine replication do so by using
+ <para>Some solutions which provide full state machine replication use
techniques such as <emphasis role="italic">virtual synchrony</emphasis>, but
this does not scale well and effectively serializes all operations to a single
thread, dramatically reducing concurrency.</para>
<para>Other techniques for multi-threaded active replication exist such as
replicating lock states or replicating thread scheduling but this is very hard
to achieve at a Java level.</para>
- <para>Consequently it as decided it was not worth massively reducing performance and
+ <para>Consequently it xas decided it was not worth massively reducing performance and
concurrency for the sake of 100% transparent failover. Even without 100%
- transparent failover it is simple to guarantee <emphasis role="italic">once and
- only once</emphasis> delivery guarantees, even in the case of failure, by
- using a combination of duplicate detection and retrying of transactions, however
+ transparent failover, it is simple to guarantee <emphasis role="italic">once and
+ only once</emphasis> delivery, even in the case of failure, by
+ using a combination of duplicate detection and retrying of transactions. However
this is not 100% transparent to the client code.</para>
</section>
<section id="ha.automatic.failover.blockingcalls">
- <title>Handling blocking calls during failover</title>
- <para>If the client code is in a blocking call to the server when failover occurs,
- expecting a response before it can continue, then on failover the new session
- won't have any knowledge of the call that was in progress, and the call might
+ <title>Handling Blocking Calls During Failover</title>
+ <para>If the client code is in a blocking call to the server, waiting for
+ a response to continue its execution, when failover occurs, the new session
+ will not have any knowledge of the call that was in progress. This call might
otherwise hang for ever, waiting for a response that will never come.</para>
- <para>To remedy this, HornetQ will unblock any unblocking calls that were in
+ <para>To prevent this, HornetQ will unblock any blocking calls that were in
progress at the time of failover by making them throw a <literal
>javax.jms.JMSException</literal> (if using JMS), or a <literal
>HornetQException</literal> with error code <literal
- >HornetQException.UNBLOCKED</literal>. It is up to the user code to catch
+ >HornetQException.UNBLOCKED</literal>. It is up to the client code to catch
this exception and retry any operations if desired.</para>
</section>
<section id="ha.automatic.failover.transactions">
- <title>Handling failover with transactions</title>
+ <title>Handling Failover With Transactions</title>
<para>If the session is transactional and messages have already been sent or
acknowledged in the current transaction, then the server cannot be sure that
- messages sent or acknowledgements haven't been lost during the failover.</para>
+ messages sent or acknowledgements have not been lost during the failover.</para>
<para>Consequently the transaction will be marked as rollback-only, and any
- subsequent attempt to commit it, will throw a <literal
+ subsequent attempt to commit it will throw a <literal
>javax.jms.TransactionRolledBackException</literal> (if using JMS), or a
<literal>HornetQException</literal> with error code <literal
>HornetQException.TRANSACTION_ROLLED_BACK</literal> if using the core
API.</para>
<para>It is up to the user to catch the exception, and perform any client side local
- rollback code as necessary, the user can then just retry the transactional
+ rollback code as necessary. The user can then just retry the transactional
operations again on the same session.</para>
- <para>HornetQ ships with a fully functioning example demonstrating how to do this
+ <para>HornetQ ships with a fully functioning example demonstrating how to do this, please
see <xref linkend="examples.transaction-failover"/></para>
<para>If failover occurs when a commit call is being executed, the server, as
- previously described will unblock the call to prevent a hang, since the response
- will not come back from the backup node. In this case it is not easy for the
+ previously described, will unblock the call to prevent a hang, since no response
+ will come back. In this case it is not easy for the
client to determine whether the transaction commit was actually processed on the
live server before failure occurred.</para>
<para>To remedy this, the client can simply enable duplicate detection (<xref
- linkend="duplicate-detection"/>) in the transaction, and just retry the
+ linkend="duplicate-detection"/>) in the transaction, and retry the
transaction operations again after the call is unblocked. If the transaction had
indeed been committed on the live server successfully before failover, then when
the transaction is retried, duplicate detection will ensure that any persistent
@@ -324,9 +324,9 @@
</note>
</section>
<section id="ha.automatic.failover.nontransactional">
- <title>Handling failover with non transactional sessions</title>
- <para>If the session is non transactional, you may get lost messages or
- acknowledgements in the event of failover.</para>
+ <title>Handling Failover With Non Transactional Sessions</title>
+ <para>If the session is non transactional, messages or
+ acknowledgements can be lost in the event of failover.</para>
<para>If you wish to provide <emphasis role="italic">once and only once</emphasis>
delivery guarantees for non transacted sessions too, then make sure you send
messages blocking, enabled duplicate detection, and catch unblock exceptions as
@@ -336,7 +336,7 @@
</section>
</section>
<section>
- <title>Getting notified of connection failure</title>
+ <title>Getting Notified of Connection Failure</title>
<para>JMS provides a standard mechanism for getting notified asynchronously of
connection failure: <literal>java.jms.ExceptionListener</literal>. Please consult
the JMS javadoc or any good JMS tutorial for more information on how to use
@@ -354,10 +354,10 @@
connection failure yourself, and code your own manually reconnection logic in your
own failure handler. We define this as <emphasis>application-level</emphasis>
failover, since the failover is handled at the user application level.</para>
- <para>To implement application-level failover, if you're using JMS then you need to code
+ <para>To implement application-level failover, if you're using JMS then you need to set
an <literal>ExceptionListener</literal> class on the JMS connection. The <literal
>ExceptionListener</literal> will be called by HornetQ in the event that
- connection failure is detected. In your <literal>ExceptionListener</literal> you
+ connection failure is detected. In your <literal>ExceptionListener</literal>, you
would close your old JMS connections, potentially look up new connection factory
instances from JNDI and creating new connections. In this case you may well be using
<ulink url="http://www.jboss.org/community/wiki/JBossHAJNDIImpl">HA-JNDI</ulink>
@@ -365,8 +365,8 @@
server.</para>
<para>For a working example of application-level failover, please see <xref
linkend="application-level-failover"/>.</para>
- <para>If you are using the core API, then the procedure is very similar: you would code
- a <literal>FailureListener</literal> on your core <literal>ClientSession</literal>
+ <para>If you are using the core API, then the procedure is very similar: you would set
+ a <literal>FailureListener</literal> on the core <literal>ClientSession</literal>
instances.</para>
</section>
</section>
15 years, 1 month
JBoss hornetq SVN: r8570 - in trunk: src/main/org/hornetq/core/persistence/impl/journal and 1 other directories.
by do-not-reply@jboss.org
Author: clebert.suconic(a)jboss.com
Date: 2009-12-04 15:34:45 -0500 (Fri, 04 Dec 2009)
New Revision: 8570
Modified:
trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java
trunk/src/main/org/hornetq/core/journal/impl/AbstractSequentialFileFactory.java
trunk/src/main/org/hornetq/core/journal/impl/JournalCompactor.java
trunk/src/main/org/hornetq/core/journal/impl/TimedBuffer.java
trunk/src/main/org/hornetq/core/persistence/impl/journal/OperationContextImpl.java
trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/OperationContextUnitTest.java
Log:
A few tweaks on the journal and OperationContext
Modified: trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java 2009-12-04 20:25:10 UTC (rev 8569)
+++ trunk/src/main/org/hornetq/core/journal/impl/AbstractJournalUpdateTask.java 2009-12-04 20:34:45 UTC (rev 8570)
@@ -187,7 +187,9 @@
if (writingChannel != null)
{
sequentialFile.position(0);
- sequentialFile.writeDirect(writingChannel.toByteBuffer(), true);
+ SimpleWaitIOCallback completion = new SimpleWaitIOCallback();
+ sequentialFile.writeDirect(writingChannel.toByteBuffer(), true, completion);
+ completion.waitCompletion();
sequentialFile.close();
newDataFiles.add(currentFile);
}
@@ -224,7 +226,7 @@
writingChannel.writeInt(fileID);
}
- protected void addToRecordsSnaptsho(long id)
+ protected void addToRecordsSnaptshot(long id)
{
recordsSnapshot.add(id);
}
Modified: trunk/src/main/org/hornetq/core/journal/impl/AbstractSequentialFileFactory.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/AbstractSequentialFileFactory.java 2009-12-04 20:25:10 UTC (rev 8569)
+++ trunk/src/main/org/hornetq/core/journal/impl/AbstractSequentialFileFactory.java 2009-12-04 20:34:45 UTC (rev 8570)
@@ -141,7 +141,8 @@
{
if (timedBuffer != null)
{
- timedBuffer.flush();
+ // When moving to a new file, we need to make sure any pending buffer will be transfered to the buffer
+ timedBuffer.flush(true);
timedBuffer.setObserver(null);
}
}
Modified: trunk/src/main/org/hornetq/core/journal/impl/JournalCompactor.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/JournalCompactor.java 2009-12-04 20:25:10 UTC (rev 8569)
+++ trunk/src/main/org/hornetq/core/journal/impl/JournalCompactor.java 2009-12-04 20:34:45 UTC (rev 8570)
@@ -176,7 +176,7 @@
{
for (long id : ids)
{
- addToRecordsSnaptsho(id);
+ addToRecordsSnaptshot(id);
}
}
@@ -184,7 +184,7 @@
{
for (long id : ids2)
{
- addToRecordsSnaptsho(id);
+ addToRecordsSnaptshot(id);
}
}
}
Modified: trunk/src/main/org/hornetq/core/journal/impl/TimedBuffer.java
===================================================================
--- trunk/src/main/org/hornetq/core/journal/impl/TimedBuffer.java 2009-12-04 20:25:10 UTC (rev 8569)
+++ trunk/src/main/org/hornetq/core/journal/impl/TimedBuffer.java 2009-12-04 20:34:45 UTC (rev 8570)
@@ -272,9 +272,18 @@
public void flush()
{
+ flush(false);
+ }
+
+ /**
+ * force means the Journal is moving to a new file. Any pending write need to be done immediately
+ * or data could be lost
+ * */
+ public void flush(final boolean force)
+ {
synchronized (this)
{
- if (!delayFlush && buffer.writerIndex() > 0)
+ if ((force || !delayFlush) && buffer.writerIndex() > 0)
{
int pos = buffer.writerIndex();
Modified: trunk/src/main/org/hornetq/core/persistence/impl/journal/OperationContextImpl.java
===================================================================
--- trunk/src/main/org/hornetq/core/persistence/impl/journal/OperationContextImpl.java 2009-12-04 20:25:10 UTC (rev 8569)
+++ trunk/src/main/org/hornetq/core/persistence/impl/journal/OperationContextImpl.java 2009-12-04 20:34:45 UTC (rev 8570)
@@ -223,9 +223,6 @@
*/
public void complete()
{
- // We hold errors until the complete is set, or the callbacks will never get informed
- errorCode = -1;
- errorMessage = null;
}
/* (non-Javadoc)
Modified: trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/OperationContextUnitTest.java
===================================================================
--- trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/OperationContextUnitTest.java 2009-12-04 20:25:10 UTC (rev 8569)
+++ trunk/tests/src/org/hornetq/tests/unit/core/persistence/impl/OperationContextUnitTest.java 2009-12-04 20:34:45 UTC (rev 8570)
@@ -19,6 +19,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import org.hornetq.core.journal.IOAsyncTask;
import org.hornetq.core.persistence.impl.journal.OperationContextImpl;
import org.hornetq.tests.util.UnitTestCase;
@@ -42,7 +43,7 @@
// Public --------------------------------------------------------
- public void testCaptureException() throws Exception
+ public void testCaptureExceptionOnExecutor() throws Exception
{
ExecutorService executor = Executors.newSingleThreadExecutor();
executor.shutdown();
@@ -92,6 +93,80 @@
assertEquals(1, numberOfFailures.get());
}
+ public void testCaptureExceptionOnFailure() throws Exception
+ {
+ ExecutorService executor = Executors.newSingleThreadExecutor();
+
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ final OperationContextImpl context = new OperationContextImpl(executor)
+ {
+ public void complete()
+ {
+ super.complete();
+ latch.countDown();
+ }
+
+ };
+
+ context.storeLineUp();
+
+ final AtomicInteger failures = new AtomicInteger(0);
+
+ Thread t = new Thread()
+ {
+ public void run()
+ {
+ try
+ {
+ context.waitCompletion(5000);
+ }
+ catch (Throwable e)
+ {
+ e.printStackTrace();
+ failures.incrementAndGet();
+ }
+ }
+ };
+
+ t.start();
+
+ // Need to wait complete to be called first or the test would be invalid.
+ // We use a latch instead of forcing a sleep here
+ assertTrue(latch.await(5, TimeUnit.SECONDS));
+
+ context.onError(1, "Poop happens!");
+
+ t.join();
+
+ assertEquals(1, failures.get());
+
+
+ failures.set(0);
+
+ final AtomicInteger operations = new AtomicInteger(0);
+
+ // We should be up to date with lineUps and executions. this should now just finish processing
+ context.executeOnCompletion(new IOAsyncTask()
+ {
+
+ public void done()
+ {
+ operations.incrementAndGet();
+ }
+
+ public void onError(int errorCode, String errorMessage)
+ {
+ failures.incrementAndGet();
+ }
+
+ });
+
+
+ assertEquals(1, failures.get());
+ assertEquals(0, operations.get());
+ }
+
// Package protected ---------------------------------------------
// Protected -----------------------------------------------------
15 years, 1 month
JBoss hornetq SVN: r8569 - in trunk: examples/javaee/hajndi/config and 15 other directories.
by do-not-reply@jboss.org
Author: ataylor
Date: 2009-12-04 15:25:10 -0500 (Fri, 04 Dec 2009)
New Revision: 8569
Modified:
trunk/examples/javaee/ejb-jms-transaction/server/hornetq-configuration.xml
trunk/examples/javaee/hajndi/config/hornetq-queues.xml
trunk/examples/javaee/mdb-cmt-setrollbackonly/server/hornetq-configuration.xml
trunk/examples/javaee/mdb-cmt-tx-local/server/hornetq-configuration.xml
trunk/examples/javaee/mdb-cmt-tx-not-supported/server/hornetq-configuration.xml
trunk/examples/javaee/mdb-cmt-tx-required/server/hornetq-configuration.xml
trunk/examples/javaee/mdb-message-selector/server/hornetq-configuration.xml
trunk/examples/javaee/mdb-tx-send/server/hornetq-configuration.xml
trunk/examples/javaee/servlet-ssl/server/hornetq-configuration.xml
trunk/examples/javaee/servlet-transport/server/hornetq-configuration.xml
trunk/examples/javaee/xarecovery/server/hornetq-configuration.xml
trunk/src/config/jboss-as/clustered/hornetq-configuration.xml
trunk/src/config/jboss-as/non-clustered/hornetq-configuration.xml
trunk/src/config/stand-alone/clustered/hornetq-configuration.xml
trunk/src/config/stand-alone/non-clustered/hornetq-configuration.xml
trunk/src/config/trunk/clustered/hornetq-configuration.xml
trunk/src/config/trunk/non-clustered/hornetq-configuration.xml
Log:
removed clustered tags from config files
Modified: trunk/examples/javaee/ejb-jms-transaction/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/ejb-jms-transaction/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/ejb-jms-transaction/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -46,7 +46,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/hajndi/config/hornetq-queues.xml
===================================================================
--- trunk/examples/javaee/hajndi/config/hornetq-queues.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/hajndi/config/hornetq-queues.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -12,7 +12,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/mdb-cmt-setrollbackonly/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/mdb-cmt-setrollbackonly/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/mdb-cmt-setrollbackonly/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -52,7 +52,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/mdb-cmt-tx-local/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/mdb-cmt-tx-local/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/mdb-cmt-tx-local/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -52,7 +52,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/mdb-cmt-tx-not-supported/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/mdb-cmt-tx-not-supported/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/mdb-cmt-tx-not-supported/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -52,7 +52,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/mdb-cmt-tx-required/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/mdb-cmt-tx-required/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/mdb-cmt-tx-required/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -52,7 +52,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/mdb-message-selector/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/mdb-message-selector/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/mdb-message-selector/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -52,7 +52,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/mdb-tx-send/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/mdb-tx-send/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/mdb-tx-send/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -52,7 +52,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/servlet-ssl/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/servlet-ssl/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/servlet-ssl/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -52,7 +52,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/servlet-transport/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/servlet-transport/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/servlet-transport/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -49,7 +49,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/examples/javaee/xarecovery/server/hornetq-configuration.xml
===================================================================
--- trunk/examples/javaee/xarecovery/server/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/examples/javaee/xarecovery/server/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -46,7 +46,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/src/config/jboss-as/clustered/hornetq-configuration.xml
===================================================================
--- trunk/src/config/jboss-as/clustered/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/src/config/jboss-as/clustered/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -77,7 +77,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/src/config/jboss-as/non-clustered/hornetq-configuration.xml
===================================================================
--- trunk/src/config/jboss-as/non-clustered/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/src/config/jboss-as/non-clustered/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -51,7 +51,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/src/config/stand-alone/clustered/hornetq-configuration.xml
===================================================================
--- trunk/src/config/stand-alone/clustered/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/src/config/stand-alone/clustered/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -56,7 +56,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/src/config/stand-alone/non-clustered/hornetq-configuration.xml
===================================================================
--- trunk/src/config/stand-alone/non-clustered/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/src/config/stand-alone/non-clustered/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -30,7 +30,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/src/config/trunk/clustered/hornetq-configuration.xml
===================================================================
--- trunk/src/config/trunk/clustered/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/src/config/trunk/clustered/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -56,7 +56,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
Modified: trunk/src/config/trunk/non-clustered/hornetq-configuration.xml
===================================================================
--- trunk/src/config/trunk/non-clustered/hornetq-configuration.xml 2009-12-04 20:21:54 UTC (rev 8568)
+++ trunk/src/config/trunk/non-clustered/hornetq-configuration.xml 2009-12-04 20:25:10 UTC (rev 8569)
@@ -30,7 +30,6 @@
<address-settings>
<!--default for catch all-->
<address-setting match="#">
- <clustered>false</clustered>
<dead-letter-address>jms.queue.DLQ</dead-letter-address>
<expiry-address>jms.queue.ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
15 years, 1 month
JBoss hornetq SVN: r8568 - trunk/docs.
by do-not-reply@jboss.org
Author: timfox
Date: 2009-12-04 15:21:54 -0500 (Fri, 04 Dec 2009)
New Revision: 8568
Modified:
trunk/docs/README.html
Log:
updated readme
Modified: trunk/docs/README.html
===================================================================
--- trunk/docs/README.html 2009-12-04 20:07:00 UTC (rev 8567)
+++ trunk/docs/README.html 2009-12-04 20:21:54 UTC (rev 8568)
@@ -3,22 +3,22 @@
<head>
<meta content="text/html; charset=ISO-8859-1"
http-equiv="content-type">
- <title>HornetQ 2.0.0 Beta 5 Release Notes</title>
+ <title>HornetQ 2.0.0 CR1 Release Notes</title>
</head>
<body>
-<h1>Release Notes - HornetQ - Version 2.0.0 Beta 5</h1>
+<h1>Release Notes - HornetQ - Version 2.0.0 CR1</h1>
<br>
-<h2>25th August 2009</h2>
+<h2>5th December 2009</h2>
-These are the release notes for HornetQ 2.0.0 Beta 5<br><br>
+These are the release notes for HornetQ 2.0.0 CR 1<br><br>
For full description of the contents please see the
-<a href="https://jira.jboss.org/jira/browse/HORNETQ/fixforversion/12313892">HornetQ project JIRA</a>.<br><br>
+<a href="https://jira.jboss.org/jira/secure/ReleaseNote.jspa?projectId=12310830&st...">HornetQ project JIRA</a>.<br><br>
-This release is a feature complete release for forthcoming HornetQ 2.0.0<br>
+This release is a candidate release for forthcoming HornetQ 2.0.0.GA<br>
<br>
15 years, 1 month
JBoss hornetq SVN: r8567 - trunk/docs/user-manual/en.
by do-not-reply@jboss.org
Author: jmesnil
Date: 2009-12-04 15:07:00 -0500 (Fri, 04 Dec 2009)
New Revision: 8567
Modified:
trunk/docs/user-manual/en/configuration-index.xml
Log:
documentation updated
removed journal-flush-on-sync from configuration index
Modified: trunk/docs/user-manual/en/configuration-index.xml
===================================================================
--- trunk/docs/user-manual/en/configuration-index.xml 2009-12-04 19:59:07 UTC (rev 8566)
+++ trunk/docs/user-manual/en/configuration-index.xml 2009-12-04 20:07:00 UTC (rev 8567)
@@ -125,15 +125,6 @@
</row>
<row>
<entry><link
- linkend="configuring.message.journal.journal-flush-on-sync"
- >journal-flush-on-sync</link></entry>
- <entry>Boolean</entry>
- <entry>If true, transactions will ignore timeouts and be persisted
- immediately</entry>
- <entry>False</entry>
- </row>
- <row>
- <entry><link
linkend="configuring.message.journal.journal-compact-min-files"
>journal-compact-min-files</link></entry>
<entry>Integer</entry>
15 years, 1 month