Author: kpvdr
Date: 2009-05-06 15:22:51 -0400 (Wed, 06 May 2009)
New Revision: 3363
Added:
store/trunk/cpp/tests/python_tests/cluster_basic.py
Modified:
store/trunk/cpp/tests/Makefile.am
store/trunk/cpp/tests/python_tests/__init__.py
store/trunk/cpp/tests/python_tests/flow_to_disk.py
store/trunk/cpp/tests/run_python_tests
store/trunk/cpp/tests/system_test.sh
Log:
Added initial Python cluster tests with persistence.
Modified: store/trunk/cpp/tests/Makefile.am
===================================================================
--- store/trunk/cpp/tests/Makefile.am 2009-05-06 19:07:31 UTC (rev 3362)
+++ store/trunk/cpp/tests/Makefile.am 2009-05-06 19:22:51 UTC (rev 3363)
@@ -78,9 +78,8 @@
TESTS_ENVIRONMENT = \
QPID_DIR=$(QPID_DIR) \
- QPIDD=$(QPID_DIR)/cpp/src/qpidd \
VALGRIND=$(VALGRIND) \
abs_srcdir=$(abs_srcdir) \
- LIBBDBSTORE=$(abs_builddir)/../lib/.libs/msgstore.so \
+ LIBSTORE=$(abs_builddir)/../lib/.libs/msgstore.so \
TMPDIR=$(TMPDIR) \
$(srcdir)/run_test
Modified: store/trunk/cpp/tests/python_tests/__init__.py
===================================================================
--- store/trunk/cpp/tests/python_tests/__init__.py 2009-05-06 19:07:31 UTC (rev 3362)
+++ store/trunk/cpp/tests/python_tests/__init__.py 2009-05-06 19:22:51 UTC (rev 3363)
@@ -1,6 +1,6 @@
# Do not delete - marks this directory as a python package.
-# Copyright (c) 2008 Red Hat, Inc.
+# Copyright (c) 2008, 2009 Red Hat, Inc.
#
# This file is part of the Qpid async store library msgstore.so.
#
@@ -21,4 +21,5 @@
#
# The GNU Lesser General Public License is available in the file COPYING.
+from cluster_basic import *
from flow_to_disk import *
Added: store/trunk/cpp/tests/python_tests/cluster_basic.py
===================================================================
--- store/trunk/cpp/tests/python_tests/cluster_basic.py (rev 0)
+++ store/trunk/cpp/tests/python_tests/cluster_basic.py 2009-05-06 19:22:51 UTC (rev 3363)
@@ -0,0 +1,111 @@
+# Copyright (c) 2009 Red Hat, Inc.
+#
+# This file is part of the Qpid async store library msgstore.so.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+# USA
+#
+# The GNU Lesser General Public License is available in the file COPYING.
+
+import os
+import signal
+from qpid.testlib import TestBaseCluster
+
+class BasicClusterTests(TestBaseCluster):
+ """Basic cluster with async store tests"""
+
+ _runClusterTests = os.getenv("RUN_CLUSTER_TESTS")
+
+ def test_Cluster_01_SingleClusterInitialization(self):
+ if self._runClusterTests == None:
+ print "skipped"
+ return
+ try:
+ clusterName = "test_Cluster_01_SingleClusterInitialization"
+ self.createCheckCluster(clusterName, 5)
+ self.checkNumBrokers(5)
+ self.stopCheckCluster(clusterName)
+ except:
+ self.killAllClusters()
+ raise
+
+ def test_Cluster_02_MultipleClusterInitialization(self):
+ if self._runClusterTests == None:
+ print "skipped"
+ return
+ try:
+ for i in range(0, 5):
+                clusterName = "test_Cluster_02_MultipleClusterInitialization.%d" % i
+ self.createCluster(clusterName, 5)
+ self.checkNumBrokers(25)
+            self.killCluster("test_Cluster_02_MultipleClusterInitialization.2")
+ self.checkNumBrokers(20)
+ self.stopCheckAll()
+ except:
+ self.killAllClusters()
+ raise
+
+ def test_Cluster_03_SingleClusterAddRemoveNodes(self):
+ if self._runClusterTests == None:
+ print "skipped"
+ return
+ try:
+ clusterName = "test_Cluster_03_SingleClusterAddRemoveNodes"
+ self.createCheckCluster(clusterName, 3)
+ for i in range(4,9):
+ self.createClusterNode(i, clusterName)
+ self.checkNumClusterBrokers(clusterName, 8)
+ self.killNode(2, clusterName)
+ self.killNode(5, clusterName)
+ self.killNode(6, clusterName)
+ self.checkNumClusterBrokers(clusterName, 5)
+ self.createClusterNode(9, clusterName)
+ self.createClusterNode(10, clusterName)
+ self.checkNumClusterBrokers(clusterName, 7)
+ self.stopCheckAll()
+ except:
+ self.killAllClusters()
+ raise
+
+# TODO: Un-comment this when the "Exchange already exists: amq.direct" error is fixed
+# def test_Cluster_04_SingleClusterRemoveRestoreNodes(self):
+# if self._runClusterTests == None:
+# print "skipped"
+# return
+# try:
+# clusterName = "test_Cluster_04_SingleClusterRemoveRestoreNodes"
+# self.createCheckCluster(clusterName, 6)
+# self.checkNumBrokers(6)
+# self.killNode(1, clusterName)
+# self.killNode(3, clusterName)
+# self.killNode(4, clusterName)
+# self.checkNumBrokers(3)
+# self.createClusterNode(1, clusterName)
+# self.createClusterNode(3, clusterName)
+# self.createClusterNode(4, clusterName)
+# self.checkNumClusterBrokers(clusterName, 6)
+# self.killNode(2, clusterName)
+# self.killNode(3, clusterName)
+# self.killNode(4, clusterName)
+# self.checkNumBrokers(3)
+# self.createClusterNode(2, clusterName)
+# self.createClusterNode(3, clusterName)
+# self.createClusterNode(4, clusterName)
+# self.checkNumClusterBrokers(clusterName, 6)
+# self.stopCheckAll()
+# except:
+# self.killAllClusters()
+# raise
+
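The tests above all repeat the same guard-and-cleanup pattern: skip unless RUN_CLUSTER_TESTS is set in the environment (run_python_tests below exports it only when the AIS prerequisites are met), and kill every cluster on failure so later tests start from clean broker state. The condensed sketch below is for illustration only; the helper name run_guarded and its callable argument are assumptions, not part of this commit or of qpid.testlib:

    import os

    def run_guarded(test_case, body):
        # Skip quietly when cluster tests are not enabled for this run.
        if os.getenv("RUN_CLUSTER_TESTS") is None:
            print "skipped"
            return
        try:
            body()                       # the actual cluster operations
        except:
            test_case.killAllClusters()  # leave no brokers behind on failure
            raise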
Modified: store/trunk/cpp/tests/python_tests/flow_to_disk.py
===================================================================
--- store/trunk/cpp/tests/python_tests/flow_to_disk.py 2009-05-06 19:07:31 UTC (rev 3362)
+++ store/trunk/cpp/tests/python_tests/flow_to_disk.py 2009-05-06 19:22:51 UTC (rev 3363)
@@ -28,37 +28,37 @@
class FlowToDiskTests(TestBase010):
"""Tests for async store flow-to-disk"""
- def test_01_simple_max_count_transient(self):
+ def test_FlowToDisk_01_SimpleMaxCountTransient(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_count': 10}
-        self.simple_limit("test_simple_max_count_transient", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.pre_acquired)
+        self.simple_limit("test_FlowToDisk_01_SimpleMaxCountTransient", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.pre_acquired)
- def test_02_simple_max_count_persistent(self):
+ def test_FlowToDisk_02_SimpleMaxCountPersistent(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_count': 10}
-        self.simple_limit("test_simple_max_count_persistent", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.pre_acquired)
+        self.simple_limit("test_FlowToDisk_02_SimpleMaxCountPersistent", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.pre_acquired)
- def test_03_simple_max_size_transient(self):
+ def test_FlowToDisk_03_SimpleMaxSizeTransient(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_size': 100}
-        self.simple_limit("test_simple_max_size_transient", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.pre_acquired)
+        self.simple_limit("test_FlowToDisk_03_SimpleMaxSizeTransient", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.pre_acquired)
- def test_04_simple_max_size_persistent(self):
+ def test_FlowToDisk_04_SimpleMaxSizePersistent(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_size': 100}
-        self.simple_limit("test_simple_max_size_persistent", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.pre_acquired)
+        self.simple_limit("test_FlowToDisk_04_SimpleMaxSizePersistent", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.pre_acquired)
- def test_05_simple_max_count_transient_not_acquired(self):
+ def test_FlowToDisk_05_SimpleMaxCountTransientNotAcquired(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_count': 10}
-        self.simple_limit("test_simple_max_count_transient_not_acquired", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.not_acquired)
+        self.simple_limit("test_FlowToDisk_05_SimpleMaxCountTransientNotAcquired", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.not_acquired)
- def test_06_simple_max_count_persistent_not_acquired(self):
+ def test_FlowToDisk_06_SimpleMaxCountPersistentNotAcquired(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_count': 10}
-        self.simple_limit("test_simple_max_count_persistent_not_acquired", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.not_acquired)
+        self.simple_limit("test_FlowToDisk_06_SimpleMaxCountPersistentNotAcquired", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.not_acquired)
- def test_07_simple_max_size_transient_not_acquired(self):
+ def test_FlowToDisk_07_SimpleMaxSizeTransientNotAcquired(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_size': 100}
-        self.simple_limit("test_simple_max_size_transient_not_acquired", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.not_acquired)
+        self.simple_limit("test_FlowToDisk_07_SimpleMaxSizeTransientNotAcquired", queue_args, self.session.delivery_mode.non_persistent, self.session.acquire_mode.not_acquired)
- def test_08_simple_max_size_persistent_not_acquired(self):
+ def test_FlowToDisk_08_SimpleMaxSizePersistentNotAcquired(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_size': 100}
-        self.simple_limit("test_simple_max_size_persistent_not_acquired", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.not_acquired)
+        self.simple_limit("test_FlowToDisk_08_SimpleMaxSizePersistentNotAcquired", queue_args, self.session.delivery_mode.persistent, self.session.acquire_mode.not_acquired)
def simple_limit(self, queue_name, queue_args, delivery_mode, acquire_mode):
"""
@@ -104,21 +104,21 @@
self.assertEqual(0, session.queue_query(queue=queue_name).message_count)
- def test_09_max_count_browse_consume_transient(self):
+ def test_FlowToDisk_09_MaxCountBrowseConsumeTransient(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_count': 10}
-        self.not_acquired_browse_consume_limit("test_max_count_browse_consume_transient", queue_args, self.session.delivery_mode.non_persistent)
+        self.not_acquired_browse_consume_limit("test_FlowToDisk_09_MaxCountBrowseConsumeTransient", queue_args, self.session.delivery_mode.non_persistent)
- def test_10_max_count_browse_consume_persistent(self):
+ def test_FlowToDisk_10_MaxCountBrowseConsumePersistent(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_count': 10}
-        self.not_acquired_browse_consume_limit("test_max_count_browse_consume_persistent", queue_args, self.session.delivery_mode.persistent)
+        self.not_acquired_browse_consume_limit("test_FlowToDisk_10_MaxCountBrowseConsumePersistent", queue_args, self.session.delivery_mode.persistent)
- def test_11_max_size_browse_consume_transient(self):
+ def test_FlowToDisk_11_MaxSizeBrowseConsumeTransient(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_size': 100}
-        self.not_acquired_browse_consume_limit("test_max_size_browse_consume_transient", queue_args, self.session.delivery_mode.non_persistent)
+        self.not_acquired_browse_consume_limit("test_FlowToDisk_11_MaxSizeBrowseConsumeTransient", queue_args, self.session.delivery_mode.non_persistent)
- def test_12_max_size_browse_consume_persistent(self):
+ def test_FlowToDisk_12_MaxSizeBrowseConsumePersistent(self):
queue_args = {'qpid.policy_type':'flow_to_disk',
'qpid.max_size': 100}
-        self.not_acquired_browse_consume_limit("test_max_size_browse_consume_persistent", queue_args, self.session.delivery_mode.persistent)
+        self.not_acquired_browse_consume_limit("test_FlowToDisk_12_MaxSizeBrowseConsumePersistent", queue_args, self.session.delivery_mode.persistent)
def not_acquired_browse_consume_limit(self, queue_name, queue_args, delivery_mode):
Modified: store/trunk/cpp/tests/run_python_tests
===================================================================
--- store/trunk/cpp/tests/run_python_tests 2009-05-06 19:07:31 UTC (rev 3362)
+++ store/trunk/cpp/tests/run_python_tests 2009-05-06 19:22:51 UTC (rev 3363)
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright (c) 2008 Red Hat, Inc.
+# Copyright (c) 2008, 2009 Red Hat, Inc.
#
# This file is part of the Qpid async store library msgstore.so.
#
@@ -21,46 +21,65 @@
#
# The GNU Lesser General Public License is available in the file COPYING.
-QPID_PYTHON_DIR=$QPID_DIR/python
+QPID_PYTHON_DIR=${QPID_DIR}/python
export PYTHONPATH=${QPID_PYTHON_DIR}:${abs_srcdir}
-BROKER_OPTS="--no-module-dir --load-module=${LIBBDBSTORE} --data-dir=${TMPDIR} --auth=no"
-PYTHON_TESTS=python_tests
-AMQP_SPEC=0-10-errata
-FAILING_PYTHON_TESTS=${abs_srcdir}/failing_python_tests.txt
-# Make sure ${QPID_DIR} contains what we need.
-if ! test -d "${QPID_DIR}" ; then
- echo "WARNING: QPID_DIR is not set - skipping python tests."
+if python -c "import qpid" ; then
+ PYTHON_TESTS=python_tests
+ FAILING_PYTHON_TESTS=${abs_srcdir}/failing_python_tests.txt
+else
+ echo "WARNING: Unable to load python qpid module - skipping python tests."
+ echo " QPID_DIR=${QPID_DIR}"
+ echo " PYTHONPATH=${PYTHONPATH}"
exit
fi
+BROKER_OPTS="--no-module-dir --load-module=${LIBSTORE} --data-dir=${TMPDIR} --auth=no"
+AMQP_SPEC=0-10-errata
+
#Make sure temp dir exists if this is the first to use it
if ! test -d ${TMPDIR} ; then
mkdir -p ${TMPDIR}
+ mkdir -p ${TMPDIR}/cluster
+elif ! test -d "${TMPDIR}/cluster" ; then
+ mkdir -p "${TMPDIR}/cluster"
fi
-#Split PYTHONPATH at ':' and check each path exists
-old_ifs=${IFS}
-IFS=':'
-missing=0
-for path in ${PYTHONPATH} ; do
- if ! test -d ${path} ; then
- echo "WARNING: ${path} not found."
- missing=1
- fi
-done
-IFS=${old_ifs}
+# Check AIS requirements
+id -nG | grep '\<ais\>' >/dev/null || NOGROUP="You are not a member of the ais group."
+ps -u root | grep 'aisexec\|corosync' >/dev/null || NOAISEXEC="The aisexec or corosync daemon is not running as root"
-fail=0
-if test ${missing} != 0 ; then
-    echo "WARNING: Path(s) in ${PYTHONPATH} not found - skipping python tests."
- exit 1
+if test -n "$NOGROUP" -o -n "$NOAISEXEC"; then
+ cat <<EOF
+
+ ========= WARNING: CLUSTERING TESTS DISABLED ==============
+
+ Tests that depend on the openais library (used for clustering)
+ will not be run because:
+
+ $NOGROUP
+ $NOAISEXEC
+
+ ===========================================================
+
+EOF
else
- # Run all python tests
- pwdir=$(pwd)
- cd ${QPID_PYTHON_DIR}
-    ./run-tests --skip-self-test -v -s ${AMQP_SPEC} -I ${FAILING_PYTHON_TESTS} -B "${BROKER_OPTS}" ${PYTHON_TESTS} || { echo "FAIL python tests for ${AMQP_SPEC}"; fail=1; }
- cd ${pwdir}
+ export RUN_CLUSTER_TESTS=1
+ if test -z ${LIBCLUSTER} ; then
+ export LIBCLUSTER=${QPID_DIR}/cpp/src/.libs/cluster.so
+ fi
+fi
- exit ${fail}
+if test -z ${QPIDD} ; then
+ export QPIDD=${QPID_DIR}/cpp/src/qpidd
fi
+
+fail=0
+
+# Run all python tests
+pwdir=$(pwd)
+cd ${QPID_PYTHON_DIR}
+./run-tests --skip-self-test -v -s ${AMQP_SPEC} -I ${FAILING_PYTHON_TESTS} -B "${BROKER_OPTS}" ${PYTHON_TESTS} || { echo "FAIL python tests for ${AMQP_SPEC}"; fail=1; }
+cd ${pwdir}
+
+exit ${fail}
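The AIS check added above is what gates the new cluster tests: RUN_CLUSTER_TESTS=1 is exported only when the current user belongs to the ais group and an aisexec or corosync daemon is running as root. A rough Python rendering of that shell check, for illustration only (the function name ais_prereqs_ok is an assumption, and checking supplementary group IDs only approximates what "id -nG" reports):

    import grp
    import os
    import subprocess

    def ais_prereqs_ok():
        # User must belong to the 'ais' group (checked against supplementary
        # group IDs; the shell script uses "id -nG | grep ais").
        try:
            in_ais_group = grp.getgrnam("ais").gr_gid in os.getgroups()
        except KeyError:
            in_ais_group = False          # no 'ais' group on this host
        # An aisexec or corosync daemon must be running as root.
        ps = subprocess.Popen(["ps", "-u", "root", "-o", "comm="],
                              stdout=subprocess.PIPE)
        out = ps.communicate()[0]
        return in_ais_group and ("aisexec" in out or "corosync" in out)

    if __name__ == "__main__":
        if ais_prereqs_ok():
            print "AIS prerequisites met - cluster tests enabled"
        else:
            print "AIS prerequisites not met - cluster tests will be skipped"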
Modified: store/trunk/cpp/tests/system_test.sh
===================================================================
--- store/trunk/cpp/tests/system_test.sh 2009-05-06 19:07:31 UTC (rev 3362)
+++ store/trunk/cpp/tests/system_test.sh 2009-05-06 19:22:51 UTC (rev 3363)
@@ -45,7 +45,7 @@
fail=0
# Run the tests with a given set of flags
-BROKER_OPTS="--no-module-dir --load-module=$LIBBDBSTORE --data-dir=$TMPDIR --auth=no --wcache-page-size 16"
+BROKER_OPTS="--no-module-dir --load-module=$LIBSTORE --data-dir=$TMPDIR --auth=no --wcache-page-size 16"
run_tests() {
for p in `seq 1 8`; do
        $abs_srcdir/start_broker "$@" ${BROKER_OPTS} || { echo "FAIL broker start"; return 1; }