[rhmessaging-commits] rhmessaging commits: r3465 - in store/trunk/cpp/tests: cluster and 3 other directories.

rhmessaging-commits at lists.jboss.org rhmessaging-commits at lists.jboss.org
Mon Jun 29 12:00:08 EDT 2009


Author: kpvdr
Date: 2009-06-29 12:00:07 -0400 (Mon, 29 Jun 2009)
New Revision: 3465

Modified:
   store/trunk/cpp/tests/Makefile.am
   store/trunk/cpp/tests/OrderingTest.cpp
   store/trunk/cpp/tests/SimpleTest.cpp
   store/trunk/cpp/tests/TransactionalTest.cpp
   store/trunk/cpp/tests/TwoPhaseCommitTest.cpp
   store/trunk/cpp/tests/clean.sh
   store/trunk/cpp/tests/cluster/Makefile.am
   store/trunk/cpp/tests/cluster/run_cluster_tests
   store/trunk/cpp/tests/jrnl/Makefile.am
   store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp
   store/trunk/cpp/tests/jrnl/_ut_jdir.cpp
   store/trunk/cpp/tests/jrnl/jtt/Makefile.am
   store/trunk/cpp/tests/jrnl/jtt/_ut_jrnl_instance.cpp
   store/trunk/cpp/tests/jrnl/run-journal-tests
   store/trunk/cpp/tests/python_tests/flow_to_disk.py
   store/trunk/cpp/tests/run_python_tests
   store/trunk/cpp/tests/system_test.sh
Log:
Updates and improvements to store tests; a reorganization of flow-to-disk tests.

Modified: store/trunk/cpp/tests/Makefile.am
===================================================================
--- store/trunk/cpp/tests/Makefile.am	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/Makefile.am	2009-06-29 16:00:07 UTC (rev 3465)
@@ -26,7 +26,7 @@
 
 INCLUDES=-I$(top_srcdir)/lib -I$(top_srcdir)/lib/gen
 
-TMP_STORE_DIR=$(abs_srcdir)/test_tmp
+TMP_DATA_DIR=$(abs_srcdir)/tmp_data_dir
 
 SUBDIRS = jrnl cluster .
 
@@ -81,5 +81,5 @@
   VALGRIND=$(VALGRIND) \
   abs_srcdir=$(abs_srcdir) \
   LIBSTORE=$(abs_builddir)/../lib/.libs/msgstore.so \
-  TMP_STORE_DIR=$(TMP_STORE_DIR) \
+  TMP_DATA_DIR=$(TMP_DATA_DIR) \
   $(srcdir)/run_test

Modified: store/trunk/cpp/tests/OrderingTest.cpp
===================================================================
--- store/trunk/cpp/tests/OrderingTest.cpp	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/OrderingTest.cpp	2009-06-29 16:00:07 UTC (rev 3465)
@@ -44,7 +44,7 @@
 QPID_AUTO_TEST_SUITE(OrderingTest)
 
 const std::string test_filename("OrderingTest");
-const char* tdp = getenv("TMP_STORE_DIR");
+const char* tdp = getenv("TMP_DATA_DIR");
 const std::string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/OrderingTest");
 
 // === Helper fns ===

Modified: store/trunk/cpp/tests/SimpleTest.cpp
===================================================================
--- store/trunk/cpp/tests/SimpleTest.cpp	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/SimpleTest.cpp	2009-06-29 16:00:07 UTC (rev 3465)
@@ -51,7 +51,7 @@
 QPID_AUTO_TEST_SUITE(SimpleTest)
 
 const string test_filename("SimpleTest");
-const char* tdp = getenv("TMP_STORE_DIR");
+const char* tdp = getenv("TMP_DATA_DIR");
 const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/SimpleTest");
 
 // === Helper fns ===

Modified: store/trunk/cpp/tests/TransactionalTest.cpp
===================================================================
--- store/trunk/cpp/tests/TransactionalTest.cpp	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/TransactionalTest.cpp	2009-06-29 16:00:07 UTC (rev 3465)
@@ -46,7 +46,7 @@
 QPID_AUTO_TEST_SUITE(TransactionalTest)
 
 const string test_filename("TransactionalTest");
-const char* tdp = getenv("TMP_STORE_DIR");
+const char* tdp = getenv("TMP_DATA_DIR");
 const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/TransactionalTest");
 
 // Test txn context which has special setCompleteFailure() method which prevents entire "txn complete" process from happening

Modified: store/trunk/cpp/tests/TwoPhaseCommitTest.cpp
===================================================================
--- store/trunk/cpp/tests/TwoPhaseCommitTest.cpp	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/TwoPhaseCommitTest.cpp	2009-06-29 16:00:07 UTC (rev 3465)
@@ -47,7 +47,7 @@
 QPID_AUTO_TEST_SUITE(TwoPhaseCommitTest)
 
 const string test_filename("TwoPhaseCommitTest");
-const char* tdp = getenv("TMP_STORE_DIR");
+const char* tdp = getenv("TMP_DATA_DIR");
 string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/TwoPhaseCommitTest");
 
 // === Helper fns ===

Modified: store/trunk/cpp/tests/clean.sh
===================================================================
--- store/trunk/cpp/tests/clean.sh	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/clean.sh	2009-06-29 16:00:07 UTC (rev 3465)
@@ -26,7 +26,7 @@
 # be run prior to the store system tests, as these are prone to crashing or
 # hanging under some circumstances if the database is old or inconsistent.
 
-if [ -d ${TMP_STORE_DIR} ]; then
-    rm -rf ${TMP_STORE_DIR}
+if [ -d ${TMP_DATA_DIR} ]; then
+    rm -rf ${TMP_DATA_DIR}
 fi
 rm -f ${abs_srcdir}/*.vglog*

Modified: store/trunk/cpp/tests/cluster/Makefile.am
===================================================================
--- store/trunk/cpp/tests/cluster/Makefile.am	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/cluster/Makefile.am	2009-06-29 16:00:07 UTC (rev 3465)
@@ -30,7 +30,7 @@
 
 INCLUDES=-I$(top_srcdir)/lib $(QPID_CXXFLAGS)
 
-TMP_STORE_DIR=$(abs_srcdir)/test_tmp
+TMP_DATA_DIR=$(abs_srcdir)/../tmp_data_dir
 
 QPID_TEST_DIR = $(QPID_DIR)/cpp/src/tests
 
@@ -48,7 +48,7 @@
   QPID_DIR=$(QPID_DIR) \
   VALGRIND=$(VALGRIND) \
   LIBSTORE=$(abs_builddir)/../../lib/.libs/msgstore.so \
-  TMP_STORE_DIR=$(TMP_STORE_DIR) \
+  TMP_DATA_DIR=$(TMP_DATA_DIR) \
   abs_srcdir=$(abs_srcdir)
 
 EXTRA_DIST = \

Modified: store/trunk/cpp/tests/cluster/run_cluster_tests
===================================================================
--- store/trunk/cpp/tests/cluster/run_cluster_tests	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/cluster/run_cluster_tests	2009-06-29 16:00:07 UTC (rev 3465)
@@ -61,7 +61,7 @@
 	export SENDER_EXEC=${QPID_DIR}/cpp/src/tests/sender
 else
 	# Check expected environment vars are set
-	VARS=(CLUSTER_DIR CPP_CLUSTER_EXEC PYTHONPATH QPIDD_EXEC CLUSTER_LIB QPID_CONFIG_EXEC QPID_ROUTE_EXEC RECEIVER_EXEC SENDER_EXEC)
+	VARS=(CLUSTER_DIR PYTHONPATH QPIDD_EXEC CLUSTER_LIB QPID_CONFIG_EXEC QPID_ROUTE_EXEC RECEIVER_EXEC SENDER_EXEC)
 	for var in ${VARS[@]}; do
 		if test -z ${!var}; then
 			echo "WARNING: environment variable ${var} not set."
@@ -100,16 +100,14 @@
 fi
 
 #Make sure temp dir exists if this is the first to use it
-TEST_DIR=${abs_srcdir}
-TMP_STORE_DIR=${TEST_DIR}/test_tmp
-if ! test -d ${TMP_STORE_DIR} ; then
-   	mkdir -p ${TMP_STORE_DIR}/cluster
+if ! test -d ${TMP_DATA_DIR} ; then
+   	mkdir -p ${TMP_DATA_DIR}/cluster
 else
     # Delete old cluster test dirs
-    rm -rf ${TMP_STORE_DIR}/cluster
-    mkdir -p ${TMP_STORE_DIR}/cluster
+    rm -rf ${TMP_DATA_DIR}/cluster
+    mkdir -p ${TMP_DATA_DIR}/cluster
 fi
-export TMP_STORE_DIR
+export TMP_DATA_DIR
 
 sg ais -c "./${PYTHON_CLUSTER_EXEC} -v"
 RETCODE=$?
@@ -117,7 +115,4 @@
     exit 1;
 fi
 
-# Delete cluster store dir if test was successful.
-rm -rf ${TMP_STORE_DIR}
-
 exit 0

Modified: store/trunk/cpp/tests/jrnl/Makefile.am
===================================================================
--- store/trunk/cpp/tests/jrnl/Makefile.am	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/jrnl/Makefile.am	2009-06-29 16:00:07 UTC (rev 3465)
@@ -26,13 +26,13 @@
 
 INCLUDES=-I$(top_srcdir)/lib
 
-TMP_STORE_DIR=$(abs_srcdir)/test_tmp
+TMP_DATA_DIR=$(abs_srcdir)/../tmp_data_dir
 
 SUBDIRS = jtt .
 
 TESTS_ENVIRONMENT = \
   VALGRIND=$(VALGRIND) \
-  TMP_STORE_DIR=$(TMP_STORE_DIR) \
+  TMP_DATA_DIR=$(TMP_DATA_DIR) \
   $(srcdir)/../run_test
 
 all-local: .valgrindrc .valgrind.supp
@@ -57,8 +57,7 @@
   _st_read \
   _st_read_txn \
   _st_auto_expand \
-  run-journal-tests \
-  ../clean.sh
+  run-journal-tests
 
 check_PROGRAMS = \
   _ut_time_ns \

Modified: store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp
===================================================================
--- store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp	2009-06-29 16:00:07 UTC (rev 3465)
@@ -44,8 +44,8 @@
 #define NUM_JFILES 4
 #define JFSIZE_SBLKS 128
 
-const char* tdp = getenv("TMP_STORE_DIR");
-const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/jrnl_test");
+const char* tdp = getenv("TMP_DATA_DIR");
+const string test_dir(tdp && strlen(tdp) > 0 ? string(tdp) + "/" + test_filename : "/tmp/jrnl_test");
 
 class test_dtok : public data_tok
 {

Modified: store/trunk/cpp/tests/jrnl/_ut_jdir.cpp
===================================================================
--- store/trunk/cpp/tests/jrnl/_ut_jdir.cpp	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/jrnl/_ut_jdir.cpp	2009-06-29 16:00:07 UTC (rev 3465)
@@ -49,8 +49,8 @@
 QPID_AUTO_TEST_SUITE(jdir_suite)
 
 const string test_filename("_ut_jdir");
-const char* tdp = getenv("TMP_STORE_DIR");
-const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/_ut_jdir");
+const char* tdp = getenv("TMP_DATA_DIR");
+const string test_dir(tdp && strlen(tdp) > 0 ? string(tdp) + "/_ut_jdir" : "/tmp/_ut_jdir");
 
 // === Helper functions ===
 

Modified: store/trunk/cpp/tests/jrnl/jtt/Makefile.am
===================================================================
--- store/trunk/cpp/tests/jrnl/jtt/Makefile.am	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/jrnl/jtt/Makefile.am	2009-06-29 16:00:07 UTC (rev 3465)
@@ -24,14 +24,14 @@
 
 AM_CXXFLAGS = $(WARNING_CFLAGS) -I${top_srcdir}/lib -pthread -DBOOST_TEST_DYN_LINK
 
-TMP_STORE_DIR=$(abs_srcdir)/test_tmp
+TMP_DATA_DIR=$(abs_srcdir)/../../tmp_data_dir
 
 LINK_BDB = ${top_builddir}/lib/msgstore.la
 
 TESTS_ENVIRONMENT = \
   VALGRIND=$(VALGRIND) \
   abs_srcdir=$(abs_srcdir) \
-  TMP_STORE_DIR=$(TMP_STORE_DIR) \
+  TMP_DATA_DIR=$(TMP_DATA_DIR) \
   $(srcdir)/../../run_test
 
 all-local: .valgrindrc .valgrind.supp
@@ -48,8 +48,7 @@
     _ut_test_case_result \
     _ut_test_case_result_agregation \
     _ut_test_case_set \
-    _ut_jrnl_instance \
-    ../../clean.sh
+    _ut_jrnl_instance
 
 check_PROGRAMS = jtt \
     _ut_data_src \

Modified: store/trunk/cpp/tests/jrnl/jtt/_ut_jrnl_instance.cpp
===================================================================
--- store/trunk/cpp/tests/jrnl/jtt/_ut_jrnl_instance.cpp	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/jrnl/jtt/_ut_jrnl_instance.cpp	2009-06-29 16:00:07 UTC (rev 3465)
@@ -37,7 +37,7 @@
 QPID_AUTO_TEST_SUITE(jtt_jrnl_instance)
 
 const string test_filename("_ut_jrnl_instance");
-const char* tdp = getenv("TMP_STORE_DIR");
+const char* tdp = getenv("TMP_DATA_DIR");
 const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/JttTest");
 
 QPID_AUTO_TEST_CASE(constructor_1)

Modified: store/trunk/cpp/tests/jrnl/run-journal-tests
===================================================================
--- store/trunk/cpp/tests/jrnl/run-journal-tests	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/jrnl/run-journal-tests	2009-06-29 16:00:07 UTC (rev 3465)
@@ -27,20 +27,20 @@
 # Run jtt using default test set
 echo
 echo "===== Mode 1: New journal instance, no recover ====="
-jtt/jtt --ja-path jtt --jrnl-dir ${TMP_STORE_DIR} --csv jtt/jtt.csv --format-chk --num-jrnls ${num_jrnls} || fail=1
-rm -rf ${TMP_STORE_DIR}/test_0*
+jtt/jtt --ja-path jtt --jrnl-dir ${TMP_DATA_DIR} --csv jtt/jtt.csv --format-chk --num-jrnls ${num_jrnls} || fail=1
+rm -rf ${TMP_DATA_DIR}/test_0*
 echo
 echo "===== Mode 2: Re-use journal instance, no recover ====="
-jtt/jtt --ja-path jtt --jrnl-dir ${TMP_STORE_DIR} --csv jtt/jtt.csv --reuse-instance --format-chk --num-jrnls ${num_jrnls} || fail=1
-rm -rf ${TMP_STORE_DIR}/test_0*
+jtt/jtt --ja-path jtt --jrnl-dir ${TMP_DATA_DIR} --csv jtt/jtt.csv --reuse-instance --format-chk --num-jrnls ${num_jrnls} || fail=1
+rm -rf ${TMP_DATA_DIR}/test_0*
 echo
 echo "===== Mode 3: New journal instance, recover previous test journal ====="
-jtt/jtt --ja-path jtt --jrnl-dir ${TMP_STORE_DIR} --csv jtt/jtt.csv --recover-mode --format-chk --num-jrnls ${num_jrnls} || fail=1
-rm -rf ${TMP_STORE_DIR}/test_0*
+jtt/jtt --ja-path jtt --jrnl-dir ${TMP_DATA_DIR} --csv jtt/jtt.csv --recover-mode --format-chk --num-jrnls ${num_jrnls} || fail=1
+rm -rf ${TMP_DATA_DIR}/test_0*
 echo
 echo "===== Mode 4: Re-use journal instance, recover previous test journal ====="
-jtt/jtt --ja-path jtt --jrnl-dir ${TMP_STORE_DIR} --csv jtt/jtt.csv --reuse-instance --recover-mode --format-chk --num-jrnls ${num_jrnls} || fail=1
-rm -rf ${TMP_STORE_DIR}/test_0*
+jtt/jtt --ja-path jtt --jrnl-dir ${TMP_DATA_DIR} --csv jtt/jtt.csv --reuse-instance --recover-mode --format-chk --num-jrnls ${num_jrnls} || fail=1
+rm -rf ${TMP_DATA_DIR}/test_0*
 echo
 
 exit $fail

Modified: store/trunk/cpp/tests/python_tests/flow_to_disk.py
===================================================================
--- store/trunk/cpp/tests/python_tests/flow_to_disk.py	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/python_tests/flow_to_disk.py	2009-06-29 16:00:07 UTC (rev 3465)
@@ -19,6 +19,7 @@
 #
 # The GNU Lesser General Public License is available in the file COPYING.
 
+import random, time
 from qpid.client import Client, Closed
 from qpid.queue import Empty
 from qpid.testlib import TestBase010
@@ -29,11 +30,11 @@
     """Tests for async store flow-to-disk"""
 
     XA_OK = 0
-    tx_counter = 0
+    txCounter = 0
 
     # --- Helper functions ---
 
-    def _browse(self, qn, dt, am, num_msgs, msg_size, txnConsume):
+    def _browse(self, qn, dt, am, numMsgs, msgSize, txnConsume):
         txid = None
         if txnConsume:
             txid = self._makeXid("consumer-xid-%s" % qn)
@@ -44,27 +45,27 @@
         self.session.message_flow(destination=dt, unit=self.session.credit_unit.byte, value=0xFFFFFFFF)
         queue = self.session.incoming(dt)
         ids = RangedSet()
-        for msg_num in range(0, num_msgs):
-            expected_str = self._makeMessage(msg_num, msg_size)
+        for msgNum in range(0, numMsgs):
+            expectedStr = self._makeMessage(msgNum, msgSize)
             msg = queue.get(timeout=5)
-            self.assertEqual(expected_str, msg.body)
+            self.assertEqual(expectedStr, msg.body)
             ids.add(msg.id)
         return ids, txid
 
-    def _checkCancel(self, qn, dt, num_msgs, ids):
+    def _checkCancel(self, qn, dt, numMsgs, ids):
         self.session.message_release(ids)
         self.session.queue_declare(queue=qn)
-        self.assertEqual(num_msgs, self.session.queue_query(queue=qn).message_count)
+        self.assertEqual(numMsgs, self.session.queue_query(queue=qn).message_count)
         self.session.message_cancel(destination=dt)
 
-    def _checkConsume(self, qn, am, num_msgs, ids, txid, txnConsume):
+    def _checkConsume(self, qn, am, numMsgs, ids, txid, txnConsume):
         if am == self.session.acquire_mode.not_acquired:
             self.session.queue_declare(queue=qn)
-            self.assertEqual(num_msgs, self.session.queue_query(queue=qn).message_count)
+            self.assertEqual(numMsgs, self.session.queue_query(queue=qn).message_count)
             response = self.session.message_acquire(ids)
-            for range_ in ids:
-                for msg_id in range_:
-                    self.assert_(msg_id in response.transfers)
+            for range in ids:
+                for msgId in range:
+                    self.assert_(msgId in response.transfers)
         self.session.message_accept(ids)
         if txnConsume:
             self.assertEqual(self.XA_OK, self.session.dtx_end(xid=txid).status)
@@ -88,23 +89,28 @@
         return msg
 
     def _makeXid(self, txid):
-        self.tx_counter += 1
-        branchqual = "v%s" % self.tx_counter
+        self.txCounter += 1
+        branchqual = "v%s" % self.txCounter
         return self.session.xid(format=0, global_id=txid, branch_id=branchqual)
 
-    def _produce(self, qn, dm, num_msgs, msg_size, txnProduce):
+    def _produce(self, qn, dm, numMsgs, msgSize, txnProduce):
         if txnProduce:
             txid = self._makeXid("producer-xid-%s" % qn)
             self.session.dtx_select()
             self.assertEqual(self.XA_OK, self.session.dtx_start(xid=txid).status)
-        for msg_num in range(0, num_msgs):
-            msg_str = self._makeMessage(msg_num, msg_size)
+        for msgNum in range(0, numMsgs):
+            msg_str = self._makeMessage(msgNum, msgSize)
             self.session.message_transfer(message=Message(self.session.delivery_properties(routing_key=qn, delivery_mode=dm), msg_str))
         if txnProduce:
             self.assertEqual(self.XA_OK, self.session.dtx_end(xid=txid).status)
             self.assertEqual(self.XA_OK, self.session.dtx_prepare(xid=txid).status)
             self.assertEqual(self.XA_OK, self.session.dtx_commit(xid=txid, one_phase=False).status)
         self._resetChannel()
+    
+    def _randomBool(self):
+        if random.randint(0, 1) > 0:
+            return True
+        return False
 
     def _resetChannel(self):
         self.session.close()
@@ -112,55 +118,45 @@
 
     # --- Simple tests ---
 
-    def test_FlowToDisk_00_SimpleMaxCountTransient(self):
-        self.simple_limit("test_FlowToDisk_00_SimpleMaxCountTransient", max_count = 10)
+    def test_FlowToDisk_00_SimpleMaxCount(self):
+        self.simpleLimit("test_FlowToDisk_00a", max_count = 10)
+        self.simpleLimit("test_FlowToDisk_00b", max_count = 10, persistent = True)
+        self.simpleLimit("test_FlowToDisk_00c", max_count = 10, max_size = 10000000, numMsgs = 100, msgSize = 10000)
+        self.simpleLimit("test_FlowToDisk_00d", max_count = 10, max_size = 10000000, persistent = True, numMsgs = 100, msgSize = 10000)
 
-    def test_FlowToDisk_01_SimpleMaxCountPersistent(self):
-        self.simple_limit("test_FlowToDisk_01_SimpleMaxCountPersistent", max_count = 10, persistent = True)
+    def test_FlowToDisk_01_SimpleMaxSize(self):
+        self.simpleLimit("test_FlowToDisk_01a", max_size = 100)
+        self.simpleLimit("test_FlowToDisk_01b", max_size = 100, persistent = True)
+        self.simpleLimit("test_FlowToDisk_01c", max_size = 100000, numMsgs = 100, msgSize = 10000)
+        self.simpleLimit("test_FlowToDisk_01d", max_size = 100000, persistent = True, numMsgs = 100, msgSize = 10000)
 
-    def test_FlowToDisk_02_SimpleMaxSizeTransient(self):
-        self.simple_limit("test_FlowToDisk_02_SimpleMaxSizeTransient", max_size = 100)
+    def test_FlowToDisk_02_SimpleMaxCountNotAcquired(self):
+        self.simpleLimit("test_FlowToDisk_02a", max_count = 10, pre_acquired = False)
+        self.simpleLimit("test_FlowToDisk_02b", max_count = 10, persistent = True, pre_acquired = False)
+        self.simpleLimit("test_FlowToDisk_02c", max_count = 10, max_size = 10000000, pre_acquired = False, numMsgs = 100, msgSize = 10000)
+        self.simpleLimit("test_FlowToDisk_02d", max_count = 10, max_size = 10000000, persistent = True, pre_acquired = False, numMsgs = 100, msgSize = 10000)
 
-    def test_FlowToDisk_03_SimpleMaxSizePersistent(self):
-        self.simple_limit("test_FlowToDisk_03_SimpleMaxSizePersistent", max_size = 100, persistent = True)
+    def test_FlowToDisk_03_SimpleMaxSizeNotAcquired(self):
+        self.simpleLimit("test_FlowToDisk_03a", max_size = 100, pre_acquired = False)
+        self.simpleLimit("test_FlowToDisk_03b", max_size = 100, persistent = True, pre_acquired = False)
+        self.simpleLimit("test_FlowToDisk_03c", max_size = 100, pre_acquired = False, numMsgs = 100, msgSize = 10000)
+        self.simpleLimit("test_FlowToDisk_03d", max_size = 100, persistent = True, pre_acquired = False, numMsgs = 100, msgSize = 10000)
+    
+    def test_FlowToDisk_04_MaxSizeMaxCount(self):
+        """Set both max-count and max-size at the same time"""
+        self.simpleLimit("test_FlowToDisk_04a", max_count = 10, max_size = 1000)
+        self.simpleLimit("test_FlowToDisk_04b", max_count = 10, max_size = 1000, msgSize = 250)
+        self.simpleLimit("test_FlowToDisk_04c", max_count = 10, max_size = 1000, persistent = True)
+        self.simpleLimit("test_FlowToDisk_04d", max_count = 10, max_size = 1000, msgSize = 250, persistent = True)
+    
+    def test_FlowToDisk_05_Randomized(self):
+        seed = long(1000.0 * time.time())
+        print "seed=0x%x" % seed
+        random.seed(seed)
+        for i in range(0, 10):
+            self.randomLimit(i)
 
-    def test_FlowToDisk_04_SimpleMaxCountTransientLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_04_SimpleMaxCountTransientLargeMsg", max_count = 10, max_size = 10000000, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_05_SimpleMaxCountPersistentLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_05_SimpleMaxCountPersistentLargeMsg", max_count = 10, max_size = 10000000, persistent = True, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_06_SimpleMaxSizeTransientLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_06_SimpleMaxSizeTransientLargeMsg", max_size = 100, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_07_SimpleMaxSizePersistentLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_07_SimpleMaxSizePersistentLargeMsg", max_size = 100, persistent = True, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_08_SimpleMaxCountTransientNotAcquired(self):
-        self.simple_limit("test_FlowToDisk_08_SimpleMaxCountTransientNotAcquired", max_count = 10, pre_acquired = False)
-
-    def test_FlowToDisk_09_SimpleMaxCountPersistentNotAcquired(self):
-        self.simple_limit("test_FlowToDisk_09_SimpleMaxCountPersistentNotAcquired", max_count = 10, persistent = True, pre_acquired = False)
-
-    def test_FlowToDisk_10_SimpleMaxSizeTransientNotAcquired(self):
-         self.simple_limit("test_FlowToDisk_10_SimpleMaxSizeTransientNotAcquired", max_size = 100, pre_acquired = False)
-
-    def test_FlowToDisk_11_SimpleMaxSizePersistentNotAcquired(self):
-        self.simple_limit("test_FlowToDisk_11_SimpleMaxSizePersistentNotAcquired", max_size = 100, persistent = True, pre_acquired = False)
-
-    def test_FlowToDisk_12_SimpleMaxCountTransientNotAcquiredLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_12_SimpleMaxCountTransientNotAcquiredLargeMsg", max_count = 10, max_size = 10000000, pre_acquired = False, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_13_SimpleMaxCountPersistentNotAcquiredLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_13_SimpleMaxCountPersistentNotAcquiredLargeMsg", max_count = 10, max_size = 10000000, persistent = True, pre_acquired = False, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_14_SimpleMaxSizeTransientNotAcquiredLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_14_SimpleMaxSizeTransientNotAcquiredLargeMsg", max_size = 100, pre_acquired = False, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_15_SimpleMaxSizePersistentNotAcquiredLargeMsg(self):
-        self.simple_limit("test_FlowToDisk_15_SimpleMaxSizePersistentNotAcquiredLargeMsg", max_size = 100, persistent = True, pre_acquired = False, num_msgs = 100, msg_size = 10000)
-
-    def simple_limit(self, qn, max_count = None, max_size = None, persistent = False, pre_acquired = True, num_msgs = 15, msg_size = None):
+    def simpleLimit(self, qn, max_count = None, max_size = None, persistent = False, pre_acquired = True, numMsgs = 15, msgSize = None, browse = True):
         qa = {'qpid.policy_type':'flow_to_disk'}
         if max_count != None:
             qa['qpid.max_count'] = max_count
@@ -178,9 +174,46 @@
         for i in range(0, 4):
             tp = i & 1 != 0 # Transactional produce
             tc = i & 2 != 0 # Transactional consume
-            self.tx_simple_limit(qn, qa, dm, am, num_msgs, msg_size, tp, tc)      
+            self.txSimpleLimit(qn, qa, dm, am, numMsgs, msgSize, tp, tc, browse)
 
-    def tx_simple_limit(self, qn, qa, dm, am, num_msgs, msg_size, tp, tc):
+    def randomLimit(self, count):
+        qa = {'qpid.policy_type':'flow_to_disk'}
+        qn = "randomized_test_%04d" % count
+        
+        # Flow to disk policy
+        maxCount = None
+        if self._randomBool():
+            maxCount = random.randint(0,10000)
+            qa['qpid.max_count'] = maxCount
+        maxSize = None
+        if self._randomBool():
+            maxSize = random.randint(0, 1000000)
+            qa['qpid.max_size'] = maxSize
+            
+        # Persistence
+        if self._randomBool():
+            dm = self.session.delivery_mode.persistent
+        else:
+            dm = self.session.delivery_mode.non_persistent
+        
+        # Acquired mode
+        if self._randomBool():
+            am = self.session.acquire_mode.pre_acquired
+            browse = False
+        else:
+            am = self.session.acquire_mode.not_acquired
+            browse = self._randomBool()
+        
+        numMsgs = random.randint(1, 10000)
+        sizeLimit = int(1000000 / numMsgs)
+        msgSize = random.randint(1, sizeLimit)
+        tp = self._randomBool()
+        tc = self._randomBool()
+        
+        #print "  qn=%s, qa=%s, dm=%s, am=%s, numMsgs=%d, msgSize=%d, tp=%s, tc=%s, browse=%s" % (qn, qa, dm, am, numMsgs, msgSize, tp, tc, browse)
+        self.txSimpleLimit(qn, qa, dm, am, numMsgs, msgSize, tp, tc, browse)
+
+    def txSimpleLimit(self, qn, qa, dm, am, numMsgs, msgSize, tp, tc, browse):
         """
         Test a simple case of message limits which will force flow-to-disk.
         * queue_args sets a limit - either max_count and/or max_size
@@ -192,78 +225,16 @@
         self.session.queue_declare(queue=qn, durable=True, arguments=qa)
         
         # --- Add messages ---
-        self._produce(qn, dm, num_msgs, msg_size, tp)
+        self._produce(qn, dm, numMsgs, msgSize, tp)
         
-        # --- Browse messages, then consume ---
-        dt = "tag-%d-%d" % (tp, tc)
-        ids, txid = self._browse(qn, dt, am, num_msgs, msg_size, tc)
-        self._checkConsume(qn, am, num_msgs, ids, txid, tc)
-        self._checkEmpty(qn)
-
-
-    def test_FlowToDisk_50_MaxCountBrowseConsumeTransient(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_50_MaxCountBrowseConsumeTransient", max_count = 10)
-
-    def test_FlowToDisk_51_MaxCountBrowseConsumePersistent(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_51_MaxCountBrowseConsumePersistent", max_count = 10, persistent = True)
-
-    def test_FlowToDisk_52_MaxSizeBrowseConsumeTransient(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_52_MaxSizeBrowseConsumeTransient", max_size = 100)
-
-    def test_FlowToDisk_53_MaxSizeBrowseConsumePersistent(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_53_MaxSizeBrowseConsumePersistent", max_size = 100, persistent = True)
-
-    def test_FlowToDisk_54_MaxCountBrowseConsumeTransientLargeMsg(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_54_MaxCountBrowseConsumeTransientLargeMsg", max_count = 10, max_size = 10000000, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_55_MaxCountBrowseConsumePersistentLargeMsg(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_55_MaxCountBrowseConsumePersistentLargeMsg", max_count = 10, max_size = 10000000, persistent = True, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_56_MaxSizeBrowseConsumeTransientLargeMsg(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_56_MaxSizeBrowseConsumeTransientLargeMsg", max_size = 100, num_msgs = 100, msg_size = 10000)
-
-    def test_FlowToDisk_57_MaxSizeBrowseConsumePersistentLargeMsg(self):
-        self.not_acquired_browse_consume_limit("test_FlowToDisk_57_MaxSizeBrowseConsumePersistentLargeMsg", max_size = 100, persistent = True, num_msgs = 100, msg_size = 10000)
-
-    def not_acquired_browse_consume_limit(self, qn, max_count = None, max_size = None, persistent = False, num_msgs = 15, msg_size = None):
-        qa = {'qpid.policy_type':'flow_to_disk'}
-        if max_count != None:
-            qa['qpid.max_count'] = max_count
-        if max_size != None:
-            qa['qpid.max_size'] = max_size
-        if persistent:
-            dm = self.session.delivery_mode.persistent
-        else:
-            dm = self.session.delivery_mode.non_persistent
-        # Cycle through the produce/consume block transaction combinations
-        for i in range(0, 4):
-            tp = i & 1 != 0 # Transactional produce
-            tc = i & 2 != 0 # Transactional consume
-            self.tx_not_acquired_browse_consume_limit(qn, qa, dm, num_msgs, msg_size, tp, tc)
+        # --- Browse messages (if possible) ---
+        if am == self.session.acquire_mode.not_acquired and browse:
+            dtA = "tagA-%d-%d" % (tp, tc)
+            ids, txid = self._browse(qn, dtA, am, numMsgs, msgSize, False)
+            self._checkCancel(qn, dtA, numMsgs, ids)
         
-    def tx_not_acquired_browse_consume_limit(self, qn, qa, dm, num_msgs, msg_size, tp, tc):
-        """
-        Test to check browsing then subsequent consumption of flow-to-disk messages.
-        * 15 messages of size 10 are added. The last five will flow to disk.
-        * Browse 15 messages, then release them.
-        * Checks the broker still has all messages.
-        * Consumes 15 messages
-        * Checks the broker has no messages left.
-        """
-        
-        self.session.queue_declare(queue=qn, durable=True, arguments=qa)
-        am = self.session.acquire_mode.not_acquired
-        
-        # Add 15 messages
-        self._produce(qn, dm, num_msgs, msg_size, tp)
-        
-        # Browse 15 messages, then release and close
-        dtA = "tagA-%d-%d" % (tp, tc)
-        ids, txid = self._browse(qn, dtA, am, num_msgs, msg_size, False)
-        self._checkCancel(qn, dtA, num_msgs, ids)
-        
-        # --- Browse messages, then consume ---
+        # --- Consume messages ---
         dtB = "tagB-%d-%d" % (tp, tc)
-        ids, txid = self._browse(qn, dtB, am, num_msgs, msg_size, tc)
-        self._checkConsume(qn, am, num_msgs, ids, txid, tc)
+        ids, txid = self._browse(qn, dtB, am, numMsgs, msgSize, tc)
+        self._checkConsume(qn, am, numMsgs, ids, txid, tc)
         self._checkEmpty(qn)

Modified: store/trunk/cpp/tests/run_python_tests
===================================================================
--- store/trunk/cpp/tests/run_python_tests	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/run_python_tests	2009-06-29 16:00:07 UTC (rev 3465)
@@ -56,16 +56,15 @@
     exit
 fi
 
-BROKER_OPTS="--no-module-dir --load-module=${LIBSTORE} --data-dir=${TMP_STORE_DIR} --auth=no --log-enable info+ --log-to-file ${TMP_STORE_DIR}/broker.python-test.log"
+STORE_DIR=${TMP_DATA_DIR}/python
+BROKER_OPTS="--no-module-dir --load-module=${LIBSTORE} --data-dir=${STORE_DIR} --auth=no --log-enable info+ --log-to-file ${STORE_DIR}/broker.python-test.log"
 AMQP_SPEC=0-10-errata
 
 #Make sure temp dir exists if this is the first to use it
-if ! test -d ${TMP_STORE_DIR} ; then
-    mkdir -p ${TMP_STORE_DIR}
-   	mkdir -p ${TMP_STORE_DIR}/cluster
-elif ! test -d "${TMP_STORE_DIR}/cluster" ; then
-    mkdir -p "${TMP_STORE_DIR}/cluster"
+if test -d ${STORE_DIR} ; then
+    rm -rf ${STORE_DIR}
 fi
+mkdir -p ${STORE_DIR}
 
 if test -z ${QPIDD} ; then
 	export QPIDD=${QPID_DIR}/cpp/src/qpidd

Modified: store/trunk/cpp/tests/system_test.sh
===================================================================
--- store/trunk/cpp/tests/system_test.sh	2009-06-25 20:04:53 UTC (rev 3464)
+++ store/trunk/cpp/tests/system_test.sh	2009-06-29 16:00:07 UTC (rev 3465)
@@ -34,18 +34,12 @@
 test -f $xml_spec || error "$xml_spec not found: invalid \$QPID_DIR ?"
 export PYTHONPATH=$QPID_DIR/python
 
-# Create a temporary directory for store data.
-#if test $TMP_STORE_DIRx == x ; then
-#    export TMP_STORE_DIR=`mktemp -d` || error "Can't create temporary directory."
-#else
-#    export TMP_STORE_DIR=$TESTDIR
-#fi
-echo "Using directory $TMP_STORE_DIR"
+echo "Using directory $TMP_DATA_DIR"
 
 fail=0
 
 # Run the tests with a given set of flags
-BROKER_OPTS="--no-module-dir --load-module=$LIBSTORE --data-dir=$TMP_STORE_DIR --auth=no --wcache-page-size 16"
+BROKER_OPTS="--no-module-dir --load-module=$LIBSTORE --data-dir=$TMP_DATA_DIR --auth=no --wcache-page-size 16"
 run_tests() {
     for p in `seq 1 8`; do
 	$abs_srcdir/start_broker "$@" ${BROKER_OPTS} || { echo "FAIL broker start";  return 1; }




More information about the rhmessaging-commits mailing list