rhmessaging commits: r3914 - store/trunk.
by rhmessaging-commits@lists.jboss.org
Author: gordonsim
Date: 2010-04-21 11:39:52 -0400 (Wed, 21 Apr 2010)
New Revision: 3914
Added:
store/trunk/test
Log:
testing write access is re-enabled
Added: store/trunk/test
===================================================================
--- store/trunk/test (rev 0)
+++ store/trunk/test 2010-04-21 15:39:52 UTC (rev 3914)
@@ -0,0 +1 @@
+this is a test
14 years, 8 months
rhmessaging commits: r3913 - store/trunk/cpp/tests.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2010-04-15 11:58:32 -0400 (Thu, 15 Apr 2010)
New Revision: 3913
Modified:
store/trunk/cpp/tests/OrderingTest.cpp
store/trunk/cpp/tests/SimpleTest.cpp
store/trunk/cpp/tests/TransactionalTest.cpp
store/trunk/cpp/tests/TwoPhaseCommitTest.cpp
Log:
Implementation of QPID-2509 (Remove message staging from C++ broker) - syncs with qpid r.934463
Modified: store/trunk/cpp/tests/OrderingTest.cpp
===================================================================
--- store/trunk/cpp/tests/OrderingTest.cpp 2010-04-14 17:34:58 UTC (rev 3912)
+++ store/trunk/cpp/tests/OrderingTest.cpp 2010-04-15 15:58:32 UTC (rev 3913)
@@ -105,7 +105,7 @@
sys::Timer t;
DtxManager mgr(t);
mgr.setStore (store.get());
- RecoveryManagerImpl recoveryMgr(queues, exchanges, links, mgr, 0);
+ RecoveryManagerImpl recoveryMgr(queues, exchanges, links, mgr);
store->recover(recoveryMgr);
queue = queues.find(name);
Modified: store/trunk/cpp/tests/SimpleTest.cpp
===================================================================
--- store/trunk/cpp/tests/SimpleTest.cpp 2010-04-14 17:34:58 UTC (rev 3912)
+++ store/trunk/cpp/tests/SimpleTest.cpp 2010-04-15 15:58:32 UTC (rev 3913)
@@ -70,7 +70,7 @@
sys::Timer t;
DtxManager mgr(t);
mgr.setStore (&store);
- RecoveryManagerImpl recovery(queues, exchanges, links, mgr, 0);
+ RecoveryManagerImpl recovery(queues, exchanges, links, mgr);
store.recover(recovery);
}
@@ -360,136 +360,6 @@
cout << "ok" << endl;
}
-QPID_AUTO_TEST_CASE(Staging)
-{
- cout << test_filename << ".Staging: " << flush;
-
- const string name("MyDurableQueue");
- const string exchange("MyExchange");
- const string routingKey("MyRoutingKey");
- const Uuid messageId(true);
- const string data1("abcdefghijklmnopqrstuvwxyz");
- const string data2("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
- {
- MessageStoreImpl store;
- store.init(test_dir, 4, 1, true); // truncate store
-
- //create & stage a message
- boost::intrusive_ptr<Message> msg = MessageUtils::createMessage(exchange, routingKey, messageId, (data1.size() + data2.size()));
- intrusive_ptr<PersistableMessage> pmsg = static_pointer_cast<PersistableMessage>(msg);
- intrusive_ptr<const PersistableMessage> cpmsg = static_pointer_cast<const PersistableMessage>(msg);
- msg->getProperties<DeliveryProperties>()->setDeliveryMode(PERSISTENT);
- FieldTable table;
- table.setString("abc", "xyz");
- msg->getProperties<MessageProperties>()->setApplicationHeaders(table);
- store.stage(pmsg);
-
- //append to it
- msg->setStore(&store);
- msg->releaseContent();//ensure that data is not held in memory but is appended to disk when added
- store.appendContent(cpmsg, data1);
- store.appendContent(cpmsg, data2);
-
- //enqueue it
- Queue::shared_ptr queue(new Queue(name, 0, &store, 0));
- FieldTable settings;
- queue->create(settings);
- queue->enqueue(0, msg);
-
- //load it (without recovery)
- DummyHandler handler;
- QueuedMessage qm(queue.get(), msg, 0);
- //52 chars of data, i.e. 2 chunks of 20 and one of 12
- MessageUtils::deliver(qm, handler, 20);
- BOOST_REQUIRE(handler.frames.size() > 1);
- string loaded;
- for (uint i = 1; i < handler.frames.size(); i++) {
- AMQContentBody* contentBody(dynamic_cast<AMQContentBody*>(handler.frames[i].getBody()));
- BOOST_REQUIRE(contentBody);
- loaded += contentBody->getData();
- }
- BOOST_CHECK_EQUAL(data1 + data2, loaded);
-
- }//db will be closed
- {
- //recover
- MessageStoreImpl store;
- store.init(test_dir, 4, 1);
- QueueRegistry registry;
- registry.setStore (&store);
- ExchangeRegistry exchanges;
- LinkRegistry links;
- sys::Timer t;
- DtxManager dtx(t);
- dtx.setStore (&store);
- RecoveryManagerImpl recovery(registry, exchanges, links, dtx, 10);
- store.recover(recovery);
-
- //get message instance from queue
- Queue::shared_ptr queue = registry.find(name);
- BOOST_REQUIRE(queue);
- BOOST_CHECK_EQUAL((u_int32_t) 1, queue->getMessageCount());
- boost::intrusive_ptr<Message> msg = queue->get().payload;
-
- //check headers
- BOOST_CHECK_EQUAL(exchange, msg->getExchangeName());
- BOOST_CHECK_EQUAL(routingKey, msg->getRoutingKey());
- BOOST_CHECK_EQUAL(messageId, msg->getProperties<MessageProperties>()->getMessageId());
- BOOST_CHECK_EQUAL((uint8_t) PERSISTENT, msg->getProperties<DeliveryProperties>()->getDeliveryMode());
- BOOST_REQUIRE(Str16Value("xyz") == *msg->getProperties<MessageProperties>()->getApplicationHeaders().get("abc"));
- BOOST_CHECK_EQUAL((u_int64_t) (data1.size() + data2.size()), msg->getFrames().getHeaders()->getContentLength());
-
- BOOST_CHECK_EQUAL((u_int64_t) 0, msg->contentSize());//ensure it is being lazily loaded
-
- //load lazily
- DummyHandler handler;
- QueuedMessage qm(queue.get(),msg,0);
- //52 chars of data, i.e. 2 chunks of 20 and one of 12
- MessageUtils::deliver(qm, handler, 20);
-
- BOOST_REQUIRE(handler.frames.size() > 1);
- string loaded;
- for (uint i = 1; i < handler.frames.size(); i++) {
- AMQContentBody* contentBody(dynamic_cast<AMQContentBody*>(handler.frames[i].getBody()));
- BOOST_REQUIRE(contentBody);
- loaded += contentBody->getData();
- }
- BOOST_CHECK_EQUAL(data1 + data2, loaded);
-
- //dequeue
- queue->dequeue(0, qm);
- }
-
- cout << "ok" << endl;
-}
-
-QPID_AUTO_TEST_CASE(DestroyStagedMessage)
-{
- cout << test_filename << ".DestroyStagedMessage: " << flush;
-
- MessageStoreImpl store;
- store.init(test_dir, 4, 1, true); // truncate store
-
- const string data("abcdefg");
- boost::intrusive_ptr<Message> msg(MessageUtils::createMessage("my_exchange", "my_routing_key", "my_message", data.length()));
- intrusive_ptr<PersistableMessage> pmsg = static_pointer_cast<PersistableMessage>(msg);
- intrusive_ptr<const PersistableMessage> cpmsg = static_pointer_cast<const PersistableMessage>(msg);
- MessageUtils::addContent(msg, data);
-
- store.stage(pmsg);
- store.destroy(*pmsg);
-
- try {
- string loaded;
- Queue queue("dummy", 0, &store, 0);
- store.loadContent(queue, cpmsg, loaded, 0, data.length());
- BOOST_FAIL("store.loadContent() did not throw StoreException as expected.");
- } catch (StoreException& e) {
- }
-
- cout << "ok" << endl;
-}
-
QPID_AUTO_TEST_CASE(DestroyEnqueuedMessage)
{
cout << test_filename << ".DestroyEnqueuedMessage: " << flush;
Modified: store/trunk/cpp/tests/TransactionalTest.cpp
===================================================================
--- store/trunk/cpp/tests/TransactionalTest.cpp 2010-04-14 17:34:58 UTC (rev 3912)
+++ store/trunk/cpp/tests/TransactionalTest.cpp 2010-04-15 15:58:32 UTC (rev 3913)
@@ -136,7 +136,7 @@
sys::Timer t;
DtxManager mgr(t);
mgr.setStore (store.get());
- RecoveryManagerImpl recovery(*queues, exchanges, links, mgr, 0);
+ RecoveryManagerImpl recovery(*queues, exchanges, links, mgr);
store->recover(recovery);
queueA = queues->find(nameA);
Modified: store/trunk/cpp/tests/TwoPhaseCommitTest.cpp
===================================================================
--- store/trunk/cpp/tests/TwoPhaseCommitTest.cpp 2010-04-14 17:34:58 UTC (rev 3912)
+++ store/trunk/cpp/tests/TwoPhaseCommitTest.cpp 2010-04-15 15:58:32 UTC (rev 3913)
@@ -361,7 +361,7 @@
links = std::auto_ptr<LinkRegistry>(new LinkRegistry);
dtxmgr = std::auto_ptr<DtxManager>(new DtxManager(t));
dtxmgr->setStore (store.get());
- RecoveryManagerImpl recovery(*queues, exchanges, *links, *dtxmgr, 0);
+ RecoveryManagerImpl recovery(*queues, exchanges, *links, *dtxmgr);
store->recover(recovery);
queueA = queues->find(nameA);
14 years, 8 months
rhmessaging commits: r3912 - in mgmt/newdata: cumin/python/cumin/account and 4 other directories.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2010-04-14 13:34:58 -0400 (Wed, 14 Apr 2010)
New Revision: 3912
Added:
mgmt/newdata/cumin/python/cumin/grid/daemon.py
Removed:
mgmt/newdata/cumin/python/cumin/action.py
mgmt/newdata/cumin/python/cumin/messaging/model.py
mgmt/newdata/cumin/python/cumin/table.py
mgmt/newdata/cumin/python/cumin/table.strings
Modified:
mgmt/newdata/cumin/python/cumin/account/main.py
mgmt/newdata/cumin/python/cumin/account/widgets.py
mgmt/newdata/cumin/python/cumin/grid/collector.py
mgmt/newdata/cumin/python/cumin/grid/collector.strings
mgmt/newdata/cumin/python/cumin/grid/job.strings
mgmt/newdata/cumin/python/cumin/grid/limit.py
mgmt/newdata/cumin/python/cumin/grid/main.py
mgmt/newdata/cumin/python/cumin/grid/model.py
mgmt/newdata/cumin/python/cumin/grid/negotiator.py
mgmt/newdata/cumin/python/cumin/grid/negotiator.strings
mgmt/newdata/cumin/python/cumin/grid/pool.py
mgmt/newdata/cumin/python/cumin/grid/scheduler.py
mgmt/newdata/cumin/python/cumin/grid/scheduler.strings
mgmt/newdata/cumin/python/cumin/grid/submission.py
mgmt/newdata/cumin/python/cumin/grid/submission.strings
mgmt/newdata/cumin/python/cumin/grid/submitter.py
mgmt/newdata/cumin/python/cumin/grid/submitter.strings
mgmt/newdata/cumin/python/cumin/main.py
mgmt/newdata/cumin/python/cumin/messaging/binding.py
mgmt/newdata/cumin/python/cumin/messaging/broker.py
mgmt/newdata/cumin/python/cumin/messaging/brokergroup.py
mgmt/newdata/cumin/python/cumin/messaging/brokerlink.py
mgmt/newdata/cumin/python/cumin/messaging/connection.py
mgmt/newdata/cumin/python/cumin/messaging/connection.strings
mgmt/newdata/cumin/python/cumin/messaging/exchange.py
mgmt/newdata/cumin/python/cumin/messaging/main.py
mgmt/newdata/cumin/python/cumin/messaging/queue.py
mgmt/newdata/cumin/python/cumin/model.py
mgmt/newdata/cumin/python/cumin/objectframe.py
mgmt/newdata/cumin/python/cumin/objectframe.strings
mgmt/newdata/cumin/python/cumin/objectselector.py
mgmt/newdata/cumin/python/cumin/objectselector.strings
mgmt/newdata/cumin/python/cumin/objecttask.py
mgmt/newdata/cumin/python/cumin/parameters.py
mgmt/newdata/cumin/python/cumin/sqladapter.py
mgmt/newdata/cumin/python/cumin/widgets.py
mgmt/newdata/misc/boneyard.py
mgmt/newdata/wooly/python/wooly/datatable.py
mgmt/newdata/wooly/python/wooly/table.py
Log:
* Tasks part 2; just a little more work remains here
* Provide a (read only) cursor on session
* Removed a lot of obsoleted Frame and View classes
Modified: mgmt/newdata/cumin/python/cumin/account/main.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/account/main.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/account/main.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -8,9 +8,9 @@
def __init__(self, app, name):
super(Module, self).__init__(app, name)
- cls = app.rosemary.com_redhat_cumin.User
+ #cls = app.rosemary.com_redhat_cumin.User
- ChangePassword(self, cls)
+ #ChangePassword(self, cls)
self.app.login_page = LoginPage(self.app, "login.html")
self.app.add_page(self.app.login_page)
Modified: mgmt/newdata/cumin/python/cumin/account/widgets.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/account/widgets.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/account/widgets.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -47,9 +47,9 @@
class SettingsView(Widget):
def init(self):
# XXX deferring this, but I don't like it
- task = self.app.account.ChangePassword
- link = ObjectTaskLink(self.app, "change_password", task, None)
- self.add_child(link)
+ #task = self.app.account.ChangePassword
+ #link = ObjectTaskLink(self.app, "change_password", task, None)
+ #self.add_child(link)
super(SettingsView, self).init()
@@ -111,19 +111,12 @@
if not self.errors.get(session):
cls = self.app.rosemary.com_redhat_cumin.User
- name_literal = "'%s'" % name
user = None
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
+ for obj in cls.get_selection(session.cursor, name=name):
+ user = obj
+ break
- try:
- for obj in cls.get_selection(cursor, name=name_literal):
- user = obj
- break
- finally:
- cursor.close()
-
if not user:
self.login_invalid.set(session, True)
return
@@ -203,7 +196,7 @@
password = self.new0.get(session)
self.task.invoke(session, user, password)
- self.task.exit_with_redirect(session, user)
+ self.task.exit_with_redirect(session)
class Current(PasswordField):
def render_title(self, session):
Deleted: mgmt/newdata/cumin/python/cumin/action.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/action.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/action.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,62 +0,0 @@
-from wooly import *
-from wooly.pages import *
-from wooly.tables import *
-from datetime import datetime
-
-from util import *
-from formats import *
-
-strings = StringCatalog(__file__)
-
-class ActionInvocationSet(ItemSet):
- def __init__(self, app, name):
- super(ActionInvocationSet, self).__init__(app, name)
-
- self.update_enabled = True
-
- def get_item_count(self, session, *args):
- items = self.do_get_items(session, *args)
- return len(items)
-
- def do_get_items(self, session, *args):
- nsecs = secs(datetime.now())
- return [x for x in reversed(sorted_by(self.app.model.invocations, "when")) if nsecs - secs(x.when) < 60]
-
- def render_item_content(self, session, item):
- delta = secs(datetime.now()) - secs(item.when)
- if delta < 60:
- return super(ActionInvocationSet, self).render_item_content(session, item)
-
- def render_when(self, session, item):
- delta = secs(datetime.now()) - secs(item.when)
- duration = fmt_duration(delta)
- if duration:
- return "%s ago" % duration
- else:
- return ""
-
- def render_description(self, session, item):
- return item.get_description(session)
-
- def render_status_class(self, session, item):
- text = "normal"
- if item.status == "pending":
- text = "warning"
- else:
- if item.exception:
- text = "error"
-
- return text
-
- def render_status(self, session, item):
- if item.status == "pending":
- text = "Pending"
- elif item.status == "OK":
- text = "Completed"
- else:
- if item.exception:
- text = "Failed: " + str(item.exception)
- else:
- text = "Failed: " + item.status
-
- return text
Modified: mgmt/newdata/cumin/python/cumin/grid/collector.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/collector.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/collector.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -5,6 +5,9 @@
from wooly.forms import *
from wooly.resources import *
from wooly.tables import *
+
+from cumin.objectframe import *
+from cumin.objecttask import *
from cumin.stat import *
from cumin.widgets import *
from cumin.parameters import *
@@ -16,72 +19,24 @@
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.grid.collector")
-class CollectorSet(CuminSelectionTable):
+class CollectorFrame(ObjectFrame):
def __init__(self, app, name):
- item = CollectorParameter(app, "item")
- super(CollectorSet, self).__init__(app, name, item)
+ cls = app.rosemary.mrg_grid.Collector
- col = self.NameColumn(app, "name")
- self.add_column(col)
- self.set_default_column(col)
+ super(CollectorFrame, self).__init__(app, name, cls)
- col = self.SystemColumn(app, "system")
- self.add_column(col)
+ self.view.add_tab(CollectorOverview(app, "stats", self.object))
- task = main.module.collector_set_start
- button = TaskButton(app, "start", task, self.selection)
- self.buttons.add_child(button)
+ self.start = DaemonStart(app, self, "COLLECTOR")
+ self.stop = DaemonStop(app, self, "COLLECTOR")
- task = main.module.collector_set_stop
- button = TaskButton(app, "stop", task, self.selection)
- self.buttons.add_child(button)
-
- def render_title(self, session):
- count = self.get_item_count(session)
- return "Collectors %s" % fmt_count(count)
-
- def render_sql_where(self, session):
- sql = "qmf_delete_time is null"
- return "where %s" % sql
-
- class NameColumn(SqlTableColumn):
- def render_title(self, session):
- return "Name"
-
- def render_content(self, session, data):
- coll = Identifiable(data["id"])
- href = self.page.main.grid.pool.collector.get_href(session, coll)
- return fmt_link(href, data["name"])
-
- class SystemColumn(SqlTableColumn):
- def render_title(self, session):
- return "System"
-
- def render_content(self, session, data):
- id = data["system_id"]
-
- if id:
- sys = Identifiable(id)
- href = self.page.main.inventory.system.get_href(session, sys)
- return fmt_link(href, data["system_name"])
-
-class CollectorFrame(CuminFrame):
- def __init__(self, app, name):
- super(CollectorFrame, self).__init__(app, name)
-
- self.object = CollectorParameter(app, "id")
- self.add_parameter(self.object)
-
- self.view = CollectorView(app, "view", self.object)
- self.add_mode(self.view)
-
class CollectorGeneralStatSet(StatSet):
def __init__(self, app, name, object):
super(CollectorGeneralStatSet, self).__init__(app, name, object)
self.attrs = ("RunningJobs", "IdleJobs",
- "HostsClaimed", "HostsUnclaimed",
- "HostsOwner", "HostsTotal")
+ "HostsClaimed", "HostsUnclaimed",
+ "HostsOwner", "HostsTotal")
class CollectorOverview(Widget):
def __init__(self, app, name, collector):
@@ -100,7 +55,7 @@
self.add_child(chart)
def render_title(self, session):
- return "Statistics"
+ return "Overview"
class JobStackedChart(StatFlashChart):
def render_title(self, session):
@@ -109,36 +64,3 @@
class SlotStackedChart(StatFlashChart):
def render_title(self, session, *args):
return "Slot state"
-
-class CollectorView(CuminView):
- def __init__(self, app, name, collector):
- super(CollectorView, self).__init__(app, name, collector)
-
- self.tabs = TabbedModeSet(app, "tabs")
- self.add_child(self.tabs)
-
- self.tabs.add_tab(CollectorOverview(app, "stats", collector))
- self.tabs.add_tab(CuminDetails(app, "details", collector))
-
-class CollectorStartForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(CollectorStartForm, self).__init__(app, name, task)
-
- self.object = CollectorParameter(app, "collector")
- self.add_parameter(self.object)
-
-class CollectorStopForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(CollectorStopForm, self).__init__(app, name, task)
-
- self.object = CollectorParameter(app, "collector")
- self.add_parameter(self.object)
-
-class CollectorSetTaskForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(CollectorSetTaskForm, self).__init__(app, name, task)
-
- item = CollectorParameter(app, "item")
-
- self.object = ListParameter(app, "collector", item)
- self.add_parameter(self.object)
Modified: mgmt/newdata/cumin/python/cumin/grid/collector.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/collector.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/collector.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,21 +1,3 @@
-[CollectorSet.sql]
-select
- c.id,
- c.name,
- y.id as system_id,
- y.node_name as system_name
-from collector as c
-left outer join sysimage as y on c.system = y.node_name
-left outer join collector_stats as cs on cs.id = c.stats_curr_id
-{sql_where}
-{sql_orderby}
-{sql_limit}
-
-[CollectorSet.count_sql]
-select count(1) from collector as c
-left outer join collector_stats as cs on cs.id = c.stats_curr_id
-{sql_where}
-
[CollectorOverview.html]
<table class="twocol">
<tbody>
Added: mgmt/newdata/cumin/python/cumin/grid/daemon.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/daemon.py (rev 0)
+++ mgmt/newdata/cumin/python/cumin/grid/daemon.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -0,0 +1,85 @@
+from cumin.objecttask import *
+
+class DaemonStart(ObjectTask):
+ def __init__(self, app, frame, target):
+ super(DaemonStart, self).__init__(app, frame)
+
+ self.target = target
+
+ self.name = "%s_%s" % (self.name, self.target)
+
+ def get_title(self, session):
+ return "Start"
+
+ def do_invoke(self, invoc, daemon):
+ system_name = daemon.System
+
+ try:
+ master = Master.select("System = '%s'" % system_name)[0]
+ except IndexError:
+ raise Exception("Master daemon not running")
+
+ master.Start(completion, self.target)
+
+class DaemonStop(ObjectTask):
+ def __init__(self, app, frame, target):
+ super(DaemonStop, self).__init__(app, frame)
+
+ self.target = target
+
+ self.name = "%s_%s" % (self.name, self.target)
+
+ def get_title(self, session):
+ return "Stop"
+
+ def do_invoke(self, invoc, daemon):
+ system_name = daemon.System
+
+ try:
+ master = Master.select("System = '%s'" % system_name)[0]
+ except IndexError:
+ raise Exception("Master daemon not running")
+
+ master.Stop(completion, self.target)
+
+class DaemonSelectionStart(SelectionTask):
+ def __init__(self, app, selector, target):
+ super(DaemonSelectionStart, self).__init__(app, selector)
+
+ self.target = target
+
+ self.name = "%s_%s" % (self.name, self.target)
+
+ def get_title(self, session):
+ return "Start"
+
+ def do_invoke(self, invoc, daemon):
+ system_name = daemon.System
+
+ try:
+ master = Master.select("System = '%s'" % system_name)[0]
+ except IndexError:
+ raise Exception("Master daemon not running")
+
+ master.Start(completion, self.target)
+
+class DaemonSelectionStop(SelectionTask):
+ def __init__(self, app, selector, target):
+ super(DaemonSelectionStop, self).__init__(app, selector)
+
+ self.target = target
+
+ self.name = "%s_%s" % (self.name, self.target)
+
+ def get_title(self, session):
+ return "Stop"
+
+ def do_invoke(self, invoc, daemon):
+ system_name = daemon.System
+
+ try:
+ master = Master.select("System = '%s'" % system_name)[0]
+ except IndexError:
+ raise Exception("Master daemon not running")
+
+ master.Stop(completion, self.target)
Modified: mgmt/newdata/cumin/python/cumin/grid/job.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/job.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/job.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,45 +1,3 @@
-[JobSet.sql]
-select
- j.id,
- j.accounting_group as agroup,
- j.args,
- j.cluster_id,
- j.concurrency_limits,
- j.custom_group,
- j.custom_id,
- j.custom_priority,
- j.job_status,
- j.title,
- s.name as scheduler,
- b.name as submitter,
- j.scheduler_id,
- j.submitter_id,
- j.cmd,
- j.qmf_delete_time
-from job as j
-{stats_join}
-inner join scheduler as s on s.id = j.scheduler_id
-inner join submitter as b on b.id = j.submitter_id
-{sql_where}
-{sql_orderby}
-{sql_limit}
-
-[JobSet.find_sql]
-select
- j.id,
- j.custom_id
-from job as j
-inner join scheduler as s on s.id = j.scheduler_id
-inner join submitter as b on b.id = j.submitter_id
-
-[JobSet.count_sql]
-select count(1)
-from job as j
-{stats_join}
-inner join scheduler as s on s.id = j.scheduler_id
-inner join submitter as b on b.id = j.submitter_id
-{sql_where}
-
[JobTab.css]
input.search_input {
color: #555;
@@ -57,7 +15,6 @@
padding-top: 2px;
}
-
[JobTab.javascript]
function JobSearchFocus() {
var val = this.value;
Modified: mgmt/newdata/cumin/python/cumin/grid/limit.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/limit.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/limit.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -148,7 +148,7 @@
if not self.errors.get(session):
self.task.invoke(session, negotiator, name, max)
- self.task.exit_with_redirect(session, (negotiator, name))
+ self.task.exit_with_redirect(session)
class LimitView(CuminView):
def __init__(self, app, name, negotiator, limit):
Modified: mgmt/newdata/cumin/python/cumin/grid/main.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/main.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/main.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -3,6 +3,7 @@
from wooly.resources import *
from cumin import *
+from cumin.objecttask import *
from cumin.util import *
from model import *
@@ -15,10 +16,6 @@
def __init__(self, app, name):
super(Module, self).__init__(app, name)
- cls = app.rosemary.mrg_grid.Submission
-
- SubmissionAdd(self, cls)
-
# cls = app.rosemary.mrg_grid.Job
# JobHold(self, cls)
@@ -26,26 +23,8 @@
# JobRemove(self, cls)
# JobSetAttribute(self, cls)
- cls = app.rosemary.mrg_grid.Scheduler
-
- start = SchedulerStart(self, cls)
- stop = SchedulerStop(self, cls)
-
- SelectionTask(start)
- SelectionTask(stop)
-
- cls = app.rosemary.mrg_grid.Collector
-
- start = CollectorStart(self, cls)
- stop = CollectorStop(self, cls)
-
- SelectionTask(start)
- SelectionTask(stop)
-
# cls = app.rosemary.mrg_grid.Negotiator
- # NegotiatorStart(self, cls)
- # NegotiatorStop(self, cls)
# NegotiatorEditDynamicQuota(self, cls)
# NegotiatorEditStaticQuota(self, cls)
# NegotiatorEditPrioFactor(self, cls)
Modified: mgmt/newdata/cumin/python/cumin/grid/model.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/model.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/model.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -19,178 +19,6 @@
# def do_enter(self, session, scheduler):
# self.app.main_page.grid.view.show(branch)
-class SubmissionAdd(ObjectTask):
- EXPR_TYPE, INTEGER_TYPE, FLOAT_TYPE, STRING_TYPE = 0, 1, 2, 3
- UNIVERSE = {"VANILLA": 5,
- "SCHEDULER": 7,
- "GRID": 9,
- "JAVA": 10,
- "PARALLEL": 11,
- "LOCAL": 12,
- "VM": 13}
-
- def __init__(self, module, cls):
- super(SubmissionAdd, self).__init__(module, cls)
-
- self.form = SubmissionAddForm(module.app, "submission_add", self)
-
- def get_title(self, session, scheduler):
- return "Create submission"
-
- def do_invoke(self, invoc, scheduler,
- description,
- executable,
- args=None,
- iwd="/tmp",
- stdin=None,
- stdout=None,
- stderr=None,
- requirements="TRUE",
- universe="VANILLA"):
- user_name = invoc.user.name
-
- ad = {
- "Submission": {"TYPE": self.STRING_TYPE,
- "VALUE": condor_string(description)},
- "Cmd": {"TYPE": self.STRING_TYPE,
- "VALUE": condor_string(executable)},
- "Args": {"TYPE": self.STRING_TYPE,
- "VALUE": condor_string(args)},
- "Requirements": {"TYPE": self.EXPR_TYPE,
- "VALUE": requirements},
- "JobUniverse": {"TYPE": self.INTEGER_TYPE,
- "VALUE": str(self.UNIVERSE[universe])},
- "Iwd": {"TYPE": self.STRING_TYPE,
- "VALUE": condor_string(iwd)},
- "Owner": {"TYPE": self.STRING_TYPE,
- "VALUE": "guest3"}
- }
-
-# "User": {"TYPE": self.STRING_TYPE,
-# "VALUE": condor_string("example(a)example.com")}
-
- scheduler.Submit(completion, ad, None)
-
-def condor_string(string):
- return string
- # XXX return "\"%s\"" % string
-
-class NegotiatorStart(ObjectTask):
- def get_title(self, session, negotiator):
- return "Start"
-
- def do_invoke(self, invoc, negotiator):
- system_name = negotiator.System
-
- try:
- master = Master.select("System = '%s'" % system_name)[0]
- except IndexError:
- raise Exception("Master daemon not running")
-
- master.Start(completion, "NEGOTIATOR")
-
-class NegotiatorStop(ObjectTask):
- def get_title(self, session, negotiator):
- return "Stop"
-
- def do_invoke(self, invoc, negotiator):
- system_name = negotiator.System
-
- try:
- master = Master.select("System = '%s'" % system_name)[0]
- except IndexError:
- raise Exception("Master daemon not running")
-
- master.Stop(completion, "NEGOTIATOR")
-
-class NegotiatorLimitSet(ObjectTask):
- def __init__(self, module, cls):
- super(NegotiatorLimitSet, self).__init__(module, cls)
-
- self.form = NegotiatorLimitSetForm(module.app, self.name, self)
-
- def do_enter(self, session, limit):
- self.form.negotiator.set(session, limit[0])
- self.form.limit_name.set(session, limit[1])
-
- def get_title(self, session):
- return "Set limit"
-
- def do_invoke(self, invoc, negotiator, name, max):
- assert isinstance(negotiator, Negotiator)
-
- negotiator.SetLimit(completion, name, max)
-
- # XXX
- def completion():
- pass
-
- negotiator.Reconfig(completion)
-
-class CollectorStart(ObjectTask):
- def get_title(self, session):
- return "Start"
-
- def do_invoke(self, invoc, collector):
- assert isinstance(collector, Collector)
-
- system_name = collector.System
-
- try:
- master = Master.select("System = '%s'" % system_name)[0]
- except IndexError:
- raise Exception("Master daemon not running")
-
- master.Start(completion, "COLLECTOR")
-
-class CollectorStop(ObjectTask):
- def get_title(self, session):
- return "Stop"
-
- def do_invoke(self, invoc, collector):
- assert isinstance(collector, Collector)
-
- system_name = collector.System
-
- try:
- master = Master.select("System = '%s'" % system_name)[0]
- except IndexError:
- raise Exception("Master daemon not running")
-
- master.Stop(completion, "COLLECTOR")
-
-class SchedulerStart(ObjectTask):
- def get_title(self, session, scheduler):
- return "Start"
-
- def do_invoke(self, invoc, scheduler):
- assert isinstance(scheduler, Scheduler)
-
- system_name = scheduler.System
-
- try:
- master = Master.select("System = '%s'" % system_name)[0]
- except IndexError:
- raise Exception("Master daemon not running")
-
- master.Start(completion, "SCHEDD")
-
-class SchedulerStop(ObjectTask):
- def get_title(self, session, sheduler):
- return "Stop"
-
- def do_invoke(self, invoc, scheduler):
- assert isinstance(scheduler, Scheduler)
-
- system_name = scheduler.System
-
- try:
- master = Master.select("System = '%s'" % system_name)[0]
- except IndexError:
- raise Exception("Master daemon not running")
-
- master.Stop(completion, "SCHEDD")
-
# class JobBaseTask(QmfTask):
# def __init__(self, app, cls, form, verb):
# super(JobBaseTask, self).__init__(app, cls)
@@ -283,87 +111,3 @@
# def get_title(self, session):
# return "Set Job Attribute"
-
-class NegotiatorGroupTask(ObjectTask):
- def do_exit(self, session, negotiator):
- self.app.main_page.main.grid.pool.negotiator.view.show(session)
-
- def do_invoke(self, completion, session, negotiator, group, value):
- assert isinstance(negotiator, Negotiator)
-
- if group == "Reconfig":
- negotiator.Reconfig(completion)
- else:
- negotiator.SetRawConfig(completion, group, value)
-
-class NegotiatorAddGroup(NegotiatorGroupTask):
- def __init__(self, module, cls):
- super(NegotiatorAddGroupTask, self).__init__(module, cls)
-
- self.form = AddGroupForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Add group"
-
-class NegotiatorEditRegroup(NegotiatorGroupTask):
- def __init__(self, module, cls):
- super(NegotiatorEditRegroupTask, self).__init__(module, cls)
-
- self.form = EditRegroupForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Edit autoregroup"
-
-class NegotiatorEditPrioFactor(NegotiatorGroupTask):
- def __init__(self, module, cls):
- super(NegotiatorEditPrioFactorTask, self).__init__(module, cls)
-
- self.form = EditPrioFactorForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Edit priority factor"
-
-class NegotiatorEditDynamicQuota(NegotiatorGroupTask):
- def __init__(self, module, cls):
- super(NegotiatorEditDynamicQuotaTask, self).__init__(module, cls)
-
- self.form = EditDynamicQuotaForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Edit dynamic quota"
-
-class NegotiatorEditStaticQuota(NegotiatorGroupTask):
- def __init__(self, module, cls):
- super(NegotiatorEditStaticQuotaTask, self).__init__(module, cls)
-
- self.form = EditStaticQuotaForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Edit static quota"
-
-class NegotiatorUserTask(NegotiatorGroupTask):
- def do_enter(self, session, ng):
- try:
- negotiator, group = ng
- except:
- raise Exception("Must supply group for user forms")
- super(NegotiatorUserTask, self).do_enter(session, negotiator)
- self.form.group.set(session, group)
-
-class NegotiatorUserPrioFactor(NegotiatorUserTask):
- def __init__(self, module, cls):
- super(NegotiatorUserPrioFactorTask, self).__init__(module, cls)
-
- self.form = UserPrioFactorForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Edit priority factor"
-
-class NegotiatorUserRegroup(NegotiatorUserTask):
- def __init__(self, module, cls):
- super(NegotiatorUserRegroupTask, self).__init__(module, cls)
-
- self.form = UserRegroupForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Edit autoregroup"
Modified: mgmt/newdata/cumin/python/cumin/grid/negotiator.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/negotiator.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/negotiator.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -13,11 +13,32 @@
from cumin.util import *
from cumin.widgets import *
-import main
+from daemon import *
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.grid.negotiator")
+class NegotiatorFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.mrg_grid.Negotiator
+
+ super(NegotiatorFrame, self).__init__(app, name, cls)
+
+ self.start = DaemonStart(app, self, "NEGOTIATOR")
+ self.stop = DaemonStop(app, self, "NEGOTIATOR")
+
+ self.group_add = NegotiatorGroupAdd(app, self)
+
+ self.edit_dynamic_quota = NegotiatorEditDynamicQuota(app, self)
+ self.edit_static_quota = NegotiatorEditStaticQuota(app, self)
+ self.edit_prio_factor = NegotiatorEditPrioFactor(app, self)
+ self.user_prio_factor = NegotiatorUserPrioFactor(app, self)
+ self.edit_regroup = NegotiatorEditRegroup(app, self)
+ self.user_regroup = NegotiatorUserRegroup(app, self)
+
+ overview = NegotiatorOverview(app, "overview", self.object, self)
+ self.view.add_tab(overview)
+
class NegotiatorSelector(ObjectSelector):
def __init__(self, app, name, pool):
cls = app.rosemary.mrg_grid.Negotiator
@@ -26,50 +47,18 @@
self.pool = pool
- self.add_filter(self.pool, cls.Pool, cls.Pool)
+ self.add_filter(self.pool, cls.Pool)
frame = "main.grid.pool.negotiator"
col = ObjectLinkColumn(app, "name", cls.Name, cls._id, frame)
self.add_column(col)
- # self.add_attribute_column(cls.MyAddress)
self.add_attribute_column(cls.Machine)
self.add_attribute_column(cls.System)
- # self.add_selection_task(main.module.negotiator_set_start)
- # self.add_selection_task(main.module.negotiator_set_stop)
+ self.start = DaemonSelectionStart(app, self, "NEGOTIATOR")
+ self.stop = DaemonSelectionStop(app, self, "NEGOTIATOR")
-class NegotiatorFrame(ObjectFrame):
- def __init__(self, app, name):
- cls = app.rosemary.mrg_grid.Negotiator
-
- super(NegotiatorFrame, self).__init__(app, name, cls)
-
- # self.icon_href = "resource?name=negotiator-36.png"
-
-class OldNegotiatorFrame(CuminFrame):
- def __init__(self, app, name):
- super(OldNegotiatorFrame, self).__init__(app, name)
-
- self.object = NegotiatorParameter(app, "id")
- self.add_parameter(self.object)
-
- self.view = NegotiatorView(app, "view", self.object)
- self.add_mode(self.view)
-
-class NegotiatorView(CuminView):
- def __init__(self, app, name, negotiator):
- super(NegotiatorView, self).__init__(app, name, negotiator)
-
- self.tabs = TabbedModeSet(app, "tabs")
- self.add_child(self.tabs)
-
- self.overview = NegotiatorOverview(app, "overview", negotiator)
- self.tabs.add_tab(self.overview)
-
- self.details = CuminDetails(app, "details", negotiator)
- self.tabs.add_tab(self.details)
-
class QmfGroupColumn(ItemTableColumn):
def __init__(self, app, name, getter, negotiator, task):
super(QmfGroupColumn, self).__init__(app, name)
@@ -104,7 +93,8 @@
def render_data(self, session, data):
# is user
if "." in data[0] and self.user_task:
- href = "%s" % self.user_task.get_href(session, (self.negotiator.get(session), data[0]))
+ href = "%s" % self.user_task.get_href \
+ (session, (self.negotiator.get(session), data[0]))
else:
href = self.task.get_href(session, self.negotiator.get(session))
content = data[2][2] and str(data[1]) or "NOT SET"
@@ -114,7 +104,7 @@
return "Edit the %s" % self.title
class NegotiatorOverview(ItemTable):
- def __init__(self, app, name, negotiator):
+ def __init__(self, app, name, negotiator, frame):
super(NegotiatorOverview, self).__init__(app, name)
self.update_enabled = False
@@ -130,7 +120,7 @@
self.buttons.html_class = "buttons"
self.add_child(self.buttons)
- task = main.module.negotiator_add_group
+ task = frame.group_add
button = EditButton(app, "add_group_button", task, negotiator)
self.buttons.add_child(button)
@@ -142,28 +132,28 @@
self.add_column(col)
self.set_default_column(col)
- task = main.module.negotiator_edit_dynamic_quota
+ task = frame.edit_dynamic_quota
col = self.DynamicColumn(app, "dynamic", self.group_helper.get_dyn_quota, negotiator, task)
col.title = "Dynamic Quota"
self.add_column(col)
- task = main.module.negotiator_edit_static_quota
+ task = frame.edit_static_quota
col = QmfGroupColumn(app, "static", self.group_helper.get_static_quota, negotiator, task)
col.title = "Static Quota"
self.add_column(col)
- task = main.module.negotiator_edit_prio_factor
+ task = frame.edit_prio_factor
col = QmfGroupColumn(app, "factor", self.group_helper.get_priority_factor, negotiator, task)
col.title = "Priority Factor"
col.user = True
- col.user_task = main.module.negotiator_user_prio_factor
+ col.user_task = frame.user_prio_factor
self.add_column(col)
- task = main.module.negotiator_edit_regroup
+ task = frame.edit_regroup
col = QmfGroupColumn(app, "regroup", self.group_helper.get_regroups, negotiator, task)
col.title = "Auto Regroup"
col.user = True
- col.user_task = main.module.negotiator_user_regroup
+ col.user_task = frame.user_regroup
self.add_column(col)
def render_title(self, session):
@@ -400,15 +390,12 @@
self.items.set(session, users)
return users
-class AddGroupForm(CuminTaskForm):
+class GroupAddForm(ObjectTaskForm):
def __init__(self, app, name, task):
- super(AddGroupForm, self).__init__(app, name, task)
+ super(GroupAddForm, self).__init__(app, name, task)
self.defer_enabled = True
- self.object = NegotiatorParameter(app, "negotiator")
- self.add_parameter(self.object)
-
self.group_helper = GroupHelper(app, "groups", self.object)
self.add_child(self.group_helper)
@@ -431,7 +418,7 @@
new_groups = ", ".join(original_groups)
self.task.invoke(session, negotiator, "GROUP_NAMES", new_groups)
self.task.invoke(session, negotiator, "Reconfig", None)
- self.task.exit_with_redirect(session, negotiator)
+ self.task.exit_with_redirect(session)
def is_valid(self, group):
ret = False
@@ -528,7 +515,7 @@
changed = True
if changed:
self.task.invoke(session, negotiator, "Reconfig", None)
- self.task.exit_with_redirect(session, negotiator)
+ self.task.exit_with_redirect(session)
def is_valid_factor(self, value):
try:
@@ -574,7 +561,7 @@
changed = True
if changed:
self.task.invoke(session, negotiator, "Reconfig", None)
- self.task.exit_with_redirect(session, negotiator)
+ self.task.exit_with_redirect(session)
class EditDynamicQuotaForm(GroupForm):
def __init__(self, app, name, task):
@@ -609,7 +596,8 @@
writer = Writer()
groups = self.group_helper.get_dyn_quota(session)
- groups = self.group_helper.append_unclaimed_dyn_quota(session, groups, force=True)
+ groups = self.group_helper.append_unclaimed_dyn_quota \
+ (session, groups, force=True)
for group in groups:
if group[0] == "Unclaimed":
self.unclaimed_tmpl.render(writer, session, group)
@@ -639,11 +627,12 @@
continue
quota = self.check_quota(new_value, original_value)
if quota:
- self.task.invoke(session, negotiator, "GROUP_QUOTA_DYNAMIC_"+group, quota)
+ self.task.invoke(session, negotiator,
+ "GROUP_QUOTA_DYNAMIC_" + group, quota)
changed = True
if changed:
self.task.invoke(session, negotiator, "Reconfig", None)
- self.task.exit_with_redirect(session, negotiator)
+ self.task.exit_with_redirect(session)
def check_quota(self, quota, original):
try:
@@ -717,7 +706,7 @@
changed = True
if changed:
self.task.invoke(session, negotiator, "Reconfig", None)
- self.task.exit_with_redirect(session, negotiator)
+ self.task.exit_with_redirect(session)
class EditRegroupForm(GroupForm):
def __init__(self, app, name, task):
@@ -795,7 +784,7 @@
changed = True
if changed:
self.task.invoke(session, negotiator, "Reconfig", None)
- self.task.exit_with_redirect(session, negotiator)
+ self.task.exit_with_redirect(session)
class UserRegroupForm(EditRegroupForm):
def __init__(self, app, name, task):
@@ -830,7 +819,7 @@
changed = True
if changed:
self.task.invoke(session, negotiator, "Reconfig", None)
- self.task.exit_with_redirect(session, negotiator)
+ self.task.exit_with_redirect(session)
class PriorityPieChart(StatFlashChart):
def __init__(self, app, name, negotiator, groups):
@@ -871,32 +860,117 @@
params.extend(vals)
return params
-class NegotiatorStartForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(NegotiatorStartForm, self).__init__(app, name, task)
+class EditButton(ActionSet):
+ def __init__(self, app, name, task, negotiator):
+ super(EditButton, self).__init__(app, name)
- self.object = NegotiatorParameter(app, "negotiator")
- self.add_parameter(self.object)
+ link = ObjectTaskLink(app, "edit", task)
+ self.add_child(link)
-class NegotiatorStopForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(NegotiatorStopForm, self).__init__(app, name, task)
+class NegotiatorLimitSet(ObjectTask):
+ def __init__(self, app, frame):
+ super(NegotiatorLimitSet, self).__init__(app, frame)
- self.object = NegotiatorParameter(app, "negotiator")
- self.add_parameter(self.object)
+ self.form = NegotiatorLimitSetForm(app, self.name, self)
-class NegotiatorSetTaskForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(NegotiatorSetTaskForm, self).__init__(app, name, task)
+ def do_enter(self, session, limit):
+ self.form.negotiator.set(session, limit[0])
+ self.form.limit_name.set(session, limit[1])
- item = NegotiatorParameter(app, "item")
+ def get_title(self, session):
+ return "Set limit"
- self.object = ListParameter(app, "negotiator", item)
- self.add_parameter(self.object)
+ def do_invoke(self, invoc, negotiator, name, max):
+ assert isinstance(negotiator, Negotiator)
-class EditButton(ActionSet):
- def __init__(self, app, name, task, negotiator):
- super(EditButton, self).__init__(app, name)
+ negotiator.SetLimit(completion, name, max)
- link = TaskLink(app, "edit", task, negotiator)
- self.add_child(link)
+ # XXX
+ def completion():
+ pass
+
+ negotiator.Reconfig(completion)
+
+class NegotiatorGroupTask(ObjectTask):
+ def do_exit(self, session):
+ self.app.main_page.main.grid.pool.negotiator.view.show(session)
+
+ def do_invoke(self, completion, session, negotiator, group, value):
+ assert isinstance(negotiator, Negotiator)
+
+ if group == "Reconfig":
+ negotiator.Reconfig(completion)
+ else:
+ negotiator.SetRawConfig(completion, group, value)
+
+class NegotiatorGroupAdd(NegotiatorGroupTask):
+ def __init__(self, app, frame):
+ super(NegotiatorGroupAdd, self).__init__(app, frame)
+
+ self.form = GroupAddForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Add group"
+
+class NegotiatorEditRegroup(NegotiatorGroupTask):
+ def __init__(self, app, frame):
+ super(NegotiatorEditRegroup, self).__init__(app, frame)
+
+ self.form = EditRegroupForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Edit autoregroup"
+
+class NegotiatorEditPrioFactor(NegotiatorGroupTask):
+ def __init__(self, app, frame):
+ super(NegotiatorEditPrioFactor, self).__init__(app, frame)
+
+ self.form = EditPrioFactorForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Edit priority factor"
+
+class NegotiatorEditDynamicQuota(NegotiatorGroupTask):
+ def __init__(self, app, frame):
+ super(NegotiatorEditDynamicQuota, self).__init__(app, frame)
+
+ self.form = EditDynamicQuotaForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Edit dynamic quota"
+
+class NegotiatorEditStaticQuota(NegotiatorGroupTask):
+ def __init__(self, app, frame):
+ super(NegotiatorEditStaticQuota, self).__init__(app, frame)
+
+ self.form = EditStaticQuotaForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Edit static quota"
+
+class NegotiatorUserTask(NegotiatorGroupTask):
+ def do_enter(self, session, ng):
+ try:
+ negotiator, group = ng
+ except:
+ raise Exception("Must supply group for user forms")
+ super(NegotiatorUserTask, self).do_enter(session, negotiator)
+ self.form.group.set(session, group)
+
+class NegotiatorUserPrioFactor(NegotiatorUserTask):
+ def __init__(self, app, frame):
+ super(NegotiatorUserPrioFactor, self).__init__(app, frame)
+
+ self.form = UserPrioFactorForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Edit priority factor"
+
+class NegotiatorUserRegroup(NegotiatorUserTask):
+ def __init__(self, app, frame):
+ super(NegotiatorUserRegroup, self).__init__(app, frame)
+
+ self.form = UserRegroupForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Edit autoregroup"
Modified: mgmt/newdata/cumin/python/cumin/grid/negotiator.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/negotiator.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/negotiator.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -8,14 +8,14 @@
[NegotiatorOverview.html]
<div id="{id}" class="CuminTable GroupTable">
- <div class="sactions">
- <h2>Actions:</h2>
- {buttons}
- </div>
-<table {class}>
- <thead><tr>{headers}</tr></thead>
- <tbody>{items}</tbody>
-</table>
+ <div class="sactions">
+ <h2>Actions:</h2>
+ {buttons}
+ </div>
+ <table {class}>
+ <thead><tr>{headers}</tr></thead>
+ <tbody>{items}</tbody>
+ </table>
</div>
[GroupForm.css]
Modified: mgmt/newdata/cumin/python/cumin/grid/pool.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/pool.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/pool.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -57,6 +57,9 @@
self.submission = SubmissionFrame(app, "submission")
self.add_mode(self.submission)
+ #self.job = JobFrame(app, "job")
+ #self.add_mode(self.job)
+
self.slot = SlotFrame(app, "slot")
self.add_mode(self.slot)
@@ -66,9 +69,20 @@
self.negotiator = NegotiatorFrame(app, "negotiator")
self.add_mode(self.negotiator)
- submissions = SubmissionSelector(app, "submissions", self.object)
+ #self.collector = CollectorFrame(app, "coll")
+ #self.add_mode(self.collector)
+
+ #self.limit = LimitFrame(app, "limit", negotiator)
+ #self.add_mode(self.limit)
+
+ overview = PoolOverview(app, "overview", self.object)
+ self.view.add_tab(overview)
+
+ submissions = SubmissionSelector(app, "submissions")
self.view.add_tab(submissions)
+ # XXX submissions.add_filter(self.object, submissions.cls.Pool)
+
slots = SlotSelector(app, "slots", self.object)
self.view.add_tab(slots)
@@ -78,107 +92,16 @@
negotiators = NegotiatorSelector(app, "negotiators", self.object)
self.view.add_tab(negotiators)
-class OldPoolFrame(CuminFrame):
- def __init__(self, app, name):
- super(OldPoolFrame, self).__init__(app, name)
+ #self.limits = LimitSet(app, "limits", negotiator)
+ #self.view.add_tab(self.limits)
- self.object = PoolParameter(app, "id")
- self.add_parameter(self.object)
+ self.submission_add = SubmissionAdd(app, self)
- negotiator = PoolNegotiatorAttribute(app, "negotiator", self.object)
- self.add_attribute(negotiator)
-
- self.view = PoolView(app, "view", self.object)
- self.add_mode(self.view)
-
- self.submission = SubmissionFrame(app, "submission")
- self.add_mode(self.submission)
-
- self.job = JobFrame(app, "job")
- self.add_mode(self.job)
-
- self.slot = SlotFrame(app, "slot")
- self.add_mode(self.slot)
-
- self.scheduler = SchedulerFrame(app, "sched", self.object)
- self.add_mode(self.scheduler)
-
- self.collector = CollectorFrame(app, "coll")
- self.add_mode(self.collector)
-
- self.limit = LimitFrame(app, "limit", negotiator)
- self.add_mode(self.limit)
-
- self.negotiator = NegotiatorFrame(app, "neg")
- self.add_mode(self.negotiator)
-
-class PoolView(CuminView):
- def __init__(self, app, name, pool):
- super(PoolView, self).__init__(app, name, pool)
-
- self.pool = pool
-
- negotiator = PoolNegotiatorAttribute(app, "negotiator", self.pool)
- self.add_attribute(negotiator)
-
- self.tabs = TabbedModeSet(app, "tabs")
- self.add_child(self.tabs)
-
- self.overview = PoolOverview(app, "overview", self.pool)
- self.tabs.add_tab(self.overview)
-
- self.submissions = PoolSubmissionSet(app, "submissions", self.pool)
- self.tabs.add_tab(self.submissions)
-
- self.slots = PoolSlotSet(app, "slots", self.pool)
- self.tabs.add_tab(self.slots)
-
- self.schedulers = PoolSchedulerSet(app, "schedulers", self.pool)
- self.tabs.add_tab(self.schedulers)
-
- self.negotiators = PoolNegotiatorSet(app, "negotiators", self.pool)
- self.tabs.add_tab(self.negotiators)
-
- self.collectors = PoolCollectorSet(app, "collectors", self.pool)
- self.tabs.add_tab(self.collectors)
-
- self.limits = LimitSet(app, "limits", negotiator)
- self.tabs.add_tab(self.limits)
-
def do_process(self, session):
- self.limits.limit_count.process(session)
+ #self.limits.limit_count.process(session)
+
super(PoolView, self).do_process(session)
-class PoolSubmissionSet(SubmissionSet):
- def __init__(self, app, name, pool):
- super(PoolSubmissionSet, self).__init__(app, name)
-
- self.pool = pool
-
- task = main.module.submission_add
- link = TaskLink(app, "add", task, self.pool)
- self.links.add_child(link)
-
- def get_submission_href(self, session, id):
- submission = Identifiable(id)
- return main.module.frame.pool.submission.get_href(session, submission)
-
- def get_submitter_href(self, session, id):
- submitter = Identifiable(id)
- return main.module.frame.pool.scheduler.submitter.get_href \
- (session, submitter)
-
- def get_scheduler_href(self, session, id):
- scheduler = Identifiable(id)
- return main.module.frame.pool.scheduler.get_href(session, scheduler)
-
- def render_sql_where(self, session):
- return "where d.pool = %(pool)s"
-
- def get_sql_values(self, session):
- pool = self.pool.get(session)
- return {"pool": pool.id}
-
class PoolOverview(Widget):
def __init__(self, app, name, pool):
super(PoolOverview, self).__init__(app, name)
@@ -234,8 +157,8 @@
super(GridGeneralStatSet, self).__init__(app, name, object)
self.attrs = ("NumJobs", "SubmitsInProgress",
- "SubmitsQueued", "SubmitsAllowed",
- "SubmitsWanted", "RunningJobs", "IdleJobs")
+ "SubmitsQueued", "SubmitsAllowed",
+ "SubmitsWanted", "RunningJobs", "IdleJobs")
def do_render(self, session):
grid = self.object.get(session)
@@ -401,7 +324,8 @@
def get_click(self, session, state):
href = self.parent.render_slots_href(session)
- return "return slot_vis('%s', this, '%s', '%s')" % (state, self.parent.path, href)
+ return "return slot_vis('%s', this, '%s', '%s')" % \
+ (state, self.parent.path, href)
class PoolSlotFullPage(FlashFullPage):
def __init__(self, app, name):
Modified: mgmt/newdata/cumin/python/cumin/grid/scheduler.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/scheduler.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/scheduler.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,5 +1,3 @@
-import logging
-
from random import choice
from wooly import *
@@ -7,6 +5,7 @@
from wooly.forms import *
from wooly.resources import *
from wooly.tables import *
+
from cumin.objectframe import *
from cumin.objectselector import *
from cumin.formats import *
@@ -16,13 +15,38 @@
from cumin.util import *
from cumin.widgets import *
+from daemon import *
from job import *
from submitter import *
from submission import *
strings = StringCatalog(__file__)
-log = logging.getLogger("cumin.scheduler")
+log = logging.getLogger("cumin.grid.scheduler")
+class SchedulerFrame(ObjectFrame):
+ def __init__(self, app, name, pool):
+ cls = app.rosemary.mrg_grid.Scheduler
+
+ super(SchedulerFrame, self).__init__(app, name, cls)
+
+ self.submitter = SubmitterFrame(app, "submitter")
+ self.add_mode(self.submitter)
+
+ overview = SchedulerOverview(app, "overview", self.object)
+ self.view.add_tab(overview)
+
+ submissions = SubmissionSelector(app, "submissions")
+ self.view.add_tab(submissions)
+
+ cls = submissions.cls
+ submissions.add_reference_filter(self.object, cls.schedulerRef)
+
+ submitters = SubmitterSelector(app, "submitters", self.object)
+ self.view.add_tab(submitters)
+
+ self.start = DaemonStart(app, self, "SCHEDD")
+ self.stop = DaemonStop(app, self, "SCHEDD")
+
class SchedulerSelector(ObjectSelector):
def __init__(self, app, name, pool):
cls = app.rosemary.mrg_grid.Scheduler
@@ -31,7 +55,7 @@
self.pool = pool
- self.add_filter(self.pool, cls.Pool, cls.Pool)
+ self.add_filter(self.pool, cls.Pool)
frame = "main.grid.pool.scheduler"
col = ObjectLinkColumn(app, "name", cls.Name, cls._id, frame)
@@ -42,26 +66,9 @@
self.add_attribute_column(cls.TotalRunningJobs)
self.add_attribute_column(cls.TotalHeldJobs)
- #self.add_selection_task(main.module.scheduler_set_start)
- #self.add_selection_task(main.module.scheduler_set_stop)
+ self.start = DaemonSelectionStart(app, self, "SCHEDD")
+ self.stop = DaemonSelectionStop(app, self, "SCHEDD")
-class SchedulerFrame(ObjectFrame):
- def __init__(self, app, name, pool):
- cls = app.rosemary.mrg_grid.Scheduler
-
- super(SchedulerFrame, self).__init__(app, name, cls)
-
- self.submitter = SubmitterFrame(app, "submitter")
- self.add_mode(self.submitter)
-
- submissions = SubmissionSelector(app, "submissions", pool)
- submissions.add_reference_filter \
- (self.object, submissions.cls.schedulerRef)
- self.view.add_tab(submissions)
-
- submitters = SubmitterSelector(app, "submitters", self.object)
- self.view.add_tab(submitters)
-
class SchedulerSelectField(ScalarField):
def __init__(self, app, name, pool):
super(SchedulerSelectField, self).__init__(app, name, None)
@@ -109,49 +116,17 @@
if item is self.param.get(session):
return "selected=\"selected\""
-class OldSchedulerFrame(CuminFrame):
- def __init__(self, app, name, pool):
- super(OldSchedulerFrame, self).__init__(app, name)
-
- self.object = SchedulerParameter(app, "id")
- self.add_parameter(self.object)
-
- self.view = SchedulerView(app, "view", self.object)
- self.add_mode(self.view)
-
- self.submitter = SubmitterFrame(app, "sub", self.object)
- self.add_mode(self.submitter)
-
-class SchedulerView(CuminView):
- def __init__(self, app, name, scheduler):
- super(SchedulerView, self).__init__(app, name, scheduler)
-
- self.tabs = TabbedModeSet(app, "tabs")
- self.add_child(self.tabs)
-
- stats = SchedulerStats(app, "stats", scheduler)
- self.tabs.add_tab(stats)
-
- submissions = SchedulerSubmissionSet(app, "submissions", scheduler)
- self.tabs.add_tab(submissions)
-
- submitters = SchedulerSubmitterSet(app, "submitters", scheduler)
- self.tabs.add_tab(submitters)
-
- details = CuminDetails(app, "details", scheduler)
- self.tabs.add_tab(details)
-
class SchedulerGeneralStatSet(StatSet):
def __init__(self, app, name, object):
super(SchedulerGeneralStatSet, self).__init__(app, name, object)
self.attrs = ("NumUsers", "TotalRunningJobs",
- "TotalIdleJobs", "TotalHeldJobs",
- "TotalJobAds", "TotalRemovedJobs")
+ "TotalIdleJobs", "TotalHeldJobs",
+ "TotalJobAds", "TotalRemovedJobs")
-class SchedulerStats(Widget):
+class SchedulerOverview(Widget):
def __init__(self, app, name, scheduler):
- super(SchedulerStats, self).__init__(app, name)
+ super(SchedulerOverview, self).__init__(app, name)
stats = SchedulerGeneralStatSet(app, "general", scheduler)
self.add_child(stats)
@@ -167,7 +142,7 @@
self.add_child(chart)
def render_title(self, session):
- return "Statistics"
+ return "Overview"
class UsersChart(StatFlashChart):
def render_title(self, session):
@@ -176,44 +151,3 @@
class JobsChart(StatFlashChart):
def render_title(self, session):
return "Jobs"
-
-class SchedulerJobSet(JobTab):
- def __init__(self, app, name, pool):
- super(SchedulerJobSet, self).__init__(app, name, pool)
-
- def get_visible_columns(self, session):
- return self.get_request_visible_columns(session, ["custom_group", "submitter"])
-
- def render_sql_where(self, session):
- scheduler = self.frame.object.get(session)
- phase_sql = self.get_phase_sql(session)
- scheduler_sql = "j.scheduler_id = %i" % scheduler.id
- return "where %s" % " and ".join([phase_sql, scheduler_sql])
-
- def render_title(self, session):
- scheduler = self.frame.object.get(session)
- where_scheduler = "scheduler_id = %i" % scheduler.id
- return "Jobs %s" % fmt_count(Job.select(where_scheduler).count())
-
-class SchedulerStartForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(SchedulerStartForm, self).__init__(app, name, task)
-
- self.object = SchedulerParameter(app, "scheduler")
- self.add_parameter(self.object)
-
-class SchedulerStopForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(SchedulerStopForm, self).__init__(app, name, task)
-
- self.object = SchedulerParameter(app, "scheduler")
- self.add_parameter(self.object)
-
-class SchedulerSetTaskForm(CuminTaskForm):
- def __init__(self, app, name, task):
- super(SchedulerSetTaskForm, self).__init__(app, name, task)
-
- item = SchedulerParameter(app, "item")
-
- self.object = ListParameter(app, "scheduler", item)
- self.add_parameter(self.object)
Modified: mgmt/newdata/cumin/python/cumin/grid/scheduler.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/scheduler.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/scheduler.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,4 +1,4 @@
-[SchedulerStats.html]
+[SchedulerOverview.html]
<table class="twocol">
<tbody>
<tr>
Modified: mgmt/newdata/cumin/python/cumin/grid/submission.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/submission.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/submission.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -3,25 +3,26 @@
from cumin.objectframe import *
from cumin.objectselector import *
+from cumin.objecttask import *
from cumin.widgets import *
from cumin.util import *
from job import JobSet
-import main
-
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.grid.submission")
+class SubmissionFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.mrg_grid.Submission
+
+ super(SubmissionFrame, self).__init__(app, name, cls)
+
class SubmissionSelector(ObjectSelector):
- def __init__(self, app, name, pool):
+ def __init__(self, app, name):
cls = app.rosemary.mrg_grid.Submission
super(SubmissionSelector, self).__init__(app, name, cls)
- self.pool = pool
-
- # XXX self.add_filter(self.pool, cls.Pool, cls.Pool)
-
frame = "main.grid.pool.submission"
col = ObjectLinkColumn(app, "name", cls.Name, cls._id, frame)
self.add_column(col)
@@ -30,175 +31,125 @@
self.add_attribute_column(cls.Running)
self.add_attribute_column(cls.Completed)
-class SubmissionFrame(ObjectFrame):
- def __init__(self, app, name):
- cls = app.rosemary.mrg_grid.Submission
-
- super(SubmissionFrame, self).__init__(app, name, cls)
-
-class SubmissionSet(CuminTable):
- def __init__(self, app, name):
- super(SubmissionSet, self).__init__(app, name)
-
- col = self.NameColumn(app, "name")
- col.width = "40%"
- self.add_column(col)
-
- self.scheduler_col = self.SchedulerColumn(app, "scheduler")
- self.add_column(self.scheduler_col)
-
- self.submitter_col = self.SubmitterColumn(app, "submitter")
- self.width = "15%"
- self.add_column(self.submitter_col)
-
- col = self.IdleColumn(app, "idle")
- self.add_column(col)
-
- col = self.RunningColumn(app, "running")
- self.add_column(col)
-
- col = self.CompletedColumn(app, "completed")
- self.add_column(col)
-
- def get_submission_href(self, session, id):
- raise Exception("Not implemented")
-
- def get_submitter_href(self, session, id):
- raise Exception("Not implemented")
-
- def get_scheduler_href(self, session, id):
- raise Exception("Not implemented")
-
+class SubmissionJobSet(JobSet):
def render_title(self, session):
- return "Submissions %s" % fmt_count(self.get_item_count(session))
+ return "Jobs"
- class NameColumn(TopTableColumn):
- def render_title(self, session):
- return "Name"
+class SubmissionAdd(ObjectTask):
+ EXPR_TYPE, INTEGER_TYPE, FLOAT_TYPE, STRING_TYPE = 0, 1, 2, 3
+ UNIVERSE = {"VANILLA": 5,
+ "SCHEDULER": 7,
+ "GRID": 9,
+ "JAVA": 10,
+ "PARALLEL": 11,
+ "LOCAL": 12,
+ "VM": 13}
- def render_content(self, session, data):
- href = self.parent.get_submission_href(session, data["id"])
- return fmt_link(href, data["name"])
+ def __init__(self, app, frame):
+ super(SubmissionAdd, self).__init__(app, frame)
- class SchedulerColumn(SqlTableColumn):
- def render_title(self, session):
- return "Scheduler"
+ self.form = SubmissionAddForm(app, self.name, self)
- def render_content(self, session, data):
- href = self.parent.get_scheduler_href \
- (session, data["scheduler_id"])
- return fmt_link(href, data["scheduler_name"])
+ def get_title(self, session):
+ return "Create submission"
- class SubmitterColumn(SqlTableColumn):
- def render_title(self, session):
- return "Submitter"
+ def do_invoke(self, invoc, scheduler,
+ description,
+ executable,
+ args=None,
+ iwd="/tmp",
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ requirements="TRUE",
+ universe="VANILLA"):
+ user_name = invoc.user.name
- def render_content(self, session, data):
- href = self.parent.get_submitter_href \
- (session, data["scheduler_id"])
- return fmt_link(href, data["owner"])
+ ad = {
+ "Submission": {"TYPE": self.STRING_TYPE,
+ "VALUE": condor_string(description)},
+ "Cmd": {"TYPE": self.STRING_TYPE,
+ "VALUE": condor_string(executable)},
+ "Args": {"TYPE": self.STRING_TYPE,
+ "VALUE": condor_string(args)},
+ "Requirements": {"TYPE": self.EXPR_TYPE,
+ "VALUE": requirements},
+ "JobUniverse": {"TYPE": self.INTEGER_TYPE,
+ "VALUE": str(self.UNIVERSE[universe])},
+ "Iwd": {"TYPE": self.STRING_TYPE,
+ "VALUE": condor_string(iwd)},
+ "Owner": {"TYPE": self.STRING_TYPE,
+ "VALUE": "guest3"}
+ }
- class IdleColumn(SqlTableColumn):
- def render_title(self, session):
- return "Idle Jobs"
+# "User": {"TYPE": self.STRING_TYPE,
+# "VALUE": condor_string("example@example.com")}
- class RunningColumn(SqlTableColumn):
- def render_title(self, session):
- return "Running Jobs"
+ scheduler.Submit(completion, ad, None)
- class CompletedColumn(SqlTableColumn):
- def render_title(self, session):
- return "Completed Jobs"
+def condor_string(string):
+ return string
-class OldSubmissionFrame(CuminFrame):
- def __init__(self, app, name):
- super(OldSubmissionFrame, self).__init__(app, name)
-
- self.object = SubmissionParameter(app, "submission")
- self.add_parameter(self.object)
-
- self.view = SubmissionView(app, "view", self.object)
- self.add_child(self.view)
-
-class SubmissionView(CuminView):
- def __init__(self, app, name, submission):
- super(SubmissionView, self).__init__(app, name, submission)
-
- self.tabs = TabbedModeSet(app, "tabs")
- self.add_child(self.tabs)
-
- self.jobs = SubmissionJobSet(app, "jobs", submission)
- self.tabs.add_tab(self.jobs)
-
-class SubmissionJobSet(JobSet):
- def render_title(self, session):
- return "Jobs"
-
-class SubmissionAddForm(FoldingFieldSubmitForm):
+class SubmissionAddForm(ObjectTaskForm):
def __init__(self, app, name, task):
- super(SubmissionAddForm, self).__init__(app, name)
+ super(SubmissionAddForm, self).__init__(app, name, task)
+ # XXX
self.pool = PoolParameter(app, "pool")
self.add_parameter(self.pool)
- self.task = task
-
from scheduler import SchedulerSelectField # XXX
self.scheduler = SchedulerSelectField(app, "scheduler", self.pool)
self.scheduler.required = True
self.scheduler.help = "Create submission at this scheduler"
- self.main_fields.add_field(self.scheduler)
+ self.add_field(self.scheduler)
self.description = self.DescriptionField(app, "description")
self.description.input.size = 50
self.description.required = True
self.description.help = "This text will identify the submission"
- self.main_fields.add_field(self.description)
+ self.add_field(self.description)
self.command = self.CommandField(app, "command")
self.command.input.columns = 50
self.command.required = True
self.command.help = "The path to the executable and any arguments"
- self.main_fields.add_field(self.command)
+ self.add_field(self.command)
self.requirements = self.RequirementsField(app, "requirements")
self.requirements.input.columns = 50
self.requirements.help = "Attributes controlling where and when " + \
"this submission will run"
- self.main_fields.add_field(self.requirements)
+ self.add_field(self.requirements)
self.directory = self.WorkingDirectoryField(app, "directory")
self.directory.input.size = 50
self.directory.help = "Run the process in this directory"
- self.extra_fields.add_field(self.directory)
+ self.add_extra_field(self.directory)
self.stdin = self.StdinField(app, "stdin")
self.stdin.input.size = 50
self.stdin.help = "Get process input from this file"
- self.extra_fields.add_field(self.stdin)
+ self.add_extra_field(self.stdin)
self.stdout = self.StdoutField(app, "stdout")
self.stdout.input.size = 50
self.stdout.help = "Send process output to this file"
- self.extra_fields.add_field(self.stdout)
+ self.add_extra_field(self.stdout)
self.stderr = self.StderrField(app, "stderr")
self.stderr.input.size = 50
self.stderr.help = "Send error output to this file"
- self.extra_fields.add_field(self.stderr)
+ self.add_extra_field(self.stderr)
#self.options = self.OptionsField(app, "options")
- #self.main.add_field(self.options)
+ #self.add_extra_field(self.options)
self.attributes_ = self.AttributesField(app, "attributes")
self.attributes_.input.columns = 50
- self.extra_fields.add_field(self.attributes_)
+ self.add_extra_field(self.attributes_)
- def validate(self, session):
- self.main_fields.validate(session)
- self.extra_fields.validate(session)
-
def process_submit(self, session):
self.validate(session)
@@ -228,14 +179,11 @@
stdout=stdout,
stderr=stderr,
requirements=requirements)
-# universe=universe) # XXX
- self.task.exit_with_redirect(session, scheduler)
+ # universe=universe) # XXX
+
+ self.task.exit_with_redirect(session)
- def render_title(self, session):
- pool = self.pool.get(session)
- return self.task.get_description(session, pool)
-
class TemplateField(FormField):
def __init__(self, app, name):
super(SubmissionAddForm.TemplateField, self).__init__(app, name)
@@ -345,4 +293,3 @@
def render_content(self, session, data):
since = data["qmf_create_time"]
return fmt_duration(time() - secs(since))
-
Modified: mgmt/newdata/cumin/python/cumin/grid/submission.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/submission.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/submission.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,27 +1,3 @@
-[SubmissionSet.sql]
-select
- s.id,
- s.name,
- s.owner,
- c.idle,
- c.running,
- c.completed,
- d.id as scheduler_id,
- d.name as scheduler_name
-from submission as s
-inner join scheduler as d on s.scheduler_id = d.id
-inner join submission_stats as c on s.stats_curr_id = c.id
-{sql_where}
-{sql_order_by}
-{sql_limit}
-
-[SubmissionSet.count_sql]
-select count(*)
-from submission as s
-inner join scheduler as d on s.scheduler_id = d.id
-inner join submission_stats as c on s.stats_curr_id = c.id
-{sql_where}
-
[TopSubmissionSet.sql]
select
s.id,
Modified: mgmt/newdata/cumin/python/cumin/grid/submitter.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/submitter.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/submitter.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -5,6 +5,9 @@
from wooly.forms import *
from wooly.resources import *
from wooly.tables import *
+
+from cumin.objectframe import *
+from cumin.objectselector import *
from cumin.stat import *
from cumin.widgets import *
from cumin.parameters import *
@@ -16,6 +19,17 @@
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.grid.submitter")
+class SubmitterFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.mrg_grid.Submitter
+
+ super(SubmitterFrame, self).__init__(app, name, cls)
+
+ overview = SubmitterOverview(app, "overview", self.object)
+ self.view.add_tab(overview)
+
+ # submissions XXX
+
class SubmitterSelector(ObjectSelector):
def __init__(self, app, name, scheduler):
cls = app.rosemary.mrg_grid.Submitter
@@ -36,47 +50,15 @@
self.add_attribute_column(cls.RunningJobs)
self.add_attribute_column(cls.HeldJobs)
-class SubmitterFrame(ObjectFrame):
- def __init__(self, app, name):
- cls = app.rosemary.mrg_grid.Submitter
-
- super(SubmitterFrame, self).__init__(app, name, cls)
-
-class OldSubmitterFrame(CuminFrame):
- def __init__(self, app, name, pool):
- super(OldSubmitterFrame, self).__init__(app, name)
-
- self.object = SubmitterParameter(app, "id")
- self.add_parameter(self.object)
-
- self.view = SubmitterView(app, "view", self.object)
- self.add_mode(self.view)
-
-class SubmitterView(CuminView):
- def __init__(self, app, name, submitter):
- super(SubmitterView, self).__init__(app, name, submitter)
-
- self.tabs = TabbedModeSet(app, "tabs")
- self.add_child(self.tabs)
-
- stats = SubmitterStats(app, "stats", submitter)
- self.tabs.add_tab(stats)
-
- submissions = SubmitterSubmissionSet(app, "submissions", submitter)
- self.tabs.add_tab(submissions)
-
- details = CuminDetails(app, "details", submitter)
- self.tabs.add_tab(details)
-
class SubmitterGeneralStatSet(StatSet):
def __init__(self, app, name, object):
super(SubmitterGeneralStatSet, self).__init__(app, name, object)
self.attrs = ("RunningJobs", "IdleJobs", "HeldJobs")
-class SubmitterStats(Widget):
+class SubmitterOverview(Widget):
def __init__(self, app, name, submitter):
- super(SubmitterStats, self).__init__(app, name)
+ super(SubmitterOverview, self).__init__(app, name)
stats = SubmitterGeneralStatSet(app, "general", submitter)
self.add_child(stats)
@@ -86,24 +68,8 @@
self.add_child(chart)
def render_title(self, session):
- return "Statistics"
+ return "Overview"
class JobsChart(StatFlashChart):
def render_title(self, session):
return "Jobs"
-
-class SubmitterSubmissionSet(SubmissionSet):
- def __init__(self, app, name, submitter):
- super(SubmitterSubmissionSet, self).__init__(app, name)
-
- self.submitter = submitter
-
- self.scheduler_col.visible = False
- self.submitter_col.visible = False
-
- def render_sql_where(self, session):
- return "where m.id = %(id)r"
-
- def get_sql_values(self, session):
- submitter = self.submitter.get(session)
- return {"id": submitter.id}
Modified: mgmt/newdata/cumin/python/cumin/grid/submitter.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/submitter.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/grid/submitter.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,4 +1,4 @@
-[SubmitterStats.html]
+[SubmitterOverview.html]
<table class="twocol">
<tbody>
<tr>
Modified: mgmt/newdata/cumin/python/cumin/main.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/main.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/main.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -17,7 +17,6 @@
from objectselector import *
from objecttask import *
from sqladapter import *
-from table import *
from user import *
from widgets import *
@@ -81,13 +80,13 @@
import messaging
import grid
import inventory
- import usergrid
+ #import usergrid
account.Module(self, "account")
messaging.Module(self, "messaging")
grid.Module(self, "grid")
inventory.Module(self, "inventory")
- usergrid.Module(self, "usergrid")
+ #usergrid.Module(self, "usergrid")
for module in self.modules:
module.init()
@@ -345,4 +344,4 @@
class TopTableFooter(Widget):
def render(self, session):
- return ""
\ No newline at end of file
+ return ""
Modified: mgmt/newdata/cumin/python/cumin/messaging/binding.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/binding.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/binding.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -15,6 +15,61 @@
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.messaging.exchange")
+class BindingSelectionRemove(ObjectTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_invoke(self, invoc, binding):
+ assert isinstance(binding, Binding)
+
+ session = self.app.model.get_session_by_object(binding)
+ session.exchange_unbind(queue=binding.queue.name,
+ exchange=binding.exchange.name,
+ binding_key=binding.bindingKey)
+ session.sync()
+
+ invoc.end()
+
+class BindingFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.org_apache_qpid_broker.Binding
+
+ super(BindingFrame, self).__init__(app, name, cls)
+
+ self.remove = BindingRemove(app, self)
+
+class BindingAdd(ObjectTask):
+ def __init__(self, app, frame):
+ super(BindingAdd, self).__init__(app, frame)
+
+ self.form = BindingAddForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Add binding"
+
+ def do_invoke(self, invoc, vhost, queue, exchange, binding_key, args):
+ session = self.app.model.get_session_by_object(vhost)
+ session.exchange_bind(queue=queue.name, exchange=exchange.name,
+ binding_key=binding_key, arguments=args)
+ session.sync()
+
+ invoc.end()
+
+class BindingRemove(ObjectTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_invoke(self, invoc, binding):
+ assert isinstance(binding, Binding)
+
+ session = self.app.model.get_session_by_object(binding)
+ session.exchange_unbind(queue=binding.queue.name,
+ exchange=binding.exchange.name,
+ binding_key=binding.bindingKey)
+ session.sync()
+
+ invoc.end()
+
class BindingData(ObjectSqlAdapter):
def __init__(self, app):
binding = app.rosemary.org_apache_qpid_broker.Binding
@@ -36,20 +91,26 @@
super(BindingSelector, self).__init__(app, name, binding, data)
+ frame = "main.messaging.broker.binding"
+ col = ObjectLinkColumn \
+ (app, "binding", binding.bindingKey, binding._id, frame)
+ self.add_column(col)
+
frame = "main.messaging.broker.exchange"
self.exchange_column = self.Exchange \
(app, "exchange", exchange.name, exchange._id, frame)
+ self.add_column(self.exchange_column)
frame = "main.messaging.broker.queue"
self.queue_column = self.Queue \
(app, "queue", queue.name, queue._id, frame)
+ self.add_column(self.queue_column)
- self.add_attribute_column(binding.bindingKey)
self.add_attribute_column(binding.arguments)
self.add_attribute_column(binding.origin)
self.add_attribute_column(binding.msgMatched)
- #self.add_selection_task(main.module.binding_set_remove)
+ self.remove = BindingSelectionRemove(app, self)
class Exchange(ObjectLinkColumn):
def render_header_content(self, session):
@@ -341,16 +402,9 @@
def render_exchanges(self, session):
vhost = self.vhost.get(session)
-
cls = self.app.rosemary.org_apache_qpid_broker.Exchange
-
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
- try:
- exchanges = cls.get_selection(cursor, _vhostRef_id=vhost._id)
- finally:
- cursor.close()
+ exchanges = cls.get_selection(session.cursor, _vhostRef_id=vhost._id)
# render each exchange we support
writer = Writer()
Modified: mgmt/newdata/cumin/python/cumin/messaging/broker.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/broker.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/broker.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -6,9 +6,9 @@
from cumin.formats import *
from cumin.objectframe import *
+from cumin.objecttask import *
from cumin.parameters import *
from cumin.sqladapter import *
-from cumin.table import *
from cumin.util import *
from cumin.widgets import *
@@ -100,8 +100,7 @@
self.icon_href = "resource?name=broker-36.png"
- self.vhost = BrokerVhostAttribute(app, "vhost", self.object)
- self.add_attribute(self.vhost)
+ self.broker = SessionAttribute(self, "broker")
self.queue = QueueFrame(app, "queue") # XXX pass self.vhost
self.add_mode(self.queue)
@@ -109,21 +108,93 @@
self.exchange = ExchangeFrame(app, "exchange")
self.add_mode(self.exchange)
+ self.binding = BindingFrame(app, "binding")
+ self.add_mode(self.binding)
+
self.connection = ConnectionFrame(app, "connection")
self.add_mode(self.connection)
self.brokerlink = BrokerLinkFrame(app, "link")
self.add_mode(self.brokerlink)
- self.view.add_tab(QueueSelector(app, "queues", self.vhost))
- self.view.add_tab(ExchangeSelector(app, "exchanges", self.vhost))
- self.view.add_tab(ConnectionSelector(app, "connections", self.vhost))
- self.view.add_tab(BrokerLinkSelector(app, "brokerlinks", self.vhost))
+ self.view = ObjectView(app, "view", self.broker)
+ self.replace_child(self.view)
- self.add_summary_attribute(cls.port)
- self.add_summary_task(app.messaging.ExchangeAdd)
- self.add_summary_task(app.messaging.QueueAdd)
+ self.view.add_tab(QueueSelector(app, "queues", self.object))
+ self.view.add_tab(ExchangeSelector(app, "exchanges", self.object))
+ self.view.add_tab(ConnectionSelector(app, "connections", self.object))
+ self.view.add_tab(BrokerLinkSelector(app, "brokerlinks", self.object))
+ self.queue_add = QueueAdd(app, self)
+ self.exchange_add = ExchangeAdd(app, self)
+ self.brokerlink_add = BrokerLinkAdd(app, self)
+ self.move_messages = MoveMessages(app, self)
+
+ def get_object(self, session, id):
+ # self.object is Vhost, and we stick Broker in self.broker
+
+ broker = super(BrokerFrame, self).get_object(session, id)
+
+ self.broker.set(session, broker)
+
+ cls = self.app.rosemary.org_apache_qpid_broker.Vhost
+ args = {"_brokerRef_id": id, "name": "/"}
+
+ for obj in cls.get_selection(session.cursor, **args):
+ break
+
+ return obj
+
+ def get_title(self, session):
+ obj = self.broker.get(session)
+
+ return "%s '%s'" % (obj._class._title, obj.get_title())
+
+class MoveMessages(ObjectTask):
+ def __init__(self, app, frame):
+ super(MoveMessages, self).__init__(app, frame)
+
+ self.form = MoveMessagesForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Move messages"
+
+ def do_invoke(self, invoc, vhost, src, dst, count):
+ self.qmf_call(invoc, vhost, src, dst, count)
+
+class MoveMessagesForm(ObjectTaskForm):
+ def __init__(self, app, name, task):
+ super(MoveMessagesForm, self).__init__(app, name, task)
+
+ self.queue = QueueParameter(app, "queue")
+ self.add_parameter(self.queue)
+
+ self.dest_queue = QueueSelectField(app, "dest", self)
+ self.add_field(self.dest_queue)
+
+ self.count = MultiplicityField(app, "count")
+ self.add_field(self.count)
+
+ def process_submit(self, session):
+ self.validate(session)
+
+ if not self.errors.get(session):
+ queue = self.queue.get(session)
+ dest_queue = self.dest_queue.get(session)
+ scount = self.count.get(session)
+
+ if scount == "all":
+ count = 0
+ elif scount == "top":
+ count = 1
+ elif scount == "N":
+ count = self.count.top_n.get_n_value(session)
+ else:
+ raise Exception("Wrong Value")
+
+ self.task.invoke(session, queue, dest_queue, count)
+ self.task.exit_with_redirect(session)
+
class ModuleNotEnabled(Widget):
def do_render(self, session):
return "This module is not enabled"
@@ -304,7 +375,7 @@
groups = self.__groups.get(session)
if len(brokers):
self.task.invoke(session, brokers, groups)
- self.task.exit_with_redirect(session, brokers)
+ self.task.exit_with_redirect(session)
class TopBrokerSet(CuminTable):
def __init__(self, app, name):
Modified: mgmt/newdata/cumin/python/cumin/messaging/brokergroup.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/brokergroup.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/brokergroup.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -2,11 +2,11 @@
from wooly import *
from wooly.widgets import *
from wooly.forms import *
+
from cumin.model import *
from cumin.widgets import *
from cumin.parameters import *
from cumin.sqladapter import *
-from cumin.table import *
from cumin.formats import *
from cumin.util import *
@@ -28,9 +28,25 @@
self.add_attribute_column(cls.description)
- self.add_task(app.messaging.BrokerGroupAdd, None)
- self.add_selection_task(app.messaging.BrokerGroupRemove)
+ self.remove = BrokerGroupSelectionRemove(app, self)
+class BrokerGroupSelectionRemove(SelectionTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_invoke(self, invoc, group):
+ conn = self.app.model.get_sql_connection()
+ cursor = conn.cursor()
+
+ try:
+ group.delete(cursor)
+ finally:
+ cursor.close()
+
+ conn.commit()
+
+ invoc.end()
+
class BrokerGroupInputSet(CheckboxInputSet):
def __init__(self, app, name):
super(BrokerGroupInputSet, self).__init__(app, name, None)
@@ -63,50 +79,162 @@
brokers.group = self.object
self.view.add_tab(brokers)
-class BrokerGroupForm(FieldSubmitForm):
+ self.edit = BrokerGroupEdit(app, self)
+ self.remove = BrokerGroupRemove(app, self)
+
+class BrokerGroupForm(ObjectTaskForm):
def __init__(self, app, name, task):
- super(BrokerGroupForm, self).__init__(app, name)
+ super(BrokerGroupForm, self).__init__(app, name, task)
- self.task = task
+ self.name_ = UniqueNameField(app, "name", BrokerGroup) # XXX
+ self.add_field(self.name_)
- self.group_name = UniqueNameField(app, "name", BrokerGroup)
- self.add_field(self.group_name)
+ self.description = self.Description(app, "description")
+ self.add_field(self.description)
+ class Description(MultilineStringField):
+ def render_title(self, session):
+ return "Description"
+
+class BrokerGroupAdd(ObjectTask):
+ def __init__(self, app, frame):
+ super(BrokerGroupAdd, self).__init__(app, frame)
+
+ self.form = BrokerGroupAddForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Add broker group"
+
+ def do_invoke(self, invoc, obj, name, description):
+ conn = self.app.model.get_sql_connection()
+ cursor = conn.cursor()
+
+ group = self.cls.create_object(cursor)
+ group.name = name
+ group.description = description
+
+ group.fake_qmf_values()
+
+ try:
+ group.save(cursor)
+ finally:
+ cursor.close()
+
+ conn.commit()
+
+ invoc.end()
+
+ return group
+
class BrokerGroupAddForm(BrokerGroupForm):
def process_submit(self, session):
self.validate(session)
if not self.errors.get(session):
- name = self.group_name.get(session)
+ name = self.name_.get(session)
description = ""
self.task.invoke(session, None, name, description)
- self.task.exit_with_redirect(session, None)
+ self.task.exit_with_redirect(session)
def render_title(self, session):
return self.task.get_title(session, None)
-class BrokerGroupEditForm(BrokerGroupForm):
- def __init__(self, app, name, task):
- super(BrokerGroupEditForm, self).__init__(app, name, task)
+class BrokerGroupEdit(ObjectTask):
+ def __init__(self, app, frame):
+ super(BrokerGroupEdit, self).__init__(app, frame)
- self.group = NewBrokerGroupParameter(app, "group")
- self.add_parameter(self.group)
+ self.form = BrokerGroupEditForm(app, self.name, self)
+ def get_title(self, session):
+ return "Edit"
+
+ def do_invoke(self, invoc, group, name, description):
+ assert group
+
+ group.name = name
+ group.description = description
+
+ conn = self.app.model.get_sql_connection()
+ cursor = conn.cursor()
+
+ try:
+ group.save(cursor)
+ finally:
+ cursor.close()
+
+ conn.commit()
+
+ invoc.end()
+
+class BrokerGroupEditForm(BrokerGroupForm):
def process_submit(self, session):
self.validate(session)
if not self.errors.get(session):
- group = self.group.get(session)
- name = self.group_name.get(session)
+ group = self.object.get(session)
+ name = self.name_.get(session)
+ description = self.description.get(session)
- self.task.invoke(session, group, name)
- self.task.exit_with_redirect(session, group)
+ self.task.invoke(session, group, name, description)
+ self.task.exit_with_redirect(session)
def process_display(self, session):
- group = self.group.get(session)
- self.group_name.set(session, group.name)
+ group = self.object.get(session)
+ self.name_.set(session, group.name)
+ self.description.set(session, group.description)
+
def render_title(self, session):
- group = self.group.get(session)
- return self.task.get_description(session, group)
+ group = self.object.get(session)
+ return self.task.get_description(session)
+
+class BrokerGroupRemove(ObjectTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_exit(self, session):
+ self.app.main_page.main.messaging.view.show(session)
+
+ def do_invoke(self, invoc, group):
+ conn = self.app.model.get_sql_connection()
+ cursor = conn.cursor()
+
+ try:
+ group.delete(cursor)
+ finally:
+ cursor.close()
+
+ conn.commit()
+
+ invoc.end()
+
+class BrokerEngroup(ObjectTask):
+ def get_title(self, session):
+ return "Add to groups"
+
+ def do_invoke(self, invoc, broker, groups):
+ print "XXX engroup", broker, groups
+
+ invoc.end()
+
+ return
+
+ all_groups = BrokerGroup.select()
+ selected_ids = [x.id for x in selected_groups]
+ for group in all_groups:
+ sql_sel = "broker_id=%i and broker_group_id=%i" % \
+ (broker.id, group.id)
+ existing_mapping = BrokerGroupMapping.select(sql_sel)
+ if not group.id in selected_ids:
+ if existing_mapping.count() > 0:
+ # remove mapping if group is not checked and there
+ # is already a mapping
+ existing_mapping[0].destroySelf()
+ else:
+ if existing_mapping.count() == 0:
+ # add mapping if group is checked but there
+ # is not already a mapping
+ new_mapping = BrokerGroupMapping(brokerID=broker.id,
+ brokerGroupID=group.id)
+ new_mapping.syncUpdate()
Modified: mgmt/newdata/cumin/python/cumin/messaging/brokerlink.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/brokerlink.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/brokerlink.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -2,6 +2,9 @@
from mint import *
from wooly import *
+from cumin.objectframe import *
+from cumin.objectselector import *
+from cumin.objecttask import *
from wooly.widgets import *
from cumin.widgets import *
from cumin.parameters import *
@@ -15,6 +18,27 @@
strings = StringCatalog(__file__)
+class BrokerLinkFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.org_apache_qpid_broker.Link
+
+ super(BrokerLinkFrame, self).__init__(app, name, cls)
+
+ self.view.add_tab(RouteSelector(app, "routes", self.object))
+
+ self.route_add = RouteAdd(app, self)
+ self.remove = BrokerLinkRemove(app, self)
+
+class BrokerLinkRemove(ObjectTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_exit(self, session):
+ self.app.main_page.main.messaging.broker.view.show(session)
+
+ def do_invoke(self, invoc, link):
+ self.qmf_call(invoc, link, "remove")
+
class BrokerLinkSelector(ObjectSelector):
def __init__(self, app, name, vhost):
cls = app.rosemary.org_apache_qpid_broker.Link
@@ -34,20 +58,17 @@
self.add_attribute_column(cls.transport)
self.add_attribute_column(cls.durable)
- self.add_task(app.messaging.BrokerLinkAdd, self.vhost)
- self.add_selection_task(app.messaging.BrokerLinkRemove)
+ self.remove = BrokerLinkSelectionRemove(app, self)
# Address column XXX
-class BrokerLinkFrame(ObjectFrame):
- def __init__(self, app, name):
- cls = app.rosemary.org_apache_qpid_broker.Link
+class BrokerLinkSelectionRemove(SelectionTask):
+ def get_title(self, session):
+ return "Remove"
- super(BrokerLinkFrame, self).__init__(app, name, cls)
+ def do_invoke(self, invoc, link):
+ self.qmf_call(invoc, link, "remove")
- routes = RouteSelector(app, "routes", self.object)
- self.view.add_tab(routes)
-
class RouteSelector(ObjectSelector):
def __init__(self, app, name, link):
cls = app.rosemary.org_apache_qpid_broker.Bridge
@@ -64,11 +85,27 @@
self.add_attribute_column(cls.tag)
self.add_attribute_column(cls.excludes)
- self.add_task(app.messaging.RouteAdd, self.link)
- self.add_selection_task(app.messaging.RouteRemove)
+ self.remove = RouteSelectionRemove(app, self)
+class RouteSelectionRemove(SelectionTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_invoke(self, invoc, route):
+ self.qmf_call(invoc, route, "close")
+
# XXX RouteFrame
+class RouteRemove(ObjectTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_exit(self, session):
+ self.app.main_page.main.messaging.broker.view.show(session)
+
+ def do_invoke(self, invoc, route):
+ self.qmf_call(invoc, route, "close")
+
class LinkView(CuminView):
def __init__(self, app, name, link):
super(LinkView, self).__init__(app, name, link)
@@ -78,9 +115,7 @@
#self.tabs.add_tab(LinkStats(app, "stats"))
- self.routes = RouteSet(app, "routes", link)
- self.tabs.add_tab(self.routes)
-
+ self.tabs.add_tab(RouteSet(app, "routes", link))
self.tabs.add_tab(CuminDetails(app, "details", link))
class LinkGeneralStatSet(StatSet):
@@ -172,12 +207,25 @@
def render_title(self, session):
return "Choose an Exchange"
-class RouteAddForm(FieldSubmitForm):
+class RouteAdd(ObjectTask):
+ def __init__(self, app, frame):
+ super(RouteAdd, self).__init__(app, frame)
+
+ self.form = RouteAddForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Add route"
+
+ def do_invoke(self, invoc, link, exchange, key, tag, dynamic, sync,
+ excludes):
+ self.qmf_call(invoc, link, "bridge",
+ link.durable, exchange.name, exchange.name,
+ key, tag, excludes, False, False, dynamic, sync)
+
+class RouteAddForm(ObjectTaskForm):
def __init__(self, app, name, task):
- super(RouteAddForm, self).__init__(app, name)
+ super(RouteAddForm, self).__init__(app, name, task)
- self.task = task
-
self.link = LinkParameter(app, "link")
self.add_parameter(self.link)
@@ -266,7 +314,7 @@
self.task.invoke(session, link, exchange, key, tag,
dynamic, sync, excludes)
- self.task.exit_with_redirect(session, link)
+ self.task.exit_with_redirect(session)
def render_title(self, session):
link = self.link.get(session)
@@ -281,6 +329,27 @@
self.object = ListParameter(app, "route", item)
self.add_parameter(self.object)
+class BrokerLinkAdd(ObjectTask):
+ def __init__(self, app, frame):
+ super(BrokerLinkAdd, self).__init__(app, frame)
+
+ self.form = BrokerLinkAddForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Add broker link"
+
+ def do_invoke(self, invoc, vhost, host, port, durable, username,
+ password, transport):
+ broker = vhost.broker # XXX
+
+ if username == "anonymous":
+ mech = "ANONYMOUS"
+ else:
+ mech = "PLAIN"
+
+ self.qmf_call(invoc, broker, "connect",
+ host, port, durable, mech, username, password, transport)
+
class BrokerLinkAddForm(ObjectTaskForm):
def __init__(self, app, name, task):
super(BrokerLinkAddForm, self).__init__(app, name, task)
@@ -323,7 +392,7 @@
self.task.invoke(session, vhost, host, port,
durable, username, password, transport)
- self.task.exit_with_redirect(session, vhost)
+ self.task.exit_with_redirect(session)
class Host(NameField):
def render_title(self, session):
Modified: mgmt/newdata/cumin/python/cumin/messaging/connection.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/connection.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/connection.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,9 +1,10 @@
from wooly import *
from wooly.widgets import *
from wooly.tables import *
-from datetime import datetime
+
from cumin.objectframe import *
from cumin.objectselector import *
+from cumin.objecttask import *
from cumin.stat import *
from cumin.widgets import *
from cumin.parameters import *
@@ -14,6 +15,43 @@
strings = StringCatalog(__file__)
+class ConnectionFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.org_apache_qpid_broker.Connection
+
+ super(ConnectionFrame, self).__init__(app, name, cls)
+
+ self.icon_href = "resource?name=client-36.png"
+
+ self.session = SessionFrame(app, "session")
+ self.add_mode(self.session)
+
+ self.overview = ConnectionOverview(app, "overview", self.object)
+ self.view.add_tab(self.overview)
+
+ self.view.add_tab(SessionSelector(app, "sessions", self.object))
+
+ self.close = ConnectionClose(app, self)
+
+class ConnectionClose(ObjectTask):
+ def get_title(self, session):
+ return "Close"
+
+ def do_invoke(self, invoc, conn):
+ # XXX generalize this check and use it for other closes
+
+ session_ids = set()
+
+ for broker in self.app.model.mint.model.qmf_brokers:
+ session_ids.add(broker.getSessionId())
+
+ for sess in conn.sessions:
+ if sess.name in session_ids:
+ raise Exception \
+ ("Cannot close management connection %s" % conn.address)
+
+ self.qmf_call(invoc, conn, "close")
+
class ConnectionSelector(ObjectSelector):
def __init__(self, app, name, vhost):
cls = app.rosemary.org_apache_qpid_broker.Connection
@@ -36,6 +74,27 @@
self.add_attribute_column(cls.bytesFromClient)
self.add_attribute_column(cls.bytesToClient)
+ self.close = ConnectionSelectionClose(app, self)
+
+class ConnectionSelectionClose(SelectionTask):
+ def get_title(self, session):
+ return "Close"
+
+ def do_invoke(self, invoc, conn):
+ # XXX generalize this check and use it for other closes
+
+ session_ids = set()
+
+ for broker in self.app.model.mint.model.qmf_brokers:
+ session_ids.add(broker.getSessionId())
+
+ for sess in conn.sessions:
+ if sess.name in session_ids:
+ raise Exception \
+ ("Cannot close management connection %s" % conn.address)
+
+ self.qmf_call(invoc, conn, "close")
+
class ConnectionProcessColumn(ObjectAttributeColumn):
def __init__(self, app, name, attr, pid_attr):
super(ConnectionProcessColumn, self).__init__(app, name, attr)
@@ -57,31 +116,6 @@
args = (record[self.field.index], record[self.pid_field.index])
return "%s (%i)" % args
-class ConnectionFrame(ObjectFrame):
- def __init__(self, app, name):
- cls = app.rosemary.org_apache_qpid_broker.Connection
-
- super(ConnectionFrame, self).__init__(app, name, cls)
-
- self.icon_href = "resource?name=client-36.png"
-
- self.overview = ConnectionStats(app, "overview", self.object)
- self.view.add_tab(self.overview)
-
-class ConnectionView(CuminView):
- def __init__(self, app, name, conn):
- super(ConnectionView, self).__init__(app, name, conn)
-
- self.tabs = TabbedModeSet(app, "tabs")
- self.add_child(self.tabs)
-
- self.tabs.add_tab(ConnectionStats(app, "stats", conn))
-
- self.sessions = SessionSet(app, "sessions", conn)
- self.tabs.add_tab(self.sessions)
-
- self.tabs.add_tab(CuminDetails(app, "details", conn))
-
class ConnectionGeneralStatSet(StatSet):
def __init__(self, app, name, object):
super(ConnectionGeneralStatSet, self).__init__(app, name, object)
@@ -95,9 +129,9 @@
self.attrs = ("bytesFromClient", "bytesToClient",
"framesFromClient", "framesToClient")
-class ConnectionStats(Widget):
+class ConnectionOverview(Widget):
def __init__(self, app, name, conn):
- super(ConnectionStats, self).__init__(app, name)
+ super(ConnectionOverview, self).__init__(app, name)
self.add_child(ConnectionIOStatSet(app, "io", conn))
self.add_child(ConnectionGeneralStatSet(app, "general", conn))
@@ -113,3 +147,60 @@
class SendReceiveRateChart(StatFlashChart):
def render_title(self, session):
return "Bytes sent and received"
+
+class SessionFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.org_apache_qpid_broker.Session
+
+ super(SessionFrame, self).__init__(app, name, cls)
+
+ self.close = SessionClose(app, self)
+ self.detach = SessionDetach(app, self)
+
+class SessionClose(ObjectTask):
+ def get_title(self, session):
+ return "Close"
+
+ def do_invoke(self, invoc, sess):
+ self.qmf_call(invoc, sess, "close")
+
+class SessionDetach(ObjectTask):
+ def get_title(self, session):
+ return "Detach"
+
+ def do_invoke(self, invoc, sess):
+ self.qmf_call(invoc, sess, "detach")
+
+class SessionSelector(ObjectSelector):
+ def __init__(self, app, name, conn):
+ cls = app.rosemary.org_apache_qpid_broker.Session
+
+ super(SessionSelector, self).__init__(app, name, cls)
+
+ self.conn = conn
+
+ frame = "main.messaging.broker.connection.session"
+ col = ObjectLinkColumn(app, "name", cls.name, cls._id, frame)
+ self.add_column(col)
+
+ self.add_attribute_column(cls.attached)
+ self.add_attribute_column(cls.detachedLifespan)
+ self.add_attribute_column(cls.framesOutstanding)
+ self.add_attribute_column(cls.clientCredit)
+
+ self.close = SessionSelectionClose(app, self)
+ self.detach = SessionSelectionDetach(app, self)
+
+class SessionSelectionClose(SelectionTask):
+ def get_title(self, session):
+ return "Close"
+
+ def do_invoke(self, invoc, sess):
+ self.qmf_call(invoc, sess, "close")
+
+class SessionSelectionDetach(SelectionTask):
+ def get_title(self, session):
+ return "Detach"
+
+ def do_invoke(self, invoc, sess):
+ self.qmf_call(invoc, sess, "detach")
Modified: mgmt/newdata/cumin/python/cumin/messaging/connection.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/connection.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/connection.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,4 +1,4 @@
-[ConnectionStats.html]
+[ConnectionOverview.html]
<table class="twocol">
<tbody>
<tr>
Modified: mgmt/newdata/cumin/python/cumin/messaging/exchange.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/exchange.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/exchange.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -18,6 +18,44 @@
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.messaging.exchange")
+class ExchangeFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.org_apache_qpid_broker.Exchange
+
+ super(ExchangeFrame, self).__init__(app, name, cls)
+
+ self.icon_href = "resource?name=exchange-36.png"
+
+ self.overview = ExchangeOverview(app, "overview", self.object)
+ self.view.add_tab(self.overview)
+
+ self.bindings = ExchangeBindingSelector(app, "bindings", self.object)
+ self.view.add_tab(self.bindings)
+
+ self.remove = ExchangeRemove(app, self)
+
+ def get_title(self, session):
+ title = super(ExchangeFrame, self).get_title(session)
+
+ if title == "":
+ title = "Default exchange"
+
+ return title
+
+class ExchangeRemove(ObjectTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_exit(self, session):
+ self.app.main_page.main.messaging.broker.view.show(session)
+
+ def do_invoke(self, invoc, exchange):
+ session = self.app.model.get_session_by_object(exchange)
+ session.exchange_delete(exchange=exchange.name)
+ session.sync()
+
+ invoc.end()
+
class ExchangeSelector(ObjectSelector):
def __init__(self, app, name, vhost):
cls = app.rosemary.org_apache_qpid_broker.Exchange
@@ -37,34 +75,19 @@
self.add_reference_filter(vhost, cls.vhostRef)
- self.add_task(app.messaging.ExchangeAdd, self.vhost)
- self.add_selection_task(app.messaging.ExchangeRemove)
+ self.remove = ExchangeSelectionRemove(app, self)
-class ExchangeFrame(ObjectFrame):
- def __init__(self, app, name):
- cls = app.rosemary.org_apache_qpid_broker.Exchange
+class ExchangeSelectionRemove(SelectionTask):
+ def get_title(self, session):
+ return "Remove"
- super(ExchangeFrame, self).__init__(app, name, cls)
+ def do_invoke(self, invoc, exchange):
+ session = self.app.model.get_session_by_object(exchange)
+ session.exchange_delete(exchange=exchange.name)
+ session.sync()
- self.icon_href = "resource?name=exchange-36.png"
+ invoc.end()
- self.overview = ExchangeOverview(app, "overview", self.object)
- self.view.add_tab(self.overview)
-
- self.bindings = ExchangeBindingSelector(app, "bindings", self.object)
- self.view.add_tab(self.bindings)
-
- # XXX self.add_summary_task(main.module.exchange_remove)
-
- def render_title(self, session):
- exchange = self.object.get(session)
-
- if exchange:
- if exchange.name:
- return super(ExchangeFrame, self).render_title(session)
- else:
- return "Default exchange"
-
class ExchangeBindingSelector(BindingSelector):
def __init__(self, app, name, exchange):
super(ExchangeBindingSelector, self).__init__(app, name)
@@ -194,6 +217,34 @@
def render_title(self, session):
return "Advanced options"
+class ExchangeAdd(ObjectTask):
+ MSG_SEQUENCE = "qpid.msg_sequence"
+ IVE = "qpid.ive"
+
+ def __init__(self, app, frame):
+ super(ExchangeAdd, self).__init__(app, frame)
+
+ self.form = ExchangeAddForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Add exchange"
+
+ def do_invoke(self, invoc, vhost, name, type, durable, sequence, ive):
+ args = dict()
+
+ if sequence:
+ args[self.MSG_SEQUENCE] = 1
+
+ if ive:
+ args[self.IVE] = 1
+
+ session = self.app.model.get_session_by_object(vhost)
+ session.exchange_declare \
+ (exchange=name, type=type, durable=durable, arguments=args)
+ session.sync()
+
+ invoc.end()
+
class ExchangeAddForm(ObjectTaskForm):
def __init__(self, app, name, task):
super(ExchangeAddForm, self).__init__(app, name, task)
@@ -226,7 +277,7 @@
self.task.invoke(session, vhost, name, type,
durable, sequence_numbers, initial_value)
- self.task.exit_with_redirect(session, vhost)
+ self.task.exit_with_redirect(session)
class ExchangeGeneralStatSet(StatSet):
def __init__(self, app, name, object):
@@ -272,4 +323,5 @@
@classmethod
def get_builtins(cls):
- return ["", "amq.direct", "amq.topic", "amq.match", "amq.fanout", "qpid.management"]
+ return ["", "amq.direct", "amq.topic", "amq.match",
+ "amq.fanout", "qpid.management"]
Modified: mgmt/newdata/cumin/python/cumin/messaging/main.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/main.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/main.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -7,7 +7,6 @@
from broker import *
from brokergroup import *
-from model import *
from test import *
strings = StringCatalog(__file__)
@@ -16,71 +15,11 @@
def __init__(self, app, name):
super(Module, self).__init__(app, name)
- cls = app.rosemary.org_apache_qpid_broker.Broker
+ self.frame = MessagingFrame(self.app, "messaging")
- BrokerEngroup(self, cls)
- ExchangeAdd(self, cls)
- BrokerLinkAdd(self, cls)
- QueueAdd(self, cls)
-
- cls = app.rosemary.com_redhat_cumin.BrokerGroup
-
- BrokerGroupAdd(self, cls)
- BrokerGroupEdit(self, cls)
-
- task = BrokerGroupRemove(self, cls)
- SelectionTask(task)
-
- cls = app.rosemary.org_apache_qpid_broker.Queue
-
- task = QueueRemove(self, cls)
- SelectionTask(task)
-
- task = QueuePurge(self, cls)
- SelectionTask(task)
-
- BindingAdd(self, cls)
- MoveMessages(self, cls)
-
- cls = app.rosemary.org_apache_qpid_broker.Exchange
-
- task = ExchangeRemove(self, cls)
- SelectionTask(task)
-
- cls = app.rosemary.org_apache_qpid_broker.Binding
-
- task = BindingRemove(self, cls)
- SelectionTask(task)
-
- cls = app.rosemary.org_apache_qpid_broker.Link
-
- task = BrokerLinkRemove(self, cls)
- SelectionTask(task)
-
- RouteAdd(self, cls)
-
- cls = app.rosemary.org_apache_qpid_broker.Bridge
-
- task = RouteRemove(self, cls)
- SelectionTask(task)
-
- cls = app.rosemary.org_apache_qpid_broker.Connection
-
- task = ConnectionClose(self, cls)
- SelectionTask(task)
-
- cls = app.rosemary.org_apache_qpid_broker.Session
-
- task = SessionDetach(self, cls)
- SelectionTask(task)
-
- task = SessionClose(self, cls)
- SelectionTask(task)
-
def init(self):
super(Module, self).init()
- self.frame = MessagingFrame(self.app, "messaging")
self.app.main_page.main.messaging = self.frame
self.app.main_page.main.add_tab(self.frame)
@@ -98,8 +37,8 @@
self.add_mode(self.broker)
self.add_sticky_view(self.broker)
- self.broker_group = BrokerGroupFrame(app, "brokergroup")
- self.add_mode(self.broker_group)
+ self.brokergroup = BrokerGroupFrame(app, "brokergroup")
+ self.add_mode(self.brokergroup)
def render_title(self, session):
return "Messaging"
Deleted: mgmt/newdata/cumin/python/cumin/messaging/model.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/model.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/model.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,330 +0,0 @@
-from wooly import Session
-
-from cumin.model import *
-from cumin.objecttask import *
-from cumin.util import *
-
-from broker import *
-from brokergroup import *
-from brokerlink import *
-from queue import *
-from exchange import *
-
-class ConnectionClose(ObjectTask):
- def get_title(self, session, conn):
- return "Close"
-
- def do_invoke(self, invoc, conn):
- # XXX generalize this check and use it for other closes
-
- session_ids = set()
-
- for broker in self.app.model.mint.model.qmf_brokers:
- session_ids.add(broker.getSessionId())
-
- for sess in conn.sessions:
- if sess.name in session_ids:
- raise Exception \
- ("Cannot close management connection %s" % conn.address)
-
- print "XXX conn.close", conn
-
- invoc.end()
-
-class SessionDetach(ObjectTask):
- def get_title(self, session, sess):
- return "Detach"
-
- def do_invoke(self, invoc, sess):
- self.qmf_call(invoc, sess, "detach")
-
-class SessionClose(ObjectTask):
- def get_title(self, session, sess):
- return "Close"
-
- def do_invoke(self, invoc, sess):
- self.qmf_call(invoc, sess, "close")
-
-class QueueAdd(ObjectTask):
- def __init__(self, module, cls):
- super(QueueAdd, self).__init__(module, cls)
-
- self.form = QueueAddForm(self.app, self.name, self)
-
- def get_title(self, session, vhost):
- return "Add queue"
-
-class QueueRemove(ObjectTask):
- def get_title(self, session, queue):
- return "Remove"
-
- def do_exit(self, session, queue):
- self.app.main_page.main.messaging.broker.view.show(session)
-
- def do_invoke(self, invoc, queue):
- session = self.app.model.get_session_by_object(queue)
- session.queue_delete(queue=queue.name)
- session.sync()
-
- invoc.end()
-
-class QueuePurge(ObjectTask):
- def __init__(self, module, cls):
- super(QueuePurge, self).__init__(module, cls)
-
- self.form = QueuePurgeForm(self.app, self.name, self)
-
- def get_title(self, session, queue):
- return "Purge"
-
- def do_invoke(self, invoc, queue, count=0):
- self.qmf_call(invoc, queue, "purge", count)
-
-class BindingAdd(ObjectTask):
- def __init__(self, module, cls):
- super(BindingAdd, self).__init__(module, cls)
-
- self.form = BindingAddForm(self.app, self.name, self)
-
- def get_title(self, session, vhost):
- return "Add binding"
-
- def do_invoke(self, invoc, vhost, queue, exchange, binding_key, args):
- session = self.app.model.get_session_by_object(vhost)
- session.exchange_bind(queue=queue.name, exchange=exchange.name,
- binding_key=binding_key, arguments=args)
- session.sync()
-
- invoc.end()
-
-class BindingRemove(ObjectTask):
- def get_title(self, session, binding):
- return "Remove"
-
- def do_invoke(self, invoc, binding):
- assert isinstance(binding, Binding)
-
- session = self.app.model.get_session_by_object(binding)
- session.exchange_unbind(queue=binding.queue.name,
- exchange=binding.exchange.name,
- binding_key=binding.bindingKey)
- session.sync()
-
- invoc.end()
-
-class MoveMessages(ObjectTask):
- def __init__(self, module, cls):
- super(MoveMessages, self).__init__(module, cls)
-
- self.form = MoveMessagesForm(self.app, self.name, self)
-
- def get_title(self, session, vhost):
- return "Move messages"
-
- def do_invoke(self, invoc, vhost, src, dst, count):
- broker = vhost.broker
- broker.queueMoveMessages(completion, src.name, dst.name, count)
-
-class ExchangeAdd(ObjectTask):
- MSG_SEQUENCE = "qpid.msg_sequence"
- IVE = "qpid.ive"
-
- def __init__(self, module, cls):
- super(ExchangeAdd, self).__init__(module, cls)
-
- self.form = ExchangeAddForm(self.app, self.name, self)
-
- def get_title(self, session, vhost):
- return "Add exchange"
-
- def do_invoke(self, invoc, vhost, name, type, durable, sequence, ive):
- args = dict()
-
- if sequence:
- args[self.MSG_SEQUENCE] = 1
-
- if ive:
- args[self.IVE] = 1
-
- session = self.app.model.get_session_by_object(vhost)
- session.exchange_declare \
- (exchange=name, type=type, durable=durable, arguments=args)
- session.sync()
-
- invoc.end()
-
-class ExchangeRemove(ObjectTask):
- def get_title(self, session, exchange):
- return "Remove"
-
- def do_exit(self, session, exchange):
- self.app.main_page.main.messaging.broker.view.show(session)
-
- def do_invoke(self, invoc, exchange):
- session = self.app.model.get_session_by_object(exchange)
- session.exchange_delete(exchange=exchange.name)
- session.sync()
-
- invoc.end()
-
-class BrokerLinkAdd(ObjectTask):
- def __init__(self, module, cls):
- super(BrokerLinkAdd, self).__init__(module, cls)
-
- self.form = BrokerLinkAddForm(self.app, self.name, self)
-
- def get_title(self, session, vhost):
- return "Add broker link"
-
- def do_invoke(self, invoc, vhost, host, port, durable, username,
- password, transport):
- broker = vhost.broker # XXX
-
- if username == "anonymous":
- mech = "ANONYMOUS"
- else:
- mech = "PLAIN"
-
- self.qmf_call(invoc, broker, "connect",
- host, port, durable, mech, username, password, transport)
-
-class BrokerLinkRemove(ObjectTask):
- def get_title(self, session, link):
- return "Remove"
-
- def do_exit(self, session, link):
- self.app.main_page.main.messaging.broker.view.show(session)
-
- def do_invoke(self, invoc, link):
- self.qmf_call(invoc, link, "remove")
-
-class RouteAdd(ObjectTask):
- def __init__(self, module, cls):
- super(RouteAdd, self).__init__(module, cls)
-
- self.form = RouteAddForm(self.app, self.name, self)
-
- def get_title(self, session, link):
- return "Add route"
-
- def do_invoke(self, invoc, link, exchange, key, tag, dynamic, sync,
- excludes):
- self.qmf_call(invoc, link, "bridge",
- link.durable, exchange.name, exchange.name,
- key, tag, excludes, False, False, dynamic, sync)
-
-class RouteRemove(ObjectTask):
- def get_title(self, session, route):
- return "Remove"
-
- def do_exit(self, session, route):
- self.app.main_page.main.messaging.broker.view.show(session)
-
- def do_invoke(self, invoc, route):
- self.qmf_call(invoc, "close")
-
-class BrokerGroupAdd(ObjectTask):
- def __init__(self, module, cls):
- super(BrokerGroupAdd, self).__init__(module, cls)
-
- self.form = BrokerGroupAddForm(self.app, self.name, self)
-
- def get_title(self, session, obj):
- return "Add broker group"
-
- def do_invoke(self, invoc, obj, name, description):
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
-
- group = self.cls.create_object(cursor)
- group.name = name
- group.description = description
-
- group.fake_qmf_values()
-
- try:
- group.save(cursor)
- finally:
- cursor.close()
-
- conn.commit()
-
- invoc.end()
-
- return group
-
-class BrokerGroupEdit(ObjectTask):
- def __init__(self, module, cls):
- super(BrokerGroupEdit, self).__init__(module, cls)
-
- self.form = BrokerGroupEditForm(self.app, self.name, self)
-
- def get_title(self, session, group):
- return "Edit"
-
- def do_invoke(self, invoc, group, name, description):
- assert group
-
- group.name = name
-
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
-
- try:
- group.save(cursor)
- finally:
- cursor.close()
-
- conn.commit()
-
- invoc.end()
-
-class BrokerGroupRemove(ObjectTask):
- def get_title(self, session, group):
- return "Remove"
-
- def do_exit(self, session, group):
- self.app.main_page.main.messaging.view.show(session)
-
- def do_invoke(self, invoc, group):
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
-
- try:
- group.delete(cursor)
- finally:
- cursor.close()
-
- conn.commit()
-
- invoc.end()
-
-class BrokerEngroup(ObjectTask):
- def get_title(self, session, broker):
- return "Add to groups"
-
- def do_invoke(self, invoc, broker, groups):
- print "XXX engroup", broker, groups
-
- invoc.end()
-
- return
-
- all_groups = BrokerGroup.select()
- selected_ids = [x.id for x in selected_groups]
- for group in all_groups:
- sql_sel = "broker_id=%i and broker_group_id=%i" % \
- (broker.id, group.id)
- existing_mapping = BrokerGroupMapping.select(sql_sel)
- if not group.id in selected_ids:
- if existing_mapping.count() > 0:
- # remove mapping if group is not checked and there
- # is already a mapping
- existing_mapping[0].destroySelf()
- else:
- if existing_mapping.count() == 0:
- # add mapping if group is checked but there
- # is not already a mapping
- new_mapping = BrokerGroupMapping(brokerID=broker.id,
- brokerGroupID=group.id)
- new_mapping.syncUpdate()
Modified: mgmt/newdata/cumin/python/cumin/messaging/queue.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/queue.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/messaging/queue.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -11,7 +11,6 @@
from cumin.parameters import *
from cumin.sqladapter import *
from cumin.stat import *
-from cumin.table import *
from cumin.util import *
from cumin.widgets import *
@@ -22,6 +21,44 @@
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.messaging.queue")
+class QueueFrame(ObjectFrame):
+ def __init__(self, app, name):
+ cls = app.rosemary.org_apache_qpid_broker.Queue
+
+ super(QueueFrame, self).__init__(app, name, cls)
+
+ self.icon_href = "resource?name=queue-36.png"
+
+ self.overview = QueueOverview(app, "overview", self.object)
+ self.view.add_tab(self.overview)
+
+ self.bindings = QueueBindingSelector(app, "bindings", self.object)
+ self.view.add_tab(self.bindings)
+
+ self.subscriptions = SubscriptionSelector \
+ (app, "subscriptions", self.object)
+ self.view.add_tab(self.subscriptions)
+
+ self.subscription = SubscriptionFrame(app, "subscription")
+ self.add_mode(self.subscription)
+
+ self.remove = QueueRemove(app, self)
+ self.purge = QueuePurge(app, self)
+
+class QueueRemove(ObjectTask):
+ def get_title(self, session):
+ return "Remove"
+
+ def do_exit(self, session, queue):
+ self.app.main_page.main.messaging.broker.view.show(session)
+
+ def do_invoke(self, invoc, queue):
+ session = self.app.model.get_session_by_object(queue)
+ session.queue_delete(queue=queue.name)
+ session.sync()
+
+ invoc.end()
+
class QueueSelector(ObjectSelector):
def __init__(self, app, name, vhost):
cls = app.rosemary.org_apache_qpid_broker.Queue
@@ -41,31 +78,27 @@
self.add_reference_filter(vhost, cls.vhostRef)
- self.add_task(app.messaging.QueueAdd, self.vhost)
- self.add_selection_task(app.messaging.QueuePurge)
- self.add_selection_task(app.messaging.QueueRemove)
+ self.remove = QueueSelectionRemove(app, self)
+ self.purge = QueueSelectionPurge(app, self)
-class QueueFrame(ObjectFrame):
- def __init__(self, app, name):
- cls = app.rosemary.org_apache_qpid_broker.Queue
+class QueueSelectionRemove(SelectionTask):
+ def get_title(self, session):
+ return "Remove"
- super(QueueFrame, self).__init__(app, name, cls)
+ def do_invoke(self, invoc, queue):
+ session = self.app.model.get_session_by_object(queue)
+ session.queue_delete(queue=queue.name)
+ session.sync()
- self.icon_href = "resource?name=queue-36.png"
+ invoc.end()
- self.overview = QueueStats(app, "overview", self.object)
- self.view.add_tab(self.overview)
+class QueueSelectionPurge(SelectionTask):
+ def get_title(self, session):
+ return "Purge"
- self.bindings = QueueBindingSelector(app, "bindings", self.object)
- self.view.add_tab(self.bindings)
+ def do_invoke(self, invoc, queue, count=0):
+ self.qmf_call(invoc, queue, "purge", count)
- self.subscriptions = SubscriptionSelector \
- (app, "subscriptions", self.object)
- self.view.add_tab(self.subscriptions)
-
- self.subscription = SubscriptionFrame(app, "subscription")
- self.add_mode(self.subscription)
-
class QueueBindingSelector(BindingSelector):
def __init__(self, app, name, queue):
super(QueueBindingSelector, self).__init__(app, name)
@@ -76,6 +109,15 @@
self.queue_column.visible = False
+class QueueAdd(ObjectTask):
+ def __init__(self, app, frame):
+ super(QueueAdd, self).__init__(app, frame)
+
+ self.form = QueueAddForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Add queue"
+
class QueueAddForm(ObjectTaskForm):
def __init__(self, app, name, task):
super(QueueAddForm, self).__init__(app, name, task)
@@ -334,6 +376,18 @@
self.process_return(session)
+class QueuePurge(ObjectTask):
+ def __init__(self, app, frame):
+ super(QueuePurge, self).__init__(app, frame)
+
+ self.form = QueuePurgeForm(app, self.name, self)
+
+ def get_title(self, session):
+ return "Purge"
+
+ def do_invoke(self, invoc, queue, count=0):
+ self.qmf_call(invoc, queue, "purge", count)
+
class QueuePurgeForm(ObjectTaskForm):
def __init__(self, app, name, task):
super(QueuePurgeForm, self).__init__(app, name, task)
@@ -359,7 +413,7 @@
raise Exception("Wrong Value")
self.task.invoke(session, queue, count)
- self.task.exit_with_redirect(session, queue)
+ self.task.exit_with_redirect(session)
class BindSummaryPropertiesField(FormField):
def __init__(self, app, name, queue):
@@ -424,11 +478,11 @@
print "XXX queue binding add", queue, form_binding_info
#self.task.invoke(session, queue, args)
- self.task.exit_with_redirect(session, queue)
+ self.task.exit_with_redirect(session)
-class QueueStats(RadioModeSet):
+class QueueOverview(RadioModeSet):
def __init__(self, app, name, queue):
- super(QueueStats, self).__init__(app, name)
+ super(QueueOverview, self).__init__(app, name)
self.add_tab(QueueStatsGeneral(app, "gen", queue))
self.add_tab(QueueStatsDurability(app, "dur", queue))
@@ -527,17 +581,11 @@
class JournalAttribute(Attribute):
def get(self, session):
queue = self.widget.object.get(session)
-
cls = self.app.rosemary.com_redhat_rhm_store.Journal
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
+ journals = cls.get_selection(session.cursor,
+ _queueRef_id=queue._id)
- try:
- journals = cls.get_selection(cursor, _queueRef_id=queue._id)
- finally:
- cursor.close()
-
return len(journals)
def render_title(self, session):
@@ -632,38 +680,3 @@
except IndexError:
return None
-class MoveMessagesForm(FieldSubmitForm):
- def __init__(self, app, name, task):
- super(MoveMessagesForm, self).__init__(app, name)
-
- self.task = task
-
- self.queue = QueueParameter(app, "queue")
- self.add_parameter(self.queue)
-
- self.dest_queue = QueueSelectField(app, "dest", self)
- self.add_field(self.dest_queue)
-
- self.count = MultiplicityField(app, "count")
- self.add_field(self.count)
-
- def process_submit(self, session):
- self.validate(session)
-
- if not self.errors.get(session):
- queue = self.queue.get(session)
- dest_queue = self.dest_queue.get(session)
- scount = self.count.get(session)
-
- if scount == "all":
- count = 0
- elif scount == "top":
- count = 1
- elif scount == "N":
- count = self.count.top_n.get_n_value(session)
- else:
- raise Exception("Wrong Value")
-
- self.task.invoke(session, queue, dest_queue, count)
- self.task.exit_with_redirect(session, queue)
-
Modified: mgmt/newdata/cumin/python/cumin/model.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/model.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/model.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -173,207 +173,6 @@
finally:
self.lock.release()
-class Task(object):
- def __init__(self, app, cls):
- self.app = app
- self.cls = cls
-
- self.form = None
-
- self.aggregate = False
- self.navigable = True
-
- if self.cls:
- if self.__class__ not in self.cls.tasks_by_class:
- self.cls.add_task(self)
-
- # make this take app? XXX
- def init(self):
- if self.form:
- self.app.form_page.modes.add_mode(self.form)
- else:
- log.debug("Task %s has no form associated with it", self)
-
- def get_title(self, session):
- raise Exception("Not implemented")
-
- def is_enabled(self, session, object):
- return True
-
- def get_description(self, session, object):
- verb = self.get_title(session)
-
- if object is None:
- text = verb
- else:
- cls = self.cls.get_title(session)
- obj = self.cls.get_object_name(object)
- text = "%s %s '%s'" % (verb, cls, obj)
-
- return text
-
- def get_href(self, session, object):
- return self.enter(session, object).marshal()
-
- def enter(self, session, object):
- #log.debug("Entering %s", self)
-
- nsession = wooly.Session(self.app.form_page)
-
- self.form.return_url.set(nsession, session.marshal())
- self.form.show(nsession)
-
- self.do_enter(nsession, object)
-
- #log.info("Entered %s", self)
-
- return nsession
-
- def do_enter(self, session, object):
- pass
-
- def exit_with_redirect(self, session, object):
- osession = self.exit(session, object)
- self.form.page.redirect.set(session, osession.marshal())
-
- def exit(self, session, object):
- log.debug("Exiting %s", self)
-
- url = self.form.return_url.get(session)
- osession = wooly.Session.unmarshal(self.app, url)
-
- self.do_exit(osession, object)
-
- log.info("Exited %s", self)
-
- return osession
-
- def do_exit(self, session, object):
- pass
-
- def invoke(self, session, object, *args, **kwargs):
- invoc = self.start(session, object)
-
- try:
- result = self.do_invoke(session, object, *args, **kwargs)
-
- self.end(invoc)
- except Exception, e:
- self.exception(invoc, e)
-
- def do_invoke(self, *args, **kwargs):
- raise Exception("Not implemented")
-
- def start(self, session, object):
- log.debug("Starting %s", self)
-
- now = datetime.now()
- subject = None
-
- login = session.client_session.attributes["login_session"]
-
- invoc = TaskInvocation(self, login.user, object)
- invoc.status = invoc.PENDING
- invoc.start_time = now
- invoc.last_change_time = now
-
- self.app.model.task_invocations.append(invoc)
-
- log.info("Started %s", self)
-
- return invoc
-
- def end(self, invoc):
- log.debug("Ending %s", self)
-
- now = datetime.now()
-
- invoc.status = invoc.OK
- invoc.end_time = now
- invoc.last_change_time = now
-
- log.info("Ended %s", self)
-
- def exception(self, invoc, e):
- now = datetime.now()
-
- invoc.status = invoc.FAILED
- invoc.end_time = now
- invoc.last_change_time = now
- invoc.exception = e
-
- log.exception(e)
-
- def __str__(self):
- return "%s.%s" % (self.__module__, self.__class__.__name__)
-
-class SetTask(Task):
- def __init__(self, app, cls):
- super(SetTask, self).__init__(app, cls)
-
- self.item_task = None
- self.aggregate = True
-
- def init(self):
- super(SetTask, self).init()
-
- if not self.item_task:
- raise Exception("Task %s has no item task" % self)
-
- def get_title(self, session):
- return self.item_task.get_title(session)
-
- def get_description(self, session, objects):
- verb = self.item_task.get_title(session)
- cls = self.cls.get_title(session)
- count = len(objects)
- return "%s %i %s" % (verb, count, conjugate(cls, count))
-
- def invoke(self, session, objects, *args, **kwargs):
- for object in objects:
- self.item_task.invoke(session, object, *args, **kwargs)
-
-class QmfTask(Task):
- def invoke(self, session, object, *args, **kwargs):
- invoc = self.start(session, object)
-
- def completion(status_code, output_args):
- invoc.last_change_time = datetime.now()
-
- if status_code == 0 or status_code == "OK":
- invoc.status = invoc.OK
- else:
- invoc.status = invoc.FAILED
-
- invoc.status_code = status_code
- invoc.output_args = output_args
-
- try:
- self.do_invoke(completion, session, object, *args, **kwargs)
- except Exception, e:
- self.exception(invoc, e)
-
- def do_invoke(self, completion, session, object, *args, **kwargs):
- raise Exception("Not implemented")
-
-class TaskInvocation(object):
- PENDING = "pending"
- FAILED = "failed"
- OK = "ok"
-
- def __init__(self, task, subject, object):
- self.task = task
- self.subject = subject
- self.object = object
- self.start_time = None
- self.end_time = None
- self.last_change_time = None
- self.status = None
- self.exception = None
-
- self.status_code = None
- self.output_args = None
-
class CuminProperty(object):
def __init__(self, cls, name):
self.model = cls.model
Modified: mgmt/newdata/cumin/python/cumin/objectframe.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/objectframe.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/objectframe.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -22,13 +22,15 @@
self.object = Attribute(app, "object")
self.add_attribute(self.object)
- self.view = ObjectView(app, name, self.object)
+ self.view = ObjectView(app, "view", self.object)
self.add_child(self.view)
self.icon_href = "resource?name=action-36.png"
+ self.tasks = list()
+
self.summary_attributes = list()
- self.summary_tasks = list()
+ self.summary_tasks = list() # XXX
self.add_summary_attribute(cls._qmf_update_time)
@@ -37,16 +39,14 @@
self.summary_attributes.append(attr)
- def add_summary_task(self, task):
- assert task not in self.summary_tasks, task
-
- self.summary_tasks.append(task)
-
def init(self):
super(ObjectFrame, self).init()
assert self.cls, self
+ for task in self.tasks:
+ task.init()
+
def get_href(self, session, id):
branch = session.branch()
@@ -67,18 +67,15 @@
assert id
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
+ obj = self.get_object(session, id)
- try:
- obj = self.cls.get_object(cursor, id)
- finally:
- cursor.close()
-
self.object.set(session, obj)
super(ObjectFrame, self).do_process(session)
+ def get_object(self, session, id):
+ return self.cls.get_object(session.cursor, id)
+
class ObjectAttributes(Widget):
def __init__(self, app, name, object):
super(ObjectAttributes, self).__init__(app, name)
@@ -111,16 +108,34 @@
def render_value(self, session, name, value):
return xml_escape(str(value))
-class ObjectTaskLinks(WidgetSet):
+class ObjectTasks(Widget):
def __init__(self, app, name, object):
- super(ObjectTaskLinks, self).__init__(app, name)
+ super(ObjectTasks, self).__init__(app, name)
self.object = object
+ self.link = ObjectTasksLink(app, "link")
+ self.add_child(self.link)
+
+ def render_links(self, session):
+ writer = Writer()
+
+ for task in self.frame.tasks:
+ writer.write(self.link.render(session, task))
+
+ return writer.to_string()
+
+class ObjectTasksLink(Link):
+ def render_href(self, session, task):
+ return task.get_href(session)
+
+ def render_content(self, session, task):
+ return task.get_title(session)
+
class SummaryAttributes(ObjectAttributes):
pass
-class SummaryLinks(ObjectTaskLinks):
+class SummaryTasks(ObjectTasks):
pass
class ObjectView(Widget):
@@ -212,25 +227,13 @@
attributes = self.Attributes(app, "attributes", self.object)
self.add_child(attributes)
- links = self.Links(app, "links", self.object)
- self.add_child(links)
+ tasks = SummaryTasks(app, "tasks", self.object)
+ self.add_child(tasks)
class Attributes(SummaryAttributes):
def get_attributes(self, session):
return self.frame.summary_attributes
- class Links(SummaryLinks):
- def init(self):
- super(ObjectViewSummary.Links, self).init()
-
- for task in self.frame.summary_tasks:
- name = task.__class__.__name__
- link = ObjectTaskLink(self.app, name, task, self.object)
-
- self.add_child(link)
-
- link.init()
-
class ObjectDetails(Widget):
def __init__(self, app, name, object):
super(ObjectDetails, self).__init__(app, name)
Modified: mgmt/newdata/cumin/python/cumin/objectframe.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/objectframe.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/objectframe.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -55,9 +55,18 @@
[ObjectAttributesEntry.html]
<tr><th>{name}</th><td>{value}</td></tr>
+[ObjectTasks.html]
+<ul class="{class}">
+ {links}
+</ul>
+
+[ObjectTasksLink.html]
+<li><a href="{href}">{content}</a></li>
+
[SummaryAttributes.css]
div.SummaryAttributes {
width: 20em;
+ margin: 0 0 1.5em 0;
}
div.SummaryAttributes tbody tr {
@@ -79,12 +88,12 @@
</table>
</div>
-[SummaryLinks.css]
-ul.SummaryLinks {
+[SummaryTasks.css]
+ul.SummaryTasks {
width: 15em;
float: right;
list-style: none;
- margin: 0;
+ margin: 0 0 1.5em 0;
}
[ObjectView.css]
@@ -136,7 +145,7 @@
[ObjectViewSummary.html]
<div class="{class}">
- {links}
+ {tasks}
{attributes}
</div>
Modified: mgmt/newdata/cumin/python/cumin/objectselector.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/objectselector.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/objectselector.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -10,8 +10,6 @@
strings = StringCatalog(__file__)
-# XXX ObjectTable
-
class ObjectSelector(DataTable, Form):
def __init__(self, app, name, cls, adapter=None):
assert isinstance(cls, RosemaryClass), cls
@@ -29,16 +27,13 @@
item = IntegerParameter(app, "item")
- self.selection = ListParameter(app, "selection", item)
- self.add_parameter(self.selection)
+ self.ids = ListParameter(app, "id", item)
+ self.add_parameter(self.ids)
self.checkbox_column = ObjectCheckboxColumn \
- (app, "id", cls._id, self.selection)
+ (app, "id", cls._id, self.ids)
self.add_column(self.checkbox_column)
- self.links = ObjectSelectorLinks(app, "links")
- self.add_child(self.links)
-
self.switches = ObjectSelectorSwitches(app, "switches")
self.add_child(self.switches)
@@ -51,27 +46,29 @@
# (RosemaryAttribute this, RosemaryAttribute that, Attribute object)
self.filter_specs = list()
+ self.tasks = list()
+
def init(self):
+ super(ObjectSelector, self).init()
+
assert self.cls, self
assert self.adapter, self
assert self.adapter.id_field, self
- super(ObjectSelector, self).init()
+ for task in self.tasks:
+ task.init()
+ for task in self.tasks:
+ button = SelectionTaskButton(self.app, task)
+ self.buttons.add_child(button)
+ button.init()
+
def add_attribute_column(self, attr):
assert isinstance(attr, RosemaryAttribute), attr
col = ObjectAttributeColumn(self.app, attr.name, attr)
self.add_column(col)
- def add_task(self, task, attribute):
- link = ObjectTaskLink(self.app, task.name, task, attribute)
- self.links.add_child(link)
-
- def add_selection_task(self, task):
- button = SelectionTaskButton(self.app, task.name, task, self.selection)
- self.buttons.add_child(button)
-
def add_filter(self, attribute, this, that=None):
if not that:
that = this
@@ -177,9 +174,6 @@
if len(self.children):
return super(ObjectSelectorControl, self).do_render(session)
-class ObjectSelectorLinks(ObjectSelectorControl):
- pass
-
class ObjectSelectorSwitches(ObjectSelectorControl):
pass
Modified: mgmt/newdata/cumin/python/cumin/objectselector.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/objectselector.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/objectselector.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -5,11 +5,6 @@
padding: 0;
}
-div.ObjectSelectorLinks {
- margin: 0 0 1em 0;
- font-size: 0.9em;
-}
-
div.ObjectSelectorFilters {
float: right;
}
@@ -24,11 +19,6 @@
margin: 0 0.5em 0 0;
}
-div.ObjectSelectorButtons ul {
- padding: 0;
- margin: 0;
-}
-
div.ObjectSelectorButtons ul li {
margin: 0 0.4em 0 0;
display: inline;
@@ -36,8 +26,6 @@
[ObjectSelector.html]
<div id="{id}" class="{class}">
- {links}
-
{filters}
{switches}
Modified: mgmt/newdata/cumin/python/cumin/objecttask.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/objecttask.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/objecttask.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -12,64 +12,46 @@
strings = StringCatalog(__file__)
class Task(object):
- def __init__(self, module, cls, name):
- assert isinstance(cls, RosemaryClass), cls
+ def __init__(self, app):
+ self.app = app
+ self.name = self.__class__.__name__
- self.app = module.app
- self.module = module
- self.cls = cls
- self.name = name
-
self.form = None
- assert not hasattr(self.module, self.name), (self.module, self.name)
-
- self.module.tasks.append(self)
- setattr(self.module, self.name, self)
-
def init(self):
log.info("Initializing %s", self)
assert self.form, self.form
- self.module.app.form_page.modes.add_mode(self.form)
+ # XXX make this idempotent
+ self.app.form_page.modes.add_mode(self.form)
- def get_title(self, session, obj):
+ def get_title(self, session):
pass
- def get_description(self, session, obj):
- return self.get_title(session, obj)
+ def get_description(self, session):
+ return self.get_title(session)
- def get_href(self, session, obj):
- return self.enter(session, obj).marshal()
+ def get_href(self, session):
+ return self.enter(session).marshal()
- def enter(self, session, obj):
- nsession = wooly.Session(self.module.app.form_page)
-
- self.form.return_url.set(nsession, session.marshal())
- self.form.show(nsession)
-
- self.do_enter(nsession, obj)
-
- return nsession
-
- def exit(self, session, obj):
+ def exit(self, session):
log.debug("Exiting %s", self)
url = self.form.return_url.get(session)
osession = wooly.Session.unmarshal(self.app, url)
- self.do_exit(osession, obj)
+ self.do_exit(osession)
log.info("Exited %s", self)
return osession
- def do_exit(self, session, obj):
+ def do_exit(self, session):
pass
- def exit_with_redirect(self, session, obj):
- osession = self.exit(session, obj)
+ def exit_with_redirect(self, session):
+ osession = self.exit(session)
self.form.page.redirect.set(session, osession.marshal())
def start(self, session, obj):
@@ -77,23 +59,18 @@
login = session.client_session.attributes["login_session"]
- invoc = TaskInvocation(self, login.user, obj)
+ invoc = TaskInvocation(self, login)
now = datetime.now()
invoc.start_time = now
- invoc.last_change_time = now
+ invoc.update_time = now
invoc.status = invoc.PENDING
- self.app.model.task_invocations.append(invoc)
-
log.info("Started %s", self)
return invoc
- def do_invoke(self, invoc, obj, *args):
- pass
-
def qmf_call(self, invoc, obj, meth, *args):
def completion(status_code, output_args):
invoc.status_code = status_code
@@ -109,7 +86,7 @@
invoc.status = invoc.FAILED
invoc.end_time = now
- invoc.last_change_time = now
+ invoc.update_time = now
invoc.exception = e
log.exception(e)
@@ -117,39 +94,81 @@
def __str__(self):
return "%s.%s" % (self.__module__, self.__class__.__name__)
+class ObjectTask(Task):
+ def __init__(self, app, frame):
+ super(ObjectTask, self).__init__(app)
+
+ self.frame = frame
+ self.frame.tasks.append(self)
+
+ self.form = ObjectTaskForm(app, self.name, self)
+
+ def enter(self, session):
+ id = self.frame.id.get(session)
+
+ nsession = wooly.Session(self.app.form_page)
+
+ self.form.id.set(nsession, id)
+ self.form.return_url.set(nsession, session.marshal())
+ self.form.show(nsession)
+
+ self.do_enter(nsession)
+
+ return nsession
+
+ def do_enter(self, session):
+ pass
+
+ def invoke(self, session, obj, *args):
+ if obj:
+ assert isinstance(obj, RosemaryObject), obj
+
+ invoc = self.start(session, obj)
+
+ try:
+ self.do_invoke(invoc, obj, *args)
+ except Exception, e:
+ self.exception(invoc, e)
+
+ def do_invoke(self, invoc, obj, *args):
+ pass
+
class TaskInvocation(object):
PENDING = "pending"
FAILED = "failed"
OK = "ok"
- def __init__(self, task, user, obj):
+ def __init__(self, task, login_session):
self.task = task
- self.user = user
- self.object = obj
+
+ self.login_session = login_session
+ self.login_session.task_invocations.append(self)
+
self.start_time = None
self.end_time = None
- self.last_change_time = None
+ self.update_time = None
self.status = None
self.exception = None
self.status_code = None
self.output_args = None
- self.results_by_item = dict()
- self.outstanding_items = set()
+ def get_summary(self, session):
+ if self.exception:
+ return str(self.exception)
+ return self.status
+
def end(self):
log.debug("Ending %s", self.task)
- if self.status_code in (None, 0, "OK"):
+ if self.status is self.PENDING:
self.status = self.OK
- else:
- self.status = self.FAILED
now = datetime.now()
self.end_time = now
- self.last_change_time = now
+ self.update_time = now
log.info("Ended %s", self.task)
@@ -163,9 +182,12 @@
def do_get_items(self, session):
now = secs(datetime.now())
- invocs = sorted_by(self.app.model.task_invocations, "last_change_time")
- invocs = [x for x in invocs if now - secs(x.last_change_time) < 10]
+ login = session.client_session.attributes["login_session"]
+ invocs = sorted_by(login.task_invocations, "update_time")
+ invocs = [x for x in invocs
+ if now - secs(x.update_time) < 10 or x.status == x.FAILED]
+
return invocs
def do_render(self, session):
@@ -175,75 +197,60 @@
return super(TaskInvocationSet, self).do_render(session)
def render_item_content(self, session, item):
- description = item.task.get_description(session, item.object)
+ description = item.task.get_description(session)
description = xml_escape(description)
if not description:
description = ""
- status = item.status
- exc = str(item.exception)
- code = str(item.status_code)
- outs = str(item.output_args)
+ summary = item.get_summary(session)
- return ": ".join((description, status))
+ return ": ".join((description, summary))
-# XXX
-def completion(status_code=0, output_args=()):
- invoc.results_by_item[item] = (status_code, output_args)
- invoc.outstanding_items.remove(item)
+class SelectionTask(Task):
+ def __init__(self, app, selector):
+ super(SelectionTask, self).__init__(app)
- if not invoc.outstanding_items:
- self.end(invoc)
+ self.cls = selector.cls
-class ObjectTask(Task):
- def __init__(self, module, cls):
- name = self.__class__.__name__
+ self.selector = selector
+ self.selector.tasks.append(self)
- super(ObjectTask, self).__init__(module, cls, name)
+ self.form = SelectionTaskForm(app, self.name, self)
- self.form = ObjectTaskForm(self.app, self.name, self)
+ def get_title(self, session):
+ pass
- def do_enter(self, session, obj):
- if obj:
- self.form.id.set(session, obj._id)
+ def enter(self, session):
+ ids = self.selector.ids.get(session)
- def invoke(self, session, obj, *args):
- if obj:
- assert isinstance(obj, RosemaryObject), obj
+ nsession = wooly.Session(self.app.form_page)
- invoc = self.start(session, obj)
+ self.form.ids.set(nsession, ids)
+ self.form.return_url.set(nsession, session.marshal())
+ self.form.show(nsession)
- try:
- self.do_invoke(invoc, obj, *args)
- except Exception, e:
- self.exception(invoc, e)
+ self.do_enter(nsession)
-class SelectionTask(Task):
- def __init__(self, item_task):
- module = item_task.module
- cls = item_task.cls
- name = "%s_selection" % item_task.name
+ return nsession
- super(SelectionTask, self).__init__(module, cls, name)
+ def do_enter(self, session):
+ pass
- self.item_task = item_task
- self.item_task.selection_task = self
-
- self.form = SelectionTaskForm(self.module.app, self.name, self)
-
- def do_enter(self, session, ids):
- if ids:
- self.form.ids.set(session, ids)
-
def invoke(self, session, selection, *args):
- invoc = self.start(session, selection)
-
for item in selection:
- invoc.outstanding_items.add(item)
+ invoc = self.start(session, item)
- self.item_task.do_invoke(invoc, item, *args)
+ try:
+ self.do_invoke(invoc, item, *args)
+ except Exception, e:
+ invoc.exception = e
+ invoc.status = invoc.FAILED
+ invoc.end()
+ def do_invoke(self, invoc, item, *args):
+ pass
+
class TaskForm(FoldingFieldSubmitForm):
def __init__(self, app, name, task):
assert isinstance(task, Task)
@@ -252,10 +259,12 @@
self.task = task
-class ObjectTaskForm(TaskForm):
+class ObjectTaskForm(FoldingFieldSubmitForm):
def __init__(self, app, name, task):
- super(ObjectTaskForm, self).__init__(app, name, task)
+ super(ObjectTaskForm, self).__init__(app, name)
+ self.task = task
+
self.id = IntegerParameter(app, "id")
self.add_parameter(self.id)
@@ -265,14 +274,8 @@
id = self.id.get(session)
if id:
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
-
- try:
- obj = self.task.cls.get_object(cursor, id)
- finally:
- cursor.close()
-
+ # XXX don't love this; impl get_object on OTForm instead
+ obj = self.task.frame.get_object(session, id)
self.object.set(session, obj)
super(ObjectTaskForm, self).do_process(session)
@@ -281,16 +284,17 @@
obj = self.object.get(session)
self.task.invoke(session, obj)
- self.task.exit_with_redirect(session, obj)
+ self.task.exit_with_redirect(session)
def render_title(self, session):
- obj = self.object.get(session)
- return self.task.get_title(session, obj)
+ return self.task.get_title(session)
class SelectionTaskForm(TaskForm):
def __init__(self, app, name, task):
super(SelectionTaskForm, self).__init__(app, name, task)
+ self.cls = task.selector.cls
+
item = IntegerParameter(app, "item")
self.ids = ListParameter(app, "id", item)
@@ -303,63 +307,41 @@
self.selection.set(session, selection)
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
+ for id in self.ids.get(session):
+ item = self.cls.get_object(session.cursor, id)
+ selection.append(item)
- try:
- for id in self.ids.get(session):
- item = self.task.cls.get_object(cursor, id)
- selection.append(item)
- finally:
- cursor.close()
-
super(SelectionTaskForm, self).do_process(session)
def process_submit(self, session):
selection = self.selection.get(session)
self.task.invoke(session, selection)
- self.task.exit_with_redirect(session, selection)
+ self.task.exit_with_redirect(session)
class ObjectTaskLink(Link):
- def __init__(self, app, name, task, attribute=None):
+ def __init__(self, app, name, task):
assert isinstance(task, ObjectTask), task
super(ObjectTaskLink, self).__init__(app, name)
self.task = task
- self.attribute = attribute
- def get_object(self, session):
- if self.attribute:
- return self.attribute.get(session)
-
def render_href(self, session):
- return self.task.get_href(session, self.get_object(session))
+ return self.task.get_href(session)
def render_content(self, session):
- return self.task.get_title(session, self.get_object(session))
+ return self.task.get_title(session)
class SelectionTaskButton(FormButton):
- def __init__(self, app, name, task, attribute=None):
- assert isinstance(task, ObjectTask), task
+ def __init__(self, app, task):
+ super(SelectionTaskButton, self).__init__(app, task.name)
- super(SelectionTaskButton, self).__init__(app, name)
-
self.task = task
- self.attribute = attribute
- def get_selection(self, session):
- if self.attribute:
- return self.attribute.get(session)
-
def process_submit(self, session):
- selection = self.get_selection(session)
-
- href = self.task.selection_task.get_href(session, selection)
-
+ href = self.task.get_href(session)
self.page.redirect.set(session, href)
def render_content(self, session):
- selection = self.get_selection(session)
- return self.task.get_title(session, selection)
+ return self.task.get_title(session)
Modified: mgmt/newdata/cumin/python/cumin/parameters.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/parameters.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/parameters.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -20,16 +20,6 @@
obj = self.object.get(session)
return self.get_associate(session, obj)
-class CuminObjectParameter(Parameter):
- def __init__(self, app, name, cumin_class):
- self.cumin_class = cumin_class
-
- def do_unmarshal(self, string):
- return self.cumin_class.mint_class.get(int(string))
-
- def do_marshal(self, object):
- return str(object.id)
-
class CuminClassParameter(Parameter):
def do_unmarshal(self, string):
return getattr(self.app.model, string, None)
@@ -121,23 +111,6 @@
def do_marshal(self, broker):
return str(broker.id)
-class BrokerVhostAttribute(ObjectAssociateAttribute):
- def get_associate(self, session, broker):
- cls = self.app.rosemary.org_apache_qpid_broker.Vhost
-
- conn = self.app.model.get_sql_connection()
- cursor = conn.cursor()
-
- try:
- kwargs = {"_brokerRef_id": broker._id, "name": "/"}
-
- for obj in cls.get_selection(cursor, **kwargs):
- break
- finally:
- cursor.close()
-
- return obj
-
class ConnectionParameter(Parameter):
def do_unmarshal(self, string):
return ClientConnection.get(int(string))
Modified: mgmt/newdata/cumin/python/cumin/sqladapter.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/sqladapter.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/sqladapter.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -18,6 +18,8 @@
field.init()
def get_count(self, values):
+ # XXX urgh. I want session in here
+
conn = self.app.model.get_sql_connection()
cursor = conn.cursor()
Deleted: mgmt/newdata/cumin/python/cumin/table.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/table.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/table.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,109 +0,0 @@
-from wooly import *
-from wooly.datatable import *
-from wooly.forms import *
-from wooly.widgets import *
-
-from util import *
-from widgets import *
-
-strings = StringCatalog(__file__)
-
-# XXX delete this
-
-class SelectionTable(Form):
- def __init__(self, app, name, data):
- super(SelectionTable, self).__init__(app, name)
-
- self.table = self.SelectionDataTable(app, "table", data)
- self.table.update_enabled = True
- self.add_child(self.table)
-
- item = IntegerParameter(app, "item")
-
- self.selection = ListParameter(app, "selection", item)
- self.add_parameter(self.selection)
-
- self.links = SelectionTableLinks(app, "links")
- self.add_child(self.links)
-
- self.switches = SelectionTableSwitches(app, "switches")
- self.add_child(self.switches)
-
- self.filters = SelectionTableFilters(app, "filters")
- self.add_child(self.filters)
-
- self.buttons = SelectionTableButtons(app, "buttons")
- self.add_child(self.buttons)
-
- col = self.Checkbox(app, "checkbox", self.table, self.selection)
- self.table.add_column(col)
-
- col = self.Name(app, "name", self.table)
- self.table.add_column(col)
-
- self.frame_path = None
-
- def init(self):
- super(SelectionTable, self).init()
-
- assert self.frame_path, self
-
- # XXX this isn't what we do now
- def do_get_data(self, session, sort, ascending, limit, offset):
- return self.table.adapter.get_data(sort, ascending, limit, offset)
-
- def add_task(self, task):
- link = ObjectTaskLink(self.app, task.__class__.__name__, task)
- self.links.add_child(link)
-
- def add_selection_task(self, task):
- name = task.__class__.__name__
- button = TaskButton(self.app, name, task, self.selection)
- self.buttons.add_child(button)
-
- class Checkbox(NewCheckboxColumn):
- def render_cell_value(self, session, record):
- return record[self.table.adapter.id_field.index]
-
- class Name(LinkColumn):
- def render_header_content(self, session):
- return "Name"
-
- def render_cell_href(self, session, record):
- branch = session.branch()
-
- path = self.table.parent.frame_path
- id = record[self.table.adapter.id_field.index]
-
- frame = self.page.page_widgets_by_path[path]
-
- return frame.get_href(session, id)
-
- def render_cell_content(self, session, record):
- return record[self.table.adapter.name_field.index]
-
- class SelectionDataTable(DataTable):
- def do_get_data(self, session, sort, ascending, limit, offset):
- return self.parent.do_get_data \
- (session, sort, ascending, limit, offset)
-
-class SelectionTableControl(WidgetSet):
- def do_render(self, session):
- if len(self.children):
- return super(SelectionTableControl, self).do_render(session)
-
-class SelectionTableLinks(SelectionTableControl):
- def render_title(self, session):
- return "Links"
-
-class SelectionTableSwitches(SelectionTableControl):
- def render_title(self, session):
- return "Switches"
-
-class SelectionTableFilters(SelectionTableControl):
- def render_title(self, session):
- return "Filters"
-
-class SelectionTableButtons(SelectionTableControl):
- def render_title(self, session):
- return "Act on selection:"
Deleted: mgmt/newdata/cumin/python/cumin/table.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/table.strings 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/table.strings 2010-04-14 17:34:58 UTC (rev 3912)
@@ -1,73 +0,0 @@
-[SelectionTable.css]
-div.SelectionTable div.switches ul,
-div.SelectionTable div.filters ul,
-div.SelectionTable div.buttons ul {
- list-style: none;
- display: inline;
- padding: 0;
- margin: 0;
-}
-
-div.SelectionTable div.filters {
- float: right;
-}
-
-div.SelectionTable div.buttons ul li {
- margin: 0 0.4em 0 0;
- display: inline;
-}
-
-div.SelectionTable form {
- clear: both;
-}
-
-[SelectionTable.html]
-<div id="{id}" class="{class}">
- {links}
-
- {filters}
-
- {switches}
-
- <form method="post" action="?">
- {buttons}
-
- {table}
-
- <div>{hidden_inputs}</div>
- </form>
-</div>
-
-[SelectionTableControl.css]
-div.SelectionTableControl {
- padding: 0.35em 0.75em;
-}
-
-div.SelectionTableControl ul {
- list-style: none;
- display: inline;
- padding: 0;
- margin: 0;
-}
-
-[SelectionTableControl.html]
-<div class="{class}">
- <span>{title}</span>
-
- <ul>{widgets}</ul>
-</div>
-
-[SelectionTableButtons.css]
-div.SelectionTableButtons {
- background-color: #e7e7f7;
-}
-
-div.SelectionTableButtons span {
- font-size: 0.9em;
- margin: 0 0.5em 0 0;
-}
-
-div.SelectionTableButtons ul li {
- margin: 0 0.4em 0 0;
- display: inline;
-}
Modified: mgmt/newdata/cumin/python/cumin/widgets.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/widgets.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/cumin/python/cumin/widgets.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -5,9 +5,9 @@
from wooly.widgets import *
from wooly.forms import *
from wooly.sql import *
+from wooly.tables import *
from mint.schema import *
-from action import *
from objecttask import *
from parameters import *
from widgets import *
@@ -277,7 +277,7 @@
obj = self.object.get(session)
self.task.invoke(session, obj)
- self.task.exit_with_redirect(session, obj)
+ self.task.exit_with_redirect(session)
def render_submit_content(self, session):
return self.task.get_title(session)
@@ -1345,6 +1345,8 @@
self.created = datetime.now()
+ self.task_invocations = list()
+
class CuminPage(HtmlPage):
def __init__(self, app, name):
super(CuminPage, self).__init__(app, name)
Modified: mgmt/newdata/misc/boneyard.py
===================================================================
--- mgmt/newdata/misc/boneyard.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/misc/boneyard.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -246,64 +246,3 @@
value = p.value
return value
-
-class SessionSet(CuminSelectionTable):
- def __init__(self, app, name, conn):
- item = SessionParameter(app, "item")
- super(SessionSet, self).__init__(app, name, item)
-
- self.conn = conn
-
- col = self.NameColumn(app, "name")
- self.add_column(col)
- self.set_default_column(col)
-
- col = self.ExpiresColumn(app, "expires")
- self.add_column(col)
-
- col = self.StatusColumn(app, "attached")
- self.add_column(col)
-
- self.__phase = PhaseSwitch(app, "phase")
- self.add_child(self.__phase)
-
- task = main.module.session_set_detach
- self.buttons.add_child(TaskButton(app, "detach", task, self.selection))
-
- task = main.module.session_set_close
- self.buttons.add_child(TaskButton(app, "close", task, self.selection))
-
- def render_title(self, session):
- conn = self.conn.get(session)
- return "Sessions %s" % fmt_count(conn.sessions.count())
-
- def render_sql_where(self, session):
- conn = self.conn.get(session)
-
- elems = list()
- elems.append("s.client_connection_id = %(id)r")
- elems.append(self.__phase.get_sql_constraint(session, conn))
-
- return "where %s" % " and ".join(elems)
-
- def get_sql_values(self, session):
- conn = self.conn.get(session)
- return {"id": conn.id}
-
- class NameColumn(SqlTableColumn):
- def render_title(self, session):
- return "Name"
-
- class ExpiresColumn(SqlTableColumn):
- def render_title(self, session):
- return "Expires"
-
- def render_value(self, session, value):
- return fmt_datetime(value)
-
- class StatusColumn(SqlTableColumn):
- def render_title(self, session):
- return "Attached?"
-
- def render_content(self, session, data):
- return fmt_predicate(data["attached"])
Modified: mgmt/newdata/wooly/python/wooly/datatable.py
===================================================================
--- mgmt/newdata/wooly/python/wooly/datatable.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/wooly/python/wooly/datatable.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -31,8 +31,6 @@
self.adapter.fields.append(self)
self.adapter.fields_by_name[self.name] = self
- self.visible = True
-
def init(self):
pass
Modified: mgmt/newdata/wooly/python/wooly/table.py
===================================================================
--- mgmt/newdata/wooly/python/wooly/table.py 2010-04-14 14:53:31 UTC (rev 3911)
+++ mgmt/newdata/wooly/python/wooly/table.py 2010-04-14 17:34:58 UTC (rev 3912)
@@ -201,7 +201,7 @@
def __init__(self, app, name):
super(NewCheckboxColumn, self).__init__(app, name)
- self.selection = selection
+ self.selection = selection # XXX huh?
self.header = CheckboxColumnHeader(app, "header")
self.replace_child(self.header)
@@ -234,11 +234,5 @@
return self.input.render(session, record)
class CheckboxColumnInput(CheckboxInput):
- def render_onclick_attr(self, session, record):
- value = "wooly.clickTableCheckbox(this, '%s')" % \
- self.parent.parent.selection.path
-
- return "onclick=\"%s\"" % value
-
def render_value(self, session, record):
return self.parent.parent.render_cell_value(session, record)
14 years, 8 months
rhmessaging commits: r3911 - store/trunk/cpp/tools.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2010-04-14 10:53:31 -0400 (Wed, 14 Apr 2010)
New Revision: 3911
Modified:
store/trunk/cpp/tools/store_chk
Log:
Fix for BZ 574529 "[store] Misleading -r parameter help in store_chk tool"
Modified: store/trunk/cpp/tools/store_chk
===================================================================
--- store/trunk/cpp/tools/store_chk 2010-04-14 14:41:04 UTC (rev 3910)
+++ store/trunk/cpp/tools/store_chk 2010-04-14 14:53:31 UTC (rev 3911)
@@ -113,7 +113,7 @@
help="Quiet (suppress all non-error output)")
op.add_option("-r", "--records",
action="store_true", dest="rflag",
- help="Print remaining records and transactions")
+ help="Print all records and transactions (including consumed/closed)")
op.add_option("-v", "--verbose",
action="store_true", dest="vflag",
help="Verbose output")
@@ -223,7 +223,7 @@
help="Quiet (suppress all non-error output)")
op.add_option("-r", "--records",
action="store_true", dest="rflag",
- help="Print remaining records and transactions")
+ help="Print all records and transactions (including consumed/closed)")
op.add_option("-t", "--test-num",
action="store", type="int", dest="tnum",
help="Test number from CSV file - only valid if CSV file named")
14 years, 8 months
rhmessaging commits: r3910 - in store/trunk/cpp: tests and 1 other directories.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2010-04-14 10:41:04 -0400 (Wed, 14 Apr 2010)
New Revision: 3910
Added:
store/trunk/cpp/tests/python_tests/
store/trunk/cpp/tests/python_tests/__init__.py
store/trunk/cpp/tests/python_tests/client_persistence.py
store/trunk/cpp/tests/python_tests/flow_to_disk.py
store/trunk/cpp/tests/python_tests/store_test.py
store/trunk/cpp/tests/run_python_tests
Removed:
store/trunk/cpp/tests/new_python_tests/
store/trunk/cpp/tests/python_tests/__init__.py
store/trunk/cpp/tests/python_tests/client_persistence.py
store/trunk/cpp/tests/run_new_python_tests
Modified:
store/trunk/cpp/configure.ac
store/trunk/cpp/tests/Makefile.am
store/trunk/cpp/tests/run_long_python_tests
store/trunk/cpp/tests/run_short_python_tests
Log:
Fixed exception handling errors introduced in long tests by r.933560. Renamed python directories to more appropriate names. Applied patch from Ján Sáreník for handling newest version of BDB in configure. Adjusted content of make-short, make and make-long tests. Renamed package from "rhm" to "msg-store".
Modified: store/trunk/cpp/configure.ac
===================================================================
--- store/trunk/cpp/configure.ac 2010-04-14 14:22:16 UTC (rev 3909)
+++ store/trunk/cpp/configure.ac 2010-04-14 14:41:04 UTC (rev 3910)
@@ -21,7 +21,7 @@
dnl
dnl Process this file with autoconf to produce a configure script.
-AC_INIT([rhm], [0.6], [rhemrg-users-list(a)redhat.com])
+AC_INIT([msg-store], [0.6], [rhemrg-users-list(a)redhat.com])
AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE([dist-bzip2])
@@ -55,7 +55,7 @@
# Warnings: Enable as many as possible, keep the code clean. Please
# do not disable warnings or remove -Werror without discussing on
-# rhm-users list.
+# rhemrg-users-list list.
#
# The following warnings are deliberately omitted, they warn on valid code.
# -Wunreachable-code -Wpadded -Winline
@@ -177,17 +177,17 @@
AC_SUBST([LIB_DLOPEN])
LIBS=$gl_saved_libs
-# Require libdb_cxx (any version between 4.2 and 4.7), for the library, and for db_cxx.h.
+# Require libdb_cxx (any version between 4.2 and 4.8), for the library, and for db_cxx.h.
db4_devel_fail=0
AC_CHECK_HEADER([db_cxx.h], ,[db4_devel_fail=1])
test $db4_devel_fail == 1 && \
AC_MSG_ERROR([db4-devel package missing. Please ensure both db4 and db4-devel are installed. (hint: "yum install db4-devel" should do it...)])
gl_saved_libs=$LIBS
-AC_SEARCH_LIBS([__db_open], [db_cxx-4.7 db_cxx-4.6 db_cxx-4.5 db_cxx-4.4 db_cxx-4.3 db_cxx-4.2],
+AC_SEARCH_LIBS([__db_open], [db_cxx-4.8 db_cxx-4.7 db_cxx-4.6 db_cxx-4.5 db_cxx-4.4 db_cxx-4.3 db_cxx-4.2],
[test "$ac_cv_search___db_open" = "none required" ||
LIB_BERKELEY_DB=$ac_cv_search___db_open],
- AC_MSG_ERROR([Couldn't find required library in range db_cxx-4.2 through db_cxx-4.6]))
+ AC_MSG_ERROR([Couldn't find required library in range db_cxx-4.2 through db_cxx-4.8]))
AC_SUBST([LIB_BERKELEY_DB])
LIBS=$gl_saved_libs
Modified: store/trunk/cpp/tests/Makefile.am
===================================================================
--- store/trunk/cpp/tests/Makefile.am 2010-04-14 14:22:16 UTC (rev 3909)
+++ store/trunk/cpp/tests/Makefile.am 2010-04-14 14:41:04 UTC (rev 3910)
@@ -39,12 +39,17 @@
OrderingTest \
TransactionalTest \
TwoPhaseCommitTest \
- run_new_python_tests \
+ run_python_tests \
system_test.sh \
clean.sh
LONG_TESTS = \
+ SimpleTest \
+ OrderingTest \
+ TransactionalTest \
+ TwoPhaseCommitTest \
run_long_python_tests \
+ system_test.sh \
clean.sh
SHORT_TESTS = \
@@ -77,10 +82,10 @@
EXTRA_DIST = \
clean.sh \
failing_python_tests.txt \
- new_python_tests \
+ python_tests \
persistence.py \
run_long_python_tests \
- run_new_python_tests \
+ run_python_tests \
run_short_python_tests \
run_test \
start_broker \
@@ -104,10 +109,10 @@
# Note: Auto-recursion is not supported for custom targets, so add a ${MAKE} -C for each dir in the SUBDIRS list above.
check-long: all
$(MAKE) -C jrnl check-long
+ $(MAKE) check TESTS="$(LONG_TESTS)" SUBDIRS=.
if DO_CLUSTER_TESTS
$(MAKE) -C cluster check-long
endif
- $(MAKE) check TESTS="$(LONG_TESTS)" SUBDIRS=.
check-short: all
$(MAKE) check TESTS="$(SHORT_TESTS)" SUBDIRS=.
Copied: store/trunk/cpp/tests/python_tests (from rev 3904, store/trunk/cpp/tests/new_python_tests)
Deleted: store/trunk/cpp/tests/python_tests/__init__.py
===================================================================
--- store/trunk/cpp/tests/new_python_tests/__init__.py 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/python_tests/__init__.py 2010-04-14 14:41:04 UTC (rev 3910)
@@ -1,24 +0,0 @@
-# Do not delete - marks this directory as a python package.
-
-# Copyright (c) 2008, 2009 Red Hat, Inc.
-#
-# This file is part of the Qpid async store library msgstore.so.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
-# USA
-#
-# The GNU Lesser General Public License is available in the file COPYING.
-
-from client_persistence import *
Copied: store/trunk/cpp/tests/python_tests/__init__.py (from rev 3905, store/trunk/cpp/tests/new_python_tests/__init__.py)
===================================================================
--- store/trunk/cpp/tests/python_tests/__init__.py (rev 0)
+++ store/trunk/cpp/tests/python_tests/__init__.py 2010-04-14 14:41:04 UTC (rev 3910)
@@ -0,0 +1,25 @@
+# Do not delete - marks this directory as a python package.
+
+# Copyright (c) 2008, 2009 Red Hat, Inc.
+#
+# This file is part of the Qpid async store library msgstore.so.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+# USA
+#
+# The GNU Lesser General Public License is available in the file COPYING.
+
+from client_persistence import *
+from flow_to_disk import *
Deleted: store/trunk/cpp/tests/python_tests/client_persistence.py
===================================================================
--- store/trunk/cpp/tests/new_python_tests/client_persistence.py 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/python_tests/client_persistence.py 2010-04-14 14:41:04 UTC (rev 3910)
@@ -1,240 +0,0 @@
-# Copyright (c) 2008 Red Hat, Inc.
-#
-# This file is part of the Qpid async store library msgstore.so.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
-# USA
-#
-# The GNU Lesser General Public License is available in the file COPYING.
-
-from qpid.brokertest import *
-from qpid.messaging import Empty, Message
-from qmf.console import Session
-
-def storeArgs():
- assert BrokerTest.store_lib
- return ["--load-module", BrokerTest.store_lib]
-
-class Qmf:
- """
- QMF functions not yet available in the new QMF API. Remove this and replace with new API when it becomes available.
- """
- def __init__(self, broker):
- self.__session = Session()
- self.__broker = self.__session.addBroker("amqp://localhost:%d"%broker.port())
-
- def addExchange(self, exchangeName, exchangeType, altExchangeName=None, passive=False, durable=False, arguments = {}):
- """Add a new exchange"""
- amqpSession = self.__broker.getAmqpSession()
- if altExchangeName:
- amqpSession.exchange_declare(exchange=exchangeName, type=exchangeType, alternate_exchange=altExchangeName, passive=passive, durable=durable, arguments=arguments)
- else:
- amqpSession.exchange_declare(exchange=exchangeName, type=exchangeType, passive=passive, durable=durable, arguments=arguments)
-
- def addQueue(self, queueName, altExchangeName=None, passive=False, durable=False, arguments = {}):
- """Add a new queue"""
- amqpSession = self.__broker.getAmqpSession()
- if altExchangeName:
- amqpSession = amqpSession.queue_declare(queueName, alternate_exchange=altExchangeName, passive=passive, durable=durable, arguments=arguments)
- else:
- amqpSession = amqpSession.queue_declare(queueName, passive=passive, durable=durable, arguments=arguments)
-
- def __query(self, name, _class, package, altExchangeName=None):
- try:
- objList = self.__session.getObjects(_class=_class, _package=package)
- found = False
- for o in objList:
- if o.name == name:
- found = True
- if altExchangeName != None:
- altExchList = self.__session.getObjects(_objectId=o.altExchange)
- if len(altExchList) == 0 or altExchList[0].name != altExchangeName: return False
- break
- return found
- except: return False
-
-
- def queryExchange(self, exchangeName, altExchangeName=None):
- """Test for the presence of an exchange, and optionally whether it has an alternate exchange set to a known value."""
- return self.__query(exchangeName, "exchange", "org.apache.qpid.broker", altExchangeName)
-
- def queryQueue(self, queueName, altExchangeName=None):
- """Test for the presence of a queue, and optionally whether it has an alternate exchange set to a known value."""
- return self.__query(queueName, "queue", "org.apache.qpid.broker", altExchangeName)
-
- def queueMsgCount(self, queueName):
- queueList = self.__session.getObjects(_class="queue", _name=queueName)
- if len(queueList):
- return queueList[0].msgDepth
-
- def queueEmpty(self, queueName):
- return self.queueMsgCount(queueName) == 0
-
-
-class StoreTest(BrokerTest):
- """
- This subclass of BrokerTest adds some convenience test/check functions
- """
-
- def __chkEmpty(self, queue, receiver):
- try:
- msg = receiver.fetch(timeout=0)
- self.assert_(False, "Queue \"%s\" not empty: found message: %s" % (queue, msg))
- except Empty: pass
-
- def chkMsg(self, broker, queue, msgChk, empty=False, ack=True):
- return self.chkMsgs(broker, queue, [msgChk], empty, ack)
-
- def chkMsgs(self, broker, queue, msgChkList, empty=False, ack=True):
- s = broker.connect().session()
- rcvr = s.receiver(queue + "; {create:always}", capacity=len(msgChkList))
- try: rmList = [rcvr.fetch(timeout=0) for i in range(len(msgChkList))]
- except Empty: self.assert_(False, "Queue \"%s\" is empty, unable to retrieve expected message %d." % (queue, i))
- for i in range(0, len(rmList)):
- self.assertEqual(rmList[i].content, msgChkList[i].content)
- self.assertEqual(rmList[i].correlation_id, msgChkList[i].correlation_id)
- if empty: self.__chkEmpty(queue, rcvr)
- if ack:
- s.acknowledge()
- s.connection.close()
- else:
- return s
-
-
-class ExchangeQueueTests(StoreTest):
- """
- Simple tests of the broker exchange and queue types
- """
-
- def testDirectExchange(self):
- """Test Direct exchange."""
- broker = self.broker(storeArgs(), name="testDirectExchange", expect=EXPECT_EXIT_OK)
- m1 = Message("A_Message1", durable=True, correlation_id="Msg0001")
- m2 = Message("B_Message1", durable=True, correlation_id="Msg0002")
- broker.send_message("a", m1)
- broker.send_message("b", m2)
- broker.terminate()
-
- broker = self.broker(storeArgs(), name="testDirectExchange")
- self.chkMsg(broker, "a", m1, True)
- self.chkMsg(broker, "b", m2, True)
-
- def testTopicExchange(self):
- """Test Topic exchange."""
- broker = self.broker(storeArgs(), name="testTopicExchange", expect=EXPECT_EXIT_OK)
- s = broker.connect().session()
- snd1 = s.sender("abc/key1; {create:always, node-properties:{durable:True, type:topic}}")
- snd2 = s.sender("abc/key2; {create:always, node-properties:{durable:True, type:topic}}")
- s.receiver("a; {create:always, node-properties:{durable:True, x-properties:{bindings:['abc/key1']}}}")
- s.receiver("b; {create:always, node-properties:{durable:True, x-properties:{bindings:['abc/key1']}}}")
- s.receiver("c; {create:always, node-properties:{durable:True, x-properties:{bindings:['abc/key1']}}}")
- m1 = Message("Message1", durable=True, correlation_id="Msg0003")
- snd1.send(m1)
- m2 = Message("Message2", durable=True, correlation_id="Msg0004")
- snd2.send(m2)
- s.connection.close()
- broker.terminate()
-
- broker = self.broker(storeArgs(), name="testTopicExchange")
- self.chkMsg(broker, "a", m1, True)
- self.chkMsg(broker, "b", m1, True)
- self.chkMsg(broker, "c", m1, True)
-
-
- def testLVQ(self):
- """Test LVQ."""
- broker = self.broker(storeArgs(), name="testLVQ", expect=EXPECT_EXIT_OK)
- ma1 = Message("A1", durable=True, correlation_id="Msg0005", properties={"qpid.LVQ_key":"A"})
- ma2 = Message("A2", durable=True, correlation_id="Msg0006", properties={"qpid.LVQ_key":"A"})
- mb1 = Message("B1", durable=True, correlation_id="Msg0007", properties={"qpid.LVQ_key":"B"})
- mb2 = Message("B2", durable=True, correlation_id="Msg0008", properties={"qpid.LVQ_key":"B"})
- mb3 = Message("B3", durable=True, correlation_id="Msg0009", properties={"qpid.LVQ_key":"B"})
- mc1 = Message("C1", durable=True, correlation_id="Msg0010", properties={"qpid.LVQ_key":"C"})
- broker.send_messages("lvq-test", [mb1, ma1, ma2, mb2, mb3, mc1], xprops="\"qpid.last_value_queue\":True")
- broker.terminate()
-
- broker = self.broker(storeArgs(), name="testLVQ", expect=EXPECT_EXIT_OK)
- s = self.chkMsgs(broker, "lvq-test", [ma2, mb3, mc1], empty=True, ack=False)
- # Add more messages while subscriber is active (no replacement):
- ma3 = Message("A3", durable=True, correlation_id="Msg0011", properties={"qpid.LVQ_key":"A"})
- ma4 = Message("A4", durable=True, correlation_id="Msg0012", properties={"qpid.LVQ_key":"A"})
- mc2 = Message("C2", durable=True, correlation_id="Msg0013", properties={"qpid.LVQ_key":"C"})
- mc3 = Message("C3", durable=True, correlation_id="Msg0014", properties={"qpid.LVQ_key":"C"})
- mc4 = Message("C4", durable=True, correlation_id="Msg0015", properties={"qpid.LVQ_key":"C"})
- broker.send_messages("lvq-test", [mc2, mc3, ma3, ma4, mc4], xprops="\"qpid.last_value_queue\":True", session=s)
- s.acknowledge()
- s.connection.close()
- broker.terminate()
-
- broker = self.broker(storeArgs(), name="testLVQ")
- self.chkMsgs(broker, "lvq-test", [mc4, ma4], True)
-
-
-class AlternateExchagePropertyTests(StoreTest):
- """
- Test the persistence of the Alternate Exchange property for exchanges and queues.
- """
-
- def testExchange(self):
- """Exchange alternate exchange property persistence test"""
- broker = self.broker(storeArgs(), name="testExchangeBroker", expect=EXPECT_EXIT_OK)
- qmf = Qmf(broker)
- qmf.addExchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
- qmf.addExchange("testExch", "direct", durable=True, altExchangeName="altExch")
- broker.terminate()
-
- broker = self.broker(storeArgs(), name="testExchangeBroker")
- qmf = Qmf(broker)
- try: qmf.addExchange("altExch", "direct", passive=True)
- except Exception, e: self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % e)
- try: qmf.addExchange("testExch", "direct", passive=True)
- except Exception, e: self.fail("Test exchange (\"testExch\") instance not recovered: %s" % e)
- self.assertTrue(qmf.queryExchange("testExch", altExchangeName = "altExch"), "Alternate exchange property not found or is incorrect on exchange \"testExch\".")
-
- def testQueue(self):
- """Queue alternate exchange property persistence test"""
- broker = self.broker(storeArgs(), name="testQueueBroker", expect=EXPECT_EXIT_OK)
- qmf = Qmf(broker)
- qmf.addExchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
- qmf.addQueue("testQueue", durable=True, altExchangeName="altExch")
- broker.terminate()
-
- broker = self.broker(storeArgs(), name="testQueueBroker")
- qmf = Qmf(broker)
- try: qmf.addExchange("altExch", "direct", passive=True)
- except Exception, e: self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % e)
- try: qmf.addQueue("testQueue", passive=True)
- except Exception, e: self.fail("Test queue (\"testQueue\") instance not recovered: %s" % e)
- self.assertTrue(qmf.queryQueue("testQueue", altExchangeName = "altExch"), "Alternate exchange property not found or is incorrect on queue \"testQueue\".")
-
-
-class RedeliveredTests(StoreTest):
- """
- Test the behavior of the redelivered flag in the context of persistence
- """
-
- def testBrokerRecovery(self):
- """Test that the redelivered flag is set on messages after recovery of broker"""
- broker = self.broker(storeArgs(), name="testAfterRecover", expect=EXPECT_EXIT_OK)
- mc = "xyz"*100
- m = Message(mc, durable=True)
- broker.send_message("testQueue", m)
- broker.terminate()
-
- broker = self.broker(storeArgs(), name="testAfterRecover")
- rm = broker.get_message("testQueue")
- self.assertEqual(mc, rm.content)
- self.assertTrue(rm.redelivered)
-
Copied: store/trunk/cpp/tests/python_tests/client_persistence.py (from rev 3905, store/trunk/cpp/tests/new_python_tests/client_persistence.py)
===================================================================
--- store/trunk/cpp/tests/python_tests/client_persistence.py (rev 0)
+++ store/trunk/cpp/tests/python_tests/client_persistence.py 2010-04-14 14:41:04 UTC (rev 3910)
@@ -0,0 +1,186 @@
+"""
+Copyright (c) 2008 Red Hat, Inc.
+
+This file is part of the Qpid async store library msgstore.so.
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+USA
+
+The GNU Lesser General Public License is available in the file COPYING.
+"""
+
+from qpid.brokertest import EXPECT_EXIT_OK
+from store_test import StoreTest, Qmf, store_args
+from qpid.messaging import Message
+
+
+class ExchangeQueueTests(StoreTest):
+ """
+ Simple tests of the broker exchange and queue types
+ """
+
+ def test_direct_exchange(self):
+ """Test Direct exchange."""
+ broker = self.broker(store_args(), name="testDirectExchange", expect=EXPECT_EXIT_OK)
+ msg1 = Message("A_Message1", durable=True, correlation_id="Msg0001")
+ msg2 = Message("B_Message1", durable=True, correlation_id="Msg0002")
+ broker.send_message("a", msg1)
+ broker.send_message("b", msg2)
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testDirectExchange")
+ self.check_message(broker, "a", msg1, True)
+ self.check_message(broker, "b", msg2, True)
+
+ def test_topic_exchange(self):
+ """Test Topic exchange."""
+ broker = self.broker(store_args(), name="testTopicExchange", expect=EXPECT_EXIT_OK)
+ ssn = broker.connect().session()
+ snd1 = ssn.sender("abc/key1; {create:always, node:{type:topic, durable:True}}")
+ snd2 = ssn.sender("abc/key2; {create:always, node:{type:topic, durable:True}}")
+ ssn.receiver("a; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key1}]}}")
+ ssn.receiver("b; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key1}]}}")
+ ssn.receiver("c; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key1}, "
+ "{exchange:abc, key: key2}]}}")
+ ssn.receiver("d; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key2}]}}")
+ ssn.receiver("e; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key2}]}}")
+ msg1 = Message("Message1", durable=True, correlation_id="Msg0003")
+ snd1.send(msg1)
+ msg2 = Message("Message2", durable=True, correlation_id="Msg0004")
+ snd2.send(msg2)
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testTopicExchange")
+ self.check_message(broker, "a", msg1, True)
+ self.check_message(broker, "b", msg1, True)
+ self.check_messages(broker, "c", [msg1, msg2], True)
+ self.check_message(broker, "d", msg2, True)
+ self.check_message(broker, "e", msg2, True)
+
+
+ def test_lvq(self):
+ """Test LVQ."""
+ broker = self.broker(store_args(), name="testLVQ", expect=EXPECT_EXIT_OK)
+ ma1 = Message("A1", durable=True, correlation_id="Msg0005", properties={"qpid.LVQ_key":"A"})
+ ma2 = Message("A2", durable=True, correlation_id="Msg0006", properties={"qpid.LVQ_key":"A"})
+ mb1 = Message("B1", durable=True, correlation_id="Msg0007", properties={"qpid.LVQ_key":"B"})
+ mb2 = Message("B2", durable=True, correlation_id="Msg0008", properties={"qpid.LVQ_key":"B"})
+ mb3 = Message("B3", durable=True, correlation_id="Msg0009", properties={"qpid.LVQ_key":"B"})
+ mc1 = Message("C1", durable=True, correlation_id="Msg0010", properties={"qpid.LVQ_key":"C"})
+ broker.send_messages("lvq-test", [mb1, ma1, ma2, mb2, mb3, mc1],
+ xprops="arguments:{\"qpid.last_value_queue\":True}")
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testLVQ", expect=EXPECT_EXIT_OK)
+ ssn = self.check_messages(broker, "lvq-test", [ma2, mb3, mc1], empty=True, ack=False)
+ # Add more messages while subscriber is active (no replacement):
+ ma3 = Message("A3", durable=True, correlation_id="Msg0011", properties={"qpid.LVQ_key":"A"})
+ ma4 = Message("A4", durable=True, correlation_id="Msg0012", properties={"qpid.LVQ_key":"A"})
+ mc2 = Message("C2", durable=True, correlation_id="Msg0013", properties={"qpid.LVQ_key":"C"})
+ mc3 = Message("C3", durable=True, correlation_id="Msg0014", properties={"qpid.LVQ_key":"C"})
+ mc4 = Message("C4", durable=True, correlation_id="Msg0015", properties={"qpid.LVQ_key":"C"})
+ broker.send_messages("lvq-test", [mc2, mc3, ma3, ma4, mc4], session=ssn)
+ ssn.acknowledge()
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testLVQ")
+ self.check_messages(broker, "lvq-test", [mc4, ma4], True)
+
+ def test_fanout_exchange(self):
+ """Test Fanout Exchange"""
+ broker = self.broker(store_args(), name="testFanout", expect=EXPECT_EXIT_OK)
+ ssn = broker.connect().session()
+ snd = ssn.sender("TestFanoutExchange; {create: always, node: {type: topic, x-declare: {type: fanout}}}")
+ ssn.receiver("TestFanoutExchange; {link: {name: \"q1\", durable: True}}")
+ ssn.receiver("TestFanoutExchange; {link: {name: \"q2\", durable: True}}")
+ ssn.receiver("TestFanoutExchange; {link: {name: \"q3\", durable: True}}")
+ msg1 = Message("Msg1", durable=True, correlation_id="Msg0001")
+ snd.send(msg1)
+ msg2 = Message("Msg2", durable=True, correlation_id="Msg0002")
+ snd.send(msg2)
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testFanout")
+ self.check_messages(broker, "q1", [msg1, msg2], True)
+ self.check_messages(broker, "q2", [msg1, msg2], True)
+ self.check_messages(broker, "q3", [msg1, msg2], True)
+
+
+class AlternateExchagePropertyTests(StoreTest):
+ """
+ Test the persistence of the Alternate Exchange property for exchanges and queues.
+ """
+
+ def test_exchange(self):
+ """Exchange alternate exchange property persistence test"""
+ broker = self.broker(store_args(), name="testExchangeBroker", expect=EXPECT_EXIT_OK)
+ qmf = Qmf(broker)
+ qmf.add_exchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
+ qmf.add_exchange("testExch", "direct", durable=True, alt_exchange_name="altExch")
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testExchangeBroker")
+ qmf = Qmf(broker)
+ try:
+ qmf.add_exchange("altExch", "direct", passive=True)
+ except Exception, error:
+ self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % error)
+ try:
+ qmf.add_exchange("testExch", "direct", passive=True)
+ except Exception, error:
+ self.fail("Test exchange (\"testExch\") instance not recovered: %s" % error)
+ self.assertTrue(qmf.query_exchange("testExch", alt_exchange_name = "altExch"),
+ "Alternate exchange property not found or is incorrect on exchange \"testExch\".")
+
+ def test_queue(self):
+ """Queue alternate exchange property persistence test"""
+ broker = self.broker(store_args(), name="testQueueBroker", expect=EXPECT_EXIT_OK)
+ qmf = Qmf(broker)
+ qmf.add_exchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
+ qmf.add_queue("testQueue", durable=True, alt_exchange_name="altExch")
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testQueueBroker")
+ qmf = Qmf(broker)
+ try:
+ qmf.add_exchange("altExch", "direct", passive=True)
+ except Exception, error:
+ self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % error)
+ try:
+ qmf.add_queue("testQueue", passive=True)
+ except Exception, error:
+ self.fail("Test queue (\"testQueue\") instance not recovered: %s" % error)
+ self.assertTrue(qmf.query_queue("testQueue", alt_exchange_name = "altExch"),
+ "Alternate exchange property not found or is incorrect on queue \"testQueue\".")
+
+
+class RedeliveredTests(StoreTest):
+ """
+ Test the behavior of the redelivered flag in the context of persistence
+ """
+
+ def test_broker_recovery(self):
+ """Test that the redelivered flag is set on messages after recovery of broker"""
+ broker = self.broker(store_args(), name="testAfterRecover", expect=EXPECT_EXIT_OK)
+ msg_content = "xyz"*100
+ msg = Message(msg_content, durable=True)
+ broker.send_message("testQueue", msg)
+ broker.terminate()
+
+ broker = self.broker(store_args(), name="testAfterRecover")
+ rcv_msg = broker.get_message("testQueue")
+ self.assertEqual(msg_content, rcv_msg.content)
+ self.assertTrue(rcv_msg.redelivered)
+
Copied: store/trunk/cpp/tests/python_tests/flow_to_disk.py (from rev 3905, store/trunk/cpp/tests/new_python_tests/flow_to_disk.py)
===================================================================
--- store/trunk/cpp/tests/python_tests/flow_to_disk.py (rev 0)
+++ store/trunk/cpp/tests/python_tests/flow_to_disk.py 2010-04-14 14:41:04 UTC (rev 3910)
@@ -0,0 +1,1208 @@
+"""
+Copyright (c) 2008 Red Hat, Inc.
+
+This file is part of the Qpid async store library msgstore.so.
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+USA
+
+The GNU Lesser General Public License is available in the file COPYING.
+"""
+
+import qpid
+from qpid.brokertest import EXPECT_EXIT_OK, EXPECT_UNKNOWN
+from qpid.datatypes import uuid4
+from store_test import StoreTest, store_args
+from qpid.messaging import Message, TargetCapacityExceeded, ServerError #SessionError, SendError
+
+class FlowToDisk(StoreTest):
+ """Tests for async store flow-to-disk"""
+
+ @staticmethod
+ def _broker_name(queue_name, txn_produce, txn_consume):
+ """Create a broker name based on the queue name and the transaction parameters"""
+ name = queue_name
+ if txn_produce:
+ name += "_TxP"
+ if txn_consume:
+ name += "_TxC"
+ return name
+
+ def _tx_simple_limit(self, queue_name, kwargs):
+ """
+ Test a simple case of message limits which will force flow-to-disk.
+ * queue_args sets a limit - either max_count and/or max_size
+ * messages are added. Some will flow to disk.
+ * Consume all messages sent.
+ * Check the broker has no messages left.
+ """
+ # Unpack args
+ txn_produce = kwargs.get("txn_produce", False)
+ txn_consume = kwargs.get("txn_consume", False)
+ recover = kwargs.get("recover", False)
+ max_count = kwargs.get("max_count")
+ max_size = kwargs.get("max_size")
+ policy = kwargs.get("policy", "flow_to_disk")
+ num_msgs = kwargs.get("num_msgs", 15)
+ msg_size = kwargs.get("msg_size", 10)
+ msg_durable = kwargs.get("msg_durable", False)
+ sync = kwargs.get("sync", False)
+ browse = kwargs.get("browse", False)
+
+ bname = self._broker_name(queue_name, txn_produce, txn_consume)
+ if recover:
+ expect = EXPECT_UNKNOWN
+ else:
+ expect = EXPECT_EXIT_OK
+ broker = self.broker(store_args(), name=bname, expect=expect, log_level="debug+")
+ prod_session = broker.connect().session(transactional=txn_produce)
+ sender = prod_session.sender(self.snd_addr(queue_name, auto_create=True, durable=True, ftd_count=max_count,
+ ftd_size=max_size, policy=policy))
+
+ # Send messages
+ msgs = []
+ pre_recover_ftd_msgs = [] # msgs released before a recover
+ post_recover_ftd_msgs = [] # msgs released after a recover
+ cum_msg_size = 0
+ for index in range(0, num_msgs):
+ msg = Message(self.make_message(index, msg_size), durable=msg_durable, id=uuid4(),
+ correlation_id="msg-%04d"%index)
+ #print "Sending msg %s" % msg.id
+ msgs.append(msg)
+ cum_msg_size += msg_size
+ if (max_count != None and index >= max_count) or (max_size != None and cum_msg_size > max_size):
+ pre_recover_ftd_msgs.append(msg)
+ sender.send(msg, sync=sync)
+ if not sync:
+ sender.sync()
+ # Close transaction (if needed)
+ if txn_produce:
+ prod_session.commit()
+
+ # Browse messages
+ if browse:
+ self.check_messages(broker, queue_name, msgs, browse=True)
+
+ if recover:
+ broker.terminate()
+ if msg_durable:
+ post_recover_ftd_msgs = pre_recover_ftd_msgs
+ else:
+ del msgs[:] # Transient messages will be discarded on recover
+ old_broker = broker # keep for log analysis
+ broker = self.broker(store_args(), name=bname, expect=EXPECT_EXIT_OK, log_level="debug+")
+
+ # Browse messages after recover
+ if browse:
+ self.check_messages(broker, queue_name, msgs, browse=True)
+
+ # Consume messages
+ self.check_messages(broker, queue_name, msgs, transactional=txn_consume, empty=True)
+ broker.terminate()
+
+ # Check broker logs for released messages
+ if recover:
+ if txn_produce:
+ self.check_msg_release_on_commit(old_broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_release(old_broker, pre_recover_ftd_msgs)
+ self.check_msg_release_on_recover(broker, post_recover_ftd_msgs)
+ else:
+ if txn_produce:
+ self.check_msg_release_on_commit(broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_release(broker, pre_recover_ftd_msgs)
+
+ def simple_limit(self, queue_name, **kwargs):
+ """Adapter for adding transactions to test"""
+ # Cycle through the produce/consume block transaction combinations
+ for index in range(0, 4):
+ kwargs["txn_produce"] = index & 1 != 0 # Transactional produce
+ kwargs["txn_consume"] = index & 2 != 0 # Transactional consume
+ self._tx_simple_limit(queue_name, kwargs)
+
+class SimpleMaxCountTest(FlowToDisk):
+ """Flow-to-disk tests based on setting max_count"""
+
+ def test_base(self):
+ """Base test"""
+ self.simple_limit("SimpleMaxCount", max_count=10)
+
+ def test_recover(self):
+ """Recover test"""
+ self.simple_limit("SimpleMaxCountRecover", max_count=10, recover=True)
+
+ def test_durable(self):
+ """Durable message test"""
+ self.simple_limit("SimpleMaxCountDurable", max_count=10, msg_durable=True)
+
+ def test_durable_recover(self):
+ """Durable message recover test"""
+ self.simple_limit("SimpleMaxCountDurableRecover", max_count=10, msg_durable=True, recover=True)
+
+ def test_browse(self):
+ """Browse test"""
+ self.simple_limit("SimpleMaxCountBrowse", max_count=10, browse=True)
+
+ def test_browse_recover(self):
+ """Browse before and after recover test"""
+ self.simple_limit("SimpleMaxCountBrowseRecover", max_count=10, browse=True, recover=True)
+
+ def test_durable_browse(self):
+ """Browse durable message test"""
+ self.simple_limit("SimpleMaxCountDurableBrowse", max_count=10, msg_durable=True, browse=True)
+
+ def test_durable_browse_recover(self):
+ """Browse durable messages before and after recover"""
+ self.simple_limit("SimpleMaxCountDurableBrowseRecover", max_count=10, msg_durable=True, browse=True,
+ recover=True)
+
+ def test_large_msg(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxCountLargeMsg", max_count=10, max_size=10000000, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_recover(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxCountLargeMsgRecover", max_count=10, max_size=10000000, num_msgs=100,
+ msg_size=10000, recover=True)
+
+ def test_large_msg_durable(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurable", max_count=10, max_size=10000000, msg_durable=True,
+ num_msgs=100, msg_size=10000)
+
+ def test_large_msg_durable_recover(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurableRecover", max_count=10, max_size=10000000, msg_durable=True,
+ num_msgs=100, msg_size=10000, recover=True)
+
+ def test_large_msg_browse(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgBrowse", max_count=10, max_size=10000000, browse=True, num_msgs=100,
+ msg_size=10000)
+
+ def test_large_msg_browse_recover(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgBrowseRecover", max_count=10, max_size=10000000, browse=True,
+ num_msgs=100, msg_size=10000, recover=True)
+
+ def test_large_msg_durable_browse(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurableBrowse", max_count=10, max_size=10000000, msg_durable=True,
+ browse=True, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_durable_browse_recover(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurableBrowseRecover", max_count=10, max_size=10000000,
+ msg_durable=True, browse=True, num_msgs=100, msg_size=10000, recover=True)
+
+class SimpleMaxSizeTest(FlowToDisk):
+ """Flow-to-disk tests based on setting max_size"""
+
+ def test_base(self):
+ """Base test"""
+ self.simple_limit("SimpleMaxSize", max_size=100)
+
+ def test_recover(self):
+ """Recover test"""
+ self.simple_limit("SimpleMaxSizeRecover", max_size=100, recover=True)
+
+ def test_durable(self):
+ """Durable message test"""
+ self.simple_limit("SimpleMaxSizeDurable", max_size=100, msg_durable=True)
+
+ def test_durable_recover(self):
+ """Durable message recover test"""
+ self.simple_limit("SimpleMaxSizeDurable", max_size=100, msg_durable=True, recover=True)
+
+ def test_browse(self):
+ """Browse test"""
+ self.simple_limit("SimpleMaxSizeBrowse", max_size=100, browse=True)
+
+ def test_browse_recover(self):
+ """Browse before and after recover test"""
+ self.simple_limit("SimpleMaxSizeBrowseRecover", max_size=100, browse=True, recover=True)
+
+ def test_durable_browse(self):
+ """Browse durable message test"""
+ self.simple_limit("SimpleMaxSizeDurableBrowse", max_size=100, msg_durable=True, browse=True)
+
+ def test_durable_browse_recover(self):
+ """Browse durable messages before and after recover"""
+ self.simple_limit("SimpleMaxSizeDurableBrowseRecover", max_size=100, msg_durable=True, browse=True,
+ recover=True)
+
+ def test_large_msg(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsg", max_size=100000, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_recover(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgRecover", max_size=100000, num_msgs=100, msg_size=10000, recover=True)
+
+ def test_large_msg_durable(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurable", max_size=100000, msg_durable=True, num_msgs=100,
+ msg_size=10000)
+
+ def test_large_msg_durable_recover(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurableRecover", max_size=100000, msg_durable=True, num_msgs=100,
+ msg_size=10000, recover=True)
+
+ def test_large_msg_browse(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgBrowse", max_size=100, browse=True, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_browse_recover(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgBrowseRecover", max_size=100, browse=True, num_msgs=100, msg_size=10000,
+ recover=True)
+
+ def test_large_msg_durable_browse(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurableBrowse", max_size=100, msg_durable=True, browse=True,
+ num_msgs=100, msg_size=10000)
+
+ def test_large_msg_durable_browse_recover(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurableBrowseRecover", max_size=100, msg_durable=True, browse=True,
+ num_msgs=100, msg_size=10000, recover=True)
+
+class SimpleMaxSizeCountTest(FlowToDisk):
+ """Flow-to-disk tests based on setting both max_count and max_size at the same time"""
+
+ def test_base(self):
+ """Base test"""
+ self.simple_limit("MaxSizeMaxCount", max_count=10, max_size=1000)
+
+ def test_recover(self):
+ """Recover test"""
+ self.simple_limit("MaxSizeMaxCountRecover", max_count=10, max_size=1000, recover=True)
+
+ def test_durable(self):
+ """Durable message test"""
+ self.simple_limit("MaxSizeMaxCountDurable", max_count=10, max_size=1000, msg_size=250)
+
+ def test_durable_recover(self):
+ """Durable message recover test"""
+ self.simple_limit("MaxSizeMaxCountDurableRecover", max_count=10, max_size=1000, msg_size=250, recover=True)
+
+ def test_browse(self):
+ """Browse test"""
+ self.simple_limit("MaxSizeMaxCountBrowse", max_count=10, max_size=1000, browse=True)
+
+ def test_browse_recover(self):
+ """Browse before and after recover test"""
+ self.simple_limit("MaxSizeMaxCountBrowseRecover", max_count=10, max_size=1000, browse=True, recover=True)
+
+ def test_durable_browse(self):
+ """Browse durable message test"""
+ self.simple_limit("MaxSizeMaxCountDurableBrowse", max_count=10, max_size=1000, msg_size=250, browse=True)
+
+ def test_durable_browse_recover(self):
+ """Browse durable messages before and after recover"""
+ self.simple_limit("MaxSizeMaxCountDurableBrowseRecover", max_count=10, max_size=1000, msg_size=250, browse=True,
+ recover=True)
+
+# ======================================================================================================================
+
+class MultiQueueFlowToDisk(FlowToDisk):
+ """Tests for async store flow-to-disk involving multiple queues"""
+
+ def _multi_queue_setup(self, queue_map, broker, exchange_name, txn_produce, txn_consume, policy, exclusive = False):
+ """Create one send session/sender on a fanout exchange and one receive session per queue in queue_map.
+ queue_map maps queue name -> property dict (recognized keys: "durable", "max_count", "max_size").
+ txn_produce/txn_consume make the send/receive sessions transactional; policy is the queue limit
+ policy (e.g. flow_to_disk) applied via the receiver address.
+ Returns (snd_session, sndr); the receivers are created for their side effect of declaring/binding
+ the queues and are not returned."""
+ connection = broker.connect()
+ snd_session = connection.session(transactional=txn_produce)
+ # NOTE(review): "exchage_type" (sic) — presumably matches the (misspelled) parameter name in
+ # snd_addr(), which is defined outside this hunk; confirm before renaming either side.
+ addr = self.snd_addr(exchange_name, topic_flag=True, exchage_type="fanout")
+ #print "snd_addr=\"%s\"" % addr
+ sndr = snd_session.sender(addr)
+ for queue_name, queue_properties in queue_map.iteritems():
+ if "durable" in queue_properties.keys():
+ durable = queue_properties["durable"]
+ else:
+ durable = False
+ max_count = None
+ if "max_count" in queue_properties.keys():
+ max_count = queue_properties["max_count"]
+ max_size = None
+ if "max_size" in queue_properties.keys():
+ max_size = queue_properties["max_size"]
+ # Each queue gets its own session; the receiver address carries the queue's
+ # flow-to-disk count/size limits and policy.
+ rcv_session = connection.session(transactional=txn_consume)
+ addr = self.rcv_addr(exchange_name, auto_create=False, link_name=queue_name, durable=durable,
+ exclusive=exclusive, ftd_count=max_count, ftd_size=max_size, policy=policy)
+ #print "rcv_addr=\"%s\"" % addr
+ rcv_session.receiver(addr)
+ return snd_session, sndr
+
+ @staticmethod
+ def _make_policy_dict(src, marker, delim=";"):
+ """Create a dictionary of key/value strings from a formatted string src of the form
+ '... marker key1=val1, key2=val2, ..., keyN=valN delimiter ...'
+ where the portion of interest starts at marker m until the following delimiter d (default: ';')."""
+ pos = src.find(marker) + len(marker)
+ res = []
+ for index in src[pos:src.find(delim, pos)].split():
+ if "=" in index:
+ res.append(index.strip(",").split("="))
+ if len(res) > 0:
+ return dict(res)
+
+ @staticmethod
+ def _make_policy_val(src, marker, delim=";"):
+ """Return a string value from a formatted string of the form '... marker val delimiter ...' where the value
+ lies between marker and delimiter d (default: ';')"""
+ pos = src.find(marker) + len(marker)
+ return src[pos:src.find(delim, pos)].strip()
+
+ @staticmethod
+ def _check_error(error_str, fail_list=None):
+ """Check a policy exception string to ensure the failure occurred on the expected queue and at the expected
+ count."""
+ if error_str.startswith("resource-limit-exceeded"):
+ fail_policy = MultiQueueFlowToDisk._make_policy_val(error_str, "type=", delim="(")
+ fail_queue_name = MultiQueueFlowToDisk._make_policy_val(error_str, "Policy exceeded on ", delim=",")
+ fail_count_dict = MultiQueueFlowToDisk._make_policy_dict(error_str, "count: ")
+ fail_size_dict = MultiQueueFlowToDisk._make_policy_dict(error_str, "size: ")
+ if fail_list == None:
+ return False # Not expected - no failure should have occurred
+ for fail in fail_list:
+ if fail_queue_name == fail["queue"]:
+ if fail_policy != fail["type"]:
+ return False
+ if (fail_count_dict != None and "count" in fail and \
+ int(fail_count_dict["current"]) != fail["count"]) \
+ or \
+ (fail_size_dict != None and "size" in fail and int(fail_size_dict["current"]) != fail["size"]):
+ return False
+ return True
+ return False
+
+ @staticmethod
+ def _check_target_capacity_exceeded_error(err, fail_list=None):
+ """Check that an error is a TargetCapacityExceeded."""
+ if not isinstance(err, TargetCapacityExceeded):
+ return False
+ return MultiQueueFlowToDisk._check_error(str(err), fail_list)
+
+ @staticmethod
+ def _check_server_error(err, txn=False):
+ """Check that an error is a ServerError."""
+ if not isinstance(err, ServerError):
+ return False
+ if txn and str(err).startswith("internal-error: Commit failed"):
+ return True
+ return False
+
+ @staticmethod
+ def _is_queue_durable(queue_map, index):
+ """Return true if the indexed queue is durable (indexed by queue_map.keys() or queue_map.values())"""
+ return "durable" in queue_map.values()[index] and queue_map.values()[index]["durable"]
+
+ @staticmethod
+ def _expected_msg_loss(fail_list):
+ """Examine the fail_list for expected failures and return a tuple containing the expected failure conditions"""
+ count_exp_loss = None
+ count_exp_loss_queues = None
+ size_exp_loss = None
+ size_exp_loss_queues = None
+ if fail_list != None:
+ for fail in fail_list:
+ if "count" in fail:
+ this_count = fail["count"]
+ if count_exp_loss == None:
+ count_exp_loss = this_count
+ count_exp_loss_queues = [fail["queue"]]
+ elif this_count < count_exp_loss:
+ count_exp_loss = this_count
+ count_exp_loss_queues = [fail["queue"]]
+ elif this_count == count_exp_loss:
+ count_exp_loss_queues.append(fail["queue"])
+ if "size" in fail:
+ this_size = fail["size"]
+ if size_exp_loss == None:
+ size_exp_loss = this_size
+ size_exp_loss_queues = [fail["queue"]]
+ elif this_size < size_exp_loss:
+ size_exp_loss = this_size
+ size_exp_loss_queues = [fail["queue"]]
+ elif this_size == size_exp_loss:
+ size_exp_loss_queues.append(fail["queue"])
+ return (count_exp_loss, count_exp_loss_queues, size_exp_loss, size_exp_loss_queues)
+
+ @staticmethod
+ def _expected_msg_ftd(queue_map):
+ max_count = None
+ max_size = None
+ for queue_props in queue_map.itervalues():
+ if "durable" in queue_props and queue_props["durable"]:
+ if "max_count" in queue_props and queue_props["max_count"] != None and \
+ (max_count == None or queue_props["max_count"] < max_count):
+ max_count = queue_props["max_count"]
+ if "max_size" in queue_props and queue_props["max_size"] != None and \
+ (max_size == None or queue_props["max_size"] < max_size):
+ max_size = queue_props["max_size"]
+ return (max_count, max_size)
+
+
+ def tx_multi_queue_limit(self, broker_base_name, queue_map, exchange_name, **kwargs):
+ """ Test a multi-queue case
+ queue_map = queue map where map is queue name (key) against queue args (value)
+ """
+ # Unpack args
+ msg_durable = kwargs.get("msg_durable", False)
+ num_msgs = kwargs.get("num_msgs", 15)
+ msg_size = kwargs.get("msg_size", 10)
+ txn_produce = kwargs.get("txn_produce", False)
+ txn_consume = kwargs.get("txn_consume", False)
+ browse = kwargs.get("browse", False)
+ policy = kwargs.get("policy", "flow_to_disk")
+ recover = kwargs.get("recover", False)
+ sync = kwargs.get("sync", False)
+ fail_list = kwargs.get("fail_list")
+
+ bname = self._broker_name(broker_base_name, txn_produce, txn_consume)
+ broker = self.broker(store_args(), name=bname, expect=EXPECT_EXIT_OK, log_level="debug+")
+ snd_session, sndr = self._multi_queue_setup(queue_map, broker, exchange_name, txn_produce, txn_consume, policy)
+
+ # Find expected limits
+ count_exp_loss, count_exp_loss_queues, size_exp_loss, size_exp_loss_queues = self._expected_msg_loss(fail_list)
+ max_count, max_size = self._expected_msg_ftd(queue_map)
+
+ # Send messages
+ try:
+ msgs = []
+ pre_recover_ftd_msgs = [] # msgs released before a recover
+ post_recover_ftd_msgs = [] # msgs released after a recover
+ cum_msg_size = 0
+ target_queues = []
+ for index in range(0, num_msgs):
+ msg = Message(self.make_message(index, msg_size), durable=msg_durable, id=uuid4(),
+ correlation_id="msg-%04d"%index)
+ #print "Sending msg %s" % msg.id
+ sndr.send(msg, sync=sync)
+ if msg_size != None:
+ cum_msg_size += msg_size
+ if count_exp_loss != None and index >= count_exp_loss:
+ target_queues.extend(count_exp_loss_queues)
+ if size_exp_loss != None and cum_msg_size > size_exp_loss:
+ target_queues.extend(size_exp_loss_queues)
+ if (count_exp_loss == None or index < count_exp_loss) and \
+ (size_exp_loss == None or cum_msg_size <= size_exp_loss):
+ msgs.append(msg)
+ if (max_count != None and index >= max_count) or (max_size != None and cum_msg_size > max_size):
+ pre_recover_ftd_msgs.append(msg)
+ if not sync:
+ sndr.sync()
+ if txn_produce:
+ snd_session.commit()
+ except TargetCapacityExceeded, err:
+ if not self._check_target_capacity_exceeded_error(err, fail_list):
+ raise
+ except ServerError, err:
+ msgs[:] = [] # Transaction failed, all messages lost
+ if not self._check_server_error(err, txn_produce):
+ raise
+
+ # Browse messages
+ if browse:
+ for index in range(0, len(queue_map)):
+ self.check_messages(broker, queue_map.keys()[index], msgs, browse=True)
+
+ if recover:
+ broker.terminate()
+ if msg_durable:
+ post_recover_ftd_msgs = pre_recover_ftd_msgs
+ else:
+ del msgs[:] # Transient messages will be discarded on recover
+ old_broker = broker # keep for log analysis
+ broker = self.broker(store_args(), name=bname, expect=EXPECT_EXIT_OK, log_level="debug+")
+ # Browse messages
+ if browse:
+ for index in range(0, len(queue_map)):
+ empty = not self._is_queue_durable(queue_map, index)
+ self.check_messages(broker, queue_map.keys()[index], msgs, browse=True, emtpy_flag=empty)
+
+ # Consume messages
+ for index in range(0, len(queue_map)):
+ empty_chk = txn_produce or queue_map.keys()[index] in target_queues
+ empty = recover and not self._is_queue_durable(queue_map, index)
+ self.check_messages(broker, queue_map.keys()[index], msgs, transactional=txn_consume, empty=empty_chk,
+ emtpy_flag=empty)
+
+ broker.terminate()
+
+ # Check broker logs for released messages
+ if recover:
+ if txn_produce:
+ if msg_durable:
+ self.check_msg_release_on_commit(old_broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block_on_commit(old_broker, pre_recover_ftd_msgs)
+ else:
+ if msg_durable:
+ self.check_msg_release(old_broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block(old_broker, pre_recover_ftd_msgs)
+ self.check_msg_release_on_recover(broker, post_recover_ftd_msgs)
+ else:
+ if txn_produce:
+ if msg_durable:
+ self.check_msg_release_on_commit(broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block_on_commit(broker, pre_recover_ftd_msgs)
+ else:
+ if msg_durable:
+ self.check_msg_release(broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block(broker, pre_recover_ftd_msgs)
+
+ # --- Parameterized test methods ---
+
+ def no_limit(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """No policy test"""
+ queue_map_1 = {"a%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "b%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None} }
+ self.tx_multi_queue_limit("MultiQueue_NoLimit", queue_map_1, exchange_name="Fanout_a%02d" % num,
+ msg_durable=msg_durable, browse=browse, recover=recover, txn_produce=txn_produce,
+ txn_consume=txn_consume)
+
+ def max_count(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Count policy test"""
+ queue_map_2 = {"c%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "d%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"d%02d" % num, "type":"reject", "count":10}]
+ self.tx_multi_queue_limit("MultiQueue_MaxCount", queue_map_2, exchange_name="Fanout_b%02d" % num,
+ msg_durable=msg_durable, browse=browse, recover=recover, fail_list=fail_list,
+ txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def max_size(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Size policy test"""
+ queue_map_3 = {"e%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "f%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 1000} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"f%02d" % num, "type":"reject", "size":1000}]
+ self.tx_multi_queue_limit("MultiQueue_MaxSize", queue_map_3, exchange_name="Fanout_c%02d" % num, msg_size=100,
+ msg_durable=msg_durable, browse=browse, recover=recover, fail_list=fail_list,
+ txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def dual_max_count(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False,
+ txn_produce=False, txn_consume=False):
+ """Multiple count policy test"""
+ queue_map_4 = {"g%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None},
+ "h%02d" % num : {"durable":queue_durable, "max_count":8, "max_size": None} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"h%02d" % num, "type":"reject", "count":8}]
+ self.tx_multi_queue_limit("MultiQueue_DualMaxCount", queue_map_4, exchange_name="Fanout_d%02d" % num,
+ msg_durable=msg_durable, browse=browse, recover=recover, fail_list=fail_list,
+ txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def dual_max_size(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Multiple size policy test"""
+ queue_map_5 = {"i%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 1000},
+ "j%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 800} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"j%02d" % num, "type":"reject", "size":800}]
+ self.tx_multi_queue_limit("MultiQueue_DualMaxSize", queue_map_5, exchange_name="Fanout_e%02d" % num,
+ msg_size=100, msg_durable=msg_durable, browse=browse, recover=recover,
+ fail_list=fail_list, txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def mixed_limit_1(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Both count and size polices active with the same queue having equal probabilities of triggering the
+ policy"""
+ queue_map_6 = {"k%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "l%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None},
+ "m%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 1000},
+ "n%02d" % num : {"durable":queue_durable, "max_count":8, "max_size": 800} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"n%02d" % num, "type":"reject", "count":8, "size":800}]
+ self.tx_multi_queue_limit("MultiQueue_MixedLimit", queue_map_6, exchange_name="Fanout_f%02d" % num,
+ msg_size=100, msg_durable=msg_durable, browse=browse, recover=recover,
+ fail_list=fail_list, txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def mixed_limit_2(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Both count and size polices active with different queues having equal probabilities of triggering the
+ policy"""
+ queue_map_7 = {"o%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "p%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None},
+ "q%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 800},
+ "r%02d" % num : {"durable":queue_durable, "max_count":8, "max_size": 1000} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"q%02d" % num, "type":"reject", "size":800},
+ {"queue":"r%02d" % num, "type":"reject", "count":8,}]
+ self.tx_multi_queue_limit("MultiQueue_MixedLimit", queue_map_7, exchange_name="Fanout_g%02d" % num,
+ msg_size=100, msg_durable=msg_durable, browse=browse, recover=recover,
+ fail_list=fail_list, txn_produce=txn_produce, txn_consume=txn_consume)
+
+ # --- Non-parameterized test methods - these will be run by Python test framework ---
+
+ _num = None
+ _queue_durable = False
+ _msg_durable = False
+ _browse = False
+ _recover = False
+ _txn_produce = False
+ _txn_consume = False
+
+ def test_no_limit(self):
+ """No policy test (non-parameterized)"""
+ self.no_limit(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable, browse=self._browse,
+ recover=self._recover, txn_produce=self._txn_produce, txn_consume=self._txn_consume)
+
+ def test_max_count(self):
+ """Count policy test (non-parameterized)"""
+ self.max_count(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable, browse=self._browse,
+ recover=self._recover, txn_produce=self._txn_produce, txn_consume=self._txn_consume)
+
+ def test_max_size(self):
+ """Size policy test (non-parameterized)"""
+ self.max_size(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable, browse=self._browse,
+ recover=self._recover, txn_produce=self._txn_produce, txn_consume=self._txn_consume)
+
+ def test_dual_max_count(self):
+ """Multiple count policy test (non-parameterized)"""
+ self.dual_max_count(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+ def test_dual_max_size(self):
+ """Multiple size policy test (non-parameterized)"""
+ self.dual_max_size(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+ def test_mixed_limit_1(self):
+ """Both count and size polices active with the same queue having equal probabilities of triggering the
+ policy (non-parameterized)"""
+ self.mixed_limit_1(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+ def test_mixed_limit_2(self):
+ """Both count and size polices active with different queues having equal probabilities of triggering the
+ policy (non-parameterized)"""
+ self.mixed_limit_2(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+# --- Tests ---
+
+class MultiQueueTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues"""
+ _num = 1
+
+class MultiDurableQueueTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues"""
+ _num = 2
+ _queue_durable = True
+
+class MultiQueueDurableMsgTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues"""
+ _num = 3
+ _msg_durable = True
+
+class MultiDurableQueueDurableMsgTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues"""
+ _num = 4
+ _queue_durable = True
+ _msg_durable = True
+
+class MultiQueueBrowseTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues with messages browsed before being consumed"""
+ _num = 5
+ _browse = True
+
+class MultiDurableQueueBrowseTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues with messages browsed before being consumed"""
+ _num = 6
+ _queue_durable = True
+ _browse = True
+
+class MultiQueueDurableMsgBrowseTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues with messages browsed before being consumed"""
+ _num = 7
+ _msg_durable = True
+ _browse = True
+
+class MultiDurableQueueDurableMsgBrowseTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues with messages browsed before being consumed"""
+ _num = 8
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+
+class MultiQueueRecoverTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues and broker terminated/recovered"""
+ _num = 9
+ _recover = True
+
+class MultiDurableQueueRecoverTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues and broker terminated/recovered"""
+ _num = 10
+ _queue_durable = True
+ _recover = True
+
+class MultiQueueDurableMsgRecoverTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues and broker terminated/recovered"""
+ _num = 11
+ _msg_durable = True
+ _recover = True
+
+class MultiDurableQueueDurableMsgRecoverTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues and broker terminated/recovered"""
+ _num = 12
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+
+class MultiQueueBrowseRecoverTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues with messages browsed before broker terminated/recovered and
+ are consumed"""
+ _num = 13
+ _browse = True
+ _recover = True
+
+class MultiDurableQueueBrowseRecoverTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues with messages browsed before broker terminated/recovered and
+ are consumed"""
+ _num = 14
+ _queue_durable = True
+ _browse = True
+ _recover = True
+
+class MultiQueueDurableMsgBrowseRecoverTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues with messages browsed before broker terminated/recovered and
+ are consumed"""
+ _num = 15
+ _msg_durable = True
+ _browse = True
+ _recover = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues with messages browsed before broker terminated/recovered and are
+ consumed"""
+ _num = 16
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+
+class MultiQueueTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce"""
+ _num = 17
+ _txn_produce = True
+
+class MultiDurableQueueTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce"""
+ _num = 18
+ _queue_durable = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce"""
+ _num = 19
+ _msg_durable = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce"""
+ _num = 20
+ _queue_durable = True
+ _msg_durable = True
+ _txn_produce = True
+
+class MultiQueueBrowseTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce with messages browsed before
+ being consumed"""
+ _num = 21
+ _browse = True
+ _txn_produce = True
+
+class MultiDurableQueueBrowseTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce with messages browsed before
+ being consumed"""
+ _num = 22
+ _queue_durable = True
+ _browse = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgBrowseTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce with messages browsed before
+ being consumed"""
+ _num = 23
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgBrowseTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce with messages browsed before being
+ consumed"""
+ _num = 24
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+
+class MultiQueueRecoverTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce and broker
+ terminated/recovered"""
+ _num = 25
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueRecoverTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce and broker terminated/recovered"""
+ _num = 26
+ _queue_durable = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgRecoverTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce and broker terminated/recovered"""
+ _num = 27
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgRecoverTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce and broker terminated/recovered"""
+ _num = 28
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce with messages browsed before
+ broker terminated/recovered and are consumed"""
+ _num = 29
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce with messages browsed before
+ broker terminated/recovered and are consumed"""
+ _num = 30
+ _queue_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce with messages browsed before
+ broker terminated/recovered and are consumed"""
+ _num = 31
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce with messages browsed before broker
+ terminated/recovered and are consumed"""
+ _num = 32
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues and consumed transactionally"""
+ _num = 33
+ _txn_consume = True
+
+class MultiDurableQueueTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues and consumed transactionally"""
+ _num = 34
+ _queue_durable = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues and consumed transactionally"""
+ _num = 35
+ _msg_durable = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues and consumed transactionally"""
+ _num = 36
+ _queue_durable = True
+ _msg_durable = True
+ _txn_consume = True
+
+class MultiQueueBrowseTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues with messages browsed before being consumed
+ transactionally"""
+ _num = 37
+ _browse = True
+ _txn_consume = True
+
+class MultiDurableQueueBrowseTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues with messages browsed before being consumed transactionally"""
+ _num = 38
+ _queue_durable = True
+ _browse = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues with messages browsed before being consumed transactionally"""
+ _num = 39
+ _msg_durable = True
+ _browse = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues with messages browsed before being consumed transactionally"""
+ _num = 40
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _txn_consume = True
+
+class MultiQueueRecoverTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues and broker terminated/recovered before being consumed
+ transactionally"""
+ _num = 41
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueRecoverTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues and broker terminated/recovered before being consumed
+ transactionally"""
+ _num = 42
+ _queue_durable = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgRecoverTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues and broker terminated/recovered before being consumed
+ transactionally"""
+ _num = 43
+ _msg_durable = True
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgRecoverTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues and broker terminated/recovered before being consumed
+ transactionally"""
+ _num = 44
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues with messages browsed before broker terminated/recovered and
+ are consumed transactionally"""
+ _num = 45
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues with messages browsed before broker terminated/recovered and
+ are consumed transactionally"""
+ _num = 46
+ _queue_durable = True
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues with messages browsed before broker terminated/recovered and
+ are consumed transactionally"""
+ _num = 47
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues with messages browsed before broker terminated/recovered and
+ are consumed transactionally"""
+ _num = 48
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce and are consumed
+ transactionally"""
+ _num = 49
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce and are consumed
+ transactionally"""
+ _num = 50
+ _queue_durable = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce and are consumed
+ transactionally"""
+ _num = 51
+ _msg_durable = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce and are consumed
+ transactionally"""
+ _num = 52
+ _queue_durable = True
+ _msg_durable = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce with messages browsed before
+ being consumed transactionally"""
+ _num = 53
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce with messages browsed before
+ being consumed transactionally"""
+ _num = 54
+ _queue_durable = True
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce with messages browsed before
+ being consumed transactionally"""
+ _num = 55
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce with messages browsed before being
+ consumed transactionally"""
+ _num = 56
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce and broker
+ terminated/recovered before they are consumed transactionally"""
+ _num = 57
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce and broker terminated/recovered
+ before they are consumed transactionally"""
+ _num = 58
+ _queue_durable = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce and broker terminated/recovered
+ before they are consumed transactionally"""
+ _num = 59
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce and broker terminated/recovered
+ before they are consumed transactionally"""
+ _num = 60
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple transient queues under transactional produce with messages browsed before
+ broker terminated/recovered and are consumed transactionally"""
+ _num = 61
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Transient messages sent to multiple durable queues under transactional produce with messages browsed before
+ broker terminated/recovered and are consumed transactionally"""
+ _num = 62
+ _queue_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple transient queues under transactional produce with messages browsed before
+ broker terminated/recovered and are consumed transactionally"""
+ _num = 63
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ """Durable messages sent to multiple durable queues under transactional produce with messages browsed before broker
+ terminated/recovered and are consumed transactionally"""
+ _num = 64
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+ # --- Long and randomized tests ---
+
+# def test_12_Randomized(self):
+# """Randomized flow-to-disk tests"""
+# seed = long(1000.0 * time.time())
+# print "seed=0x%x" % seed
+# random.seed(seed)
+# for index in range(0, 10):
+# self.randomLimit(index)
Copied: store/trunk/cpp/tests/python_tests/store_test.py (from rev 3905, store/trunk/cpp/tests/new_python_tests/store_test.py)
===================================================================
--- store/trunk/cpp/tests/python_tests/store_test.py (rev 0)
+++ store/trunk/cpp/tests/python_tests/store_test.py 2010-04-14 14:41:04 UTC (rev 3910)
@@ -0,0 +1,407 @@
+"""
+Copyright (c) 2008 Red Hat, Inc.
+
+This file is part of the Qpid async store library msgstore.so.
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+The GNU Lesser General Public License is available in the file COPYING.
+"""
+
+import re
+from qpid.brokertest import BrokerTest
+from qpid.messaging import Empty
+from qmf.console import Session
+
+
+def store_args():
+ """Return the broker args necessary to load the async store"""
+ assert BrokerTest.store_lib
+ return ["--load-module", BrokerTest.store_lib]
+
+class Qmf:
+ """
+ QMF functions not yet available in the new QMF API. Remove this and replace with new API when it becomes available.
+ """
+ def __init__(self, broker):
+ self.__session = Session()
+ self.__broker = self.__session.addBroker("amqp://localhost:%d"%broker.port())
+
+ def add_exchange(self, exchange_name, exchange_type, alt_exchange_name=None, passive=False, durable=False,
+ arguments = None):
+ """Add a new exchange"""
+ amqp_session = self.__broker.getAmqpSession()
+ if arguments == None:
+ arguments = {}
+ if alt_exchange_name:
+ amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type,
+ alternate_exchange=alt_exchange_name, passive=passive, durable=durable,
+ arguments=arguments)
+ else:
+ amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type, passive=passive, durable=durable,
+ arguments=arguments)
+
+ def add_queue(self, queue_name, alt_exchange_name=None, passive=False, durable=False, arguments = None):
+ """Add a new queue"""
+ amqp_session = self.__broker.getAmqpSession()
+ if arguments == None:
+ arguments = {}
+ if alt_exchange_name:
+ amqp_session.queue_declare(queue_name, alternate_exchange=alt_exchange_name, passive=passive,
+ durable=durable, arguments=arguments)
+ else:
+ amqp_session.queue_declare(queue_name, passive=passive, durable=durable, arguments=arguments)
+
+ def delete_queue(self, queue_name):
+ """Delete an existing queue"""
+ amqp_session = self.__broker.getAmqpSession()
+ amqp_session.queue_delete(queue_name)
+
+ def _query(self, name, _class, package, alt_exchange_name=None):
+ """Qmf query function which can optionally look for the presence of an alternate exchange name"""
+ try:
+ obj_list = self.__session.getObjects(_class=_class, _package=package)
+ found = False
+ for obj in obj_list:
+ if obj.name == name:
+ found = True
+ if alt_exchange_name != None:
+ alt_exch_list = self.__session.getObjects(_objectId=obj.altExchange)
+ if len(alt_exch_list) == 0 or alt_exch_list[0].name != alt_exchange_name:
+ return False
+ break
+ return found
+ except Exception:
+ return False
+
+
+ def query_exchange(self, exchange_name, alt_exchange_name=None):
+ """Test for the presence of an exchange, and optionally whether it has an alternate exchange set to a known
+ value."""
+ return self._query(exchange_name, "exchange", "org.apache.qpid.broker", alt_exchange_name)
+
+ def query_queue(self, queue_name, alt_exchange_name=None):
+        """Test for the presence of a queue, and optionally whether it has an alternate exchange set to a known
+        value."""
+ return self._query(queue_name, "queue", "org.apache.qpid.broker", alt_exchange_name)
+
+ def queue_message_count(self, queue_name):
+ """Query the number of messages on a queue"""
+ queue_list = self.__session.getObjects(_class="queue", _name=queue_name)
+ if len(queue_list):
+ return queue_list[0].msgDepth
+
+ def queue_empty(self, queue_name):
+ """Check if a queue is empty (has no messages waiting)"""
+ return self.queue_message_count(queue_name) == 0
+
+
+class StoreTest(BrokerTest):
+ """
+ This subclass of BrokerTest adds some convenience test/check functions
+ """
+
+ def _chk_empty(self, queue, receiver):
+ """Check if a queue is empty (has no more messages)"""
+ try:
+ msg = receiver.fetch(timeout=0)
+ self.assert_(False, "Queue \"%s\" not empty: found message: %s" % (queue, msg))
+ except Empty:
+ pass
+
+ @staticmethod
+ def make_message(msg_count, msg_size):
+ """Make message content. Format: 'abcdef....' followed by 'msg-NNNN', where NNNN is the message count"""
+ msg = "msg-%04d" % msg_count
+ msg_len = len(msg)
+ buff = ""
+ if msg_size != None and msg_size > msg_len:
+ for index in range(0, msg_size - msg_len):
+ if index == msg_size - msg_len - 1:
+ buff += "-"
+ else:
+ buff += chr(ord('a') + (index % 26))
+ return buff + msg
+
+ # Functions for formatting address strings
+
+ @staticmethod
+ def _fmt_csv(string_list, list_braces = None):
+ """Format a list using comma-separation. Braces are optionally added."""
+ if len(string_list) == 0:
+ return ""
+ first = True
+ str_ = ""
+ if list_braces != None:
+ str_ += list_braces[0]
+ for string in string_list:
+ if string != None:
+ if first:
+ first = False
+ else:
+ str_ += ", "
+ str_ += string
+ if list_braces != None:
+ str_ += list_braces[1]
+ return str_
+
+ def _fmt_map(self, string_list):
+ """Format a map {l1, l2, l3, ...} from a string list. Each item in the list must be a formatted map
+ element('key:val')."""
+ return self._fmt_csv(string_list, list_braces="{}")
+
+ def _fmt_list(self, string_list):
+ """Format a list [l1, l2, l3, ...] from a string list."""
+ return self._fmt_csv(string_list, list_braces="[]")
+
+ def addr_fmt(self, node_name, **kwargs):
+ """Generic AMQP to new address formatter. Takes common (but not all) AMQP options and formats an address
+ string."""
+ # Get keyword args
+ node_subject = kwargs.get("node_subject")
+ create_policy = kwargs.get("create_policy")
+ delete_policy = kwargs.get("delete_policy")
+ assert_policy = kwargs.get("assert_policy")
+ mode = kwargs.get("mode")
+ link = kwargs.get("link", False)
+ link_name = kwargs.get("link_name")
+ node_type = kwargs.get("node_type")
+ durable = kwargs.get("durable", False)
+ link_reliability = kwargs.get("link_reliability")
+ x_declare_list = kwargs.get("x_declare_list", [])
+ x_bindings_list = kwargs.get("x_bindings_list", [])
+ x_subscribe_list = kwargs.get("x_subscribe_list", [])
+
+ node_flag = not link and (node_type != None or durable or len(x_declare_list) > 0 or len(x_bindings_list) > 0)
+ link_flag = link and (link_name != None or durable or link_reliability != None or len(x_declare_list) > 0 or
+ len(x_bindings_list) > 0 or len(x_subscribe_list) > 0)
+ assert not (node_flag and link_flag)
+
+ opt_str_list = []
+ if create_policy != None:
+ opt_str_list.append("create: %s" % create_policy)
+ if delete_policy != None:
+ opt_str_list.append("delete: %s" % delete_policy)
+ if assert_policy != None:
+ opt_str_list.append("assert: %s" % assert_policy)
+ if mode != None:
+ opt_str_list.append("mode: %s" % mode)
+ if node_flag or link_flag:
+ node_str_list = []
+ if link_name != None:
+ node_str_list.append("name: \"%s\"" % link_name)
+ if node_type != None:
+ node_str_list.append("type: %s" % node_type)
+ if durable:
+ node_str_list.append("durable: True")
+ if link_reliability != None:
+ node_str_list.append("reliability: %s" % link_reliability)
+ if len(x_declare_list) > 0:
+ node_str_list.append("x-declare: %s" % self._fmt_map(x_declare_list))
+ if len(x_bindings_list) > 0:
+ node_str_list.append("x-bindings: %s" % self._fmt_list(x_bindings_list))
+ if len(x_subscribe_list) > 0:
+ node_str_list.append("x-subscribe: %s" % self._fmt_map(x_subscribe_list))
+ if node_flag:
+ opt_str_list.append("node: %s" % self._fmt_map(node_str_list))
+ else:
+ opt_str_list.append("link: %s" % self._fmt_map(node_str_list))
+ addr_str = node_name
+ if node_subject != None:
+ addr_str += "/%s" % node_subject
+ if len(opt_str_list) > 0:
+ addr_str += "; %s" % self._fmt_map(opt_str_list)
+ return addr_str
+
+ def snd_addr(self, node_name, **kwargs):
+ """ Create a send (node) address"""
+ # Get keyword args
+ topic = kwargs.get("topic")
+ topic_flag = kwargs.get("topic_flag", False)
+ auto_create = kwargs.get("auto_create", True)
+ auto_delete = kwargs.get("auto_delete", False)
+ durable = kwargs.get("durable", False)
+ exclusive = kwargs.get("exclusive", False)
+ ftd_count = kwargs.get("ftd_count")
+ ftd_size = kwargs.get("ftd_size")
+ policy = kwargs.get("policy", "flow-to-disk")
+ exchage_type = kwargs.get("exchage_type")
+
+ create_policy = None
+ if auto_create:
+ create_policy = "always"
+ delete_policy = None
+ if auto_delete:
+ delete_policy = "always"
+ node_type = None
+ if topic != None or topic_flag:
+ node_type = "topic"
+ x_declare_list = ["\"exclusive\": %s" % exclusive]
+ if ftd_count != None or ftd_size != None:
+ queue_policy = ["\'qpid.policy_type\': %s" % policy]
+ if ftd_count:
+ queue_policy.append("\'qpid.max_count\': %d" % ftd_count)
+ if ftd_size:
+ queue_policy.append("\'qpid.max_size\': %d" % ftd_size)
+ x_declare_list.append("arguments: %s" % self._fmt_map(queue_policy))
+ if exchage_type != None:
+ x_declare_list.append("type: %s" % exchage_type)
+
+ return self.addr_fmt(node_name, topic=topic, create_policy=create_policy, delete_policy=delete_policy,
+ node_type=node_type, durable=durable, x_declare_list=x_declare_list)
+
+ def rcv_addr(self, node_name, **kwargs):
+ """ Create a receive (link) address"""
+ # Get keyword args
+ auto_create = kwargs.get("auto_create", True)
+ auto_delete = kwargs.get("auto_delete", False)
+ link_name = kwargs.get("link_name")
+ durable = kwargs.get("durable", False)
+ browse = kwargs.get("browse", False)
+ exclusive = kwargs.get("exclusive", False)
+ binding_list = kwargs.get("binding_list", [])
+ ftd_count = kwargs.get("ftd_count")
+ ftd_size = kwargs.get("ftd_size")
+ policy = kwargs.get("policy", "flow-to-disk")
+
+ create_policy = None
+ if auto_create:
+ create_policy = "always"
+ delete_policy = None
+ if auto_delete:
+ delete_policy = "always"
+ mode = None
+ if browse:
+ mode = "browse"
+ x_declare_list = ["\"exclusive\": %s" % exclusive]
+ if ftd_count != None or ftd_size != None:
+ queue_policy = ["\'qpid.policy_type\': %s" % policy]
+ if ftd_count:
+ queue_policy.append("\'qpid.max_count\': %d" % ftd_count)
+ if ftd_size:
+ queue_policy.append("\'qpid.max_size\': %d" % ftd_size)
+ x_declare_list.append("arguments: %s" % self._fmt_map(queue_policy))
+ x_bindings_list = []
+ for binding in binding_list:
+ x_bindings_list.append("{exchange: %s, key: %s}" % binding)
+ return self.addr_fmt(node_name, create_policy=create_policy, delete_policy=delete_policy, mode=mode, link=True,
+ link_name=link_name, durable=durable, x_declare_list=x_declare_list,
+ x_bindings_list=x_bindings_list)
+
+ def check_message(self, broker, queue, exp_msg, transactional=False, empty=False, ack=True, browse=False):
+ """Check that a message is on a queue by dequeuing it and comparing it to the expected message"""
+ return self.check_messages(broker, queue, [exp_msg], transactional, empty, ack, browse)
+
+ def check_messages(self, broker, queue, exp_msg_list, transactional=False, empty=False, ack=True, browse=False,
+ emtpy_flag=False):
+        """Check that messages are on a queue by dequeuing them and comparing them to the expected messages"""
+ if emtpy_flag:
+ num_msgs = 0
+ else:
+ num_msgs = len(exp_msg_list)
+ ssn = broker.connect().session(transactional=transactional)
+ rcvr = ssn.receiver(self.rcv_addr(queue, browse=browse), capacity=num_msgs)
+ if num_msgs > 0:
+ try:
+ recieved_msg_list = [rcvr.fetch(timeout=0) for i in range(num_msgs)]
+ except Empty:
+ self.assert_(False, "Queue \"%s\" is empty, unable to retrieve expected message %d." % (queue, i))
+ for i in range(0, len(recieved_msg_list)):
+ self.assertEqual(recieved_msg_list[i].content, exp_msg_list[i].content)
+ self.assertEqual(recieved_msg_list[i].correlation_id, exp_msg_list[i].correlation_id)
+ if empty:
+ self._chk_empty(queue, rcvr)
+ if ack:
+ ssn.acknowledge()
+ if transactional:
+ ssn.commit()
+ ssn.connection.close()
+ else:
+ if transactional:
+ ssn.commit()
+ return ssn
+
+ # Functions for finding strings in the broker log file (or other files)
+
+ @staticmethod
+ def _read_file(file_name):
+ """Returns the content of file named file_name as a string"""
+ file_handle = file(file_name)
+ try:
+ return file_handle.read()
+ finally:
+ file_handle.close()
+
+ def _get_hits(self, broker, search):
+ """Find all occurrences of the search in the broker log (eliminating possible duplicates from msgs on multiple
+ queues)"""
+ # TODO: Use sets when RHEL-4 is no longer supported
+ hits = []
+ for hit in search.findall(self._read_file(broker.log)):
+ if hit not in hits:
+ hits.append(hit)
+ return hits
+
+ def _reconsile_hits(self, broker, ftd_msgs, release_hits):
+ """Remove entries from list release_hits if they match the message id in ftd_msgs. Check for remaining
+ release_hits."""
+ for msg in ftd_msgs:
+ found = False
+ for hit in release_hits:
+ if str(msg.id) in hit:
+ release_hits.remove(hit)
+ #print "Found %s in %s" % (msg.id, broker.log)
+ found = True
+ break
+ if not found:
+ self.assert_(False, "Unable to locate released message %s in log %s" % (msg.id, broker.log))
+ if len(release_hits) > 0:
+ err = "Messages were unexpectedly released in log %s:\n" % broker.log
+ for hit in release_hits:
+ err += " %s\n" % hit
+ self.assert_(False, err)
+
+ def check_msg_release(self, broker, ftd_msgs):
+ """ Check for 'Content released' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content released$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_release_on_commit(self, broker, ftd_msgs):
+ """ Check for 'Content released on commit' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content released on commit$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_release_on_recover(self, broker, ftd_msgs):
+ """ Check for 'Content released after recovery' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content released after recovery$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_block(self, broker, ftd_msgs):
+ """Check for 'Content release blocked' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content release blocked$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_block_on_commit(self, broker, ftd_msgs):
+        """Check for 'Content release blocked on commit' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content release blocked on commit$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+
Modified: store/trunk/cpp/tests/run_long_python_tests
===================================================================
--- store/trunk/cpp/tests/run_long_python_tests 2010-04-14 14:22:16 UTC (rev 3909)
+++ store/trunk/cpp/tests/run_long_python_tests 2010-04-14 14:41:04 UTC (rev 3910)
@@ -21,4 +21,4 @@
#
# The GNU Lesser General Public License is available in the file COPYING.
-./run_new_python_tests LONG_TEST
+./run_python_tests LONG_TEST
Deleted: store/trunk/cpp/tests/run_new_python_tests
===================================================================
--- store/trunk/cpp/tests/run_new_python_tests 2010-04-14 14:22:16 UTC (rev 3909)
+++ store/trunk/cpp/tests/run_new_python_tests 2010-04-14 14:41:04 UTC (rev 3910)
@@ -1,73 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2008, 2009 Red Hat, Inc.
-#
-# This file is part of the Qpid async store library msgstore.so.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
-# USA
-#
-# The GNU Lesser General Public License is available in the file COPYING.
-
-if test -z ${QPID_DIR} ; then
- cat <<EOF
-
- =========== WARNING: PYTHON TESTS DISABLED ==============
-
- QPID_DIR not set.
-
- ===========================================================
-
-EOF
- exit
-fi
-
-. `dirname $0`/tests_env.sh
-
-echo "Running Python tests..."
-
-case $1 in
- SHORT_TEST)
- DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.test_durable_browse_recover *.flow_to_disk.MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest.test_mixed_limit_2";;
- LONG_TEST)
- DEFAULT_PYTHON_TESTS=;;
- *)
- DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.* *.flow_to_disk.MultiDurableQueueDurableMsg*.test_mixed_limit_1";;
-esac
-
-#if test -z $1; then
-# DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.* *.flow_to_disk.MultiDurableQueueDurableMsg*.test_mixed_limit_1"
-#else
-# if test x$1 == xSHORT_TEST; then
-# DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.test_durable_browse_recover *.flow_to_disk.MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest.test_mixed_limit_2"
-# else
-# DEFAULT_PYTHON_TESTS=$*
-# fi
-#fi
-
-PYTHON_TESTS=${PYTHON_TESTS:-${DEFAULT_PYTHON_TESTS}}
-
-OUTDIR=new_python_tests.tmp
-rm -rf $OUTDIR
-
-# To debug a test, add the following options to the end of the following line:
-# -v DEBUG -c qpid.messaging.io.ops [*.testName]
-${PYTHON_DIR}/qpid-python-test -m new_python_tests -I ${FAILING_PYTHON_TESTS} ${PYTHON_TESTS} -DOUTDIR=$OUTDIR
-RETCODE=$?
-
-if test x${RETCODE} != x0; then
- exit 1;
-fi
-exit 0
Copied: store/trunk/cpp/tests/run_python_tests (from rev 3905, store/trunk/cpp/tests/run_new_python_tests)
===================================================================
--- store/trunk/cpp/tests/run_python_tests (rev 0)
+++ store/trunk/cpp/tests/run_python_tests 2010-04-14 14:41:04 UTC (rev 3910)
@@ -0,0 +1,67 @@
+#!/bin/bash
+#
+# Copyright (c) 2008, 2009 Red Hat, Inc.
+#
+# This file is part of the Qpid async store library msgstore.so.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+# USA
+#
+# The GNU Lesser General Public License is available in the file COPYING.
+
+if test -z ${QPID_DIR} ; then
+ cat <<EOF
+
+ =========== WARNING: PYTHON TESTS DISABLED ==============
+
+ QPID_DIR not set.
+
+ ===========================================================
+
+EOF
+ exit
+fi
+
+. `dirname $0`/tests_env.sh
+
+MODULENAME=python_tests
+
+echo "Running Python tests in module ${MODULENAME}..."
+
+case x$1 in
+ xSHORT_TEST)
+ DEFAULT_PYTHON_TESTS="*.client_persistence.ExchangeQueueTests.* *.flow_to_disk.SimpleMaxSizeCountTest.test_durable_browse_recover *.flow_to_disk.MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest.test_mixed_limit_2" ;;
+ xLONG_TEST)
+ DEFAULT_PYTHON_TESTS= ;;
+ x)
+ DEFAULT_PYTHON_TESTS="*.client_persistence.* *.flow_to_disk.SimpleMaxSizeCountTest.* *.flow_to_disk.MultiDurableQueue*.test_mixed_limit_1 *.flow_to_disk.MultiQueue*.test_mixed_limit_1" ;;
+ *)
+ DEFAULT_PYTHON_TESTS=$1
+esac
+
+PYTHON_TESTS=${PYTHON_TESTS:-${DEFAULT_PYTHON_TESTS}}
+
+OUTDIR=${MODULENAME}.tmp
+rm -rf $OUTDIR
+
+# To debug a test, add the following options to the end of the following line:
+# -v DEBUG -c qpid.messaging.io.ops [*.testName]
+${PYTHON_DIR}/qpid-python-test -m ${MODULENAME} -I ${FAILING_PYTHON_TESTS} ${PYTHON_TESTS} -DOUTDIR=$OUTDIR
+RETCODE=$?
+
+if test x${RETCODE} != x0; then
+ exit 1;
+fi
+exit 0
Modified: store/trunk/cpp/tests/run_short_python_tests
===================================================================
--- store/trunk/cpp/tests/run_short_python_tests 2010-04-14 14:22:16 UTC (rev 3909)
+++ store/trunk/cpp/tests/run_short_python_tests 2010-04-14 14:41:04 UTC (rev 3910)
@@ -21,4 +21,4 @@
#
# The GNU Lesser General Public License is available in the file COPYING.
-./run_new_python_tests SHORT_TEST
+./run_python_tests SHORT_TEST
14 years, 8 months
rhmessaging commits: r3909 - store/branches/java/0.5.x-dev/bin.
by rhmessaging-commits@lists.jboss.org
Author: ritchiem
Date: 2010-04-14 10:22:16 -0400 (Wed, 14 Apr 2010)
New Revision: 3909
Modified:
store/branches/java/0.5.x-dev/bin/backup.sh
Log:
Updated bin/backup.sh to use dirname rather than readlink.
Modified: store/branches/java/0.5.x-dev/bin/backup.sh
===================================================================
--- store/branches/java/0.5.x-dev/bin/backup.sh 2010-04-14 13:58:46 UTC (rev 3908)
+++ store/branches/java/0.5.x-dev/bin/backup.sh 2010-04-14 14:22:16 UTC (rev 3909)
@@ -27,10 +27,10 @@
fi
done
+WHEREAMI=`dirname $0`
if [ -z "$QPID_HOME" ]; then
- export QPID_HOME=$(dirname $(dirname $(readlink -f $0)))
+ export QPID_HOME=`cd $WHEREAMI/../ && pwd`
fi
-
VERSION=0.6
LIBS=$QPID_HOME/lib/qpid-bdbtools-$VERSION.jar:$QPID_HOME/lib/je-3.3.62.jar:$QPID_HOME/lib/qpid-bdbstore-$VERSION.jar:$QPID_HOME/lib/qpid-all.jar
14 years, 8 months
rhmessaging commits: r3908 - in store/branches/java/0.5.x-dev: bin and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: ritchiem
Date: 2010-04-14 09:58:46 -0400 (Wed, 14 Apr 2010)
New Revision: 3908
Modified:
store/branches/java/0.5.x-dev/bin/backup.sh
store/branches/java/0.5.x-dev/build.xml
Log:
Update build system to include backup-log4j.xml and update backup.sh to use the standard VERSION=X convention to setup the libs.
Modified: store/branches/java/0.5.x-dev/bin/backup.sh
===================================================================
--- store/branches/java/0.5.x-dev/bin/backup.sh 2010-04-14 12:35:29 UTC (rev 3907)
+++ store/branches/java/0.5.x-dev/bin/backup.sh 2010-04-14 13:58:46 UTC (rev 3908)
@@ -1,4 +1,22 @@
#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
# Parse arguments taking all - prefixed args as JAVA_OPTS
for arg in "$@"; do
@@ -11,14 +29,12 @@
if [ -z "$QPID_HOME" ]; then
export QPID_HOME=$(dirname $(dirname $(readlink -f $0)))
- export PATH=${PATH}:${QPID_HOME}/bin
fi
-if [ -z "$BDB_HOME" ]; then
- export BDB_HOME=$(dirname $(dirname $(readlink -f $0)))
-fi
+VERSION=0.6
-LIBS=$BDB_HOME/lib/qpid-bdbtools-M3.jar:$BDB_HOME/lib/je-3.3.62.jar:$BDB_HOME/lib/qpid-bdbstore-M3.jar:$QPID_HOME/lib/qpid-incubating.jar
+LIBS=$QPID_HOME/lib/qpid-bdbtools-$VERSION.jar:$QPID_HOME/lib/je-3.3.62.jar:$QPID_HOME/lib/qpid-bdbstore-$VERSION.jar:$QPID_HOME/lib/qpid-all.jar
+
echo "Starting Hot Backup Script"
java -Dlog4j.configuration=backup-log4j.xml ${JAVA_OPTS} -cp $LIBS org.apache.qpid.server.store.berkeleydb.BDBBackup ${ARGS}
Modified: store/branches/java/0.5.x-dev/build.xml
===================================================================
--- store/branches/java/0.5.x-dev/build.xml 2010-04-14 12:35:29 UTC (rev 3907)
+++ store/branches/java/0.5.x-dev/build.xml 2010-04-14 13:58:46 UTC (rev 3908)
@@ -7,6 +7,7 @@
<property name="project.version" value="0.5"/>
<property name="src.tools.dir" location="src/tools/java"/>
<property name="src.main.dir" location="src/main/java"/>
+ <property name="src.resources.dir" location="src/resources"/>
<property name="src.test.dir" location="src/test/java"/>
<property name="build.dir" location="build"/>
<property name="lib.dir" location="lib"/>
@@ -99,7 +100,10 @@
</target>
<target name="jar" depends="build">
- <jar destfile="${bdbstore.jar}" basedir="${build.classes}"/>
+ <jar destfile="${bdbstore.jar}">
+ <fileset dir="${build.classes}"/>
+ <fileset dir="${src.resources.dir}"/>
+ </jar>
<jar destfile="${bdbtools.jar}" basedir="${build.tools.classes}"/>
</target>
14 years, 8 months
rhmessaging commits: r3907 - in mgmt/newdata/cumin/python/cumin: messaging and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2010-04-14 08:35:29 -0400 (Wed, 14 Apr 2010)
New Revision: 3907
Modified:
mgmt/newdata/cumin/python/cumin/main.py
mgmt/newdata/cumin/python/cumin/messaging/queue.py
Log:
Added TopQueueTable back into the main Overview tab
Modified: mgmt/newdata/cumin/python/cumin/main.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/main.py 2010-04-14 12:33:15 UTC (rev 3906)
+++ mgmt/newdata/cumin/python/cumin/main.py 2010-04-14 12:35:29 UTC (rev 3907)
@@ -14,6 +14,7 @@
from config import *
from model import *
+from objectselector import *
from objecttask import *
from sqladapter import *
from table import *
@@ -168,6 +169,9 @@
def __init__(self, app, name):
super(OverviewView, self).__init__(app, name)
+ queues = TopQueueTable(app, "queues")
+ self.add_child(queues)
+
# XXX
#queues = messaging.queue.TopQueueSet(app, "queues")
@@ -188,3 +192,157 @@
class ConfigurationNotice(Widget):
pass
+
+class TopQueueAdapter(SqlAdapter):
+ def __init__(self, app, cls):
+ super(TopQueueAdapter, self).__init__(app, cls.sql_table)
+ self.cls = cls
+
+ """
+select
+ "Queue"."name",
+ ((sum("Queue"."msgTotalEnqueues") -
+ sum(s."msgTotalEnqueues")) / (count(1)-1)) / 30 as avg_60,
+ "Vhost"."_brokerRef_id",
+ "Queue"."_id"
+from "org.apache.qpid.broker"."Queue"
+inner join (
+ select
+ "Queue_samples"."_parent_id",
+ "Queue_samples"."msgTotalEnqueues"
+ from "org.apache.qpid.broker"."Queue_samples"
+ where "Queue_samples"."_qmf_update_time" >= now() - interval '60 seconds') as s on "Queue"."_id" = s._parent_id
+inner join "org.apache.qpid.broker"."Vhost" on "Queue"."_vhostRef_id" = "Vhost"."_id"
+group by "Queue".name, "Queue"._id, "Vhost"."_brokerRef_id" having count(1) > 1
+order by avg_60 desc
+limit 5 offset 0
+ """
+
+ def init(self):
+ super(TopQueueAdapter, self).init()
+
+ name_col = self.table._columns_by_name["name"]
+ avg_over_last_60_seconds_col = """((sum("Queue"."msgTotalEnqueues") -
+ sum(s."msgTotalEnqueues")) / (count(1)-1)) / 30 as avg_60"""
+ queue_id_col = self.table._columns_by_name["_id"]
+ vhostRef_col = self.table._columns_by_name["_vhostRef_id"]
+ vhost_table = self.app.rosemary.org_apache_qpid_broker.Vhost.sql_table
+ vhost_id_col = vhost_table._columns_by_name["_id"]
+ vhost_brokerRef_col = vhost_table._columns_by_name["_brokerRef_id"]
+
+ self.columns.append(name_col)
+ self.columns.append(avg_over_last_60_seconds_col)
+ self.columns.append(vhost_brokerRef_col)
+ self.columns.append(queue_id_col)
+
+ sub_query = "(%s) as s" % self.get_sub_query_text()
+ SqlInnerJoin(self.query, sub_query,
+ queue_id_col, "s._parent_id")
+
+ SqlInnerJoin(self.query, vhost_table,
+ vhostRef_col, vhost_id_col)
+
+ def get_sub_query_text(self):
+ samples_table = self.cls.sql_samples_table
+ subquery = SqlQuery(samples_table)
+ parent_col = samples_table._columns_by_name["_parent_id"]
+ updated_col = samples_table._columns_by_name["_qmf_update_time"]
+ enqueues_col = samples_table._columns_by_name["msgTotalEnqueues"]
+
+ when = "now() - interval '60 seconds'"
+ SqlComparisonFilter(subquery, updated_col, when, ">=")
+
+ columns = list()
+ columns.append(parent_col)
+ columns.append(enqueues_col)
+
+ return subquery.emit(columns)
+
+ def get_data(self, values, options):
+ options.sort_column = "avg_60"
+ options.sort_ascending = False
+
+ options.group_column = "%s, %s, %s" % ("\"Queue\".name", "\"Queue\"._id", "\"Vhost\".\"_brokerRef_id\"")
+ having = SqlComparisonFilter(None, "count(1)", "1", ">")
+ options.group_having.append(having)
+
+ try:
+ data = super(TopQueueAdapter, self).get_data(values, options)
+ except:
+ data = []
+ return data
+
+ def get_sql_options(self, options):
+ return options
+
+class TopQueueTable(DataTable, Form):
+ def __init__(self, app, name):
+ cls = app.rosemary.org_apache_qpid_broker.Queue
+ adapter = TopQueueAdapter(app, cls)
+
+ super(TopQueueTable, self).__init__(app, name, adapter)
+
+ col = self.Name(app, "name")
+ self.add_column(col)
+
+ col = self.MsgEnqueuesColumn(app, cls.msgTotalEnqueues.name,
+ cls.msgTotalEnqueues)
+ self.add_column(col)
+
+ self.header = TopTableHeader(app, "header")
+ self.replace_child(self.header)
+
+ self.footer = TopTableFooter(app, "footer")
+ self.replace_child(self.footer)
+
+ self.update_enabled = True
+
+ def get_data_options(self, session):
+ options = SqlQueryOptions()
+
+ options.limit = 5
+ options.offset = 0
+
+ return options
+
+ def get_count(self, session):
+ # avoid extra sql call since we don't show the record count
+ return 0
+
+ class MsgEnqueuesColumn(DataTableColumn):
+ def render_header_content(self, session):
+ return "Recent Enqueues / sec"
+
+ def render_cell_content(self, session, record):
+ return "%.1f" % float(record[1])
+
+ def render_text_align(self, session):
+ # type is str: "count64"
+ return "right"
+
+ class Name(LinkColumn):
+ def render_header_content(self, session):
+ return "Name"
+
+ def render_cell_href(self, session, record):
+ branch = session.branch()
+
+ self.page.main.messaging.broker.id.set(branch, record[2])
+ self.page.main.messaging.broker.queue.id.set(branch, record[3])
+ self.page.main.messaging.broker.queue.view.show(branch)
+ return branch.marshal()
+
+ def render_cell_content(self, session, record):
+ return record[0]
+
+class TopTableHeader(TableHeader):
+ def __init__(self, app, name):
+ super(TopTableHeader, self).__init__(app, name)
+
+ self.font = Attribute(app, "font")
+ self.font.default = 0.9
+ self.add_attribute(self.font)
+
+class TopTableFooter(Widget):
+ def render(self, session):
+ return ""
\ No newline at end of file
Modified: mgmt/newdata/cumin/python/cumin/messaging/queue.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/messaging/queue.py 2010-04-14 12:33:15 UTC (rev 3906)
+++ mgmt/newdata/cumin/python/cumin/messaging/queue.py 2010-04-14 12:35:29 UTC (rev 3907)
@@ -667,36 +667,3 @@
self.task.invoke(session, queue, dest_queue, count)
self.task.exit_with_redirect(session, queue)
-class TopQueueSet(TopTable):
- def __init__(self, app, name):
- super(TopQueueSet, self).__init__(app, name)
-
- col = self.NameColumn(app, "name")
- col.width = "60%"
- self.add_column(col)
-
- self.set_default_column(col)
-
- col = self.EnqueuesColumn(app, "enqueues")
- col.width = "35%"
- col.align = "right"
- self.add_column(col)
-
- class NameColumn(TopTableColumn):
- def render_title(self, session):
- return "Name"
-
- def render_content(self, session, data):
- broker = Identifiable(data["broker_id"])
- queue = Identifiable(data["id"])
-
- branch = session.branch()
- self.page.main.messaging.broker.object.set(branch, broker)
- self.page.main.messaging.broker.queue.object.set(branch, queue)
- self.page.main.messaging.broker.queue.show(branch)
- return fmt_link \
- (branch.marshal(), data["name"], link_title=data["name"])
-
- class EnqueuesColumn(TopTableColumn):
- def render_title(self, session):
- return "Recent Enqueues"
14 years, 8 months
rhmessaging commits: r3906 - mgmt/newdata/rosemary/python/rosemary.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2010-04-14 08:33:15 -0400 (Wed, 14 Apr 2010)
New Revision: 3906
Modified:
mgmt/newdata/rosemary/python/rosemary/sqlquery.py
Log:
Added optional HAVING clause to GROUP BY
Modified: mgmt/newdata/rosemary/python/rosemary/sqlquery.py
===================================================================
--- mgmt/newdata/rosemary/python/rosemary/sqlquery.py 2010-04-13 17:30:22 UTC (rev 3905)
+++ mgmt/newdata/rosemary/python/rosemary/sqlquery.py 2010-04-14 12:33:15 UTC (rev 3906)
@@ -45,7 +45,7 @@
if options:
if options.group_column:
- tokens.append(self.group_by.emit(options.group_column))
+ tokens.append(self.group_by.emit(options.group_column, options.group_having))
if options.sort_column:
tokens.append(self.order_by.emit(options.sort_column,
@@ -74,14 +74,20 @@
return "limit %s offset %i" % (str(limit), offset)
class GroupBy(object):
- def emit(self, column):
+ def emit(self, column, filters):
+ having = ""
+ if filters:
+ f_text = list()
+ f_text.extend([x.emit() for x in filters])
+ having = " having %s" % " and ".join(f_text)
+
column = getattr(column, "identifier", column)
+ return "group by %s%s" % (column, having)
- return "group by %s" % column
-
class SqlQueryOptions(object):
def __init__(self):
self.group_column = None
+ self.group_having = list()
self.sort_column = None
self.sort_ascending = True
self.limit = None
@@ -94,7 +100,7 @@
assert table
assert this
assert that
-
+
self.query = query
self.table = getattr(table, "identifier", table)
self.this = getattr(this, "identifier", this)
14 years, 8 months
rhmessaging commits: r3905 - in store/trunk/cpp: tests and 2 other directories.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2010-04-13 13:30:22 -0400 (Tue, 13 Apr 2010)
New Revision: 3905
Added:
store/trunk/cpp/tests/new_python_tests/flow_to_disk.py
store/trunk/cpp/tests/new_python_tests/store_test.py
store/trunk/cpp/tests/run_short_python_tests
Removed:
store/trunk/cpp/tests/old_python_tests/
store/trunk/cpp/tests/run_old_python_tests
Modified:
store/trunk/cpp/lib/MessageStoreImpl.cpp
store/trunk/cpp/tests/Makefile.am
store/trunk/cpp/tests/jrnl/jtt/Makefile.am
store/trunk/cpp/tests/new_python_tests/__init__.py
store/trunk/cpp/tests/new_python_tests/client_persistence.py
store/trunk/cpp/tests/run_long_python_tests
store/trunk/cpp/tests/run_new_python_tests
Log:
Fix for QPID-2470 - Broker does not honour flow-to-disk policy on recovery. Added new flow-to-disk tests which detect this condition. Also reorganized tests into short, regular and long tests.
Modified: store/trunk/cpp/lib/MessageStoreImpl.cpp
===================================================================
--- store/trunk/cpp/lib/MessageStoreImpl.cpp 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/lib/MessageStoreImpl.cpp 2010-04-13 17:30:22 UTC (rev 3905)
@@ -1007,8 +1007,7 @@
} // switch
} // while
} catch (const journal::jexception& e) {
- THROW_STORE_EXCEPTION(std::string("Queue ") + queue->getName() +
- ": recoverMessages() failed: " + e.what());
+ THROW_STORE_EXCEPTION(std::string("Queue ") + queue->getName() + ": recoverMessages() failed: " + e.what());
}
}
Modified: store/trunk/cpp/tests/Makefile.am
===================================================================
--- store/trunk/cpp/tests/Makefile.am 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/Makefile.am 2010-04-13 17:30:22 UTC (rev 3905)
@@ -27,7 +27,7 @@
INCLUDES=-I$(top_srcdir)/lib -I$(top_srcdir)/lib/gen
TMP_DATA_DIR=$(abs_srcdir)/tmp_data_dir
-
+
if DO_CLUSTER_TESTS
SUBDIRS = jrnl . cluster
else
@@ -39,7 +39,6 @@
OrderingTest \
TransactionalTest \
TwoPhaseCommitTest \
- run_old_python_tests \
run_new_python_tests \
system_test.sh \
clean.sh
@@ -51,7 +50,7 @@
SHORT_TESTS = \
SimpleTest \
TransactionalTest \
- system_test.sh \
+ run_short_python_tests \
clean.sh
check_PROGRAMS = \
@@ -79,11 +78,10 @@
clean.sh \
failing_python_tests.txt \
new_python_tests \
- old_python_tests \
persistence.py \
run_long_python_tests \
- run_old_python_tests \
run_new_python_tests \
+ run_short_python_tests \
run_test \
start_broker \
stop_broker \
Modified: store/trunk/cpp/tests/jrnl/jtt/Makefile.am
===================================================================
--- store/trunk/cpp/tests/jrnl/jtt/Makefile.am 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/jrnl/jtt/Makefile.am 2010-04-13 17:30:22 UTC (rev 3905)
@@ -40,8 +40,9 @@
.valgrind.supp: $(top_srcdir)/tests/.valgrind.supp
cp $^ .
-TESTS = \
+LONG_TESTS = \
_ut_data_src \
+ _ut_long_data_src \
_ut_jrnl_init_params \
_ut_read_arg \
_ut_test_case \
@@ -50,9 +51,6 @@
_ut_test_case_set \
_ut_jrnl_instance
-LONG_TESTS = \
- _ut_long_data_src
-
check_PROGRAMS = jtt \
_ut_data_src \
_ut_long_data_src \
Modified: store/trunk/cpp/tests/new_python_tests/__init__.py
===================================================================
--- store/trunk/cpp/tests/new_python_tests/__init__.py 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/new_python_tests/__init__.py 2010-04-13 17:30:22 UTC (rev 3905)
@@ -22,3 +22,4 @@
# The GNU Lesser General Public License is available in the file COPYING.
from client_persistence import *
+from flow_to_disk import *
Modified: store/trunk/cpp/tests/new_python_tests/client_persistence.py
===================================================================
--- store/trunk/cpp/tests/new_python_tests/client_persistence.py 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/new_python_tests/client_persistence.py 2010-04-13 17:30:22 UTC (rev 3905)
@@ -1,223 +1,169 @@
-# Copyright (c) 2008 Red Hat, Inc.
-#
-# This file is part of the Qpid async store library msgstore.so.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
-# USA
-#
-# The GNU Lesser General Public License is available in the file COPYING.
+"""
+Copyright (c) 2008 Red Hat, Inc.
-from qpid.brokertest import *
-from qpid.messaging import Empty, Message
-from qmf.console import Session
-
-def storeArgs():
- assert BrokerTest.store_lib
- return ["--load-module", BrokerTest.store_lib]
+This file is part of the Qpid async store library msgstore.so.
-class Qmf:
- """
- QMF functions not yet available in the new QMF API. Remove this and replace with new API when it becomes available.
- """
- def __init__(self, broker):
- self.__session = Session()
- self.__broker = self.__session.addBroker("amqp://localhost:%d"%broker.port())
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
- def addExchange(self, exchangeName, exchangeType, altExchangeName=None, passive=False, durable=False, arguments = {}):
- """Add a new exchange"""
- amqpSession = self.__broker.getAmqpSession()
- if altExchangeName:
- amqpSession.exchange_declare(exchange=exchangeName, type=exchangeType, alternate_exchange=altExchangeName, passive=passive, durable=durable, arguments=arguments)
- else:
- amqpSession.exchange_declare(exchange=exchangeName, type=exchangeType, passive=passive, durable=durable, arguments=arguments)
-
- def addQueue(self, queueName, altExchangeName=None, passive=False, durable=False, arguments = {}):
- """Add a new queue"""
- amqpSession = self.__broker.getAmqpSession()
- if altExchangeName:
- amqpSession = amqpSession.queue_declare(queueName, alternate_exchange=altExchangeName, passive=passive, durable=durable, arguments=arguments)
- else:
- amqpSession = amqpSession.queue_declare(queueName, passive=passive, durable=durable, arguments=arguments)
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
- def __query(self, name, _class, package, altExchangeName=None):
- try:
- objList = self.__session.getObjects(_class=_class, _package=package)
- found = False
- for o in objList:
- if o.name == name:
- found = True
- if altExchangeName != None:
- altExchList = self.__session.getObjects(_objectId=o.altExchange)
- if len(altExchList) == 0 or altExchList[0].name != altExchangeName: return False
- break
- return found
- except: return False
-
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+USA
- def queryExchange(self, exchangeName, altExchangeName=None):
- """Test for the presence of an exchange, and optionally whether it has an alternate exchange set to a known value."""
- return self.__query(exchangeName, "exchange", "org.apache.qpid.broker", altExchangeName)
-
- def queryQueue(self, queueName, altExchangeName=None):
- """Test for the presence of an exchange, and optionally whether it has an alternate exchange set to a known value."""
- return self.__query(queueName, "queue", "org.apache.qpid.broker", altExchangeName)
-
- def queueMsgCount(self, queueName):
- queueList = self.__session.getObjects(_class="queue", _name=queueName)
- if len(queueList):
- return queueList[0].msgDepth
-
- def queueEmpty(self, queueName):
- return self.queueMsgCount(queueName) == 0
+The GNU Lesser General Public License is available in the file COPYING.
+"""
-
-class StoreTest(BrokerTest):
- """
- This subclass of BrokerTest adds some convenience test/check functions
- """
+from qpid.brokertest import EXPECT_EXIT_OK
+from store_test import StoreTest, Qmf, store_args
+from qpid.messaging import Message
- def __chkEmpty(self, queue, receiver):
- try:
- msg = receiver.fetch(timeout=0)
- self.assert_(False, "Queue \"%s\" not empty: found message: %s" % (queue, msg))
- except Empty: pass
-
- def chkMsg(self, broker, queue, msgChk, empty=False, ack=True):
- return self.chkMsgs(broker, queue, [msgChk], empty, ack)
-
- def chkMsgs(self, broker, queue, msgChkList, empty=False, ack=True):
- s = broker.connect().session()
- rcvr = s.receiver(queue + "; {create:always}", capacity=len(msgChkList))
- try: rmList = [rcvr.fetch(timeout=0) for i in range(len(msgChkList))]
- except Empty: self.assert_(False, "Queue \"%s\" is empty, unable to retrieve expected message %d." % (queue, i))
- for i in range(0, len(rmList)):
- self.assertEqual(rmList[i].content, msgChkList[i].content)
- self.assertEqual(rmList[i].correlation_id, msgChkList[i].correlation_id)
- if empty: self.__chkEmpty(queue, rcvr)
- if ack:
- s.acknowledge()
- s.connection.close()
- else:
- return s
-
class ExchangeQueueTests(StoreTest):
"""
Simple tests of the broker exchange and queue types
"""
- def testDirectExchange(self):
+ def test_direct_exchange(self):
"""Test Direct exchange."""
- broker = self.broker(storeArgs(), name="testDirectExchange", expect=EXPECT_EXIT_OK)
- m1 = Message("A_Message1", durable=True, correlation_id="Msg0001")
- m2 = Message("B_Message1", durable=True, correlation_id="Msg0002")
- broker.send_message("a", m1)
- broker.send_message("b", m2)
+ broker = self.broker(store_args(), name="testDirectExchange", expect=EXPECT_EXIT_OK)
+ msg1 = Message("A_Message1", durable=True, correlation_id="Msg0001")
+ msg2 = Message("B_Message1", durable=True, correlation_id="Msg0002")
+ broker.send_message("a", msg1)
+ broker.send_message("b", msg2)
broker.terminate()
- broker = self.broker(storeArgs(), name="testDirectExchange")
- self.chkMsg(broker, "a", m1, True)
- self.chkMsg(broker, "b", m2, True)
+ broker = self.broker(store_args(), name="testDirectExchange")
+ self.check_message(broker, "a", msg1, True)
+ self.check_message(broker, "b", msg2, True)
- def testTopicExchange(self):
+ def test_topic_exchange(self):
"""Test Topic exchange."""
- broker = self.broker(storeArgs(), name="testTopicExchange", expect=EXPECT_EXIT_OK)
- s = broker.connect().session()
- snd1 = s.sender("abc/key1; {create:always, node-properties:{durable:True, type:topic}}")
- snd2 = s.sender("abc/key2; {create:always, node-properties:{durable:True, type:topic}}")
- s.receiver("a; {create:always, node-properties:{durable:True, x-properties:{bindings:['abc/key1']}}}")
- s.receiver("b; {create:always, node-properties:{durable:True, x-properties:{bindings:['abc/key1']}}}")
- s.receiver("c; {create:always, node-properties:{durable:True, x-properties:{bindings:['abc/key1']}}}")
- m1 = Message("Message1", durable=True, correlation_id="Msg0003")
- snd1.send(m1)
- m2 = Message("Message2", durable=True, correlation_id="Msg0004")
- snd2.send(m2)
- s.connection.close()
+ broker = self.broker(store_args(), name="testTopicExchange", expect=EXPECT_EXIT_OK)
+ ssn = broker.connect().session()
+ snd1 = ssn.sender("abc/key1; {create:always, node:{type:topic, durable:True}}")
+ snd2 = ssn.sender("abc/key2; {create:always, node:{type:topic, durable:True}}")
+ ssn.receiver("a; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key1}]}}")
+ ssn.receiver("b; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key1}]}}")
+ ssn.receiver("c; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key1}, "
+ "{exchange:abc, key: key2}]}}")
+ ssn.receiver("d; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key2}]}}")
+ ssn.receiver("e; {create:always, link:{durable:True, x-bindings:[{exchange:abc, key:key2}]}}")
+ msg1 = Message("Message1", durable=True, correlation_id="Msg0003")
+ snd1.send(msg1)
+ msg2 = Message("Message2", durable=True, correlation_id="Msg0004")
+ snd2.send(msg2)
broker.terminate()
- broker = self.broker(storeArgs(), name="testTopicExchange")
- self.chkMsg(broker, "a", m1, True)
- self.chkMsg(broker, "b", m1, True)
- self.chkMsg(broker, "c", m1, True)
+ broker = self.broker(store_args(), name="testTopicExchange")
+ self.check_message(broker, "a", msg1, True)
+ self.check_message(broker, "b", msg1, True)
+ self.check_messages(broker, "c", [msg1, msg2], True)
+ self.check_message(broker, "d", msg2, True)
+ self.check_message(broker, "e", msg2, True)
- def testLVQ(self):
+ def test_lvq(self):
"""Test LVQ."""
- broker = self.broker(storeArgs(), name="testLVQ", expect=EXPECT_EXIT_OK)
+ broker = self.broker(store_args(), name="testLVQ", expect=EXPECT_EXIT_OK)
ma1 = Message("A1", durable=True, correlation_id="Msg0005", properties={"qpid.LVQ_key":"A"})
ma2 = Message("A2", durable=True, correlation_id="Msg0006", properties={"qpid.LVQ_key":"A"})
mb1 = Message("B1", durable=True, correlation_id="Msg0007", properties={"qpid.LVQ_key":"B"})
mb2 = Message("B2", durable=True, correlation_id="Msg0008", properties={"qpid.LVQ_key":"B"})
mb3 = Message("B3", durable=True, correlation_id="Msg0009", properties={"qpid.LVQ_key":"B"})
mc1 = Message("C1", durable=True, correlation_id="Msg0010", properties={"qpid.LVQ_key":"C"})
- broker.send_messages("lvq-test", [mb1, ma1, ma2, mb2, mb3, mc1], xprops="\"qpid.last_value_queue\":True")
+ broker.send_messages("lvq-test", [mb1, ma1, ma2, mb2, mb3, mc1],
+ xprops="arguments:{\"qpid.last_value_queue\":True}")
broker.terminate()
- broker = self.broker(storeArgs(), name="testLVQ", expect=EXPECT_EXIT_OK)
- s = self.chkMsgs(broker, "lvq-test", [ma2, mb3, mc1], empty=True, ack=False)
+ broker = self.broker(store_args(), name="testLVQ", expect=EXPECT_EXIT_OK)
+ ssn = self.check_messages(broker, "lvq-test", [ma2, mb3, mc1], empty=True, ack=False)
# Add more messages while subscriber is active (no replacement):
ma3 = Message("A3", durable=True, correlation_id="Msg0011", properties={"qpid.LVQ_key":"A"})
ma4 = Message("A4", durable=True, correlation_id="Msg0012", properties={"qpid.LVQ_key":"A"})
mc2 = Message("C2", durable=True, correlation_id="Msg0013", properties={"qpid.LVQ_key":"C"})
mc3 = Message("C3", durable=True, correlation_id="Msg0014", properties={"qpid.LVQ_key":"C"})
mc4 = Message("C4", durable=True, correlation_id="Msg0015", properties={"qpid.LVQ_key":"C"})
- broker.send_messages("lvq-test", [mc2, mc3, ma3, ma4, mc4], xprops="\"qpid.last_value_queue\":True", session=s)
- s.acknowledge()
- s.connection.close()
+ broker.send_messages("lvq-test", [mc2, mc3, ma3, ma4, mc4], session=ssn)
+ ssn.acknowledge()
broker.terminate()
- broker = self.broker(storeArgs(), name="testLVQ")
- self.chkMsgs(broker, "lvq-test", [mc4, ma4], True)
+ broker = self.broker(store_args(), name="testLVQ")
+ self.check_messages(broker, "lvq-test", [mc4, ma4], True)
+
+ def test_fanout_exchange(self):
+ """Test Fanout Exchange"""
+ broker = self.broker(store_args(), name="testFanout", expect=EXPECT_EXIT_OK)
+ ssn = broker.connect().session()
+ snd = ssn.sender("TestFanoutExchange; {create: always, node: {type: topic, x-declare: {type: fanout}}}")
+ ssn.receiver("TestFanoutExchange; {link: {name: \"q1\", durable: True}}")
+ ssn.receiver("TestFanoutExchange; {link: {name: \"q2\", durable: True}}")
+ ssn.receiver("TestFanoutExchange; {link: {name: \"q3\", durable: True}}")
+ msg1 = Message("Msg1", durable=True, correlation_id="Msg0001")
+ snd.send(msg1)
+ msg2 = Message("Msg2", durable=True, correlation_id="Msg0002")
+ snd.send(msg2)
+ broker.terminate()
+ broker = self.broker(store_args(), name="testFanout")
+ self.check_messages(broker, "q1", [msg1, msg2], True)
+ self.check_messages(broker, "q2", [msg1, msg2], True)
+ self.check_messages(broker, "q3", [msg1, msg2], True)
+
class AlternateExchagePropertyTests(StoreTest):
"""
Test the persistence of the Alternate Exchange property for exchanges and queues.
"""
- def testExchange(self):
+ def test_exchange(self):
"""Exchange alternate exchange property persistence test"""
- broker = self.broker(storeArgs(), name="testExchangeBroker", expect=EXPECT_EXIT_OK)
+ broker = self.broker(store_args(), name="testExchangeBroker", expect=EXPECT_EXIT_OK)
qmf = Qmf(broker)
- qmf.addExchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
- qmf.addExchange("testExch", "direct", durable=True, altExchangeName="altExch")
+ qmf.add_exchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
+ qmf.add_exchange("testExch", "direct", durable=True, alt_exchange_name="altExch")
broker.terminate()
- broker = self.broker(storeArgs(), name="testExchangeBroker")
+ broker = self.broker(store_args(), name="testExchangeBroker")
qmf = Qmf(broker)
- try: qmf.addExchange("altExch", "direct", passive=True)
- except Exception, e: self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % e)
- try: qmf.addExchange("testExch", "direct", passive=True)
- except Exception, e: self.fail("Test exchange (\"testExch\") instance not recovered: %s" % e)
- self.assertTrue(qmf.queryExchange("testExch", altExchangeName = "altExch"), "Alternate exchange property not found or is incorrect on exchange \"testExch\".")
+ try:
+ qmf.add_exchange("altExch", "direct", passive=True)
+ except Exception, error:
+ self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % error)
+ try:
+ qmf.add_exchange("testExch", "direct", passive=True)
+ except Exception, error:
+ self.fail("Test exchange (\"testExch\") instance not recovered: %s" % error)
+ self.assertTrue(qmf.query_exchange("testExch", alt_exchange_name = "altExch"),
+ "Alternate exchange property not found or is incorrect on exchange \"testExch\".")
- def testQueue(self):
+ def test_queue(self):
        """Queue alternate exchange property persistence test"""
- broker = self.broker(storeArgs(), name="testQueueBroker", expect=EXPECT_EXIT_OK)
+ broker = self.broker(store_args(), name="testQueueBroker", expect=EXPECT_EXIT_OK)
qmf = Qmf(broker)
- qmf.addExchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
- qmf.addQueue("testQueue", durable=True, altExchangeName="altExch")
+ qmf.add_exchange("altExch", "direct", durable=True) # Serves as alternate exchange instance
+ qmf.add_queue("testQueue", durable=True, alt_exchange_name="altExch")
broker.terminate()
- broker = self.broker(storeArgs(), name="testQueueBroker")
+ broker = self.broker(store_args(), name="testQueueBroker")
qmf = Qmf(broker)
- try: qmf.addExchange("altExch", "direct", passive=True)
- except Exception, e: self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % e)
- try: qmf.addQueue("testQueue", passive=True)
- except Exception, e: self.fail("Test queue (\"testQueue\") instance not recovered: %s" % e)
- self.assertTrue(qmf.queryQueue("testQueue", altExchangeName = "altExch"), "Alternate exchange property not found or is incorrect on queue \"testQueue\".")
+ try:
+ qmf.add_exchange("altExch", "direct", passive=True)
+ except Exception, error:
+ self.fail("Alternate exchange (\"altExch\") instance not recovered: %s" % error)
+ try:
+ qmf.add_queue("testQueue", passive=True)
+ except Exception, error:
+ self.fail("Test queue (\"testQueue\") instance not recovered: %s" % error)
+ self.assertTrue(qmf.query_queue("testQueue", alt_exchange_name = "altExch"),
+ "Alternate exchange property not found or is incorrect on queue \"testQueue\".")
class RedeliveredTests(StoreTest):
@@ -225,16 +171,16 @@
Test the behavior of the redelivered flag in the context of persistence
"""
- def testBrokerRecovery(self):
+ def test_broker_recovery(self):
"""Test that the redelivered flag is set on messages after recovery of broker"""
- broker = self.broker(storeArgs(), name="testAfterRecover", expect=EXPECT_EXIT_OK)
- mc = "xyz"*100
- m = Message(mc, durable=True)
- broker.send_message("testQueue", m)
+ broker = self.broker(store_args(), name="testAfterRecover", expect=EXPECT_EXIT_OK)
+ msg_content = "xyz"*100
+ msg = Message(msg_content, durable=True)
+ broker.send_message("testQueue", msg)
broker.terminate()
- broker = self.broker(storeArgs(), name="testAfterRecover")
- rm = broker.get_message("testQueue")
- self.assertEqual(mc, rm.content)
- self.assertTrue(rm.redelivered)
+ broker = self.broker(store_args(), name="testAfterRecover")
+ rcv_msg = broker.get_message("testQueue")
+ self.assertEqual(msg_content, rcv_msg.content)
+ self.assertTrue(rcv_msg.redelivered)
Added: store/trunk/cpp/tests/new_python_tests/flow_to_disk.py
===================================================================
--- store/trunk/cpp/tests/new_python_tests/flow_to_disk.py (rev 0)
+++ store/trunk/cpp/tests/new_python_tests/flow_to_disk.py 2010-04-13 17:30:22 UTC (rev 3905)
@@ -0,0 +1,1127 @@
+"""
+Copyright (c) 2008 Red Hat, Inc.
+
+This file is part of the Qpid async store library msgstore.so.
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+USA
+
+The GNU Lesser General Public License is available in the file COPYING.
+"""
+
+import qpid
+from qpid.brokertest import EXPECT_EXIT_OK, EXPECT_UNKNOWN
+from qpid.datatypes import uuid4
+from store_test import StoreTest, store_args
+from qpid.messaging import Message, SessionError, SendError
+
+class FlowToDisk(StoreTest):
+ """Tests for async store flow-to-disk"""
+
+ @staticmethod
+ def _broker_name(queue_name, txn_produce, txn_consume):
+ """Create a broker name based on the queue name and the transaction parameters"""
+ name = queue_name
+ if txn_produce:
+ name += "_TxP"
+ if txn_consume:
+ name += "_TxC"
+ return name
+
+ def _tx_simple_limit(self, queue_name, kwargs):
+ """
+ Test a simple case of message limits which will force flow-to-disk.
+ * kwargs sets a limit - either max_count and/or max_size
+ * messages are added. Some will flow to disk.
+ * Consume all messages sent.
+ * Check the broker has no messages left.
+ """
+ # Unpack args
+ txn_produce = kwargs.get("txn_produce", False)
+ txn_consume = kwargs.get("txn_consume", False)
+ recover = kwargs.get("recover", False)
+ max_count = kwargs.get("max_count")
+ max_size = kwargs.get("max_size")
+ policy = kwargs.get("policy", "flow_to_disk")
+ num_msgs = kwargs.get("num_msgs", 15)
+ msg_size = kwargs.get("msg_size", 10)
+ msg_durable = kwargs.get("msg_durable", False)
+ sync = kwargs.get("sync", False)
+ browse = kwargs.get("browse", False)
+
+ bname = self._broker_name(queue_name, txn_produce, txn_consume)
+ if recover:
+ expect = EXPECT_UNKNOWN
+ else:
+ expect = EXPECT_EXIT_OK
+ broker = self.broker(store_args(), name=bname, expect=expect, log_level="debug+")
+ prod_session = broker.connect().session(transactional=txn_produce)
+ sender = prod_session.sender(self.snd_addr(queue_name, auto_create=True, durable=True, ftd_count=max_count,
+ ftd_size=max_size, policy=policy))
+
+ # Send messages
+ msgs = []
+ pre_recover_ftd_msgs = [] # msgs released before a recover
+ post_recover_ftd_msgs = [] # msgs released after a recover
+ cum_msg_size = 0
+ for index in range(0, num_msgs):
+ msg = Message(self.make_message(index, msg_size), durable=msg_durable, id=uuid4(),
+ correlation_id="msg-%04d"%index)
+ #print "Sending msg %s" % msg.id
+ msgs.append(msg)
+ cum_msg_size += msg_size
+ if (max_count != None and index >= max_count) or (max_size != None and cum_msg_size > max_size):
+ pre_recover_ftd_msgs.append(msg)
+ sender.send(msg, sync=sync)
+ if not sync:
+ sender.sync()
+ # Close transaction (if needed)
+ if txn_produce:
+ prod_session.commit()
+
+ # Browse messages
+ if browse:
+ self.check_messages(broker, queue_name, msgs, browse=True)
+
+ if recover:
+ broker.terminate()
+ if msg_durable:
+ post_recover_ftd_msgs = pre_recover_ftd_msgs
+ else:
+ del msgs[:] # Transient messages will be discarded on recover
+ old_broker = broker # keep for log analysis
+ broker = self.broker(store_args(), name=bname, expect=EXPECT_EXIT_OK, log_level="debug+")
+
+ # Browse messages after recover
+ if browse:
+ self.check_messages(broker, queue_name, msgs, browse=True)
+
+ # Consume messages
+ self.check_messages(broker, queue_name, msgs, transactional=txn_consume, empty=True)
+ broker.terminate()
+
+ # Check broker logs for released messages
+ if recover:
+ if txn_produce:
+ self.check_msg_release_on_commit(old_broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_release(old_broker, pre_recover_ftd_msgs)
+ self.check_msg_release_on_recover(broker, post_recover_ftd_msgs)
+ else:
+ if txn_produce:
+ self.check_msg_release_on_commit(broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_release(broker, pre_recover_ftd_msgs)
+
+ def simple_limit(self, queue_name, **kwargs):
+ """Adapter for adding transactions to test"""
+ # Cycle through the produce/consume block transaction combinations
+ for index in range(0, 4):
+ kwargs["txn_produce"] = index & 1 != 0 # Transactional produce
+ kwargs["txn_consume"] = index & 2 != 0 # Transactional consume
+ self._tx_simple_limit(queue_name, kwargs)
+
+class SimpleMaxCountTest(FlowToDisk):
+ """Flow-to-disk tests based on setting max_count"""
+
+ def test_base(self):
+ """Base test"""
+ self.simple_limit("SimpleMaxCount", max_count=10)
+
+ def test_recover(self):
+ """Recover test"""
+ self.simple_limit("SimpleMaxCountRecover", max_count=10, recover=True)
+
+ def test_durable(self):
+ """Durable message test"""
+ self.simple_limit("SimpleMaxCountDurable", max_count=10, msg_durable=True)
+
+ def test_durable_recover(self):
+ """Durable message recover test"""
+ self.simple_limit("SimpleMaxCountDurableRecover", max_count=10, msg_durable=True, recover=True)
+
+ def test_browse(self):
+ """Browse test"""
+ self.simple_limit("SimpleMaxCountBrowse", max_count=10, browse=True)
+
+ def test_browse_recover(self):
+ """Browse before and after recover test"""
+ self.simple_limit("SimpleMaxCountBrowseRecover", max_count=10, browse=True, recover=True)
+
+ def test_durable_browse(self):
+ """Browse durable message test"""
+ self.simple_limit("SimpleMaxCountDurableBrowse", max_count=10, msg_durable=True, browse=True)
+
+ def test_durable_browse_recover(self):
+ """Browse durable messages before and after recover"""
+ self.simple_limit("SimpleMaxCountDurableBrowseRecover", max_count=10, msg_durable=True, browse=True,
+ recover=True)
+
+ def test_large_msg(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxCountLargeMsg", max_count=10, max_size=10000000, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_recover(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxCountLargeMsgRecover", max_count=10, max_size=10000000, num_msgs=100,
+ msg_size=10000, recover=True)
+
+ def test_large_msg_durable(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurable", max_count=10, max_size=10000000, msg_durable=True,
+ num_msgs=100, msg_size=10000)
+
+ def test_large_msg_durable_recover(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurableRecover", max_count=10, max_size=10000000, msg_durable=True,
+ num_msgs=100, msg_size=10000, recover=True)
+
+ def test_large_msg_browse(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgBrowse", max_count=10, max_size=10000000, browse=True, num_msgs=100,
+ msg_size=10000)
+
+ def test_large_msg_browse_recover(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgBrowseRecover", max_count=10, max_size=10000000, browse=True,
+ num_msgs=100, msg_size=10000, recover=True)
+
+ def test_large_msg_durable_browse(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurableBrowse", max_count=10, max_size=10000000, msg_durable=True,
+ browse=True, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_durable_browse_recover(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxCountLargeMsgDurableBrowseRecover", max_count=10, max_size=10000000,
+ msg_durable=True, browse=True, num_msgs=100, msg_size=10000, recover=True)
+
+class SimpleMaxSizeTest(FlowToDisk):
+ """Flow-to-disk tests based on setting max_size"""
+
+ def test_base(self):
+ """Base test"""
+ self.simple_limit("SimpleMaxSize", max_size=100)
+
+ def test_recover(self):
+ """Recover test"""
+ self.simple_limit("SimpleMaxSizeRecover", max_size=100, recover=True)
+
+ def test_durable(self):
+ """Durable message test"""
+ self.simple_limit("SimpleMaxSizeDurable", max_size=100, msg_durable=True)
+
+    def test_durable_recover(self):
+        """Durable message recover test"""
+        self.simple_limit("SimpleMaxSizeDurableRecover", max_size=100, msg_durable=True, recover=True)
+
+ def test_browse(self):
+ """Browse test"""
+ self.simple_limit("SimpleMaxSizeBrowse", max_size=100, browse=True)
+
+ def test_browse_recover(self):
+ """Browse before and after recover test"""
+ self.simple_limit("SimpleMaxSizeBrowseRecover", max_size=100, browse=True, recover=True)
+
+ def test_durable_browse(self):
+ """Browse durable message test"""
+ self.simple_limit("SimpleMaxSizeDurableBrowse", max_size=100, msg_durable=True, browse=True)
+
+ def test_durable_browse_recover(self):
+ """Browse durable messages before and after recover"""
+ self.simple_limit("SimpleMaxSizeDurableBrowseRecover", max_size=100, msg_durable=True, browse=True,
+ recover=True)
+
+ def test_large_msg(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsg", max_size=100000, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_recover(self):
+ """Large message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgRecover", max_size=100000, num_msgs=100, msg_size=10000, recover=True)
+
+ def test_large_msg_durable(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurable", max_size=100000, msg_durable=True, num_msgs=100,
+ msg_size=10000)
+
+ def test_large_msg_durable_recover(self):
+ """Large durable message test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurableRecover", max_size=100000, msg_durable=True, num_msgs=100,
+ msg_size=10000, recover=True)
+
+ def test_large_msg_browse(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgBrowse", max_size=100, browse=True, num_msgs=100, msg_size=10000)
+
+ def test_large_msg_browse_recover(self):
+ """Large message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgBrowseRecover", max_size=100, browse=True, num_msgs=100, msg_size=10000,
+ recover=True)
+
+ def test_large_msg_durable_browse(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurableBrowse", max_size=100, msg_durable=True, browse=True,
+ num_msgs=100, msg_size=10000)
+
+ def test_large_msg_durable_browse_recover(self):
+ """Large durable message browse test"""
+ self.simple_limit("SimpleMaxSizeLargeMsgDurableBrowseRecover", max_size=100, msg_durable=True, browse=True,
+ num_msgs=100, msg_size=10000, recover=True)
+
+class SimpleMaxSizeCountTest(FlowToDisk):
+ """Flow-to-disk tests based on setting both max_count and max_size at the same time"""
+
+ def test_base(self):
+ """Base test"""
+ self.simple_limit("MaxSizeMaxCount", max_count=10, max_size=1000)
+
+ def test_recover(self):
+ """Recover test"""
+ self.simple_limit("MaxSizeMaxCountRecover", max_count=10, max_size=1000, recover=True)
+
+ def test_durable(self):
+ """Durable message test"""
+ self.simple_limit("MaxSizeMaxCountDurable", max_count=10, max_size=1000, msg_size=250)
+
+ def test_durable_recover(self):
+ """Durable message recover test"""
+ self.simple_limit("MaxSizeMaxCountDurableRecover", max_count=10, max_size=1000, msg_size=250, recover=True)
+
+ def test_browse(self):
+ """Browse test"""
+ self.simple_limit("MaxSizeMaxCountBrowse", max_count=10, max_size=1000, browse=True)
+
+ def test_browse_recover(self):
+ """Browse before and after recover test"""
+ self.simple_limit("MaxSizeMaxCountBrowseRecover", max_count=10, max_size=1000, browse=True, recover=True)
+
+ def test_durable_browse(self):
+ """Browse durable message test"""
+ self.simple_limit("MaxSizeMaxCountDurableBrowse", max_count=10, max_size=1000, msg_size=250, browse=True)
+
+ def test_durable_browse_recover(self):
+ """Browse durable messages before and after recover"""
+ self.simple_limit("MaxSizeMaxCountDurableBrowseRecover", max_count=10, max_size=1000, msg_size=250, browse=True,
+ recover=True)
+
+# ======================================================================================================================
+
+class MultiQueueFlowToDisk(FlowToDisk):
+ """Tests for async store flow-to-disk involving multiple queues"""
+
+ def _multi_queue_setup(self, queue_map, broker, exchange_name, txn_produce, txn_consume, policy, exclusive = False):
+ """Create the send session and receive sessions for multi-queue scenarios"""
+ connection = broker.connect()
+ snd_session = connection.session(transactional=txn_produce)
+ addr = self.snd_addr(exchange_name, topic_flag=True, exchage_type="fanout")
+ #print "snd_addr=\"%s\"" % addr
+ sndr = snd_session.sender(addr)
+ for queue_name, queue_properties in queue_map.iteritems():
+ if "durable" in queue_properties.keys():
+ durable = queue_properties["durable"]
+ else:
+ durable = False
+ max_count = None
+ if "max_count" in queue_properties.keys():
+ max_count = queue_properties["max_count"]
+ max_size = None
+ if "max_size" in queue_properties.keys():
+ max_size = queue_properties["max_size"]
+ rcv_session = connection.session(transactional=txn_consume)
+ addr = self.rcv_addr(exchange_name, auto_create=False, link_name=queue_name, durable=durable,
+ exclusive=exclusive, ftd_count=max_count, ftd_size=max_size, policy=policy)
+ #print "rcv_addr=\"%s\"" % addr
+ rcv_session.receiver(addr)
+ return snd_session, sndr
+
+ @staticmethod
+ def _make_policy_dict(src, marker, delim=";"):
+        """Create a dictionary of key/value strings from a formatted string src of the form
+        '... marker key1=val1, key2=val2, ..., keyN=valN delim ...'
+        where the portion of interest starts after marker and runs up to the following delim (default: ';')."""
+ pos = src.find(marker) + len(marker)
+ res = []
+ for index in src[pos:src.find(delim, pos)].split():
+ if "=" in index:
+ res.append(index.strip(",").split("="))
+ if len(res) > 0:
+ return dict(res)
+
+ @staticmethod
+ def _make_policy_val(src, marker, delim=";"):
+        """Return a string value from a formatted string of the form '... marker val delim ...' where the value
+        lies between marker and delim (default: ';')"""
+ pos = src.find(marker) + len(marker)
+ return src[pos:src.find(delim, pos)].strip()
+
+ @staticmethod
+ def _check_error(error_str, fail_list=None):
+ """Check a policy exception string to ensure the failure occurred on the expected queue and at the expected
+ count."""
+ if error_str.startswith("resource-limit-exceeded"):
+ fail_policy = MultiQueueFlowToDisk._make_policy_val(error_str, "type=", delim="(")
+ fail_queue_name = MultiQueueFlowToDisk._make_policy_val(error_str, "Policy exceeded on ", delim=",")
+ fail_count_dict = MultiQueueFlowToDisk._make_policy_dict(error_str, "count: ")
+ fail_size_dict = MultiQueueFlowToDisk._make_policy_dict(error_str, "size: ")
+ #print "+++", fail_policy, fail_queue_name, fail_count_dict, fail_size_dict
+ #print "===", fail_list
+ if fail_list == None:
+ return False # Not expected - no failure should have occurred
+ for fail in fail_list:
+ if fail_queue_name == fail["queue"]:
+ #print "<<<", fail
+ if fail_policy != fail["type"]:
+ return False
+ if (fail_count_dict != None and "count" in fail and \
+ int(fail_count_dict["current"]) != fail["count"]) \
+ or \
+ (fail_size_dict != None and "size" in fail and int(fail_size_dict["current"]) != fail["size"]):
+ return False
+ #print ">>> Failure expected"
+ return True
+ return False
+
+ @staticmethod
+ def _check_send_error(error, fail_list=None):
+ """Check that an error is a SendError which in turn contains a qpid.ops.ExecutionException."""
+ if not isinstance(error.args[0], qpid.ops.ExecutionException):
+ return False
+ if "description" not in error.args[0].args():
+ return False
+ return MultiQueueFlowToDisk._check_error(error.args[0].args()["description"], fail_list)
+
+ @staticmethod
+ def _check_session_error(error, txn=False):
+ """Check that an error is a SessionError which in turn contains a qpid.ops.ExecutionException."""
+ if not isinstance(error.args[0], qpid.ops.ExecutionException):
+ return False
+ if "description" not in error.args[0].args():
+ return False
+ if txn and error.args[0].args()["description"].startswith("internal-error: Commit failed"):
+ #print ">>> Txn commit failure: expected"
+ return True
+ return False
+
+ @staticmethod
+ def _is_queue_durable(queue_map, index):
+ """Return true if the indexed queue is durable (indexed by queue_map.keys() or queue_map.values())"""
+ return "durable" in queue_map.values()[index] and queue_map.values()[index]["durable"]
+
+ @staticmethod
+ def _expected_msg_loss(fail_list):
+ """Examine the fail_list for expected failures and return a tuple containing the expected failure conditions"""
+ count_exp_loss = None
+ count_exp_loss_queues = None
+ size_exp_loss = None
+ size_exp_loss_queues = None
+ if fail_list != None:
+ for fail in fail_list:
+ if "count" in fail:
+ this_count = fail["count"]
+ if count_exp_loss == None:
+ count_exp_loss = this_count
+ count_exp_loss_queues = [fail["queue"]]
+ elif this_count < count_exp_loss:
+ count_exp_loss = this_count
+ count_exp_loss_queues = [fail["queue"]]
+ elif this_count == count_exp_loss:
+ count_exp_loss_queues.append(fail["queue"])
+ if "size" in fail:
+ this_size = fail["size"]
+ if size_exp_loss == None:
+ size_exp_loss = this_size
+ size_exp_loss_queues = [fail["queue"]]
+ elif this_size < size_exp_loss:
+ size_exp_loss = this_size
+ size_exp_loss_queues = [fail["queue"]]
+ elif this_size == size_exp_loss:
+ size_exp_loss_queues.append(fail["queue"])
+ return (count_exp_loss, count_exp_loss_queues, size_exp_loss, size_exp_loss_queues)
+
+ @staticmethod
+ def _expected_msg_ftd(queue_map):
+ max_count = None
+ max_size = None
+ for queue_props in queue_map.itervalues():
+ if "durable" in queue_props and queue_props["durable"]:
+ if "max_count" in queue_props and queue_props["max_count"] != None and \
+ (max_count == None or queue_props["max_count"] < max_count):
+ max_count = queue_props["max_count"]
+ if "max_size" in queue_props and queue_props["max_size"] != None and \
+ (max_size == None or queue_props["max_size"] < max_size):
+ max_size = queue_props["max_size"]
+ return (max_count, max_size)
+
+
+ def tx_multi_queue_limit(self, broker_base_name, queue_map, exchange_name, **kwargs):
+ """ Test a multi-queue case
+ queue_map = queue map where map is queue name (key) against queue args (value)
+ """
+ # Unpack args
+ msg_durable = kwargs.get("msg_durable", False)
+ num_msgs = kwargs.get("num_msgs", 15)
+ msg_size = kwargs.get("msg_size", 10)
+ txn_produce = kwargs.get("txn_produce", False)
+ txn_consume = kwargs.get("txn_consume", False)
+ browse = kwargs.get("browse", False)
+ policy = kwargs.get("policy", "flow_to_disk")
+ recover = kwargs.get("recover", False)
+ sync = kwargs.get("sync", False)
+ fail_list = kwargs.get("fail_list")
+
+ bname = self._broker_name(broker_base_name, txn_produce, txn_consume)
+ broker = self.broker(store_args(), name=bname, expect=EXPECT_EXIT_OK, log_level="debug+")
+ #print "+++ Started broker %s" % bname
+ snd_session, sndr = self._multi_queue_setup(queue_map, broker, exchange_name, txn_produce, txn_consume, policy)
+
+ # Find expected limits
+ count_exp_loss, count_exp_loss_queues, size_exp_loss, size_exp_loss_queues = self._expected_msg_loss(fail_list)
+ max_count, max_size = self._expected_msg_ftd(queue_map)
+
+ # Send messages
+ try:
+ msgs = []
+ pre_recover_ftd_msgs = [] # msgs released before a recover
+ post_recover_ftd_msgs = [] # msgs released after a recover
+ cum_msg_size = 0
+ target_queues = []
+ for index in range(0, num_msgs):
+ msg = Message(self.make_message(index, msg_size), durable=msg_durable, id=uuid4(),
+ correlation_id="msg-%04d"%index)
+ #print "Sending msg %s" % msg.id
+ sndr.send(msg, sync=sync)
+ if msg_size != None:
+ cum_msg_size += msg_size
+ if count_exp_loss != None and index >= count_exp_loss:
+ target_queues.extend(count_exp_loss_queues)
+ if size_exp_loss != None and cum_msg_size > size_exp_loss:
+ target_queues.extend(size_exp_loss_queues)
+ if (count_exp_loss == None or index < count_exp_loss) and \
+ (size_exp_loss == None or cum_msg_size <= size_exp_loss):
+ msgs.append(msg)
+ if (max_count != None and index >= max_count) or (max_size != None and cum_msg_size > max_size):
+ pre_recover_ftd_msgs.append(msg)
+ if not sync:
+ sndr.sync()
+ if txn_produce:
+ snd_session.commit()
+ except SendError, error:
+ if not self._check_send_error(error, fail_list):
+ raise
+ except SessionError, error:
+ msgs[:] = [] # Transaction failed, all messages lost
+ if not self._check_session_error(error, txn_produce):
+ raise
+
+ # Browse messages
+ if browse:
+ for index in range(0, len(queue_map)):
+ self.check_messages(broker, queue_map.keys()[index], msgs, browse=True)
+
+ if recover:
+ broker.terminate()
+ #print "--- Terminated broker %s" % bname
+ if msg_durable:
+ post_recover_ftd_msgs = pre_recover_ftd_msgs
+ else:
+ del msgs[:] # Transient messages will be discarded on recover
+ old_broker = broker # keep for log analysis
+ broker = self.broker(store_args(), name=bname, expect=EXPECT_EXIT_OK, log_level="debug+")
+ #print "+++ Restarted broker %s" % bname
+ # Browse messages
+ if browse:
+ for index in range(0, len(queue_map)):
+ empty = not self._is_queue_durable(queue_map, index)
+ self.check_messages(broker, queue_map.keys()[index], msgs, browse=True, emtpy_flag=empty)
+
+ # Consume messages
+ for index in range(0, len(queue_map)):
+ empty_chk = txn_produce or queue_map.keys()[index] in target_queues
+ empty = recover and not self._is_queue_durable(queue_map, index)
+ self.check_messages(broker, queue_map.keys()[index], msgs, transactional=txn_consume, empty=empty_chk,
+ emtpy_flag=empty)
+
+ broker.terminate()
+ #print "--- Stopped broker %s" % bname
+
+ # Check broker logs for released messages
+ if recover:
+ if txn_produce:
+ if msg_durable:
+ self.check_msg_release_on_commit(old_broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block_on_commit(old_broker, pre_recover_ftd_msgs)
+ else:
+ if msg_durable:
+ self.check_msg_release(old_broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block(old_broker, pre_recover_ftd_msgs)
+ self.check_msg_release_on_recover(broker, post_recover_ftd_msgs)
+ else:
+ if txn_produce:
+ if msg_durable:
+ self.check_msg_release_on_commit(broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block_on_commit(broker, pre_recover_ftd_msgs)
+ else:
+ if msg_durable:
+ self.check_msg_release(broker, pre_recover_ftd_msgs)
+ else:
+ self.check_msg_block(broker, pre_recover_ftd_msgs)
+
+#    def multi_queue_limit(self, broker_name, queue_map, exchange_name, **kwargs):
+#        """Adapter for adding transactions to test"""
+#        # Cycle through the produce/consume block transaction combinations
+#        for index in range(0, 4):
+#            kwargs["txn_produce"] = index & 1 != 0 # Transactional produce
+#            kwargs["txn_consume"] = index & 2 != 0 # Transactional consume
+#            self.tx_multi_queue_limit(broker_name, queue_map, exchange_name, **kwargs)
+
+ # --- Parameterized test methods ---
+
+ def no_limit(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """No policy test"""
+ queue_map_1 = {"a%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "b%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None} }
+ self.tx_multi_queue_limit("MultiQueue_NoLimit", queue_map_1, exchange_name="Fanout_a%02d" % num,
+ msg_durable=msg_durable, browse=browse, recover=recover, txn_produce=txn_produce,
+ txn_consume=txn_consume)
+
+ def max_count(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Count policy test"""
+ queue_map_2 = {"c%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "d%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"d%02d" % num, "type":"reject", "count":10}]
+ self.tx_multi_queue_limit("MultiQueue_MaxCount", queue_map_2, exchange_name="Fanout_b%02d" % num,
+ msg_durable=msg_durable, browse=browse, recover=recover, fail_list=fail_list,
+ txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def max_size(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Size policy test"""
+ queue_map_3 = {"e%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "f%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 1000} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"f%02d" % num, "type":"reject", "size":1000}]
+ self.tx_multi_queue_limit("MultiQueue_MaxSize", queue_map_3, exchange_name="Fanout_c%02d" % num, msg_size=100,
+ msg_durable=msg_durable, browse=browse, recover=recover, fail_list=fail_list,
+ txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def dual_max_count(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False,
+ txn_produce=False, txn_consume=False):
+ """Multiple count policy test"""
+ queue_map_4 = {"g%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None},
+ "h%02d" % num : {"durable":queue_durable, "max_count":8, "max_size": None} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"h%02d" % num, "type":"reject", "count":8}]
+ self.tx_multi_queue_limit("MultiQueue_DualMaxCount", queue_map_4, exchange_name="Fanout_d%02d" % num,
+ msg_durable=msg_durable, browse=browse, recover=recover, fail_list=fail_list,
+ txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def dual_max_size(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Multiple size policy test"""
+ queue_map_5 = {"i%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 1000},
+ "j%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 800} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"j%02d" % num, "type":"reject", "size":800}]
+ self.tx_multi_queue_limit("MultiQueue_DualMaxSize", queue_map_5, exchange_name="Fanout_e%02d" % num,
+ msg_size=100, msg_durable=msg_durable, browse=browse, recover=recover,
+ fail_list=fail_list, txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def mixed_limit_1(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Both count and size polices active with the same queue having equal probabilities of triggering the
+ policy"""
+ queue_map_6 = {"k%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "l%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None},
+ "m%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 1000},
+ "n%02d" % num : {"durable":queue_durable, "max_count":8, "max_size": 800} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"n%02d" % num, "type":"reject", "count":8, "size":800}]
+ self.tx_multi_queue_limit("MultiQueue_MixedLimit", queue_map_6, exchange_name="Fanout_f%02d" % num,
+ msg_size=100, msg_durable=msg_durable, browse=browse, recover=recover,
+ fail_list=fail_list, txn_produce=txn_produce, txn_consume=txn_consume)
+
+ def mixed_limit_2(self, num, queue_durable=False, msg_durable=False, browse=False, recover=False, txn_produce=False,
+ txn_consume=False):
+ """Both count and size polices active with different queues having equal probabilities of triggering the
+ policy"""
+ queue_map_7 = {"o%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": None},
+ "p%02d" % num : {"durable":queue_durable, "max_count":10, "max_size": None},
+ "q%02d" % num : {"durable":queue_durable, "max_count":None, "max_size": 800},
+ "r%02d" % num : {"durable":queue_durable, "max_count":8, "max_size": 1000} }
+ fail_list = None
+ if not queue_durable:
+ fail_list = [{"queue":"q%02d" % num, "type":"reject", "size":800},
+ {"queue":"r%02d" % num, "type":"reject", "count":8,}]
+ self.tx_multi_queue_limit("MultiQueue_MixedLimit", queue_map_7, exchange_name="Fanout_g%02d" % num,
+ msg_size=100, msg_durable=msg_durable, browse=browse, recover=recover,
+ fail_list=fail_list, txn_produce=txn_produce, txn_consume=txn_consume)
+
+ # --- Non-parameterized test methods - these will be run by Python test framework ---
+
+ _num = None
+ _queue_durable = False
+ _msg_durable = False
+ _browse = False
+ _recover = False
+ _txn_produce = False
+ _txn_consume = False
+
+ def test_no_limit(self):
+ """No policy test (non-parameterized)"""
+ self.no_limit(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable, browse=self._browse,
+ recover=self._recover, txn_produce=self._txn_produce, txn_consume=self._txn_consume)
+
+ def test_max_count(self):
+ """Count policy test (non-parameterized)"""
+ self.max_count(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable, browse=self._browse,
+ recover=self._recover, txn_produce=self._txn_produce, txn_consume=self._txn_consume)
+
+ def test_max_size(self):
+ """Size policy test (non-parameterized)"""
+ self.max_size(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable, browse=self._browse,
+ recover=self._recover, txn_produce=self._txn_produce, txn_consume=self._txn_consume)
+
+ def test_dual_max_count(self):
+ """Multiple count policy test (non-parameterized)"""
+ self.dual_max_count(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+ def test_dual_max_size(self):
+ """Multiple size policy test (non-parameterized)"""
+ self.dual_max_size(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+ def test_mixed_limit_1(self):
+ """Both count and size polices active with the same queue having equal probabilities of triggering the
+ policy (non-parameterized)"""
+ self.mixed_limit_1(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+ def test_mixed_limit_2(self):
+ """Both count and size polices active with different queues having equal probabilities of triggering the
+ policy (non-parameterized)"""
+ self.mixed_limit_2(self._num, queue_durable=self._queue_durable, msg_durable=self._msg_durable,
+ browse=self._browse, recover=self._recover, txn_produce=self._txn_produce,
+ txn_consume=self._txn_consume)
+
+# --- Tests ---
+
+class MultiQueueTest(MultiQueueFlowToDisk):
+ _num = 1
+
+class MultiDurableQueueTest(MultiQueueFlowToDisk):
+ _num = 2
+ _queue_durable = True
+
+class MultiQueueDurableMsgTest(MultiQueueFlowToDisk):
+ _num = 3
+ _msg_durable = True
+
+class MultiDurableQueueDurableMsgTest(MultiQueueFlowToDisk):
+ _num = 4
+ _queue_durable = True
+ _msg_durable = True
+
+class MultiQueueBrowseTest(MultiQueueFlowToDisk):
+ _num = 5
+ _browse = True
+
+class MultiDurableQueueBrowseTest(MultiQueueFlowToDisk):
+ _num = 6
+ _queue_durable = True
+ _browse = True
+
+class MultiQueueDurableMsgBrowseTest(MultiQueueFlowToDisk):
+ _num = 7
+ _msg_durable = True
+ _browse = True
+
+class MultiDurableQueueDurableMsgBrowseTest(MultiQueueFlowToDisk):
+ _num = 8
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+
+class MultiQueueRecoverTest(MultiQueueFlowToDisk):
+ _num = 9
+ _recover = True
+
+class MultiDurableQueueRecoverTest(MultiQueueFlowToDisk):
+ _num = 10
+ _queue_durable = True
+ _recover = True
+
+class MultiQueueDurableMsgRecoverTest(MultiQueueFlowToDisk):
+ _num = 11
+ _msg_durable = True
+ _recover = True
+
+class MultiDurableQueueDurableMsgRecoverTest(MultiQueueFlowToDisk):
+ _num = 12
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+
+class MultiQueueBrowseRecoverTest(MultiQueueFlowToDisk):
+ _num = 13
+ _browse = True
+ _recover = True
+
+class MultiDurableQueueBrowseRecoverTest(MultiQueueFlowToDisk):
+ _num = 14
+ _queue_durable = True
+ _browse = True
+ _recover = True
+
+class MultiQueueDurableMsgBrowseRecoverTest(MultiQueueFlowToDisk):
+ _num = 15
+ _msg_durable = True
+ _browse = True
+ _recover = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTest(MultiQueueFlowToDisk):
+ _num = 16
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+
+class MultiQueueTxPTest(MultiQueueFlowToDisk):
+ _num = 17
+ _txn_produce = True
+
+class MultiDurableQueueTxPTest(MultiQueueFlowToDisk):
+ _num = 18
+ _queue_durable = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgTxPTest(MultiQueueFlowToDisk):
+ _num = 19
+ _msg_durable = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgTxPTest(MultiQueueFlowToDisk):
+ _num = 20
+ _queue_durable = True
+ _msg_durable = True
+ _txn_produce = True
+
+class MultiQueueBrowseTxPTest(MultiQueueFlowToDisk):
+ _num = 21
+ _browse = True
+ _txn_produce = True
+
+class MultiDurableQueueBrowseTxPTest(MultiQueueFlowToDisk):
+ _num = 22
+ _queue_durable = True
+ _browse = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgBrowseTxPTest(MultiQueueFlowToDisk):
+ _num = 23
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgBrowseTxPTest(MultiQueueFlowToDisk):
+ _num = 24
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+
+class MultiQueueRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 25
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 26
+ _queue_durable = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 27
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 28
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 29
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 30
+ _queue_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueDurableMsgBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 31
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTxPTest(MultiQueueFlowToDisk):
+ _num = 32
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+
+class MultiQueueTxCTest(MultiQueueFlowToDisk):
+ _num = 33
+ _txn_consume = True
+
+class MultiDurableQueueTxCTest(MultiQueueFlowToDisk):
+ _num = 34
+ _queue_durable = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgTxCTest(MultiQueueFlowToDisk):
+ _num = 35
+ _msg_durable = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgTxCTest(MultiQueueFlowToDisk):
+ _num = 36
+ _queue_durable = True
+ _msg_durable = True
+ _txn_consume = True
+
+class MultiQueueBrowseTxCTest(MultiQueueFlowToDisk):
+ _num = 37
+ _browse = True
+ _txn_consume = True
+
+class MultiDurableQueueBrowseTxCTest(MultiQueueFlowToDisk):
+ _num = 38
+ _queue_durable = True
+ _browse = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseTxCTest(MultiQueueFlowToDisk):
+ _num = 39
+ _msg_durable = True
+ _browse = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseTxCTest(MultiQueueFlowToDisk):
+ _num = 40
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _txn_consume = True
+
+class MultiQueueRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 41
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 42
+ _queue_durable = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 43
+ _msg_durable = True
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 44
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 45
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 46
+ _queue_durable = True
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 47
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTxCTest(MultiQueueFlowToDisk):
+ _num = 48
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_consume = True
+
+class MultiQueueTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 49
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 50
+ _queue_durable = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 51
+ _msg_durable = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 52
+ _queue_durable = True
+ _msg_durable = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 53
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 54
+ _queue_durable = True
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 55
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 56
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 57
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 58
+ _queue_durable = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 59
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 60
+ _queue_durable = True
+ _msg_durable = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 61
+ _browse = True
+ _recover = True
+    _txn_produce = True
+    _txn_consume = True
+
+class MultiDurableQueueBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 62
+ _queue_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiQueueDurableMsgBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 63
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+class MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest(MultiQueueFlowToDisk):
+ _num = 64
+ _queue_durable = True
+ _msg_durable = True
+ _browse = True
+ _recover = True
+ _txn_produce = True
+ _txn_consume = True
+
+ # --- Long and randomized tests ---
+
+# def test_12_Randomized(self):
+# """Randomized flow-to-disk tests"""
+# seed = long(1000.0 * time.time())
+# print "seed=0x%x" % seed
+# random.seed(seed)
+# for index in range(0, 10):
+# self.randomLimit(index)
Added: store/trunk/cpp/tests/new_python_tests/store_test.py
===================================================================
--- store/trunk/cpp/tests/new_python_tests/store_test.py (rev 0)
+++ store/trunk/cpp/tests/new_python_tests/store_test.py 2010-04-13 17:30:22 UTC (rev 3905)
@@ -0,0 +1,407 @@
+"""
+Copyright (c) 2008 Red Hat, Inc.
+
+This file is part of the Qpid async store library msgstore.so.
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+The GNU Lesser General Public License is available in the file COPYING.
+"""
+
+import re
+from qpid.brokertest import BrokerTest
+from qpid.messaging import Empty
+from qmf.console import Session
+
+
+def store_args():
+ """Return the broker args necessary to load the async store"""
+ assert BrokerTest.store_lib
+ return ["--load-module", BrokerTest.store_lib]
+
+class Qmf:
+ """
+ QMF functions not yet available in the new QMF API. Remove this and replace with new API when it becomes available.
+ """
+ def __init__(self, broker):
+ self.__session = Session()
+ self.__broker = self.__session.addBroker("amqp://localhost:%d"%broker.port())
+
+ def add_exchange(self, exchange_name, exchange_type, alt_exchange_name=None, passive=False, durable=False,
+ arguments = None):
+ """Add a new exchange"""
+ amqp_session = self.__broker.getAmqpSession()
+ if arguments == None:
+ arguments = {}
+ if alt_exchange_name:
+ amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type,
+ alternate_exchange=alt_exchange_name, passive=passive, durable=durable,
+ arguments=arguments)
+ else:
+ amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type, passive=passive, durable=durable,
+ arguments=arguments)
+
+ def add_queue(self, queue_name, alt_exchange_name=None, passive=False, durable=False, arguments = None):
+ """Add a new queue"""
+ amqp_session = self.__broker.getAmqpSession()
+ if arguments == None:
+ arguments = {}
+ if alt_exchange_name:
+ amqp_session.queue_declare(queue_name, alternate_exchange=alt_exchange_name, passive=passive,
+ durable=durable, arguments=arguments)
+ else:
+ amqp_session.queue_declare(queue_name, passive=passive, durable=durable, arguments=arguments)
+
+ def delete_queue(self, queue_name):
+ """Delete an existing queue"""
+ amqp_session = self.__broker.getAmqpSession()
+ amqp_session.queue_delete(queue_name)
+
+ def _query(self, name, _class, package, alt_exchange_name=None):
+ """Qmf query function which can optionally look for the presence of an alternate exchange name"""
+ try:
+ obj_list = self.__session.getObjects(_class=_class, _package=package)
+ found = False
+ for obj in obj_list:
+ if obj.name == name:
+ found = True
+ if alt_exchange_name != None:
+ alt_exch_list = self.__session.getObjects(_objectId=obj.altExchange)
+ if len(alt_exch_list) == 0 or alt_exch_list[0].name != alt_exchange_name:
+ return False
+ break
+ return found
+ except Exception:
+ return False
+
+
+ def query_exchange(self, exchange_name, alt_exchange_name=None):
+ """Test for the presence of an exchange, and optionally whether it has an alternate exchange set to a known
+ value."""
+ return self._query(exchange_name, "exchange", "org.apache.qpid.broker", alt_exchange_name)
+
+ def query_queue(self, queue_name, alt_exchange_name=None):
+        """Test for the presence of a queue, and optionally whether it has an alternate exchange set to a known
+        value."""
+ return self._query(queue_name, "queue", "org.apache.qpid.broker", alt_exchange_name)
+
+ def queue_message_count(self, queue_name):
+ """Query the number of messages on a queue"""
+ queue_list = self.__session.getObjects(_class="queue", _name=queue_name)
+ if len(queue_list):
+ return queue_list[0].msgDepth
+
+ def queue_empty(self, queue_name):
+ """Check if a queue is empty (has no messages waiting)"""
+ return self.queue_message_count(queue_name) == 0
+
+
+class StoreTest(BrokerTest):
+ """
+ This subclass of BrokerTest adds some convenience test/check functions
+ """
+
+ def _chk_empty(self, queue, receiver):
+ """Check if a queue is empty (has no more messages)"""
+ try:
+ msg = receiver.fetch(timeout=0)
+ self.assert_(False, "Queue \"%s\" not empty: found message: %s" % (queue, msg))
+ except Empty:
+ pass
+
+ @staticmethod
+ def make_message(msg_count, msg_size):
+ """Make message content. Format: 'abcdef....' followed by 'msg-NNNN', where NNNN is the message count"""
+ msg = "msg-%04d" % msg_count
+ msg_len = len(msg)
+ buff = ""
+ if msg_size != None and msg_size > msg_len:
+ for index in range(0, msg_size - msg_len):
+ if index == msg_size - msg_len - 1:
+ buff += "-"
+ else:
+ buff += chr(ord('a') + (index % 26))
+ return buff + msg
+
+ # Functions for formatting address strings
+
+ @staticmethod
+ def _fmt_csv(string_list, list_braces = None):
+ """Format a list using comma-separation. Braces are optionally added."""
+ if len(string_list) == 0:
+ return ""
+ first = True
+ str_ = ""
+ if list_braces != None:
+ str_ += list_braces[0]
+ for string in string_list:
+ if string != None:
+ if first:
+ first = False
+ else:
+ str_ += ", "
+ str_ += string
+ if list_braces != None:
+ str_ += list_braces[1]
+ return str_
+
+ def _fmt_map(self, string_list):
+ """Format a map {l1, l2, l3, ...} from a string list. Each item in the list must be a formatted map
+ element('key:val')."""
+ return self._fmt_csv(string_list, list_braces="{}")
+
+ def _fmt_list(self, string_list):
+ """Format a list [l1, l2, l3, ...] from a string list."""
+ return self._fmt_csv(string_list, list_braces="[]")
+
+ def addr_fmt(self, node_name, **kwargs):
+ """Generic AMQP to new address formatter. Takes common (but not all) AMQP options and formats an address
+ string."""
+ # Get keyword args
+ node_subject = kwargs.get("node_subject")
+ create_policy = kwargs.get("create_policy")
+ delete_policy = kwargs.get("delete_policy")
+ assert_policy = kwargs.get("assert_policy")
+ mode = kwargs.get("mode")
+ link = kwargs.get("link", False)
+ link_name = kwargs.get("link_name")
+ node_type = kwargs.get("node_type")
+ durable = kwargs.get("durable", False)
+ link_reliability = kwargs.get("link_reliability")
+ x_declare_list = kwargs.get("x_declare_list", [])
+ x_bindings_list = kwargs.get("x_bindings_list", [])
+ x_subscribe_list = kwargs.get("x_subscribe_list", [])
+
+ node_flag = not link and (node_type != None or durable or len(x_declare_list) > 0 or len(x_bindings_list) > 0)
+ link_flag = link and (link_name != None or durable or link_reliability != None or len(x_declare_list) > 0 or
+ len(x_bindings_list) > 0 or len(x_subscribe_list) > 0)
+ assert not (node_flag and link_flag)
+
+ opt_str_list = []
+ if create_policy != None:
+ opt_str_list.append("create: %s" % create_policy)
+ if delete_policy != None:
+ opt_str_list.append("delete: %s" % delete_policy)
+ if assert_policy != None:
+ opt_str_list.append("assert: %s" % assert_policy)
+ if mode != None:
+ opt_str_list.append("mode: %s" % mode)
+ if node_flag or link_flag:
+ node_str_list = []
+ if link_name != None:
+ node_str_list.append("name: \"%s\"" % link_name)
+ if node_type != None:
+ node_str_list.append("type: %s" % node_type)
+ if durable:
+ node_str_list.append("durable: True")
+ if link_reliability != None:
+ node_str_list.append("reliability: %s" % link_reliability)
+ if len(x_declare_list) > 0:
+ node_str_list.append("x-declare: %s" % self._fmt_map(x_declare_list))
+ if len(x_bindings_list) > 0:
+ node_str_list.append("x-bindings: %s" % self._fmt_list(x_bindings_list))
+ if len(x_subscribe_list) > 0:
+ node_str_list.append("x-subscribe: %s" % self._fmt_map(x_subscribe_list))
+ if node_flag:
+ opt_str_list.append("node: %s" % self._fmt_map(node_str_list))
+ else:
+ opt_str_list.append("link: %s" % self._fmt_map(node_str_list))
+ addr_str = node_name
+ if node_subject != None:
+ addr_str += "/%s" % node_subject
+ if len(opt_str_list) > 0:
+ addr_str += "; %s" % self._fmt_map(opt_str_list)
+ return addr_str
+
+ def snd_addr(self, node_name, **kwargs):
+ """ Create a send (node) address"""
+ # Get keyword args
+ topic = kwargs.get("topic")
+ topic_flag = kwargs.get("topic_flag", False)
+ auto_create = kwargs.get("auto_create", True)
+ auto_delete = kwargs.get("auto_delete", False)
+ durable = kwargs.get("durable", False)
+ exclusive = kwargs.get("exclusive", False)
+ ftd_count = kwargs.get("ftd_count")
+ ftd_size = kwargs.get("ftd_size")
+ policy = kwargs.get("policy", "flow-to-disk")
+ exchage_type = kwargs.get("exchage_type")
+
+ create_policy = None
+ if auto_create:
+ create_policy = "always"
+ delete_policy = None
+ if auto_delete:
+ delete_policy = "always"
+ node_type = None
+ if topic != None or topic_flag:
+ node_type = "topic"
+ x_declare_list = ["\"exclusive\": %s" % exclusive]
+ if ftd_count != None or ftd_size != None:
+ queue_policy = ["\'qpid.policy_type\': %s" % policy]
+ if ftd_count:
+ queue_policy.append("\'qpid.max_count\': %d" % ftd_count)
+ if ftd_size:
+ queue_policy.append("\'qpid.max_size\': %d" % ftd_size)
+ x_declare_list.append("arguments: %s" % self._fmt_map(queue_policy))
+ if exchage_type != None:
+ x_declare_list.append("type: %s" % exchage_type)
+
+ return self.addr_fmt(node_name, topic=topic, create_policy=create_policy, delete_policy=delete_policy,
+ node_type=node_type, durable=durable, x_declare_list=x_declare_list)
+
+ def rcv_addr(self, node_name, **kwargs):
+ """ Create a receive (link) address"""
+ # Get keyword args
+ auto_create = kwargs.get("auto_create", True)
+ auto_delete = kwargs.get("auto_delete", False)
+ link_name = kwargs.get("link_name")
+ durable = kwargs.get("durable", False)
+ browse = kwargs.get("browse", False)
+ exclusive = kwargs.get("exclusive", False)
+ binding_list = kwargs.get("binding_list", [])
+ ftd_count = kwargs.get("ftd_count")
+ ftd_size = kwargs.get("ftd_size")
+ policy = kwargs.get("policy", "flow-to-disk")
+
+ create_policy = None
+ if auto_create:
+ create_policy = "always"
+ delete_policy = None
+ if auto_delete:
+ delete_policy = "always"
+ mode = None
+ if browse:
+ mode = "browse"
+ x_declare_list = ["\"exclusive\": %s" % exclusive]
+ if ftd_count != None or ftd_size != None:
+ queue_policy = ["\'qpid.policy_type\': %s" % policy]
+ if ftd_count:
+ queue_policy.append("\'qpid.max_count\': %d" % ftd_count)
+ if ftd_size:
+ queue_policy.append("\'qpid.max_size\': %d" % ftd_size)
+ x_declare_list.append("arguments: %s" % self._fmt_map(queue_policy))
+ x_bindings_list = []
+ for binding in binding_list:
+ x_bindings_list.append("{exchange: %s, key: %s}" % binding)
+ return self.addr_fmt(node_name, create_policy=create_policy, delete_policy=delete_policy, mode=mode, link=True,
+ link_name=link_name, durable=durable, x_declare_list=x_declare_list,
+ x_bindings_list=x_bindings_list)
+
+ def check_message(self, broker, queue, exp_msg, transactional=False, empty=False, ack=True, browse=False):
+ """Check that a message is on a queue by dequeuing it and comparing it to the expected message"""
+ return self.check_messages(broker, queue, [exp_msg], transactional, empty, ack, browse)
+
+ def check_messages(self, broker, queue, exp_msg_list, transactional=False, empty=False, ack=True, browse=False,
+ emtpy_flag=False):
+ """Check that messages is on a queue by dequeuing them and comparing them to the expected messages"""
+ if emtpy_flag:
+ num_msgs = 0
+ else:
+ num_msgs = len(exp_msg_list)
+ ssn = broker.connect().session(transactional=transactional)
+ rcvr = ssn.receiver(self.rcv_addr(queue, browse=browse), capacity=num_msgs)
+ if num_msgs > 0:
+ try:
+ recieved_msg_list = [rcvr.fetch(timeout=0) for i in range(num_msgs)]
+ except Empty:
+ self.assert_(False, "Queue \"%s\" is empty, unable to retrieve expected message %d." % (queue, i))
+ for i in range(0, len(recieved_msg_list)):
+ self.assertEqual(recieved_msg_list[i].content, exp_msg_list[i].content)
+ self.assertEqual(recieved_msg_list[i].correlation_id, exp_msg_list[i].correlation_id)
+ if empty:
+ self._chk_empty(queue, rcvr)
+ if ack:
+ ssn.acknowledge()
+ if transactional:
+ ssn.commit()
+ ssn.connection.close()
+ else:
+ if transactional:
+ ssn.commit()
+ return ssn
+
+ # Functions for finding strings in the broker log file (or other files)
+
+ @staticmethod
+ def _read_file(file_name):
+ """Returns the content of file named file_name as a string"""
+ file_handle = file(file_name)
+ try:
+ return file_handle.read()
+ finally:
+ file_handle.close()
+
+ def _get_hits(self, broker, search):
+ """Find all occurrences of the search in the broker log (eliminating possible duplicates from msgs on multiple
+ queues)"""
+ # TODO: Use sets when RHEL-4 is no longer supported
+ hits = []
+ for hit in search.findall(self._read_file(broker.log)):
+ if hit not in hits:
+ hits.append(hit)
+ return hits
+
+ def _reconsile_hits(self, broker, ftd_msgs, release_hits):
+ """Remove entries from list release_hits if they match the message id in ftd_msgs. Check for remaining
+ release_hits."""
+ for msg in ftd_msgs:
+ found = False
+ for hit in release_hits:
+ if str(msg.id) in hit:
+ release_hits.remove(hit)
+ #print "Found %s in %s" % (msg.id, broker.log)
+ found = True
+ break
+ if not found:
+ self.assert_(False, "Unable to locate released message %s in log %s" % (msg.id, broker.log))
+ if len(release_hits) > 0:
+ err = "Messages were unexpectedly released in log %s:\n" % broker.log
+ for hit in release_hits:
+ err += " %s\n" % hit
+ self.assert_(False, err)
+
+ def check_msg_release(self, broker, ftd_msgs):
+ """ Check for 'Content released' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content released$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_release_on_commit(self, broker, ftd_msgs):
+ """ Check for 'Content released on commit' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content released on commit$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_release_on_recover(self, broker, ftd_msgs):
+ """ Check for 'Content released after recovery' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content released after recovery$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_block(self, broker, ftd_msgs):
+ """Check for 'Content release blocked' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content release blocked$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+ def check_msg_block_on_commit(self, broker, ftd_msgs):
+        """Check for 'Content release blocked on commit' messages in broker log for messages in ftd_msgs"""
+ hits = self._get_hits(broker, re.compile("debug Message id=\"[0-9a-f-]{36}\"; pid=0x[0-9a-f]+: "
+ "Content release blocked on commit$", re.MULTILINE))
+ self._reconsile_hits(broker, ftd_msgs, hits)
+
+
Modified: store/trunk/cpp/tests/run_long_python_tests
===================================================================
--- store/trunk/cpp/tests/run_long_python_tests 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/run_long_python_tests 2010-04-13 17:30:22 UTC (rev 3905)
@@ -21,4 +21,4 @@
#
# The GNU Lesser General Public License is available in the file COPYING.
-./run_old_python_tests LONG_TEST
+./run_new_python_tests LONG_TEST
Modified: store/trunk/cpp/tests/run_new_python_tests
===================================================================
--- store/trunk/cpp/tests/run_new_python_tests 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/run_new_python_tests 2010-04-13 17:30:22 UTC (rev 3905)
@@ -38,8 +38,27 @@
echo "Running Python tests..."
-PYTHON_TESTS=${PYTHON_TESTS:-$*}
+case $1 in
+ SHORT_TEST)
+ DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.test_durable_browse_recover *.flow_to_disk.MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest.test_mixed_limit_2";;
+ LONG_TEST)
+ DEFAULT_PYTHON_TESTS=;;
+ *)
+ DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.* *.flow_to_disk.MultiDurableQueueDurableMsg*.test_mixed_limit_1";;
+esac
+#if test -z $1; then
+# DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.* *.flow_to_disk.MultiDurableQueueDurableMsg*.test_mixed_limit_1"
+#else
+# if test x$1 == xSHORT_TEST; then
+# DEFAULT_PYTHON_TESTS="*.flow_to_disk.SimpleMaxSizeCountTest.test_durable_browse_recover *.flow_to_disk.MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest.test_mixed_limit_2"
+# else
+# DEFAULT_PYTHON_TESTS=$*
+# fi
+#fi
+
+PYTHON_TESTS=${PYTHON_TESTS:-${DEFAULT_PYTHON_TESTS}}
+
OUTDIR=new_python_tests.tmp
rm -rf $OUTDIR
Deleted: store/trunk/cpp/tests/run_old_python_tests
===================================================================
--- store/trunk/cpp/tests/run_old_python_tests 2010-04-12 21:41:54 UTC (rev 3904)
+++ store/trunk/cpp/tests/run_old_python_tests 2010-04-13 17:30:22 UTC (rev 3905)
@@ -1,96 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2008, 2009 Red Hat, Inc.
-#
-# This file is part of the Qpid async store library msgstore.so.
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
-# USA
-#
-# The GNU Lesser General Public License is available in the file COPYING.
-
-if test x$1 == x"LONG_TEST"; then
- echo "Running long tests..."
- LONG_TEST=1
-fi
-
-if test -z ${QPID_DIR} ; then
- cat <<EOF
-
- =========== WARNING: PYTHON TESTS DISABLED ==============
-
- QPID_DIR not set.
-
- ===========================================================
-
-EOF
- exit
-fi
-
-QPID_PYTHON_DIR=${QPID_DIR}/python
-export PYTHONPATH=${QPID_PYTHON_DIR}:${abs_srcdir}
-
-if python -c "import qpid" ; then
- PYTHON_TESTS=
- FAILING_PYTHON_TESTS=${abs_srcdir}/failing_python_tests.txt
-else
- cat <<EOF
-
- =========== WARNING: PYTHON TESTS DISABLED ==============
-
- Unable to load python qpid module - skipping python tests.
-
- QPID_DIR=${QPID_DIR}"
- PYTHONPATH=${PYTHONPATH}"
-
- ===========================================================
-
-EOF
- exit
-fi
-
-STORE_DIR=${TMP_DATA_DIR}/python
-
-#Make sure temp dir exists if this is the first to use it
-if test -d ${STORE_DIR} ; then
- rm -rf ${STORE_DIR}
-fi
-mkdir -p ${STORE_DIR}
-
-if test -z ${QPIDD} ; then
- export QPIDD=${QPID_BLD}/src/qpidd
-fi
-
-trap stop_broker INT TERM QUIT
-
-start_broker() {
- ${QPIDD} --daemon --port 0 --no-module-dir --load-module=${STORE_LIB} --data-dir=${STORE_DIR} --auth=no --log-enable info+ --log-to-file ${STORE_DIR}/broker.python-test.log > qpidd.port
- LOCAL_PORT=`cat qpidd.port`
- echo "run_old_python_tests: Started qpidd on port ${LOCAL_PORT}"
-}
-
-stop_broker() {
- echo "run_old_python_tests: Stopping broker on port ${LOCAL_PORT}"
- ${QPIDD} -q --port ${LOCAL_PORT}
-}
-
-fail=0
-
-# Run all python tests
-start_broker
-$QPID_PYTHON_DIR/qpid-python-test -m old_python_tests -b localhost:$LOCAL_PORT -I ${FAILING_PYTHON_TESTS} ${PYTHON_TESTS} || { echo "FAIL: old_python_tests"; fail=1; }
-stop_broker || fail=1
-
-exit ${fail}
Added: store/trunk/cpp/tests/run_short_python_tests
===================================================================
--- store/trunk/cpp/tests/run_short_python_tests (rev 0)
+++ store/trunk/cpp/tests/run_short_python_tests 2010-04-13 17:30:22 UTC (rev 3905)
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright (c) 2008, 2009 Red Hat, Inc.
+#
+# This file is part of the Qpid async store library msgstore.so.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+# USA
+#
+# The GNU Lesser General Public License is available in the file COPYING.
+
+./run_new_python_tests SHORT_TEST
Property changes on: store/trunk/cpp/tests/run_short_python_tests
___________________________________________________________________
Name: svn:executable
+ *
14 years, 8 months