rhmessaging commits: r1327 - in mgmt/cumin: python/cumin and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-11-16 14:55:33 -0500 (Fri, 16 Nov 2007)
New Revision: 1327
Modified:
mgmt/cumin/bin/cumin-test
mgmt/cumin/python/cumin/__init__.py
mgmt/cumin/python/cumin/demo.py
mgmt/cumin/python/cumin/measurement.py
mgmt/cumin/python/cumin/measurement.strings
mgmt/cumin/python/cumin/model.py
mgmt/cumin/python/cumin/queue.strings
mgmt/cumin/python/cumin/widgets.py
Log:
Big overhaul of model metadata in preparation for mint model
integration. UI metadata is now handled in a parallel set of
CuminClasses, one for each mint class.
Uses this metadata to generate xml for queue ajax.
Modified: mgmt/cumin/bin/cumin-test
===================================================================
--- mgmt/cumin/bin/cumin-test 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/bin/cumin-test 2007-11-16 19:55:33 UTC (rev 1327)
@@ -67,7 +67,7 @@
from cumin.model import *
def do_main(port, broker, bench_hits, debug=True, demodata=True):
- model = CuminModel()
+ model = DummyModel()
app = Cumin(model)
if broker:
Modified: mgmt/cumin/python/cumin/__init__.py
===================================================================
--- mgmt/cumin/python/cumin/__init__.py 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/python/cumin/__init__.py 2007-11-16 19:55:33 UTC (rev 1327)
@@ -10,7 +10,7 @@
from mint.schema import *
from sqlobject.main import *
-from model import CuminModel
+from model import DummyModel, CuminModel
from demo import DemoData
from page import CuminPage
from queue import QueueXmlPage, QueueChartPage
@@ -29,6 +29,7 @@
self.add_resource_dir(os.path.join(self.home, "resources"))
self.model = model
+ self.cmodel = CuminModel()
self.mint = MintModel()
@@ -60,7 +61,7 @@
class CuminServer(WebServer):
def __init__(self, port=9090):
- model = CuminModel()
+ model = DummyModel()
data = DemoData(model)
data.load()
Modified: mgmt/cumin/python/cumin/demo.py
===================================================================
--- mgmt/cumin/python/cumin/demo.py 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/python/cumin/demo.py 2007-11-16 19:55:33 UTC (rev 1327)
@@ -174,7 +174,7 @@
def frob(self, mobject):
mobject.lock()
try:
- if isinstance(mobject, CuminModelObject):
+ if isinstance(mobject, DummyModelObject):
for measure in mobject.measurements:
self.frob_measure(measure)
@@ -240,7 +240,7 @@
if __name__ == "__main__":
import sys
- model = CuminModel()
+ model = DummyModel()
data = DemoData(model)
data.load()
Modified: mgmt/cumin/python/cumin/measurement.py
===================================================================
--- mgmt/cumin/python/cumin/measurement.py 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/python/cumin/measurement.py 2007-11-16 19:55:33 UTC (rev 1327)
@@ -9,205 +9,6 @@
strings = StringCatalog(__file__)
-class Measure(object):
- def __init__(self, name, unit, title=None, categories=()):
- self.name = name
- self.unit = unit
- self.title = None
- self.categories = categories
-
- self.link_cb = None
- self.highlow = False
-
- def get(self, object):
- return getattr(object, self.name)
-
-class MeasureMetadata(object):
- def __init__(self):
- self.queue = list()
- self.exchange = list()
- self.client = list()
-
- m = Measure("consumers", "int")
- m.title = "Consumers"
- m.unit = "consumer"
- m.categories = ("general")
- m.highlow = True
- self.queue.append(m)
-
- m = Measure("bindings", "int")
- m.title = "Bindings"
- m.unit = "binding"
- m.categories = ("general")
- m.highlow = True
- self.queue.append(m)
-
- m = Measure("msgDepth", "int")
- m.title = "Message Depth"
- m.unit = "message"
- m.categories = ("message", "general")
- m.highlow = True
- self.queue.append(m)
-
- m = Measure("msgTotalEnqueues", "int")
- m.title = "Msgs. Enqueued"
- m.unit = "message"
- m.categories = ("message", "general")
- self.queue.append(m)
-
- m = Measure("msgTotalDequeues", "int")
- m.title = "Msgs. Dequeued"
- m.unit = "message"
- m.categories = ("message", "general")
- self.queue.append(m)
-
- m = Measure("byteDepth", "int")
- m.title = "Byte Depth"
- m.unit = "byte"
- m.categories = ("byte", "general")
- m.highlow = True
- self.queue.append(m)
-
- m = Measure("byteTotalEnqueues", "int")
- m.title = "Bytes Enqueued"
- m.unit = "byte"
- m.categories = ("byte", "general")
- self.queue.append(m)
-
- m = Measure("byteTotalDequeues", "int")
- m.title = "Bytes Dequeued"
- m.unit = "byte"
- m.categories = ("byte", "general")
- self.queue.append(m)
-
- m = Measure("unackedMessages", "int")
- m.title = "Msgs. Unacked"
- m.unit = "message"
- m.categories = ("general")
- self.queue.append(m)
-
- # Disk
-
- #m = Measure("diskPageSize", "int")
- #m.title = "Page size"
- #m.categories = ("disk")
- #self.queue.append(m)
-
- m = Measure("diskPages", "int")
- m.title = "Disk Pages"
- m.unit = "page"
- m.categories = ("general")
- self.queue.append(m)
-
- #m = Measure("diskAvailableSize", "int")
- #m.title = "Available size"
- #m.categories = ("disk")
- #self.queue.append(m)
-
- # Transactional
-
- m = Measure("msgTxnEnqueues", "int")
- m.title = "Msgs. Enqueued"
- m.unit = "message"
- m.categories = ("message", "transactional")
- self.queue.append(m)
-
- m = Measure("msgTxnDequeues", "int")
- m.title = "Msgs. Dequeued"
- m.unit = "message"
- m.categories = ("message", "transactional")
- self.queue.append(m)
-
- m = Measure("byteTxnEnqueues", "int")
- m.title = "Bytes Enqueued"
- m.unit = "byte"
- m.categories = ("byte", "transactional")
- self.queue.append(m)
-
- m = Measure("byteTxnDequeues", "int")
- m.title = "Bytes Dequeued"
- m.unit = "byte"
- m.categories = ("byte", "transactional")
- self.queue.append(m)
-
- m = Measure("enqueueTxnStarts", "int")
- m.title = "Enq. Trans. Started"
- m.unit = "transaction"
- m.categories = ("transaction")
- self.queue.append(m)
-
- m = Measure("enqueueTxnCommits", "int")
- m.title = "Enq. Trans. Committed"
- m.unit = "transaction"
- m.categories = ("transaction")
- self.queue.append(m)
-
- m = Measure("enqueueTxnRejects", "int")
- m.title = "Enq. Trans. Rejected"
- m.unit = "transaction"
- m.categories = ("transaction")
- self.queue.append(m)
-
- m = Measure("enqueueTxnCount", "int")
- m.title = "Enq. Trans. Pending"
- m.unit = "transaction"
- m.categories = ("transaction")
- m.highlow = True
- self.queue.append(m)
-
- m = Measure("dequeueTxnStarts", "int")
- m.title = "Deq. Trans. Started"
- m.unit = "transaction"
- m.categories = ("transaction")
- self.queue.append(m)
-
- m = Measure("dequeueTxnCommits", "int")
- m.title = "Deq. Trans. Committed"
- m.unit = "transaction"
- m.categories = ("transaction")
- self.queue.append(m)
-
- m = Measure("dequeueTxnRejects", "int")
- m.title = "Deq. Trans. Rejected"
- m.unit = "transaction"
- m.categories = ("transaction")
- self.queue.append(m)
-
- m = Measure("dequeueTxnCount", "int")
- m.title = "Deq. Trans. Pending"
- m.unit = "transaction"
- m.categories = ("transaction")
- m.highlow = True
- self.queue.append(m)
-
- # Persistent
-
- m = Measure("msgPersistEnqueues", "int")
- m.title = "Msgs. Enqueued"
- m.unit = "message"
- m.categories = ("message", "persistent")
- self.queue.append(m)
-
- m = Measure("msgPersistDequeues", "int")
- m.title = "Msgs. Dequeued"
- m.unit = "message"
- m.categories = ("message", "persistent")
- self.queue.append(m)
-
- m = Measure("bytePersistEnqueues", "int")
- m.title = "Bytes Enqueued"
- m.unit = "byte"
- m.categories = ("byte", "persistent")
- self.queue.append(m)
-
- m = Measure("bytePersistDequeues", "int")
- m.title = "Bytes Dequeued"
- m.unit = "byte"
- m.categories = ("byte", "persistent")
- self.queue.append(m)
-
-meta = MeasureMetadata()
-
class MeasurementSet(ItemSet):
unit_abbrevs = dict()
unit_abbrevs["message"] = "msg"
@@ -222,52 +23,46 @@
self.add_attribute(self.object);
def get_items(self, session, object):
- self.object.set(session, object);
+ stats = list()
+ cls = self.app.cmodel.get_class(object)
- if isinstance(object, MintQueue):
- stats = object.mintQueueStats
- elif isinstance(object, MintExchange):
- stats = object.mintExchangeStats
- elif isinstance(object, MintClient):
- stats = object.mintClientStats
- else:
- raise Exception()
-
- measures = list()
-
- for measure in meta.queue:
- if self.category in measure.categories:
- measures.append((measure, stats))
+ for stat in cls.stats:
+ if self.category in stat.categories:
+ stats.append((stat, object))
- return measures
+ return stats
def render_item_title(self, session, args):
- measure, stats = args
- return measure.title
+ stat, object = args
+ return stat.title
def render_item_name(self, session, args):
- measure, stats = args
- return measure.name
+ stat, object = args
+ return stat.name
def render_item_value(self, session, args):
- measure, stats = args
+ stat, object = args
- if measure.link_cb:
+ if stat.link_cb:
branch = session.branch()
- measure.link_cb(self.page(), branch, self.object.get(session))
- return fmt_link(branch.marshal(), measure.get(stats))
+ stat.link_cb(self.page(), branch, object)
+ return fmt_link(branch.marshal(), stat.value(object))
else:
- return measure.get(stats)
+ return stat.value(object)
- def render_item_extra(self, session, measure):
+ def render_item_extra(self, session, args):
+ stat, object = args
+
return None #XXX
- if measure.highlow:
- return "<small>high</small> <span>%i</span> <small>low</small> <span>%i</span>" \
- % (measure.get_high() or 0, measure.get_low() or 0)
+ if stat.highlow:
+ return "<small>high</small> <span>%i</span> " + \
+ "<small>low</small> <span>%i</span>" \
+ % (stat.high(object) or 0, stat.low(object) or 0)
else:
- unit = self.unit_abbrevs.get(measure.unit, measure.unit)
- return fmt_rate(measure.get_rate(), unit, "sec")
+ unit = self.unit_abbrevs.get(stat.unit, stat.unit)
+ return fmt_rate(stat.rate(object), unit, "sec")
- def render_item_average(self, session, measure):
+ def render_item_average(self, session, args):
+ stat, object = args
return None #XXX "%0.2f" % (sum(measure.values) / float(len(measure.values)))
Modified: mgmt/cumin/python/cumin/measurement.strings
===================================================================
--- mgmt/cumin/python/cumin/measurement.strings 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/python/cumin/measurement.strings 2007-11-16 19:55:33 UTC (rev 1327)
@@ -5,13 +5,13 @@
var tr = trs.next();
while (tr) {
- var attr = tr.getattr("m");
+ var attr = tr.getattr("stat");
if (attr) {
- var m = object.measurement[attr];
+ var stat = object.stat[attr];
var tds = tr.elems("td", null, null, 0, 2);
- tds.next().text().set(m.value);
+ tds.next().text().set(stat.value);
var td = tds.next();
@@ -19,13 +19,13 @@
var ph = phs.next();
if (ph) {
- ph.set(m.high);
+ ph.set(stat.high);
ph = phs.next();
- ph.set(m.low);
+ ph.set(stat.low);
} else {
- td.text().set(m.rate);
+ td.text().set(stat.rate);
}
}
@@ -45,8 +45,8 @@
</table>
[MeasurementSet.item_html]
-<tr m="{item_name}">
+<tr stat="{item_name}">
<th>{item_title}</th>
- <td class="ralign">{item_value}</td>
- <td class="ralign">{item_extra}</td>
+ <td class="ralign"> {item_value}</td>
+ <td class="ralign"> {item_extra}</td>
</tr>
Modified: mgmt/cumin/python/cumin/model.py
===================================================================
--- mgmt/cumin/python/cumin/model.py 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/python/cumin/model.py 2007-11-16 19:55:33 UTC (rev 1327)
@@ -1,10 +1,271 @@
+from mint.schema import *
from wooly import *
from wooly.model import *
-class CuminModel(Model):
+class CuminModel(object):
def __init__(self):
- super(CuminModel, self).__init__()
+ self.classes = dict()
+ self.queue = CuminQueue()
+ self.add_class(self.queue)
+
+ def add_class(self, cls):
+ self.classes[cls.mint_class] = cls
+
+ def get_class(self, mint_object):
+ return self.classes[mint_object.__class__]
+
+class CuminClass(object):
+ def __init__(self, mint_class):
+ self.mint_class = mint_class
+
+ self.stats = list()
+
+ def add_stat(self, stat):
+ self.stats.append(stat)
+
+ def get_stat(self, name):
+ for stat in self.stats:
+ if stat.name == name:
+ return stat
+
+ def write_error_xml(self, object, writer):
+ writer.write("<error-count>%i</error-count>" % 0)
+ writer.write("<warning-count>%i</warning-count>" % 0)
+
+ def write_stat_xml(self, object, writer):
+ for stat in self.stats:
+ stat.write_xml(object, writer)
+
+class CuminStat(object):
+ def __init__(self, name, unit, title=None, categories=()):
+ self.name = name
+ self.unit = unit
+ self.title = None
+ self.categories = categories
+
+ self.link_cb = None
+ self.highlow = False
+
+ def stats(self, object):
+ if isinstance(object, MintQueue):
+ stats = object.mintQueueStats
+ elif isinstance(object, MintExchange):
+ stats = object.mintExchangeStats
+ elif isinstance(object, MintClient):
+ stats = object.mintClientStats
+ else:
+ raise Exception()
+
+ return stats
+
+ def value(self, object):
+ return getattr(self.stats(object), self.name)
+
+ def rate(self, object):
+ return getattr(self.stats(object), self.name)
+
+ def write_xml(self, object, writer):
+ writer.write("<stat name=\"%s\" value=\"%i\" rate=\"%i\"/>" \
+ % (self.name,
+ self.value(object) or 0,
+ self.rate(object) or 0))
+
+class CuminQueue(CuminClass):
+ def __init__(self):
+ super(CuminQueue, self).__init__(MintQueue)
+
+ stat = CuminStat("consumers", "int")
+ stat.title = "Consumers"
+ stat.unit = "consumer"
+ stat.categories = ("general")
+ stat.highlow = True
+ self.add_stat(stat)
+
+ stat = CuminStat("bindings", "int")
+ stat.title = "Bindings"
+ stat.unit = "binding"
+ stat.categories = ("general")
+ stat.highlow = True
+ self.add_stat(stat)
+
+ stat = CuminStat("msgDepth", "int")
+ stat.title = "Message Depth"
+ stat.unit = "message"
+ stat.categories = ("message", "general")
+ stat.highlow = True
+ self.add_stat(stat)
+
+ stat = CuminStat("msgTotalEnqueues", "int")
+ stat.title = "Msgs. Enqueued"
+ stat.unit = "message"
+ stat.categories = ("message", "general")
+ self.add_stat(stat)
+
+ stat = CuminStat("msgTotalDequeues", "int")
+ stat.title = "Msgs. Dequeued"
+ stat.unit = "message"
+ stat.categories = ("message", "general")
+ self.add_stat(stat)
+
+ stat = CuminStat("byteDepth", "int")
+ stat.title = "Byte Depth"
+ stat.unit = "byte"
+ stat.categories = ("byte", "general")
+ stat.highlow = True
+ self.add_stat(stat)
+
+ stat = CuminStat("byteTotalEnqueues", "int")
+ stat.title = "Bytes Enqueued"
+ stat.unit = "byte"
+ stat.categories = ("byte", "general")
+ self.add_stat(stat)
+
+ stat = CuminStat("byteTotalDequeues", "int")
+ stat.title = "Bytes Dequeued"
+ stat.unit = "byte"
+ stat.categories = ("byte", "general")
+ self.add_stat(stat)
+
+ stat = CuminStat("unackedMessages", "int")
+ stat.title = "Msgs. Unacked"
+ stat.unit = "message"
+ stat.categories = ("general")
+ self.add_stat(stat)
+
+ # Disk
+
+ #stat = CuminStat("diskPageSize", "int")
+ #stat.title = "Page size"
+ #stat.categories = ("disk")
+ #self.add_stat(stat)
+
+ stat = CuminStat("diskPages", "int")
+ stat.title = "Disk Pages"
+ stat.unit = "page"
+ stat.categories = ("general")
+ self.add_stat(stat)
+
+ #stat = CuminStat("diskAvailableSize", "int")
+ #stat.title = "Available size"
+ #stat.categories = ("disk")
+ #self.add_stat(stat)
+
+ # Transactional
+
+ stat = CuminStat("msgTxnEnqueues", "int")
+ stat.title = "Msgs. Enqueued"
+ stat.unit = "message"
+ stat.categories = ("message", "transactional")
+ self.add_stat(stat)
+
+ stat = CuminStat("msgTxnDequeues", "int")
+ stat.title = "Msgs. Dequeued"
+ stat.unit = "message"
+ stat.categories = ("message", "transactional")
+ self.add_stat(stat)
+
+ stat = CuminStat("byteTxnEnqueues", "int")
+ stat.title = "Bytes Enqueued"
+ stat.unit = "byte"
+ stat.categories = ("byte", "transactional")
+ self.add_stat(stat)
+
+ stat = CuminStat("byteTxnDequeues", "int")
+ stat.title = "Bytes Dequeued"
+ stat.unit = "byte"
+ stat.categories = ("byte", "transactional")
+ self.add_stat(stat)
+
+ stat = CuminStat("enqueueTxnStarts", "int")
+ stat.title = "Enq. Trans. Started"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ self.add_stat(stat)
+
+ stat = CuminStat("enqueueTxnCommits", "int")
+ stat.title = "Enq. Trans. Committed"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ self.add_stat(stat)
+
+ stat = CuminStat("enqueueTxnRejects", "int")
+ stat.title = "Enq. Trans. Rejected"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ self.add_stat(stat)
+
+ stat = CuminStat("enqueueTxnCount", "int")
+ stat.title = "Enq. Trans. Pending"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ stat.highlow = True
+ self.add_stat(stat)
+
+ stat = CuminStat("dequeueTxnStarts", "int")
+ stat.title = "Deq. Trans. Started"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ self.add_stat(stat)
+
+ stat = CuminStat("dequeueTxnCommits", "int")
+ stat.title = "Deq. Trans. Committed"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ self.add_stat(stat)
+
+ stat = CuminStat("dequeueTxnRejects", "int")
+ stat.title = "Deq. Trans. Rejected"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ self.add_stat(stat)
+
+ stat = CuminStat("dequeueTxnCount", "int")
+ stat.title = "Deq. Trans. Pending"
+ stat.unit = "transaction"
+ stat.categories = ("transaction")
+ stat.highlow = True
+ self.add_stat(stat)
+
+ # Persistent
+
+ stat = CuminStat("msgPersistEnqueues", "int")
+ stat.title = "Msgs. Enqueued"
+ stat.unit = "message"
+ stat.categories = ("message", "persistent")
+ self.add_stat(stat)
+
+ stat = CuminStat("msgPersistDequeues", "int")
+ stat.title = "Msgs. Dequeued"
+ stat.unit = "message"
+ stat.categories = ("message", "persistent")
+ self.add_stat(stat)
+
+ stat = CuminStat("bytePersistEnqueues", "int")
+ stat.title = "Bytes Enqueued"
+ stat.unit = "byte"
+ stat.categories = ("byte", "persistent")
+ self.add_stat(stat)
+
+ stat = CuminStat("bytePersistDequeues", "int")
+ stat.title = "Bytes Dequeued"
+ stat.unit = "byte"
+ stat.categories = ("byte", "persistent")
+ self.add_stat(stat)
+
+ def write_xml(self, queue, writer):
+ writer.write("<queue id=\"%i\">" % queue.id)
+ writer.write("<name>%s</name>" % queue.name)
+
+ self.write_error_xml(queue, writer)
+ self.write_stat_xml(queue, writer)
+
+ writer.write("</queue>")
+
+class DummyModel(Model):
+ def __init__(self):
+ super(DummyModel, self).__init__()
+
self.broker = ModelClass(self, "broker")
self.broker_cluster = ModelClass(self, "broker_cluster")
self.broker_profile = ModelClass(self, "broker_profile")
@@ -175,9 +436,9 @@
def get_realm(self, id):
return self.get_index(self.realm).get(id)
-class CuminModelObject(ModelObject):
+class DummyModelObject(ModelObject):
def __init__(self, model, mclass):
- super(CuminModelObject, self).__init__(model, mclass)
+ super(DummyModelObject, self).__init__(model, mclass)
self.errors = list()
self.warnings = list()
@@ -196,13 +457,13 @@
for measure in self.measurements:
measure.write_xml(writer)
-class BrokerCluster(CuminModelObject):
+class BrokerCluster(DummyModelObject):
def __init__(self, model):
super(BrokerCluster, self).__init__(model, model.broker_cluster)
self.name = None
-class Broker(CuminModelObject):
+class Broker(DummyModelObject):
def __init__(self, model):
super(Broker, self).__init__(model, model.broker)
@@ -222,7 +483,7 @@
writer.write("</broker>")
-class BrokerProfile(CuminModelObject):
+class BrokerProfile(DummyModelObject):
def __init__(self, model):
super(BrokerProfile, self).__init__(model, model.broker_profile)
@@ -237,7 +498,7 @@
self.broker_value = None
self.type = None # ("boolean", "integer", "string")
-class BrokerGroup(CuminModelObject):
+class BrokerGroup(DummyModelObject):
def __init__(self, model):
super(BrokerGroup, self).__init__(model, model.broker_group)
@@ -249,7 +510,7 @@
self.name = None
-class VirtualHost(CuminModelObject):
+class VirtualHost(DummyModelObject):
def __init__(self, model):
super(VirtualHost, self).__init__(model, model.virtual_host)
@@ -297,7 +558,7 @@
self.name = None
-class Realm(CuminModelObject):
+class Realm(DummyModelObject):
def __init__(self, model):
super(Realm, self).__init__(model, model.realm)
@@ -391,7 +652,7 @@
writer.write("<measurement name=\"%s\" value=\"%i\" rate=\"%i\" %s/>" \
% (self.name, self.get_value(), self.get_rate(), hl))
-class Queue(CuminModelObject):
+class Queue(DummyModelObject):
def __init__(self, model):
super(Queue, self).__init__(model, model.queue)
@@ -600,7 +861,7 @@
writer.write("</queue>")
-class Consumer(CuminModelObject):
+class Consumer(DummyModelObject):
def __init__(self, model):
super(Consumer, self).__init__(model, model.consumer)
@@ -625,7 +886,7 @@
measure.highlow = True
self.measurements.append(measure)
-class Exchange(CuminModelObject):
+class Exchange(DummyModelObject):
def __init__(self, model):
super(Exchange, self).__init__(model, model.exchange)
@@ -701,7 +962,7 @@
writer.write("</exchange>")
-class Producer(CuminModelObject):
+class Producer(DummyModelObject):
def __init__(self, model):
super(Producer, self).__init__(model, model.producer)
@@ -717,7 +978,7 @@
measure.categories = ("byte", "general")
self.measurements.append(measure)
-class Binding(CuminModelObject):
+class Binding(DummyModelObject):
def __init__(self, model):
super(Binding, self).__init__(model, model.binding)
@@ -739,7 +1000,7 @@
writer.write("</binding>")
-class Client(CuminModelObject):
+class Client(DummyModelObject):
def __init__(self, model):
super(Client, self).__init__(model, model.client)
@@ -774,7 +1035,7 @@
writer.write("</client>")
-class Session(CuminModelObject):
+class Session(DummyModelObject):
def __init__(self, model):
super(Session, self).__init__(model, model.session)
Modified: mgmt/cumin/python/cumin/queue.strings
===================================================================
--- mgmt/cumin/python/cumin/queue.strings 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/python/cumin/queue.strings 2007-11-16 19:55:33 UTC (rev 1327)
@@ -92,18 +92,18 @@
function updateQueueStatus(id, queue) {
updateStatus(id, queue);
- var m = queue.measurement;
- var mdata = {
+ var s = queue.stat;
+ var sdata = {
"tr": [
null,
- {"td": [m.msgTotalEnqueues.rate, m.byteTotalEnqueues.rate]},
- {"td": [m.msgTotalDequeues.rate, m.byteTotalDequeues.rate]},
- {"td": [m.msgDepth.value, m.byteDepth.value]},
- {"td": [m.msgDepth.rate, m.byteDepth.rate]}
+ {"td": [s.msgTotalEnqueues.rate, s.byteTotalEnqueues.rate]},
+ {"td": [s.msgTotalDequeues.rate, s.byteTotalDequeues.rate]},
+ {"td": [s.msgDepth.value, s.byteDepth.value]},
+ {"td": [s.msgDepth.rate, s.byteDepth.rate]}
]
};
- wooly.doc().elembyid(id).descendant("table.tbody").update(mdata);
+ wooly.doc().elembyid(id).descendant("table.tbody").update(sdata);
}
[QueueStatus.html]
Modified: mgmt/cumin/python/cumin/widgets.py
===================================================================
--- mgmt/cumin/python/cumin/widgets.py 2007-11-16 19:53:44 UTC (rev 1326)
+++ mgmt/cumin/python/cumin/widgets.py 2007-11-16 19:55:33 UTC (rev 1327)
@@ -199,7 +199,8 @@
writer.write("<objects>");
for object in objects:
- object.write_xml(writer)
+ cls = self.app.cmodel.get_class(object)
+ cls.write_xml(object, writer)
writer.write("</objects>");
17 years, 1 month
rhmessaging commits: r1326 - in mgmt/mint: bin and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: nunofsantos
Date: 2007-11-16 14:53:44 -0500 (Fri, 16 Nov 2007)
New Revision: 1326
Added:
mgmt/mint/bin/parse
Removed:
mgmt/mint/parse
Log:
moved parse script to bin directory
Copied: mgmt/mint/bin/parse (from rev 1325, mgmt/mint/parse)
===================================================================
--- mgmt/mint/bin/parse (rev 0)
+++ mgmt/mint/bin/parse 2007-11-16 19:53:44 UTC (rev 1326)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# generate Python
+echo "Generating Python code..."
+python -c "
+from schemaparser import *
+parser = SchemaParser()
+parser.generateCode()
+"
+
+# generate SQL
+echo "Generating SQL code..."
+
+SQLOUTPUT=schema.sql
+
+PYTHONPATH=. sqlobject-admin sql -m schema > $SQLOUTPUT
+RESULT=`grep "circular reference" $SQLOUTPUT | wc -l`
+if [[ $RESULT -gt 0 ]]; then
+ mv $SQLOUTPUT $SQLOUTPUT.1
+ tail -n+3 $SQLOUTPUT.1 > $SQLOUTPUT
+ rm $SQLOUTPUT.1
+fi
+
+echo "Done."
Deleted: mgmt/mint/parse
===================================================================
--- mgmt/mint/parse 2007-11-16 19:50:33 UTC (rev 1325)
+++ mgmt/mint/parse 2007-11-16 19:53:44 UTC (rev 1326)
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-# generate Python
-echo "Generating Python code..."
-python -c "
-from schemaparser import *
-parser = SchemaParser()
-parser.generateCode()
-"
-
-# generate SQL
-echo "Generating SQL code..."
-
-SQLOUTPUT=schema.sql
-
-PYTHONPATH=. sqlobject-admin sql -m schema > $SQLOUTPUT
-RESULT=`grep "circular reference" $SQLOUTPUT | wc -l`
-if [[ $RESULT -gt 0 ]]; then
- mv $SQLOUTPUT $SQLOUTPUT.1
- tail -n+3 $SQLOUTPUT.1 > $SQLOUTPUT
- rm $SQLOUTPUT.1
-fi
-
-echo "Done."
17 years, 1 month
rhmessaging commits: r1325 - mgmt/mint/python/mint.
by rhmessaging-commits@lists.jboss.org
Author: nunofsantos
Date: 2007-11-16 14:50:33 -0500 (Fri, 16 Nov 2007)
New Revision: 1325
Modified:
mgmt/mint/python/mint/schema.py
mgmt/mint/python/mint/schema.sql
Log:
new approach to code generation: generate Python from XML, and then use sqlobject-admin to generate the corresponding SQL
Modified: mgmt/mint/python/mint/schema.py
===================================================================
--- mgmt/mint/python/mint/schema.py 2007-11-16 19:46:54 UTC (rev 1324)
+++ mgmt/mint/python/mint/schema.py 2007-11-16 19:50:33 UTC (rev 1325)
@@ -1,241 +1,366 @@
from sqlobject import *
+from datetime import datetime
+conn = connectionForURI("postgresql://localhost/")
+sqlhub.processConnection = conn
-class MintSystemStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
-class MintSystem(SQLObject):
- schemaId = 1
- schemaName = "system"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- _SO_class_Mint_system_stats = MintSystemStats
+class System(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('SystemStats', cascade='null', default=None)
+ sysId = StringCol(length=1000, default=None)
-class MintBrokerStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class SystemStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ system = ForeignKey('System', cascade='null', default=None)
-class MintBroker(SQLObject):
- schemaId = 2
- schemaName = "broker"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- def joinCluster(self, model, callbackMethod, clusterName):
+class Broker(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('BrokerStats', cascade='null', default=None)
+ system = ForeignKey('System', cascade='null', default=None)
+ port = SmallIntCol(default=None)
+ workerThreads = SmallIntCol(default=None)
+ maxConns = SmallIntCol(default=None)
+ connBacklog = SmallIntCol(default=None)
+ stagingThreshold = IntCol(default=None)
+ storeLib = StringCol(length=1000, default=None)
+ asyncStore = BoolCol(default=None)
+ mgmtPubInterval = SmallIntCol(default=None)
+ initialDiskPageSize = IntCol(default=None)
+ initialPagesPerQueue = IntCol(default=None)
+ clusterName = StringCol(length=1000, default=None)
+ version = StringCol(length=1000, default=None)
+
+ def joinCluster(self, model, managedBrokerLabel, callbackMethod, clusterName):
actualArgs = dict()
actualArgs["clusterName"] = clusterName
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "joinCluster", args=actualArgs, packageName="qpid")
- def leaveCluster(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "joinCluster", args=actualArgs, packageName="qpid")
+
+ def leaveCluster(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "leaveCluster", args=actualArgs, packageName="qpid")
- def echo(self, model, callbackMethod, sequence, body):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "leaveCluster", args=actualArgs, packageName="qpid")
+
+ def echo(self, model, managedBrokerLabel, callbackMethod, sequence, body):
actualArgs = dict()
actualArgs["sequence"] = sequence
actualArgs["body"] = body
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "echo", args=actualArgs, packageName="qpid")
- def crash(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "echo", args=actualArgs, packageName="qpid")
+
+ def crash(self, model, managedBrokerLabel, callbackMethod):
"""Temporary test method to crash the broker"""
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "crash", args=actualArgs, packageName="qpid")
- _SO_class_Mint_broker_stats = MintBrokerStats
- _SO_class_Mint_system = MintSystem
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "crash", args=actualArgs, packageName="qpid")
-class MintVhostStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class BrokerStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ broker = ForeignKey('Broker', cascade='null', default=None)
-MintSystem.sqlmeta.addJoin(MultipleJoin('MintBroker', joinMethodName='brokers'))
+class Vhost(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('VhostStats', cascade='null', default=None)
+ broker = ForeignKey('Broker', cascade='null', default=None)
+ name = StringCol(length=1000, default=None)
-class MintVhost(SQLObject):
- schemaId = 3
- schemaName = "vhost"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- _SO_class_Mint_vhost_stats = MintVhostStats
- _SO_class_Mint_broker = MintBroker
+class VhostStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ vhost = ForeignKey('Vhost', cascade='null', default=None)
-class MintQueueStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class Queue(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('QueueStats', cascade='null', default=None)
+ vhost = ForeignKey('Vhost', cascade='null', default=None)
+ name = StringCol(length=1000, default=None)
+ durable = BoolCol(default=None)
+ autoDelete = BoolCol(default=None)
+ exclusive = BoolCol(default=None)
+ pageMemoryLimit = IntCol(default=None)
-MintBroker.sqlmeta.addJoin(MultipleJoin('MintVhost', joinMethodName='vhosts'))
-
-class MintQueue(SQLObject):
- schemaId = 4
- schemaName = "queue"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- def purge(self, model, callbackMethod):
+ def purge(self, model, managedBrokerLabel, callbackMethod):
"""Discard all messages on queue"""
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "purge", args=actualArgs, packageName="qpid")
- def increaseDiskSize(self, model, callbackMethod, pages):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "purge", args=actualArgs, packageName="qpid")
+
+ def increaseDiskSize(self, model, managedBrokerLabel, callbackMethod, pages):
"""Increase number of disk pages allocated for this queue"""
actualArgs = dict()
actualArgs["pages"] = pages
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "increaseDiskSize", args=actualArgs, packageName="qpid")
- _SO_class_Mint_queue_stats = MintQueueStats
- _SO_class_Mint_vhost = MintVhost
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "increaseDiskSize", args=actualArgs, packageName="qpid")
-class MintExchangeStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class QueueStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ queue = ForeignKey('Queue', cascade='null', default=None)
+ diskPageSize = IntCol(default=None)
+ diskPages = IntCol(default=None)
+ diskAvailableSize = IntCol(default=None)
+ msgTotalEnqueues = BigIntCol(default=None)
+ msgTotalDequeues = BigIntCol(default=None)
+ msgTxnEnqueues = BigIntCol(default=None)
+ msgTxnDequeues = BigIntCol(default=None)
+ msgPersistEnqueues = BigIntCol(default=None)
+ msgPersistDequeues = BigIntCol(default=None)
+ msgDepth = IntCol(default=None)
+ msgDepthLow = IntCol(default=None)
+ msgDepthHigh = IntCol(default=None)
+ byteTotalEnqueues = BigIntCol(default=None)
+ byteTotalDequeues = BigIntCol(default=None)
+ byteTxnEnqueues = BigIntCol(default=None)
+ byteTxnDequeues = BigIntCol(default=None)
+ bytePersistEnqueues = BigIntCol(default=None)
+ bytePersistDequeues = BigIntCol(default=None)
+ byteDepth = IntCol(default=None)
+ byteDepthLow = IntCol(default=None)
+ byteDepthHigh = IntCol(default=None)
+ enqueueTxnStarts = BigIntCol(default=None)
+ enqueueTxnCommits = BigIntCol(default=None)
+ enqueueTxnRejects = BigIntCol(default=None)
+ enqueueTxnCount = IntCol(default=None)
+ enqueueTxnCountLow = IntCol(default=None)
+ enqueueTxnCountHigh = IntCol(default=None)
+ dequeueTxnStarts = BigIntCol(default=None)
+ dequeueTxnCommits = BigIntCol(default=None)
+ dequeueTxnRejects = BigIntCol(default=None)
+ dequeueTxnCount = IntCol(default=None)
+ dequeueTxnCountLow = IntCol(default=None)
+ dequeueTxnCountHigh = IntCol(default=None)
+ consumers = IntCol(default=None)
+ consumersLow = IntCol(default=None)
+ consumersHigh = IntCol(default=None)
+ bindings = IntCol(default=None)
+ bindingsLow = IntCol(default=None)
+ bindingsHigh = IntCol(default=None)
+ unackedMessages = IntCol(default=None)
+ unackedMessagesLow = IntCol(default=None)
+ unackedMessagesHigh = IntCol(default=None)
-MintVhost.sqlmeta.addJoin(MultipleJoin('MintQueue', joinMethodName='queues'))
+class Exchange(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('ExchangeStats', cascade='null', default=None)
+ vhost = ForeignKey('Vhost', cascade='null', default=None)
+ name = StringCol(length=1000, default=None)
+ type = StringCol(length=1000, default=None)
-class MintExchange(SQLObject):
- schemaId = 5
- schemaName = "exchange"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- _SO_class_Mint_exchange_stats = MintExchangeStats
- _SO_class_Mint_vhost = MintVhost
+class ExchangeStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ exchange = ForeignKey('Exchange', cascade='null', default=None)
+ producers = IntCol(default=None)
+ producersLow = IntCol(default=None)
+ producersHigh = IntCol(default=None)
+ bindings = IntCol(default=None)
+ bindingsLow = IntCol(default=None)
+ bindingsHigh = IntCol(default=None)
+ msgReceives = BigIntCol(default=None)
+ msgDrops = BigIntCol(default=None)
+ msgRoutes = BigIntCol(default=None)
+ byteReceives = BigIntCol(default=None)
+ byteDrops = BigIntCol(default=None)
+ byteRoutes = BigIntCol(default=None)
-class MintBindingStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class Binding(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('BindingStats', cascade='null', default=None)
+ queue = ForeignKey('Queue', cascade='null', default=None)
+ exchange = ForeignKey('Exchange', cascade='null', default=None)
+ bindingKey = StringCol(length=1000, default=None)
-MintVhost.sqlmeta.addJoin(MultipleJoin('MintExchange', joinMethodName='exchanges'))
+class BindingStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ binding = ForeignKey('Binding', cascade='null', default=None)
+ msgMatched = BigIntCol(default=None)
-class MintBinding(SQLObject):
- schemaId = 6
- schemaName = "binding"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- _SO_class_Mint_binding_stats = MintBindingStats
- _SO_class_Mint_queue = MintQueue
- _SO_class_Mint_exchange = MintExchange
+class Client(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('ClientStats', cascade='null', default=None)
+ vhost = ForeignKey('Vhost', cascade='null', default=None)
+ ipAddr = IntCol(default=None)
+ port = SmallIntCol(default=None)
-class MintClientStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
-
-MintQueue.sqlmeta.addJoin(MultipleJoin('MintBinding', joinMethodName='bindings'))
-MintExchange.sqlmeta.addJoin(MultipleJoin('MintBinding', joinMethodName='bindings'))
-
-class MintClient(SQLObject):
- schemaId = 7
- schemaName = "client"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- def close(self, model, callbackMethod):
+ def close(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "close", args=actualArgs, packageName="qpid")
- def detach(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "close", args=actualArgs, packageName="qpid")
+
+ def detach(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "detach", args=actualArgs, packageName="qpid")
- _SO_class_Mint_client_stats = MintClientStats
- _SO_class_Mint_vhost = MintVhost
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "detach", args=actualArgs, packageName="qpid")
-class MintSessionStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class ClientStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ client = ForeignKey('Client', cascade='null', default=None)
+ authIdentity = StringCol(length=1000, default=None)
+ msgsProduced = BigIntCol(default=None)
+ msgsConsumed = BigIntCol(default=None)
+ bytesProduced = BigIntCol(default=None)
+ bytesConsumed = BigIntCol(default=None)
-MintVhost.sqlmeta.addJoin(MultipleJoin('MintClient', joinMethodName='clients'))
+class Session(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('SessionStats', cascade='null', default=None)
+ vhost = ForeignKey('Vhost', cascade='null', default=None)
+ name = StringCol(length=1000, default=None)
+ client = ForeignKey('Client', cascade='null', default=None)
+ detachedLifespan = IntCol(default=None)
-class MintSession(SQLObject):
- schemaId = 8
- schemaName = "session"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- def solicitAck(self, model, callbackMethod):
+ def solicitAck(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "solicitAck", args=actualArgs, packageName="qpid")
- def detach(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "solicitAck", args=actualArgs, packageName="qpid")
+
+ def detach(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "detach", args=actualArgs, packageName="qpid")
- def resetLifespan(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "detach", args=actualArgs, packageName="qpid")
+
+ def resetLifespan(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "resetLifespan", args=actualArgs, packageName="qpid")
- def close(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "resetLifespan", args=actualArgs, packageName="qpid")
+
+ def close(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "close", args=actualArgs, packageName="qpid")
- _SO_class_Mint_session_stats = MintSessionStats
- _SO_class_Mint_vhost = MintVhost
- _SO_class_Mint_client = MintClient
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "close", args=actualArgs, packageName="qpid")
-class MintDestinationStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class SessionStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ session = ForeignKey('Session', cascade='null', default=None)
+ attached = BoolCol(default=None)
+ remainingLifespan = IntCol(default=None)
+ framesOutstanding = IntCol(default=None)
-MintVhost.sqlmeta.addJoin(MultipleJoin('MintSession', joinMethodName='sessions'))
-MintClient.sqlmeta.addJoin(MultipleJoin('MintSession', joinMethodName='sessions'))
+class Destination(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('DestinationStats', cascade='null', default=None)
+ session = ForeignKey('Session', cascade='null', default=None)
+ name = StringCol(length=1000, default=None)
-class MintDestination(SQLObject):
- schemaId = 9
- schemaName = "destination"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- def throttle(self, model, callbackMethod, strength):
+ def throttle(self, model, managedBrokerLabel, callbackMethod, strength):
"""Apply extra rate limiting to destination: 0 = Normal, 10 = Maximum"""
actualArgs = dict()
actualArgs["strength"] = strength
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "throttle", args=actualArgs, packageName="qpid")
- def stop(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "throttle", args=actualArgs, packageName="qpid")
+
+ def stop(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "stop", args=actualArgs, packageName="qpid")
- def start(self, model, callbackMethod):
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "stop", args=actualArgs, packageName="qpid")
+
+ def start(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "start", args=actualArgs, packageName="qpid")
- _SO_class_Mint_destination_stats = MintDestinationStats
- _SO_class_Mint_session = MintSession
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "start", args=actualArgs, packageName="qpid")
-class MintProducerStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class DestinationStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ destination = ForeignKey('Destination', cascade='null', default=None)
+ flowMode = SmallIntCol(default=None)
+ maxMsgCredits = IntCol(default=None)
+ maxByteCredits = IntCol(default=None)
+ msgCredits = IntCol(default=None)
+ byteCredits = IntCol(default=None)
-MintSession.sqlmeta.addJoin(MultipleJoin('MintDestination', joinMethodName='destinations'))
+class Producer(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('ProducerStats', cascade='null', default=None)
+ destination = ForeignKey('Destination', cascade='null', default=None)
+ exchange = ForeignKey('Exchange', cascade='null', default=None)
-class MintProducer(SQLObject):
- schemaId = 10
- schemaName = "producer"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- _SO_class_Mint_producer_stats = MintProducerStats
- _SO_class_Mint_destination = MintDestination
- _SO_class_Mint_exchange = MintExchange
+class ProducerStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ producer = ForeignKey('Producer', cascade='null', default=None)
+ msgsProduced = BigIntCol(default=None)
+ bytesProduced = BigIntCol(default=None)
-class MintConsumerStats(SQLObject):
- class sqlmeta:
- fromDatabase = True
+class Consumer(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ creationTime = TimestampCol(default=None)
+ deletionTime = TimestampCol(default=None)
+ managedBroker = StringCol(length=1000, default=None)
+ stats = ForeignKey('ConsumerStats', cascade='null', default=None)
+ destination = ForeignKey('Destination', cascade='null', default=None)
+ queue = ForeignKey('Queue', cascade='null', default=None)
-MintDestination.sqlmeta.addJoin(MultipleJoin('MintProducer', joinMethodName='producers'))
-MintExchange.sqlmeta.addJoin(MultipleJoin('MintProducer', joinMethodName='producers'))
-
-class MintConsumer(SQLObject):
- schemaId = 11
- schemaName = "consumer"
- managedBroker = None
- class sqlmeta:
- fromDatabase = True
- def close(self, model, callbackMethod):
+ def close(self, model, managedBrokerLabel, callbackMethod):
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
- self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "close", args=actualArgs, packageName="qpid")
- _SO_class_Mint_consumer_stats = MintConsumerStats
- _SO_class_Mint_destination = MintDestination
- _SO_class_Mint_queue = MintQueue
+ model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], "close", args=actualArgs, packageName="qpid")
+
+class ConsumerStats(SQLObject):
+ idOriginal = BigIntCol(default=None)
+ recTime = TimestampCol(default=None)
+ consumer = ForeignKey('Consumer', cascade='null', default=None)
+ msgsConsumed = BigIntCol(default=None)
+ bytesConsumed = BigIntCol(default=None)
+ unackedMessages = IntCol(default=None)
+ unackedMessagesLow = IntCol(default=None)
+ unackedMessagesHigh = IntCol(default=None)
+
+classToSchemaNameMap = dict()
+classToSchemaNameMap['System'] = 'system'
+classToSchemaNameMap['Broker'] = 'broker'
+classToSchemaNameMap['Vhost'] = 'vhost'
+classToSchemaNameMap['Queue'] = 'queue'
+classToSchemaNameMap['Exchange'] = 'exchange'
+classToSchemaNameMap['Binding'] = 'binding'
+classToSchemaNameMap['Client'] = 'client'
+classToSchemaNameMap['Session'] = 'session'
+classToSchemaNameMap['Destination'] = 'destination'
+classToSchemaNameMap['Producer'] = 'producer'
+classToSchemaNameMap['Consumer'] = 'consumer'
Modified: mgmt/mint/python/mint/schema.sql
===================================================================
--- mgmt/mint/python/mint/schema.sql 2007-11-16 19:46:54 UTC (rev 1324)
+++ mgmt/mint/python/mint/schema.sql 2007-11-16 19:50:33 UTC (rev 1325)
@@ -1,332 +1,375 @@
+CREATE TABLE binding (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ queue_id INT,
+ exchange_id INT,
+ binding_key VARCHAR(1000)
+);
-CREATE TABLE mint_system (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_system_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- sys_ident VARCHAR(1000)
+CREATE TABLE binding_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ binding_id INT,
+ msg_matched BIGINT
);
-CREATE INDEX mint_system_sys_ident_index ON mint_system(sys_ident);
+CREATE TABLE broker (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ system_id INT,
+ port SMALLINT,
+ worker_threads SMALLINT,
+ max_conns SMALLINT,
+ conn_backlog SMALLINT,
+ staging_threshold INT,
+ store_lib VARCHAR(1000),
+ async_store BOOL,
+ mgmt_pub_interval SMALLINT,
+ initial_disk_page_size INT,
+ initial_pages_per_queue INT,
+ cluster_name VARCHAR(1000),
+ version VARCHAR(1000)
+);
-CREATE TABLE mint_system_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_system_id BIGINT REFERENCES mint_system ,
- rec_time TIMESTAMP
+CREATE TABLE broker_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ broker_id INT
);
-ALTER TABLE mint_system ADD FOREIGN KEY (mint_system_stats_id) REFERENCES mint_system_stats;
+CREATE TABLE client (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ vhost_id INT,
+ ip_addr INT,
+ port SMALLINT
+);
-CREATE TABLE mint_broker (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_broker_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_system_id BIGINT REFERENCES mint_system,
- port INT2 ,
- worker_threads INT2 ,
- max_conns INT2 ,
- conn_backlog INT2 ,
- staging_threshold INT4 ,
- store_lib VARCHAR(1000) ,
- async_store BOOLEAN ,
- mgmt_pub_interval INT2 ,
- initial_disk_page_size INT4 ,
- initial_pages_per_queue INT4 ,
- cluster_name VARCHAR(1000) ,
- version VARCHAR(1000)
+CREATE TABLE client_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ client_id INT,
+ auth_identity VARCHAR(1000),
+ msgs_produced BIGINT,
+ msgs_consumed BIGINT,
+ bytes_produced BIGINT,
+ bytes_consumed BIGINT
);
-CREATE INDEX mint_broker_port_index ON mint_broker(port);
+CREATE TABLE consumer (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ destination_id INT,
+ queue_id INT
+);
-CREATE TABLE mint_broker_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_broker_id BIGINT REFERENCES mint_broker ,
- rec_time TIMESTAMP
+CREATE TABLE consumer_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ consumer_id INT,
+ msgs_consumed BIGINT,
+ bytes_consumed BIGINT,
+ unacked_messages INT,
+ unacked_messages_low INT,
+ unacked_messages_high INT
);
-ALTER TABLE mint_broker ADD FOREIGN KEY (mint_broker_stats_id) REFERENCES mint_broker_stats;
+CREATE TABLE destination (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ session_id INT,
+ name VARCHAR(1000)
+);
-CREATE TABLE mint_vhost (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_vhost_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_broker_id BIGINT REFERENCES mint_broker,
- name VARCHAR(1000)
+CREATE TABLE destination_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ destination_id INT,
+ flow_mode SMALLINT,
+ max_msg_credits INT,
+ max_byte_credits INT,
+ msg_credits INT,
+ byte_credits INT
);
-CREATE INDEX mint_vhost_name_index ON mint_vhost(name);
+CREATE TABLE exchange (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ vhost_id INT,
+ name VARCHAR(1000),
+ type VARCHAR(1000)
+);
-CREATE TABLE mint_vhost_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_vhost_id BIGINT REFERENCES mint_vhost ,
- rec_time TIMESTAMP
+CREATE TABLE exchange_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ exchange_id INT,
+ producers INT,
+ producers_low INT,
+ producers_high INT,
+ bindings INT,
+ bindings_low INT,
+ bindings_high INT,
+ msg_receives BIGINT,
+ msg_drops BIGINT,
+ msg_routes BIGINT,
+ byte_receives BIGINT,
+ byte_drops BIGINT,
+ byte_routes BIGINT
);
-ALTER TABLE mint_vhost ADD FOREIGN KEY (mint_vhost_stats_id) REFERENCES mint_vhost_stats;
+CREATE TABLE producer (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ destination_id INT,
+ exchange_id INT
+);
-CREATE TABLE mint_queue (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_queue_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_vhost_id BIGINT REFERENCES mint_vhost,
- name VARCHAR(1000) ,
- durable BOOLEAN ,
- auto_delete BOOLEAN ,
- exclusive BOOLEAN ,
- page_memory_limit INT4
+CREATE TABLE producer_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ producer_id INT,
+ msgs_produced BIGINT,
+ bytes_produced BIGINT
);
-CREATE INDEX mint_queue_name_index ON mint_queue(name);
+CREATE TABLE queue (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ vhost_id INT,
+ name VARCHAR(1000),
+ durable BOOL,
+ auto_delete BOOL,
+ exclusive BOOL,
+ page_memory_limit INT
+);
-CREATE TABLE mint_queue_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_queue_id BIGINT REFERENCES mint_queue ,
- rec_time TIMESTAMP,
- disk_page_size INT4 ,
- disk_pages INT4 ,
- disk_available_size INT4 ,
- msg_total_enqueues INT8 ,
- msg_total_dequeues INT8 ,
- msg_txn_enqueues INT8 ,
- msg_txn_dequeues INT8 ,
- msg_persist_enqueues INT8 ,
- msg_persist_dequeues INT8 ,
- msg_depth INT4 ,
- msg_depth_high INT4 ,
- msg_depth_low INT4 ,
- byte_total_enqueues INT8 ,
- byte_total_dequeues INT8 ,
- byte_txn_enqueues INT8 ,
- byte_txn_dequeues INT8 ,
- byte_persist_enqueues INT8 ,
- byte_persist_dequeues INT8 ,
- byte_depth INT4 ,
- byte_depth_high INT4 ,
- byte_depth_low INT4 ,
- enqueue_txn_starts INT8 ,
- enqueue_txn_commits INT8 ,
- enqueue_txn_rejects INT8 ,
- enqueue_txn_count INT4 ,
- enqueue_txn_count_high INT4 ,
- enqueue_txn_count_low INT4 ,
- dequeue_txn_starts INT8 ,
- dequeue_txn_commits INT8 ,
- dequeue_txn_rejects INT8 ,
- dequeue_txn_count INT4 ,
- dequeue_txn_count_high INT4 ,
- dequeue_txn_count_low INT4 ,
- consumers INT4 ,
- consumers_high INT4 ,
- consumers_low INT4 ,
- bindings INT4 ,
- bindings_high INT4 ,
- bindings_low INT4 ,
- unacked_messages INT4 ,
- unacked_messages_high INT4 ,
- unacked_messages_low INT4
+CREATE TABLE queue_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ queue_id INT,
+ disk_page_size INT,
+ disk_pages INT,
+ disk_available_size INT,
+ msg_total_enqueues BIGINT,
+ msg_total_dequeues BIGINT,
+ msg_txn_enqueues BIGINT,
+ msg_txn_dequeues BIGINT,
+ msg_persist_enqueues BIGINT,
+ msg_persist_dequeues BIGINT,
+ msg_depth INT,
+ msg_depth_low INT,
+ msg_depth_high INT,
+ byte_total_enqueues BIGINT,
+ byte_total_dequeues BIGINT,
+ byte_txn_enqueues BIGINT,
+ byte_txn_dequeues BIGINT,
+ byte_persist_enqueues BIGINT,
+ byte_persist_dequeues BIGINT,
+ byte_depth INT,
+ byte_depth_low INT,
+ byte_depth_high INT,
+ enqueue_txn_starts BIGINT,
+ enqueue_txn_commits BIGINT,
+ enqueue_txn_rejects BIGINT,
+ enqueue_txn_count INT,
+ enqueue_txn_count_low INT,
+ enqueue_txn_count_high INT,
+ dequeue_txn_starts BIGINT,
+ dequeue_txn_commits BIGINT,
+ dequeue_txn_rejects BIGINT,
+ dequeue_txn_count INT,
+ dequeue_txn_count_low INT,
+ dequeue_txn_count_high INT,
+ consumers INT,
+ consumers_low INT,
+ consumers_high INT,
+ bindings INT,
+ bindings_low INT,
+ bindings_high INT,
+ unacked_messages INT,
+ unacked_messages_low INT,
+ unacked_messages_high INT
);
-ALTER TABLE mint_queue ADD FOREIGN KEY (mint_queue_stats_id) REFERENCES mint_queue_stats;
+CREATE TABLE session (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ vhost_id INT,
+ name VARCHAR(1000),
+ client_id INT,
+ detached_lifespan INT
+);
-CREATE TABLE mint_exchange (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_exchange_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_vhost_id BIGINT REFERENCES mint_vhost,
- name VARCHAR(1000) ,
- type VARCHAR(1000)
+CREATE TABLE session_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ session_id INT,
+ attached BOOL,
+ remaining_lifespan INT,
+ frames_outstanding INT
);
-CREATE INDEX mint_exchange_name_index ON mint_exchange(name);
+CREATE TABLE system (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ sys_id VARCHAR(1000)
+);
-CREATE TABLE mint_exchange_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_exchange_id BIGINT REFERENCES mint_exchange ,
- rec_time TIMESTAMP,
- producers INT4 ,
- producers_high INT4 ,
- producers_low INT4 ,
- bindings INT4 ,
- bindings_high INT4 ,
- bindings_low INT4 ,
- msg_receives INT8 ,
- msg_drops INT8 ,
- msg_routes INT8 ,
- byte_receives INT8 ,
- byte_drops INT8 ,
- byte_routes INT8
+CREATE TABLE system_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ system_id INT
);
-ALTER TABLE mint_exchange ADD FOREIGN KEY (mint_exchange_stats_id) REFERENCES mint_exchange_stats;
-
-CREATE TABLE mint_binding (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_binding_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_queue_id BIGINT REFERENCES mint_queue,
- mint_exchange_id BIGINT REFERENCES mint_exchange,
- binding_key VARCHAR(1000)
+CREATE TABLE vhost (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ creation_time TIMESTAMP,
+ deletion_time TIMESTAMP,
+ managed_broker VARCHAR(1000),
+ stats_id INT,
+ broker_id INT,
+ name VARCHAR(1000)
);
-CREATE TABLE mint_binding_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_binding_id BIGINT REFERENCES mint_binding ,
- rec_time TIMESTAMP,
- msg_matched INT8
+CREATE TABLE vhost_stats (
+ id SERIAL PRIMARY KEY,
+ id_original BIGINT,
+ rec_time TIMESTAMP,
+ vhost_id INT
);
-ALTER TABLE mint_binding ADD FOREIGN KEY (mint_binding_stats_id) REFERENCES mint_binding_stats;
+ALTER TABLE binding ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES binding_stats (id) ON DELETE SET NULL;
-CREATE TABLE mint_client (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_client_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_vhost_id BIGINT REFERENCES mint_vhost,
- ip_addr INT4 ,
- port INT2
-);
+ALTER TABLE binding ADD CONSTRAINT queue_id_exists FOREIGN KEY (queue_id) REFERENCES queue (id) ON DELETE SET NULL;
-CREATE INDEX mint_client_ip_addr_index ON mint_client(ip_addr);
+ALTER TABLE binding ADD CONSTRAINT exchange_id_exists FOREIGN KEY (exchange_id) REFERENCES exchange (id) ON DELETE SET NULL;
-CREATE INDEX mint_client_port_index ON mint_client(port);
+ALTER TABLE binding_stats ADD CONSTRAINT binding_id_exists FOREIGN KEY (binding_id) REFERENCES binding (id) ON DELETE SET NULL;
-CREATE TABLE mint_client_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_client_id BIGINT REFERENCES mint_client ,
- rec_time TIMESTAMP,
- auth_identity VARCHAR(1000) ,
- msgs_produced INT8 ,
- msgs_consumed INT8 ,
- bytes_produced INT8 ,
- bytes_consumed INT8
-);
+ALTER TABLE broker ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES broker_stats (id) ON DELETE SET NULL;
-ALTER TABLE mint_client ADD FOREIGN KEY (mint_client_stats_id) REFERENCES mint_client_stats;
+ALTER TABLE broker ADD CONSTRAINT system_id_exists FOREIGN KEY (system_id) REFERENCES system (id) ON DELETE SET NULL;
-CREATE TABLE mint_session (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_session_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_vhost_id BIGINT REFERENCES mint_vhost,
- name VARCHAR(1000) ,
- mint_client_id BIGINT REFERENCES mint_client,
- detached_lifespan INT4
-);
+ALTER TABLE broker_stats ADD CONSTRAINT broker_id_exists FOREIGN KEY (broker_id) REFERENCES broker (id) ON DELETE SET NULL;
-CREATE INDEX mint_session_name_index ON mint_session(name);
+ALTER TABLE client ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES client_stats (id) ON DELETE SET NULL;
-CREATE TABLE mint_session_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_session_id BIGINT REFERENCES mint_session ,
- rec_time TIMESTAMP,
- attached BOOLEAN ,
- remaining_lifespan INT4 ,
- frames_outstanding INT4
-);
+ALTER TABLE client ADD CONSTRAINT vhost_id_exists FOREIGN KEY (vhost_id) REFERENCES vhost (id) ON DELETE SET NULL;
-ALTER TABLE mint_session ADD FOREIGN KEY (mint_session_stats_id) REFERENCES mint_session_stats;
+ALTER TABLE client_stats ADD CONSTRAINT client_id_exists FOREIGN KEY (client_id) REFERENCES client (id) ON DELETE SET NULL;
-CREATE TABLE mint_destination (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_destination_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_session_id BIGINT REFERENCES mint_session,
- name VARCHAR(1000)
-);
+ALTER TABLE consumer ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES consumer_stats (id) ON DELETE SET NULL;
-CREATE INDEX mint_destination_name_index ON mint_destination(name);
+ALTER TABLE consumer ADD CONSTRAINT destination_id_exists FOREIGN KEY (destination_id) REFERENCES destination (id) ON DELETE SET NULL;
-CREATE TABLE mint_destination_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_destination_id BIGINT REFERENCES mint_destination ,
- rec_time TIMESTAMP,
- flow_mode INT2 ,
- max_msg_credits INT4 ,
- max_byte_credits INT4 ,
- msg_credits INT4 ,
- byte_credits INT4
-);
+ALTER TABLE consumer ADD CONSTRAINT queue_id_exists FOREIGN KEY (queue_id) REFERENCES queue (id) ON DELETE SET NULL;
-ALTER TABLE mint_destination ADD FOREIGN KEY (mint_destination_stats_id) REFERENCES mint_destination_stats;
+ALTER TABLE consumer_stats ADD CONSTRAINT consumer_id_exists FOREIGN KEY (consumer_id) REFERENCES consumer (id) ON DELETE SET NULL;
-CREATE TABLE mint_producer (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_producer_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_destination_id BIGINT REFERENCES mint_destination,
- mint_exchange_id BIGINT REFERENCES mint_exchange
-);
+ALTER TABLE destination ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES destination_stats (id) ON DELETE SET NULL;
-CREATE TABLE mint_producer_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_producer_id BIGINT REFERENCES mint_producer ,
- rec_time TIMESTAMP,
- msgs_produced INT8 ,
- bytes_produced INT8
-);
+ALTER TABLE destination ADD CONSTRAINT session_id_exists FOREIGN KEY (session_id) REFERENCES session (id) ON DELETE SET NULL;
-ALTER TABLE mint_producer ADD FOREIGN KEY (mint_producer_stats_id) REFERENCES mint_producer_stats;
+ALTER TABLE destination_stats ADD CONSTRAINT destination_id_exists FOREIGN KEY (destination_id) REFERENCES destination (id) ON DELETE SET NULL;
-CREATE TABLE mint_consumer (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_consumer_stats_id BIGINT,
- rec_time TIMESTAMP,
- creation_time TIMESTAMP,
- deletion_time TIMESTAMP,
- mint_destination_id BIGINT REFERENCES mint_destination,
- mint_queue_id BIGINT REFERENCES mint_queue
-);
+ALTER TABLE exchange ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES exchange_stats (id) ON DELETE SET NULL;
-CREATE TABLE mint_consumer_stats (
- id BIGSERIAL PRIMARY KEY,
- id_original BIGINT,
- mint_consumer_id BIGINT REFERENCES mint_consumer ,
- rec_time TIMESTAMP,
- msgs_consumed INT8 ,
- bytes_consumed INT8 ,
- unacked_messages INT4 ,
- unacked_messages_high INT4 ,
- unacked_messages_low INT4
-);
+ALTER TABLE exchange ADD CONSTRAINT vhost_id_exists FOREIGN KEY (vhost_id) REFERENCES vhost (id) ON DELETE SET NULL;
-ALTER TABLE mint_consumer ADD FOREIGN KEY (mint_consumer_stats_id) REFERENCES mint_consumer_stats;
+ALTER TABLE exchange_stats ADD CONSTRAINT exchange_id_exists FOREIGN KEY (exchange_id) REFERENCES exchange (id) ON DELETE SET NULL;
+
+ALTER TABLE producer ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES producer_stats (id) ON DELETE SET NULL;
+
+ALTER TABLE producer ADD CONSTRAINT destination_id_exists FOREIGN KEY (destination_id) REFERENCES destination (id) ON DELETE SET NULL;
+
+ALTER TABLE producer ADD CONSTRAINT exchange_id_exists FOREIGN KEY (exchange_id) REFERENCES exchange (id) ON DELETE SET NULL;
+
+ALTER TABLE producer_stats ADD CONSTRAINT producer_id_exists FOREIGN KEY (producer_id) REFERENCES producer (id) ON DELETE SET NULL;
+
+ALTER TABLE queue ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES queue_stats (id) ON DELETE SET NULL;
+
+ALTER TABLE queue ADD CONSTRAINT vhost_id_exists FOREIGN KEY (vhost_id) REFERENCES vhost (id) ON DELETE SET NULL;
+
+ALTER TABLE queue_stats ADD CONSTRAINT queue_id_exists FOREIGN KEY (queue_id) REFERENCES queue (id) ON DELETE SET NULL;
+
+ALTER TABLE session ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES session_stats (id) ON DELETE SET NULL;
+
+ALTER TABLE session ADD CONSTRAINT vhost_id_exists FOREIGN KEY (vhost_id) REFERENCES vhost (id) ON DELETE SET NULL;
+
+ALTER TABLE session ADD CONSTRAINT client_id_exists FOREIGN KEY (client_id) REFERENCES client (id) ON DELETE SET NULL;
+
+ALTER TABLE session_stats ADD CONSTRAINT session_id_exists FOREIGN KEY (session_id) REFERENCES session (id) ON DELETE SET NULL;
+
+ALTER TABLE system ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES system_stats (id) ON DELETE SET NULL;
+
+ALTER TABLE system_stats ADD CONSTRAINT system_id_exists FOREIGN KEY (system_id) REFERENCES system (id) ON DELETE SET NULL;
+
+ALTER TABLE vhost ADD CONSTRAINT stats_id_exists FOREIGN KEY (stats_id) REFERENCES vhost_stats (id) ON DELETE SET NULL;
+
+ALTER TABLE vhost ADD CONSTRAINT broker_id_exists FOREIGN KEY (broker_id) REFERENCES broker (id) ON DELETE SET NULL;
+
+ALTER TABLE vhost_stats ADD CONSTRAINT vhost_id_exists FOREIGN KEY (vhost_id) REFERENCES vhost (id) ON DELETE SET NULL;
+
17 years, 1 month
rhmessaging commits: r1324 - in mgmt/mint: python/mint and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: nunofsantos
Date: 2007-11-16 14:46:54 -0500 (Fri, 16 Nov 2007)
New Revision: 1324
Added:
mgmt/mint/parse
Modified:
mgmt/mint/config.xml
mgmt/mint/python/mint/__init__.py
mgmt/mint/schemaparser.py
Log:
new approach to code generation: generate Python from XML, and then use sqlobject-admin to generate the corresponding SQL
Modified: mgmt/mint/config.xml
===================================================================
--- mgmt/mint/config.xml 2007-11-16 17:50:29 UTC (rev 1323)
+++ mgmt/mint/config.xml 2007-11-16 19:46:54 UTC (rev 1324)
@@ -1,6 +1,5 @@
<config>
<configOption name="dsn" value="postgresql://localhost/" />
<configOption name="pythonOutput" value="schema.py" />
- <configOption name="sqlOutput" value="schema.sql" />
<configOption name="schemaXML" value="xml/MgmtSchema.xml" />
</config>
Added: mgmt/mint/parse
===================================================================
--- mgmt/mint/parse (rev 0)
+++ mgmt/mint/parse 2007-11-16 19:46:54 UTC (rev 1324)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# generate Python
+echo "Generating Python code..."
+python -c "
+from schemaparser import *
+parser = SchemaParser()
+parser.generateCode()
+"
+
+# generate SQL
+echo "Generating SQL code..."
+
+SQLOUTPUT=schema.sql
+
+PYTHONPATH=. sqlobject-admin sql -m schema > $SQLOUTPUT
+RESULT=`grep "circular reference" $SQLOUTPUT | wc -l`
+if [[ $RESULT -gt 0 ]]; then
+ mv $SQLOUTPUT $SQLOUTPUT.1
+ tail -n+3 $SQLOUTPUT.1 > $SQLOUTPUT
+ rm $SQLOUTPUT.1
+fi
+
+echo "Done."
Property changes on: mgmt/mint/parse
___________________________________________________________________
Name: svn:executable
+ *
Modified: mgmt/mint/python/mint/__init__.py
===================================================================
--- mgmt/mint/python/mint/__init__.py 2007-11-16 17:50:29 UTC (rev 1323)
+++ mgmt/mint/python/mint/__init__.py 2007-11-16 19:46:54 UTC (rev 1324)
@@ -2,179 +2,156 @@
from datetime import *
from sqlobject import *
-from schema import *
+import schema
-class MintModel:
+class OriginalIdDict:
def __init__(self):
- self.currentMethodId = 1
- self.outstandingMethodCalls = dict()
- self.managedBrokers = dict()
+ self.idMap = dict()
+
+ def set(self, idOriginal, obj):
+ self.idMap[idOriginal] = obj
- def getQueueByOriginalId(self, id, create=False):
- queue = None
- try:
- queue = MintQueue.selectBy(idOriginal=id)[:1][0]
- except IndexError:
- if (create): queue = MintQueue(idOriginal=id)
- return queue
+ def getByOriginalId(self, objType, idOriginal, create=False, args={}):
+ if (idOriginal in self.idMap):
+ #print "\n\n=============== %s %d found\n\n" % (objType.__name__, idOriginal)
+ return self.idMap[idOriginal]
+ elif (create):
+ #print "\n\n=============== %s %d NOT found, creating\n\n" % (objType.__name__, idOriginal)
+ obj = objType.__new__(objType)
+ obj.__init__()
+ self.idMap[idOriginal] = obj
+ return obj
+ else:
+ #print "\n\n=============== %s %d NOT found, NOT creating\n\n" % (objType.__name__, idOriginal)
+ return None
- def getQueueByName(self, name, vhost, create=False):
- queue = None
- try:
- queue = MintQueue.selectBy(name=name, mintVhost=vhost)[:1][0]
- except IndexError:
- if (create): queue = MintQueue(name=name, mintVhost=vhost)
- return queue
+ def getByIndexAttrib(self, objType, indexAttrib, indexValue, create=False, args={}):
+ ###FIX
+ return None
+
+class ConnectedBroker:
+ def __init__(self, managedBroker):
+ self.managedBroker = managedBroker
+ self.objs = OriginalIdDict()
- def getVhostByName(self, name, broker, create=False):
- vhost = None
- try:
- vhost = MintVhost.selectBy(name=name, mintBroker=broker)[:1][0]
- except IndexError:
- if (create): vhost = MintVhost(name=name, mintBroker=broker)
- return vhost
+ def getByOriginalId(self, objType, idOriginal, create=False, args={}):
+ return self.objs.getByOriginalId(objType, idOriginal, create, args)
- def getVhostByOriginalId(self, id, create=False):
- vhost = None
- try:
- vhost = MintVhost.selectBy(idOriginal=id)[:1][0]
- except IndexError:
- if (create): vhost = MintVhost(idOriginal=id)
- return vhost
+ def getByIndexAttrib(self, objType, indexAttrib, indexValue, parent, create=False, args={}):
+ return self.objs.getByIndexAttrib(objType, indexAttrib, indexValue, create, args)
- def getBrokerByPort(self, port, system, create=False):
- broker = None
- try:
- broker = MintBroker.selectBy(port=port, mintSystem=system)[:1][0]
- except IndexError:
- if (create): broker = MintBroker(port=port, mintSystem=system)
- return broker
- def getBrokerByOriginalId(self, id, create=False):
- broker = None
- try:
- broker = MintBroker.selectBy(idOriginal=id)[:1][0]
- except IndexError:
- if (create): broker = MintBroker(idOriginal=id)
- return broker
-
- def getSystemByOriginalId(self, id, create=False):
- system = None
- try:
- system = MintSystem.selectBy(idOriginal=id)[:1][0]
- except IndexError:
- if (create): system = MintSystem(idOriginal=id)
- return system
-
+class MintModel:
+ def __init__(self):
+ self.currentMethodId = 1
+ self.outstandingMethodCalls = dict()
+ self.connectedBrokers = dict()
+
def sanitizeDict(self, d):
for k in d.iterkeys():
- if (k.endswith("Id")):
- d[self.convertKey(k)] = d.pop(k)
- elif (k == "id"):
- d[self.convertKey(k)] = d.pop(k)
- for k in d.iterkeys():
- if (k.endswith("Ref")):
- d[self.convertKey(k)] = d.pop(k)
+ if (k == "id"):
+ d[self.convertIdKey(k)] = d.pop(k)
+ elif (k.endswith("Ref")):
+ d[self.convertRefKey(k)] = d.pop(k)
return d
- def convertKey(self, k):
- if (k == "id"):
- return k + "Original"
- if (k.endswith("Id")):
- return k + "ent"
- elif (k.endswith("Ref")):
- oldK = k
- k = k[0].upper() + k[1:]
- return "mint" + k.replace("Ref", "ID")
+ def convertIdKey(self, k):
+ return "idOriginal"
+ def convertRefKey(self, k):
+ return k.replace("Ref", "")
+
def configCallback(self, broker, objectName, list, timestamps):
print "\nCONFIG---------------------------------------------------"
- print "broker=" + broker
print objectName
- print list
result = None
d = self.sanitizeDict(dict(list))
- d["managedBroker"] = self.managedBrokers[broker]
- print d
-
+ connectedBroker = self.connectedBrokers[broker]
+ d["managedBroker"] = broker
d["recTime"] = datetime.fromtimestamp(timestamps[0]/1000000000)
d["creationTime"] = datetime.fromtimestamp(timestamps[1]/1000000000)
+ print d
+
if (objectName == "queue"):
- print "* QUEUE"
- queue = self.getQueueByName(d["name"], self.getVhostByOriginalId(d.pop(self.convertKey("vhostRef"))), True)
+ d["vhost"] = connectedBroker.getByOriginalId(schema.Vhost, d.pop(self.convertRefKey("vhostRef")))
+ queue = connectedBroker.getByOriginalId(schema.Queue, d["idOriginal"], create=True)
queue.set(**d)
- print "queue id = %d" % (queue.id)
result = queue
elif (objectName == "vhost"):
- print "* VHOST"
- vhost = self.getVhostByName(d["name"], self.getBrokerByOriginalId(d.pop(self.convertKey("brokerRef"))), True)
+ d["broker"] = connectedBroker.getByOriginalId(schema.Broker, d.pop(self.convertRefKey("brokerRef")))
+ vhost = connectedBroker.getByOriginalId(schema.Vhost, d["idOriginal"], create=True)
vhost.set(**d)
- print "vhost id = %d" % (vhost.id)
result = vhost
elif (objectName == "broker"):
- print "* BROKER"
- d.pop(self.convertKey("systemRef"))
- broker = self.getBrokerByPort(d["port"], self.getSystemByOriginalId("0"), True)
+#FIX
+ d.pop(self.convertRefKey("systemRef"))
+# d["system"] = connectedBroker.getByOriginalId(System, 0)
+ d["system"] = schema.System.selectBy(idOriginal=0)[:1][0]
+ connectedBroker.objs.set(0, d["system"])
+#FIX
+ broker = connectedBroker.getByOriginalId(schema.Broker, d["idOriginal"], create=True)
broker.set(**d)
- broker.sync()
- print "broker id = %d" % (broker.id)
result = broker
print "END CONFIG---------------------------------------------------\n"
return result
def instCallback(self, broker, objectName, list, timestamps):
print "\nINST---------------------------------------------------"
- print "broker=" + broker
print objectName
- print list
result = None
d = self.sanitizeDict(dict(list))
+ connectedBroker = self.connectedBrokers[broker]
+ d["recTime"] = datetime.fromtimestamp(timestamps[0]/1000000000)
+ print d
if (objectName == "queue"):
- print "* QUEUE"
- queue = self.getQueueByOriginalId(d[self.convertKey("id")])
- d["mintQueue"] = queue.id
- d["recTime"] = datetime.fromtimestamp(timestamps[0]/1000000000)
- queueStats = MintQueueStats()
+ queue = connectedBroker.getByOriginalId(schema.Queue, d[self.convertIdKey("id")])
+ d["queue"] = queue
+ queueStats = schema.QueueStats()
queueStats.set(**d)
d = dict()
if (timestamps[2] != 0):
d["deletionTime"] = datetime.fromtimestamp(timestamps[2]/1000000000)
- d["mintQueueStats"] = queueStats
+ d["stats"] = queueStats
queue.set(**d)
- print queue.id
result = queueStats
elif (objectName == "vhost"):
- print "* VHOST"
+ pass
elif (objectName == "broker"):
- print "* BROKER"
- print "END INST---------------------------------------------------\n"
+ pass
+ print "END INST---------------------------------------------------\n"
return result
def methodCallback(self, broker, methodId, errorNo, errorText, args):
print "\nMETHOD---------------------------------------------------"
- print "broker=" + broker
print "MethodId=%d" % (methodId)
print "Error: %d %s" % (errorNo, errorText)
+ print "Args: "
print args
method = self.outstandingMethodCalls.pop(methodId)
method(errorText, args)
print "END METHOD---------------------------------------------------\n"
- def addManagedBroker(self, host, port):
+ def connectToBroker(self, host, port):
broker = ManagedBroker(host=host, port=port)
label = "%s:%d" % (host, port)
- self.managedBrokers[label] = broker
+ self.connectedBrokers[label] = ConnectedBroker(broker)
broker.configListener(label, self.configCallback)
broker.instrumentationListener (label, self.instCallback)
broker.methodListener (label, self.methodCallback)
broker.start()
return label
+ def getConnectedBroker(self, label):
+ if (label in self.connectedBrokers):
+ return self.connectedBrokers[label].managedBroker
+ else:
+ return None
+
+ # for compatibility
+ addManagedBroker = connectToBroker
+
def registerCallback(self, callback):
self.currentMethodId += 1
methodId = self.currentMethodId
self.outstandingMethodCalls[methodId] = callback
return methodId
-
- def systems(self):
- return MintSystem.select()
Modified: mgmt/mint/schemaparser.py
===================================================================
--- mgmt/mint/schemaparser.py 2007-11-16 17:50:29 UTC (rev 1323)
+++ mgmt/mint/schemaparser.py 2007-11-16 19:46:54 UTC (rev 1324)
@@ -1,145 +1,94 @@
import mllib
-from sqlobject import connectionForURI, sqlhub, MixedCaseUnderscoreStyle
+from sqlobject import *
+class SchemaParser:
+ """parses broker XML schema"""
-class SqlGenerator:
- """generates SQL code from broker XML schema"""
-
- def __init__(self, style):
- self.sqlOutput = ""
- self.indexes = ""
- self.currentTable = ""
- self.style = style
+ def __init__(self):
+ self.options = dict()
+ self.parseConfigFile()
+ self.style = MixedCaseUnderscoreStyle()
+ self.pythonOutput = "from sqlobject import *\n"
+ self.pythonOutput += "from datetime import datetime\n"
+ self.pythonOutput += "conn = connectionForURI(\"%s\")\n" % (self.options["dsn"])
+ self.pythonOutput += "sqlhub.processConnection = conn\n\n"
+ self.additional = ""
+ self.final = "\nclassToSchemaNameMap = dict()\n"
# mapping between xml schema types and database column types
# see xml/MintTypes.xml
self.dataTypesMap = dict()
- self.dataTypesMap["objId"] = "INT8"
- self.dataTypesMap["uint8"] = self.dataTypesMap["hilo8"] = self.dataTypesMap["count8"] = "INT2"
- self.dataTypesMap["uint16"] = self.dataTypesMap["hilo16"] = self.dataTypesMap["count16"] = "INT2"
- self.dataTypesMap["uint32"] = self.dataTypesMap["hilo32"] = self.dataTypesMap["count32"] = "INT4"
- self.dataTypesMap["uint64"] = self.dataTypesMap["hilo64"] = self.dataTypesMap["count64"] = "INT8"
- self.dataTypesMap["bool"] = "BOOLEAN"
- self.dataTypesMap["sstr"] = "VARCHAR(1000)"
- self.dataTypesMap["lstr"] = "VARCHAR(4000)"
-
- def generate(self, name, elements, type):
- self.startTable(name, type)
- for elem in elements:
- self.generateColumn(elem)
- self.endTable(name, type)
-
- def startTable(self, name, type):
- if (type == "config"):
- actualName = name
- else:
- actualName = name + "_stats"
- self.currentTable = actualName
- self.sqlOutput += "\nCREATE TABLE %s (\n" % (actualName)
- self.sqlOutput += " id BIGSERIAL PRIMARY KEY,\n"
- self.sqlOutput += " id_original BIGINT,\n"
- if (type == "config"):
- self.sqlOutput += " %s_stats_id BIGINT,\n" % (name)
- else:
- self.sqlOutput += " %s_id BIGINT REFERENCES %s ,\n" % (name, name)
- self.generateTimestampColumn("rec")
- if (type == "config"):
- self.generateTimestampColumn("creation")
- self.generateTimestampColumn("deletion")
-
- def generateTimestampColumn(self, col):
- self.sqlOutput += " %s_time TIMESTAMP,\n" % (col)
+ self.dataTypesMap["objId"] = "ForeignKey"
+ self.dataTypesMap["uint8"] = self.dataTypesMap["hilo8"] = self.dataTypesMap["count8"] = "SmallIntCol"
+ self.dataTypesMap["uint16"] = self.dataTypesMap["hilo16"] = self.dataTypesMap["count16"] = "SmallIntCol"
+ self.dataTypesMap["uint32"] = self.dataTypesMap["hilo32"] = self.dataTypesMap["count32"] = "IntCol"
+ self.dataTypesMap["uint64"] = self.dataTypesMap["hilo64"] = self.dataTypesMap["count64"] = "BigIntCol"
+ self.dataTypesMap["bool"] = "BoolCol"
+ self.dataTypesMap["sstr"] = self.dataTypesMap["lstr"] = "StringCol"
- def generateIndex(self, name):
- self.indexes += "\nCREATE INDEX %s_%s_index ON %s(%s);\n" % (self.currentTable, name, self.currentTable, name)
-
- def generateColumn(self, elem, suffix=""):
- actualName = self.style.pythonAttrToDBColumn(elem["@name"] + suffix)
- if (actualName.endswith("_id")):
- # handle case where XML attrib ends in "Id", which confuses SqlObject into thinking it's a foreign key
- actualName = actualName.replace("_id", "_ident")
- if (elem["@type"].startswith("enum")):
- actualType = "enum"
- params = actualName + " IN ("
- for i in elem["@type"].replace("enum(", "").replace(")", "").split(","):
- params += "'" + i + "',"
- params = params[:-1] + "))"
- else:
- actualType = elem["@type"]
- params = ""
- if (elem["@name"].endswith("Ref")):
- foreignKeyTable = "mint_" + actualName.replace("_ref", "")
- self.sqlOutput += " %s_id BIGINT REFERENCES %s,\n" % (foreignKeyTable, foreignKeyTable)
- else:
- self.sqlOutput += " %s %s %s,\n" % (actualName, self.dataTypesMap[actualType], params)
- if (elem["@type"].startswith("hilo") and suffix == ""):
- self.generateColumn(elem, "High")
- self.generateColumn(elem, "Low")
- if (elem["@index"] == "y"):
- self.generateIndex(actualName)
+ def attrNameFromDbColumn(self, name, removeSuffix=""):
+ return self.style.dbColumnToPythonAttr(name.replace(removeSuffix, ""))
+
+ def generateAttrib(self, attribName, attribType, params=""):
+ if (params.find("default") < 0):
+ if (params == ""):
+ params = "default=None"
+ else:
+ params += ", default=None"
+ self.pythonOutput += " %s = %s(%s)\n" % (attribName, attribType, params)
- def endTable(self, name, type):
- self.sqlOutput = self.sqlOutput[:-2] + "\n);\n" + self.indexes
- if (type == "inst"):
- self.sqlOutput += "\nALTER TABLE %s ADD FOREIGN KEY (%s_stats_id) REFERENCES %s_stats;\n" % (name, name, name)
- self.currentTable = ""
- self.indexes = ""
+ def generateTimestampAttrib(self, col):
+ self.generateAttrib(col + "Time", "TimestampCol") #, "default=datetime.min")
- def getCode(self):
- return self.sqlOutput
-
-
-
-class PythonGenerator:
- """generates Python code from broker XML schema"""
-
- def __init__(self, style, dsn):
- self.pythonOutput = "from sqlobject import *\n\n"
- self.pythonOutput += "conn = connectionForURI(\"%s\")\n" % (dsn)
- self.pythonOutput += "sqlhub.processConnection = conn\n\n"
- self.additional = ""
- self.style = style
+ def generateForeignKeyAttrib(self, name, reference):
+ params = "'%s', cascade='null'" % (reference)
+ self.generateAttrib(name, "ForeignKey", params)
- def attrNameFromDbColumn(self, name, prefix="", removeSuffix=""):
- name = self.style.dbColumnToPythonAttr(name.replace(removeSuffix, ""))
- name = prefix + name[0].upper() + name[1:]
- return name
-
- def generate(self, name, schemaName, schemaId, elements):
- pythonName = self.attrNameFromDbColumn(name)
- self.startClass(pythonName + "Stats")
- self.endClass()
- self.startClass(pythonName, schemaName, schemaId)
- for elem in elements:
- self.generateMethod(elem)
- self.endClass(pythonName)
+ def generateHiLoAttrib(self, name, type):
+ self.generateAttrib(name, type)
+ self.generateAttrib(name + "Low", type)
+ self.generateAttrib(name + "High", type)
- def generateAdditionalAttribs(self, name, elements):
- name = self.attrNameFromDbColumn(name)
+ def generateClassAttribs(self, schemaName, elements):
for elem in elements:
- refName = elem["@name"]
- if (refName.endswith("Ref")):
- # generate foreign keys
- refName = self.attrNameFromDbColumn(refName, "Mint", "Ref")
- # add missing attribute (not added correctly with SqlObject 0.7.7; may need to be removed in later versions)
- self.pythonOutput += " _SO_class_%s = %s\n" % (self.style.pythonAttrToDBColumn(refName).capitalize(), refName)
- self.additional += "\n%s.sqlmeta.addJoin(MultipleJoin('%s" % (refName, name)
- self.additional += "', joinMethodName='%ss'))" % (name.replace("Mint", "").lower())
+ if (elem["@name"].endswith("Ref")):
+ reference = self.style.dbTableToPythonClass(elem["@name"]).replace("Ref", "")
+ attrib = reference[0].lower() + reference[1:]
+ self.generateForeignKeyAttrib(attrib, reference)
+ elif (elem["@type"].startswith("hilo")):
+ self.generateHiLoAttrib(self.attrNameFromDbColumn(elem["@name"]), self.dataTypesMap[elem["@type"]])
+ else:
+ args = ""
+ if (elem["@type"] == "sstr"):
+ args += "length=1000"
+ elif (elem["@type"] == "lstr"):
+ args += "length=4000"
+ self.generateAttrib(self.attrNameFromDbColumn(elem["@name"]), self.dataTypesMap[elem["@type"]], args)
- def startClass(self, pythonName, schemaName="", schemaId=""):
+ def startClass(self, schemaName, stats=False):
+ if (stats):
+ pythonName = self.style.dbTableToPythonClass(schemaName + "_stats")
+ colPythonName = self.style.dbColumnToPythonAttr(schemaName)
+ keyPythonName = self.style.dbTableToPythonClass(schemaName)
+ else:
+ pythonName = self.style.dbTableToPythonClass(schemaName)
+ statsPythonName = self.style.dbTableToPythonClass(schemaName + "_stats")
self.pythonOutput += "\nclass %s(SQLObject):\n" % (pythonName)
- if (schemaName != ""):
- self.pythonOutput += " schemaId = %d\n" % int(schemaId)
- self.pythonOutput += " schemaName = \"%s\"\n" % schemaName
- self.pythonOutput += " managedBroker = None\n"
- self.pythonOutput += " class sqlmeta:\n"
- self.pythonOutput += " fromDatabase = True\n"
-
+ self.generateAttrib("idOriginal", "BigIntCol")
+ self.generateTimestampAttrib("rec")
+ if (stats):
+ self.generateForeignKeyAttrib(colPythonName, keyPythonName)
+ else:
+ self.generateTimestampAttrib("creation")
+ self.generateTimestampAttrib("deletion")
+ self.generateAttrib("managedBroker", "StringCol", "length=1000")
+ self.generateForeignKeyAttrib("stats", statsPythonName)
+ self.final += "classToSchemaNameMap['%s'] = '%s'\n" % (pythonName, schemaName)
+
def generateMethod(self, elem):
if (elem["@desc"] != None):
comment = ' """' + elem["@desc"] + '"""\n'
else:
comment = ""
-
formalArgs = ", "
actualArgs = " actualArgs = dict()\n"
for arg in elem.query["arg"]:
@@ -149,36 +98,20 @@
formalArgs = formalArgs[:-2]
else:
formalArgs = ""
- self.pythonOutput += " def %s(self, model, callbackMethod%s):\n" % (elem["@name"], formalArgs)
+ self.pythonOutput += "\n def %s(self, model, managedBrokerLabel, callbackMethod%s):\n" % (elem["@name"], formalArgs)
self.pythonOutput += comment
self.pythonOutput += actualArgs
self.pythonOutput += " methodId = model.registerCallback(callbackMethod)\n"
- self.pythonOutput += " self.managedBroker.method(methodId, self.idOriginal, self.schemaName, \"%s\", " % (elem["@name"])
+ self.pythonOutput += " model.getConnectedBroker(managedBrokerLabel).method(methodId, self.idOriginal, classToSchemaNameMap[self.__class__.__name__], \"%s\", " % (elem["@name"])
self.pythonOutput += "args=actualArgs, packageName=\"qpid\")\n"
- def endClass(self, name=""):
+ def endClass(self):
if (self.additional != ""):
self.pythonOutput += self.additional + "\n"
self.additional = ""
- if (name != "" and not name.endswith("Stats")):
- # add missing attribute (not added correctly with SqlObject 0.7.7; may need to be removed in later versions)
- self.pythonOutput += " _SO_class_%s_stats = %sStats\n" % (self.style.pythonAttrToDBColumn(name).capitalize(), name)
+ if (self.pythonOutput.endswith("(SQLObject):\n")):
+ self.pythonOutput += " pass\n"
- def getCode(self):
- return self.pythonOutput
-
-
-
-class SchemaParser:
- """parses broker XML schema"""
-
- def __init__(self):
- self.options = dict()
- self.parseConfigFile()
- self.style = MixedCaseUnderscoreStyle()
- self.pythonGen = PythonGenerator(self.style, self.options["dsn"])
- self.sqlGen = SqlGenerator(self.style)
-
def parseConfigFile(self):
config = mllib.xml_parse("config.xml")
configOptions = config.query["config/configOption"]
@@ -186,22 +119,20 @@
self.options[opt["@name"]] = opt["@value"]
def generateCode(self):
- conn = connectionForURI(self.options["dsn"])
- sqlhub.processConnection = conn
- sqlFile = open(self.options["sqlOutput"], "w")
- pythonFile = open(self.options["pythonOutput"], "w")
+ outputFile = open(self.options["pythonOutput"], "w")
schema = mllib.xml_parse(self.options["schemaXML"])
classes = schema.query["schema/class"]
for cls in classes:
- actualName = "mint_" + cls["@name"]
- self.sqlGen.generate(actualName, cls.query["configElement"], "config")
- self.sqlGen.generate(actualName, cls.query["instElement"], "inst")
- self.pythonGen.generate(actualName, cls["@name"], cls["@schemaId"], cls.query["method"])
- self.pythonGen.generateAdditionalAttribs(actualName, cls.query["configElement"])
- sqlFile.write(self.sqlGen.getCode())
- pythonFile.write(self.pythonGen.getCode())
- sqlFile.close()
- pythonFile.close()
+ self.startClass(cls["@name"])
+ self.generateClassAttribs(cls["@name"], cls.query["configElement"])
+ for elem in cls.query["method"]:
+ self.generateMethod(elem)
+ self.endClass()
+ self.startClass(cls["@name"], stats=True)
+ self.generateClassAttribs(cls["@name"], cls.query["instElement"])
+ self.endClass()
+ outputFile.write(self.pythonOutput + self.final)
+ outputFile.close()
parser = SchemaParser()
17 years, 1 month
rhmessaging commits: r1323 - in mgmt/cumin: resources and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-11-16 12:50:29 -0500 (Fri, 16 Nov 2007)
New Revision: 1323
Added:
mgmt/cumin/resources/add-20.png
mgmt/cumin/resources/add-36.png
mgmt/cumin/resources/cancel-20.png
mgmt/cumin/resources/cancel-36.png
mgmt/cumin/resources/error-20.png
mgmt/cumin/resources/error-36.png
mgmt/cumin/resources/favicon.ico
mgmt/cumin/resources/help-20.png
mgmt/cumin/resources/help-36.png
mgmt/cumin/resources/notice-20.png
mgmt/cumin/resources/notice-36.png
mgmt/cumin/resources/remove-20.png
mgmt/cumin/resources/remove-36.png
mgmt/cumin/resources/rhm-32x14.png
mgmt/cumin/resources/rhm-36.png
mgmt/cumin/resources/submit-20.png
mgmt/cumin/resources/submit-36.png
mgmt/cumin/resources/warning-20.png
mgmt/cumin/resources/warning-36.png
Modified:
mgmt/cumin/python/cumin/page.strings
Log:
Adds some of the new graphics from mlanglie.
Uses the new logo graphics in the nav and top page. Adjusts the "Red
Hat purple" to match his graphics.
Modified: mgmt/cumin/python/cumin/page.strings
===================================================================
--- mgmt/cumin/python/cumin/page.strings 2007-11-15 22:14:50 UTC (rev 1322)
+++ mgmt/cumin/python/cumin/page.strings 2007-11-16 17:50:29 UTC (rev 1323)
@@ -51,12 +51,10 @@
#head {
padding: 0.4em 0.75em 0.2em 0.75em;
+ background-color: #685b8a;
+ /* background-color: #564979; */
}
-#head {
- background-color: #564979;
-}
-
#body {
padding: 1em;
}
@@ -321,7 +319,8 @@
form.mform .head {
font-weight: bold;
color: white;
- background-color: #564979;
+ background-color: #685b8a;
+ /* background-color: #564979; */
}
form.mform .head h1 {
@@ -504,7 +503,8 @@
}
.BrokerClusterBrowser.groups h2 {
- color: #564979;
+ color: #685b8a;
+ /* color: #564979; */
font-size: 0.9em;
border-bottom: 1px dotted #ddd;
width: 80%;
@@ -607,6 +607,7 @@
<head>
<title>{title}</title>
<link rel="stylesheet" type="text/css" href="cumin.css"/>
+ <link rel="shortcut icon" href="resource?name=favicon.ico" type="image/x-icon"/>
<!-- XXX import this via cumin.js instead -->
<script src="resource?name=wooly.js"> </script>
<script src="cumin.js"> </script>
@@ -618,7 +619,7 @@
<li><a class="nav" href="">Log Out</a></li>
</ul>
- <a id="logo" href="{href}"><img src="resource?name=logo.png"/></a>
+ <a id="logo" href="{href}"><img src="resource?name=rhm-32x14.png"/></a>
<ul id="context">{frames}</ul>
</div>
<div id="body">{mode}</div>
@@ -632,7 +633,10 @@
[MainView.html]
<div class="oblock">
- <h1>{title}</h1>
+ <h1>
+ <img src="resource?name=rhm-36.png"/>
+ {title}
+ </h1>
<ul class="TabSet tabs">{tabs}</ul>
<div class="TabSet mode">{mode}</div>
Added: mgmt/cumin/resources/add-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/add-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/add-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/add-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/cancel-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/cancel-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/cancel-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/cancel-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/error-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/error-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/error-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/error-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/favicon.ico
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/favicon.ico
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/help-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/help-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/help-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/help-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/notice-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/notice-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/notice-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/notice-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/remove-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/remove-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/remove-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/remove-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/rhm-32x14.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/rhm-32x14.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/rhm-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/rhm-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/submit-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/submit-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/submit-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/submit-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/warning-20.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/warning-20.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: mgmt/cumin/resources/warning-36.png
===================================================================
(Binary files differ)
Property changes on: mgmt/cumin/resources/warning-36.png
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
17 years, 1 month
rhmessaging commits: r1322 - in store/trunk/cpp/lib: jrnl and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2007-11-15 17:14:50 -0500 (Thu, 15 Nov 2007)
New Revision: 1322
Modified:
store/trunk/cpp/lib/BdbMessageStore.cpp
store/trunk/cpp/lib/BdbMessageStore.h
store/trunk/cpp/lib/BindingDbt.h
store/trunk/cpp/lib/BufferValue.h
store/trunk/cpp/lib/Cursor.h
store/trunk/cpp/lib/DataTokenImpl.h
store/trunk/cpp/lib/JournalImpl.h
store/trunk/cpp/lib/StoreException.h
store/trunk/cpp/lib/jrnl/data_tok.hpp
store/trunk/cpp/lib/jrnl/deq_rec.hpp
store/trunk/cpp/lib/jrnl/enq_map.hpp
store/trunk/cpp/lib/jrnl/enq_rec.hpp
store/trunk/cpp/lib/jrnl/jcntl.cpp
store/trunk/cpp/lib/jrnl/jcntl.hpp
store/trunk/cpp/lib/jrnl/jdir.cpp
store/trunk/cpp/lib/jrnl/jdir.hpp
store/trunk/cpp/lib/jrnl/jinf.hpp
store/trunk/cpp/lib/jrnl/rcvdat.hpp
store/trunk/cpp/lib/jrnl/rmgr.cpp
store/trunk/cpp/lib/jrnl/rmgr.hpp
store/trunk/cpp/lib/jrnl/txn_map.hpp
store/trunk/cpp/lib/jrnl/txn_rec.hpp
store/trunk/cpp/lib/jrnl/wmgr.hpp
store/trunk/cpp/lib/jrnl/wrfc.hpp
Log:
Bugfix to journal recovery. Also made all destructors virtual to try to solve the problem of qpidd coring when handling a top-level exception... - but to no avail, unfortunately.
Modified: store/trunk/cpp/lib/BdbMessageStore.cpp
===================================================================
--- store/trunk/cpp/lib/BdbMessageStore.cpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/BdbMessageStore.cpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -483,8 +483,8 @@
//std::cout << jc->dirname() <<"-queueName:" << queue->getName() << "-enq count:" << jc->get_enq_cnt() << std::endl;
+ unsigned aio_sleep_cnt = 0;
while (read) {
-
rhm::journal::iores res = jc->read_data_record(&dbuff, dbuffSize, &xidbuff, xidbuffSize, transientFlag, externalFlag, &dtokp);
readSize = dtokp.dsize();
@@ -530,9 +530,12 @@
::free(xidbuff);
else if (dbuff)
::free(dbuff);
+ aio_sleep_cnt = 0;
break;
}
case rhm::journal::RHM_IORES_AIO_WAIT:
+ if (++aio_sleep_cnt > MAX_AIO_SLEEPS)
+ THROW_STORE_EXCEPTION("Timeout waiting for AIO");
::usleep(AIO_SLEEP_TIME);
break;
case rhm::journal::RHM_IORES_EMPTY:
@@ -936,6 +939,7 @@
dtokp->set_rid(message.getPersistenceId()); // set the messageID into the Journal header (record-id)
bool written = false;
+ unsigned aio_sleep_cnt = 0;
while (!written)
{
JournalImpl* jc = static_cast<JournalImpl*>(queue->getExternalQueueStore());
@@ -958,12 +962,17 @@
case rhm::journal::RHM_IORES_SUCCESS:
if (dtokp.get()->wstate() >= DataTokenImpl::ENQ_SUBM)
written = true;
+ aio_sleep_cnt = 0;
break;
case rhm::journal::RHM_IORES_AIO_WAIT:
+ if (++aio_sleep_cnt > MAX_AIO_SLEEPS)
+ THROW_STORE_EXCEPTION("Timeout waiting for AIO");
usleep(AIO_SLEEP_TIME); // TODO move sleep to wait for IO in get events
jc->get_wr_events();
break;
case rhm::journal::RHM_IORES_FULL:
+// Temporary error msg till exception handler core problem solved...
+std::cerr << "Error storing message -- Journal full for queue " << queue->getName() << std::endl << std::flush;
THROW_STORE_FULL_EXCEPTION("Error storing message -- Journal full :" + queue->getName());
break;
default:
@@ -1064,6 +1073,8 @@
tid = txn->getXid();
}
+ unsigned aio_sleep_cnt = 0;
+ unsigned busy_sleep_cnt = 0;
while (!written)
{
rhm::journal::iores dres;
@@ -1079,14 +1090,19 @@
switch (dres)
{
case rhm::journal::RHM_IORES_SUCCESS:
-
+ aio_sleep_cnt = 0;
+ busy_sleep_cnt = 0;
written = true;
break;
case rhm::journal::RHM_IORES_AIO_WAIT:
+ if (++aio_sleep_cnt > MAX_AIO_SLEEPS)
+ THROW_STORE_EXCEPTION("Timeout waiting for AIO");
usleep(AIO_SLEEP_TIME); // TODO add sleep time to get events call as option
jc->get_wr_events();
break;
case rhm::journal::RHM_IORES_BUSY:
+ if (++busy_sleep_cnt > MAX_AIO_SLEEPS)
+ THROW_STORE_EXCEPTION("Timeout waiting for AIO");
usleep(AIO_SLEEP_TIME); // TODO add sleep time to get events call as option
break;
default:
Modified: store/trunk/cpp/lib/BdbMessageStore.h
===================================================================
--- store/trunk/cpp/lib/BdbMessageStore.h 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/BdbMessageStore.h 2007-11-15 22:14:50 UTC (rev 1322)
@@ -131,7 +131,7 @@
public:
BdbMessageStore(const char* envpath = 0);
bool init(const std::string& dir, const bool async, const bool force = false);
- ~BdbMessageStore();
+ virtual ~BdbMessageStore();
void truncate();
Modified: store/trunk/cpp/lib/BindingDbt.h
===================================================================
--- store/trunk/cpp/lib/BindingDbt.h 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/BindingDbt.h 2007-11-15 22:14:50 UTC (rev 1322)
@@ -49,7 +49,7 @@
const std::string& k,
const qpid::framing::FieldTable& a);
- ~BindingDbt();
+ virtual ~BindingDbt();
};
Modified: store/trunk/cpp/lib/BufferValue.h
===================================================================
--- store/trunk/cpp/lib/BufferValue.h 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/BufferValue.h 2007-11-15 22:14:50 UTC (rev 1322)
@@ -40,7 +40,7 @@
BufferValue(u_int32_t size, u_int64_t offset);
BufferValue(const qpid::broker::Persistable& p);
- ~BufferValue();
+ virtual ~BufferValue();
};
}}
Modified: store/trunk/cpp/lib/Cursor.h
===================================================================
--- store/trunk/cpp/lib/Cursor.h 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/Cursor.h 2007-11-15 22:14:50 UTC (rev 1322)
@@ -34,7 +34,7 @@
Dbc* cursor;
public:
Cursor() : cursor(0) {}
- ~Cursor() { if(cursor) cursor->close(); }
+ virtual ~Cursor() { if(cursor) cursor->close(); }
void open(Db& db, DbTxn* txn, u_int32_t flags = 0) { db.cursor(txn, &cursor, flags); }
void close() { if(cursor) cursor->close(); cursor = 0; }
Dbc* get() { return cursor; }
Modified: store/trunk/cpp/lib/DataTokenImpl.h
===================================================================
--- store/trunk/cpp/lib/DataTokenImpl.h 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/DataTokenImpl.h 2007-11-15 22:14:50 UTC (rev 1322)
@@ -33,7 +33,7 @@
{
public:
DataTokenImpl();
- ~DataTokenImpl();
+ virtual ~DataTokenImpl();
};
} // namespace bdbstore
Modified: store/trunk/cpp/lib/JournalImpl.h
===================================================================
--- store/trunk/cpp/lib/JournalImpl.h 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/JournalImpl.h 2007-11-15 22:14:50 UTC (rev 1322)
@@ -87,7 +87,7 @@
const std::string& journalBaseFilename,
const qpid::sys::Duration getEventsTimeout,
const qpid::sys::Duration flushTimeout);
- ~JournalImpl();
+ virtual ~JournalImpl();
void recover(std::deque<journal::data_tok*>* rd_dtokl, const journal::aio_cb rd_cb,
std::deque<journal::data_tok*>* wr_dtokl, const journal::aio_cb wr_cb,
Modified: store/trunk/cpp/lib/StoreException.h
===================================================================
--- store/trunk/cpp/lib/StoreException.h 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/StoreException.h 2007-11-15 22:14:50 UTC (rev 1322)
@@ -33,8 +33,8 @@
public:
StoreException(const std::string& _text) : text(_text) {}
StoreException(const std::string& _text, DbException& cause) : text(_text + ": " + cause.what()) {}
- ~StoreException() throw() {}
- const char* what() const throw() { return text.c_str(); }
+ virtual ~StoreException() throw() {}
+ virtual const char* what() const throw() { return text.c_str(); }
};
class StoreFullException : public StoreException
@@ -42,7 +42,7 @@
public:
StoreFullException(const std::string& _text) : StoreException(_text) {}
StoreFullException(const std::string& _text, DbException& cause) : StoreException(_text, cause) {}
- ~StoreFullException() throw() {}
+ virtual ~StoreFullException() throw() {}
};
Modified: store/trunk/cpp/lib/jrnl/data_tok.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/data_tok.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/data_tok.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -117,7 +117,7 @@
public:
data_tok();
- ~data_tok();
+ virtual ~data_tok();
inline size_t refcnt(void) { return _ref_cnt;}
inline void ref(void) { _ref_cnt++; }
Modified: store/trunk/cpp/lib/jrnl/deq_rec.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/deq_rec.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/deq_rec.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -66,7 +66,7 @@
// constructor used for write operations, where xid already exists
deq_rec(const u_int64_t rid, const u_int64_t drid, const void* const xidp,
const size_t xidlen);
- ~deq_rec();
+ virtual ~deq_rec();
// Prepare instance for use in reading data from journal
void reset();
Modified: store/trunk/cpp/lib/jrnl/enq_map.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/enq_map.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/enq_map.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -69,7 +69,7 @@
public:
enq_map();
- ~enq_map();
+ virtual ~enq_map();
void insert_fid(const u_int64_t rid, const u_int16_t fid) throw (jexception);
void insert_fid(const u_int64_t rid, const u_int16_t fid, const bool locked)
Modified: store/trunk/cpp/lib/jrnl/enq_rec.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/enq_rec.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/enq_rec.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -76,7 +76,7 @@
/**
* \brief Destructor
*/
- ~enq_rec();
+ virtual ~enq_rec();
// Prepare instance for use in reading data from journal, xid and data will be allocated
void reset();
Modified: store/trunk/cpp/lib/jrnl/jcntl.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jcntl.cpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/jcntl.cpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -136,6 +136,9 @@
_tmap.clear();
rcvr_janalyze(_rcvdat, prep_txn_list);
+// Debug info, but may be useful to print with a flag
+//_rcvdat.print();
+
if (_datafh)
{
for (u_int32_t i=0; i<JRNL_NUM_FILES; i++)
@@ -178,6 +181,7 @@
_rrfc.initialize(JRNL_NUM_FILES, (nlfh**)_datafh, _rcvdat._ffid);
_rmgr.recover_complete();
_readonly_flag = false;
+//std::cout << "Journal revovery complete." << std::endl;
}
void
@@ -406,9 +410,11 @@
while (rcvr_get_next_record(fid, &ifs, rd));
std::vector<std::string> xid_list;
_tmap.xid_list(xid_list);
- for (std::vector<std::string>::iterator itr = xid_list.begin(); itr != xid_list.end(); itr++)
+ for (std::vector<std::string>::iterator itr = xid_list.begin(); itr != xid_list.end();
+ itr++)
{
- std::vector<std::string>::const_iterator pitr = std::find(prep_txn_list.begin(), prep_txn_list.end(), *itr);
+ std::vector<std::string>::const_iterator pitr = std::find(prep_txn_list.begin(),
+ prep_txn_list.end(), *itr);
if (pitr == prep_txn_list.end())
_tmap.get_remove_tdata_list(*itr);
}
@@ -422,7 +428,9 @@
bool done = false;
void* xidp = NULL;
hdr h;
- jfile_cycle(fid, ifsp, rd, true);
+ if (!jfile_cycle(fid, ifsp, rd, true))
+ return false;
+ std::streampos read_pos = ifsp->tellg();
ifsp->read((char*)&h, sizeof(hdr));
switch(h._magic)
{
@@ -432,7 +440,8 @@
while (!done)
{
done = er.rcv_decode(h, ifsp, cum_size_read);
- jfile_cycle(fid, ifsp, rd, false);
+ if (!jfile_cycle(fid, ifsp, rd, true))
+ return false;
}
//std::cout << " E";
if (!er.is_transient()) // Ignore transient msgs
@@ -460,7 +469,8 @@
while (!done)
{
done = dr.rcv_decode(h, ifsp, cum_size_read);
- jfile_cycle(fid, ifsp, rd, false);
+ if (!jfile_cycle(fid, ifsp, rd, true))
+ return false;
}
//std::cout << " D";
if (dr.xid_size())
@@ -501,7 +511,8 @@
while (!done)
{
done = ar.rcv_decode(h, ifsp, cum_size_read);
- jfile_cycle(fid, ifsp, rd, false);
+ if (!jfile_cycle(fid, ifsp, rd, true))
+ return false;
}
//std::cout << " A";
// Delete this txn from tmap, unlock any locked records in emap
@@ -535,7 +546,8 @@
while (!done)
{
done = cr.rcv_decode(h, ifsp, cum_size_read);
- jfile_cycle(fid, ifsp, rd, false);
+ if (!jfile_cycle(fid, ifsp, rd, true))
+ return false;
}
//std::cout << " C";
// Delete this txn from tmap, process records into emap
@@ -574,6 +586,7 @@
default:
std::stringstream ss;
ss << std::hex << std::setfill('0') << "Magic=0x" << std::setw(8) << h._magic;
+ ss << " fid=" << fid << " foffs=0x" << std::setw(8) << read_pos;
throw jexception(jerrno::JERR_JCNTL_UNKNOWNMAGIC, ss.str().c_str(), "jcntl",
"rcvr_get_next_record");
}
@@ -588,8 +601,9 @@
{
if (ifsp->eof() || !ifsp->good())
{
+ rd._eo = ifsp->tellg(); // remember file offset before closing
+ rd._lfid = fid++;
ifsp->close();
- fid++;
if (fid >= JRNL_NUM_FILES)
fid = 0;
if (fid == rd._ffid) // used up all journal files
Modified: store/trunk/cpp/lib/jrnl/jcntl.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jcntl.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/jcntl.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -159,7 +159,7 @@
/**
* \brief Destructor.
*/
- ~jcntl();
+ virtual ~jcntl();
/**
* \brief Initialize the journal for storing data.
Modified: store/trunk/cpp/lib/jrnl/jdir.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jdir.cpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/jdir.cpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -52,6 +52,9 @@
_base_filename(_base_filename)
{}
+jdir::~jdir()
+{}
+
// === create_dir ===
void
Modified: store/trunk/cpp/lib/jrnl/jdir.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jdir.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/jdir.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -70,6 +70,8 @@
* and sub-directories.
*/
jdir(const std::string& dirname, const std::string& base_filename);
+
+ virtual ~jdir();
/**
@@ -152,17 +154,14 @@
/**
- * \brief Clear the journal directory of files matching the base filename
- * by moving them into a subdirectory. This fn uses the dirname and base_filename
- * that were set on construction.
+ * \brief ??
*
* \exception ??
*/
void verify_dir() throw (jexception);
/**
- * \brief Clear the directory dirname of journal files matching base_filename
- * by moving them into a subdirectory.
+ * \brief ??
*
* \param dirname C-string containing name of journal directory.
* \param base_filename C-string containing base filename of journal files to be matched
@@ -173,8 +172,7 @@
static void verify_dir(const char* dirname, const char* base_filename) throw (jexception);
/**
- * \brief Clear the directory dirname of journal files matching base_filename
- * by moving them into a subdirectory.
+ * \brief ??
*
* \param dirname String containing name of journal directory.
* \param base_filename String containing base filename of journal files to be matched
@@ -247,7 +245,6 @@
* for moving into subdirectory.
*
* \exception ??
- *
*/
static std::string create_bak_dir(const std::string& dirname,
const std::string& base_filename) throw (jexception);
Modified: store/trunk/cpp/lib/jrnl/jinf.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jinf.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/jinf.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -71,7 +71,7 @@
// constructor for writing jinf file
jinf(const std::string& jid, const std::string& jdir, const std::string& base_filename,
const timespec& ts);
- ~jinf();
+ virtual ~jinf();
void validate() throw (jexception);
const u_int16_t analyze() throw (jexception);
Modified: store/trunk/cpp/lib/jrnl/rcvdat.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/rcvdat.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/rcvdat.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -59,6 +59,7 @@
_h_rid(0),
_enq_cnt_list(JRNL_NUM_FILES, 0)
{}
+
void reset()
{
_empty=true;
@@ -70,16 +71,22 @@
for (unsigned f=0; f<_enq_cnt_list.size(); f++)
_enq_cnt_list[f] = 0;
}
+
void print()
{
- std::cout << "_empty=" << (_empty?"T":"F") << std::endl;
- std::cout << "_ffid=" << _ffid << std::endl;
- std::cout << "_fro=" << _fro << std::endl;
- std::cout << "_lfid=" << _lfid << std::endl;
- std::cout << "_eo=" << _eo << std::endl;
- std::cout << "_h_rid=" << _h_rid << std::endl;
+ std::cout << "Recovery jorunal file analysis summary:" << std::endl;
+ std::cout << " Journal empty (_empty) = " << (_empty ? "TRUE" : "FALSE") <<
+ std::endl;
+ std::cout << " First fid (_ffid) = " << _ffid << std::endl;
+ std::cout << " First record offset in first fid (_fro) = 0x" << std::hex << _fro <<
+ std::dec << " (" << (_fro/JRNL_DBLK_SIZE) << " dblks)" << std::endl;
+ std::cout << " Last fid (_lfid) = " << _lfid << std::endl;
+ std::cout << " End offset (_eo) = 0x" << std::hex << _eo << std::dec << " (" <<
+ (_eo/JRNL_DBLK_SIZE) << " dblks)" << std::endl;
+ std::cout << " Highest rid (_h_rid) = " << _h_rid << std::endl;
+ std::cout << " Enqueued records (txn & non-txn):" << std::endl;
for (unsigned i=0; i<_enq_cnt_list.size(); i++)
- std::cout << "_enq_cnt_list[" << i << "]=" << _enq_cnt_list[i] << std::endl;
+ std::cout << " File " << i << ": " << _enq_cnt_list[i] << std::endl;
}
};
}
Modified: store/trunk/cpp/lib/jrnl/rmgr.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/rmgr.cpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/rmgr.cpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -262,6 +262,7 @@
//std::cout << " g" << std::flush;
if (_page_cb_arr[_pg_index]._state != AIO_COMPLETE)
{
+//std::cout << "[" << _pg_index << "]=" << page_state_str(_page_cb_arr[_pg_index]._state) << std::flush;
aio_cycle();
return RHM_IORES_AIO_WAIT;
}
@@ -346,8 +347,8 @@
ss << "rid=0x" << std::setw(16) << _hdr._rid;
ss << "; dtok_rid=" << std::setw(16) << dtokp->rid();
ss << "; dtok_id=0x" << std::setw(8) << dtokp->id();
- throw jexception(jerrno::JERR_RMGR_RIDMISMATCH, ss.str().c_str(), "rmgr",
- "read");
+ throw jexception(jerrno::JERR_RMGR_RIDMISMATCH, ss.str().c_str(),
+ "rmgr", "read");
}
}
else
@@ -692,7 +693,7 @@
for (int16_t i=0; i<num_uninit; i++)
{
if (_rrfc.empty()) // Nothing to do; this file not yet written to
- break;
+ break;
// If this is the first read from a file, increase the read pointers to beyond fhdr
// or consume fhdr here for analysis (not req'd at present)
Modified: store/trunk/cpp/lib/jrnl/rmgr.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/rmgr.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/rmgr.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -65,7 +65,7 @@
rmgr(jcntl* jc, enq_map& emap, txn_map& tmap, rrfc& rrfc);
rmgr(jcntl* jc, enq_map& emap, txn_map& tmap, rrfc& rrfc,
std::deque<data_tok*>* const dtokl) throw (jexception);
- ~rmgr();
+ virtual ~rmgr();
void initialize(std::deque<data_tok*>* const dtoklp, const aio_cb rd_cb) throw (jexception);
const iores get(const u_int64_t& rid, const size_t& dsize, const size_t& dsize_avail,
Modified: store/trunk/cpp/lib/jrnl/txn_map.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/txn_map.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/txn_map.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -77,7 +77,7 @@
public:
txn_map();
- ~txn_map();
+ virtual ~txn_map();
const bool insert_txn_data(const std::string& xid, const txn_data& td) throw (jexception);
const txn_data_list get_tdata_list(const std::string& xid) throw (jexception);
Modified: store/trunk/cpp/lib/jrnl/txn_rec.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/txn_rec.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/txn_rec.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -66,7 +66,7 @@
// constructor used for write operations, where xid already exists
txn_rec(const u_int32_t magic, const u_int64_t rid, const void* const xidp,
const size_t xidlen);
- ~txn_rec();
+ virtual ~txn_rec();
// Prepare instance for use in reading data from journal
void reset(const u_int32_t magic);
Modified: store/trunk/cpp/lib/jrnl/wmgr.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/wmgr.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/wmgr.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -99,7 +99,7 @@
wmgr(jcntl* jc, enq_map& emap, txn_map& tmap, wrfc& wrfc,
std::deque<data_tok*>* const dtoklp, const u_int32_t max_dtokpp,
const u_int32_t max_iowait_us) throw (jexception);
- ~wmgr();
+ virtual ~wmgr();
void initialize(std::deque<data_tok*>* const dtoklp, aio_cb wr_cb,
const u_int32_t max_dtokpp, const u_int32_t max_iowait_us) throw (jexception);
Modified: store/trunk/cpp/lib/jrnl/wrfc.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/wrfc.hpp 2007-11-15 20:57:45 UTC (rev 1321)
+++ store/trunk/cpp/lib/jrnl/wrfc.hpp 2007-11-15 22:14:50 UTC (rev 1322)
@@ -60,7 +60,7 @@
public:
wrfc();
- ~wrfc();
+ virtual ~wrfc();
/**
* \brief Initialize the controller.
17 years, 1 month
rhmessaging commits: r1321 - mgmt/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-11-15 15:57:45 -0500 (Thu, 15 Nov 2007)
New Revision: 1321
Modified:
mgmt/cumin/python/cumin/queue.py
mgmt/cumin/python/cumin/queue.strings
mgmt/cumin/python/cumin/widgets.strings
Log:
Adds a purge action to queue that currently just fires the echo method
on the managed broker.
Modified: mgmt/cumin/python/cumin/queue.py
===================================================================
--- mgmt/cumin/python/cumin/queue.py 2007-11-15 19:52:03 UTC (rev 1320)
+++ mgmt/cumin/python/cumin/queue.py 2007-11-15 20:57:45 UTC (rev 1321)
@@ -99,6 +99,12 @@
self.add_mode(self.view)
self.set_view_mode(self.view)
+ self.purge = QueuePurge(app, "purge")
+ self.add_mode(self.purge)
+
+ def show_purge(self, session):
+ return self.show_mode(session, self.purge)
+
# XXX not awesome
def get_object(self, session, object):
queue = self.param.get(session)
@@ -188,6 +194,11 @@
def render_updated(self, session, queue):
return fmt_datetime(datetime.utcnow())
+ def render_purge_href(self, session, queue):
+ branch = session.branch()
+ self.parent().show_purge(branch)
+ return branch.marshal()
+
class QueueBindingSet(ItemSet):
def get_title(self, session, queue):
return "Exchange Bindings %s" % fmt_count(len(queue.bindings))
@@ -332,6 +343,33 @@
def render_cancel_content(self, session, queue):
return "No, Cancel"
+def doit(error, args):
+ print error, args
+
+class QueuePurge(CuminConfirmForm):
+ def get_title(self, session, queue):
+ return "Purge Queue '%s'" % queue.name
+
+ def process_cancel(self, session, queue):
+ branch = session.branch()
+ self.page().show_queue(branch, queue).show_view(branch)
+ self.page().set_redirect_url(session, branch.marshal())
+
+ def process_submit(self, session, queue):
+ print "open purge"
+
+ queue.mintVhost.mintBroker.echo(self.app.mint, doit, 1, "dude!")
+
+ print "close purge"
+
+ self.process_cancel(session, queue)
+
+ def render_submit_content(self, session, queue):
+ return "Yes, Purge Queue '%s'" % queue.name
+
+ def render_cancel_content(self, session, queue):
+ return "No, Cancel"
+
class QueueBindingAdd(CuminForm):
def __init__(self, app, name):
super(QueueBindingAdd, self).__init__(app, name)
Modified: mgmt/cumin/python/cumin/queue.strings
===================================================================
--- mgmt/cumin/python/cumin/queue.strings 2007-11-15 19:52:03 UTC (rev 1320)
+++ mgmt/cumin/python/cumin/queue.strings 2007-11-15 20:57:45 UTC (rev 1321)
@@ -179,7 +179,7 @@
<tr>
<th class="actions" colspan="2">
<h2>Act on This Queue:</h2>
- <a href="{href}">Purge Messages</a>
+ <a href="{purge_href}">Purge Messages</a>
</th>
</tr>
</table>
Modified: mgmt/cumin/python/cumin/widgets.strings
===================================================================
--- mgmt/cumin/python/cumin/widgets.strings 2007-11-15 19:52:03 UTC (rev 1320)
+++ mgmt/cumin/python/cumin/widgets.strings 2007-11-15 20:57:45 UTC (rev 1321)
@@ -4,7 +4,7 @@
<h1>{title}</h1>
</div>
<div class="body">
- <div>{confirm}</div>
+ <div>{submit}</div>
<div>{cancel}</div>
{hidden_inputs}
</div>
17 years, 1 month
rhmessaging commits: r1320 - in mgmt/cumin: python/cumin and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-11-15 14:52:03 -0500 (Thu, 15 Nov 2007)
New Revision: 1320
Modified:
mgmt/cumin/bin/cumin-test
mgmt/cumin/python/cumin/__init__.py
Log:
Adds the ability to connect a managed broker to the cumin-test
runtime.
Modified: mgmt/cumin/bin/cumin-test
===================================================================
--- mgmt/cumin/bin/cumin-test 2007-11-15 19:27:19 UTC (rev 1319)
+++ mgmt/cumin/bin/cumin-test 2007-11-15 19:52:03 UTC (rev 1320)
@@ -1,6 +1,21 @@
#!/usr/bin/env python
import sys, os
+
+def usage():
+ print """
+Usage: cumin-test OPTIONS
+Options:
+ --port PORT
+ --data DATABASE-URL
+ --broker BROKER
+ --bench [HITS]
+ --profile
+ --no-debug
+ --no-demo-data
+""",
+ sys.exit(1)
+
from ConfigParser import SafeConfigParser
def load_args(argv):
@@ -40,8 +55,8 @@
conn = connectionForURI(connuri)
sqlhub.processConnection = conn
except KeyError:
- print "No data source; use --data DATABASE-URL"
- sys.exit(1)
+ print "No data source"
+ usage()
from time import time
from wooly.devel import BenchmarkHarness
@@ -51,11 +66,13 @@
from cumin.demo import *
from cumin.model import *
-def do_main(port, bench_hits, debug=True, demodata=True):
+def do_main(port, broker, bench_hits, debug=True, demodata=True):
model = CuminModel()
-
app = Cumin(model)
+ if broker:
+ app.add_managed_broker(broker)
+
if demodata:
data = DemoData(model)
data.load()
@@ -72,6 +89,7 @@
server.run()
def main():
+ in_broker = args.get("broker")
in_port = int(args.get("port", 9090))
in_profile = "profile" in args
in_debug = "no-debug" not in args
@@ -109,7 +127,7 @@
stats.strip_dirs()
else:
try:
- do_main(in_port, in_bench, in_debug, in_demodata)
+ do_main(in_port, in_broker, in_bench, in_debug, in_demodata)
except KeyboardInterrupt:
pass
Modified: mgmt/cumin/python/cumin/__init__.py
===================================================================
--- mgmt/cumin/python/cumin/__init__.py 2007-11-15 19:27:19 UTC (rev 1319)
+++ mgmt/cumin/python/cumin/__init__.py 2007-11-15 19:52:03 UTC (rev 1320)
@@ -54,6 +54,10 @@
self.add_page(ClientXmlPage(self, "client.xml"))
self.add_page(ClientChartPage(self, "client.png"))
+ def add_managed_broker(self, broker):
+ host, port = broker.split(":")
+ self.mint.addManagedBroker(host, int(port))
+
class CuminServer(WebServer):
def __init__(self, port=9090):
model = CuminModel()
17 years, 1 month
rhmessaging commits: r1319 - in mgmt: mint and 2 other directories.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-11-15 14:27:19 -0500 (Thu, 15 Nov 2007)
New Revision: 1319
Modified:
mgmt/cumin/python/cumin/__init__.py
mgmt/cumin/python/cumin/broker.py
mgmt/cumin/python/cumin/client.py
mgmt/cumin/python/cumin/exchange.py
mgmt/cumin/python/cumin/formats.py
mgmt/cumin/python/cumin/measurement.py
mgmt/cumin/python/cumin/page.py
mgmt/cumin/python/cumin/parameters.py
mgmt/cumin/python/cumin/queue.py
mgmt/cumin/python/cumin/widgets.py
mgmt/mint/python/mint/schema.py
mgmt/mint/python/mint/schema.sql
mgmt/mint/schemaparser.py
mgmt/misc/mint-test.py
Log:
Big changes to use the mint model from Nuno instead of the dummy
model. With this commit, brokers and all objects under brokers use
the new model. The old model is still used for the grouping
constructs.
Also revs the schema (as of 14 Nov) and uncomments some lines in
schemaparser.py.
Modified: mgmt/cumin/python/cumin/__init__.py
===================================================================
--- mgmt/cumin/python/cumin/__init__.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/__init__.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -33,9 +33,9 @@
self.mint = MintModel()
try:
- sys = MgmtSystem.get(1)
+ sys = MintSystem.get(1)
except SQLObjectNotFound:
- sys = MgmtSystem()
+ sys = MintSystem()
sys.set(sysIdent=0, idOriginal=0)
self.model.sys = sys
Modified: mgmt/cumin/python/cumin/broker.py
===================================================================
--- mgmt/cumin/python/cumin/broker.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/broker.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -19,15 +19,15 @@
return "Brokers %s" % fmt_count(len(model.get_brokers()))
def get_items(self, session, model):
- return sorted_by(model.get_brokers())
+ return sorted_by(model.sys.brokers, "id")
def render_item_link(self, session, broker):
branch = session.branch()
self.page().show_broker(branch, broker).show_view(branch)
- return fmt_olink(branch, broker)
+ return fmt_olink(branch, broker, name=broker.port)
def render_item_group_link(self, session, broker):
- group = broker.get_broker_group()
+ group = None #broker.get_broker_group()
if group:
branch = session.branch()
@@ -37,7 +37,7 @@
return fmt_none()
def render_item_profile_link(self, session, broker):
- profile = broker.get_broker_profile()
+ profile = None #broker.get_broker_profile()
if profile:
branch = session.branch()
@@ -47,7 +47,7 @@
return fmt_none()
def render_item_cluster_link(self, session, broker):
- cluster = broker.get_broker_cluster()
+ cluster = None #broker.get_broker_cluster()
if cluster:
branch = session.branch()
@@ -57,7 +57,7 @@
return fmt_none()
def render_item_status(self, session, broker):
- return fmt_status(len(broker.errors), len(broker.warnings))
+ return None #fmt_status(len(broker.errors), len(broker.warnings))
def render_item_load(self, session, broker):
return "%.2f" % random()
@@ -141,7 +141,7 @@
return self.show_mode(session, self.client)
def get_title(self, session, broker):
- return "Broker '%s'" % broker.name
+ return "Broker '%s'" % broker.port
class BrokerConfigPropertyForm(CuminForm, Frame):
def __init__(self, app, name):
@@ -249,13 +249,13 @@
return self.tabs.show_mode(session, self.config)
def get_title(self, session, broker):
- return "Broker '%s'" % broker.name
+ return "Broker '%s'" % broker.port
def render_name(self, session, broker):
- return broker.name
+ return broker.port
def render_cluster_link(self, session, broker):
- cluster = broker.get_broker_cluster()
+ cluster = None #broker.get_broker_cluster()
if cluster:
branch = session.branch()
@@ -265,7 +265,7 @@
return fmt_none()
def render_profile_link(self, session, broker):
- profile = broker.get_broker_profile()
+ profile = None #broker.get_broker_profile()
if profile:
branch = session.branch()
@@ -279,27 +279,27 @@
class BrokerQueueTab(QueueSet):
def get_title(self, session, broker):
- count = len(broker.default_virtual_host.queue_items())
- return "Queues %s" % fmt_count(count)
+ vhost = self.get_object(session, broker)
+ return "Queues %s" % fmt_count(len(vhost.queues))
def get_object(self, session, broker):
- return broker.default_virtual_host
+ return MintVhost.selectBy(mintBrokerID=broker.id, name="/")[0]
class BrokerExchangeTab(ExchangeSet):
def get_title(self, session, broker):
- count = len(broker.default_virtual_host.exchange_items())
- return "Exchanges %s" % fmt_count(count)
+ vhost = self.get_object(session, broker)
+ return "Exchanges %s" % fmt_count(len(vhost.exchanges))
def get_object(self, session, broker):
- return broker.default_virtual_host
+ return MintVhost.selectBy(mintBrokerID=broker.id, name="/")[0]
class BrokerClientTab(ClientSet):
def get_title(self, session, broker):
- count = len(broker.default_virtual_host.client_items())
- return "Clients %s" % fmt_count(count)
+ vhost = self.get_object(session, broker)
+ return "Clients %s" % fmt_count(len(vhost.clients))
def get_object(self, session, broker):
- return broker.default_virtual_host
+ return MintVhost.selectBy(mintBrokerID=broker.id, name="/")[0]
class BrokerVirtualHostTab(VirtualHostSet):
def get_title(self, session, broker):
@@ -314,7 +314,7 @@
return "Configuration"
def get_items(self, session, broker):
- return sorted_by(broker.config_property_items())
+ return list() #XXX sorted_by(broker.config_property_items())
def maybe_highlight(self, value, comparedto):
if str(value) != str(comparedto):
@@ -366,6 +366,9 @@
class BrowserBrokers(BrokerSetForm):
def get_items(self, session, model):
+ return super(BrokerBrowser.BrowserBrokers, self).get_items \
+ (session, model)
+
brokers = sorted_by(model.get_brokers())
group = self.parent().group.get(session)
profile = self.parent().profile.get(session)
Modified: mgmt/cumin/python/cumin/client.py
===================================================================
--- mgmt/cumin/python/cumin/client.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/client.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -17,39 +17,39 @@
self.add_child(self.unit)
def get_title(self, session, vhost):
- return "Clients %s" % fmt_count(len(vhost.client_items()))
+ return "Clients %s" % fmt_count(len(vhost.clients))
def render_unit_plural(self, session, vhost):
return self.unit.get(session) == "b" and "Bytes" or "Msgs."
def get_items(self, session, vhost):
- return sorted_by(vhost.client_items(), "address")
+ return sorted_by(vhost.clients, "ipAddr")
def render_item_link(self, session, client):
branch = session.branch()
self.page().show_client(branch, client).show_view(branch)
- return fmt_olink(branch, client.address)
+ return fmt_olink(branch, client, name=client.ipAddr)
def render_item_sessions(self, session, client):
branch = session.branch()
frame = self.page().show_client(branch, client)
frame.show_view(branch).show_sessions(branch)
- return fmt_link(branch.marshal(), len(client.session_items()))
+ return fmt_link(branch.marshal(), len(client.sessions))
def render_item_produced(self, session, client):
unit = self.unit.get(session)
key = unit == "b" and "bytesProduced" or "msgsProduced"
- value = client.get_measurement(key).get_rate()
+ value = getattr(client.mintClientStats, key)
return fmt_rate(value, unit == "b" and "byte" or "msg", "sec")
def render_item_consumed(self, session, client):
unit = self.unit.get(session)
key = unit == "b" and "bytesConsumed" or "msgsConsumed"
- value = client.get_measurement(key).get_rate()
+ value = getattr(client.mintClientStats, key)
return fmt_rate(value, unit == "b" and "byte" or "msg", "sec")
def render_item_status(self, session, client):
- return fmt_status(len(client.errors), len(client.warnings))
+ return None #XXX fmt_status(len(client.errors), len(client.warnings))
class ClientFrame(CuminFrame):
def __init__(self, app, name):
@@ -64,23 +64,23 @@
self.set_view_mode(self.view)
def get_title(self, session, client):
- return "Client %s" % client.address
+ return "Client %s" % client.ipAddr
class ClientStatus(CuminStatus):
def render_messages_produced(self, session, client):
- value = client.get_measurement("msgsProduced").get_rate()
+ value = client.mintClientStats.msgsProduced
return fmt_rate(value, "msg", "sec")
def render_messages_consumed(self, session, client):
- value = client.get_measurement("msgsConsumed").get_rate()
+ value = client.mintClientStats.msgsConsumed
return fmt_rate(value, "msg", "sec")
def render_bytes_produced(self, session, client):
- value = client.get_measurement("bytesProduced").get_rate()
+ value = client.mintClientStats.bytesProduced
return fmt_rate(value, "byte", "sec")
def render_bytes_consumed(self, session, client):
- value = client.get_measurement("bytesConsumed").get_rate()
+ value = client.mintClientStats.bytesConsumed
return fmt_rate(value, "byte", "sec")
class ClientView(Widget):
@@ -102,13 +102,13 @@
return self.tabs.show_mode(session, self.sessions)
def get_title(self, session, client):
- return "Client '%s'" % client.address
+ return "Client '%s'" % client.ipAddr
def render_data_url(self, session, client):
return "client.xml?id=%i" % client.id
def render_address(self, session, client):
- return client.address
+ return client.ipAddr
def render_auth_id(self, session, client):
return "e50e7dcaa8d6a039a"
@@ -161,18 +161,18 @@
return session_.name
def render_item_remaining_lifespan(self, session, session_):
- value = session_.get_measurement("remainingLifespan").get_value()
+ value = session_.mintSessionStats.remainingLifespan
return fmt_duration(value)
def render_item_frames_outstanding(self, session, session_):
- return session_.get_measurement("framesOutstanding").get_value()
+ return session_.mintSessionStats.framesOutstanding
def render_item_attached(self, session, session_):
- result = session_.get_measurement("attached").get_value()
- return fmt_predicate(result)
+ value = session_.mintSessionStats.attached
+ return fmt_predicate(value)
def render_item_status(self, session, session_):
- return fmt_status(len(session_.errors), len(session_.warnings))
+ return None #XXX fmt_status(len(session_.errors), len(session_.warnings))
class ClientXmlPage(CuminXmlPage):
def __init__(self, app, name):
Modified: mgmt/cumin/python/cumin/exchange.py
===================================================================
--- mgmt/cumin/python/cumin/exchange.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/exchange.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -21,7 +21,7 @@
self.set_parameter(param)
def get_items(self, session, vhost):
- return sorted_by(vhost.exchange_items())
+ return sorted_by(vhost.exchanges)
def render_item_value(self, session, exchange):
return exchange.id
@@ -40,13 +40,13 @@
self.add_child(self.unit)
def get_title(self, session, vhost):
- return "Exchanges (%s)" % len(vhost.exchange_items())
+ return "Exchanges %s" % fmt_count(len(vhost.exchanges))
def render_unit_plural(self, session, vhost):
return self.unit.get(session) == "b" and "Bytes" or "Msgs."
def get_items(self, session, vhost):
- return sorted_by(vhost.exchange_items())
+ return sorted_by(vhost.exchanges)
def render_item_link(self, session, exchange):
branch = session.branch()
@@ -57,33 +57,33 @@
branch = session.branch()
frame = self.page().show_exchange(branch, exchange)
frame.show_view(branch).show_producers(branch)
- return fmt_link(branch.marshal(), len(exchange.producer_items()))
+ return fmt_link(branch.marshal(), len(exchange.producers))
def render_item_bindings(self, session, exchange):
branch = session.branch()
frame = self.page().show_exchange(branch, exchange)
frame.show_view(branch).show_bindings(branch)
- return fmt_link(branch.marshal(), len(exchange.binding_items()))
+ return fmt_link(branch.marshal(), len(exchange.bindings))
def render_item_received(self, session, exchange):
unit = self.unit.get(session)
key = unit == "b" and "byteReceives" or "msgReceives"
- value = exchange.get_measurement(key).get_rate()
+ value = getattr(exchange.mintExchangeStats, key)
return fmt_rate(value, unit == "b" and "byte" or "msg", "sec")
def render_item_routed(self, session, exchange):
unit = self.unit.get(session)
key = unit == "b" and "byteRoutes" or "msgRoutes"
- value = exchange.get_measurement(key).get_rate()
+ value = getattr(exchange.mintExchangeStats, key)
return fmt_rate(value, unit == "b" and "byte" or "msg", "sec")
def render_item_dropped(self, session, exchange):
unit = self.unit.get(session)
key = unit == "b" and "byteDrops" or "msgDrops"
- return exchange.get_measurement(key).get_value()
+ return getattr(exchange.mintExchangeStats, key)
def render_item_status(self, session, exchange):
- return fmt_status(len(exchange.errors), len(exchange.warnings))
+ return None #XXX fmt_status(len(exchange.errors), len(exchange.warnings))
def show_producers(page, session, exchange):
frame = page.show_exchange(session, exchange).show_view(session)
@@ -192,27 +192,27 @@
class ExchangeBindingSet(ItemSet):
def get_title(self, session, exchange):
- return "Queue Bindings %s" % fmt_count(len(exchange.binding_items()))
+ return "Queue Bindings %s" % fmt_count(len(exchange.bindings))
def get_items(self, session, exchange):
- return sorted_by(exchange.binding_items(), "id")
+ return sorted_by(exchange.bindings, "id")
def render_item_href(self, session, binding):
branch = session.branch()
- self.page().show_queue(branch, binding.queue)
+ self.page().show_queue(branch, binding.mintQueue)
return branch.marshal()
def render_item_name(self, session, binding):
- return binding.get_queue().name
+ return binding.mintQueue.name
def render_item_binding_key(self, session, binding):
- return binding.binding_key
+ return binding.bindingKey
def render_item_messages_matched(self, session, binding):
- return binding.get_measurement("msgMatched").get_value()
+ return binding.mintBindingStats.msgMatched
def render_item_messages_matched_rate(self, session, binding):
- value = binding.get_measurement("msgMatched").get_rate()
+ value = binding.mintBindingStats.msgMatched
return fmt_rate(value, "msg", "sec")
class ExchangeForm(CuminForm):
@@ -367,26 +367,26 @@
class ExchangeProducerSet(ItemSet):
def get_title(self, session, queue):
- return "Producers %s" % fmt_count(len(queue.producer_items()))
+ return "Producers %s" % fmt_count(len(queue.producers))
def get_items(self, session, queue):
- return sorted_by(queue.producer_items())
+ return sorted_by(queue.producers)
def render_item_name(self, session, producer):
return producer.name
def render_item_messages_produced(self, session, producer):
- return producer.get_measurement("msgsProduced").get_value()
+ return producer.mintProducerStats.msgsProduced
def render_item_messages_produced_rate(self, session, producer):
- value = producer.get_measurement("msgsProduced").get_rate()
+ value = producer.mintProducerStats.msgsProduced
return fmt_rate(value, "msg", "sec")
def render_item_bytes_produced(self, session, producer):
- return producer.get_measurement("bytesProduced").get_value()
+ return producer.mintProducerStats.bytesProduced
def render_item_bytes_produced_rate(self, session, producer):
- value = producer.get_measurement("bytesProduced").get_rate()
+ value = producer.mintProducerStats.bytesProduced
return fmt_rate(value, "byte", "sec")
class ExchangeXmlPage(CuminXmlPage):
Modified: mgmt/cumin/python/cumin/formats.py
===================================================================
--- mgmt/cumin/python/cumin/formats.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/formats.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -62,7 +62,7 @@
return "<a href=\"%s\"%s>%s</a>" % \
(href, class_ and " class=\"%s\" " % class_ or " ", content)
-def fmt_olink(session, object, selected=False):
+def fmt_olink(session, object, selected=False, name=None):
return fmt_link(session.marshal(),
- getattr(object, "name", object),
+ getattr(object, "name", name),
selected and "selected")
Modified: mgmt/cumin/python/cumin/measurement.py
===================================================================
--- mgmt/cumin/python/cumin/measurement.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/measurement.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -1,5 +1,6 @@
from wooly import *
from wooly.widgets import *
+from mint.schema import *
from widgets import *
from parameters import *
@@ -8,6 +9,205 @@
strings = StringCatalog(__file__)
+class Measure(object):
+ def __init__(self, name, unit, title=None, categories=()):
+ self.name = name
+ self.unit = unit
+ self.title = None
+ self.categories = categories
+
+ self.link_cb = None
+ self.highlow = False
+
+ def get(self, object):
+ return getattr(object, self.name)
+
+class MeasureMetadata(object):
+ def __init__(self):
+ self.queue = list()
+ self.exchange = list()
+ self.client = list()
+
+ m = Measure("consumers", "int")
+ m.title = "Consumers"
+ m.unit = "consumer"
+ m.categories = ("general")
+ m.highlow = True
+ self.queue.append(m)
+
+ m = Measure("bindings", "int")
+ m.title = "Bindings"
+ m.unit = "binding"
+ m.categories = ("general")
+ m.highlow = True
+ self.queue.append(m)
+
+ m = Measure("msgDepth", "int")
+ m.title = "Message Depth"
+ m.unit = "message"
+ m.categories = ("message", "general")
+ m.highlow = True
+ self.queue.append(m)
+
+ m = Measure("msgTotalEnqueues", "int")
+ m.title = "Msgs. Enqueued"
+ m.unit = "message"
+ m.categories = ("message", "general")
+ self.queue.append(m)
+
+ m = Measure("msgTotalDequeues", "int")
+ m.title = "Msgs. Dequeued"
+ m.unit = "message"
+ m.categories = ("message", "general")
+ self.queue.append(m)
+
+ m = Measure("byteDepth", "int")
+ m.title = "Byte Depth"
+ m.unit = "byte"
+ m.categories = ("byte", "general")
+ m.highlow = True
+ self.queue.append(m)
+
+ m = Measure("byteTotalEnqueues", "int")
+ m.title = "Bytes Enqueued"
+ m.unit = "byte"
+ m.categories = ("byte", "general")
+ self.queue.append(m)
+
+ m = Measure("byteTotalDequeues", "int")
+ m.title = "Bytes Dequeued"
+ m.unit = "byte"
+ m.categories = ("byte", "general")
+ self.queue.append(m)
+
+ m = Measure("unackedMessages", "int")
+ m.title = "Msgs. Unacked"
+ m.unit = "message"
+ m.categories = ("general")
+ self.queue.append(m)
+
+ # Disk
+
+ #m = Measure("diskPageSize", "int")
+ #m.title = "Page size"
+ #m.categories = ("disk")
+ #self.queue.append(m)
+
+ m = Measure("diskPages", "int")
+ m.title = "Disk Pages"
+ m.unit = "page"
+ m.categories = ("general")
+ self.queue.append(m)
+
+ #m = Measure("diskAvailableSize", "int")
+ #m.title = "Available size"
+ #m.categories = ("disk")
+ #self.queue.append(m)
+
+ # Transactional
+
+ m = Measure("msgTxnEnqueues", "int")
+ m.title = "Msgs. Enqueued"
+ m.unit = "message"
+ m.categories = ("message", "transactional")
+ self.queue.append(m)
+
+ m = Measure("msgTxnDequeues", "int")
+ m.title = "Msgs. Dequeued"
+ m.unit = "message"
+ m.categories = ("message", "transactional")
+ self.queue.append(m)
+
+ m = Measure("byteTxnEnqueues", "int")
+ m.title = "Bytes Enqueued"
+ m.unit = "byte"
+ m.categories = ("byte", "transactional")
+ self.queue.append(m)
+
+ m = Measure("byteTxnDequeues", "int")
+ m.title = "Bytes Dequeued"
+ m.unit = "byte"
+ m.categories = ("byte", "transactional")
+ self.queue.append(m)
+
+ m = Measure("enqueueTxnStarts", "int")
+ m.title = "Enq. Trans. Started"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ self.queue.append(m)
+
+ m = Measure("enqueueTxnCommits", "int")
+ m.title = "Enq. Trans. Committed"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ self.queue.append(m)
+
+ m = Measure("enqueueTxnRejects", "int")
+ m.title = "Enq. Trans. Rejected"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ self.queue.append(m)
+
+ m = Measure("enqueueTxnCount", "int")
+ m.title = "Enq. Trans. Pending"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ m.highlow = True
+ self.queue.append(m)
+
+ m = Measure("dequeueTxnStarts", "int")
+ m.title = "Deq. Trans. Started"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ self.queue.append(m)
+
+ m = Measure("dequeueTxnCommits", "int")
+ m.title = "Deq. Trans. Committed"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ self.queue.append(m)
+
+ m = Measure("dequeueTxnRejects", "int")
+ m.title = "Deq. Trans. Rejected"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ self.queue.append(m)
+
+ m = Measure("dequeueTxnCount", "int")
+ m.title = "Deq. Trans. Pending"
+ m.unit = "transaction"
+ m.categories = ("transaction")
+ m.highlow = True
+ self.queue.append(m)
+
+ # Persistent
+
+ m = Measure("msgPersistEnqueues", "int")
+ m.title = "Msgs. Enqueued"
+ m.unit = "message"
+ m.categories = ("message", "persistent")
+ self.queue.append(m)
+
+ m = Measure("msgPersistDequeues", "int")
+ m.title = "Msgs. Dequeued"
+ m.unit = "message"
+ m.categories = ("message", "persistent")
+ self.queue.append(m)
+
+ m = Measure("bytePersistEnqueues", "int")
+ m.title = "Bytes Enqueued"
+ m.unit = "byte"
+ m.categories = ("byte", "persistent")
+ self.queue.append(m)
+
+ m = Measure("bytePersistDequeues", "int")
+ m.title = "Bytes Dequeued"
+ m.unit = "byte"
+ m.categories = ("byte", "persistent")
+ self.queue.append(m)
+
+meta = MeasureMetadata()
+
class MeasurementSet(ItemSet):
unit_abbrevs = dict()
unit_abbrevs["message"] = "msg"
@@ -23,30 +223,45 @@
def get_items(self, session, object):
self.object.set(session, object);
-
- measures = list(object.measurements)
- for measure in object.measurements:
- if self.category not in measure.categories:
- measures.remove(measure)
+ if isinstance(object, MintQueue):
+ stats = object.mintQueueStats
+ elif isinstance(object, MintExchange):
+ stats = object.mintExchangeStats
+ elif isinstance(object, MintClient):
+ stats = object.mintClientStats
+ else:
+ raise Exception()
+ measures = list()
+
+ for measure in meta.queue:
+ if self.category in measure.categories:
+ measures.append((measure, stats))
+
return measures
- def render_item_title(self, session, measure):
+ def render_item_title(self, session, args):
+ measure, stats = args
return measure.title
- def render_item_name(self, session, measure):
+ def render_item_name(self, session, args):
+ measure, stats = args
return measure.name
- def render_item_value(self, session, measure):
+ def render_item_value(self, session, args):
+ measure, stats = args
+
if measure.link_cb:
branch = session.branch()
measure.link_cb(self.page(), branch, self.object.get(session))
- return fmt_link(branch.marshal(), measure.get_value())
+ return fmt_link(branch.marshal(), measure.get(stats))
else:
- return measure.get_value()
+ return measure.get(stats)
def render_item_extra(self, session, measure):
+ return None #XXX
+
if measure.highlow:
return "<small>high</small> <span>%i</span> <small>low</small> <span>%i</span>" \
% (measure.get_high() or 0, measure.get_low() or 0)
@@ -55,4 +270,4 @@
return fmt_rate(measure.get_rate(), unit, "sec")
def render_item_average(self, session, measure):
- return "%0.2f" % (sum(measure.values) / float(len(measure.values)))
+ return None #XXX "%0.2f" % (sum(measure.values) / float(len(measure.values)))
Modified: mgmt/cumin/python/cumin/page.py
===================================================================
--- mgmt/cumin/python/cumin/page.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/page.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -75,7 +75,7 @@
return self.show_mode(session, self.view)
def show_broker(self, session, broker):
- cluster = broker.get_broker_cluster()
+ cluster = None #broker.get_broker_cluster()
if cluster:
frame = self.show_broker_cluster(session, cluster)
@@ -118,7 +118,7 @@
return self.set_current_frame(session, frame)
def show_queue(self, session, queue):
- broker = queue.get_virtual_host().get_broker()
+ broker = queue.mintVhost.mintBroker
frame = self.show_broker(session, broker)
frame = frame.show_queue(session, queue)
return self.set_current_frame(session, frame)
@@ -183,7 +183,7 @@
class BrokerTab(BrokerBrowser):
def get_title(self, session, model):
- return "Brokers %s" % fmt_count(len(model.get_brokers()))
+ return "Brokers %s" % fmt_count(len(model.sys.brokers))
class BrokerProfileTab(BrokerProfileSet):
def get_title(self, session, model):
Modified: mgmt/cumin/python/cumin/parameters.py
===================================================================
--- mgmt/cumin/python/cumin/parameters.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/parameters.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -1,4 +1,5 @@
from wooly import *
+from mint.schema import *
class BrokerClusterParameter(Parameter):
def do_unmarshal(self, string):
@@ -16,7 +17,7 @@
class BrokerParameter(Parameter):
def do_unmarshal(self, string):
- return self.app.model.get_broker(int(string))
+ return MintBroker.get(int(string))
def do_marshal(self, broker):
return str(broker.id)
@@ -30,7 +31,7 @@
class ClientParameter(Parameter):
def do_unmarshal(self, string):
- return self.app.model.get_client(int(string))
+ return MintClient.get(int(string))
def do_marshal(self, client):
return str(client.id)
@@ -44,14 +45,14 @@
class ExchangeParameter(Parameter):
def do_unmarshal(self, string):
- return self.app.model.get_exchange(int(string))
+ return MintExchange.get(int(string))
def do_marshal(self, exchange):
return str(exchange.id)
class QueueParameter(Parameter):
def do_unmarshal(self, string):
- return self.app.model.get_queue(int(string))
+ return MintQueue.get(int(string))
def do_marshal(self, queue):
return str(queue.id)
Modified: mgmt/cumin/python/cumin/queue.py
===================================================================
--- mgmt/cumin/python/cumin/queue.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/queue.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -21,7 +21,7 @@
self.add_child(self.unit)
def get_title(self, session, vhost):
- return "Queues (%s)" % len(vhost.queue_items())
+ return "Queues (%s)" % len(vhost.queues)
def render_unit_singular(self, session, vhost):
return self.unit.get(session) == "b" and "Byte" or "Msg."
@@ -30,7 +30,7 @@
return self.unit.get(session) == "b" and "Bytes" or "Msgs."
def get_items(self, session, vhost):
- return sorted_by(vhost.queue_items())
+ return sorted_by(vhost.queues)
def render_item_link(self, session, queue):
branch = session.branch()
@@ -41,41 +41,43 @@
return queue.name
def render_item_consumers(self, session, queue):
+ return None # XXX
+
branch = session.branch()
frame = self.page().show_queue(branch, queue)
frame.show_view(branch).show_consumers(branch)
- return fmt_link(branch.marshal(), len(queue.consumer_items()))
+ return fmt_link(branch.marshal(), len(queue.consumers))
def render_item_bindings(self, session, queue):
branch = session.branch()
frame = self.page().show_queue(branch, queue)
frame.show_view(branch).show_bindings(branch)
- return fmt_link(branch.marshal(), len(queue.binding_items()))
+ return fmt_link(branch.marshal(), len(queue.bindings))
def render_item_enqueued(self, session, queue):
unit = self.unit.get(session)
key = unit == "b" and "byteTotalEnqueues" or "msgTotalEnqueues"
- value = queue.get_measurement(key).get_rate()
+ value = getattr(queue.mintQueueStats, key)
return fmt_rate(value, unit == "b" and "byte" or "msg", "sec")
def render_item_dequeued(self, session, queue):
unit = self.unit.get(session)
key = unit == "b" and "byteTotalDequeues" or "msgTotalDequeues"
- value = queue.get_measurement(key).get_rate()
+ value = getattr(queue.mintQueueStats, key)
return fmt_rate(value, unit == "b" and "byte" or "msg", "sec")
def render_item_depth(self, session, queue):
key = self.unit.get(session) == "b" and "byteDepth" or "msgDepth"
- return queue.get_measurement(key).get_value()
+ return getattr(queue.mintQueueStats, key)
def render_item_depth_accel(self, session, queue):
unit = self.unit.get(session)
key = unit == "b" and "byteDepth" or "msgDepth"
- value = queue.get_measurement(key).get_rate()
+ value = getattr(queue.mintQueueStats, key)
return fmt_rate(value, unit == "b" and "byte" or "msg", "sec")
def render_item_status(self, session, queue):
- return fmt_status(len(queue.errors), len(queue.warnings))
+ return None #fmt_status(len(queue.errors), len(queue.warnings))
def show_consumers(page, session, queue):
frame = page.show_queue(session, queue).show_view(session)
@@ -100,8 +102,8 @@
# XXX not awesome
def get_object(self, session, object):
queue = self.param.get(session)
- queue.get_measurement("consumers").link_cb = show_consumers
- queue.get_measurement("bindings").link_cb = show_bindings
+ #queue_stat(queue, "consumers").link_cb = show_consumers
+ #queue_stat(queue, "bindings").link_cb = show_bindings
return queue
def get_title(self, session, queue):
@@ -109,36 +111,36 @@
class QueueStatus(CuminStatus):
def render_consumers(self, session, queue):
- return queue.get_measurement("consumers").get_value()
+ return queue.mintQueueStats.consumers
def render_messages_enqueued(self, session, queue):
- value = queue.get_measurement("msgTotalEnqueues").get_rate()
+ value = queue.mintQueueStats.msgTotalEnqueues
return fmt_rate(value, "msg", "sec")
def render_messages_dequeued(self, session, queue):
- value = queue.get_measurement("msgTotalDequeues").get_rate()
+ value = queue.mintQueueStats.msgTotalDequeues
return fmt_rate(value, "msg", "sec")
def render_message_depth(self, session, queue):
- return queue.get_measurement("msgDepth").get_value()
+ return queue.mintQueueStats.msgDepth
def render_message_depth_accel(self, session, queue):
- value = queue.get_measurement("msgDepth").get_rate()
+ value = queue.mintQueueStats.msgDepth
return fmt_rate(value, "msg", "sec")
def render_bytes_enqueued(self, session, queue):
- value = queue.get_measurement("byteTotalEnqueues").get_rate()
+ value = queue.mintQueueStats.byteTotalEnqueues
return fmt_rate(value, "byte", "sec")
def render_bytes_dequeued(self, session, queue):
- value = queue.get_measurement("byteTotalDequeues").get_rate()
+ value = queue.mintQueueStats.byteTotalDequeues
return fmt_rate(value, "byte", "sec")
def render_byte_depth(self, session, queue):
- return queue.get_measurement("byteDepth").get_value()
+ return queue.mintQueueStats.byteDepth
def render_byte_depth_accel(self, session, queue):
- value = queue.get_measurement("byteDepth").get_rate()
+ value = queue.mintQueueStats.byteDepth
return fmt_rate(value, "byte", "sec")
class QueueView(Widget):
@@ -175,10 +177,10 @@
return queue.name
def render_durable(self, session, queue):
- return fmt_predicate(queue.is_durable)
+ return fmt_predicate(queue.durable)
def render_exclusive(self, session, queue):
- return fmt_predicate(queue.is_exclusive)
+ return fmt_predicate(queue.exclusive)
def render_created_deleted(self, session, queue):
return "%s - %s" % (fmt_datetime(datetime.utcnow()), "")
@@ -188,10 +190,10 @@
class QueueBindingSet(ItemSet):
def get_title(self, session, queue):
- return "Exchange Bindings %s" % fmt_count(len(queue.binding_items()))
+ return "Exchange Bindings %s" % fmt_count(len(queue.bindings))
def get_items(self, session, queue):
- return sorted_by(queue.binding_items(), "id")
+ return sorted_by(queue.bindings, "id")
def render_item_href(self, session, binding):
branch = session.branch()
@@ -440,35 +442,35 @@
return "queue.png?id=%i;m=consumers" % queue.id
def render_transactions_chart_url(self, session, queue):
- return "queue.png?id=%i;m=enqueueTxCount;m=dequeueTxCount" \
+ return "queue.png?id=%i;m=enqueueTxnCount;m=dequeueTxnCount" \
% queue.id
class QueueConsumerSet(ItemSet):
def get_title(self, session, queue):
- return "Consumers %s" % fmt_count(len(queue.consumer_items()))
+ return "Consumers" #XXX %s" % fmt_count(len(queue.consumers))
def get_items(self, session, queue):
- return sorted_by(queue.consumer_items())
+ return list() #XXX sorted_by(queue.consumers)
def render_item_name(self, session, consumer):
return consumer.name
def render_item_messages_consumed(self, session, consumer):
- return consumer.get_measurement("msgsConsumed").get_value()
+ return consumer.mintConsumerStats.msgsConsumed
def render_item_messages_consumed_rate(self, session, consumer):
- value = consumer.get_measurement("msgsConsumed").get_rate()
+ value = consumer.mintConsumerStats.msgsConsumed
return fmt_rate(value, "msg", "sec")
def render_item_bytes_consumed(self, session, consumer):
- return consumer.get_measurement("bytesConsumed").get_value()
+ return consumer.mintConsumerStats.bytesConsumed
def render_item_bytes_consumed_rate(self, session, consumer):
- value = consumer.get_measurement("bytesConsumed").get_rate()
+ value = consumer.mintConsumerStats.bytesConsumed
return fmt_rate(value, "byte", "sec")
def render_item_unacked_messages(self, session, consumer):
- return consumer.get_measurement("unackedMessages").get_value()
+ return consumer.mintConsumerStats.unackedMessages
class QueueXmlPage(CuminXmlPage):
def __init__(self, app, name):
Modified: mgmt/cumin/python/cumin/widgets.py
===================================================================
--- mgmt/cumin/python/cumin/widgets.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/cumin/python/cumin/widgets.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -106,19 +106,24 @@
class CuminStatus(Widget):
def render_class(self, session, object):
- if object.errors:
- return "mstatus red"
- elif object.warnings:
- return "mstatus yellow"
+ if hasattr(object, "errors"):
+ if object.errors:
+ return "mstatus red"
+ elif object.warnings:
+ return "mstatus yellow"
+ else:
+ return "mstatus green"
else:
return "mstatus green"
def render_status_info(self, session, object):
- error_count = len(object.errors)
- warning_count = len(object.warnings)
+ if hasattr(object, "errors"):
+ error_count = len(object.errors)
+ warning_count = len(object.warnings)
- return "%i error%s, %i warning%s" % (error_count, ess(error_count),
- warning_count, ess(warning_count))
+ return "%i error%s, %i warning%s" % \
+ (error_count, ess(error_count),
+ warning_count, ess(warning_count))
class CuminChartPage(Page):
def __init__(self, app, name):
Modified: mgmt/mint/python/mint/schema.py
===================================================================
--- mgmt/mint/python/mint/schema.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/mint/python/mint/schema.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -1,22 +1,22 @@
from sqlobject import *
-class MgmtSystemStats(SQLObject):
+class MintSystemStats(SQLObject):
class sqlmeta:
fromDatabase = True
-class MgmtSystem(SQLObject):
+class MintSystem(SQLObject):
schemaId = 1
schemaName = "system"
managedBroker = None
class sqlmeta:
fromDatabase = True
- _SO_class_Mgmt_system_stats = None
+ _SO_class_Mint_system_stats = MintSystemStats
-class MgmtBrokerStats(SQLObject):
+class MintBrokerStats(SQLObject):
class sqlmeta:
fromDatabase = True
-class MgmtBroker(SQLObject):
+class MintBroker(SQLObject):
schemaId = 2
schemaName = "broker"
managedBroker = None
@@ -42,31 +42,31 @@
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "crash", args=actualArgs, packageName="qpid")
- _SO_class_Mgmt_broker_stats = None
- _SO_class_Mgmt_system = None
+ _SO_class_Mint_broker_stats = MintBrokerStats
+ _SO_class_Mint_system = MintSystem
-class MgmtVhostStats(SQLObject):
+class MintVhostStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtSystem.sqlmeta.addJoin(MultipleJoin('MgmtBroker', joinMethodName='allBrokers'))
+MintSystem.sqlmeta.addJoin(MultipleJoin('MintBroker', joinMethodName='brokers'))
-class MgmtVhost(SQLObject):
+class MintVhost(SQLObject):
schemaId = 3
schemaName = "vhost"
managedBroker = None
class sqlmeta:
fromDatabase = True
- _SO_class_Mgmt_vhost_stats = None
- _SO_class_Mgmt_broker = None
+ _SO_class_Mint_vhost_stats = MintVhostStats
+ _SO_class_Mint_broker = MintBroker
-class MgmtQueueStats(SQLObject):
+class MintQueueStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtBroker.sqlmeta.addJoin(MultipleJoin('MgmtVhost', joinMethodName='allVhosts'))
+MintBroker.sqlmeta.addJoin(MultipleJoin('MintVhost', joinMethodName='vhosts'))
-class MgmtQueue(SQLObject):
+class MintQueue(SQLObject):
schemaId = 4
schemaName = "queue"
managedBroker = None
@@ -83,48 +83,48 @@
actualArgs["pages"] = pages
methodId = model.registerCallback(callbackMethod)
self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "increaseDiskSize", args=actualArgs, packageName="qpid")
- _SO_class_Mgmt_queue_stats = None
- _SO_class_Mgmt_vhost = None
+ _SO_class_Mint_queue_stats = MintQueueStats
+ _SO_class_Mint_vhost = MintVhost
-class MgmtExchangeStats(SQLObject):
+class MintExchangeStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtVhost.sqlmeta.addJoin(MultipleJoin('MgmtQueue', joinMethodName='allQueues'))
+MintVhost.sqlmeta.addJoin(MultipleJoin('MintQueue', joinMethodName='queues'))
-class MgmtExchange(SQLObject):
+class MintExchange(SQLObject):
schemaId = 5
schemaName = "exchange"
managedBroker = None
class sqlmeta:
fromDatabase = True
- _SO_class_Mgmt_exchange_stats = None
- _SO_class_Mgmt_vhost = None
+ _SO_class_Mint_exchange_stats = MintExchangeStats
+ _SO_class_Mint_vhost = MintVhost
-class MgmtBindingStats(SQLObject):
+class MintBindingStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtVhost.sqlmeta.addJoin(MultipleJoin('MgmtExchange', joinMethodName='allExchanges'))
+MintVhost.sqlmeta.addJoin(MultipleJoin('MintExchange', joinMethodName='exchanges'))
-class MgmtBinding(SQLObject):
+class MintBinding(SQLObject):
schemaId = 6
schemaName = "binding"
managedBroker = None
class sqlmeta:
fromDatabase = True
- _SO_class_Mgmt_binding_stats = None
- _SO_class_Mgmt_queue = None
- _SO_class_Mgmt_exchange = None
+ _SO_class_Mint_binding_stats = MintBindingStats
+ _SO_class_Mint_queue = MintQueue
+ _SO_class_Mint_exchange = MintExchange
-class MgmtClientStats(SQLObject):
+class MintClientStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtQueue.sqlmeta.addJoin(MultipleJoin('MgmtBinding', joinMethodName='allBindings'))
-MgmtExchange.sqlmeta.addJoin(MultipleJoin('MgmtBinding', joinMethodName='allBindings'))
+MintQueue.sqlmeta.addJoin(MultipleJoin('MintBinding', joinMethodName='bindings'))
+MintExchange.sqlmeta.addJoin(MultipleJoin('MintBinding', joinMethodName='bindings'))
-class MgmtClient(SQLObject):
+class MintClient(SQLObject):
schemaId = 7
schemaName = "client"
managedBroker = None
@@ -138,16 +138,16 @@
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "detach", args=actualArgs, packageName="qpid")
- _SO_class_Mgmt_client_stats = None
- _SO_class_Mgmt_vhost = None
+ _SO_class_Mint_client_stats = MintClientStats
+ _SO_class_Mint_vhost = MintVhost
-class MgmtSessionStats(SQLObject):
+class MintSessionStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtVhost.sqlmeta.addJoin(MultipleJoin('MgmtClient', joinMethodName='allClients'))
+MintVhost.sqlmeta.addJoin(MultipleJoin('MintClient', joinMethodName='clients'))
-class MgmtSession(SQLObject):
+class MintSession(SQLObject):
schemaId = 8
schemaName = "session"
managedBroker = None
@@ -169,18 +169,18 @@
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "close", args=actualArgs, packageName="qpid")
- _SO_class_Mgmt_session_stats = None
- _SO_class_Mgmt_vhost = None
- _SO_class_Mgmt_client = None
+ _SO_class_Mint_session_stats = MintSessionStats
+ _SO_class_Mint_vhost = MintVhost
+ _SO_class_Mint_client = MintClient
-class MgmtDestinationStats(SQLObject):
+class MintDestinationStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtVhost.sqlmeta.addJoin(MultipleJoin('MgmtSession', joinMethodName='allSessions'))
-MgmtClient.sqlmeta.addJoin(MultipleJoin('MgmtSession', joinMethodName='allSessions'))
+MintVhost.sqlmeta.addJoin(MultipleJoin('MintSession', joinMethodName='sessions'))
+MintClient.sqlmeta.addJoin(MultipleJoin('MintSession', joinMethodName='sessions'))
-class MgmtDestination(SQLObject):
+class MintDestination(SQLObject):
schemaId = 9
schemaName = "destination"
managedBroker = None
@@ -200,33 +200,33 @@
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "start", args=actualArgs, packageName="qpid")
- _SO_class_Mgmt_destination_stats = None
- _SO_class_Mgmt_session = None
+ _SO_class_Mint_destination_stats = MintDestinationStats
+ _SO_class_Mint_session = MintSession
-class MgmtProducerStats(SQLObject):
+class MintProducerStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtSession.sqlmeta.addJoin(MultipleJoin('MgmtDestination', joinMethodName='allDestinations'))
+MintSession.sqlmeta.addJoin(MultipleJoin('MintDestination', joinMethodName='destinations'))
-class MgmtProducer(SQLObject):
+class MintProducer(SQLObject):
schemaId = 10
schemaName = "producer"
managedBroker = None
class sqlmeta:
fromDatabase = True
- _SO_class_Mgmt_producer_stats = None
- _SO_class_Mgmt_destination = None
- _SO_class_Mgmt_exchange = None
+ _SO_class_Mint_producer_stats = MintProducerStats
+ _SO_class_Mint_destination = MintDestination
+ _SO_class_Mint_exchange = MintExchange
-class MgmtConsumerStats(SQLObject):
+class MintConsumerStats(SQLObject):
class sqlmeta:
fromDatabase = True
-MgmtDestination.sqlmeta.addJoin(MultipleJoin('MgmtProducer', joinMethodName='allProducers'))
-MgmtExchange.sqlmeta.addJoin(MultipleJoin('MgmtProducer', joinMethodName='allProducers'))
+MintDestination.sqlmeta.addJoin(MultipleJoin('MintProducer', joinMethodName='producers'))
+MintExchange.sqlmeta.addJoin(MultipleJoin('MintProducer', joinMethodName='producers'))
-class MgmtConsumer(SQLObject):
+class MintConsumer(SQLObject):
schemaId = 11
schemaName = "consumer"
managedBroker = None
@@ -236,6 +236,6 @@
actualArgs = dict()
methodId = model.registerCallback(callbackMethod)
self.managedBroker.method(methodId, self.idOriginal, self.schemaName, "close", args=actualArgs, packageName="qpid")
- _SO_class_Mgmt_consumer_stats = None
- _SO_class_Mgmt_destination = None
- _SO_class_Mgmt_queue = None
+ _SO_class_Mint_consumer_stats = MintConsumerStats
+ _SO_class_Mint_destination = MintDestination
+ _SO_class_Mint_queue = MintQueue
Modified: mgmt/mint/python/mint/schema.sql
===================================================================
--- mgmt/mint/python/mint/schema.sql 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/mint/python/mint/schema.sql 2007-11-15 19:27:19 UTC (rev 1319)
@@ -1,33 +1,33 @@
-CREATE TABLE mgmt_system (
+CREATE TABLE mint_system (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_system_stats_id BIGINT,
+ mint_system_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
sys_ident VARCHAR(1000)
);
-CREATE INDEX mgmt_system_sys_ident_index ON mgmt_system(sys_ident);
+CREATE INDEX mint_system_sys_ident_index ON mint_system(sys_ident);
-CREATE TABLE mgmt_system_stats (
+CREATE TABLE mint_system_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_system_id BIGINT REFERENCES mgmt_system ,
+ mint_system_id BIGINT REFERENCES mint_system ,
rec_time TIMESTAMP
);
-ALTER TABLE mgmt_system ADD FOREIGN KEY (mgmt_system_stats_id) REFERENCES mgmt_system_stats;
+ALTER TABLE mint_system ADD FOREIGN KEY (mint_system_stats_id) REFERENCES mint_system_stats;
-CREATE TABLE mgmt_broker (
+CREATE TABLE mint_broker (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_broker_stats_id BIGINT,
+ mint_broker_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_system_id BIGINT REFERENCES mgmt_system,
+ mint_system_id BIGINT REFERENCES mint_system,
port INT2 ,
worker_threads INT2 ,
max_conns INT2 ,
@@ -42,47 +42,47 @@
version VARCHAR(1000)
);
-CREATE INDEX mgmt_broker_port_index ON mgmt_broker(port);
+CREATE INDEX mint_broker_port_index ON mint_broker(port);
-CREATE TABLE mgmt_broker_stats (
+CREATE TABLE mint_broker_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_broker_id BIGINT REFERENCES mgmt_broker ,
+ mint_broker_id BIGINT REFERENCES mint_broker ,
rec_time TIMESTAMP
);
-ALTER TABLE mgmt_broker ADD FOREIGN KEY (mgmt_broker_stats_id) REFERENCES mgmt_broker_stats;
+ALTER TABLE mint_broker ADD FOREIGN KEY (mint_broker_stats_id) REFERENCES mint_broker_stats;
-CREATE TABLE mgmt_vhost (
+CREATE TABLE mint_vhost (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_vhost_stats_id BIGINT,
+ mint_vhost_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_broker_id BIGINT REFERENCES mgmt_broker,
+ mint_broker_id BIGINT REFERENCES mint_broker,
name VARCHAR(1000)
);
-CREATE INDEX mgmt_vhost_name_index ON mgmt_vhost(name);
+CREATE INDEX mint_vhost_name_index ON mint_vhost(name);
-CREATE TABLE mgmt_vhost_stats (
+CREATE TABLE mint_vhost_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_vhost_id BIGINT REFERENCES mgmt_vhost ,
+ mint_vhost_id BIGINT REFERENCES mint_vhost ,
rec_time TIMESTAMP
);
-ALTER TABLE mgmt_vhost ADD FOREIGN KEY (mgmt_vhost_stats_id) REFERENCES mgmt_vhost_stats;
+ALTER TABLE mint_vhost ADD FOREIGN KEY (mint_vhost_stats_id) REFERENCES mint_vhost_stats;
-CREATE TABLE mgmt_queue (
+CREATE TABLE mint_queue (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_queue_stats_id BIGINT,
+ mint_queue_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_vhost_id BIGINT REFERENCES mgmt_vhost,
+ mint_vhost_id BIGINT REFERENCES mint_vhost,
name VARCHAR(1000) ,
durable BOOLEAN ,
auto_delete BOOLEAN ,
@@ -90,12 +90,12 @@
page_memory_limit INT4
);
-CREATE INDEX mgmt_queue_name_index ON mgmt_queue(name);
+CREATE INDEX mint_queue_name_index ON mint_queue(name);
-CREATE TABLE mgmt_queue_stats (
+CREATE TABLE mint_queue_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_queue_id BIGINT REFERENCES mgmt_queue ,
+ mint_queue_id BIGINT REFERENCES mint_queue ,
rec_time TIMESTAMP,
disk_page_size INT4 ,
disk_pages INT4 ,
@@ -141,26 +141,26 @@
unacked_messages_low INT4
);
-ALTER TABLE mgmt_queue ADD FOREIGN KEY (mgmt_queue_stats_id) REFERENCES mgmt_queue_stats;
+ALTER TABLE mint_queue ADD FOREIGN KEY (mint_queue_stats_id) REFERENCES mint_queue_stats;
-CREATE TABLE mgmt_exchange (
+CREATE TABLE mint_exchange (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_exchange_stats_id BIGINT,
+ mint_exchange_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_vhost_id BIGINT REFERENCES mgmt_vhost,
+ mint_vhost_id BIGINT REFERENCES mint_vhost,
name VARCHAR(1000) ,
type VARCHAR(1000)
);
-CREATE INDEX mgmt_exchange_name_index ON mgmt_exchange(name);
+CREATE INDEX mint_exchange_name_index ON mint_exchange(name);
-CREATE TABLE mgmt_exchange_stats (
+CREATE TABLE mint_exchange_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_exchange_id BIGINT REFERENCES mgmt_exchange ,
+ mint_exchange_id BIGINT REFERENCES mint_exchange ,
rec_time TIMESTAMP,
producers INT4 ,
producers_high INT4 ,
@@ -176,50 +176,50 @@
byte_routes INT8
);
-ALTER TABLE mgmt_exchange ADD FOREIGN KEY (mgmt_exchange_stats_id) REFERENCES mgmt_exchange_stats;
+ALTER TABLE mint_exchange ADD FOREIGN KEY (mint_exchange_stats_id) REFERENCES mint_exchange_stats;
-CREATE TABLE mgmt_binding (
+CREATE TABLE mint_binding (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_binding_stats_id BIGINT,
+ mint_binding_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_queue_id BIGINT REFERENCES mgmt_queue,
- mgmt_exchange_id BIGINT REFERENCES mgmt_exchange,
+ mint_queue_id BIGINT REFERENCES mint_queue,
+ mint_exchange_id BIGINT REFERENCES mint_exchange,
binding_key VARCHAR(1000)
);
-CREATE TABLE mgmt_binding_stats (
+CREATE TABLE mint_binding_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_binding_id BIGINT REFERENCES mgmt_binding ,
+ mint_binding_id BIGINT REFERENCES mint_binding ,
rec_time TIMESTAMP,
msg_matched INT8
);
-ALTER TABLE mgmt_binding ADD FOREIGN KEY (mgmt_binding_stats_id) REFERENCES mgmt_binding_stats;
+ALTER TABLE mint_binding ADD FOREIGN KEY (mint_binding_stats_id) REFERENCES mint_binding_stats;
-CREATE TABLE mgmt_client (
+CREATE TABLE mint_client (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_client_stats_id BIGINT,
+ mint_client_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_vhost_id BIGINT REFERENCES mgmt_vhost,
+ mint_vhost_id BIGINT REFERENCES mint_vhost,
ip_addr INT4 ,
port INT2
);
-CREATE INDEX mgmt_client_ip_addr_index ON mgmt_client(ip_addr);
+CREATE INDEX mint_client_ip_addr_index ON mint_client(ip_addr);
-CREATE INDEX mgmt_client_port_index ON mgmt_client(port);
+CREATE INDEX mint_client_port_index ON mint_client(port);
-CREATE TABLE mgmt_client_stats (
+CREATE TABLE mint_client_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_client_id BIGINT REFERENCES mgmt_client ,
+ mint_client_id BIGINT REFERENCES mint_client ,
rec_time TIMESTAMP,
auth_identity VARCHAR(1000) ,
msgs_produced INT8 ,
@@ -228,52 +228,52 @@
bytes_consumed INT8
);
-ALTER TABLE mgmt_client ADD FOREIGN KEY (mgmt_client_stats_id) REFERENCES mgmt_client_stats;
+ALTER TABLE mint_client ADD FOREIGN KEY (mint_client_stats_id) REFERENCES mint_client_stats;
-CREATE TABLE mgmt_session (
+CREATE TABLE mint_session (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_session_stats_id BIGINT,
+ mint_session_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_vhost_id BIGINT REFERENCES mgmt_vhost,
+ mint_vhost_id BIGINT REFERENCES mint_vhost,
name VARCHAR(1000) ,
- mgmt_client_id BIGINT REFERENCES mgmt_client,
+ mint_client_id BIGINT REFERENCES mint_client,
detached_lifespan INT4
);
-CREATE INDEX mgmt_session_name_index ON mgmt_session(name);
+CREATE INDEX mint_session_name_index ON mint_session(name);
-CREATE TABLE mgmt_session_stats (
+CREATE TABLE mint_session_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_session_id BIGINT REFERENCES mgmt_session ,
+ mint_session_id BIGINT REFERENCES mint_session ,
rec_time TIMESTAMP,
attached BOOLEAN ,
remaining_lifespan INT4 ,
frames_outstanding INT4
);
-ALTER TABLE mgmt_session ADD FOREIGN KEY (mgmt_session_stats_id) REFERENCES mgmt_session_stats;
+ALTER TABLE mint_session ADD FOREIGN KEY (mint_session_stats_id) REFERENCES mint_session_stats;
-CREATE TABLE mgmt_destination (
+CREATE TABLE mint_destination (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_destination_stats_id BIGINT,
+ mint_destination_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_session_id BIGINT REFERENCES mgmt_session,
+ mint_session_id BIGINT REFERENCES mint_session,
name VARCHAR(1000)
);
-CREATE INDEX mgmt_destination_name_index ON mgmt_destination(name);
+CREATE INDEX mint_destination_name_index ON mint_destination(name);
-CREATE TABLE mgmt_destination_stats (
+CREATE TABLE mint_destination_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_destination_id BIGINT REFERENCES mgmt_destination ,
+ mint_destination_id BIGINT REFERENCES mint_destination ,
rec_time TIMESTAMP,
flow_mode INT2 ,
max_msg_credits INT4 ,
@@ -282,45 +282,45 @@
byte_credits INT4
);
-ALTER TABLE mgmt_destination ADD FOREIGN KEY (mgmt_destination_stats_id) REFERENCES mgmt_destination_stats;
+ALTER TABLE mint_destination ADD FOREIGN KEY (mint_destination_stats_id) REFERENCES mint_destination_stats;
-CREATE TABLE mgmt_producer (
+CREATE TABLE mint_producer (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_producer_stats_id BIGINT,
+ mint_producer_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_destination_id BIGINT REFERENCES mgmt_destination,
- mgmt_exchange_id BIGINT REFERENCES mgmt_exchange
+ mint_destination_id BIGINT REFERENCES mint_destination,
+ mint_exchange_id BIGINT REFERENCES mint_exchange
);
-CREATE TABLE mgmt_producer_stats (
+CREATE TABLE mint_producer_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_producer_id BIGINT REFERENCES mgmt_producer ,
+ mint_producer_id BIGINT REFERENCES mint_producer ,
rec_time TIMESTAMP,
msgs_produced INT8 ,
bytes_produced INT8
);
-ALTER TABLE mgmt_producer ADD FOREIGN KEY (mgmt_producer_stats_id) REFERENCES mgmt_producer_stats;
+ALTER TABLE mint_producer ADD FOREIGN KEY (mint_producer_stats_id) REFERENCES mint_producer_stats;
-CREATE TABLE mgmt_consumer (
+CREATE TABLE mint_consumer (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_consumer_stats_id BIGINT,
+ mint_consumer_stats_id BIGINT,
rec_time TIMESTAMP,
creation_time TIMESTAMP,
deletion_time TIMESTAMP,
- mgmt_destination_id BIGINT REFERENCES mgmt_destination,
- mgmt_queue_id BIGINT REFERENCES mgmt_queue
+ mint_destination_id BIGINT REFERENCES mint_destination,
+ mint_queue_id BIGINT REFERENCES mint_queue
);
-CREATE TABLE mgmt_consumer_stats (
+CREATE TABLE mint_consumer_stats (
id BIGSERIAL PRIMARY KEY,
id_original BIGINT,
- mgmt_consumer_id BIGINT REFERENCES mgmt_consumer ,
+ mint_consumer_id BIGINT REFERENCES mint_consumer ,
rec_time TIMESTAMP,
msgs_consumed INT8 ,
bytes_consumed INT8 ,
@@ -329,4 +329,4 @@
unacked_messages_low INT4
);
-ALTER TABLE mgmt_consumer ADD FOREIGN KEY (mgmt_consumer_stats_id) REFERENCES mgmt_consumer_stats;
+ALTER TABLE mint_consumer ADD FOREIGN KEY (mint_consumer_stats_id) REFERENCES mint_consumer_stats;
Modified: mgmt/mint/schemaparser.py
===================================================================
--- mgmt/mint/schemaparser.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/mint/schemaparser.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -204,5 +204,5 @@
pythonFile.close()
-# parser = SchemaParser()
-# parser.generateCode()
+parser = SchemaParser()
+parser.generateCode()
Modified: mgmt/misc/mint-test.py
===================================================================
--- mgmt/misc/mint-test.py 2007-11-15 16:11:21 UTC (rev 1318)
+++ mgmt/misc/mint-test.py 2007-11-15 19:27:19 UTC (rev 1319)
@@ -13,10 +13,10 @@
def getQueueByName(name, create=False):
try:
- queues = MgmtQueue.selectBy(name=name)[:1]
+ queues = MintQueue.selectBy(name=name)[:1]
queue = queues[0]
except IndexError:
- if (create): queue = MgmtQueue()
+ if (create): queue = MintQueue()
return queue
def configCallback(broker, oid, list, timestamps):
17 years, 1 month
rhmessaging commits: r1318 - store/trunk/cpp/tests/jrnl.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2007-11-15 11:11:21 -0500 (Thu, 15 Nov 2007)
New Revision: 1318
Modified:
store/trunk/cpp/tests/jrnl/unit_test_jdir.cpp
Log:
Mostly completed jdir unit test
Modified: store/trunk/cpp/tests/jrnl/unit_test_jdir.cpp
===================================================================
--- store/trunk/cpp/tests/jrnl/unit_test_jdir.cpp 2007-11-15 13:00:56 UTC (rev 1317)
+++ store/trunk/cpp/tests/jrnl/unit_test_jdir.cpp 2007-11-15 16:11:21 UTC (rev 1318)
@@ -32,21 +32,34 @@
#include <boost/test/results_reporter.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/unit_test_log.hpp>
+#include <dirent.h>
+#include <errno.h>
#include <fstream>
+#include <iomanip>
#include <iostream>
+#include <jrnl/file_hdr.hpp>
#include <jrnl/jdir.hpp>
#include <jrnl/jerrno.hpp>
#include <jrnl/jexception.hpp>
#include <sys/stat.h>
#define ERRORSTR(e) ::strerror(ret) << " (" << ret << ")"
+#define NUM_JDAT_FILES 20
+#define NUM_CLEAR_OPS 20
using namespace boost::unit_test;
using namespace rhm::journal;
// Helper function declarations
-void make_file(const char* filename, mode_t fmode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-void make_jrnl_files(const char* dirname);
+void create_file(const char* filename, mode_t fmode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+void create_jrnl_fileset(const char* dirname, const char* base_filename);
+void create_jdat_file(const char* dirname, const char* base_filename, u_int32_t fid,
+ u_int64_t first_rid);
+void create_jinf_file(const char* dirname, const char* base_filename);
+void check_dir_contents(const char* dirname, const char* base_filename, unsigned num_subdirs,
+ bool jrnl_present);
+unsigned count_dir_contents(const char* dirname, bool incl_files = true, bool incl_dirs = true);
+void check_dir_not_existing(const char* dirname);
// Test functions of the form
// void fn() {...}
@@ -65,7 +78,7 @@
// Use instance
std::string dir_A("/tmp/A");
std::string dir_Ats("/tmp/A/"); // trailing '/'
- BOOST_CHECK(!jdir::exists(dir_A));
+ check_dir_not_existing("/tmp/A");
jdir dir1(dir_A, "test_base");
dir1.create_dir();
// check all combos of jdir::exists and jdir::is_dir()
@@ -77,30 +90,37 @@
BOOST_CHECK(jdir::is_dir(dir_Ats));
BOOST_CHECK(jdir::is_dir(dir_Ats.c_str()));
BOOST_CHECK(jdir::is_dir(dir_Ats.c_str()));
+ // do it a second time when dir exists
+ dir1.create_dir();
+ BOOST_CHECK(jdir::is_dir(dir_A));
dir1.delete_dir();
BOOST_CHECK(!jdir::exists(dir_A));
// Use static fn
- BOOST_CHECK(!jdir::exists("/tmp/B"));
+ check_dir_not_existing("/tmp/B");
jdir::create_dir("/tmp/B");
BOOST_CHECK(jdir::is_dir("/tmp/B"));
+ jdir::create_dir("/tmp/B");
+ BOOST_CHECK(jdir::is_dir("/tmp/B"));
jdir::delete_dir("/tmp/B");
BOOST_CHECK(!jdir::exists("/tmp/B"));
// Non-empty dirs
- BOOST_CHECK(!jdir::exists("/tmp/C"));
+ check_dir_not_existing("/tmp/C");
jdir::create_dir("/tmp/C");
BOOST_CHECK(jdir::is_dir("/tmp/C"));
- make_file("/tmp/C/test_file_1.txt"); // mode 644 (default)
- make_file("/tmp/C/test_file_2.txt", S_IRWXU | S_IRWXG | S_IRWXO); // mode 777
- make_file("/tmp/C/test_file_3.txt", S_IRUSR | S_IRGRP | S_IROTH); // mode 444 (read-only)
- make_file("/tmp/C/test_file_4.txt", 0); // mode 000 (no permissions)
+ create_file("/tmp/C/test_file_1.txt"); // mode 644 (default)
+ create_file("/tmp/C/test_file_2.txt", S_IRWXU | S_IRWXG | S_IRWXO); // mode 777
+ create_file("/tmp/C/test_file_3.txt", S_IRUSR | S_IRGRP | S_IROTH); // mode 444 (read-only)
+ create_file("/tmp/C/test_file_4.txt", 0); // mode 000 (no permissions)
BOOST_CHECK(jdir::is_dir("/tmp/C"));
+ jdir::create_dir("/tmp/C");
+ BOOST_CHECK(jdir::is_dir("/tmp/C"));
jdir::delete_dir("/tmp/C");
BOOST_CHECK(!jdir::exists("/tmp/C"));
// Check non-existent dirs fail
- BOOST_CHECK(!jdir::exists("/tmp/D"));
+ check_dir_not_existing("/tmp/D");
try
{
jdir::is_dir("/tmp/D");
@@ -115,7 +135,7 @@
void test_create_delete_dir_recursive()
{
// Use instances
- BOOST_CHECK(!jdir::exists("/tmp/E"));
+ check_dir_not_existing("/tmp/E");
jdir dir1("/tmp/E/F/G/H", "test_base");
dir1.create_dir();
BOOST_CHECK(jdir::is_dir("/tmp/E/F/G/H"));
@@ -125,7 +145,7 @@
jdir::delete_dir("/tmp/E"); // delete remaining dirs
BOOST_CHECK(!jdir::exists("/tmp/E"));
- BOOST_CHECK(!jdir::exists("/tmp/F"));
+ check_dir_not_existing("/tmp/F");
jdir dir2("/tmp/F/G/H/I/", "test_base"); // trailing '/'
dir2.create_dir();
BOOST_CHECK(jdir::is_dir("/tmp/F/G/H/I/"));
@@ -136,7 +156,7 @@
BOOST_CHECK(!jdir::exists("/tmp/F"));
// Use static fn
- BOOST_CHECK(!jdir::exists("/tmp/G"));
+ check_dir_not_existing("/tmp/G");
jdir::create_dir("/tmp/G/H/I/J");
BOOST_CHECK(jdir::is_dir("/tmp/G/H/I/J"));
jdir::delete_dir("/tmp/G/H/I/J");
@@ -145,7 +165,7 @@
jdir::delete_dir("/tmp/G");
BOOST_CHECK(!jdir::exists("/tmp/G"));
- BOOST_CHECK(!jdir::exists("/tmp/H"));
+ check_dir_not_existing("/tmp/H");
jdir::create_dir("/tmp/H/I/J/K/");
BOOST_CHECK(jdir::is_dir("/tmp/H/I/J/K/"));
jdir::delete_dir("/tmp/H/I/J/K/");
@@ -155,15 +175,15 @@
BOOST_CHECK(!jdir::exists("/tmp/H"));
// Non-empty dirs
- BOOST_CHECK(!jdir::exists("/tmp/I"));
+ check_dir_not_existing("/tmp/I");
jdir::create_dir("/tmp/I/J/K1/L1");
jdir::create_dir("/tmp/I/J/K1/L2");
jdir::create_dir("/tmp/I/J/K1/L3");
jdir::create_dir("/tmp/I/J/K1/L4");
- make_file("/tmp/I/J/K1/L4/test_file_1.txt"); // mode 644 (default)
- make_file("/tmp/I/J/K1/L4/test_file_2.txt", S_IRWXU | S_IRWXG | S_IRWXO); // mode 777
- make_file("/tmp/I/J/K1/L4/test_file_3.txt", S_IRUSR | S_IRGRP | S_IROTH); // mode 444
- make_file("/tmp/I/J/K1/L4/test_file_4.txt", 0); // mode 000 (no permissions)
+ create_file("/tmp/I/J/K1/L4/test_file_1.txt"); // mode 644 (default)
+ create_file("/tmp/I/J/K1/L4/test_file_2.txt", S_IRWXU | S_IRWXG | S_IRWXO); // mode 777
+ create_file("/tmp/I/J/K1/L4/test_file_3.txt", S_IRUSR | S_IRGRP | S_IROTH); // mode 444
+ create_file("/tmp/I/J/K1/L4/test_file_4.txt", 0); // mode 000 (no permissions)
jdir::create_dir("/tmp/I/J/K2");
jdir::create_dir("/tmp/I/J/K3/L5");
jdir::create_dir("/tmp/I/J/K3/L6");
@@ -178,64 +198,46 @@
BOOST_CHECK(!jdir::exists("/tmp/I"));
}
-void test_clear_dir()
+void test_clear_verify_dir()
{
// Use instances
- BOOST_CHECK(!jdir::exists("/tmp/test_dir_1"));
- jdir test_dir_1("/tmp/test_dir_1", "test_base");
+ const char* jrnl_dir = "/tmp/test_dir_1";
+ const char* bfn = "test_base";
+ check_dir_not_existing(jrnl_dir);
+ jdir test_dir_1(jrnl_dir, bfn);
test_dir_1.create_dir();
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1"));
- // add some journal files
- make_jrnl_files("/tmp/test_dir_1");
- // clear dir
- test_dir_1.clear_dir();
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1/_test_base.bak.0001"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_1/_test_base.bak.0001/test_base.jinf"));
- // add some more journal files
- make_jrnl_files("/tmp/test_dir_1");
- // clear dir
- test_dir_1.clear_dir();
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1/_test_base.bak.0001"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_1/_test_base.bak.0001/test_base.jinf"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1/_test_base.bak.0002"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_1/_test_base.bak.0002/test_base.jinf"));
- // add some more journal files
- make_jrnl_files("/tmp/test_dir_1");
- // clear dir
- test_dir_1.clear_dir();
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1/_test_base.bak.0001"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_1/_test_base.bak.0001/test_base.jinf"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1/_test_base.bak.0002"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_1/_test_base.bak.0002/test_base.jinf"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_1/_test_base.bak.0003"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_1/_test_base.bak.0003/test_base.jinf"));
+ BOOST_CHECK(jdir::is_dir(jrnl_dir));
+ // add journal files, check they exist, then clear them
+ unsigned cnt = 0;
+ while (cnt < NUM_CLEAR_OPS)
+ {
+ create_jrnl_fileset(jrnl_dir, bfn);
+ check_dir_contents(jrnl_dir, bfn, cnt, true);
+ test_dir_1.clear_dir();
+ check_dir_contents(jrnl_dir, bfn, ++cnt, false);
+ }
// clean up
test_dir_1.delete_dir();
- BOOST_CHECK(!jdir::exists("/tmp/test_dir_1"));
+ BOOST_CHECK(!jdir::exists(jrnl_dir));
- // non-existent dir with auto-create true
- BOOST_CHECK(!jdir::exists("/tmp/test_dir_2"));
- jdir test_dir_2("/tmp/test_dir_2", "test_base");
+ // Non-existent dir with auto-create true
+ jrnl_dir = "/tmp/test_dir_2";
+ check_dir_not_existing(jrnl_dir);
+ jdir test_dir_2(jrnl_dir, bfn);
// clear dir
test_dir_2.clear_dir(); // create flag is true by default
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_2"));
- // add some journal files
- make_jrnl_files("/tmp/test_dir_2");
- // clear dir
- test_dir_2.clear_dir();
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_2"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_2/_test_base.bak.0001"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_2/_test_base.bak.0001/test_base.jinf"));
+ check_dir_contents(jrnl_dir, bfn, 0, false);
+ // clear empty dir, should not create subdir
+ test_dir_2.clear_dir(); // create flag is true by default
+ check_dir_contents(jrnl_dir, bfn, 0, false);
// clean up
test_dir_2.delete_dir();
- BOOST_CHECK(!jdir::exists("/tmp/test_dir_2"));
+ BOOST_CHECK(!jdir::exists(jrnl_dir));
// non-existent dir with auto-create false
- BOOST_CHECK(!jdir::exists("/tmp/test_dir_3"));
- jdir test_dir_3("/tmp/test_dir_3", "test_base");
+ jrnl_dir = "/tmp/test_dir_3";
+ check_dir_not_existing(jrnl_dir);
+ jdir test_dir_3(jrnl_dir, bfn);
try
{
test_dir_3.clear_dir(false);
@@ -246,65 +248,144 @@
BOOST_CHECK_EQUAL(e.err_code(), jerrno::JERR_JDIR_OPENDIR);
}
- // static fn
- BOOST_CHECK(!jdir::exists("/tmp/test_dir_4"));
- jdir::create_dir("/tmp/test_dir_4");
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4"));
- // add some journal files
- make_jrnl_files("/tmp/test_dir_4");
- // clear dir
- jdir::clear_dir("/tmp/test_dir_4", "test_base");
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4/_test_base.bak.0001"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_4/_test_base.bak.0001/test_base.jinf"));
- // add some more journal files
- make_jrnl_files("/tmp/test_dir_4");
- // clear dir
- jdir::clear_dir("/tmp/test_dir_4", "test_base");
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4/_test_base.bak.0001"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_4/_test_base.bak.0001/test_base.jinf"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4/_test_base.bak.0002"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_4/_test_base.bak.0002/test_base.jinf"));
- // add some more journal files
- make_jrnl_files("/tmp/test_dir_4");
- // clear dir
- jdir::clear_dir("/tmp/test_dir_4", "test_base");
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4/_test_base.bak.0001"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_4/_test_base.bak.0001/test_base.jinf"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4/_test_base.bak.0002"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_4/_test_base.bak.0002/test_base.jinf"));
- BOOST_CHECK(jdir::is_dir("/tmp/test_dir_4/_test_base.bak.0003"));
- BOOST_CHECK(jdir::exists("/tmp/test_dir_4/_test_base.bak.0003/test_base.jinf"));
+ // Use static fn
+ jrnl_dir = "/tmp/test_dir_4";
+ check_dir_not_existing(jrnl_dir);
+ jdir::clear_dir(jrnl_dir, bfn); // should create dir if it does not exist
+ // add journal files, check they exist, then clear them
+ cnt = 0;
+ while (cnt < NUM_CLEAR_OPS)
+ {
+ create_jrnl_fileset(jrnl_dir, bfn);
+ check_dir_contents(jrnl_dir, bfn, cnt, true);
+ jdir::clear_dir(jrnl_dir, bfn);
+ check_dir_contents(jrnl_dir, bfn, ++cnt, false);
+ }
// clean up
- jdir::delete_dir("/tmp/test_dir_4");
- BOOST_CHECK(!jdir::exists("/tmp/test_dir_4"));
+ jdir::delete_dir(jrnl_dir);
+ BOOST_CHECK(!jdir::exists(jrnl_dir));
}
// Helper functions
-void make_file(const char* filename, mode_t fmode)
+void create_file(const char* filename, mode_t fmode)
{
- std::ofstream of(filename, std::ofstream::out | std::ofstream::trunc);
- if (!of.good())
- BOOST_FAIL("Unable to open file " << filename << " for writing.");
- of.write(filename, ::strlen(filename));
- of.close();
- ::chmod(filename, fmode);
+ std::ofstream of(filename, std::ofstream::out | std::ofstream::trunc);
+ if (!of.good())
+ BOOST_FAIL("Unable to open file " << filename << " for writing.");
+ of.write(filename, ::strlen(filename));
+ of.close();
+ ::chmod(filename, fmode);
}
-void make_jrnl_files(const char* dirname)
+void create_jrnl_fileset(const char* dirname, const char* base_filename)
{
- make_file(std::string(dirname).append("/test_base.0000.jdat").c_str());
- make_file(std::string(dirname).append("/test_base.0001.jdat").c_str());
- make_file(std::string(dirname).append("/test_base.0002.jdat").c_str());
- make_file(std::string(dirname).append("/test_base.0003.jdat").c_str());
- make_file(std::string(dirname).append("/test_base.0004.jdat").c_str());
- make_file(std::string(dirname).append("/test_base.jinf").c_str());
- make_file(std::string(dirname).append("/test_base.12345.abcdef.crazy-filename").c_str());
+ create_jinf_file(dirname, base_filename);
+ for (u_int32_t fid = 0; fid < NUM_JDAT_FILES; fid++)
+ {
+ u_int64_t rid = 0x12340000 + (fid * 0x25);
+ create_jdat_file(dirname, base_filename, fid, rid);
+ }
}
+void create_jdat_file(const char* dirname, const char* base_filename, u_int32_t fid,
+ u_int64_t first_rid)
+{
+ std::stringstream fn;
+ fn << dirname << "/" << base_filename << ".";
+ fn << std::setfill('0') << std::setw(4) << std::hex << fid << ".jdat";
+ file_hdr fh(RHM_JDAT_FILE_MAGIC, RHM_JDAT_VERSION, 0, first_rid, fid, 0x200, true);
+ std::ofstream of(fn.str().c_str(), std::ofstream::out | std::ofstream::trunc);
+ if (!of.good())
+ BOOST_FAIL("Unable to open journal data file " << fn << " for writing.");
+ of.write((const char*)&fh, sizeof(file_hdr));
+ of.close();
+}
+
+void create_jinf_file(const char* dirname, const char* base_filename)
+{
+ timespec ts;
+ ::clock_gettime(CLOCK_REALTIME, &ts);
+ jinf ji("test journal id", dirname, base_filename, ts);
+ ji.write();
+}
+
+void check_dir_contents(const char* dirname, const char* base_filename, unsigned num_subdirs,
+ bool jrnl_present)
+{
+ if (jdir::is_dir(dirname))
+ {
+ // Subdir count
+ BOOST_CHECK_EQUAL(count_dir_contents(dirname, false, true), num_subdirs);
+
+ // Journal file count
+ unsigned num_jrnl_files = jrnl_present ? NUM_JDAT_FILES + 1 : 0;
+ BOOST_CHECK_EQUAL(count_dir_contents(dirname, true, false), num_jrnl_files);
+
+ // Check journal files are present
+ if (jrnl_present)
+ try { jdir::verify_dir(dirname, base_filename); }
+ catch(const jexception& e) { BOOST_ERROR(e); }
+ for (unsigned subdir_num = 1; subdir_num <= num_subdirs; subdir_num++)
+ {
+ std::stringstream subdir_name;
+ subdir_name << dirname << "/_" << base_filename << ".bak.";
+ subdir_name << std::hex << std::setfill('0') << std::setw(4) << subdir_num;
+ try { jdir::verify_dir(subdir_name.str().c_str(), base_filename); }
+ catch(const jexception& e) { BOOST_ERROR(e); }
+ }
+ }
+ else
+ BOOST_ERROR(dirname << " is not a directory");
+}
+
+unsigned count_dir_contents(const char* dirname, bool incl_files, bool incl_dirs)
+{
+ struct dirent* entry;
+ struct stat s;
+ unsigned file_cnt = 0;
+ unsigned dir_cnt = 0;
+ unsigned other_cnt = 0;
+ DIR* dir = ::opendir(dirname);
+ if (!dir)
+ BOOST_FAIL("Unable to open directory " << dirname);
+ while ((entry = ::readdir(dir)) != NULL)
+ {
+ // Ignore . and ..
+ if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0)
+ {
+ std::stringstream fn;
+ fn << dirname << "/" << entry->d_name;
+ if (::stat(fn.str().c_str(), &s))
+ BOOST_FAIL("Unable to stat dir entry " << entry->d_name << "; err=" <<
+ strerror(errno));
+ if (S_ISREG(s.st_mode))
+ file_cnt++;
+ else if (S_ISDIR(s.st_mode))
+ dir_cnt++;
+ else
+ other_cnt++;
+ }
+ }
+ if (incl_files)
+ {
+ if (incl_dirs)
+ return file_cnt + dir_cnt;
+ return file_cnt;
+ }
+ else if (incl_dirs)
+ return dir_cnt;
+ return other_cnt;
+}
+
+void check_dir_not_existing(const char* dirname)
+{
+ if (jdir::exists(dirname) && jdir::is_dir(dirname))
+ jdir::delete_dir(dirname);
+ if (jdir::exists(dirname))
+ BOOST_FAIL("Unable to remove directory " << dirname);
+}
+
// Initialize test suite and include test functions
test_suite* init_unit_test_suite(int, char**)
@@ -318,7 +399,7 @@
ts->add(BOOST_TEST_CASE(&test_constructor));
ts->add(BOOST_TEST_CASE(&test_create_delete_dir));
ts->add(BOOST_TEST_CASE(&test_create_delete_dir_recursive));
- ts->add(BOOST_TEST_CASE(&test_clear_dir));
+ ts->add(BOOST_TEST_CASE(&test_clear_verify_dir));
return ts;
}
17 years, 1 month