rhmessaging commits: r2920 - mgmt/trunk/mint/python/mint.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2008-12-03 16:55:29 -0500 (Wed, 03 Dec 2008)
New Revision: 2920
Modified:
mgmt/trunk/mint/python/mint/__init__.py
Log:
Fix references
Modified: mgmt/trunk/mint/python/mint/__init__.py
===================================================================
--- mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 21:52:56 UTC (rev 2919)
+++ mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 21:55:29 UTC (rev 2920)
@@ -518,9 +518,9 @@
regUrls.add(reg.url)
if reg.url not in self.model.mintBrokersByUrl:
- self.model.addBroker(broker.url)
+ self.model.addBroker(reg.url)
- for mbroker in self.model.mintBrokersByQmfObject.values():
+ for mbroker in self.model.mintBrokersByQmfBroker.values():
if mbroker.url not in regUrls:
self.model.delBroker(mbroker)
17 years, 4 months
rhmessaging commits: r2919 - mgmt/trunk/mint/python/mint.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2008-12-03 16:52:56 -0500 (Wed, 03 Dec 2008)
New Revision: 2919
Modified:
mgmt/trunk/mint/python/mint/__init__.py
Log:
Fix dumb mistake
Modified: mgmt/trunk/mint/python/mint/__init__.py
===================================================================
--- mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 21:50:53 UTC (rev 2918)
+++ mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 21:52:56 UTC (rev 2919)
@@ -520,8 +520,6 @@
if reg.url not in self.model.mintBrokersByUrl:
self.model.addBroker(broker.url)
- for reg in regs:
-
for mbroker in self.model.mintBrokersByQmfObject.values():
if mbroker.url not in regUrls:
self.model.delBroker(mbroker)
17 years, 4 months
rhmessaging commits: r2918 - in mgmt/trunk: mint/python/mint and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2008-12-03 16:50:53 -0500 (Wed, 03 Dec 2008)
New Revision: 2918
Added:
mgmt/trunk/mint/python/mint/util.py
Modified:
mgmt/trunk/cumin/python/cumin/__init__.py
mgmt/trunk/cumin/python/cumin/model.py
mgmt/trunk/mint/python/mint/__init__.py
Log:
Use a poller thread to keep registrations and qmf brokers in sync,
connectionwise.
Modified: mgmt/trunk/cumin/python/cumin/__init__.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/__init__.py 2008-12-03 17:24:54 UTC (rev 2917)
+++ mgmt/trunk/cumin/python/cumin/__init__.py 2008-12-03 21:50:53 UTC (rev 2918)
@@ -85,9 +85,6 @@
def start(self):
self.model.start()
- for reg in BrokerRegistration.select():
- reg.connect(self.model.data)
-
def stop(self):
self.model.stop()
Modified: mgmt/trunk/cumin/python/cumin/model.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/model.py 2008-12-03 17:24:54 UTC (rev 2917)
+++ mgmt/trunk/cumin/python/cumin/model.py 2008-12-03 21:50:53 UTC (rev 2918)
@@ -1723,8 +1723,6 @@
try:
object = self.cumin_class.mint_class(**args)
- object.connect(self.model.data)
-
completion("OK")
return object
Modified: mgmt/trunk/mint/python/mint/__init__.py
===================================================================
--- mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 17:24:54 UTC (rev 2917)
+++ mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 21:50:53 UTC (rev 2918)
@@ -14,6 +14,8 @@
from mint.cache import MintCache
from mint.dbexpire import DBExpireThread
from qmf.console import ClassKey
+from util import *
+from time import sleep
log = logging.getLogger("mint")
@@ -216,7 +218,6 @@
name = StringCol(length=1000, default=None, unique=True, notNone=True)
url = StringCol(length=1000, default=None)
- mintBroker = None
broker = ForeignKey("Broker", cascade="null", default=None)
groups = SQLRelatedJoin("BrokerGroup",
intermediateTable="broker_group_mapping",
@@ -226,24 +227,6 @@
url_unique = DatabaseIndex(url, unique=True)
- def connect(self, model):
- log.info("Connecting to broker '%s' at %s" % (self.name, self.url))
- try:
- self.mintBroker = model.addBroker(self.url)
- log.info("Connection succeeded")
- except Exception, e:
- log.info("Connection failed: %s ", e.message)
- print_exc()
-
- def disconnect(self, model):
- log.info("Disconnecting from broker '%s' at %s" % (self.name, self.url))
- try:
- model.delBroker(self.mintBroker)
- log.info("Disconnection succeeded")
- except Exception, e:
- log.info("Disconnection failed: %s ", e.message)
- print_exc()
-
def getBrokerId(self):
return self.mintBroker.qmfId
@@ -254,12 +237,6 @@
except IndexError:
return None
- def destroySelf(self):
- if MintModel.staticInstance:
- self.disconnect(MintModel.staticInstance)
-
- super(BrokerRegistration, self).destroySelf()
-
class BrokerGroup(SQLObject):
class sqlmeta:
lazyUpdate = True
@@ -318,6 +295,8 @@
def __init__(self, dataUri, dbExpireFrequency, dbExpireThreshold, debug=False):
self.dataUri = dataUri
self.debug = debug
+ self.updateObjects = True
+ self.pollRegistrations = False # XXX
assert MintModel.staticInstance is None
MintModel.staticInstance = self
@@ -330,11 +309,15 @@
self.mintBrokersByUrl = dict()
self.__lock = RLock()
+
self.dbConn = None
+ self.mgmtSession = qmf.console.Session(self, manageConnections=True)
- self.dbExpireThread = dbexpire.DBExpireThread(self, frequency=dbExpireFrequency, threshold=dbExpireThreshold)
self.updateThread = update.ModelUpdateThread(self)
- self.mgmtSession = qmf.console.Session(self, manageConnections=True)
+ self.dbExpireThread = dbexpire.DBExpireThread \
+ (self, frequency=dbExpireFrequency, threshold=dbExpireThreshold)
+ self.registrationThread = RegistrationThread(self)
+
self.outstandingMethodCalls = dict()
if self.debug:
@@ -360,10 +343,12 @@
def start(self):
self.updateThread.start()
self.dbExpireThread.start()
+ self.registrationThread.start()
def stop(self):
self.updateThread.stop()
self.dbExpireThread.stop()
+ self.registrationThread.stop()
def callMethod(self, brokerId, objId, classKey, methodName, callback, args):
self.lock()
@@ -384,6 +369,8 @@
return seq
def addBroker(self, url):
+ log.info("Adding qmf broker at %s", url)
+
self.lock()
try:
qbroker = self.mgmtSession.addBroker(url)
@@ -402,6 +389,8 @@
def delBroker(self, mbroker):
assert isinstance(mbroker, MintBroker)
+ log.info("Removing qmf broker at %s", mbroker.url)
+
self.lock()
try:
self.mgmtSession.delBroker(mbroker.qmfBroker)
@@ -506,3 +495,35 @@
up = update.MethodUpdate(self, mbroker, seq, response)
self.updateThread.enqueue(up)
+
+class RegistrationThread(MintDaemonThread):
+ def run(self):
+ log.info("Polling for registration changes every 5 seconds")
+
+ while True:
+ try:
+ if self.stopRequested:
+ break
+
+ self.do_run()
+ except Exception, e:
+ log.exception(e)
+
+ def do_run(self):
+ log.info("Polling")
+
+ regUrls = set()
+
+ for reg in BrokerRegistration.select():
+ regUrls.add(reg.url)
+
+ if reg.url not in self.model.mintBrokersByUrl:
+ self.model.addBroker(broker.url)
+
+ for reg in regs:
+
+ for mbroker in self.model.mintBrokersByQmfObject.values():
+ if mbroker.url not in regUrls:
+ self.model.delBroker(mbroker)
+
+ sleep(5)
Added: mgmt/trunk/mint/python/mint/util.py
===================================================================
--- mgmt/trunk/mint/python/mint/util.py (rev 0)
+++ mgmt/trunk/mint/python/mint/util.py 2008-12-03 21:50:53 UTC (rev 2918)
@@ -0,0 +1,19 @@
+import sys, os, logging
+
+from threading import Thread
+
+log = logging.getLogger("mint")
+
+class MintDaemonThread(Thread):
+ def __init__(self, model):
+ super(MintDaemonThread, self).__init__()
+
+ self.model = model
+
+ self.stopRequested = False
+
+ self.setDaemon(True)
+
+ def stop(self):
+ assert self.stopRequested is False
+ self.stopRequested = True
17 years, 4 months
rhmessaging commits: r2917 - mgmt/trunk/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2008-12-03 12:24:54 -0500 (Wed, 03 Dec 2008)
New Revision: 2917
Modified:
mgmt/trunk/cumin/python/cumin/system.py
Log:
Allow for more than one daemon of each type in the system services set.
Modified: mgmt/trunk/cumin/python/cumin/system.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/system.py 2008-12-03 17:17:01 UTC (rev 2916)
+++ mgmt/trunk/cumin/python/cumin/system.py 2008-12-03 17:24:54 UTC (rev 2917)
@@ -163,11 +163,12 @@
sql = "system = '%s'" % system.nodeName
for daemon in daemon_types:
- system_daemon = daemon.select(sql)
- try:
- daemons.append(system_daemon[0])
- except Exception, e:
- pass
+ system_daemons = daemon.select(sql)
+ for devil in system_daemons:
+ try:
+ daemons.append(devil)
+ except Exception, e:
+ pass
brokers = Broker.select("system_id = '%i'" % system.id)
for broker in brokers:
17 years, 4 months
rhmessaging commits: r2916 - mgmt/trunk/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2008-12-03 12:17:01 -0500 (Wed, 03 Dec 2008)
New Revision: 2916
Modified:
mgmt/trunk/cumin/python/cumin/model.py
mgmt/trunk/cumin/python/cumin/parameters.py
mgmt/trunk/cumin/python/cumin/system.py
mgmt/trunk/cumin/python/cumin/system.strings
Log:
Switching from System to Sysimage
Modified: mgmt/trunk/cumin/python/cumin/model.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/model.py 2008-12-03 16:29:12 UTC (rev 2915)
+++ mgmt/trunk/cumin/python/cumin/model.py 2008-12-03 17:17:01 UTC (rev 2916)
@@ -617,9 +617,9 @@
class CuminSystem(RemoteClass):
def __init__(self, model):
- super(CuminSystem, self).__init__(model, "system", System, SystemStats)
+ super(CuminSystem, self).__init__(model, "system", Sysimage, SysimageStats)
- prop = self.SystemIdProperty(self, "systemId")
+ prop = CuminProperty(self, "uuid")
prop.title = "System ID"
prop = CuminProperty(self, "nodeName")
@@ -670,12 +670,6 @@
def render_sql_orderby(self, session, *args):
return "order by machine, name asc"
- class SystemIdProperty(CuminProperty):
- def value(self, session, object):
- val = super(CuminSystem.SystemIdProperty, self).value(session, object)
-
- return val
-
class CuminMaster(RemoteClass):
def __init__(self, model):
super(CuminMaster, self).__init__(model, "master", Master, MasterStats)
Modified: mgmt/trunk/cumin/python/cumin/parameters.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/parameters.py 2008-12-03 16:29:12 UTC (rev 2915)
+++ mgmt/trunk/cumin/python/cumin/parameters.py 2008-12-03 17:17:01 UTC (rev 2916)
@@ -172,7 +172,7 @@
class SystemParameter(Parameter):
def do_unmarshal(self, string):
- return System.get(int(string))
+ return Sysimage.get(int(string))
def do_marshal(self, session):
return str(session.id)
Modified: mgmt/trunk/cumin/python/cumin/system.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/system.py 2008-12-03 16:29:12 UTC (rev 2915)
+++ mgmt/trunk/cumin/python/cumin/system.py 2008-12-03 17:17:01 UTC (rev 2916)
@@ -22,9 +22,8 @@
self.set_default_column(col)
def render_title(self, session, *args):
- count = System.select().count()
- return "Systems %s" % fmt_count(count)
-
+ return "Systems %s" % fmt_count(self.get_item_count(session, *args))
+
class NameColumn(SqlTableColumn):
def render_title(self, session, data):
return "Name"
Modified: mgmt/trunk/cumin/python/cumin/system.strings
===================================================================
--- mgmt/trunk/cumin/python/cumin/system.strings 2008-12-03 16:29:12 UTC (rev 2915)
+++ mgmt/trunk/cumin/python/cumin/system.strings 2008-12-03 17:17:01 UTC (rev 2916)
@@ -1,11 +1,11 @@
[SystemSet.sql]
select s.id, s.node_name as name
-from system as s
+from sysimage as s
{sql_orderby}
{sql_limit}
[SystemSet.count_sql]
-select count(*) from system
+select count(*) from sysimage
[SystemSet.html]
<form id="{id}" method="post" action="?">
17 years, 4 months
rhmessaging commits: r2915 - mgmt/trunk/mint/python/mint.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2008-12-03 11:29:12 -0500 (Wed, 03 Dec 2008)
New Revision: 2915
Modified:
mgmt/trunk/mint/python/mint/__init__.py
mgmt/trunk/mint/python/mint/tools.py
mgmt/trunk/mint/python/mint/update.py
Log:
Remove getSession on MintModel and instead implement addBroker and
delBroker directly there.
This was done in order to get some more accounting for qmf broker
conns, by id and url. These are needed to implement a
registration-based reconnect.
Modified: mgmt/trunk/mint/python/mint/__init__.py
===================================================================
--- mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 16:18:20 UTC (rev 2914)
+++ mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 16:29:12 UTC (rev 2915)
@@ -216,7 +216,7 @@
name = StringCol(length=1000, default=None, unique=True, notNone=True)
url = StringCol(length=1000, default=None)
- qmfBroker = None
+ mintBroker = None
broker = ForeignKey("Broker", cascade="null", default=None)
groups = SQLRelatedJoin("BrokerGroup",
intermediateTable="broker_group_mapping",
@@ -229,7 +229,7 @@
def connect(self, model):
log.info("Connecting to broker '%s' at %s" % (self.name, self.url))
try:
- self.qmfBroker = model.getSession().addBroker(self.url)
+ self.mintBroker = model.addBroker(self.url)
log.info("Connection succeeded")
except Exception, e:
log.info("Connection failed: %s ", e.message)
@@ -238,17 +238,14 @@
def disconnect(self, model):
log.info("Disconnecting from broker '%s' at %s" % (self.name, self.url))
try:
- model.getSession().delBroker(self.qmfBroker)
+ model.delBroker(self.mintBroker)
log.info("Disconnection succeeded")
except Exception, e:
log.info("Disconnection failed: %s ", e.message)
print_exc()
def getBrokerId(self):
- if self.qmfBroker is not None:
- return str(self.qmfBroker.getBrokerId())
- else:
- return None
+ return self.mintBroker.qmfId
def getDefaultVhost(self):
if self.broker:
@@ -297,14 +294,18 @@
properties = SQLMultipleJoin("ConfigProperty", joinColumn="profile_id")
class MintBroker(object):
- def __init__(self, qmfBroker):
+ def __init__(self, url, qmfBroker):
+ self.url = url
self.qmfBroker = qmfBroker
- self.qmfId = str(self.qmfBroker.getBrokerId())
+ self.qmfId = None
self.databaseId = None
+
self.objectDatabaseIds = MintCache() # database ids by qmf object id
self.orphans = dict() # updates by qmf object id
+ self.connected = False
+
def getAmqpSession(self):
return self.qmfBroker.getAmqpSession()
@@ -321,8 +322,13 @@
assert MintModel.staticInstance is None
MintModel.staticInstance = self
- self.mintBrokers = dict() # MintBrokers by qmfId
+ # Lookup tables used for recovering MintBroker objects, which have
+ # mint-specific accounting and wrap a qmf broker object
+ self.mintBrokersByQmfBroker = dict()
+ self.mintBrokersById = dict()
+ self.mintBrokersByUrl = dict()
+
self.__lock = RLock()
self.dbConn = None
@@ -359,13 +365,10 @@
self.updateThread.stop()
self.dbExpireThread.stop()
- def getSession(self):
- return self.mgmtSession
-
def callMethod(self, brokerId, objId, classKey, methodName, callback, args):
self.lock()
try:
- broker = self.mintBrokers[brokerId]
+ broker = self.mintBrokersById[brokerId]
finally:
self.unlock()
@@ -380,23 +383,77 @@
self.unlock()
return seq
- def brokerConnected(self, broker):
+ def addBroker(self, url):
+ self.lock()
+ try:
+ qbroker = self.mgmtSession.addBroker(url)
+ mbroker = MintBroker(url, qbroker)
+
+ # Can't add the by-id mapping here, as the id is not set yet;
+ # instead we add it when brokerConnected is called
+
+ self.mintBrokersByQmfBroker[qbroker] = mbroker
+ self.mintBrokersByUrl[url] = mbroker
+
+ return mbroker
+ finally:
+ self.unlock()
+
+ def delBroker(self, mbroker):
+ assert isinstance(mbroker, MintBroker)
+
+ self.lock()
+ try:
+ self.mgmtSession.delBroker(mbroker.qmfBroker)
+
+ del self.mintBrokersByQmfBroker[mbroker.qmfBroker]
+ del self.mintBrokersById[mbroker.qmfId]
+ del self.mintBrokersByUrl[mbroker.url]
+ finally:
+ self.unlock()
+
+ def brokerConnected(self, qbroker):
""" Invoked when a connection is established to a broker """
self.lock()
try:
- mbroker = MintBroker(broker)
- self.mintBrokers[mbroker.qmfId] = mbroker
+ mbroker = self.mintBrokersByQmfBroker[qbroker]
+
+ assert mbroker.connected is False
+
+ mbroker.connected = True
finally:
self.unlock()
- def brokerDisconnected(self, broker):
+ def brokerInfo(self, qbroker):
+ self.lock()
+ try:
+ id = str(qbroker.getBrokerId())
+ mbroker = self.mintBrokersByQmfBroker[qbroker]
+
+ mbroker.qmfId = id
+ self.mintBrokersById[id] = mbroker
+ finally:
+ self.unlock()
+
+ def brokerDisconnected(self, qbroker):
""" Invoked when the connection to a broker is lost """
self.lock()
try:
- del self.mintBrokers[str(broker.getBrokerId())]
+ mbroker = self.mintBrokersByQmfBroker[qbroker]
+
+ assert mbroker.connected is True
+
+ mbroker.connected = False
finally:
self.unlock()
+ def getMintBrokerByQmfBroker(self, qbroker):
+ self.lock()
+ try:
+ return self.mintBrokersByQmfBroker[qbroker]
+ finally:
+ self.unlock()
+
def newPackage(self, name):
""" Invoked when a QMF package is discovered. """
pass
@@ -417,7 +474,8 @@
def objectProps(self, broker, record):
""" Invoked when an object is updated. """
- mbroker = self.mintBrokers[str(broker.getBrokerId())]
+ mbroker = self.getMintBrokerByQmfBroker(broker)
+
up = update.PropertyUpdate(self, mbroker, record)
if record.getClassKey().getClassName() == "job":
@@ -428,7 +486,7 @@
def objectStats(self, broker, record):
""" Invoked when an object is updated. """
- mbroker = self.mintBrokers[str(broker.getBrokerId())]
+ mbroker = self.getMintBrokerByQmfBroker(broker)
up = update.StatisticUpdate(self, mbroker, record)
if record.getClassKey().getClassName() == "job":
@@ -443,17 +501,8 @@
def heartbeat(self, agent, timestamp):
pass
- def brokerInfo(self, broker):
- # XXX why do we do this?
- self.lock()
- try:
- mbroker = MintBroker(broker)
- self.mintBrokers[mbroker.qmfId] = mbroker
- finally:
- self.unlock()
-
def methodResponse(self, broker, seq, response):
- mbroker = self.mintBrokers[str(broker.getBrokerId())]
+ mbroker = self.getMintBrokerByQmfBroker(broker)
up = update.MethodUpdate(self, mbroker, seq, response)
self.updateThread.enqueue(up)
Modified: mgmt/trunk/mint/python/mint/tools.py
===================================================================
--- mgmt/trunk/mint/python/mint/tools.py 2008-12-03 16:18:20 UTC (rev 2914)
+++ mgmt/trunk/mint/python/mint/tools.py 2008-12-03 16:29:12 UTC (rev 2915)
@@ -89,7 +89,7 @@
try:
for arg in args[1:]:
- model.getSession().addBroker(arg)
+ model.addBroker(arg)
enq_last = 0
deq_last = 0
Modified: mgmt/trunk/mint/python/mint/update.py
===================================================================
--- mgmt/trunk/mint/python/mint/update.py 2008-12-03 16:18:20 UTC (rev 2914)
+++ mgmt/trunk/mint/python/mint/update.py 2008-12-03 16:29:12 UTC (rev 2915)
@@ -69,19 +69,19 @@
conn.commit()
- for broker in self.model.mintBrokers.values():
+ for broker in self.model.mintBrokersByQmfBroker.values():
broker.objectDatabaseIds.commit()
profile.commitTime += clock() - start
else:
conn.commit()
- for broker in self.model.mintBrokers.values():
+ for broker in self.model.mintBrokersByQmfBroker.values():
broker.objectDatabaseIds.commit()
except:
conn.rollback()
- for broker in self.model.mintBrokers.values():
+ for broker in self.model.mintBrokersByQmfBroker.values():
broker.objectDatabaseIds.rollback()
log.exception("Update failed")
@@ -203,7 +203,7 @@
attrs["qmfScopeId"] = oid.first
attrs["qmfObjectId"] = oid.second
attrs["qmfClassKey"] = str(self.object.getClassKey())
- attrs["qmfBrokerId"] = self.broker.qmfId
+ attrs["qmfBrokerId"] = str(self.broker.qmfBroker.getBrokerId())
cursor = conn.cursor()
@@ -272,26 +272,24 @@
pass
def processBroker(self, cursor, id):
- try:
- broker = self.model.mintBrokers[self.broker.qmfId]
- except KeyError:
- # XXX what does this mean?
- return
-
- if broker.databaseId is None:
+ if self.broker.databaseId is None:
op = SqlGetBrokerRegistration()
- op.execute(cursor, {"url": self.broker.getFullUrl()})
+ op.execute(cursor, {"url": self.broker.url})
rec = cursor.fetchone()
+ #print op.text, {"url": self.broker.url}
+
if rec:
rid = rec[0]
op = SqlAttachBroker()
op.execute(cursor, {"id": id, "registrationId": rid})
- broker.databaseId = id
+ #print op.text, {"id": id, "registrationId": rid}
+ self.broker.databaseId = id
+
class StatisticUpdate(ModelUpdate):
def process(self, conn):
try:
17 years, 4 months
rhmessaging commits: r2914 - mgmt/trunk/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2008-12-03 11:18:20 -0500 (Wed, 03 Dec 2008)
New Revision: 2914
Modified:
mgmt/trunk/cumin/python/cumin/pool.py
mgmt/trunk/cumin/python/cumin/slot.py
mgmt/trunk/cumin/python/cumin/slot.strings
Log:
Temp replacement of Slot status stub. This one is static.
Modified: mgmt/trunk/cumin/python/cumin/pool.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/pool.py 2008-12-03 15:32:36 UTC (rev 2913)
+++ mgmt/trunk/cumin/python/cumin/pool.py 2008-12-03 16:18:20 UTC (rev 2914)
@@ -17,7 +17,7 @@
from collector import CollectorSet, CollectorFrame, CollectorStart, CollectorStop
from negotiator import NegotiatorSet, NegotiatorFrame, NegStart, NegStop
from limits import LimitsSet, LimitsFrame
-from slot import SlotSet, MachineSet
+from slot import SlotSet, MachineSet, SlotStatSet
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.pool")
@@ -455,8 +455,29 @@
return "Percentage"
class PoolStatus(CuminStatus):
- def render_status(self, session, pool):
- return "Active"
+ def __init__(self, app, name):
+ super(PoolStatus, self).__init__(app, name)
+
+ self.slotset = self.StatusPoolSlotSet(app, "status")
def render_title(self, session, pool):
return "Pool Status"
+
+ def render_status(self, session, pool):
+ cursor = self.slotset.get_items(session, pool)
+ slot_list = self.slotset.cursor_to_rows(cursor)
+ # should be only one record
+ data = slot_list[0]
+ idle = data["idle"]
+ total = data["total"]
+ return "%i of %i slots active" % (total - idle, total)
+
+ class StatusPoolSlotSet(SlotStatSet):
+ def render_sql_where(self, session, pool):
+ elems = list()
+ elems.append("s.pool = %(pool)s")
+ return "where %s" % " and ".join(elems)
+
+ def get_sql_values(self, session, pool):
+ values = {"pool": pool.id}
+ return values
Modified: mgmt/trunk/cumin/python/cumin/slot.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/slot.py 2008-12-03 15:32:36 UTC (rev 2913)
+++ mgmt/trunk/cumin/python/cumin/slot.py 2008-12-03 16:18:20 UTC (rev 2914)
@@ -25,3 +25,5 @@
class MachineSet(CuminTable):
pass
+class SlotStatSet(CuminTable):
+ pass
\ No newline at end of file
Modified: mgmt/trunk/cumin/python/cumin/slot.strings
===================================================================
--- mgmt/trunk/cumin/python/cumin/slot.strings 2008-12-03 15:32:36 UTC (rev 2913)
+++ mgmt/trunk/cumin/python/cumin/slot.strings 2008-12-03 16:18:20 UTC (rev 2914)
@@ -20,6 +20,22 @@
from (select distinct name, machine, system, pool, qmf_delete_time from slot as s {sql_where}) as s
+[SlotStatSet.sql]
+select
+ sum(case activity when 'Idle' then 1 else 0 end) / coll_count as idle,
+ sum(1) / coll_count as total
+from (select
+ machine, s.pool, c.activity, count(coll.id) as coll_count
+ from slot as s
+ left outer join slot_stats as c on c.id = s.stats_curr_id
+ left outer join collector as coll on coll.pool = s.pool
+ group by machine, s.pool, c.activity, s.stats_curr_id) as s
+{sql_where}
+group by coll_count
+
+[SlotStatSet.count_sql]
+1
+
[MachineSet.sql]
select
machine as id,
17 years, 4 months
rhmessaging commits: r2913 - in store/trunk/cpp: lib/jrnl and 2 other directories.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2008-12-03 10:32:36 -0500 (Wed, 03 Dec 2008)
New Revision: 2913
Modified:
store/trunk/cpp/lib/JournalImpl.h
store/trunk/cpp/lib/jrnl/aio_callback.hpp
store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp
store/trunk/cpp/tests/jrnl/jtt/jrnl_instance.hpp
Log:
corrected some virtualization issues
Modified: store/trunk/cpp/lib/JournalImpl.h
===================================================================
--- store/trunk/cpp/lib/JournalImpl.h 2008-12-03 00:10:28 UTC (rev 2912)
+++ store/trunk/cpp/lib/JournalImpl.h 2008-12-03 15:32:36 UTC (rev 2913)
@@ -25,7 +25,6 @@
#define _JournalImpl_
#include <set>
-#include "jrnl/data_tok.hpp"
#include "jrnl/jcntl.hpp"
#include "jrnl/slock.hpp"
#include "DataTokenImpl.h"
@@ -69,7 +68,7 @@
inline void cancel() { mrg::journal::slock s(&_gefe_mutex); parent = 0; }
};
- class JournalImpl : public qpid::broker::ExternalQueueStore, public journal::jcntl, public journal::aio_callback
+ class JournalImpl : public qpid::broker::ExternalQueueStore, public journal::jcntl, public virtual journal::aio_callback
{
private:
static qpid::broker::Timer* journalTimerPtr;
@@ -188,8 +187,8 @@
void flushFire();
// AIO callbacks
- void wr_aio_cb(std::vector<journal::data_tok*>& dtokl);
- void rd_aio_cb(std::vector<u_int16_t>& pil);
+ virtual void wr_aio_cb(std::vector<journal::data_tok*>& dtokl);
+ virtual void rd_aio_cb(std::vector<u_int16_t>& pil);
qpid::management::ManagementObject* GetManagementObject (void) const
{ return _mgmtObject; }
@@ -229,7 +228,7 @@
JournalImpl(journalId, journalDirectory, journalBaseFilename, getEventsTimeout, flushTimeout)
{}
- virtual ~TplJournalImpl() {}
+ ~TplJournalImpl() {}
// Special version of read_data_record that ignores transactions - needed when reading the TPL
inline journal::iores read_data_record(void** const datapp, std::size_t& dsize,
Modified: store/trunk/cpp/lib/jrnl/aio_callback.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/aio_callback.hpp 2008-12-03 00:10:28 UTC (rev 2912)
+++ store/trunk/cpp/lib/jrnl/aio_callback.hpp 2008-12-03 15:32:36 UTC (rev 2913)
@@ -44,9 +44,9 @@
class aio_callback
{
public:
+ virtual ~aio_callback() {}
virtual void wr_aio_cb(std::vector<data_tok*>& dtokl) = 0;
virtual void rd_aio_cb(std::vector<u_int16_t>& pil) = 0;
- virtual ~aio_callback() {};
};
} // namespace journal
Modified: store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp
===================================================================
--- store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp 2008-12-03 00:10:28 UTC (rev 2912)
+++ store/trunk/cpp/tests/jrnl/_st_helper_fns.hpp 2008-12-03 15:32:36 UTC (rev 2913)
@@ -61,7 +61,7 @@
bool done() { if (flag || _wstate == NONE) return true; else { flag = true; return false; } }
};
-class test_jrnl : public jcntl, public aio_callback
+class test_jrnl : public jcntl, public virtual aio_callback
{
public:
test_jrnl(const std::string& jid, const std::string& jdir, const std::string& base_filename) :
@@ -78,7 +78,7 @@
vector<string>* txn_list, u_int64_t& highest_rid)
{ jcntl::recover(num_jfiles, ae, ae_max_jfiles, jfsize_sblks, JRNL_WMGR_DEF_PAGES, JRNL_WMGR_DEF_PAGE_SIZE, this,
txn_list, highest_rid); }
- void wr_aio_cb(std::vector<data_tok*>& dtokl)
+ virtual void wr_aio_cb(std::vector<data_tok*>& dtokl)
{
for (std::vector<data_tok*>::const_iterator i=dtokl.begin(); i!=dtokl.end(); i++)
{
@@ -87,7 +87,7 @@
delete dtp;
}
}
- void rd_aio_cb(std::vector<u_int16_t>& /*pil*/) {}
+ virtual void rd_aio_cb(std::vector<u_int16_t>& /*pil*/) {}
};
/*
Modified: store/trunk/cpp/tests/jrnl/jtt/jrnl_instance.hpp
===================================================================
--- store/trunk/cpp/tests/jrnl/jtt/jrnl_instance.hpp 2008-12-03 00:10:28 UTC (rev 2912)
+++ store/trunk/cpp/tests/jrnl/jtt/jrnl_instance.hpp 2008-12-03 15:32:36 UTC (rev 2913)
@@ -41,7 +41,7 @@
namespace jtt
{
- class jrnl_instance : public mrg::journal::jcntl, public mrg::journal::aio_callback
+ class jrnl_instance : public mrg::journal::jcntl, public virtual mrg::journal::aio_callback
{
public:
typedef boost::shared_ptr<jrnl_instance> shared_ptr;
@@ -88,8 +88,8 @@
void tc_wait_compl() throw ();
// AIO callbacks
- void wr_aio_cb(std::vector<journal::data_tok*>& dtokl);
- void rd_aio_cb(std::vector<u_int16_t>& pil);
+ virtual void wr_aio_cb(std::vector<journal::data_tok*>& dtokl);
+ virtual void rd_aio_cb(std::vector<u_int16_t>& pil);
private:
void run_enq() throw ();
17 years, 4 months
rhmessaging commits: r2912 - in mgmt/trunk/mint: python/mint and 1 other directories.
by rhmessaging-commits@lists.jboss.org
Author: nunofsantos
Date: 2008-12-02 19:10:28 -0500 (Tue, 02 Dec 2008)
New Revision: 2912
Added:
mgmt/trunk/mint/sql/triggers.sql
Modified:
mgmt/trunk/mint/Makefile
mgmt/trunk/mint/python/mint/Makefile
mgmt/trunk/mint/python/mint/__init__.py
mgmt/trunk/mint/python/mint/schemaparser.py
mgmt/trunk/mint/sql/Makefile
Log:
automatically generate SQL triggers to update foreign keys to statistics tables on parent tables, upon insertion of statistics records
Modified: mgmt/trunk/mint/Makefile
===================================================================
--- mgmt/trunk/mint/Makefile 2008-12-02 22:58:08 UTC (rev 2911)
+++ mgmt/trunk/mint/Makefile 2008-12-03 00:10:28 UTC (rev 2912)
@@ -2,8 +2,6 @@
include ../etc/Makefile.common
-dsn := "postgresql://localhost/"
-
name := mint
lib := ${PYTHON_LIB_DIR}/${name}
Modified: mgmt/trunk/mint/python/mint/Makefile
===================================================================
--- mgmt/trunk/mint/python/mint/Makefile 2008-12-02 22:58:08 UTC (rev 2911)
+++ mgmt/trunk/mint/python/mint/Makefile 2008-12-03 00:10:28 UTC (rev 2912)
@@ -1,10 +1,9 @@
.PHONY: schema clean
-dsn := "postgresql://localhost/"
-
schema: schema.py
schema.py: schemaparser.py ../../xml/*.xml
- python schemaparser.py schema.py ${dsn} ../../xml/*.xml
+ python schemaparser.py schema.py ../../sql/triggers.sql ../../xml/*.xml
clean:
+ rm -f schema.py ../../sql/triggers.sql
Modified: mgmt/trunk/mint/python/mint/__init__.py
===================================================================
--- mgmt/trunk/mint/python/mint/__init__.py 2008-12-02 22:58:08 UTC (rev 2911)
+++ mgmt/trunk/mint/python/mint/__init__.py 2008-12-03 00:10:28 UTC (rev 2912)
@@ -94,6 +94,7 @@
pass
for path, text in scripts:
+ # TODO: fix splitting of sql statements by ';' before enabling triggers
stmts = text.split(";")
count = 0
Modified: mgmt/trunk/mint/python/mint/schemaparser.py
===================================================================
--- mgmt/trunk/mint/python/mint/schemaparser.py 2008-12-02 22:58:08 UTC (rev 2911)
+++ mgmt/trunk/mint/python/mint/schemaparser.py 2008-12-03 00:10:28 UTC (rev 2912)
@@ -4,15 +4,16 @@
class SchemaParser:
"""parses broker XML schema"""
- def __init__(self, pythonFilePath, dsn, xmlFilePaths):
+ def __init__(self, pythonFilePath, sqlTriggersFilePath, xmlFilePaths):
self.pythonFilePath = pythonFilePath
- self.dsn = dsn
+ self.sqlTriggersFilePath = sqlTriggersFilePath
self.xmlFilePaths = xmlFilePaths
self.style = MixedCaseUnderscoreStyle()
self.additionalPythonOutput = ""
self.currentClass = ""
self.pythonOutput = ""
self.finalPythonOutput = ""
+ self.sqlTriggersOutput = ""
self.entityClasses = []
self.statsClasses = []
self.groups = dict()
@@ -134,6 +135,9 @@
pythonName = self.style.dbTableToPythonClass(schemaName + "_stats")
colPythonName = self.style.dbColumnToPythonAttr(schemaName)
keyPythonName = self.style.dbTableToPythonClass(schemaName)
+ self.sqlTriggersOutput += "DROP TRIGGER update_stats ON %s; \n" % (self.style.pythonClassToDBTable(pythonName))
+ self.sqlTriggersOutput += "CREATE TRIGGER update_stats AFTER INSERT ON %s \n" % (self.style.pythonClassToDBTable(pythonName))
+ self.sqlTriggersOutput += " FOR EACH ROW EXECUTE PROCEDURE update_stats(); \n\n"
else:
pythonName = self.style.dbTableToPythonClass(schemaName)
statsPythonName = self.style.dbTableToPythonClass(schemaName + "_stats")
@@ -208,7 +212,27 @@
self.finalPythonOutput += ' "connectionRef": "clientConnectionRef", \n'
self.finalPythonOutput += ' "user": "gridUser", "User": "GridUser", \n'
self.finalPythonOutput += ' "registeredTo": "broker"} \n\n'
+
+ # TODO: optimize getting the id to the parent table from new.parent_table_id
+ self.sqlTriggersOutput += """
+CREATE OR REPLACE FUNCTION update_stats() RETURNS trigger AS '
+DECLARE
+ parent_table varchar;
+ update_parent varchar;
+BEGIN
+ -- remove "_stats" suffix from table name
+ parent_table := substr(tg_table_name, 0, char_length(tg_table_name) - 5);
+ update_parent := ''UPDATE '' || parent_table ||
+ '' SET stats_prev_id = stats_curr_id, stats_curr_id = '' || new.id ||
+ '' WHERE id = (SELECT '' || parent_table || ''_id FROM '' || tg_table_name || '' WHERE id = '' || new.id || '')'';
+ EXECUTE update_parent;
+ RETURN new;
+END
+' LANGUAGE plpgsql;
+
+"""
outputFile = open(self.pythonFilePath, "w")
+ sqlTriggersFile = open(self.sqlTriggersFilePath, "w")
for xmlFile in self.xmlFilePaths:
schema = mllib.xml_parse(xmlFile)
# parse groups and store their structure as is
@@ -239,12 +263,14 @@
self.finalPythonOutput += "\nstatsClasses = %s\n" % (self.statsClasses)
outputFile.write(self.pythonOutput + self.finalPythonOutput)
outputFile.close()
+ sqlTriggersFile.write(self.sqlTriggersOutput)
+ sqlTriggersFile.close()
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
- print "Usage: schemaparser.py OUTPUT-PYTHON-FILE DSN INPUT-XML-SCHEMA [INPUT-XML-SCHEMA]*"
+ print "Usage: schemaparser.py OUTPUT-PYTHON-FILE OUTPUT-SQL-TRIGGERS-FILE INPUT-XML-SCHEMA [INPUT-XML-SCHEMA]*"
sys.exit(1)
else:
parser = SchemaParser(sys.argv[1], sys.argv[2], sys.argv[3:])
Modified: mgmt/trunk/mint/sql/Makefile
===================================================================
--- mgmt/trunk/mint/sql/Makefile 2008-12-02 22:58:08 UTC (rev 2911)
+++ mgmt/trunk/mint/sql/Makefile 2008-12-03 00:10:28 UTC (rev 2912)
@@ -8,3 +8,4 @@
sqlobject-admin sql -m mint -m mint.schema -c ${dsn} | sed -e '1,2d' > schema.sql
clean:
+ rm -f schema.sql
Added: mgmt/trunk/mint/sql/triggers.sql
===================================================================
--- mgmt/trunk/mint/sql/triggers.sql (rev 0)
+++ mgmt/trunk/mint/sql/triggers.sql 2008-12-03 00:10:28 UTC (rev 2912)
@@ -0,0 +1,108 @@
+
+CREATE OR REPLACE FUNCTION update_stats() RETURNS trigger AS '
+DECLARE
+ parent_table varchar;
+ update_parent varchar;
+BEGIN
+ -- remove "_stats" suffix from table name
+ parent_table := substr(tg_table_name, 0, char_length(tg_table_name) - 5);
+ update_parent := ''UPDATE '' || parent_table ||
+ '' SET stats_prev_id = stats_curr_id, stats_curr_id = '' || new.id ||
+ '' WHERE id = (SELECT '' || parent_table || ''_id FROM '' || tg_table_name || '' WHERE id = '' || new.id || '')'';
+ EXECUTE update_parent;
+ RETURN new;
+END
+' LANGUAGE plpgsql;
+
+DROP TRIGGER update_stats ON slot_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON slot_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON job_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON job_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON scheduler_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON scheduler_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON submitter_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON submitter_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON negotiator_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON negotiator_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON collector_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON collector_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON master_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON master_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON acl_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON acl_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON cluster_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON cluster_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON store_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON store_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON journal_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON journal_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON system_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON system_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON broker_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON broker_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON agent_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON agent_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON vhost_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON vhost_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON queue_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON queue_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON exchange_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON exchange_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON binding_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON binding_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON client_connection_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON client_connection_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON link_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON link_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON bridge_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON bridge_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON session_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON session_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
+DROP TRIGGER update_stats ON sysimage_stats;
+CREATE TRIGGER update_stats AFTER INSERT ON sysimage_stats
+ FOR EACH ROW EXECUTE PROCEDURE update_stats();
+
17 years, 4 months
rhmessaging commits: r2911 - store/trunk/cpp/lib/jrnl.
by rhmessaging-commits@lists.jboss.org
Author: cctrieloff
Date: 2008-12-02 17:58:08 -0500 (Tue, 02 Dec 2008)
New Revision: 2911
Modified:
store/trunk/cpp/lib/jrnl/aio_callback.hpp
Log:
added missing virtual destructor
Modified: store/trunk/cpp/lib/jrnl/aio_callback.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/aio_callback.hpp 2008-12-02 22:26:27 UTC (rev 2910)
+++ store/trunk/cpp/lib/jrnl/aio_callback.hpp 2008-12-02 22:58:08 UTC (rev 2911)
@@ -46,7 +46,7 @@
public:
virtual void wr_aio_cb(std::vector<data_tok*>& dtokl) = 0;
virtual void rd_aio_cb(std::vector<u_int16_t>& pil) = 0;
-
+ virtual ~aio_callback() {};
};
} // namespace journal
17 years, 4 months