rhmessaging commits: r2485 - mgmt/trunk/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2008-09-16 17:54:14 -0400 (Tue, 16 Sep 2008)
New Revision: 2485
Modified:
mgmt/trunk/cumin/python/cumin/job.py
mgmt/trunk/cumin/python/cumin/job.strings
Log:
Scroll Job output to end when in Tail mode
Modified: mgmt/trunk/cumin/python/cumin/job.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/job.py 2008-09-16 19:09:19 UTC (rev 2484)
+++ mgmt/trunk/cumin/python/cumin/job.py 2008-09-16 21:54:14 UTC (rev 2485)
@@ -587,6 +587,10 @@
def render_out_time(self, session):
return "13:14:00 Sept 16 2008"
+ def render_tail_js(self, session):
+ is_tail = self.first_last.get(session) == "t"
+ return is_tail and "<script language=\"javascript\" type=\"text/javascript\">addEvent(window, \"load\", outputEnd);</script>" or ""
+
def render_the_output(self, session, *args):
raw = """fcrawler.looksmart.com - - [26/Apr/2000:00:00:12 -0400] "GET /contacts.html HTTP/1.0" 200 4595 "-" "FAST-WebCrawler/2.1-pre2 (ashen(a)looksmart.net)"
Modified: mgmt/trunk/cumin/python/cumin/job.strings
===================================================================
--- mgmt/trunk/cumin/python/cumin/job.strings 2008-09-16 19:09:19 UTC (rev 2484)
+++ mgmt/trunk/cumin/python/cumin/job.strings 2008-09-16 21:54:14 UTC (rev 2485)
@@ -285,6 +285,18 @@
top: -0.5em;
}
+[JobOutput.javascript]
+function scrollToEnd (element) {
+ if (typeof element.scrollTop != 'undefined' &&
+ typeof element.scrollHeight != 'undefined') {
+ element.scrollTop = element.scrollHeight;
+ }
+}
+function outputEnd() {
+ var tarea = document.getElementById("the_output");
+ if (tarea)
+ scrollToEnd(tarea);
+}
[JobOutput.html]
<form id="{id}" style="width:100%; border:0px;" class="mform" method="post" action="?">
@@ -297,4 +309,5 @@
{the_output}
</textarea>
<div>{hidden_inputs}</div>
-</form>
\ No newline at end of file
+</form>
+{tail_js}
17 years, 7 months
rhmessaging commits: r2484 - mgmt/trunk/cumin/python/wooly.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2008-09-16 15:09:19 -0400 (Tue, 16 Sep 2008)
New Revision: 2484
Modified:
mgmt/trunk/cumin/python/wooly/resources.py
Log:
Open the file outside the try/finally, to get a more useful error message
Modified: mgmt/trunk/cumin/python/wooly/resources.py
===================================================================
--- mgmt/trunk/cumin/python/wooly/resources.py 2008-09-16 18:30:05 UTC (rev 2483)
+++ mgmt/trunk/cumin/python/wooly/resources.py 2008-09-16 19:09:19 UTC (rev 2484)
@@ -10,8 +10,8 @@
self.strings = None
def load(self):
+ file = open(self.path)
try:
- file = open(self.path)
self.strings = parse_catalog_file(file)
finally:
file.close()
17 years, 7 months
rhmessaging commits: r2483 - mgmt/trunk/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2008-09-16 14:30:05 -0400 (Tue, 16 Sep 2008)
New Revision: 2483
Modified:
mgmt/trunk/cumin/python/cumin/job.py
mgmt/trunk/cumin/python/cumin/job.strings
Log:
Using real GetAd data
Sketched in Job Output page
Modified: mgmt/trunk/cumin/python/cumin/job.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/job.py 2008-09-16 18:28:57 UTC (rev 2482)
+++ mgmt/trunk/cumin/python/cumin/job.py 2008-09-16 18:30:05 UTC (rev 2483)
@@ -367,14 +367,12 @@
class JobAdsSet(PropertySet):
def __init__(self, app, name):
super(JobAdsSet, self).__init__(app, name, )
- self.got_data = False
def get_args(self, session):
return self.frame.get_args(session)
def do_get_items(self, session, job):
items = self.items.get(session)
- #items = None
if not items:
items = self.gen_items(session, job)
# cache the items
@@ -383,103 +381,29 @@
return items
def gen_items(self, session, job):
+ self.job_ads = dict()
+ self.got_data = False
- def completion(status, args):
+ def completion(status, job_ads):
+ self.job_ads = job_ads["JobAd"]
self.got_data = True
-
- ads = dict()
+
+ def predicate():
+ return self.got_data
+
model = self.app.model
- ret = job.GetAd(model.data, completion, ads)
+ job.GetAd(model.data, completion, self.job_ads)
- while not self.got_data:
- pass
-# wait(self.got_data)
-
- # stolen straight for qpid-tool
- ads = {u'CondorPlatform': u'$CondorPlatform: I386-LINUX_RHEL5 $',
- u'KillSig': u'SIGTERM',
- u'TransferFiles': u'ONEXIT',
- u'Requirements': u'(Arch == "INTEL") && (OpSys == "LINUX") && (Disk >= DiskUsage) && ((Memory * 1024) >= ImageSize) && ((HasFileTransfer) || (TARGET.FileSystemDomain == MY.FileSystemDomain))',
- u'ImageSize': 20,
- u'TargetType': u'Machine',
- u'OnExitHold': u'FALSE',
- u'NumCkpts': 0,
- u'PeriodicRelease': u'FALSE',
- u'BufferBlockSize': 32768,
- u'Environment': u'',
- u'RootDir': u'/',
- u'JobPrio': 0,
- u'MinHosts': 1,
- u'WantCheckpoint': u'FALSE',
- u'CumulativeSuspensionTime': 0,
- u'WantRemoteSyscalls': u'FALSE',
- u'FileSystemDomain': u'mrg2.lab.bos.redhat.com',
- u'GlobalJobId': u'mrg-test(a)#1219326763#1.0',
- u'ExitStatus': 0,
- u'WhenToTransferOutput': u'ON_EXIT',
- u'CurrentHosts': 0,
- u'ProcId': 0,
- u'ImageSize_RAW': 20,
- u'Iwd': u'/home/remote/mfarrell',
- u'HoldReasonCode': 15,
- u'NumSystemHolds': 0,
- u'PeriodicHold': u'FALSE',
- u'Args': u'100',
- u'ClusterId': 1,
- u'OnExitRemove': u'TRUE',
- u'RemoteWallClockTime': 0,
- u'HoldReason': u"submitted on hold at user's request",
- u'NiceUser': u'FALSE',
- u'PeriodicRemove': u'FALSE',
- u'UserLog': u'/home/remote/mfarrell/log.1.0',
- u'MyType': u'Job',
- u'WantRemoteIO': u'TRUE',
- u'LocalUserCpu': 0,
- u'BufferSize': 524288,
- u'ScheddBday': 12059630,
- u'CompletionDate': 0,
- u'JobLeaseDuration': 1200,
- u'LastSuspensionTime': 0,
- u'NumRestarts': 0,
- u'Err': u'/dev/null',
- u'QDate': 11367211,
- u'RemoteSysCpu': 0,
- u'Cmd': u'/bin/sleep',
- u'TransferOut': u'FALSE',
- u'DiskUsage_RAW': 20,
- u'ExecutableSize': 20,
- u'Owner': u'mfarrell',
- u'LeaveJobInQueue': u'FALSE',
- u'JobStatus': 5,
- u'ExecutableSize_RAW': 20,
- u'JobUniverse': 5,
- u'DiskUsage': 20,
- u'EnteredCurrentStatus': 11367211,
- u'TotalSuspensions': 0,
- u'ShouldTransferFiles': u'IF_NEEDED',
- u'NumCkpts_RAW': 0,
- u'MaxHosts': 1,
- u'CommittedTime': 0,
- u'In': u'/dev/null',
- u'CoreSize': 0,
- u'Rank': 0,
- u'RemoteUserCpu': 0,
- u'User': u'mfarrell(a)mrg2.lab.bos.redhat.com',
- u'LocalSysCpu': 0,
- u'TransferErr': u'FALSE',
- u'ExitBySignal': u'FALSE',
- u'CondorVersion': u'$CondorVersion: 7.0.4 Aug 6 2008 BuildID: RH-7.0.4-4.el5 $',
- u'TransferIn': u'FALSE',
- u'JobNotification': 0,
- u'NumJobStarts': 0,
- u'Out': u'/dev/null'}
-
+ # wait for up to 10 seconds for completion to be called
+ wait(predicate, timeout=10)
cls = self.app.model.get_class_by_object(job)
# list of dictionaries
# each disctionary has:
# name:, value:, type: [, error:] [, property:] [,path:]
- return [self.gen_item(x, ads[x], cls) for x in ads]
+ return [self.gen_item(x, self.job_ads[x], cls) for x in self.job_ads]
+#TODO: handle case where completion status isn't OK
+
def gen_item(self, name, value, cls, path=None, dtype=None, error=None):
idict = dict()
@@ -521,7 +445,7 @@
property = item["property"]
if property.renderer:
value = property.renderer(session, value)
- return escape_amp(value)
+ return escape_entity(str(value))
def render_inline_help(self, session, item):
if "property" in item:
@@ -641,14 +565,82 @@
action.invoke(job, just_ads)
self.process_cancel(session, job)
-class JobOutput(TabbedModeSet):
+class JobOutput(Form):
def __init__(self, app, name):
super(JobOutput, self).__init__(app, name)
+ self.which_file = self.FileSwitch(app, "file")
+ self.add_child(self.which_file)
+
+ self.first_last = self.FLSwitch(app, "first_last")
+ self.add_child(self.first_last)
+
+ self.__fetch = self.FetchButton(app, "refresh", self)
+ self.add_child(self.__fetch)
+ self.out_time = Attribute(app, "out_time")
+ self.add_attribute(self.out_time)
+
def render_title(self, session):
return "Output"
+
+ def render_out_time(self, session):
+ return "13:14:00 Sept 16 2008"
+ def render_the_output(self, session, *args):
+
+ raw = """fcrawler.looksmart.com - - [26/Apr/2000:00:00:12 -0400] "GET /contacts.html HTTP/1.0" 200 4595 "-" "FAST-WebCrawler/2.1-pre2 (ashen(a)looksmart.net)"
+fcrawler.looksmart.com - - [26/Apr/2000:00:17:19 -0400] "GET /news/news.html HTTP/1.0" 200 16716 "-" "FAST-WebCrawler/2.1-pre2 (ashen(a)looksmart.net)"
+
+ppp931.on.bellglobal.com - - [26/Apr/2000:00:16:12 -0400] "GET /download/windows/asctab31.zip HTTP/1.0" 200 1540096 "http://www.htmlgoodies.com/downloads/freeware/webdevelopment/15.html" "Mozilla/4.7 [en]C-SYMPA (Win95; U)"
+
+123.123.123.123 - - [26/Apr/2000:00:23:48 -0400] "GET /pics/wpaper.gif HTTP/1.0" 200 6248 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:47 -0400] "GET /asctortf/ HTTP/1.0" 200 8130 "http://search.netscape.com/Computers/Data_Formats/Document/Text/RTF" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:48 -0400] "GET /pics/5star2000.gif HTTP/1.0" 200 4005 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:50 -0400] "GET /pics/5star.gif HTTP/1.0" 200 1031 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:51 -0400] "GET /pics/a2hlogo.jpg HTTP/1.0" 200 4282 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:51 -0400] "GET /cgi-bin/newcount?jafsof3&width=4&font=digital&noshow HTTP/1.0" 200 36 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+192.168.2.20 - - [28/Jul/2006:10:27:10 -0300] "GET /cgi-bin/try/ HTTP/1.0" 200 3395
+127.0.0.1 - - [28/Jul/2006:10:22:04 -0300] "GET / HTTP/1.0" 200 2216
+195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
+195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
+
+195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
+195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
+195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
+195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
+
+123.123.123.123 - - [26/Apr/2000:00:23:48 -0400] "GET /pics/5star2000.gif HTTP/1.0" 200 4005 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:50 -0400] "GET /pics/5star.gif HTTP/1.0" 200 1031 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:51 -0400] "GET /pics/a2hlogo.jpg HTTP/1.0" 200 4282 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+123.123.123.123 - - [26/Apr/2000:00:23:51 -0400] "GET /cgi-bin/newcount?jafsof3&width=4&font=digital&noshow HTTP/1.0" 200 36 "http://www.jafsoft.com/asctortf/" "Mozilla/4.05 (Macintosh; I; PPC)"
+192.168.2.20 - - [28/Jul/2006:10:27:10 -0300] "GET /cgi-bin/try/ HTTP/1.0" 200 3395
+
+ """
+ return escape_entity(raw)
+
+ class FetchButton(FormButton):
+ def process_submit(self, session):
+ pass
+
+ def render_content(self, session):
+ return "Refresh"
+
+ class FileSwitch(StateSwitch):
+ def __init__(self, app, name):
+ super(JobOutput.FileSwitch, self).__init__(app, name)
+
+ self.add_state("o", "Output")
+ self.add_state("e", "Error")
+ self.add_state("l", "UserLog")
+
+ class FLSwitch(StateSwitch):
+ def __init__(self, app, name):
+ super(JobOutput.FLSwitch, self).__init__(app, name)
+
+ self.add_state("t", "Tail")
+ self.add_state("h", "Head")
+
class JobStatus(CuminStatus):
def render_color(self, session, job):
return JobStatusInfo.get_status_color(job.JobStatus)
Modified: mgmt/trunk/cumin/python/cumin/job.strings
===================================================================
--- mgmt/trunk/cumin/python/cumin/job.strings 2008-09-16 18:28:57 UTC (rev 2482)
+++ mgmt/trunk/cumin/python/cumin/job.strings 2008-09-16 18:30:05 UTC (rev 2483)
@@ -20,6 +20,7 @@
left outer join job_stats as p on p.id = j.stats_prev_id
inner join scheduler as s on s.id = j.scheduler_id
{sql_where}
+{sql_orderby}
{sql_limit}
[JobSet.count_sql]
@@ -75,6 +76,7 @@
1 as jobs
from job as j
{sql_where}
+{sql_orderby}
{sql_limit}
[JobGroupSet.count_sql]
@@ -264,3 +266,35 @@
]]>
</script>
+
+[JobOutput.css]
+textarea#the_output {
+ height: 25em;
+ width: 100%;
+ border: 1px solid #EAEAEA;
+ font-family: Fixed, monospace;
+ line-height: 1.15em;
+ background-color: #FFF;
+ color: #333;
+}
+div.out_actions {
+ padding:1em 1em 0 1em;
+}
+div.refresh_info h2 {
+ position: relative;
+ top: -0.5em;
+}
+
+
+[JobOutput.html]
+<form id="{id}" style="width:100%; border:0px;" class="mform" method="post" action="?">
+ <div class="out_actions">
+ <div class="rfloat">{first_last}</div>
+ {file}
+ </div>
+<div class="sactions refresh_info">{refresh} <h2>Last refresh was at {out_time}</h2></div>
+<textarea name="the_output" id="the_output" disabled="disabled" rows="20">
+{the_output}
+</textarea>
+<div>{hidden_inputs}</div>
+</form>
\ No newline at end of file
17 years, 7 months
rhmessaging commits: r2482 - mgmt/trunk/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2008-09-16 14:28:57 -0400 (Tue, 16 Sep 2008)
New Revision: 2482
Modified:
mgmt/trunk/cumin/python/cumin/util.py
Log:
Adding wait method
Modified: mgmt/trunk/cumin/python/cumin/util.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/util.py 2008-09-16 18:14:41 UTC (rev 2481)
+++ mgmt/trunk/cumin/python/cumin/util.py 2008-09-16 18:28:57 UTC (rev 2482)
@@ -1,7 +1,7 @@
from ConfigParser import SafeConfigParser
from datetime import datetime, timedelta
from logging import getLogger
-from time import mktime
+from time import mktime, time, sleep
from random import randint
import sys
@@ -183,3 +183,16 @@
self.type = type
self.default = default
self.summary = summary
+
+
+def wait(predicate, timeout=30):
+ start = time()
+
+ while True:
+ if predicate():
+ return
+
+ if time() - start > timeout:
+ raise Exception("Operation timed out")
+
+ sleep(1)
17 years, 7 months
rhmessaging commits: r2481 - in mgmt/trunk/mint: sql and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: nunofsantos
Date: 2008-09-16 14:14:41 -0400 (Tue, 16 Sep 2008)
New Revision: 2481
Modified:
mgmt/trunk/mint/python/mint/schema.py
mgmt/trunk/mint/python/mint/schemaparser.py
mgmt/trunk/mint/python/mint/update.py
mgmt/trunk/mint/sql/schema.sql
Log:
support argument maps through SQLObject's PickleCol
Modified: mgmt/trunk/mint/python/mint/schema.py
===================================================================
--- mgmt/trunk/mint/python/mint/schema.py 2008-09-16 15:47:43 UTC (rev 2480)
+++ mgmt/trunk/mint/python/mint/schema.py 2008-09-16 18:14:41 UTC (rev 2481)
@@ -55,28 +55,10 @@
connBacklog = SmallIntCol(default=None)
stagingThreshold = IntCol(default=None)
mgmtPubInterval = SmallIntCol(default=None)
- clusterName = StringCol(length=1000, default=None)
version = StringCol(length=1000, default=None)
dataDir = StringCol(length=1000, default=None)
- def joinCluster(self, model, callback, clusterName):
- actualArgs = dict()
- actualArgs["clusterName"] = clusterName
- conn = model.connections[self.managedBroker]
- classInfo = self.classInfos[self.managedBroker]
- originalId = objectId(None, self.sourceScopeId, self.sourceObjectId)
- conn.callMethod(originalId, classInfo, "joinCluster",
- callback, args=actualArgs)
-
- def leaveCluster(self, model, callback):
- actualArgs = dict()
- conn = model.connections[self.managedBroker]
- classInfo = self.classInfos[self.managedBroker]
- originalId = objectId(None, self.sourceScopeId, self.sourceObjectId)
- conn.callMethod(originalId, classInfo, "leaveCluster",
- callback, args=actualArgs)
-
def echo(self, model, callback, sequence, body):
"""Request a response to test the path to the management broker"""
actualArgs = dict()
@@ -192,7 +174,7 @@
durable = BoolCol(default=None)
autoDelete = BoolCol(default=None)
exclusive = BoolCol(default=None)
- arguments = StringCol(default=None)
+ arguments = PickleCol(default=None)
def purge(self, model, callback, request):
@@ -309,7 +291,7 @@
exchange = ForeignKey('Exchange', cascade='null', default=None)
queue = ForeignKey('Queue', cascade='null', default=None)
bindingKey = StringCol(length=1000, default=None)
- arguments = StringCol(default=None)
+ arguments = PickleCol(default=None)
class BindingStats(SQLObject):
@@ -340,6 +322,7 @@
vhost = ForeignKey('Vhost', cascade='null', default=None)
address = StringCol(length=1000, default=None)
incoming = BoolCol(default=None)
+ SystemConnection = BoolCol(default=None)
def close(self, model, callback):
@@ -552,13 +535,6 @@
location = StringCol(length=1000, default=None)
defaultInitialFileCount = SmallIntCol(default=None)
defaultDataFileSize = IntCol(default=None)
- tplIsInitialized = BoolCol(default=None)
- tplDirectory = StringCol(length=1000, default=None)
- tplWritePageSize = IntCol(default=None)
- tplWritePages = IntCol(default=None)
- tplInitialFileCount = SmallIntCol(default=None)
- tplDataFileSize = IntCol(default=None)
- tplCurrentFileCount = IntCol(default=None)
class StoreStats(SQLObject):
@@ -568,15 +544,6 @@
recTime = TimestampCol(default=None)
store = ForeignKey('Store', cascade='null', default=None)
classInfos = dict() # brokerId => classInfo
- tplTransactionDepth = IntCol(default=None)
- tplTransactionDepthLow = IntCol(default=None)
- tplTransactionDepthHigh = IntCol(default=None)
- tplTxnPrepares = BigIntCol(default=None)
- tplTxnCommits = BigIntCol(default=None)
- tplTxnAborts = BigIntCol(default=None)
- tplOutstandingAIOs = IntCol(default=None)
- tplOutstandingAIOsLow = IntCol(default=None)
- tplOutstandingAIOsHigh = IntCol(default=None)
@@ -594,17 +561,14 @@
statsCurr = ForeignKey('JournalStats', cascade='null', default=None)
statsPrev = ForeignKey('JournalStats', cascade='null', default=None)
classInfos = dict() # brokerId => classInfo
- queue = ForeignKey('Queue', cascade='null', default=None)
name = StringCol(length=1000, default=None)
+ queue = ForeignKey('Queue', cascade='null', default=None)
directory = StringCol(length=1000, default=None)
baseFileName = StringCol(length=1000, default=None)
writePageSize = IntCol(default=None)
writePages = IntCol(default=None)
readPageSize = IntCol(default=None)
readPages = IntCol(default=None)
- initialFileCount = SmallIntCol(default=None)
- dataFileSize = IntCol(default=None)
- currentFileCount = IntCol(default=None)
def expand(self, model, callback, by):
@@ -624,15 +588,14 @@
recTime = TimestampCol(default=None)
journal = ForeignKey('Journal', cascade='null', default=None)
classInfos = dict() # brokerId => classInfo
+ initialFileCount = SmallIntCol(default=None)
+ dataFileSize = IntCol(default=None)
+ currentFileCount = IntCol(default=None)
recordDepth = IntCol(default=None)
recordDepthLow = IntCol(default=None)
recordDepthHigh = IntCol(default=None)
- enqueues = BigIntCol(default=None)
- dequeues = BigIntCol(default=None)
- txnEnqueues = BigIntCol(default=None)
- txnDequeues = BigIntCol(default=None)
- txnCommits = BigIntCol(default=None)
- txnAborts = BigIntCol(default=None)
+ recordEnqueues = BigIntCol(default=None)
+ recordDequeues = BigIntCol(default=None)
outstandingAIOs = IntCol(default=None)
outstandingAIOsLow = IntCol(default=None)
outstandingAIOsHigh = IntCol(default=None)
@@ -817,33 +780,6 @@
conn.callMethod(originalId, classInfo, "GetAd",
callback, args=actualArgs)
- def Hold(self, model, callback, Reason):
- actualArgs = dict()
- actualArgs["Reason"] = Reason
- conn = model.connections[self.managedBroker]
- classInfo = self.classInfos[self.managedBroker]
- originalId = objectId(None, self.sourceScopeId, self.sourceObjectId)
- conn.callMethod(originalId, classInfo, "Hold",
- callback, args=actualArgs)
-
- def Release(self, model, callback, Reason):
- actualArgs = dict()
- actualArgs["Reason"] = Reason
- conn = model.connections[self.managedBroker]
- classInfo = self.classInfos[self.managedBroker]
- originalId = objectId(None, self.sourceScopeId, self.sourceObjectId)
- conn.callMethod(originalId, classInfo, "Release",
- callback, args=actualArgs)
-
- def Remove(self, model, callback, Reason):
- actualArgs = dict()
- actualArgs["Reason"] = Reason
- conn = model.connections[self.managedBroker]
- classInfo = self.classInfos[self.managedBroker]
- originalId = objectId(None, self.sourceScopeId, self.sourceObjectId)
- conn.callMethod(originalId, classInfo, "Remove",
- callback, args=actualArgs)
-
class JobStats(SQLObject):
class sqlmeta:
lazyUpdate = True
Modified: mgmt/trunk/mint/python/mint/schemaparser.py
===================================================================
--- mgmt/trunk/mint/python/mint/schemaparser.py 2008-09-16 15:47:43 UTC (rev 2480)
+++ mgmt/trunk/mint/python/mint/schemaparser.py 2008-09-16 18:14:41 UTC (rev 2481)
@@ -27,7 +27,7 @@
self.dataTypesMap["absTime"] = self.dataTypesMap["deltaTime"] = "BigIntCol"
self.dataTypesMap["bool"] = "BoolCol"
self.dataTypesMap["sstr"] = self.dataTypesMap["lstr"] = "StringCol"
- self.dataTypesMap["map"] = "StringCol"
+ self.dataTypesMap["map"] = "PickleCol"
# mapping for identifiers in the XML schema that are reserved words in either SQL or Python
self.reservedWords = {"in": "inRsv", "In": "InRsv",
"connection": "clientConnection", "Connection": "ClientConnection",
Modified: mgmt/trunk/mint/python/mint/update.py
===================================================================
--- mgmt/trunk/mint/python/mint/update.py 2008-09-16 15:47:43 UTC (rev 2480)
+++ mgmt/trunk/mint/python/mint/update.py 2008-09-16 18:14:41 UTC (rev 2481)
@@ -77,10 +77,6 @@
if "connectionRef" in attrs:
attrs["clientConnectionRef"] = attrs.pop("connectionRef")
- #XXX FIX -- fix handling of field tables
- if "arguments" in attrs:
- del attrs["arguments"]
-
for name in attrs.keys():
rename = schemaReservedWordsMap.get(name)
Modified: mgmt/trunk/mint/sql/schema.sql
===================================================================
--- mgmt/trunk/mint/sql/schema.sql 2008-09-16 15:47:43 UTC (rev 2480)
+++ mgmt/trunk/mint/sql/schema.sql 2008-09-16 18:14:41 UTC (rev 2481)
@@ -105,7 +105,7 @@
exchange_id INT,
queue_id INT,
binding_key VARCHAR(1000),
- arguments TEXT
+ arguments BYTEA
);
CREATE TABLE binding_stats (
@@ -160,7 +160,6 @@
conn_backlog SMALLINT,
staging_threshold INT,
mgmt_pub_interval SMALLINT,
- cluster_name VARCHAR(1000),
version VARCHAR(1000),
data_dir VARCHAR(1000),
registration_id INT
@@ -184,7 +183,8 @@
stats_prev_id INT,
vhost_id INT,
address VARCHAR(1000),
- incoming BOOL
+ incoming BOOL,
+ system_connection BOOL
);
CREATE TABLE client_connection_stats (
@@ -283,32 +283,28 @@
managed_broker VARCHAR(1000),
stats_curr_id INT,
stats_prev_id INT,
- queue_id INT,
name VARCHAR(1000),
+ queue_id INT,
directory VARCHAR(1000),
base_file_name VARCHAR(1000),
write_page_size INT,
write_pages INT,
read_page_size INT,
- read_pages INT,
- initial_file_count SMALLINT,
- data_file_size INT,
- current_file_count INT
+ read_pages INT
);
CREATE TABLE journal_stats (
id SERIAL PRIMARY KEY,
rec_time TIMESTAMP,
journal_id INT,
+ initial_file_count SMALLINT,
+ data_file_size INT,
+ current_file_count INT,
record_depth INT,
record_depth_low INT,
record_depth_high INT,
- enqueues BIGINT,
- dequeues BIGINT,
- txn_enqueues BIGINT,
- txn_dequeues BIGINT,
- txn_commits BIGINT,
- txn_aborts BIGINT,
+ record_enqueues BIGINT,
+ record_dequeues BIGINT,
outstanding_ai_os INT,
outstanding_ai_os_low INT,
outstanding_ai_os_high INT,
@@ -370,7 +366,7 @@
durable BOOL,
auto_delete BOOL,
exclusive BOOL,
- arguments TEXT
+ arguments BYTEA
);
CREATE TABLE queue_stats (
@@ -597,29 +593,13 @@
broker_id INT,
location VARCHAR(1000),
default_initial_file_count SMALLINT,
- default_data_file_size INT,
- tpl_is_initialized BOOL,
- tpl_directory VARCHAR(1000),
- tpl_write_page_size INT,
- tpl_write_pages INT,
- tpl_initial_file_count SMALLINT,
- tpl_data_file_size INT,
- tpl_current_file_count INT
+ default_data_file_size INT
);
CREATE TABLE store_stats (
id SERIAL PRIMARY KEY,
rec_time TIMESTAMP,
- store_id INT,
- tpl_transaction_depth INT,
- tpl_transaction_depth_low INT,
- tpl_transaction_depth_high INT,
- tpl_txn_prepares BIGINT,
- tpl_txn_commits BIGINT,
- tpl_txn_aborts BIGINT,
- tpl_outstanding_ai_os INT,
- tpl_outstanding_ai_os_low INT,
- tpl_outstanding_ai_os_high INT
+ store_id INT
);
CREATE TABLE submitter (
17 years, 7 months
rhmessaging commits: r2480 - mgmt/trunk.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2008-09-16 11:47:43 -0400 (Tue, 16 Sep 2008)
New Revision: 2480
Modified:
mgmt/trunk/Makefile
mgmt/trunk/README
Log:
Correct some documentation errors
Modified: mgmt/trunk/Makefile
===================================================================
--- mgmt/trunk/Makefile 2008-09-16 15:41:55 UTC (rev 2479)
+++ mgmt/trunk/Makefile 2008-09-16 15:47:43 UTC (rev 2480)
@@ -7,8 +7,6 @@
@echo " help Print this message"
@echo " tags Rebuild the tag index"
@echo " dist Create a dist tarball"
- @echo " cumin Install cumin"
- @echo " mint Install mint"
@echo " clean Remove software installed at $$DEVEL_HOME/install"
dist: clean
Modified: mgmt/trunk/README
===================================================================
--- mgmt/trunk/README 2008-09-16 15:41:55 UTC (rev 2479)
+++ mgmt/trunk/README 2008-09-16 15:47:43 UTC (rev 2480)
@@ -126,8 +126,8 @@
Executed 6 statements from file '/home/jross/checkouts/mgmt/cumin-test-0/sql/indexes.sql'
At this point you should have a working database and schema that you
-can connect to at postgresql://exampleuser@localhost/exampledb. All
-that remains is to add a cumin user:
+can connect to at postgresql://cumin@localhost/cumin. All that
+remains is to add a cumin user:
Add a cumin user:
17 years, 7 months
rhmessaging commits: r2479 - mgmt/trunk/etc.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2008-09-16 11:41:55 -0400 (Tue, 16 Sep 2008)
New Revision: 2479
Modified:
mgmt/trunk/etc/devel.profile
mgmt/trunk/etc/devel.profile.tcsh
Log:
Go ahead and overwrite DEVEL_HOME
Modified: mgmt/trunk/etc/devel.profile
===================================================================
--- mgmt/trunk/etc/devel.profile 2008-09-16 15:13:05 UTC (rev 2478)
+++ mgmt/trunk/etc/devel.profile 2008-09-16 15:41:55 UTC (rev 2479)
@@ -1,7 +1,4 @@
-if [ -z "$DEVEL_HOME" ]; then
- export DEVEL_HOME="$PWD"
-fi
-
+export DEVEL_HOME="$PWD"
export DEVEL_MODULES="basil mint cumin"
# PYTHONPATH
Modified: mgmt/trunk/etc/devel.profile.tcsh
===================================================================
--- mgmt/trunk/etc/devel.profile.tcsh 2008-09-16 15:13:05 UTC (rev 2478)
+++ mgmt/trunk/etc/devel.profile.tcsh 2008-09-16 15:41:55 UTC (rev 2479)
@@ -1,7 +1,4 @@
-if (! $?DEVEL_HOME) then
- setenv DEVEL_HOME "$PWD"
-endif
-
+setenv DEVEL_HOME "$PWD"
set DEVEL_MODULES=(mint cumin)
# PYTHONPATH
17 years, 7 months
rhmessaging commits: r2478 - mgmt/trunk/mint/python/mint.
by rhmessaging-commits@lists.jboss.org
Author: nunofsantos
Date: 2008-09-16 11:13:05 -0400 (Tue, 16 Sep 2008)
New Revision: 2478
Modified:
mgmt/trunk/mint/python/mint/update.py
Log:
bz 461953: args was being shadowed in this scope
Modified: mgmt/trunk/mint/python/mint/update.py
===================================================================
--- mgmt/trunk/mint/python/mint/update.py 2008-09-16 14:27:20 UTC (rev 2477)
+++ mgmt/trunk/mint/python/mint/update.py 2008-09-16 15:13:05 UTC (rev 2478)
@@ -270,14 +270,14 @@
self.args = args
def process(self, model):
- args = ("method", self.conn.id, self.methodId, self.errorCode,
+ logArgs = ("method", self.conn.id, self.methodId, self.errorCode,
self.errorText)
- log.info("Processing %-8s %-16s %-12s %-12s %s" % args)
+ log.info("Processing %-8s %-16s %-12s %-12s %s" % logArgs)
model.lock()
try:
method = model.outstandingMethodCalls.pop(self.methodId)
- method(self.errorText, args)
+ method(self.errorText, self.args)
finally:
model.unlock()
17 years, 7 months
rhmessaging commits: r2477 - in mgmt/trunk/cumin/python: wooly and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2008-09-16 10:27:20 -0400 (Tue, 16 Sep 2008)
New Revision: 2477
Modified:
mgmt/trunk/cumin/python/cumin/binding.py
mgmt/trunk/cumin/python/cumin/brokerlink.py
mgmt/trunk/cumin/python/cumin/exchange.py
mgmt/trunk/cumin/python/cumin/job.py
mgmt/trunk/cumin/python/cumin/queue.py
mgmt/trunk/cumin/python/cumin/scheduler.py
mgmt/trunk/cumin/python/cumin/submitter.py
mgmt/trunk/cumin/python/cumin/widgets.py
mgmt/trunk/cumin/python/wooly/__init__.py
Log:
Removed pre_render and pre_process methods
Modified: mgmt/trunk/cumin/python/cumin/binding.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/binding.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/binding.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -325,10 +325,11 @@
return writer.to_string()
- def pre_process(self, session):
+ def render(self, session, *args):
phase = self.phase.get(session)
if phase:
self.state.set(session, phase)
+ return super(ExchangeKeysField, self).render(session, *args)
def get_binding_errors(self, session, queue_name):
Modified: mgmt/trunk/cumin/python/cumin/brokerlink.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/brokerlink.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/brokerlink.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -393,10 +393,11 @@
return writer.to_string()
- def pre_process(self, session, *args):
+ def render(self, session, *args):
phase = self.phase.get(session)
if phase:
self.state.set(session, phase)
+ return super(ExchangeRadioField, self).render(session, *args)
class BridgeAdd(CuminFieldForm):
def __init__(self, app, name):
@@ -446,8 +447,7 @@
def render_title(self, session, link):
return "Add Route to '%s:%d'" % (link.host, link.port)
- def pre_process(self, session, *args):
- self.exchange.pre_process(session, *args)
+ def process_display(self, session, *args):
if not self.tag.get(session):
self.tag.set(session, args[0].managedBroker)
Modified: mgmt/trunk/cumin/python/cumin/exchange.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/exchange.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/exchange.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -308,9 +308,10 @@
self.set_default_column_name("q_id")
- def pre_render(self, session, args):
+ def do_process(self, session, *args):
self.show_column(session, "q_id")
-
+ return super(ExchangeBindingSet, self).do_process(session, *args)
+
def render_title(self, session, exchange):
return "Queue Bindings %s" % \
fmt_count(exchange.bindings.count())
Modified: mgmt/trunk/cumin/python/cumin/job.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/job.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/job.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -286,12 +286,13 @@
self.__release = JobReleaseButton(app, "release", self)
self.add_child(self.__release)
- def pre_render(self, session, args):
+ def do_process(self, session, *args):
self.show_column(session, "global_job_id")
self.show_column(session, "cmd")
#self.show_column(session, "submitter")
self.show_column(session, "job_status")
self.show_column(session, "scheduler")
+ super(JobGroupJobSet, self).do_process(session, *args)
def render_title(self, session, group):
where_group = "custom_group = '%s'" % group.get_id()
@@ -748,7 +749,7 @@
self.__release = JobReleaseButton(app, "release", self)
self.add_child(self.__release)
- def pre_render(self, session, args):
+ def do_process(self, session, *args):
self.show_column(session, "global_job_id")
self.show_column(session, "custom_id")
self.show_column(session, "cmd")
@@ -756,6 +757,7 @@
self.show_column(session, "scheduler")
self.show_column(session, "submitter")
self.show_column(session, "job_status")
+ super(JobTab, self).do_process(session, *args)
def render_sql_where(self, session, *args):
phase_sql = self.get_phase_sql(session)
Modified: mgmt/trunk/cumin/python/cumin/queue.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/queue.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/queue.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -289,8 +289,9 @@
self.set_default_column_name("e_id")
- def pre_render(self, session, args):
+ def do_process(self, session, *args):
self.show_column(session, "e_id")
+ super(QueueBindingSet, self).do_process(session, *args)
def render_add_queue_binding_url(self, session, vhost):
branch = session.branch()
@@ -340,9 +341,6 @@
self.bindings = ExchangeKeysField(app, "bindings", self)
self.add_field(self.bindings)
- def pre_process(self, session):
- self.bindings.pre_process(session)
-
def validate(self, session, queue_name):
super_error = super(QueueForm, self).validate(session)
(errors, form_binding_info) = self.bindings.get_binding_errors(session, queue_name)
@@ -526,9 +524,6 @@
return "<ul class=\"errors\" style=\"margin:0; float:left;\"><li>%s</li></ul>" % \
"</li><li>".join(errors["no_exchanges"])
- def pre_process(self, session):
- self.bindings.pre_process(session)
-
def process_submit(self, session):
queue = self.frame.get_object(session)
(errors, form_binding_info) = self.bindings.get_binding_errors(session, queue.name)
Modified: mgmt/trunk/cumin/python/cumin/scheduler.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/scheduler.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/scheduler.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -141,13 +141,14 @@
self.__release = JobReleaseButton(app, "release", self)
self.add_child(self.__release)
- def pre_render(self, session, args):
+ def do_process(self, session, *args):
self.show_column(session, "global_job_id")
self.show_column(session, "custom_id")
self.show_column(session, "cmd")
self.show_column(session, "submitter")
self.show_column(session, "custom_group")
self.show_column(session, "job_status")
+ super(SchedulerJobSet, self).do_process(session, *args)
def render_sql_where(self, session, scheduler):
phase_sql = self.get_phase_sql(session)
Modified: mgmt/trunk/cumin/python/cumin/submitter.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/submitter.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/submitter.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -132,13 +132,14 @@
self.__release = JobReleaseButton(app, "release", self)
self.add_child(self.__release)
- def pre_render(self, session, args):
+ def do_process(self, session, *args):
self.show_column(session, "global_job_id")
self.show_column(session, "custom_id")
self.show_column(session, "cmd")
self.show_column(session, "scheduler")
self.show_column(session, "custom_group")
self.show_column(session, "job_status")
+ super(SubmitterJobSet, self).do_process(session, *args)
def render_sql_where(self, session, submitter):
phase_sql = self.get_phase_sql(session)
Modified: mgmt/trunk/cumin/python/cumin/widgets.py
===================================================================
--- mgmt/trunk/cumin/python/cumin/widgets.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/cumin/widgets.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -152,8 +152,6 @@
def do_process(self, session, *args):
self.page.set_modal(session, self.get_modal(session))
- self.pre_process(session, *args)
-
if self.__cancel.get(session):
self.__cancel.set(session, False)
@@ -179,9 +177,6 @@
def post_process(self, session, *args):
pass
- def pre_process(self, session, *args):
- pass
-
def render_cancel_content(self, session, *args):
return "Cancel"
Modified: mgmt/trunk/cumin/python/wooly/__init__.py
===================================================================
--- mgmt/trunk/cumin/python/wooly/__init__.py 2008-09-15 19:24:38 UTC (rev 2476)
+++ mgmt/trunk/cumin/python/wooly/__init__.py 2008-09-16 14:27:20 UTC (rev 2477)
@@ -232,11 +232,7 @@
return string
- def pre_render(self, session, *args):
- pass
-
def do_render(self, session, *args):
- self.pre_render(session, *args)
writer = Writer()
self.__main_tmpl.render(writer, session, *args)
return writer.to_string()
17 years, 7 months
rhmessaging commits: r2476 - in store/trunk/cpp/lib: gen and 5 other directories.
by rhmessaging-commits@lists.jboss.org
Author: tedross
Date: 2008-09-15 15:24:38 -0400 (Mon, 15 Sep 2008)
New Revision: 2476
Added:
store/trunk/cpp/lib/gen/qmf/
store/trunk/cpp/lib/gen/qmf/com/
store/trunk/cpp/lib/gen/qmf/com/redhat/
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/ArgsJournalExpand.h
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.cpp
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.h
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.cpp
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.h
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.cpp
store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.h
Removed:
store/trunk/cpp/lib/gen/com/
Modified:
store/trunk/cpp/lib/JournalImpl.cpp
store/trunk/cpp/lib/JournalImpl.h
store/trunk/cpp/lib/Makefile.am
store/trunk/cpp/lib/MessageStoreImpl.cpp
store/trunk/cpp/lib/MessageStoreImpl.h
Log:
Fixed usage of generated include files from qpid
Modified: store/trunk/cpp/lib/JournalImpl.cpp
===================================================================
--- store/trunk/cpp/lib/JournalImpl.cpp 2008-09-15 16:04:28 UTC (rev 2475)
+++ store/trunk/cpp/lib/JournalImpl.cpp 2008-09-15 19:24:38 UTC (rev 2476)
@@ -28,7 +28,7 @@
#include "jrnl/slock.hpp"
#include "qpid/log/Statement.h"
#include "qpid/agent/ManagementAgent.h"
-#include "com/redhat/rhm/store/ArgsJournalExpand.h"
+#include "qmf/com/redhat/rhm/store/ArgsJournalExpand.h"
#include "qpid/sys/Monitor.h"
#include "StoreException.h"
Modified: store/trunk/cpp/lib/JournalImpl.h
===================================================================
--- store/trunk/cpp/lib/JournalImpl.h 2008-09-15 16:04:28 UTC (rev 2475)
+++ store/trunk/cpp/lib/JournalImpl.h 2008-09-15 19:24:38 UTC (rev 2476)
@@ -34,7 +34,7 @@
#include <boost/ptr_container/ptr_list.hpp>
#include <boost/intrusive_ptr.hpp>
#include "qpid/management/Manageable.h"
-#include "com/redhat/rhm/store/Journal.h"
+#include "qmf/com/redhat/rhm/store/Journal.h"
namespace mrg {
namespace msgstore {
Modified: store/trunk/cpp/lib/Makefile.am
===================================================================
--- store/trunk/cpp/lib/Makefile.am 2008-09-15 16:04:28 UTC (rev 2475)
+++ store/trunk/cpp/lib/Makefile.am 2008-09-15 19:24:38 UTC (rev 2476)
@@ -97,13 +97,13 @@
jrnl/txn_rec.hpp \
jrnl/wmgr.hpp \
jrnl/wrfc.hpp \
- gen/com/redhat/rhm/store/Package.cpp \
- gen/com/redhat/rhm/store/Package.h \
- gen/com/redhat/rhm/store/Journal.cpp \
- gen/com/redhat/rhm/store/Journal.h \
- gen/com/redhat/rhm/store/Store.cpp \
- gen/com/redhat/rhm/store/Store.h \
- gen/com/redhat/rhm/store/ArgsJournalExpand.h
+ gen/qmf/com/redhat/rhm/store/Package.cpp \
+ gen/qmf/com/redhat/rhm/store/Package.h \
+ gen/qmf/com/redhat/rhm/store/Journal.cpp \
+ gen/qmf/com/redhat/rhm/store/Journal.h \
+ gen/qmf/com/redhat/rhm/store/Store.cpp \
+ gen/qmf/com/redhat/rhm/store/Store.h \
+ gen/qmf/com/redhat/rhm/store/ArgsJournalExpand.h
BUILT_SOURCES = db-inc.h
Modified: store/trunk/cpp/lib/MessageStoreImpl.cpp
===================================================================
--- store/trunk/cpp/lib/MessageStoreImpl.cpp 2008-09-15 16:04:28 UTC (rev 2475)
+++ store/trunk/cpp/lib/MessageStoreImpl.cpp 2008-09-15 19:24:38 UTC (rev 2476)
@@ -28,7 +28,7 @@
#include "IdPairDbt.h"
#include "jrnl/txn_map.hpp"
#include "qpid/log/Statement.h"
-#include "com/redhat/rhm/store/Package.h"
+#include "qmf/com/redhat/rhm/store/Package.h"
#define MAX_AIO_SLEEPS 1000 // ~1 second
#define AIO_SLEEP_TIME 1000 // 1 milisecond
Modified: store/trunk/cpp/lib/MessageStoreImpl.h
===================================================================
--- store/trunk/cpp/lib/MessageStoreImpl.h 2008-09-15 16:04:28 UTC (rev 2475)
+++ store/trunk/cpp/lib/MessageStoreImpl.h 2008-09-15 19:24:38 UTC (rev 2476)
@@ -36,7 +36,7 @@
#include "qpid/broker/Broker.h"
#include "qpid/broker/MessageStore.h"
#include "qpid/management/Manageable.h"
-#include "com/redhat/rhm/store/Store.h"
+#include "qmf/com/redhat/rhm/store/Store.h"
#include "TxnCtxt.h"
// Assume DB_VERSION_MAJOR == 4
Added: store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/ArgsJournalExpand.h
===================================================================
--- store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/ArgsJournalExpand.h (rev 0)
+++ store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/ArgsJournalExpand.h 2008-09-15 19:24:38 UTC (rev 2476)
@@ -0,0 +1,46 @@
+
+#ifndef _ARGS_JOURNALEXPAND_
+#define _ARGS_JOURNALEXPAND_
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+// This source file was created by a code generator.
+// Please do not edit.
+
+#include "qpid/management/Args.h"
+#include <string>
+
+namespace qmf {
+namespace com {
+namespace redhat {
+namespace rhm {
+namespace store {
+
+
+ class ArgsJournalExpand : public ::qpid::management::Args
+{
+ public:
+ uint32_t i_by;
+
+};
+
+}}}}}
+
+#endif /*!_ARGS_JOURNALEXPAND_*/
Added: store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.cpp
===================================================================
--- store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.cpp (rev 0)
+++ store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.cpp 2008-09-15 19:24:38 UTC (rev 2476)
@@ -0,0 +1,576 @@
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+// This source file was created by a code generator.
+// Please do not edit.
+
+#include "qpid/log/Statement.h"
+#include "qpid/framing/FieldTable.h"
+#include "qpid/management/Manageable.h"
+#include "qpid/agent/ManagementAgent.h"
+#include "Journal.h"
+#include "ArgsJournalExpand.h"
+
+
+using namespace qmf::com::redhat::rhm::store;
+using namespace qpid::framing;
+using qpid::management::ManagementAgent;
+using qpid::management::Manageable;
+using qpid::management::ManagementObject;
+using qpid::management::Args;
+using std::string;
+
+string Journal::packageName = string ("com.redhat.rhm.store");
+string Journal::className = string ("journal");
+uint8_t Journal::md5Sum[16] =
+ {0x1e,0x63,0xa4,0x3d,0xa3,0x1b,0xc0,0x1,0x1,0x70,0x5a,0x2a,0xb4,0xa,0x1b,0x4e};
+
+Journal::Journal (ManagementAgent* _agent, Manageable* _core) :
+ ManagementObject(_agent, _core)
+{
+
+ recordDepth = 0;
+ recordDepthHigh = 0;
+ recordDepthLow = 0;
+ outstandingAIOs = 0;
+ outstandingAIOsHigh = 0;
+ outstandingAIOsLow = 0;
+ freeFileCount = 0;
+ freeFileCountHigh = 0;
+ freeFileCountLow = 0;
+ availableFileCount = 0;
+ availableFileCountHigh = 0;
+ availableFileCountLow = 0;
+ writePageCacheDepth = 0;
+ writePageCacheDepthHigh = 0;
+ writePageCacheDepthLow = 0;
+ readPageCacheDepth = 0;
+ readPageCacheDepthHigh = 0;
+ readPageCacheDepthLow = 0;
+
+
+
+ maxThreads = agent->getMaxThreads();
+ perThreadStatsArray = new struct PerThreadStats*[maxThreads];
+ for (int idx = 0; idx < maxThreads; idx++)
+ perThreadStatsArray[idx] = 0;
+
+}
+
+Journal::~Journal ()
+{
+
+ for (int idx = 0; idx < maxThreads; idx++)
+ if (perThreadStatsArray[idx] != 0)
+ delete perThreadStatsArray[idx];
+ delete[] perThreadStatsArray;
+
+}
+
+namespace {
+ const string NAME("name");
+ const string TYPE("type");
+ const string ACCESS("access");
+ const string INDEX("index");
+ const string OPTIONAL("optional");
+ const string UNIT("unit");
+ const string MIN("min");
+ const string MAX("max");
+ const string MAXLEN("maxlen");
+ const string DESC("desc");
+ const string ARGCOUNT("argCount");
+ const string ARGS("args");
+ const string DIR("dir");
+ const string DEFAULT("default");
+}
+
+void Journal::registerClass(ManagementAgent* agent)
+{
+ agent->RegisterClass(packageName, className, md5Sum, writeSchema);
+}
+
+void Journal::writeSchema (Buffer& buf)
+{
+ FieldTable ft;
+
+ // Schema class header:
+ buf.putShortString (packageName); // Package Name
+ buf.putShortString (className); // Class Name
+ buf.putBin128 (md5Sum); // Schema Hash
+ buf.putShort (11); // Config Element Count
+ buf.putShort (28); // Inst Element Count
+ buf.putShort (1); // Method Count
+ buf.putShort (0); // Event Count
+
+ // Properties
+ ft = FieldTable ();
+ ft.setString (NAME, "queueRef");
+ ft.setInt (TYPE, TYPE_REF);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "name");
+ ft.setInt (TYPE, TYPE_SSTR);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 1);
+ ft.setInt (OPTIONAL, 0);
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "directory");
+ ft.setInt (TYPE, TYPE_SSTR);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (DESC, "Directory containing journal files");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "baseFileName");
+ ft.setInt (TYPE, TYPE_SSTR);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (DESC, "Base filename prefix for journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "writePageSize");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "byte");
+ ft.setString (DESC, "Page size in write-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "writePages");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "wpage");
+ ft.setString (DESC, "Number of pages in write-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "readPageSize");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "byte");
+ ft.setString (DESC, "Page size in read-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "readPages");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "rpage");
+ ft.setString (DESC, "Number of pages in read-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "initialFileCount");
+ ft.setInt (TYPE, TYPE_U16);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files initially allocated to this journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "dataFileSize");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "byte");
+ ft.setString (DESC, "Size of each journal data file");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "currentFileCount");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files currently allocated to this journal");
+ buf.put (ft);
+
+
+ // Statistics
+ ft = FieldTable ();
+ ft.setString (NAME, "recordDepth");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Number of currently enqueued records (durable messages)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "recordDepthHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Number of currently enqueued records (durable messages) (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "recordDepthLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Number of currently enqueued records (durable messages) (Low)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "enqueues");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total enqueued records on journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "dequeues");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total dequeued records on journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "txnEnqueues");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total transactional enqueued records on journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "txnDequeues");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total transactional dequeued records on journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "txnCommits");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total transactional commit records on journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "txnAborts");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total transactional abort records on journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "outstandingAIOs");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "aio_op");
+ ft.setString (DESC, "Number of currently outstanding AIO requests in Async IO system");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "outstandingAIOsHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "aio_op");
+ ft.setString (DESC, "Number of currently outstanding AIO requests in Async IO system (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "outstandingAIOsLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "aio_op");
+ ft.setString (DESC, "Number of currently outstanding AIO requests in Async IO system (Low)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "freeFileCount");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files free on this journal. Includes free files trapped in holes.");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "freeFileCountHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files free on this journal. Includes free files trapped in holes. (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "freeFileCountLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files free on this journal. Includes free files trapped in holes. (Low)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "availableFileCount");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files available to be written. Excluding holes");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "availableFileCountHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files available to be written. Excluding holes (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "availableFileCountLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files available to be written. Excluding holes (Low)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "writeWaitFailures");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "AIO Wait failures on write");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "writeBusyFailures");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "AIO Busy failures on write");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "readRecordCount");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Records read from the journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "readBusyFailures");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "AIO Busy failures on read");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "writePageCacheDepth");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "wpage");
+ ft.setString (DESC, "Current depth of write-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "writePageCacheDepthHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "wpage");
+ ft.setString (DESC, "Current depth of write-page-cache (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "writePageCacheDepthLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "wpage");
+ ft.setString (DESC, "Current depth of write-page-cache (Low)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "readPageCacheDepth");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "rpage");
+ ft.setString (DESC, "Current depth of read-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "readPageCacheDepthHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "rpage");
+ ft.setString (DESC, "Current depth of read-page-cache (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "readPageCacheDepthLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "rpage");
+ ft.setString (DESC, "Current depth of read-page-cache (Low)");
+ buf.put (ft);
+
+
+ // Methods
+ ft = FieldTable ();
+ ft.setString (NAME, "expand");
+ ft.setInt (ARGCOUNT, 1);
+ ft.setString (DESC, "Increase number of files allocated for this journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "by");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (DIR, "I");
+ ft.setString (DESC, "Number of files to increase journal size by");
+ buf.put (ft);
+
+
+ // Events
+
+}
+
+
+void Journal::aggregatePerThreadStats(struct PerThreadStats* totals)
+{
+ totals->enqueues = 0;
+ totals->dequeues = 0;
+ totals->txnEnqueues = 0;
+ totals->txnDequeues = 0;
+ totals->txnCommits = 0;
+ totals->txnAborts = 0;
+ totals->writeWaitFailures = 0;
+ totals->writeBusyFailures = 0;
+ totals->readRecordCount = 0;
+ totals->readBusyFailures = 0;
+
+ for (int idx = 0; idx < maxThreads; idx++) {
+ struct PerThreadStats* threadStats = perThreadStatsArray[idx];
+ if (threadStats != 0) {
+ totals->enqueues += threadStats->enqueues;
+ totals->dequeues += threadStats->dequeues;
+ totals->txnEnqueues += threadStats->txnEnqueues;
+ totals->txnDequeues += threadStats->txnDequeues;
+ totals->txnCommits += threadStats->txnCommits;
+ totals->txnAborts += threadStats->txnAborts;
+ totals->writeWaitFailures += threadStats->writeWaitFailures;
+ totals->writeBusyFailures += threadStats->writeBusyFailures;
+ totals->readRecordCount += threadStats->readRecordCount;
+ totals->readBusyFailures += threadStats->readBusyFailures;
+
+ }
+ }
+}
+
+
+void Journal::writeProperties (Buffer& buf)
+{
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ configChanged = false;
+
+ writeTimestamps (buf);
+
+ queueRef.encode(buf);
+ buf.putShortString(name);
+ buf.putShortString(directory);
+ buf.putShortString(baseFileName);
+ buf.putLong(writePageSize);
+ buf.putLong(writePages);
+ buf.putLong(readPageSize);
+ buf.putLong(readPages);
+ buf.putShort(initialFileCount);
+ buf.putLong(dataFileSize);
+ buf.putLong(currentFileCount);
+
+}
+
+void Journal::writeStatistics (Buffer& buf, bool skipHeaders)
+{
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ instChanged = false;
+
+
+ struct PerThreadStats totals;
+ aggregatePerThreadStats(&totals);
+
+
+ if (!skipHeaders)
+ writeTimestamps (buf);
+ buf.putLong(recordDepth);
+ buf.putLong(recordDepthHigh);
+ buf.putLong(recordDepthLow);
+ buf.putLongLong(totals.enqueues);
+ buf.putLongLong(totals.dequeues);
+ buf.putLongLong(totals.txnEnqueues);
+ buf.putLongLong(totals.txnDequeues);
+ buf.putLongLong(totals.txnCommits);
+ buf.putLongLong(totals.txnAborts);
+ buf.putLong(outstandingAIOs);
+ buf.putLong(outstandingAIOsHigh);
+ buf.putLong(outstandingAIOsLow);
+ buf.putLong(freeFileCount);
+ buf.putLong(freeFileCountHigh);
+ buf.putLong(freeFileCountLow);
+ buf.putLong(availableFileCount);
+ buf.putLong(availableFileCountHigh);
+ buf.putLong(availableFileCountLow);
+ buf.putLongLong(totals.writeWaitFailures);
+ buf.putLongLong(totals.writeBusyFailures);
+ buf.putLongLong(totals.readRecordCount);
+ buf.putLongLong(totals.readBusyFailures);
+ buf.putLong(writePageCacheDepth);
+ buf.putLong(writePageCacheDepthHigh);
+ buf.putLong(writePageCacheDepthLow);
+ buf.putLong(readPageCacheDepth);
+ buf.putLong(readPageCacheDepthHigh);
+ buf.putLong(readPageCacheDepthLow);
+
+
+ // Maintenance of hi-lo statistics
+ recordDepthHigh = recordDepth;
+ recordDepthLow = recordDepth;
+ outstandingAIOsHigh = outstandingAIOs;
+ outstandingAIOsLow = outstandingAIOs;
+ freeFileCountHigh = freeFileCount;
+ freeFileCountLow = freeFileCount;
+ availableFileCountHigh = availableFileCount;
+ availableFileCountLow = availableFileCount;
+ writePageCacheDepthHigh = writePageCacheDepth;
+ writePageCacheDepthLow = writePageCacheDepth;
+ readPageCacheDepthHigh = readPageCacheDepth;
+ readPageCacheDepthLow = readPageCacheDepth;
+
+
+}
+
+void Journal::doMethod (string methodName, Buffer& inBuf, Buffer& outBuf)
+{
+ Manageable::status_t status = Manageable::STATUS_UNKNOWN_METHOD;
+ std::string text;
+
+ if (methodName == "expand") {
+ ArgsJournalExpand ioArgs;
+ ioArgs.i_by = inBuf.getLong();
+ status = coreObject->ManagementMethod (METHOD_EXPAND, ioArgs, text);
+ outBuf.putLong (status);
+ outBuf.putShortString (::qpid::management::Manageable::StatusText (status, text));
+ return;
+ }
+
+ outBuf.putLong(status);
+ outBuf.putShortString(Manageable::StatusText(status, text));
+}
+
+
Added: store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.h
===================================================================
--- store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.h (rev 0)
+++ store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Journal.h 2008-09-15 19:24:38 UTC (rev 2476)
@@ -0,0 +1,419 @@
+
+#ifndef _MANAGEMENT_JOURNAL_
+#define _MANAGEMENT_JOURNAL_
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+// This source file was created by a code generator.
+// Please do not edit.
+
+#include "qpid/management/ManagementObject.h"
+#include "qpid/framing/FieldTable.h"
+#include "qpid/framing/Uuid.h"
+
+namespace qmf {
+namespace com {
+namespace redhat {
+namespace rhm {
+namespace store {
+
+
+class Journal : public ::qpid::management::ManagementObject
+{
+ private:
+
+ static std::string packageName;
+ static std::string className;
+ static uint8_t md5Sum[16];
+
+
+ // Properties
+ ::qpid::management::ObjectId queueRef;
+ std::string name;
+ std::string directory;
+ std::string baseFileName;
+ uint32_t writePageSize;
+ uint32_t writePages;
+ uint32_t readPageSize;
+ uint32_t readPages;
+ uint16_t initialFileCount;
+ uint32_t dataFileSize;
+ uint32_t currentFileCount;
+
+ // Statistics
+ uint32_t recordDepth;
+ uint32_t recordDepthHigh;
+ uint32_t recordDepthLow;
+ uint32_t outstandingAIOs;
+ uint32_t outstandingAIOsHigh;
+ uint32_t outstandingAIOsLow;
+ uint32_t freeFileCount;
+ uint32_t freeFileCountHigh;
+ uint32_t freeFileCountLow;
+ uint32_t availableFileCount;
+ uint32_t availableFileCountHigh;
+ uint32_t availableFileCountLow;
+ uint32_t writePageCacheDepth;
+ uint32_t writePageCacheDepthHigh;
+ uint32_t writePageCacheDepthLow;
+ uint32_t readPageCacheDepth;
+ uint32_t readPageCacheDepthHigh;
+ uint32_t readPageCacheDepthLow;
+
+
+ // Per-Thread Statistics
+ struct PerThreadStats {
+ uint64_t enqueues;
+ uint64_t dequeues;
+ uint64_t txnEnqueues;
+ uint64_t txnDequeues;
+ uint64_t txnCommits;
+ uint64_t txnAborts;
+ uint64_t writeWaitFailures;
+ uint64_t writeBusyFailures;
+ uint64_t readRecordCount;
+ uint64_t readBusyFailures;
+
+ };
+
+ struct PerThreadStats** perThreadStatsArray;
+
+ inline struct PerThreadStats* getThreadStats() {
+ int index = getThreadIndex();
+ struct PerThreadStats* threadStats = perThreadStatsArray[index];
+ if (threadStats == 0) {
+ threadStats = new(PerThreadStats);
+ perThreadStatsArray[index] = threadStats;
+ threadStats->enqueues = 0;
+ threadStats->dequeues = 0;
+ threadStats->txnEnqueues = 0;
+ threadStats->txnDequeues = 0;
+ threadStats->txnCommits = 0;
+ threadStats->txnAborts = 0;
+ threadStats->writeWaitFailures = 0;
+ threadStats->writeBusyFailures = 0;
+ threadStats->readRecordCount = 0;
+ threadStats->readBusyFailures = 0;
+
+ }
+ return threadStats;
+ }
+
+ void aggregatePerThreadStats(struct PerThreadStats*);
+
+ // Private Methods
+ static void writeSchema (::qpid::framing::Buffer& buf);
+ void writeProperties (::qpid::framing::Buffer& buf);
+ void writeStatistics (::qpid::framing::Buffer& buf,
+ bool skipHeaders = false);
+ void doMethod (std::string methodName,
+ ::qpid::framing::Buffer& inBuf,
+ ::qpid::framing::Buffer& outBuf);
+ writeSchemaCall_t getWriteSchemaCall(void) { return writeSchema; }
+
+ public:
+
+ Journal (::qpid::management::ManagementAgent* agent,
+ ::qpid::management::Manageable* coreObject);
+ ~Journal (void);
+
+ void setReference(::qpid::management::ObjectId objectId) { queueRef = objectId; }
+
+
+ static void registerClass (::qpid::management::ManagementAgent* agent);
+ std::string& getPackageName (void) { return packageName; }
+ std::string& getClassName (void) { return className; }
+ uint8_t* getMd5Sum (void) { return md5Sum; }
+
+ // Method IDs
+ static const uint32_t METHOD_EXPAND = 1;
+
+ // Accessor Methods
+ inline void set_queueRef (const ::qpid::management::ObjectId& val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ queueRef = val;
+ configChanged = true;
+ }
+ inline const ::qpid::management::ObjectId& get_queueRef() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return queueRef;
+ }
+ inline void set_name (const std::string& val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ name = val;
+ configChanged = true;
+ }
+ inline const std::string& get_name() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return name;
+ }
+ inline void set_directory (const std::string& val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ directory = val;
+ configChanged = true;
+ }
+ inline const std::string& get_directory() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return directory;
+ }
+ inline void set_baseFileName (const std::string& val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ baseFileName = val;
+ configChanged = true;
+ }
+ inline const std::string& get_baseFileName() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return baseFileName;
+ }
+ inline void set_writePageSize (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ writePageSize = val;
+ configChanged = true;
+ }
+ inline uint32_t get_writePageSize() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return writePageSize;
+ }
+ inline void set_writePages (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ writePages = val;
+ configChanged = true;
+ }
+ inline uint32_t get_writePages() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return writePages;
+ }
+ inline void set_readPageSize (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ readPageSize = val;
+ configChanged = true;
+ }
+ inline uint32_t get_readPageSize() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return readPageSize;
+ }
+ inline void set_readPages (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ readPages = val;
+ configChanged = true;
+ }
+ inline uint32_t get_readPages() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return readPages;
+ }
+ inline void set_initialFileCount (uint16_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ initialFileCount = val;
+ configChanged = true;
+ }
+ inline uint16_t get_initialFileCount() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return initialFileCount;
+ }
+ inline void set_dataFileSize (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ dataFileSize = val;
+ configChanged = true;
+ }
+ inline uint32_t get_dataFileSize() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return dataFileSize;
+ }
+ inline void set_currentFileCount (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ currentFileCount = val;
+ configChanged = true;
+ }
+ inline uint32_t get_currentFileCount() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return currentFileCount;
+ }
+ inline void inc_recordDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ recordDepth += by;
+ if (recordDepthHigh < recordDepth)
+ recordDepthHigh = recordDepth;
+ instChanged = true;
+ }
+ inline void dec_recordDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ recordDepth -= by;
+ if (recordDepthLow > recordDepth)
+ recordDepthLow = recordDepth;
+ instChanged = true;
+ }
+ inline void inc_enqueues (uint64_t by = 1) {
+ getThreadStats()->enqueues += by;
+ instChanged = true;
+ }
+ inline void dec_enqueues (uint64_t by = 1) {
+ getThreadStats()->enqueues -= by;
+ instChanged = true;
+ }
+ inline void inc_dequeues (uint64_t by = 1) {
+ getThreadStats()->dequeues += by;
+ instChanged = true;
+ }
+ inline void dec_dequeues (uint64_t by = 1) {
+ getThreadStats()->dequeues -= by;
+ instChanged = true;
+ }
+ inline void inc_txnEnqueues (uint64_t by = 1) {
+ getThreadStats()->txnEnqueues += by;
+ instChanged = true;
+ }
+ inline void dec_txnEnqueues (uint64_t by = 1) {
+ getThreadStats()->txnEnqueues -= by;
+ instChanged = true;
+ }
+ inline void inc_txnDequeues (uint64_t by = 1) {
+ getThreadStats()->txnDequeues += by;
+ instChanged = true;
+ }
+ inline void dec_txnDequeues (uint64_t by = 1) {
+ getThreadStats()->txnDequeues -= by;
+ instChanged = true;
+ }
+ inline void inc_txnCommits (uint64_t by = 1) {
+ getThreadStats()->txnCommits += by;
+ instChanged = true;
+ }
+ inline void dec_txnCommits (uint64_t by = 1) {
+ getThreadStats()->txnCommits -= by;
+ instChanged = true;
+ }
+ inline void inc_txnAborts (uint64_t by = 1) {
+ getThreadStats()->txnAborts += by;
+ instChanged = true;
+ }
+ inline void dec_txnAborts (uint64_t by = 1) {
+ getThreadStats()->txnAborts -= by;
+ instChanged = true;
+ }
+ inline void inc_outstandingAIOs (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ outstandingAIOs += by;
+ if (outstandingAIOsHigh < outstandingAIOs)
+ outstandingAIOsHigh = outstandingAIOs;
+ instChanged = true;
+ }
+ inline void dec_outstandingAIOs (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ outstandingAIOs -= by;
+ if (outstandingAIOsLow > outstandingAIOs)
+ outstandingAIOsLow = outstandingAIOs;
+ instChanged = true;
+ }
+ inline void inc_freeFileCount (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ freeFileCount += by;
+ if (freeFileCountHigh < freeFileCount)
+ freeFileCountHigh = freeFileCount;
+ instChanged = true;
+ }
+ inline void dec_freeFileCount (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ freeFileCount -= by;
+ if (freeFileCountLow > freeFileCount)
+ freeFileCountLow = freeFileCount;
+ instChanged = true;
+ }
+ inline void inc_availableFileCount (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ availableFileCount += by;
+ if (availableFileCountHigh < availableFileCount)
+ availableFileCountHigh = availableFileCount;
+ instChanged = true;
+ }
+ inline void dec_availableFileCount (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ availableFileCount -= by;
+ if (availableFileCountLow > availableFileCount)
+ availableFileCountLow = availableFileCount;
+ instChanged = true;
+ }
+ inline void inc_writeWaitFailures (uint64_t by = 1) {
+ getThreadStats()->writeWaitFailures += by;
+ instChanged = true;
+ }
+ inline void dec_writeWaitFailures (uint64_t by = 1) {
+ getThreadStats()->writeWaitFailures -= by;
+ instChanged = true;
+ }
+ inline void inc_writeBusyFailures (uint64_t by = 1) {
+ getThreadStats()->writeBusyFailures += by;
+ instChanged = true;
+ }
+ inline void dec_writeBusyFailures (uint64_t by = 1) {
+ getThreadStats()->writeBusyFailures -= by;
+ instChanged = true;
+ }
+ inline void inc_readRecordCount (uint64_t by = 1) {
+ getThreadStats()->readRecordCount += by;
+ instChanged = true;
+ }
+ inline void dec_readRecordCount (uint64_t by = 1) {
+ getThreadStats()->readRecordCount -= by;
+ instChanged = true;
+ }
+ inline void inc_readBusyFailures (uint64_t by = 1) {
+ getThreadStats()->readBusyFailures += by;
+ instChanged = true;
+ }
+ inline void dec_readBusyFailures (uint64_t by = 1) {
+ getThreadStats()->readBusyFailures -= by;
+ instChanged = true;
+ }
+ inline void inc_writePageCacheDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ writePageCacheDepth += by;
+ if (writePageCacheDepthHigh < writePageCacheDepth)
+ writePageCacheDepthHigh = writePageCacheDepth;
+ instChanged = true;
+ }
+ inline void dec_writePageCacheDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ writePageCacheDepth -= by;
+ if (writePageCacheDepthLow > writePageCacheDepth)
+ writePageCacheDepthLow = writePageCacheDepth;
+ instChanged = true;
+ }
+ inline void inc_readPageCacheDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ readPageCacheDepth += by;
+ if (readPageCacheDepthHigh < readPageCacheDepth)
+ readPageCacheDepthHigh = readPageCacheDepth;
+ instChanged = true;
+ }
+ inline void dec_readPageCacheDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ readPageCacheDepth -= by;
+ if (readPageCacheDepthLow > readPageCacheDepth)
+ readPageCacheDepthLow = readPageCacheDepth;
+ instChanged = true;
+ }
+
+ // Event Methods
+
+};
+
+}}}}}
+
+#endif /*!_MANAGEMENT_JOURNAL_*/
Added: store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.cpp
===================================================================
--- store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.cpp (rev 0)
+++ store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.cpp 2008-09-15 19:24:38 UTC (rev 2476)
@@ -0,0 +1,37 @@
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+// This source file was created by a code generator.
+// Please do not edit.
+
+#include "Package.h"
+#include "Store.h"
+#include "Journal.h"
+
+
+using namespace qmf::com::redhat::rhm::store;
+
+Package::Package (::qpid::management::ManagementAgent* agent)
+{
+ Store::registerClass(agent);
+ Journal::registerClass(agent);
+
+}
+
Added: store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.h
===================================================================
--- store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.h (rev 0)
+++ store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Package.h 2008-09-15 19:24:38 UTC (rev 2476)
@@ -0,0 +1,46 @@
+
+#ifndef _MANAGEMENT_PACKAGE_COM_REDHAT_RHM_STORE_
+#define _MANAGEMENT_PACKAGE_COM_REDHAT_RHM_STORE_
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+// This source file was created by a code generator.
+// Please do not edit.
+
+#include "qpid/agent/ManagementAgent.h"
+
+namespace qmf {
+namespace com {
+namespace redhat {
+namespace rhm {
+namespace store {
+
+
+class Package
+{
+ public:
+ Package (::qpid::management::ManagementAgent* agent);
+ ~Package () {}
+};
+
+}}}}}
+
+
+#endif /*!_MANAGEMENT_PACKAGE_COM_REDHAT_RHM_STORE_*/
Added: store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.cpp
===================================================================
--- store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.cpp (rev 0)
+++ store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.cpp 2008-09-15 19:24:38 UTC (rev 2476)
@@ -0,0 +1,368 @@
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+// This source file was created by a code generator.
+// Please do not edit.
+
+#include "qpid/log/Statement.h"
+#include "qpid/framing/FieldTable.h"
+#include "qpid/management/Manageable.h"
+#include "qpid/agent/ManagementAgent.h"
+#include "Store.h"
+
+
+using namespace qmf::com::redhat::rhm::store;
+using namespace qpid::framing;
+using qpid::management::ManagementAgent;
+using qpid::management::Manageable;
+using qpid::management::ManagementObject;
+using qpid::management::Args;
+using std::string;
+
+string Store::packageName = string ("com.redhat.rhm.store");
+string Store::className = string ("store");
+uint8_t Store::md5Sum[16] =
+ {0x18,0xd,0xd4,0x15,0xd3,0x9a,0xf,0xbe,0x3a,0x40,0xe1,0x1b,0x9e,0x5b,0x7e,0x86};
+
+Store::Store (ManagementAgent* _agent, Manageable* _core, ::qpid::management::Manageable* _parent) :
+ ManagementObject(_agent, _core)
+{
+ brokerRef = _parent->GetManagementObject ()->getObjectId ();
+ tplTransactionDepth = 0;
+ tplTransactionDepthHigh = 0;
+ tplTransactionDepthLow = 0;
+ tplOutstandingAIOs = 0;
+ tplOutstandingAIOsHigh = 0;
+ tplOutstandingAIOsLow = 0;
+
+
+
+ maxThreads = agent->getMaxThreads();
+ perThreadStatsArray = new struct PerThreadStats*[maxThreads];
+ for (int idx = 0; idx < maxThreads; idx++)
+ perThreadStatsArray[idx] = 0;
+
+}
+
+Store::~Store ()
+{
+
+ for (int idx = 0; idx < maxThreads; idx++)
+ if (perThreadStatsArray[idx] != 0)
+ delete perThreadStatsArray[idx];
+ delete[] perThreadStatsArray;
+
+}
+
+namespace {
+ const string NAME("name");
+ const string TYPE("type");
+ const string ACCESS("access");
+ const string INDEX("index");
+ const string OPTIONAL("optional");
+ const string UNIT("unit");
+ const string MIN("min");
+ const string MAX("max");
+ const string MAXLEN("maxlen");
+ const string DESC("desc");
+ const string ARGCOUNT("argCount");
+ const string ARGS("args");
+ const string DIR("dir");
+ const string DEFAULT("default");
+}
+
+void Store::registerClass(ManagementAgent* agent)
+{
+ agent->RegisterClass(packageName, className, md5Sum, writeSchema);
+}
+
+void Store::writeSchema (Buffer& buf)
+{
+ FieldTable ft;
+
+ // Schema class header:
+ buf.putShortString (packageName); // Package Name
+ buf.putShortString (className); // Class Name
+ buf.putBin128 (md5Sum); // Schema Hash
+ buf.putShort (11); // Config Element Count
+ buf.putShort (9); // Inst Element Count
+ buf.putShort (0); // Method Count
+ buf.putShort (0); // Event Count
+
+ // Properties
+ ft = FieldTable ();
+ ft.setString (NAME, "brokerRef");
+ ft.setInt (TYPE, TYPE_REF);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 1);
+ ft.setInt (OPTIONAL, 0);
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "location");
+ ft.setInt (TYPE, TYPE_SSTR);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (DESC, "Logical directory on disk");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "defaultInitialFileCount");
+ ft.setInt (TYPE, TYPE_U16);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Default number of files initially allocated to each journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "defaultDataFileSize");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "RdPg");
+ ft.setString (DESC, "Default size of each journal data file");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplIsInitialized");
+ ft.setInt (TYPE, TYPE_BOOL);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (DESC, "Transaction prepared list has been initialized by a transactional prepare");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplDirectory");
+ ft.setInt (TYPE, TYPE_SSTR);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (DESC, "Transaction prepared list directory");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplWritePageSize");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "byte");
+ ft.setString (DESC, "Page size in transaction prepared list write-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplWritePages");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "wpage");
+ ft.setString (DESC, "Number of pages in transaction prepared list write-page-cache");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplInitialFileCount");
+ ft.setInt (TYPE, TYPE_U16);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files initially allocated to transaction prepared list journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplDataFileSize");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "byte");
+ ft.setString (DESC, "Size of each journal data file in transaction prepared list journal");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplCurrentFileCount");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setInt (ACCESS, ACCESS_RO);
+ ft.setInt (INDEX, 0);
+ ft.setInt (OPTIONAL, 0);
+ ft.setString (UNIT, "file");
+ ft.setString (DESC, "Number of files currently allocated to transaction prepared list journal");
+ buf.put (ft);
+
+
+ // Statistics
+ ft = FieldTable ();
+ ft.setString (NAME, "tplTransactionDepth");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "txn");
+ ft.setString (DESC, "Number of currently enqueued prepared transactions");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplTransactionDepthHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "txn");
+ ft.setString (DESC, "Number of currently enqueued prepared transactions (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplTransactionDepthLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "txn");
+ ft.setString (DESC, "Number of currently enqueued prepared transactions (Low)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplTxnPrepares");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total transaction prepares on transaction prepared list");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplTxnCommits");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total transaction commits on transaction prepared list");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplTxnAborts");
+ ft.setInt (TYPE, TYPE_U64);
+ ft.setString (UNIT, "record");
+ ft.setString (DESC, "Total transaction aborts on transaction prepared list");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplOutstandingAIOs");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "aio_op");
+ ft.setString (DESC, "Number of currently outstanding AIO requests in Async IO system");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplOutstandingAIOsHigh");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "aio_op");
+ ft.setString (DESC, "Number of currently outstanding AIO requests in Async IO system (High)");
+ buf.put (ft);
+
+ ft = FieldTable ();
+ ft.setString (NAME, "tplOutstandingAIOsLow");
+ ft.setInt (TYPE, TYPE_U32);
+ ft.setString (UNIT, "aio_op");
+ ft.setString (DESC, "Number of currently outstanding AIO requests in Async IO system (Low)");
+ buf.put (ft);
+
+
+ // Methods
+
+ // Events
+
+}
+
+
+void Store::aggregatePerThreadStats(struct PerThreadStats* totals)
+{
+ totals->tplTxnPrepares = 0;
+ totals->tplTxnCommits = 0;
+ totals->tplTxnAborts = 0;
+
+ for (int idx = 0; idx < maxThreads; idx++) {
+ struct PerThreadStats* threadStats = perThreadStatsArray[idx];
+ if (threadStats != 0) {
+ totals->tplTxnPrepares += threadStats->tplTxnPrepares;
+ totals->tplTxnCommits += threadStats->tplTxnCommits;
+ totals->tplTxnAborts += threadStats->tplTxnAborts;
+
+ }
+ }
+}
+
+
+void Store::writeProperties (Buffer& buf)
+{
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ configChanged = false;
+
+ writeTimestamps (buf);
+
+ brokerRef.encode(buf);
+ buf.putShortString(location);
+ buf.putShort(defaultInitialFileCount);
+ buf.putLong(defaultDataFileSize);
+ buf.putOctet(tplIsInitialized?1:0);
+ buf.putShortString(tplDirectory);
+ buf.putLong(tplWritePageSize);
+ buf.putLong(tplWritePages);
+ buf.putShort(tplInitialFileCount);
+ buf.putLong(tplDataFileSize);
+ buf.putLong(tplCurrentFileCount);
+
+}
+
+void Store::writeStatistics (Buffer& buf, bool skipHeaders)
+{
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ instChanged = false;
+
+
+ struct PerThreadStats totals;
+ aggregatePerThreadStats(&totals);
+
+
+ if (!skipHeaders)
+ writeTimestamps (buf);
+ buf.putLong(tplTransactionDepth);
+ buf.putLong(tplTransactionDepthHigh);
+ buf.putLong(tplTransactionDepthLow);
+ buf.putLongLong(totals.tplTxnPrepares);
+ buf.putLongLong(totals.tplTxnCommits);
+ buf.putLongLong(totals.tplTxnAborts);
+ buf.putLong(tplOutstandingAIOs);
+ buf.putLong(tplOutstandingAIOsHigh);
+ buf.putLong(tplOutstandingAIOsLow);
+
+
+ // Maintenance of hi-lo statistics
+ tplTransactionDepthHigh = tplTransactionDepth;
+ tplTransactionDepthLow = tplTransactionDepth;
+ tplOutstandingAIOsHigh = tplOutstandingAIOs;
+ tplOutstandingAIOsLow = tplOutstandingAIOs;
+
+
+}
+
+void Store::doMethod (string, Buffer&, Buffer& outBuf)
+{
+ Manageable::status_t status = Manageable::STATUS_UNKNOWN_METHOD;
+ std::string text;
+
+ outBuf.putLong(status);
+ outBuf.putShortString(Manageable::StatusText(status, text));
+}
+
+
Added: store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.h
===================================================================
--- store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.h (rev 0)
+++ store/trunk/cpp/lib/gen/qmf/com/redhat/rhm/store/Store.h 2008-09-15 19:24:38 UTC (rev 2476)
@@ -0,0 +1,279 @@
+
+#ifndef _MANAGEMENT_STORE_
+#define _MANAGEMENT_STORE_
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+// This source file was created by a code generator.
+// Please do not edit.
+
+#include "qpid/management/ManagementObject.h"
+#include "qpid/framing/FieldTable.h"
+#include "qpid/framing/Uuid.h"
+
+namespace qmf {
+namespace com {
+namespace redhat {
+namespace rhm {
+namespace store {
+
+
+class Store : public ::qpid::management::ManagementObject
+{
+ private:
+
+ static std::string packageName;
+ static std::string className;
+ static uint8_t md5Sum[16];
+
+
+ // Properties
+ ::qpid::management::ObjectId brokerRef;
+ std::string location;
+ uint16_t defaultInitialFileCount;
+ uint32_t defaultDataFileSize;
+ uint8_t tplIsInitialized;
+ std::string tplDirectory;
+ uint32_t tplWritePageSize;
+ uint32_t tplWritePages;
+ uint16_t tplInitialFileCount;
+ uint32_t tplDataFileSize;
+ uint32_t tplCurrentFileCount;
+
+ // Statistics
+ uint32_t tplTransactionDepth;
+ uint32_t tplTransactionDepthHigh;
+ uint32_t tplTransactionDepthLow;
+ uint32_t tplOutstandingAIOs;
+ uint32_t tplOutstandingAIOsHigh;
+ uint32_t tplOutstandingAIOsLow;
+
+
+ // Per-Thread Statistics
+ struct PerThreadStats {
+ uint64_t tplTxnPrepares;
+ uint64_t tplTxnCommits;
+ uint64_t tplTxnAborts;
+
+ };
+
+ struct PerThreadStats** perThreadStatsArray;
+
+ inline struct PerThreadStats* getThreadStats() {
+ int index = getThreadIndex();
+ struct PerThreadStats* threadStats = perThreadStatsArray[index];
+ if (threadStats == 0) {
+ threadStats = new(PerThreadStats);
+ perThreadStatsArray[index] = threadStats;
+ threadStats->tplTxnPrepares = 0;
+ threadStats->tplTxnCommits = 0;
+ threadStats->tplTxnAborts = 0;
+
+ }
+ return threadStats;
+ }
+
+ void aggregatePerThreadStats(struct PerThreadStats*);
+
+ // Private Methods
+ static void writeSchema (::qpid::framing::Buffer& buf);
+ void writeProperties (::qpid::framing::Buffer& buf);
+ void writeStatistics (::qpid::framing::Buffer& buf,
+ bool skipHeaders = false);
+ void doMethod (std::string methodName,
+ ::qpid::framing::Buffer& inBuf,
+ ::qpid::framing::Buffer& outBuf);
+ writeSchemaCall_t getWriteSchemaCall(void) { return writeSchema; }
+
+ public:
+
+ Store (::qpid::management::ManagementAgent* agent,
+ ::qpid::management::Manageable* coreObject, ::qpid::management::Manageable* _parent);
+ ~Store (void);
+
+
+
+ static void registerClass (::qpid::management::ManagementAgent* agent);
+ std::string& getPackageName (void) { return packageName; }
+ std::string& getClassName (void) { return className; }
+ uint8_t* getMd5Sum (void) { return md5Sum; }
+
+ // Method IDs
+
+ // Accessor Methods
+ inline void set_brokerRef (const ::qpid::management::ObjectId& val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ brokerRef = val;
+ configChanged = true;
+ }
+ inline const ::qpid::management::ObjectId& get_brokerRef() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return brokerRef;
+ }
+ inline void set_location (const std::string& val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ location = val;
+ configChanged = true;
+ }
+ inline const std::string& get_location() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return location;
+ }
+ inline void set_defaultInitialFileCount (uint16_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ defaultInitialFileCount = val;
+ configChanged = true;
+ }
+ inline uint16_t get_defaultInitialFileCount() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return defaultInitialFileCount;
+ }
+ inline void set_defaultDataFileSize (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ defaultDataFileSize = val;
+ configChanged = true;
+ }
+ inline uint32_t get_defaultDataFileSize() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return defaultDataFileSize;
+ }
+ inline void set_tplIsInitialized (uint8_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplIsInitialized = val;
+ configChanged = true;
+ }
+ inline uint8_t get_tplIsInitialized() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return tplIsInitialized;
+ }
+ inline void set_tplDirectory (const std::string& val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplDirectory = val;
+ configChanged = true;
+ }
+ inline const std::string& get_tplDirectory() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return tplDirectory;
+ }
+ inline void set_tplWritePageSize (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplWritePageSize = val;
+ configChanged = true;
+ }
+ inline uint32_t get_tplWritePageSize() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return tplWritePageSize;
+ }
+ inline void set_tplWritePages (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplWritePages = val;
+ configChanged = true;
+ }
+ inline uint32_t get_tplWritePages() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return tplWritePages;
+ }
+ inline void set_tplInitialFileCount (uint16_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplInitialFileCount = val;
+ configChanged = true;
+ }
+ inline uint16_t get_tplInitialFileCount() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return tplInitialFileCount;
+ }
+ inline void set_tplDataFileSize (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplDataFileSize = val;
+ configChanged = true;
+ }
+ inline uint32_t get_tplDataFileSize() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return tplDataFileSize;
+ }
+ inline void set_tplCurrentFileCount (uint32_t val) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplCurrentFileCount = val;
+ configChanged = true;
+ }
+ inline uint32_t get_tplCurrentFileCount() {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ return tplCurrentFileCount;
+ }
+ inline void inc_tplTransactionDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplTransactionDepth += by;
+ if (tplTransactionDepthHigh < tplTransactionDepth)
+ tplTransactionDepthHigh = tplTransactionDepth;
+ instChanged = true;
+ }
+ inline void dec_tplTransactionDepth (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplTransactionDepth -= by;
+ if (tplTransactionDepthLow > tplTransactionDepth)
+ tplTransactionDepthLow = tplTransactionDepth;
+ instChanged = true;
+ }
+ inline void inc_tplTxnPrepares (uint64_t by = 1) {
+ getThreadStats()->tplTxnPrepares += by;
+ instChanged = true;
+ }
+ inline void dec_tplTxnPrepares (uint64_t by = 1) {
+ getThreadStats()->tplTxnPrepares -= by;
+ instChanged = true;
+ }
+ inline void inc_tplTxnCommits (uint64_t by = 1) {
+ getThreadStats()->tplTxnCommits += by;
+ instChanged = true;
+ }
+ inline void dec_tplTxnCommits (uint64_t by = 1) {
+ getThreadStats()->tplTxnCommits -= by;
+ instChanged = true;
+ }
+ inline void inc_tplTxnAborts (uint64_t by = 1) {
+ getThreadStats()->tplTxnAborts += by;
+ instChanged = true;
+ }
+ inline void dec_tplTxnAborts (uint64_t by = 1) {
+ getThreadStats()->tplTxnAborts -= by;
+ instChanged = true;
+ }
+ inline void inc_tplOutstandingAIOs (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplOutstandingAIOs += by;
+ if (tplOutstandingAIOsHigh < tplOutstandingAIOs)
+ tplOutstandingAIOsHigh = tplOutstandingAIOs;
+ instChanged = true;
+ }
+ inline void dec_tplOutstandingAIOs (uint32_t by = 1) {
+ ::qpid::sys::Mutex::ScopedLock mutex(accessLock);
+ tplOutstandingAIOs -= by;
+ if (tplOutstandingAIOsLow > tplOutstandingAIOs)
+ tplOutstandingAIOsLow = tplOutstandingAIOs;
+ instChanged = true;
+ }
+
+ // Event Methods
+
+};
+
+}}}}}
+
+#endif /*!_MANAGEMENT_STORE_*/
17 years, 7 months