rhmessaging commits: r1008 - mgmt/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-11 12:42:35 -0400 (Thu, 11 Oct 2007)
New Revision: 1008
Modified:
mgmt/cumin/python/cumin/cluster.strings
mgmt/cumin/python/cumin/exchange.strings
mgmt/cumin/python/cumin/queue.strings
mgmt/cumin/python/cumin/server.strings
Log:
Changes the language used to describe actions on the current object to
be more explicit.
Modified: mgmt/cumin/python/cumin/cluster.strings
===================================================================
--- mgmt/cumin/python/cumin/cluster.strings 2007-10-11 13:47:29 UTC (rev 1007)
+++ mgmt/cumin/python/cumin/cluster.strings 2007-10-11 16:42:35 UTC (rev 1008)
@@ -31,7 +31,7 @@
</dl>
<ul class="actions">
- <li><a href="">Shutdown Cluster</a></li>
+ <li><a href="">Shutdown This Cluster</a></li>
</ul>
{tabs}
Modified: mgmt/cumin/python/cumin/exchange.strings
===================================================================
--- mgmt/cumin/python/cumin/exchange.strings 2007-10-11 13:47:29 UTC (rev 1007)
+++ mgmt/cumin/python/cumin/exchange.strings 2007-10-11 16:42:35 UTC (rev 1008)
@@ -80,8 +80,8 @@
</dl>
<ul class="actions">
- <li><a href="{edit_exchange_href}">Edit Exchange</a></li>
- <li><a href="{remove_exchange_href}">Remove Exchange</a></li>
+ <li><a href="{edit_exchange_href}">Edit This Exchange</a></li>
+ <li><a href="{remove_exchange_href}">Remove This Exchange</a></li>
</ul>
{tabs}
Modified: mgmt/cumin/python/cumin/queue.strings
===================================================================
--- mgmt/cumin/python/cumin/queue.strings 2007-10-11 13:47:29 UTC (rev 1007)
+++ mgmt/cumin/python/cumin/queue.strings 2007-10-11 16:42:35 UTC (rev 1008)
@@ -115,8 +115,8 @@
</dl>
<ul class="actions">
- <li><a href="{edit_queue_href}">Edit Queue</a></li>
- <li><a href="{remove_queue_href}">Remove Queue</a></li>
+ <li><a href="{edit_queue_href}">Edit This Queue</a></li>
+ <li><a href="{remove_queue_href}">Remove This Queue</a></li>
</ul>
{tabs}
Modified: mgmt/cumin/python/cumin/server.strings
===================================================================
--- mgmt/cumin/python/cumin/server.strings 2007-10-11 13:47:29 UTC (rev 1007)
+++ mgmt/cumin/python/cumin/server.strings 2007-10-11 16:42:35 UTC (rev 1008)
@@ -32,7 +32,7 @@
</dl>
<ul class="actions">
- <li><a href="">Shutdown</a></li>
+ <li><a href="">Shutdown This Server</a></li>
</ul>
{tabs}
@@ -66,7 +66,7 @@
<ul class="actions">
<li><a href="">Add Server</a></li>
- <li><a href="">Shutdown Servers</a></li>
+ <li><a href="">Shutdown Servers in This Group</a></li>
</ul>
{tabs}
18 years, 6 months
rhmessaging commits: r1007 - mgmt/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-11 09:47:29 -0400 (Thu, 11 Oct 2007)
New Revision: 1007
Modified:
mgmt/cumin/python/cumin/cluster.strings
mgmt/cumin/python/cumin/page.py
mgmt/cumin/python/cumin/page.strings
Log:
Sketches in some columns in the cluster table. Refactors the cluster
tab a little.
Modified: mgmt/cumin/python/cumin/cluster.strings
===================================================================
--- mgmt/cumin/python/cumin/cluster.strings 2007-10-11 13:37:38 UTC (rev 1006)
+++ mgmt/cumin/python/cumin/cluster.strings 2007-10-11 13:47:29 UTC (rev 1007)
@@ -2,17 +2,20 @@
<table class="ClusterSet mobjects">
<tr>
<th>Cluster</th>
+ <th>Configuration</th>
+ <th>Status</th>
</tr>
+
{items}
</table>
[ClusterSet.item_html]
<tr>
<td>{item_link}</td>
+ <td>3 servers</td>
+ <td>0 errors, 0 warnings</td>
</tr>
-
-
[ClusterView.html]
<div class="oblock">
<div class="mstatus green" id="{id}">
Modified: mgmt/cumin/python/cumin/page.py
===================================================================
--- mgmt/cumin/python/cumin/page.py 2007-10-11 13:37:38 UTC (rev 1006)
+++ mgmt/cumin/python/cumin/page.py 2007-10-11 13:47:29 UTC (rev 1007)
@@ -178,9 +178,16 @@
def render_title(self, session, model):
return "Servers (%i)" % len(model.get_servers())
- class ClusterTab(ClusterSet):
- pass
+ class ClusterTab(Widget):
+ def __init__(self, app, name):
+ super(MainView.ClusterTab, self).__init__(app, name)
+ self.clusters = ClusterSet(app, "clusters")
+ self.add_child(self.clusters)
+
+ def render_title(self, session, model):
+ return self.clusters.render_title(session, model)
+
class VirtualHostTab(TabSet):
def __init__(self, app, name):
super(MainView.VirtualHostTab, self).__init__(app, name)
Modified: mgmt/cumin/python/cumin/page.strings
===================================================================
--- mgmt/cumin/python/cumin/page.strings 2007-10-11 13:37:38 UTC (rev 1006)
+++ mgmt/cumin/python/cumin/page.strings 2007-10-11 13:47:29 UTC (rev 1007)
@@ -377,12 +377,7 @@
<li><a href="">Add Cluster</a></li>
</ul>
-<table class="mobjects">
- <tr>
- <th>Cluster</th>
- </tr>
- {items}
-</table>
+{clusters}
[VirtualHostTab.html]
<ul class="radiotabs tabs">{tabs}</ul>
18 years, 6 months
rhmessaging commits: r1006 - mgmt/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-11 09:37:38 -0400 (Thu, 11 Oct 2007)
New Revision: 1006
Modified:
mgmt/cumin/python/cumin/virtualhost.py
Log:
Fixes a bug and simplifies vhost and vhost template code a little.
Modified: mgmt/cumin/python/cumin/virtualhost.py
===================================================================
--- mgmt/cumin/python/cumin/virtualhost.py 2007-10-11 13:15:13 UTC (rev 1005)
+++ mgmt/cumin/python/cumin/virtualhost.py 2007-10-11 13:37:38 UTC (rev 1006)
@@ -106,63 +106,28 @@
def render_title(self, session, vhost):
return "Host Template '%s'" % vhost.name
-class BaseVirtualHostView(Widget):
+class VirtualHostView(Widget):
def __init__(self, app, name):
- super(BaseVirtualHostView, self).__init__(app, name)
+ super(VirtualHostView, self).__init__(app, name)
self.tabs = TabSet(app, "tabs")
self.add_child(self.tabs)
- self.queues = self.QueueTab(app, "queues")
+ self.queues = QueueSet(app, "queues")
self.tabs.add_child(self.queues)
- self.exchanges = self.ExchangeTab(app, "exchanges")
+ self.exchanges = ExchangeSet(app, "exchanges")
self.tabs.add_child(self.exchanges)
- def render_name(self, session, vhost):
- return vhost.name
-
- class QueueTab(Widget):
- def __init__(self, app, name):
- super(BaseVirtualHostView.QueueTab, self).__init__(app, name)
-
- self.queues = QueueSet(app, "queues")
- self.add_child(self.queues)
-
- def render_title(self, session, vhost):
- return self.queues.render_title(session, vhost)
-
- def render_add_queue_href(self, session, vhost):
- branch = session.branch()
- self.page().show_virtual_host(branch, vhost).show_queue_add(branch)
- return branch.marshal()
-
- class ExchangeTab(Widget):
- def __init__(self, app, name):
- super(BaseVirtualHostView.ExchangeTab, self).__init__(app, name)
-
- self.exchanges = ExchangeSet(app, "exchanges")
- self.add_child(self.exchanges)
-
- def render_title(self, session, vhost):
- return self.exchanges.render_title(session, vhost)
-
- def render_add_exchange_href(self, session, vhost):
- branch = session.branch()
- frame = self.page().show_virtual_host(branch, vhost)
- frame.show_exchange_add(branch)
- return branch.marshal()
-
-class VirtualHostView(BaseVirtualHostView):
- def __init__(self, app, name):
- super(VirtualHostView, self).__init__(app, name)
-
self.log = self.VirtualHostLog(app, "log")
self.tabs.add_child(self.log)
def render_title(self, session, vhost):
return "Functional Host '%s'" % vhost.name
+ def render_name(self, session, vhost):
+ return vhost.name
+
def render_server(self, session, vhost):
server = vhost.get_server()
@@ -189,10 +154,25 @@
def render_title(self, session, vhost):
return "Log Messages"
-class VirtualHostTemplateView(VirtualHostView):
+class VirtualHostTemplateView(Widget):
+ def __init__(self, app, name):
+ super(VirtualHostTemplateView, self).__init__(app, name)
+
+ self.tabs = TabSet(app, "tabs")
+ self.add_child(self.tabs)
+
+ self.queues = self.QueueTab(app, "queues")
+ self.tabs.add_child(self.queues)
+
+ self.exchanges = self.ExchangeTab(app, "exchanges")
+ self.tabs.add_child(self.exchanges)
+
def render_title(self, session, vhost):
return "Host Template '%s'" % vhost.name
+ def render_name(self, session, vhost):
+ return vhost.name
+
def render_add_queue_href(self, session, vhost):
branch = session.branch()
self.page().show_virtual_host(branch, vhost).show_queue_add(branch)
@@ -202,3 +182,34 @@
branch = session.branch()
self.page().show_virtual_host(branch, vhost).show_exchange_add(branch)
return branch.marshal()
+
+ class QueueTab(Widget):
+ def __init__(self, app, name):
+ super(VirtualHostTemplateView.QueueTab, self).__init__(app, name)
+
+ self.queues = QueueSet(app, "queues")
+ self.add_child(self.queues)
+
+ def render_title(self, session, vhost):
+ return self.queues.render_title(session, vhost)
+
+ def render_add_queue_href(self, session, vhost):
+ branch = session.branch()
+ self.page().show_virtual_host(branch, vhost).show_queue_add(branch)
+ return branch.marshal()
+
+ class ExchangeTab(Widget):
+ def __init__(self, app, name):
+ super(VirtualHostTemplateView.ExchangeTab, self).__init__(app, name)
+
+ self.exchanges = ExchangeSet(app, "exchanges")
+ self.add_child(self.exchanges)
+
+ def render_title(self, session, vhost):
+ return self.exchanges.render_title(session, vhost)
+
+ def render_add_exchange_href(self, session, vhost):
+ branch = session.branch()
+ frame = self.page().show_virtual_host(branch, vhost)
+ frame.show_exchange_add(branch)
+ return branch.marshal()
18 years, 6 months
rhmessaging commits: r1005 - mgmt/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-11 09:15:13 -0400 (Thu, 11 Oct 2007)
New Revision: 1005
Modified:
mgmt/cumin/python/cumin/exchange.py
mgmt/cumin/python/cumin/exchange.strings
Log:
Adds a binding add action to exchanges. It's only a stub right now.
Modified: mgmt/cumin/python/cumin/exchange.py
===================================================================
--- mgmt/cumin/python/cumin/exchange.py 2007-10-11 13:06:56 UTC (rev 1004)
+++ mgmt/cumin/python/cumin/exchange.py 2007-10-11 13:15:13 UTC (rev 1005)
@@ -136,6 +136,10 @@
def render_title(self, session, exchange):
return "Bindings (%i)" % len(exchange.binding_items())
+ def render_add_binding_href(self, session, exchange):
+ branch = session.branch()
+ return branch.marshal()
+
def get_items(self, session, exchange):
return sorted(exchange.binding_items(), cmp,
lambda x: x.get_queue().name)
Modified: mgmt/cumin/python/cumin/exchange.strings
===================================================================
--- mgmt/cumin/python/cumin/exchange.strings 2007-10-11 13:06:56 UTC (rev 1004)
+++ mgmt/cumin/python/cumin/exchange.strings 2007-10-11 13:15:13 UTC (rev 1005)
@@ -88,13 +88,18 @@
</div>
[ExchangeBindingSet.html]
+<ul class="actions">
+ <li><a href="{add_binding_href}">Add Binding</a></li>
+</ul>
+
<table class="ExchangeBindingSet mobjects">
<tr>
<th>Queue</th>
<th>Routing Key</th>
<th></th>
</tr>
-{items}
+
+ {items}
</table>
[ExchangeBindingSet.item_html]
18 years, 6 months
rhmessaging commits: r1004 - mgmt/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-11 09:06:56 -0400 (Thu, 11 Oct 2007)
New Revision: 1004
Modified:
mgmt/cumin/python/cumin/queue.py
mgmt/cumin/python/cumin/queue.strings
Log:
Moves the add binding action closer to the binding list.
Modified: mgmt/cumin/python/cumin/queue.py
===================================================================
--- mgmt/cumin/python/cumin/queue.py 2007-10-11 03:09:53 UTC (rev 1003)
+++ mgmt/cumin/python/cumin/queue.py 2007-10-11 13:06:56 UTC (rev 1004)
@@ -168,15 +168,15 @@
self.page().show_queue(branch, queue).show_remove(branch)
return branch.marshal()
+class QueueBindingSet(ItemSet):
+ def render_title(self, session, queue):
+ return "Bindings (%i)" % len(queue.binding_items())
+
def render_add_binding_href(self, session, queue):
branch = session.branch()
self.page().show_queue(branch, queue).show_binding_add(branch)
return branch.marshal()
-class QueueBindingSet(ItemSet):
- def render_title(self, session, queue):
- return "Bindings (%i)" % len(queue.binding_items())
-
def get_items(self, session, queue):
return sorted(queue.binding_items(), cmp,
lambda x: x.get_exchange().name)
Modified: mgmt/cumin/python/cumin/queue.strings
===================================================================
--- mgmt/cumin/python/cumin/queue.strings 2007-10-11 03:09:53 UTC (rev 1003)
+++ mgmt/cumin/python/cumin/queue.strings 2007-10-11 13:06:56 UTC (rev 1004)
@@ -117,20 +117,24 @@
<ul class="actions">
<li><a href="{edit_queue_href}">Edit Queue</a></li>
<li><a href="{remove_queue_href}">Remove Queue</a></li>
- <li><a href="{add_binding_href}">Add Binding</a></li>
</ul>
{tabs}
</div>
[QueueBindingSet.html]
+ <ul class="actions">
+ <li><a href="{add_binding_href}">Add Binding</a></li>
+ </ul>
+
<table class="QueueBindingSet mobjects">
<tr>
<th>Exchange</th>
<th>Routing Key</th>
<th></th>
</tr>
-{items}
+
+ {items}
</table>
[QueueBindingSet.item_html]
18 years, 6 months
rhmessaging commits: r1003 - in mgmt: notes and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-10 23:09:53 -0400 (Wed, 10 Oct 2007)
New Revision: 1003
Modified:
mgmt/cumin/python/cumin/exchange.py
mgmt/cumin/python/cumin/page.py
mgmt/cumin/python/cumin/queue.py
mgmt/cumin/python/cumin/queue.strings
mgmt/cumin/python/cumin/virtualhost.py
mgmt/cumin/python/cumin/virtualhost.strings
mgmt/notes/Todo
Log:
Separates the UIs of vhosts that are under a server or cluster from
that of vhosts that are standalone and serve as templates. The former
are read-only, while the latter can have new queues and exchanges
added, etc.
Modified: mgmt/cumin/python/cumin/exchange.py
===================================================================
--- mgmt/cumin/python/cumin/exchange.py 2007-10-11 02:41:16 UTC (rev 1002)
+++ mgmt/cumin/python/cumin/exchange.py 2007-10-11 03:09:53 UTC (rev 1003)
@@ -220,7 +220,7 @@
session.set_redirect(branch.marshal())
def render_title(self, session, vhost):
- return "Add Exchange to Functional Host '%s'" % vhost.name
+ return "Add Exchange to Host Template '%s'" % vhost.name
class ExchangeEdit(ExchangeForm):
def on_cancel(self, session, exchange):
Modified: mgmt/cumin/python/cumin/page.py
===================================================================
--- mgmt/cumin/python/cumin/page.py 2007-10-11 02:41:16 UTC (rev 1002)
+++ mgmt/cumin/python/cumin/page.py 2007-10-11 03:09:53 UTC (rev 1003)
@@ -109,7 +109,7 @@
self.view = MainView(app, "view")
self.add_child(self.view)
- self.vhost = VirtualHostFrame(app, "vhost")
+ self.vhost = VirtualHostTemplateFrame(app, "vhost")
self.add_child(self.vhost)
self.server = ServerFrame(app, "server")
Modified: mgmt/cumin/python/cumin/queue.py
===================================================================
--- mgmt/cumin/python/cumin/queue.py 2007-10-11 02:41:16 UTC (rev 1002)
+++ mgmt/cumin/python/cumin/queue.py 2007-10-11 03:09:53 UTC (rev 1003)
@@ -267,7 +267,7 @@
session.set_redirect(branch.marshal())
def render_title(self, session, vhost):
- return "Add Queue to Functional Host '%s'" % vhost.name
+ return "Add Queue to Host Template '%s'" % vhost.name
class QueueEdit(QueueForm):
def on_cancel(self, session, queue):
Modified: mgmt/cumin/python/cumin/queue.strings
===================================================================
--- mgmt/cumin/python/cumin/queue.strings 2007-10-11 02:41:16 UTC (rev 1002)
+++ mgmt/cumin/python/cumin/queue.strings 2007-10-11 03:09:53 UTC (rev 1003)
@@ -7,12 +7,13 @@
[QueueSet.html]
<table class="QueueSet mobjects">
-<tr>
- <th>Queue</th>
- <th>Configuration</th>
- <th>Status</th>
-</tr>
-{items}
+ <tr>
+ <th>Queue</th>
+ <th>Configuration</th>
+ <th>Status</th>
+ </tr>
+
+ {items}
</table>
[QueueSet.item_html]
Modified: mgmt/cumin/python/cumin/virtualhost.py
===================================================================
--- mgmt/cumin/python/cumin/virtualhost.py 2007-10-11 02:41:16 UTC (rev 1002)
+++ mgmt/cumin/python/cumin/virtualhost.py 2007-10-11 03:09:53 UTC (rev 1003)
@@ -17,7 +17,7 @@
def render_item_link(self, session, vhost):
branch = session.branch()
- self.page().show_virtual_host(branch, vhost)
+ self.page().show_virtual_host(branch, vhost).show_view(branch)
return mlink(branch.marshal(), "VirtualHost", vhost.name)
@@ -26,54 +26,35 @@
return "Groups (%i)" \
% len(model.get_virtual_host_groups())
-class VirtualHostFrame(CuminFrame):
+class VirtualHostParameter(Parameter):
+ def do_unmarshal(self, string):
+ return self.app.model.get_virtual_host(int(string))
+
+ def do_marshal(self, vhost):
+ return str(vhost.id)
+
+class BaseVirtualHostFrame(CuminFrame):
def __init__(self, app, name):
- super(VirtualHostFrame, self).__init__(app, name)
+ super(BaseVirtualHostFrame, self).__init__(app, name)
- self.param = self.VirtualHostParameter(app, "id")
+ self.param = VirtualHostParameter(app, "id")
self.add_parameter(self.param)
self.set_object_attribute(self.param)
- self.view = VirtualHostView(app, "view")
- self.add_child(self.view)
-
- self.queue_add = QueueAdd(app, "queue_add")
- self.add_child(self.queue_add)
-
self.queue = QueueFrame(app, "queue")
self.add_child(self.queue)
- self.exchange_add = ExchangeAdd(app, "exchange_add")
- self.add_child(self.exchange_add)
-
self.exchange = ExchangeFrame(app, "exchange")
self.add_child(self.exchange)
- # XXX move this to the top level
- class VirtualHostParameter(Parameter):
- def do_unmarshal(self, string):
- return self.app.model.get_virtual_host(int(string))
-
- def do_marshal(self, vhost):
- return str(vhost.id)
-
def set_virtual_host(self, session, vhost):
return self.param.set(session, vhost)
- def show_view(self, session):
- return self.show_mode(session, self.view)
-
- def show_queue_add(self, session):
- return self.show_mode(session, self.queue_add)
-
def show_queue(self, session, queue):
self.queue.set_queue(session, queue)
return self.show_mode(session, self.queue)
- def show_exchange_add(self, session):
- return self.show_mode(session, self.exchange_add)
-
def show_exchange(self, session, exchange):
self.exchange.set_exchange(session, exchange)
@@ -84,31 +65,104 @@
self.show_view(branch)
return branch.marshal()
+class VirtualHostFrame(BaseVirtualHostFrame):
+ def __init__(self, app, name):
+ super(VirtualHostFrame, self).__init__(app, name)
+
+ self.view = VirtualHostView(app, "view")
+ self.add_child(self.view)
+
+ def set_virtual_host(self, session, vhost):
+ return self.param.set(session, vhost)
+
+ def show_view(self, session):
+ return self.show_mode(session, self.view)
+
def render_title(self, session, vhost):
return "Functional Host '%s'" % vhost.name
-class VirtualHostView(Widget):
+class VirtualHostTemplateFrame(BaseVirtualHostFrame):
def __init__(self, app, name):
- super(VirtualHostView, self).__init__(app, name)
+ super(VirtualHostTemplateFrame, self).__init__(app, name)
+ self.view = VirtualHostTemplateView(app, "view")
+ self.add_child(self.view)
+
+ self.queue_add = QueueAdd(app, "queue_add")
+ self.add_child(self.queue_add)
+
+ self.exchange_add = ExchangeAdd(app, "exchange_add")
+ self.add_child(self.exchange_add)
+
+ def show_view(self, session):
+ return self.show_mode(session, self.view)
+
+ def show_queue_add(self, session):
+ return self.show_mode(session, self.queue_add)
+
+ def show_exchange_add(self, session):
+ return self.show_mode(session, self.exchange_add)
+
+ def render_title(self, session, vhost):
+ return "Host Template '%s'" % vhost.name
+
+class BaseVirtualHostView(Widget):
+ def __init__(self, app, name):
+ super(BaseVirtualHostView, self).__init__(app, name)
+
self.tabs = TabSet(app, "tabs")
self.add_child(self.tabs)
- self.queues = QueueSet(app, "queues")
+ self.queues = self.QueueTab(app, "queues")
self.tabs.add_child(self.queues)
- self.exchanges = ExchangeSet(app, "exchanges")
+ self.exchanges = self.ExchangeTab(app, "exchanges")
self.tabs.add_child(self.exchanges)
+ def render_name(self, session, vhost):
+ return vhost.name
+
+ class QueueTab(Widget):
+ def __init__(self, app, name):
+ super(BaseVirtualHostView.QueueTab, self).__init__(app, name)
+
+ self.queues = QueueSet(app, "queues")
+ self.add_child(self.queues)
+
+ def render_title(self, session, vhost):
+ return self.queues.render_title(session, vhost)
+
+ def render_add_queue_href(self, session, vhost):
+ branch = session.branch()
+ self.page().show_virtual_host(branch, vhost).show_queue_add(branch)
+ return branch.marshal()
+
+ class ExchangeTab(Widget):
+ def __init__(self, app, name):
+ super(BaseVirtualHostView.ExchangeTab, self).__init__(app, name)
+
+ self.exchanges = ExchangeSet(app, "exchanges")
+ self.add_child(self.exchanges)
+
+ def render_title(self, session, vhost):
+ return self.exchanges.render_title(session, vhost)
+
+ def render_add_exchange_href(self, session, vhost):
+ branch = session.branch()
+ frame = self.page().show_virtual_host(branch, vhost)
+ frame.show_exchange_add(branch)
+ return branch.marshal()
+
+class VirtualHostView(BaseVirtualHostView):
+ def __init__(self, app, name):
+ super(VirtualHostView, self).__init__(app, name)
+
self.log = self.VirtualHostLog(app, "log")
self.tabs.add_child(self.log)
def render_title(self, session, vhost):
return "Functional Host '%s'" % vhost.name
- def render_name(self, session, vhost):
- return vhost.name
-
def render_server(self, session, vhost):
server = vhost.get_server()
@@ -131,6 +185,14 @@
else:
return none()
+ class VirtualHostLog(Widget):
+ def render_title(self, session, vhost):
+ return "Log Messages"
+
+class VirtualHostTemplateView(VirtualHostView):
+ def render_title(self, session, vhost):
+ return "Host Template '%s'" % vhost.name
+
def render_add_queue_href(self, session, vhost):
branch = session.branch()
self.page().show_virtual_host(branch, vhost).show_queue_add(branch)
@@ -140,7 +202,3 @@
branch = session.branch()
self.page().show_virtual_host(branch, vhost).show_exchange_add(branch)
return branch.marshal()
-
- class VirtualHostLog(Widget):
- def render_title(self, session, vhost):
- return "Log Messages"
Modified: mgmt/cumin/python/cumin/virtualhost.strings
===================================================================
--- mgmt/cumin/python/cumin/virtualhost.strings 2007-10-11 02:41:16 UTC (rev 1002)
+++ mgmt/cumin/python/cumin/virtualhost.strings 2007-10-11 03:09:53 UTC (rev 1003)
@@ -25,9 +25,34 @@
<dt>Cluster</dt><dd>{cluster}</dd>
</dl>
+ {tabs}
+</div>
+
+[VirtualHostTemplateView.html]
+<div class="oblock">
+ <h1>{title}</h1>
+
+ <dl class="properties">
+ <dt>Name</dt><dd>{name}</dd>
+ </dl>
+
<ul class="actions">
- <li><a href="{add_queue_href}">Add Queue</a></li>
- <li><a href="{add_exchange_href}">Add Exchange</a></li>
+ <li><a href="">Remove This Template</a></li>
</ul>
-{tabs}
+
+ {tabs}
</div>
+
+[QueueTab.html]
+<ul class="actions">
+ <li><a href="{add_queue_href}">Add Queue</a></li>
+</ul>
+
+{queues}
+
+[ExchangeTab.html]
+<ul class="actions">
+ <li><a href="{add_exchange_href}">Add Exchange</a></li>
+</ul>
+
+{exchanges}
Modified: mgmt/notes/Todo
===================================================================
--- mgmt/notes/Todo 2007-10-11 02:41:16 UTC (rev 1002)
+++ mgmt/notes/Todo 2007-10-11 03:09:53 UTC (rev 1003)
@@ -67,3 +67,5 @@
* Consider adding a set_object to Frame, instead of having
set_somethingspecific on each frame.
+
+ * Remove log messages from host template view
18 years, 6 months
rhmessaging commits: r1002 - in store/trunk/cpp: tests/jrnl and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2007-10-10 22:41:16 -0400 (Wed, 10 Oct 2007)
New Revision: 1002
Modified:
store/trunk/cpp/lib/jrnl/jcntl.cpp
store/trunk/cpp/lib/jrnl/rcvdat.hpp
store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp
Log:
Fixed bug where highest rid was not correctly identified after restore; added check for prepared xid list during restore. This more-or-less completes the transaction code for the journal. There still seems to be some memory leaks around, though, and transaction integration testing is still not complete.
Modified: store/trunk/cpp/lib/jrnl/jcntl.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jcntl.cpp 2007-10-10 21:36:41 UTC (rev 1001)
+++ store/trunk/cpp/lib/jrnl/jcntl.cpp 2007-10-11 02:41:16 UTC (rev 1002)
@@ -33,6 +33,7 @@
#include <jrnl/jcntl.hpp>
+#include <algorithm>
#include <cerrno>
#include <fstream>
#include <iomanip>
@@ -372,7 +373,7 @@
const bool
jcntl::rcvr_get_next_record(u_int16_t& fid, std::ifstream* ifsp, rcvdat& rd,
- const std::vector<std::string>& /*prep_txn_list*/) throw (jexception)
+ const std::vector<std::string>& prep_txn_list) throw (jexception)
{
u_int32_t dblks_read = 0;
bool done = false;
@@ -391,22 +392,23 @@
done = er.rcv_decode(h, ifsp, dblks_read);
jfile_cycle(fid, ifsp, rd, false);
}
+ rd._enq_cnt_list[fid]++;
if (er.xid_size())
{
//std::cout << "$" << std::flush;
er.get_xid(&xidp);
assert(xidp != NULL);
std::string xid((char*)xidp, er.xid_size());
- _tmap.insert_txn_data(xid, txn_data(h._rid, fid, true));
+ std::vector<std::string>::const_iterator cit = std::find(prep_txn_list.begin(),
+ prep_txn_list.end(), xid);
+ if (cit != prep_txn_list.end())
+ _tmap.insert_txn_data(xid, txn_data(h._rid, fid, true));
free(xidp);
}
else
- {
_emap.insert_fid(h._rid, fid);
- rd._enq_cnt_list[fid]++;
- if (rd._h_rid < h._rid)
- rd._h_rid = h._rid;
- }
+ if (rd._h_rid < h._rid)
+ rd._h_rid = h._rid;
}
break;
case RHM_JDAT_DEQ_MAGIC:
@@ -427,7 +429,10 @@
dr.get_xid(&xidp);
assert(xidp != NULL);
std::string xid((char*)xidp, dr.xid_size());
- _tmap.insert_txn_data(xid, txn_data(h._rid, fid, false));
+ std::vector<std::string>::const_iterator cit = std::find(prep_txn_list.begin(),
+ prep_txn_list.end(), xid);
+ if (cit != prep_txn_list.end())
+ _tmap.insert_txn_data(xid, txn_data(h._rid, fid, false));
free(xidp);
}
else
@@ -442,9 +447,9 @@
if (e.err_code() != jerrno::JERR_MAP_NOTFOUND)
throw e;
}
- if (rd._h_rid < h._rid)
- rd._h_rid = h._rid;
}
+ if (rd._h_rid < h._rid)
+ rd._h_rid = h._rid;
}
break;
case RHM_JDAT_TXA_MAGIC:
@@ -460,15 +465,26 @@
ar.get_xid(&xidp);
assert(xidp != NULL);
std::string xid((char*)xidp, ar.xid_size());
- txn_data_list tdl = _tmap.get_remove_tdata_list(xid);
- for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ std::vector<std::string>::const_iterator cit = std::find(prep_txn_list.begin(),
+ prep_txn_list.end(), xid);
+ if (cit != prep_txn_list.end())
{
- try { _emap.unlock(itr->_rid); }
- catch(jexception e) { if (e.err_code() != jerrno::JERR_MAP_NOTFOUND) throw e; }
- if (itr->_enq_flag)
- _wrfc.decr_enqcnt(itr->_fid);
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid);
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ {
+ try { _emap.unlock(itr->_rid); }
+ catch(jexception e)
+ {
+ if (e.err_code() != jerrno::JERR_MAP_NOTFOUND)
+ throw e;
+ }
+ if (itr->_enq_flag)
+ _wrfc.decr_enqcnt(itr->_fid);
+ }
}
free(xidp);
+ if (rd._h_rid < h._rid)
+ rd._h_rid = h._rid;
}
break;
case RHM_JDAT_TXC_MAGIC:
@@ -484,18 +500,25 @@
cr.get_xid(&xidp);
assert(xidp != NULL);
std::string xid((char*)xidp, cr.xid_size());
- txn_data_list tdl = _tmap.get_remove_tdata_list(xid);
- for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ std::vector<std::string>::const_iterator cit = std::find(prep_txn_list.begin(),
+ prep_txn_list.end(), xid);
+ if (cit != prep_txn_list.end())
{
- if (itr->_enq_flag) // txn enqueue
- _emap.insert_fid(itr->_rid, itr->_fid);
- else // txn dequeue
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid);
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
{
- u_int16_t fid = _emap.get_remove_fid(h._rid);
- _wrfc.decr_enqcnt(fid);
+ if (itr->_enq_flag) // txn enqueue
+ _emap.insert_fid(itr->_rid, itr->_fid);
+ else // txn dequeue
+ {
+ u_int16_t fid = _emap.get_remove_fid(h._rid);
+ _wrfc.decr_enqcnt(fid);
+ }
}
}
free(xidp);
+ if (rd._h_rid < h._rid)
+ rd._h_rid = h._rid;
}
break;
case RHM_JDAT_EMPTY_MAGIC:
@@ -565,131 +588,6 @@
return true;
}
-// const bool
-// jcntl::rcvr_fanalyze(u_int16_t fid, rcvdat& rd, const std::vector<std::string>& /*prep_txn_list*/)
-// throw (jexception)
-// {
-// bool eoj = false;
-// std::stringstream ss;
-// ss << _jdir.dirname() << "/" << _base_filename << ".";
-// ss << std::setfill('0') << std::setw(4) << fid << "." << JRNL_DATA_EXTENSION;
-// //std::cout << "rcvr_fanalyze: " << ss.str() << ":";
-// std::ifstream jifs(ss.str().c_str());
-// if (!jifs.good())
-// throw jexception(jerrno::JERR__FILEIO, ss.str(), "jinf", "analyze");
-//
-// // 1. Read file header
-// file_hdr fhdr;
-// jifs.read((char*)&fhdr, sizeof(fhdr));
-// if (fhdr._hdr._magic == RHM_JDAT_FILE_MAGIC)
-// {
-// assert(fhdr._fid == fid);
-// if (!rd._fro)
-// rd._fro = fhdr._fro;
-// std::streamoff foffs = fhdr._fro;
-// jifs.seekg(foffs);
-//
-// // 2. Read file records
-// while (jifs.good() && !eoj)
-// {
-// hdr h;
-// jifs.read((char*)&h, sizeof(hdr));
-// switch(h._magic)
-// {
-// case RHM_JDAT_ENQ_MAGIC:
-// {
-// size_t xidsize = 0;
-// size_t recsize = 0;
-// #if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
-// jifs.ignore(sizeof(u_int32_t));
-// #endif
-// jifs.read((char*)&xidsize, sizeof(size_t));
-// #if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
-// jifs.ignore(sizeof(u_int32_t));
-// #endif
-// #if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
-// jifs.ignore(sizeof(u_int32_t));
-// #endif
-// jifs.read((char*)&recsize, sizeof(size_t));
-// #if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
-// jifs.ignore(sizeof(u_int32_t));
-// #endif
-// _emap.insert_fid(h._rid, fid);
-// rd._enq_cnt_list[fid]++;
-// if (rd._h_rid < h._rid)
-// rd._h_rid = h._rid;
-// //std::cout << " e" << h._rid;
-// u_int32_t rec_dblks = jrec::size_dblks((size_t)recsize + sizeof(enq_hdr) +
-// sizeof(rec_tail));
-// foffs += rec_dblks * JRNL_DBLK_SIZE;
-// jifs.seekg(foffs);
-// }
-// break;
-// case RHM_JDAT_DEQ_MAGIC:
-// {
-// u_int64_t drid = 0;
-// size_t xidsize = 0;
-// jifs.read((char*)&drid, sizeof(u_int64_t));
-// #if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
-// jifs.ignore(sizeof(u_int32_t));
-// #endif
-// jifs.read((char*)&xidsize, sizeof(size_t));
-// #if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
-// jifs.ignore(sizeof(u_int32_t));
-// #endif
-// try
-// {
-// _emap.get_remove_fid(drid);
-// rd._enq_cnt_list[fid]--;
-// }
-// catch (jexception& e) {} // ignore JERR_EMAP_NOTFOUND thrown here
-// if (rd._h_rid < h._rid)
-// rd._h_rid = h._rid;
-// //std::cout << " d" << drid << ")";
-// u_int32_t rec_dblks = jrec::size_dblks(sizeof(deq_hdr));
-// foffs += rec_dblks * JRNL_DBLK_SIZE;
-// jifs.seekg(foffs);
-// }
-// break;
-// case RHM_JDAT_TXA_MAGIC:
-// //std::cout << " a";
-// break;
-// case RHM_JDAT_TXC_MAGIC:
-// //std::cout << " c";
-// break;
-// case RHM_JDAT_EMPTY_MAGIC:
-// {
-// //std::cout << " x";
-// u_int32_t rec_dblks = jrec::size_dblks(sizeof(hdr));
-// foffs += rec_dblks * JRNL_DBLK_SIZE;
-// jifs.seekg(foffs);
-// }
-// break;
-// case 0:
-// rd._lfid = fid;
-// rd._eo = foffs;
-// if (!jifs.eof())
-// eoj = true;
-// //std::cout << (jifs.eof()?" <eof>":" <end>");
-// break;
-// default:
-// std::stringstream ss;
-// ss << std::hex << std::setfill('0') << "Magic=0x" << std::setw(8) << h._magic;
-// throw jexception(jerrno::JERR_JCNTL_UNKNOWNMAGIC, ss.str(), "jcntl",
-// "rcvr_fanalyze");
-// }
-// }
-// }
-// else
-// {
-// eoj = true;
-// //std::cout << " <empty>";
-// }
-// jifs.close();
-// //std::cout << std::endl;
-// return eoj;
-// }
-
void
jcntl::aio_wr_callback(jcntl* journal, u_int32_t num_dtoks)
{
Modified: store/trunk/cpp/lib/jrnl/rcvdat.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/rcvdat.hpp 2007-10-10 21:36:41 UTC (rev 1001)
+++ store/trunk/cpp/lib/jrnl/rcvdat.hpp 2007-10-11 02:41:16 UTC (rev 1002)
@@ -42,10 +42,6 @@
struct rcvdat
{
- typedef std::vector<u_int64_t> rid_list;
- typedef std::pair<rid_list, rid_list> enq_deq_rid_list;
- typedef std::map<std::string, enq_deq_rid_list> enq_deq_map;
-
bool _empty; ///< Journal data files empty
u_int16_t _ffid; ///< First file id
size_t _fro; ///< First record offset in ffid
@@ -53,7 +49,6 @@
size_t _eo; ///< End offset (first byte past last record)
u_int64_t _h_rid; ///< Highest rid found
std::vector<u_int32_t> _enq_cnt_list; ///< Number enqueued records found for each file
- enq_deq_map _edm; ///< Map of enqueue and dequeue rids for each xid
rcvdat():
_empty(true),
@@ -62,8 +57,7 @@
_lfid(0),
_eo(0),
_h_rid(0),
- _enq_cnt_list(JRNL_NUM_FILES, 0),
- _edm()
+ _enq_cnt_list(JRNL_NUM_FILES, 0)
{}
void reset()
{
@@ -75,8 +69,18 @@
_h_rid=0;
for (unsigned f=0; f<_enq_cnt_list.size(); f++)
_enq_cnt_list[f] = 0;
- _edm.clear();
}
+ void print()
+ {
+ std::cout << "_empty=" << (_empty?"T":"F") << std::endl;
+ std::cout << "_ffid=" << _ffid << std::endl;
+ std::cout << "_fro=" << _fro << std::endl;
+ std::cout << "_lfid=" << _lfid << std::endl;
+ std::cout << "_eo=" << _eo << std::endl;
+ std::cout << "_h_rid=" << _h_rid << std::endl;
+ for (unsigned i=0; i<_enq_cnt_list.size(); i++)
+ std::cout << "_enq_cnt_list[" << i << "]=" << _enq_cnt_list[i] << std::endl;
+ }
};
}
}
Modified: store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp
===================================================================
--- store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp 2007-10-10 21:36:41 UTC (rev 1001)
+++ store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp 2007-10-11 02:41:16 UTC (rev 1002)
@@ -54,8 +54,9 @@
CPPUNIT_TEST(RecoveredReadTest);
CPPUNIT_TEST(TxnRecoveredReadTest);
CPPUNIT_TEST(RecoveredDequeueTest);
-// CPPUNIT_TEST(TxnRecoveredDequeueTest);
+ CPPUNIT_TEST(TxnRecoveredDequeueTest);
CPPUNIT_TEST(ComplexRecoveryTest1);
+ CPPUNIT_TEST(TxnComplexRecoveryTest1);
CPPUNIT_TEST(EncodeTest_000);
CPPUNIT_TEST(EncodeTest_001);
CPPUNIT_TEST(EncodeTest_002);
@@ -390,6 +391,7 @@
rhm::journal::jcntl jc(test_name, "jdata", test_name);
jc.initialize();
create_xid(xid, 1, XID_SIZE);
+ txn_list.push_back(xid);
for (int m=0; m<NUM_MSGS; m++)
enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
txn_commit(&jc, xid);
@@ -548,6 +550,7 @@
rhm::journal::jcntl jc(test_name, "jdata", test_name);
jc.initialize();
create_xid(xid, 2, XID_SIZE);
+ txn_list.push_back(xid);
for (int m=0; m<NUM_MSGS; m++)
enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
txn_commit(&jc, xid);
@@ -725,6 +728,7 @@
rhm::journal::jcntl jc(test_name, "jdata", test_name);
jc.initialize();
create_xid(xid, 3, XID_SIZE);
+ txn_list.push_back(xid);
for (int m=0; m<NUM_MSGS; m++)
enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
txn_commit(&jc, xid);
@@ -947,6 +951,160 @@
}
}
+ void TxnComplexRecoveryTest1()
+ {
+ std::vector<std::string> txn_list;
+ //Stack
+ char* test_name = "TxnComplexRecoveryTest1_Stack";
+ try
+ {
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.initialize();
+ // rids: 0 to NUM_MSGS - 1
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
+ // rids: NUM_MSGS to NUM_MSGS*2 - 1
+ create_xid(xid, 4, XID_SIZE);
+ txn_list.push_back(xid);
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
+ // rids: NUM_MSGS*2 to NUM_MSGS*3 - 1
+ for (int m=0; m<NUM_MSGS; m++)
+ deq_msg(&jc, m);
+ // rid: NUM_MSGS*3
+ txn_commit(&jc, xid);
+ jc.flush();
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt before recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ }
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.recover(txn_list);
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ jc.recover_complete();
+ // rids: NUM_MSGS*3+1 to NUM_MSGS*4
+ for (int m=NUM_MSGS*3+1; m<NUM_MSGS*4+1; m++)
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
+ jc.flush();
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ for (int m=NUM_MSGS*3+1; m<NUM_MSGS*4+1; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ // rids: NUM_MSGS*4+1 to NUM_MSGS*6
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ deq_msg(&jc, m);
+ for (int m=NUM_MSGS*3+1; m<NUM_MSGS*4+1; m++)
+ deq_msg(&jc, m);
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ // Heap
+ test_name = "TxnComplexRecoveryTest1_Heap";
+ rhm::journal::jcntl* jcp = NULL;
+ try
+ {
+ {
+ jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->initialize();
+ // rids: 0 to NUM_MSGS*2 - 1
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
+ create_xid(xid, 4, XID_SIZE);
+ // rids: NUM_MSGS to NUM_MSGS*2 - 1
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ enq_txn_msg(jcp, create_msg(msg, m, MSG_SIZE), xid);
+ // rids: NUM_MSGS*2 to NUM_MSGS*3 - 1
+ for (int m=0; m<NUM_MSGS; m++)
+ deq_msg(jcp, m);
+ // rid: NUM_MSGS*3
+ txn_commit(jcp, xid);
+ jcp->flush();
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt before recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ delete jcp;
+ jcp = NULL;
+ }
+ {
+ rhm::journal::jcntl* jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->recover(txn_list);
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ jcp->recover_complete();
+ // rids: NUM_MSGS*3+1 to NUM_MSGS*4
+ for (int m=NUM_MSGS*3+1; m<NUM_MSGS*4+1; m++)
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
+ jcp->flush();
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ for (int m=NUM_MSGS*3+1; m<NUM_MSGS*4+1; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ // rids: NUM_MSGS*4+1 to NUM_MSGS*6
+ for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
+ deq_msg(jcp, m);
+ for (int m=NUM_MSGS*3+1; m<NUM_MSGS*4+1; m++)
+ deq_msg(jcp, m);
+ delete jcp;
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ if (jcp)
+ delete jcp;
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ }
+
void EncodeTest_000()
{
runEncodeTest(0, 0, 0, false, 0, 0, false, 2, "Empty journal");
18 years, 6 months
rhmessaging commits: r1001 - in store/trunk/cpp: tests/jrnl and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2007-10-10 17:36:41 -0400 (Wed, 10 Oct 2007)
New Revision: 1001
Modified:
store/trunk/cpp/lib/jrnl/deq_rec.cpp
store/trunk/cpp/lib/jrnl/deq_rec.hpp
store/trunk/cpp/lib/jrnl/enq_rec.cpp
store/trunk/cpp/lib/jrnl/enq_rec.hpp
store/trunk/cpp/lib/jrnl/jcntl.cpp
store/trunk/cpp/lib/jrnl/jcntl.hpp
store/trunk/cpp/lib/jrnl/jrec.hpp
store/trunk/cpp/lib/jrnl/rcvdat.hpp
store/trunk/cpp/lib/jrnl/rmgr.cpp
store/trunk/cpp/lib/jrnl/txn_rec.cpp
store/trunk/cpp/lib/jrnl/txn_rec.hpp
store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp
store/trunk/cpp/tests/jrnl/rtest
Log:
Transactional recover now almost complete, but there is still some testing to be done. Correlation with prepared xid list still missing.
Modified: store/trunk/cpp/lib/jrnl/deq_rec.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/deq_rec.cpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/deq_rec.cpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -84,7 +84,7 @@
_buff = NULL;
}
-u_int32_t
+const u_int32_t
deq_rec::encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks) throw (jexception)
{
assert(wptr != NULL);
@@ -196,7 +196,7 @@
return size_dblks(wr_cnt);
}
-u_int32_t
+const u_int32_t
deq_rec::decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
throw (jexception)
{
@@ -318,8 +318,57 @@
return size_dblks(rd_cnt);
}
+const bool
+deq_rec::rcv_decode(hdr h, std::ifstream* ifsp, u_int32_t& rec_offs_dblks) throw (jexception)
+{
+    if (rec_offs_dblks) // Continue decoding xid from previous decode call
+ {
+ // TODO
+ }
+ else // Start at beginning of record
+ {
+ _deq_hdr._hdr.copy(h);
+ ifsp->read((char*)&_deq_hdr._deq_rid, sizeof(u_int64_t));
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ ifsp->read((char*)&_deq_hdr._xidsize, sizeof(size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ if (_deq_hdr._xidsize)
+ {
+ _buff = ::malloc(_deq_hdr._xidsize);
+ if (_buff == NULL)
+ {
+ std::stringstream ss;
+ ss << "_buff malloc(): errno=" << errno;
+ throw jexception(jerrno::JERR__MALLOC, ss.str(), "deq_rec", "decode");
+ }
+ // Decode xid
+ ifsp->read((char*)_buff, _deq_hdr._xidsize);
+ if ((size_t)ifsp->gcount() == _deq_hdr._xidsize)
+ {
+ ifsp->ignore(rec_size_dblks() * JRNL_DBLK_SIZE - sizeof(_deq_hdr) -
+ _deq_hdr._xidsize);
+ return true;
+ }
+ else
+ ; // TODO
+ }
+ else
+ {
+            // Ignore rest of record
+ rec_offs_dblks = rec_size_dblks();
+ ifsp->ignore(rec_offs_dblks * JRNL_DBLK_SIZE - sizeof(_deq_hdr));
+ return true;
+ }
+ }
+ return false;
+}
+
const size_t
-deq_rec::get_xid(const void** const xidpp)
+deq_rec::get_xid(void** const xidpp)
{
if (!_buff)
{
Modified: store/trunk/cpp/lib/jrnl/deq_rec.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/deq_rec.hpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/deq_rec.hpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -73,11 +73,15 @@
// Prepare instance for use in writing data to journal
void reset(const u_int64_t rid, const u_int64_t drid, const void* const xidp,
const size_t xidlen);
- u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+ const u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
throw (jexception);
- u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+ const u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks) throw (jexception);
+ // Decode used for recover
+ const bool rcv_decode(hdr h, std::ifstream* ifsp, u_int32_t& rec_offs_dblks)
throw (jexception);
- const size_t get_xid(const void** const xidpp);
+ inline const u_int64_t deq_rid() const { return _deq_hdr._deq_rid; }
+ const size_t get_xid(void** const xidpp);
std::string& str(std::string& str) const;
inline const size_t data_size() const { return 0; } // This record never carries data
const size_t xid_size() const;
Modified: store/trunk/cpp/lib/jrnl/enq_rec.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/enq_rec.cpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/enq_rec.cpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -98,7 +98,7 @@
_enq_tail._rid = rid;
}
-u_int32_t
+const u_int32_t
enq_rec::encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks) throw (jexception)
{
assert(wptr != NULL);
@@ -240,7 +240,7 @@
return size_dblks(wr_cnt);
}
-u_int32_t
+const u_int32_t
enq_rec::decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
throw (jexception)
{
@@ -427,6 +427,60 @@
return size_dblks(rd_cnt);
}
+const bool
+enq_rec::rcv_decode(hdr h, std::ifstream* ifsp, u_int32_t& rec_offs_dblks) throw (jexception)
+{
+    if (rec_offs_dblks) // Continue decoding xid from previous decode call
+ {
+ // TODO
+ }
+ else // Start at beginning of record
+ {
+ _enq_hdr._hdr.copy(h);
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ ifsp->read((char*)&_enq_hdr._xidsize, sizeof(size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler1
+#endif
+ ifsp->read((char*)&_enq_hdr._dsize, sizeof(size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler1
+#endif
+ if (_enq_hdr._xidsize)
+ {
+ _buff = ::malloc(_enq_hdr._xidsize);
+ if (_buff == NULL)
+ {
+ std::stringstream ss;
+ ss << "_buff malloc(): errno=" << errno;
+ throw jexception(jerrno::JERR__MALLOC, ss.str(), "deq_rec", "decode");
+ }
+ // Decode xid
+ ifsp->read((char*)_buff, _enq_hdr._xidsize);
+ if ((size_t)ifsp->gcount() == _enq_hdr._xidsize)
+ {
+ ifsp->ignore(rec_size_dblks() * JRNL_DBLK_SIZE - sizeof(_enq_hdr) -
+ _enq_hdr._xidsize);
+ return true;
+ }
+ else
+ ; // TODO
+ }
+ else
+ {
+            // Ignore rest of record
+ ifsp->ignore(rec_size_dblks() * JRNL_DBLK_SIZE - sizeof(_enq_hdr));
+ return true;
+ }
+ }
+ return false;
+}
+
const size_t
enq_rec::get_xid(void** const xidpp)
{
Modified: store/trunk/cpp/lib/jrnl/enq_rec.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/enq_rec.hpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/enq_rec.hpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -84,9 +84,12 @@
void reset(const u_int64_t rid, const void* const dbuf, const size_t dlen,
const void* const xidp, const size_t xidlen, bool transient);
- u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+ const u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
throw (jexception);
- u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+ const u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks) throw (jexception);
+ // Decode used for recover
+ const bool rcv_decode(hdr h, std::ifstream* ifsp, u_int32_t& rec_offs_dblks)
throw (jexception);
const size_t get_xid(void** const xidpp);
const size_t get_data(void** const datapp);
Modified: store/trunk/cpp/lib/jrnl/jcntl.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jcntl.cpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/jcntl.cpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -361,143 +361,335 @@
throw e;
}
- // Restore all read and write pointers
+ // Restore all read and write pointers and transactions
if (!rd._empty)
{
- bool eoj = false;
- for (u_int16_t fnum=0; fnum<JRNL_NUM_FILES && !eoj; fnum++)
- {
- u_int16_t fid = (fnum + rd._ffid) % JRNL_NUM_FILES;
- eoj = rcvr_fanalyze(fid, rd, prep_txn_list);
- }
+ u_int16_t fid = rd._ffid;
+ std::ifstream ifs;
+ while (rcvr_get_next_record(fid, &ifs, rd, prep_txn_list));
}
}
const bool
-jcntl::rcvr_fanalyze(u_int16_t fid, rcvdat& rd, const std::vector<std::string>& /*prep_txn_list*/)
- throw (jexception)
+jcntl::rcvr_get_next_record(u_int16_t& fid, std::ifstream* ifsp, rcvdat& rd,
+ const std::vector<std::string>& /*prep_txn_list*/) throw (jexception)
{
- bool eoj = false;
- std::stringstream ss;
- ss << _jdir.dirname() << "/" << _base_filename << ".";
- ss << std::setfill('0') << std::setw(4) << fid << "." << JRNL_DATA_EXTENSION;
-//std::cout << "rcvr_fanalyze: " << ss.str() << ":";
- std::ifstream jifs(ss.str().c_str());
- if (!jifs.good())
- throw jexception(jerrno::JERR__FILEIO, ss.str(), "jinf", "analyze");
-
- // 1. Read file header
- file_hdr fhdr;
- jifs.read((char*)&fhdr, sizeof(fhdr));
- if (fhdr._hdr._magic == RHM_JDAT_FILE_MAGIC)
+ u_int32_t dblks_read = 0;
+ bool done = false;
+ void* xidp = NULL;
+ hdr h;
+ jfile_cycle(fid, ifsp, rd, true);
+ ifsp->read((char*)&h, sizeof(hdr));
+ switch(h._magic)
{
- assert(fhdr._fid == fid);
- if (!rd._fro)
- rd._fro = fhdr._fro;
- std::streamoff foffs = fhdr._fro;
- jifs.seekg(foffs);
-
- // 2. Read file records
- while (jifs.good() && !eoj)
- {
- hdr h;
- jifs.read((char*)&h, sizeof(hdr));
- switch(h._magic)
+ case RHM_JDAT_ENQ_MAGIC:
{
- case RHM_JDAT_ENQ_MAGIC:
+//std::cout << " e" << h._rid << std::flush;
+ enq_rec er;
+ while (!done)
{
- size_t xidsize = 0;
- size_t recsize = 0;
-#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
- jifs.ignore(sizeof(u_int32_t));
-#endif
- jifs.read((char*)&xidsize, sizeof(size_t));
-#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
- jifs.ignore(sizeof(u_int32_t));
-#endif
-#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
- jifs.ignore(sizeof(u_int32_t));
-#endif
- jifs.read((char*)&recsize, sizeof(size_t));
-#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
- jifs.ignore(sizeof(u_int32_t));
-#endif
+ done = er.rcv_decode(h, ifsp, dblks_read);
+ jfile_cycle(fid, ifsp, rd, false);
+ }
+ if (er.xid_size())
+ {
+//std::cout << "$" << std::flush;
+ er.get_xid(&xidp);
+ assert(xidp != NULL);
+ std::string xid((char*)xidp, er.xid_size());
+ _tmap.insert_txn_data(xid, txn_data(h._rid, fid, true));
+ free(xidp);
+ }
+ else
+ {
_emap.insert_fid(h._rid, fid);
rd._enq_cnt_list[fid]++;
if (rd._h_rid < h._rid)
rd._h_rid = h._rid;
-//std::cout << " e" << h._rid;
- u_int32_t rec_dblks = jrec::size_dblks((size_t)recsize + sizeof(enq_hdr) +
- sizeof(rec_tail));
- foffs += rec_dblks * JRNL_DBLK_SIZE;
- jifs.seekg(foffs);
}
- break;
- case RHM_JDAT_DEQ_MAGIC:
+ }
+ break;
+ case RHM_JDAT_DEQ_MAGIC:
+ {
+//std::cout << " d" << h._rid << std::flush;
+ deq_rec dr;
+ while (!done)
{
- u_int64_t drid = 0;
- size_t xidsize = 0;
- jifs.read((char*)&drid, sizeof(u_int64_t));
-#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
- jifs.ignore(sizeof(u_int32_t));
-#endif
- jifs.read((char*)&xidsize, sizeof(size_t));
-#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
- jifs.ignore(sizeof(u_int32_t));
-#endif
+ done = dr.rcv_decode(h, ifsp, dblks_read);
+ jfile_cycle(fid, ifsp, rd, false);
+ }
+ if (dr.xid_size())
+ {
+//std::cout << "$" << std::flush;
+ // If the enqueue is part of a pending txn, it will not yet be in emap
+ try { _emap.lock(h._rid); }
+ catch(jexception e) { if (e.err_code() != jerrno::JERR_MAP_NOTFOUND) throw e; }
+ dr.get_xid(&xidp);
+ assert(xidp != NULL);
+ std::string xid((char*)xidp, dr.xid_size());
+ _tmap.insert_txn_data(xid, txn_data(h._rid, fid, false));
+ free(xidp);
+ }
+ else
+ {
try
{
- _emap.get_remove_fid(drid);
+ _emap.get_remove_fid(dr.deq_rid());
rd._enq_cnt_list[fid]--;
}
- catch (jexception& e) {} // ignore JERR_EMAP_NOTFOUND thrown here
+ catch (jexception& e)
+ {
+ if (e.err_code() != jerrno::JERR_MAP_NOTFOUND)
+ throw e;
+ }
if (rd._h_rid < h._rid)
rd._h_rid = h._rid;
-//std::cout << " d" << drid << ")";
- u_int32_t rec_dblks = jrec::size_dblks(sizeof(deq_hdr));
- foffs += rec_dblks * JRNL_DBLK_SIZE;
- jifs.seekg(foffs);
}
- break;
- case RHM_JDAT_TXA_MAGIC:
-//std::cout << " a";
- break;
- case RHM_JDAT_TXC_MAGIC:
-//std::cout << " c";
- break;
- case RHM_JDAT_EMPTY_MAGIC:
+ }
+ break;
+ case RHM_JDAT_TXA_MAGIC:
+ {
+//std::cout << " a" << h._rid << std::flush;
+ txn_rec ar;
+ while (!done)
{
-//std::cout << " x";
- u_int32_t rec_dblks = jrec::size_dblks(sizeof(hdr));
- foffs += rec_dblks * JRNL_DBLK_SIZE;
- jifs.seekg(foffs);
+ done = ar.rcv_decode(h, ifsp, dblks_read);
+ jfile_cycle(fid, ifsp, rd, false);
}
- break;
- case 0:
- rd._lfid = fid;
- rd._eo = foffs;
- if (!jifs.eof())
- eoj = true;
-//std::cout << (jifs.eof()?" <eof>":" <end>");
- break;
- default:
- std::stringstream ss;
- ss << std::hex << std::setfill('0') << "Magic=0x" << std::setw(8) << h._magic;
- throw jexception(jerrno::JERR_JCNTL_UNKNOWNMAGIC, ss.str(), "jcntl",
- "rcvr_fanalyze");
+ // Delete this txn from tmap, unlock any locked records in emap
+ ar.get_xid(&xidp);
+ assert(xidp != NULL);
+ std::string xid((char*)xidp, ar.xid_size());
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid);
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ {
+ try { _emap.unlock(itr->_rid); }
+ catch(jexception e) { if (e.err_code() != jerrno::JERR_MAP_NOTFOUND) throw e; }
+ if (itr->_enq_flag)
+ _wrfc.decr_enqcnt(itr->_fid);
+ }
+ free(xidp);
}
+ break;
+ case RHM_JDAT_TXC_MAGIC:
+ {
+//std::cout << " c" << h._rid << std::flush;
+ txn_rec cr;
+ while (!done)
+ {
+ done = cr.rcv_decode(h, ifsp, dblks_read);
+ jfile_cycle(fid, ifsp, rd, false);
+ }
+ // Delete this txn from tmap, process records into emap
+ cr.get_xid(&xidp);
+ assert(xidp != NULL);
+ std::string xid((char*)xidp, cr.xid_size());
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid);
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ {
+ if (itr->_enq_flag) // txn enqueue
+ _emap.insert_fid(itr->_rid, itr->_fid);
+ else // txn dequeue
+ {
+ u_int16_t fid = _emap.get_remove_fid(h._rid);
+ _wrfc.decr_enqcnt(fid);
+ }
+ }
+ free(xidp);
+ }
+ break;
+ case RHM_JDAT_EMPTY_MAGIC:
+ {
+//std::cout << " x" << std::flush;
+ u_int32_t rec_dblks = jrec::size_dblks(sizeof(hdr));
+ ifsp->ignore(rec_dblks * JRNL_DBLK_SIZE - sizeof(hdr));
+ }
+ break;
+ case 0:
+//std::cout << " z" << std::flush;
+ rd._lfid = fid;
+ rd._eo = ifsp->tellg();
+ return false;
+ default:
+//std::cout << " ?" << std::flush;
+ std::stringstream ss;
+ ss << std::hex << std::setfill('0') << "Magic=0x" << std::setw(8) << h._magic;
+ throw jexception(jerrno::JERR_JCNTL_UNKNOWNMAGIC, ss.str(), "jcntl",
+ "rcvr_get_next_record");
+ }
+
+ return true;
+}
+
+const bool
+jcntl::jfile_cycle(u_int16_t& fid, std::ifstream* ifsp, rcvdat& rd, const bool jump_fro)
+{
+ if (ifsp->is_open())
+ {
+ if (ifsp->eof() || !ifsp->good())
+ {
+ ifsp->close();
+ fid++;
+ if (fid >= JRNL_NUM_FILES)
+ fid = 0;
+ if (fid == rd._ffid) // used up all journal files
+ return false;
}
}
- else
+ if (!ifsp->is_open())
{
- eoj = true;
-//std::cout << " <empty>";
+ std::stringstream ss;
+ ss << _jdir.dirname() << "/" << _base_filename << ".";
+ ss << std::setfill('0') << std::setw(4) << fid << "." << JRNL_DATA_EXTENSION;
+ ifsp->open(ss.str().c_str());
+ if (!ifsp->good())
+ throw jexception(jerrno::JERR__FILEIO, ss.str(), "jcntl", "rcvr_get_next_record");
+
+ // Read file header
+ file_hdr fhdr;
+ ifsp->read((char*)&fhdr, sizeof(fhdr));
+ if (fhdr._hdr._magic == RHM_JDAT_FILE_MAGIC)
+ {
+ assert(fhdr._fid == fid);
+ if (!rd._fro)
+ rd._fro = fhdr._fro;
+ std::streamoff foffs = jump_fro ? fhdr._fro : JRNL_DBLK_SIZE * JRNL_SBLK_SIZE;
+ ifsp->seekg(foffs);
+ }
+ else
+ {
+ ifsp->close();
+ return false;
+ }
}
- jifs.close();
-//std::cout << std::endl;
- return eoj;
+ return true;
}
+// const bool
+// jcntl::rcvr_fanalyze(u_int16_t fid, rcvdat& rd, const std::vector<std::string>& /*prep_txn_list*/)
+// throw (jexception)
+// {
+// bool eoj = false;
+// std::stringstream ss;
+// ss << _jdir.dirname() << "/" << _base_filename << ".";
+// ss << std::setfill('0') << std::setw(4) << fid << "." << JRNL_DATA_EXTENSION;
+// //std::cout << "rcvr_fanalyze: " << ss.str() << ":";
+// std::ifstream jifs(ss.str().c_str());
+// if (!jifs.good())
+// throw jexception(jerrno::JERR__FILEIO, ss.str(), "jinf", "analyze");
+//
+// // 1. Read file header
+// file_hdr fhdr;
+// jifs.read((char*)&fhdr, sizeof(fhdr));
+// if (fhdr._hdr._magic == RHM_JDAT_FILE_MAGIC)
+// {
+// assert(fhdr._fid == fid);
+// if (!rd._fro)
+// rd._fro = fhdr._fro;
+// std::streamoff foffs = fhdr._fro;
+// jifs.seekg(foffs);
+//
+// // 2. Read file records
+// while (jifs.good() && !eoj)
+// {
+// hdr h;
+// jifs.read((char*)&h, sizeof(hdr));
+// switch(h._magic)
+// {
+// case RHM_JDAT_ENQ_MAGIC:
+// {
+// size_t xidsize = 0;
+// size_t recsize = 0;
+// #if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+// jifs.ignore(sizeof(u_int32_t));
+// #endif
+// jifs.read((char*)&xidsize, sizeof(size_t));
+// #if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+// jifs.ignore(sizeof(u_int32_t));
+// #endif
+// #if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+// jifs.ignore(sizeof(u_int32_t));
+// #endif
+// jifs.read((char*)&recsize, sizeof(size_t));
+// #if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+// jifs.ignore(sizeof(u_int32_t));
+// #endif
+// _emap.insert_fid(h._rid, fid);
+// rd._enq_cnt_list[fid]++;
+// if (rd._h_rid < h._rid)
+// rd._h_rid = h._rid;
+// //std::cout << " e" << h._rid;
+// u_int32_t rec_dblks = jrec::size_dblks((size_t)recsize + sizeof(enq_hdr) +
+// sizeof(rec_tail));
+// foffs += rec_dblks * JRNL_DBLK_SIZE;
+// jifs.seekg(foffs);
+// }
+// break;
+// case RHM_JDAT_DEQ_MAGIC:
+// {
+// u_int64_t drid = 0;
+// size_t xidsize = 0;
+// jifs.read((char*)&drid, sizeof(u_int64_t));
+// #if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+// jifs.ignore(sizeof(u_int32_t));
+// #endif
+// jifs.read((char*)&xidsize, sizeof(size_t));
+// #if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+// jifs.ignore(sizeof(u_int32_t));
+// #endif
+// try
+// {
+// _emap.get_remove_fid(drid);
+// rd._enq_cnt_list[fid]--;
+// }
+// catch (jexception& e) {} // ignore JERR_EMAP_NOTFOUND thrown here
+// if (rd._h_rid < h._rid)
+// rd._h_rid = h._rid;
+// //std::cout << " d" << drid << ")";
+// u_int32_t rec_dblks = jrec::size_dblks(sizeof(deq_hdr));
+// foffs += rec_dblks * JRNL_DBLK_SIZE;
+// jifs.seekg(foffs);
+// }
+// break;
+// case RHM_JDAT_TXA_MAGIC:
+// //std::cout << " a";
+// break;
+// case RHM_JDAT_TXC_MAGIC:
+// //std::cout << " c";
+// break;
+// case RHM_JDAT_EMPTY_MAGIC:
+// {
+// //std::cout << " x";
+// u_int32_t rec_dblks = jrec::size_dblks(sizeof(hdr));
+// foffs += rec_dblks * JRNL_DBLK_SIZE;
+// jifs.seekg(foffs);
+// }
+// break;
+// case 0:
+// rd._lfid = fid;
+// rd._eo = foffs;
+// if (!jifs.eof())
+// eoj = true;
+// //std::cout << (jifs.eof()?" <eof>":" <end>");
+// break;
+// default:
+// std::stringstream ss;
+// ss << std::hex << std::setfill('0') << "Magic=0x" << std::setw(8) << h._magic;
+// throw jexception(jerrno::JERR_JCNTL_UNKNOWNMAGIC, ss.str(), "jcntl",
+// "rcvr_fanalyze");
+// }
+// }
+// }
+// else
+// {
+// eoj = true;
+// //std::cout << " <empty>";
+// }
+// jifs.close();
+// //std::cout << std::endl;
+// return eoj;
+// }
+
void
jcntl::aio_wr_callback(jcntl* journal, u_int32_t num_dtoks)
{
Modified: store/trunk/cpp/lib/jrnl/jcntl.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jcntl.hpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/jcntl.hpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -611,16 +611,21 @@
/**
* \brief Analyze journal for recovery.
*/
- void rcvr_janalyze(rcvdat& jrs, const std::vector<std::string>& prep_txn_list)
+ void rcvr_janalyze(rcvdat& rd, const std::vector<std::string>& prep_txn_list)
throw (jexception);
+ const bool rcvr_get_next_record(u_int16_t& fid, std::ifstream* ifsp, rcvdat& rd,
+ const std::vector<std::string>& prep_txn_list) throw (jexception);
+
+ const bool jfile_cycle(u_int16_t& fid, std::ifstream* ifsp, rcvdat& rd,
+ const bool jump_fro);
/**
* \brief Analyze a particular journal file for recovery.
*
* \return <b><i>true</i></b> if end of journal (eoj) found; <b><i>false</i></b> otherwise.
*/
- const bool rcvr_fanalyze(u_int16_t fid, rcvdat& jrs,
- const std::vector<std::string>& prep_txn_list) throw (jexception);
+// const bool rcvr_fanalyze(u_int16_t fid, rcvdat& jrs,
+// const std::vector<std::string>& prep_txn_list) throw (jexception);
/**
* Internal callback write
Modified: store/trunk/cpp/lib/jrnl/jrec.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/jrec.hpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/jrec.hpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -41,6 +41,7 @@
}
}
+#include <fstream>
#include <string>
#include <sys/types.h>
#include <jrnl/file_hdr.hpp>
@@ -113,8 +114,8 @@
* \param max_size_dblks Maximum number of data-blocks to write to pointer wptr.
* \returns Number of data-blocks encoded.
*/
- virtual u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
- throw (jexception) = 0;
+ virtual const u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks) throw (jexception) = 0;
/**
* \brief Decode into this instance of jrec from the read buffer at the disk-block-aligned
@@ -146,9 +147,12 @@
* \param max_size_dblks Maximum number of data-blocks to read from pointer rptr.
* \returns Number of data-blocks read (consumed).
*/
- virtual u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ virtual const u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks,
u_int32_t max_size_dblks) throw (jexception) = 0;
+ virtual const bool rcv_decode(hdr h, std::ifstream* ifsp, u_int32_t& rec_offs_dblks)
+ throw (jexception) = 0;
+
virtual std::string& str(std::string& str) const = 0;
virtual const size_t data_size() const = 0;
virtual const size_t xid_size() const = 0;
Modified: store/trunk/cpp/lib/jrnl/rcvdat.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/rcvdat.hpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/rcvdat.hpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -62,7 +62,7 @@
_lfid(0),
_eo(0),
_h_rid(0),
- _enq_cnt_list(JRNL_NUM_FILES),
+ _enq_cnt_list(JRNL_NUM_FILES, 0),
_edm()
{}
void reset()
@@ -73,7 +73,8 @@
_lfid=0;
_eo=0;
_h_rid=0;
- _enq_cnt_list.clear();
+ for (unsigned f=0; f<_enq_cnt_list.size(); f++)
+ _enq_cnt_list[f] = 0;
_edm.clear();
}
};
Modified: store/trunk/cpp/lib/jrnl/rmgr.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/rmgr.cpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/rmgr.cpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -250,7 +250,7 @@
while (true)
{
//std::string s;
-//std::cout << " [f pi=" << _pg_index << " d=" << dblks_rem() << " f=" << (_rrfc.empty()?"T":"F") << " status:" << _rrfc.file_handle()->status_str(s) << "]" << std::flush;
+//std::cout << " [f pi=" << _pg_index << " d=" << dblks_rem() << " c=" << (_rrfc.is_compl()?"T":"F") << " o=" << (_rrfc.is_wr_aio_outstanding()?"T":"F") << " status:" << _rrfc.file_handle()->status_str(s) << "]" << std::flush;
if(dblks_rem() == 0 && _rrfc.is_compl() && !_rrfc.is_wr_aio_outstanding())
{
aio_cycle(); // check if any AIOs have returned
Modified: store/trunk/cpp/lib/jrnl/txn_rec.cpp
===================================================================
--- store/trunk/cpp/lib/jrnl/txn_rec.cpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/txn_rec.cpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -88,7 +88,7 @@
_txn_tail._rid = rid;
}
-u_int32_t
+const u_int32_t
txn_rec::encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks) throw (jexception)
{
assert(wptr != NULL);
@@ -196,7 +196,7 @@
return size_dblks(wr_cnt);
}
-u_int32_t
+const u_int32_t
txn_rec::decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
throw (jexception)
{
@@ -313,8 +313,45 @@
return size_dblks(rd_cnt);
}
+const bool
+txn_rec::rcv_decode(hdr h, std::ifstream* ifsp, u_int32_t& rec_offs_dblks) throw (jexception)
+{
+ if (rec_offs_dblks) // Continue decoding xid from previous decode call
+ {
+ // TODO
+ }
+ else // Start at beginning of record
+ {
+ _txn_hdr._hdr.copy(h);
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ ifsp->read((char*)&_txn_hdr._xidsize, sizeof(size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ _buff = ::malloc(_txn_hdr._xidsize);
+ if (_buff == NULL)
+ {
+ std::stringstream ss;
+ ss << "_buff malloc(): errno=" << errno;
+ throw jexception(jerrno::JERR__MALLOC, ss.str(), "deq_rec", "decode");
+ }
+ // Decode xid
+ ifsp->read((char*)_buff, _txn_hdr._xidsize);
+ if ((size_t)ifsp->gcount() == _txn_hdr._xidsize)
+ {
+ ifsp->ignore(rec_size_dblks() * JRNL_DBLK_SIZE - sizeof(_txn_hdr) - _txn_hdr._xidsize);
+ return true;
+ }
+ else
+ ; // TODO
+ }
+ return false;
+}
+
const size_t
-txn_rec::get_xid(const void** const xidpp)
+txn_rec::get_xid(void** const xidpp)
{
if (!_buff)
{
Modified: store/trunk/cpp/lib/jrnl/txn_rec.hpp
===================================================================
--- store/trunk/cpp/lib/jrnl/txn_rec.hpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/lib/jrnl/txn_rec.hpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -73,11 +73,14 @@
// Prepare instance for use in writing data to journal
void reset(const u_int32_t magic, const u_int64_t rid, const void* const xidp,
const size_t xidlen);
- u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+ const u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
throw (jexception);
- u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+ const u_int32_t decode(hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks) throw (jexception);
+ // Decode used for recover
+ const bool rcv_decode(hdr h, std::ifstream* ifsp, u_int32_t& rec_offs_dblks)
throw (jexception);
- const size_t get_xid(const void** const xidpp);
+ const size_t get_xid(void** const xidpp);
std::string& str(std::string& str) const;
inline const size_t data_size() const { return 0; } // This record never carries data
const size_t xid_size() const;
Modified: store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp
===================================================================
--- store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/tests/jrnl/JournalSystemTests.cpp 2007-10-10 21:36:41 UTC (rev 1001)
@@ -38,6 +38,8 @@
#define NUM_MSGS 5
#define MAX_AIO_SLEEPS 500
#define AIO_SLEEP_TIME 1000
+#define MSG_SIZE 100
+#define XID_SIZE 64
class JournalSystemTests : public CppUnit::TestCase
{
@@ -46,9 +48,13 @@
CPPUNIT_TEST(InitializationTest);
CPPUNIT_TEST(EmptyRecoverTest);
CPPUNIT_TEST(EnqueueTest);
+ CPPUNIT_TEST(TxnEnqueueTest);
CPPUNIT_TEST(RecoverReadTest);
+ CPPUNIT_TEST(TxnRecoverReadTest);
CPPUNIT_TEST(RecoveredReadTest);
+ CPPUNIT_TEST(TxnRecoveredReadTest);
CPPUNIT_TEST(RecoveredDequeueTest);
+// CPPUNIT_TEST(TxnRecoveredDequeueTest);
CPPUNIT_TEST(ComplexRecoveryTest1);
CPPUNIT_TEST(EncodeTest_000);
CPPUNIT_TEST(EncodeTest_001);
@@ -83,6 +89,7 @@
jtest t;
std::string msg;
+ std::string xid;
void* mbuff;
size_t msize;
void* xidbuff;
@@ -230,7 +237,7 @@
rhm::journal::jcntl jc(test_name, "jdata", test_name);
jc.initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(&jc, create_msg(msg, m));
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
}
catch (rhm::journal::jexception& e)
{
@@ -247,7 +254,7 @@
CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
jcp->initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(jcp, create_msg(msg, m));
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
delete jcp;
}
catch (rhm::journal::jexception& e)
@@ -260,6 +267,48 @@
}
}
+ void TxnEnqueueTest()
+ {
+ //Stack
+ char* test_name = "TxnEnqueueTest_Stack";
+ try
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.initialize();
+ create_xid(xid, 0, XID_SIZE);
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(&jc, xid);
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ // Heap
+ test_name = "TxnEnqueueTest_Heap";
+ rhm::journal::jcntl* jcp = NULL;
+ try
+ {
+ jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->initialize();
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(jcp, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(jcp, xid);
+ delete jcp;
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ if (jcp)
+ delete jcp;
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ }
+
void RecoverReadTest()
{
std::vector<std::string> txn_list;
@@ -271,7 +320,7 @@
rhm::journal::jcntl jc(test_name, "jdata", test_name);
jc.initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(&jc, create_msg(msg, m));
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
}
{
rhm::journal::jcntl jc(test_name, "jdata", test_name);
@@ -280,7 +329,7 @@
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(string((char*)mbuff, msize)) == 0);
cleanMessage();
}
}
@@ -301,7 +350,7 @@
CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
jcp->initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(jcp, create_msg(msg, m));
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
delete jcp;
jcp = NULL;
}
@@ -314,7 +363,7 @@
read_msg(jcp);
std::string msg((char*)mbuff, msize);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
delete jcp;
@@ -330,6 +379,79 @@
}
}
+ void TxnRecoverReadTest()
+ {
+ std::vector<std::string> txn_list;
+ //Stack
+ char* test_name = "TxnRecoverReadTest_Stack";
+ try
+ {
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.initialize();
+ create_xid(xid, 1, XID_SIZE);
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(&jc, xid);
+ }
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.recover(txn_list);
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ // Heap
+ test_name = "TxnRecoverReadTest_Heap";
+ rhm::journal::jcntl* jcp = NULL;
+ try
+ {
+ {
+ jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->initialize();
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(jcp, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(jcp, xid);
+ delete jcp;
+ jcp = NULL;
+ }
+ {
+ rhm::journal::jcntl* jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->recover(txn_list);
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(jcp);
+ std::string msg((char*)mbuff, msize);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ delete jcp;
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ if (jcp)
+ delete jcp;
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ }
+
void RecoveredReadTest()
{
std::vector<std::string> txn_list;
@@ -341,7 +463,7 @@
rhm::journal::jcntl jc(test_name, "jdata", test_name);
jc.initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(&jc, create_msg(msg, m));
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
}
{
rhm::journal::jcntl jc(test_name, "jdata", test_name);
@@ -350,7 +472,7 @@
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
jc.recover_complete();
@@ -358,7 +480,7 @@
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
}
@@ -379,7 +501,7 @@
CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
jcp->initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(jcp, create_msg(msg, m));
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
delete jcp;
jcp = NULL;
}
@@ -391,7 +513,7 @@
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
jcp->recover_complete();
@@ -399,7 +521,7 @@
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
delete jcp;
@@ -415,6 +537,94 @@
}
}
+ void TxnRecoveredReadTest()
+ {
+ std::vector<std::string> txn_list;
+ //Stack
+ char* test_name = "TxnRecoveredReadTest_Stack";
+ try
+ {
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.initialize();
+ create_xid(xid, 2, XID_SIZE);
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(&jc, xid);
+ }
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.recover(txn_list);
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ jc.recover_complete();
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ // Heap
+ test_name = "TxnRecoveredReadTest_Heap";
+ rhm::journal::jcntl* jcp = NULL;
+ try
+ {
+ {
+ jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->initialize();
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(jcp, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(jcp, xid);
+ delete jcp;
+ jcp = NULL;
+ }
+ {
+ rhm::journal::jcntl* jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->recover(txn_list);
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ jcp->recover_complete();
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ delete jcp;
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ if (jcp)
+ delete jcp;
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ }
+
void RecoveredDequeueTest()
{
std::vector<std::string> txn_list;
@@ -426,7 +636,7 @@
rhm::journal::jcntl jc(test_name, "jdata", test_name);
jc.initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(&jc, create_msg(msg, m));
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
}
{
rhm::journal::jcntl jc(test_name, "jdata", test_name);
@@ -435,7 +645,7 @@
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
jc.recover_complete();
@@ -443,7 +653,7 @@
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
for (int m=0; m<NUM_MSGS; m++)
@@ -466,7 +676,7 @@
CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
jcp->initialize();
for (int m=0; m<NUM_MSGS; m++)
- enq_msg(jcp, create_msg(msg, m));
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
delete jcp;
jcp = NULL;
}
@@ -478,7 +688,7 @@
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
jcp->recover_complete();
@@ -486,7 +696,7 @@
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
for (int m=0; m<NUM_MSGS; m++)
@@ -504,6 +714,98 @@
}
}
+ void TxnRecoveredDequeueTest()
+ {
+ std::vector<std::string> txn_list;
+ //Stack
+ char* test_name = "TxnRecoveredDequeueTest_Stack";
+ try
+ {
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.initialize();
+ create_xid(xid, 3, XID_SIZE);
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(&jc, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(&jc, xid);
+ }
+ {
+ rhm::journal::jcntl jc(test_name, "jdata", test_name);
+ jc.recover(txn_list);
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ jc.recover_complete();
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(&jc);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ for (int m=0; m<NUM_MSGS; m++)
+ deq_msg(&jc, m);
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ // Heap
+ test_name = "TxnRecoveredDequeueTest_Heap";
+ rhm::journal::jcntl* jcp = NULL;
+ try
+ {
+ {
+ jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->initialize();
+ for (int m=0; m<NUM_MSGS; m++)
+ enq_txn_msg(jcp, create_msg(msg, m, MSG_SIZE), xid);
+ txn_commit(jcp, xid);
+ delete jcp;
+ jcp = NULL;
+ }
+ {
+ rhm::journal::jcntl* jcp = new rhm::journal::jcntl(test_name, "jdata", test_name);
+ CPPUNIT_ASSERT_MESSAGE("Journal heap instantiation failed.", jcp != NULL);
+ jcp->recover(txn_list);
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ jcp->recover_complete();
+ for (int m=0; m<NUM_MSGS; m++)
+ {
+ read_msg(jcp);
+ CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
+ cleanMessage();
+ }
+ for (int m=0; m<NUM_MSGS; m++)
+ deq_msg(jcp, m);
+ delete jcp;
+ }
+ }
+ catch (rhm::journal::jexception& e)
+ {
+ if (jcp)
+ delete jcp;
+ std::stringstream ss;
+ ss << e;
+ CPPUNIT_FAIL(ss.str());
+ }
+ }
+
void ComplexRecoveryTest1()
{
std::vector<std::string> txn_list;
@@ -516,7 +818,7 @@
jc.initialize();
// rids: 0 to NUM_MSGS*2 - 1
for (int m=0; m<NUM_MSGS*2; m++)
- enq_msg(&jc, create_msg(msg, m));
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
// rids: NUM_MSGS*2 to NUM_MSGS*3 - 1
for (int m=0; m<NUM_MSGS; m++)
deq_msg(&jc, m);
@@ -525,7 +827,7 @@
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt before recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
}
@@ -536,26 +838,26 @@
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
jc.recover_complete();
// rids: NUM_MSGS*3 to NUM_MSGS*4 - 1
for (int m=NUM_MSGS*3; m<NUM_MSGS*4; m++)
- enq_msg(&jc, create_msg(msg, m));
+ enq_msg(&jc, create_msg(msg, m, MSG_SIZE));
jc.flush();
for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
for (int m=NUM_MSGS*3; m<NUM_MSGS*4; m++)
{
read_msg(&jc);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
// rids: NUM_MSGS*4 to NUM_MSGS*6 - 1
@@ -582,7 +884,7 @@
jcp->initialize();
// rids: 0 to NUM_MSGS*2 - 1
for (int m=0; m<NUM_MSGS*2; m++)
- enq_msg(jcp, create_msg(msg, m));
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
// rids: NUM_MSGS*2 to NUM_MSGS*3 - 1
for (int m=0; m<NUM_MSGS; m++)
deq_msg(jcp, m);
@@ -591,7 +893,7 @@
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt before recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
delete jcp;
@@ -605,26 +907,26 @@
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt during recover.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
jcp->recover_complete();
// rids: NUM_MSGS*3 to NUM_MSGS*4 - 1
for (int m=NUM_MSGS*3; m<NUM_MSGS*4; m++)
- enq_msg(jcp, create_msg(msg, m));
+ enq_msg(jcp, create_msg(msg, m, MSG_SIZE));
jcp->flush();
for (int m=NUM_MSGS; m<NUM_MSGS*2; m++)
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
for (int m=NUM_MSGS*3; m<NUM_MSGS*4; m++)
{
read_msg(jcp);
CPPUNIT_ASSERT_MESSAGE("Message corrupt after recovery.",
- create_msg(msg, m).compare(std::string((char*)mbuff, msize)) == 0);
+ create_msg(msg, m, MSG_SIZE).compare(std::string((char*)mbuff, msize)) == 0);
cleanMessage();
}
// rids: NUM_MSGS*4 to NUM_MSGS*6 - 1
@@ -809,6 +1111,16 @@
dtp, false), jc, aio_sleep_cnt, dtp));
}
+ void enq_txn_msg(rhm::journal::jcntl* jc, const std::string msg, const std::string xid)
+ {
+ rhm::journal::data_tok* dtp = new rhm::journal::data_tok;
+ CPPUNIT_ASSERT_MESSAGE("Data Token heap instantiation failed.", dtp != NULL);
+
+ unsigned aio_sleep_cnt = 0;
+ while (handle_jcntl_response(jc->enqueue_txn_data_record(msg.c_str(), msg.size(), msg.size(),
+ dtp, xid, false), jc, aio_sleep_cnt, dtp));
+ }
+
void deq_msg(rhm::journal::jcntl* jc, u_int64_t rid)
{
rhm::journal::data_tok* dtp = new rhm::journal::data_tok;
@@ -820,6 +1132,36 @@
while (handle_jcntl_response(jc->dequeue_data_record(dtp), jc, aio_sleep_cnt, dtp));
}
+ void deq_txn_msg(rhm::journal::jcntl* jc, u_int64_t rid, const std::string xid)
+ {
+ rhm::journal::data_tok* dtp = new rhm::journal::data_tok;
+ CPPUNIT_ASSERT_MESSAGE("Data Token heap instantiation failed.", dtp != NULL);
+ dtp->set_wstate(rhm::journal::data_tok::ENQ);
+ dtp->set_rid(rid);
+
+ unsigned aio_sleep_cnt = 0;
+ while (handle_jcntl_response(jc->dequeue_txn_data_record(dtp, xid),
+ jc, aio_sleep_cnt, dtp));
+ }
+
+ void txn_abort(rhm::journal::jcntl* jc, const std::string xid)
+ {
+ rhm::journal::data_tok* dtp = new rhm::journal::data_tok;
+ CPPUNIT_ASSERT_MESSAGE("Data Token heap instantiation failed.", dtp != NULL);
+
+ unsigned aio_sleep_cnt = 0;
+ while (handle_jcntl_response(jc->txn_abort(dtp, xid), jc, aio_sleep_cnt, dtp));
+ }
+
+ void txn_commit(rhm::journal::jcntl* jc, const std::string xid)
+ {
+ rhm::journal::data_tok* dtp = new rhm::journal::data_tok;
+ CPPUNIT_ASSERT_MESSAGE("Data Token heap instantiation failed.", dtp != NULL);
+
+ unsigned aio_sleep_cnt = 0;
+ while (handle_jcntl_response(jc->txn_commit(dtp, xid), jc, aio_sleep_cnt, dtp));
+ }
+
char* read_msg(rhm::journal::jcntl* jc)
{
rhm::journal::data_tok* dtp = new rhm::journal::data_tok;
@@ -849,34 +1191,47 @@
else
{
delete dtp;
- CPPUNIT_FAIL("dequeue_data(): timeout on RHM_IORES_AIO_WAIT.");
+ CPPUNIT_FAIL("Timeout on RHM_IORES_AIO_WAIT.");
}
break;
case rhm::journal::RHM_IORES_EMPTY:
delete dtp;
- CPPUNIT_FAIL("dequeue_data(): RHM_IORES_EMPTY.");
+ CPPUNIT_FAIL("RHM_IORES_EMPTY");
case rhm::journal::RHM_IORES_FULL:
delete dtp;
- CPPUNIT_FAIL("dequeue_data(): RHM_IORES_FULL.");
+ CPPUNIT_FAIL("RHM_IORES_FULL");
case rhm::journal::RHM_IORES_BUSY:
delete dtp;
- CPPUNIT_FAIL("dequeue_data(): RHM_IORES_BUSY.");
+ CPPUNIT_FAIL("RHM_IORES_BUSY");
+ case rhm::journal::RHM_IORES_TXPENDING:
+ delete dtp;
+ CPPUNIT_FAIL("RHM_IORES_TXPENDING");
default:
delete dtp;
- CPPUNIT_FAIL("dequeue_data(): unknown return value.");
+ CPPUNIT_FAIL("unknown return value");
}
return true;
}
- static std::string& create_msg(std::string& s, int msg_num)
+ static std::string& create_msg(std::string& s, int msg_num, int len)
{
std::stringstream ss;
- ss << "Message_" << std::setfill('0') << std::setw(4) << msg_num;
- ss << "_4567890123456789012345678901234567890";
- ss << "12345678901234567890123456789012345678901234567890"; // 100 chars long (2 dblks)
+ ss << "Message_" << std::setfill('0') << std::setw(4) << msg_num << "_";
+ for (int i=14; i<=len; i++)
+ ss << (char)('0' + i%10);
s.assign(ss.str());
return s;
}
+
+ static std::string& create_xid(std::string& s, int msg_num, int len)
+ {
+ std::stringstream ss;
+ ss << "XID_" << std::setfill('0') << std::setw(4) << msg_num << "_";
+ for (int i=9; i<len; i++)
+ ss << (char)('a' + i%26);
+ s.assign(ss.str());
+ return s;
+ }
void runEncodeTest(const unsigned num_msgs, const unsigned min_msg_size,
const unsigned max_msg_szie, const bool auto_deq, const unsigned min_xid_size,
Modified: store/trunk/cpp/tests/jrnl/rtest
===================================================================
--- store/trunk/cpp/tests/jrnl/rtest 2007-10-10 18:39:30 UTC (rev 1000)
+++ store/trunk/cpp/tests/jrnl/rtest 2007-10-10 21:36:41 UTC (rev 1001)
@@ -30,8 +30,8 @@
NUM_JFILES=8
VG_ITERATIONS=1
-#VG_NORM_FILESIZE=11
-VG_NORM_FILESIZE=18 # RHEL5 triggers extra valgrind messages when pthreads are in use
+VG_NORM_FILESIZE=11
+#VG_NORM_FILESIZE=18 # RHEL5 triggers extra valgrind messages when pthreads are in use
# Write test
W_DO_TEST=T
@@ -58,8 +58,8 @@
RM_DIR="${RM} -rf"
TEST_PROG="./jtest"
CHK_PROG="./janalyze.py"
-VALGRIND="valgrind -q --track-fds=yes --leak-check=full --leak-resolution=high --show-reachable=yes"
-#VALGRIND="valgrind -q --track-fds=yes --leak-check=full --leak-resolution=high --show-reachable=yes --suppressions=/usr/lib/valgrind/glibc-2.5.supp"
+#VALGRIND="valgrind -q --track-fds=yes --leak-check=full --leak-resolution=high --show-reachable=yes"
+VALGRIND="valgrind -q --track-fds=yes --leak-check=full --leak-resolution=high --show-reachable=yes --suppressions=/usr/lib/valgrind/glibc-2.5.supp"
MAKE="make -f Makefile.rtest"
18 years, 6 months
rhmessaging commits: r1000 - mgmt/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-10 14:39:30 -0400 (Wed, 10 Oct 2007)
New Revision: 1000
Modified:
mgmt/cumin/python/cumin/cluster.py
mgmt/cumin/python/cumin/cluster.strings
mgmt/cumin/python/cumin/exchange.py
mgmt/cumin/python/cumin/page.py
mgmt/cumin/python/cumin/queue.py
mgmt/cumin/python/cumin/server.py
mgmt/cumin/python/cumin/virtualhost.py
mgmt/cumin/python/cumin/virtualhost.strings
Log:
Rename "Virtual Host" to "Functional Host" in the UI, to avoid
confusion.
Modified: mgmt/cumin/python/cumin/cluster.py
===================================================================
--- mgmt/cumin/python/cumin/cluster.py 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/cluster.py 2007-10-10 18:39:30 UTC (rev 1000)
@@ -100,7 +100,7 @@
class VirtualHostTab(VirtualHostSet):
def render_title(self, session, cluster):
- return "Virtual Hosts (%i)" % len(cluster.virtual_host_items())
+ return "Functional Hosts (%i)" % len(cluster.virtual_host_items())
def get_items(self, session, cluster):
return sorted(cluster.virtual_host_items(), cmp, lambda x: x.name)
Modified: mgmt/cumin/python/cumin/cluster.strings
===================================================================
--- mgmt/cumin/python/cumin/cluster.strings 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/cluster.strings 2007-10-10 18:39:30 UTC (rev 1000)
@@ -11,6 +11,8 @@
<td>{item_link}</td>
</tr>
+
+
[ClusterView.html]
<div class="oblock">
<div class="mstatus green" id="{id}">
@@ -56,12 +58,12 @@
[VirtualHostTab.html]
<ul class="actions">
- <li><a href="">Add Virtual Host</a></li>
+ <li><a href="">Add Functional Host</a></li>
</ul>
<table class="mobjects">
<tr>
- <th>Virtual Host</th>
+ <th>Functional Host</th>
<th>Configuration</th>
<th></th>
</tr>
Modified: mgmt/cumin/python/cumin/exchange.py
===================================================================
--- mgmt/cumin/python/cumin/exchange.py 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/exchange.py 2007-10-10 18:39:30 UTC (rev 1000)
@@ -220,7 +220,7 @@
session.set_redirect(branch.marshal())
def render_title(self, session, vhost):
- return "Add Exchange to Virtual Host '%s'" % vhost.name
+ return "Add Exchange to Functional Host '%s'" % vhost.name
class ExchangeEdit(ExchangeForm):
def on_cancel(self, session, exchange):
Modified: mgmt/cumin/python/cumin/page.py
===================================================================
--- mgmt/cumin/python/cumin/page.py 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/page.py 2007-10-10 18:39:30 UTC (rev 1000)
@@ -192,7 +192,7 @@
self.add_child(self.groups)
def render_title(self, session, model):
- return "Virtual Hosts"
+ return "Functional Hosts"
class VirtualHostTemplateSet(VirtualHostSet):
def get_items(self, session, model):
Modified: mgmt/cumin/python/cumin/queue.py
===================================================================
--- mgmt/cumin/python/cumin/queue.py 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/queue.py 2007-10-10 18:39:30 UTC (rev 1000)
@@ -267,7 +267,7 @@
session.set_redirect(branch.marshal())
def render_title(self, session, vhost):
- return "Add Queue to Virtual Host '%s'" % vhost.name
+ return "Add Queue to Functional Host '%s'" % vhost.name
class QueueEdit(QueueForm):
def on_cancel(self, session, queue):
Modified: mgmt/cumin/python/cumin/server.py
===================================================================
--- mgmt/cumin/python/cumin/server.py 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/server.py 2007-10-10 18:39:30 UTC (rev 1000)
@@ -127,7 +127,7 @@
class ServerVirtualHosts(VirtualHostSet):
def render_title(self, session, server):
- return "Virtual Hosts (%i)" % len(server.virtual_host_items())
+ return "Functional Hosts (%i)" % len(server.virtual_host_items())
def get_items(self, session, server):
return sorted(server.virtual_host_items(), cmp, lambda x: x.name)
Modified: mgmt/cumin/python/cumin/virtualhost.py
===================================================================
--- mgmt/cumin/python/cumin/virtualhost.py 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/virtualhost.py 2007-10-10 18:39:30 UTC (rev 1000)
@@ -10,7 +10,7 @@
class VirtualHostSet(ItemSet):
def render_title(self, session, model):
- return "Virtual Hosts (%i)" % len(model.get_virtual_hosts())
+ return "Functional Hosts (%i)" % len(model.get_virtual_hosts())
def get_items(self, session, model):
return sorted(model.get_virtual_hosts(), cmp, lambda x: x.name)
@@ -85,7 +85,7 @@
return branch.marshal()
def render_title(self, session, vhost):
- return "Virtual Host '%s'" % vhost.name
+ return "Functional Host '%s'" % vhost.name
class VirtualHostView(Widget):
def __init__(self, app, name):
@@ -104,7 +104,7 @@
self.tabs.add_child(self.log)
def render_title(self, session, vhost):
- return "Virtual Host '%s'" % vhost.name
+ return "Functional Host '%s'" % vhost.name
def render_name(self, session, vhost):
return vhost.name
Modified: mgmt/cumin/python/cumin/virtualhost.strings
===================================================================
--- mgmt/cumin/python/cumin/virtualhost.strings 2007-10-10 18:12:30 UTC (rev 999)
+++ mgmt/cumin/python/cumin/virtualhost.strings 2007-10-10 18:39:30 UTC (rev 1000)
@@ -1,7 +1,7 @@
[VirtualHostSet.html]
<table class="VirtualHostSet mobjects">
<tr>
- <th>Virtual Host</th>
+ <th>Functional Host</th>
<th>Configuration</th>
<th>Status</th>
</tr>
18 years, 6 months
rhmessaging commits: r999 - mgmt/cumin/python/wooly.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2007-10-10 14:12:30 -0400 (Wed, 10 Oct 2007)
New Revision: 999
Modified:
mgmt/cumin/python/wooly/__init__.py
mgmt/cumin/python/wooly/forms.py
mgmt/cumin/python/wooly/widgets.py
Log:
Cleans up state handling. Makes some more ui state
session-persistent, so that for instance the tab you navigated from is
still visible when you return.
Modified: mgmt/cumin/python/wooly/__init__.py
===================================================================
--- mgmt/cumin/python/wooly/__init__.py 2007-10-10 17:04:51 UTC (rev 998)
+++ mgmt/cumin/python/wooly/__init__.py 2007-10-10 18:12:30 UTC (rev 999)
@@ -86,11 +86,11 @@
if str:
return str
- def scope(self, session, params):
- params.update(self.parameters)
+ def save_parameters(self, session):
+ session.save_parameters(self.parameters)
for child in self.children:
- child.scope(session, params)
+ child.save_parameters(session)
def process(self, session, object):
if session.debug:
@@ -180,6 +180,10 @@
if self.object:
return self.object.get(session)
+ def save_parameters(self, session):
+ if self.get_object(session) != None:
+ super(Frame, self).save_parameters(session)
+
def do_process(self, session, object):
new_object = self.get_object(session)
super(Frame, self).do_process(session, new_object)
@@ -343,6 +347,7 @@
self.values = dict()
self.errors = dict() # widget => list of str
self.redirect = None
+ self.saved_parameters = list()
self.debug = True
@@ -410,6 +415,9 @@
def set_redirect(self, redirect):
self.redirect = redirect
+ def save_parameters(self, params):
+ self.saved_parameters += params
+
def marshal(self):
return self.marshal_page() + "?" + self.marshal_url_vars()
@@ -417,11 +425,11 @@
return self.get_page().name
def marshal_url_vars(self, separator=";"):
- params = set()
- self.get_page().scope(self, params)
+ self.get_page().save_parameters(self)
+
vars = list()
- for param in params:
+ for param in self.saved_parameters:
key = param.path()
value = self.get(key)
default = param.get_default(self)
@@ -451,6 +459,8 @@
self.set_page(self.app.get_page(string))
def unmarshal_url_vars(self, string, separator=";"):
+ self.get_page().save_parameters(self)
+
vars = string.split(separator)
for var in vars:
Modified: mgmt/cumin/python/wooly/forms.py
===================================================================
--- mgmt/cumin/python/wooly/forms.py 2007-10-10 17:04:51 UTC (rev 998)
+++ mgmt/cumin/python/wooly/forms.py 2007-10-10 18:12:30 UTC (rev 999)
@@ -17,8 +17,9 @@
def render_hidden_inputs(self, session, object):
writer = Writer()
- params = set()
- session.get_page().scope(session, params)
+ session.get_page().save_parameters(session)
+
+ params = set(session.saved_parameters)
params.difference_update(self.form_params)
for param in params:
Modified: mgmt/cumin/python/wooly/widgets.py
===================================================================
--- mgmt/cumin/python/wooly/widgets.py 2007-10-10 17:04:51 UTC (rev 998)
+++ mgmt/cumin/python/wooly/widgets.py 2007-10-10 18:12:30 UTC (rev 999)
@@ -28,14 +28,6 @@
return mode
- def scope(self, session, params):
- params.update(self.parameters)
-
- mode = self.get_selected_mode(session)
-
- if mode:
- mode.scope(session, params)
-
def do_process(self, session, object):
mode = self.get_selected_mode(session)
18 years, 6 months