rhmessaging commits: r3968 - mgmt/newdata/cumin/python/cumin/grid.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2010-05-13 10:01:42 -0400 (Thu, 13 May 2010)
New Revision: 3968
Modified:
mgmt/newdata/cumin/python/cumin/grid/scheduler.py
mgmt/newdata/cumin/python/cumin/grid/submission.py
Log:
Fixed up SubmissionAddForm's Scheduler field
Modified: mgmt/newdata/cumin/python/cumin/grid/scheduler.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/scheduler.py 2010-05-13 11:35:46 UTC (rev 3967)
+++ mgmt/newdata/cumin/python/cumin/grid/scheduler.py 2010-05-13 14:01:42 UTC (rev 3968)
@@ -70,11 +70,9 @@
self.stop = DaemonSelectionStop(app, self, "SCHEDD")
class SchedulerSelectField(ScalarField):
- def __init__(self, app, name, pool):
+ def __init__(self, app, name):
super(SchedulerSelectField, self).__init__(app, name, None)
- self.pool = pool
-
self.param = SchedulerParameter(app, "param")
self.add_parameter(self.param)
@@ -97,17 +95,18 @@
class SchedulerOptions(OptionInputSet):
def do_get_items(self, session):
- pool = self.parent.pool.get(session)
+ collector = self.form.object.get(session)
- if pool:
- schedulers = list(Scheduler.selectBy(Pool=pool.id))
+ cls = self.app.model.mrg_grid.Scheduler
+ if collector:
+ schedulers = cls.get_selection(session.cursor, Pool=collector.Pool)
else:
- schedulers = list(Scheduler.select())
+ schedulers = cls.get_selection(session.cursor)
return schedulers
def render_item_value(self, session, item):
- return item.id
+ return item._id
def render_item_content(self, session, item):
return item.Name
Modified: mgmt/newdata/cumin/python/cumin/grid/submission.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/submission.py 2010-05-13 11:35:46 UTC (rev 3967)
+++ mgmt/newdata/cumin/python/cumin/grid/submission.py 2010-05-13 14:01:42 UTC (rev 3968)
@@ -94,13 +94,9 @@
def __init__(self, app, name, task):
super(SubmissionAddForm, self).__init__(app, name, task)
- # XXX
- self.pool = PoolParameter(app, "pool")
- self.add_parameter(self.pool)
-
from scheduler import SchedulerSelectField # XXX
- self.scheduler = SchedulerSelectField(app, "scheduler", self.pool)
+ self.scheduler = SchedulerSelectField(app, "scheduler")
self.scheduler.required = True
self.scheduler.help = "Create submission at this scheduler"
self.add_field(self.scheduler)
14 years, 7 months
rhmessaging commits: r3967 - store/trunk/cpp/lib.
by rhmessaging-commits@lists.jboss.org
Author: kpvdr
Date: 2010-05-13 07:35:46 -0400 (Thu, 13 May 2010)
New Revision: 3967
Modified:
store/trunk/cpp/lib/TxnCtxt.h
Log:
Removed unused lock
Modified: store/trunk/cpp/lib/TxnCtxt.h
===================================================================
--- store/trunk/cpp/lib/TxnCtxt.h 2010-05-12 21:50:17 UTC (rev 3966)
+++ store/trunk/cpp/lib/TxnCtxt.h 2010-05-13 11:35:46 UTC (rev 3967)
@@ -56,7 +56,6 @@
typedef std::auto_ptr<qpid::sys::Mutex::ScopedLock> AutoScopedLock;
ipqdef impactedQueues; // list of Queues used in the txn
- mutable qpid::sys::Mutex Lock;
IdSequence* loggedtx;
boost::intrusive_ptr<DataTokenImpl> dtokp;
AutoScopedLock globalHolder;
14 years, 7 months
rhmessaging commits: r3966 - mgmt/newdata/mint/python/mint.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2010-05-12 17:50:17 -0400 (Wed, 12 May 2010)
New Revision: 3966
Added:
mgmt/newdata/mint/python/mint/update.py
Removed:
mgmt/newdata/mint/python/mint/newupdate.py
Modified:
mgmt/newdata/mint/python/mint/expire.py
mgmt/newdata/mint/python/mint/main.py
mgmt/newdata/mint/python/mint/model.py
mgmt/newdata/mint/python/mint/vacuum.py
Log:
Rename the newupdate module to update, now that the old one is gone
Modified: mgmt/newdata/mint/python/mint/expire.py
===================================================================
--- mgmt/newdata/mint/python/mint/expire.py 2010-05-12 19:43:32 UTC (rev 3965)
+++ mgmt/newdata/mint/python/mint/expire.py 2010-05-12 21:50:17 UTC (rev 3966)
@@ -1,4 +1,4 @@
-from newupdate import *
+from update import *
from util import *
import mint
Modified: mgmt/newdata/mint/python/mint/main.py
===================================================================
--- mgmt/newdata/mint/python/mint/main.py 2010-05-12 19:43:32 UTC (rev 3965)
+++ mgmt/newdata/mint/python/mint/main.py 2010-05-12 21:50:17 UTC (rev 3966)
@@ -1,8 +1,8 @@
from database import MintDatabase
from expire import ExpireThread
from model import MintModel
-from newupdate import UpdateThread
from session import MintSession
+from update import UpdateThread
from vacuum import VacuumThread
from util import *
Modified: mgmt/newdata/mint/python/mint/model.py
===================================================================
--- mgmt/newdata/mint/python/mint/model.py 2010-05-12 19:43:32 UTC (rev 3965)
+++ mgmt/newdata/mint/python/mint/model.py 2010-05-12 21:50:17 UTC (rev 3966)
@@ -1,6 +1,6 @@
from rosemary.model import *
-from newupdate import *
+from update import *
from util import *
log = logging.getLogger("mint.model")
Deleted: mgmt/newdata/mint/python/mint/newupdate.py
===================================================================
--- mgmt/newdata/mint/python/mint/newupdate.py 2010-05-12 19:43:32 UTC (rev 3965)
+++ mgmt/newdata/mint/python/mint/newupdate.py 2010-05-12 21:50:17 UTC (rev 3966)
@@ -1,403 +0,0 @@
-import pickle
-
-from psycopg2 import IntegrityError, TimestampFromTicks
-from rosemary.model import *
-from util import *
-
-log = logging.getLogger("mint.newupdate")
-
-class UpdateThread(MintDaemonThread):
- def __init__(self, app):
- super(UpdateThread, self).__init__(app)
-
- self.conn = None
- self.stats = None
-
- self.updates = ConcurrentQueue()
-
- self.halt_on_error = True
-
- def init(self):
- self.conn = self.app.database.get_connection()
- self.stats = UpdateStats()
-
- def enqueue(self, update):
- update.thread = self
-
- self.updates.put(update)
-
- if self.stats:
- self.stats.enqueued += 1
-
- # This is an attempt to yield from the enqueueing thread (this
- # method's caller) to the update thread
-
- if self.updates.qsize() > 1000:
- sleep(0.1)
-
- def run(self):
- while True:
- if self.stop_requested:
- break
-
- try:
- update = self.updates.get(True, 1)
- except Empty:
- continue
-
- if self.stats:
- self.stats.dequeued += 1
-
- update.process(self.conn, self.stats)
-
-class UpdateStats(object):
- def __init__(self):
- self.enqueued = 0
- self.dequeued = 0
-
- self.updated = 0
- self.deleted = 0
- self.dropped = 0
-
- self.samples_updated = 0
- self.samples_expired = 0
- self.samples_dropped = 0
-
-class Update(object):
- def __init__(self, model):
- self.model = model
-
- def process(self, conn, stats):
- log.debug("Processing %s", self)
-
- try:
- self.do_process(conn, stats)
-
- conn.commit()
- except UpdateException, e:
- log.info("Update could not be completed; %s", e)
-
- conn.rollback()
- except:
- log.exception("Update failed")
-
- conn.rollback()
-
- if self.model.app.update_thread.halt_on_error:
- raise
-
- def do_process(self, conn, stats):
- raise Exception("Not implemented")
-
- def __repr__(self):
- return self.__class__.__name__
-
-class ObjectUpdate(Update):
- def __init__(self, model, agent, obj):
- super(ObjectUpdate, self).__init__(model)
-
- self.agent = agent
- self.object = obj
-
- def do_process(self, conn, stats):
- cls = self.get_class()
- obj = self.get_object(cls, self.object.getObjectId().objectName)
-
- columns = list()
-
- self.process_headers(obj, columns)
- self.process_properties(obj, columns)
-
- cursor = conn.cursor()
-
- try:
- obj.save(cursor, columns)
- finally:
- cursor.close()
-
- stats.updated += 1
-
- def get_class(self):
- class_key = self.object.getClassKey()
-
- name = class_key.getPackageName()
-
- try:
- pkg = self.model._packages_by_name[name]
- except KeyError:
- raise PackageUnknown(name)
-
- name = class_key.getClassName()
- name = name[0].upper() + name[1:] # /me shakes fist
-
- try:
- cls = pkg._classes_by_name[name]
- except KeyError:
- raise ClassUnknown(name)
-
- return cls
-
- def get_object(self, cls, object_id):
- try:
- return self.agent.objects_by_id[object_id]
- except KeyError:
- conn = self.model.app.database.get_connection()
- cursor = conn.cursor()
-
- obj = RosemaryObject(cls, None)
- obj._qmf_agent_id = self.agent.id
- obj._qmf_object_id = object_id
-
- try:
- try:
- cls.load_object_by_qmf_id(cursor, obj)
- except RosemaryNotFound:
- obj._id = cls.get_new_id(cursor)
- finally:
- cursor.close()
-
- self.agent.objects_by_id[object_id] = obj
-
- return obj
-
- def process_headers(self, obj, columns):
- table = obj._class.sql_table
-
- update_time, create_time, delete_time = self.object.getTimestamps()
-
- update_time = datetime.fromtimestamp(update_time / 1000000000)
- create_time = datetime.fromtimestamp(create_time / 1000000000)
-
- if delete_time:
- delete_time = datetime.fromtimestamp(delete_time / 1000000000)
-
- if obj._sync_time:
- # This object is already in the database
-
- obj._qmf_update_time = update_time
- columns.append(table._qmf_update_time)
-
- # XXX session_id may have changed too?
- else:
- obj._qmf_agent_id = self.agent.id
- obj._qmf_object_id = self.object.getObjectId().objectName
- obj._qmf_session_id = str(self.object.getObjectId().getSequence())
- obj._qmf_class_key = str(self.object.getClassKey())
- obj._qmf_update_time = update_time
- obj._qmf_create_time = create_time
-
- columns.append(table._id)
- columns.append(table._qmf_agent_id)
- columns.append(table._qmf_object_id)
- columns.append(table._qmf_session_id)
- columns.append(table._qmf_class_key)
- columns.append(table._qmf_update_time)
- columns.append(table._qmf_create_time)
-
- def process_properties(self, obj, columns):
- cls = obj._class
-
- for prop, value in self.object.getProperties():
- try:
- if prop.type == 10:
- col, nvalue = self.process_reference(cls, prop, value)
- else:
- col, nvalue = self.process_value(cls, prop, value)
- except MappingException, e:
- log.debug(e)
- continue
-
- # XXX This optimization will be obsolete when QMF does it
- # instead
-
- if nvalue == getattr(obj, col.name):
- continue
-
- setattr(obj, col.name, nvalue)
- columns.append(col)
-
- def process_reference(self, cls, prop, value):
- try:
- ref = cls._references_by_name[prop.name]
- except KeyError:
- raise MappingException("Reference %s is unknown" % prop.name)
-
- if not ref.sql_column:
- raise MappingException("Reference %s has no column" % ref.name)
-
- col = ref.sql_column
-
- if value:
- try:
- that_id = str(value.objectName)
- except:
- raise MappingException("XXX ref isn't an oid")
-
- that = self.get_object(ref.that_cls, that_id)
-
- if not that._sync_time:
- msg = "Referenced object %s hasn't appeared yet"
- raise MappingException(msg % that)
-
- value = that._id
-
- return col, value
-
- def process_value(self, cls, prop, value):
- try:
- col = cls._properties_by_name[prop.name].sql_column
- except KeyError:
- raise MappingException("Property %s is unknown" % prop)
-
- if value is not None:
- value = self.transform_value(prop, value)
-
- return col, value
-
- def transform_value(self, attr, value):
- if attr.type == 8: # absTime
- if value == 0:
- value = None
- else:
- value = datetime.fromtimestamp(value / 1000000000)
- # XXX value = TimestampFromTicks(value / 1000000000)
- elif attr.type == 15: # map
- value = pickle.dumps(value)
- elif attr.type == 10: # objId
- value = str(value)
- elif attr.type == 14: # uuid
- value = str(value)
-
- return value
-
- def __repr__(self):
- name = self.__class__.__name__
- cls = self.object.getClassKey().getClassName()
- id = self.object.getObjectId().objectName
-
- return "%s(%s,%s,%s)" % (name, self.agent.id, cls, id)
-
-class ObjectDelete(ObjectUpdate):
- def do_process(self, conn, stats):
- cls = self.get_class()
- obj = self.get_object(cls, self.object.getObjectId().objectName)
-
- cursor = conn.cursor()
-
- try:
- cls.sql_delete.execute(cursor, (), obj.__dict__)
- finally:
- cursor.close()
-
- try:
- del self.agent.objects_by_id[self.object.getObjectId().objectName]
- except KeyError:
- pass
-
- stats.deleted += 1
-
-class ObjectAddSample(ObjectUpdate):
- def do_process(self, conn, stats):
- cls = self.get_class()
- obj = self.get_object(cls, self.object.getObjectId().objectName)
-
- if not cls._statistics:
- stats.samples_dropped += 1; return
-
- if not obj._sync_time:
- stats.samples_dropped += 1; return
-
- if stats.enqueued - stats.dequeued > 100:
- if obj._qmf_update_time > datetime.now() - timedelta(seconds=60):
- stats.samples_dropped += 1; return
-
- update_time, create_time, delete_time = self.object.getTimestamps()
-
- update_time = datetime.fromtimestamp(update_time / 1000000000)
-
- update_columns = list()
- update_columns.append(cls.sql_table._qmf_update_time)
-
- insert_columns = list()
- insert_columns.append(cls.sql_samples_table._qmf_update_time)
-
- obj._qmf_update_time = update_time
-
- self.process_samples(obj, update_columns, insert_columns)
-
- cursor = conn.cursor()
-
- try:
- obj.save(cursor, update_columns)
-
- cls.sql_samples_insert.execute \
- (cursor, insert_columns, obj.__dict__)
- finally:
- cursor.close()
-
- stats.samples_updated += 1
-
- def process_samples(self, obj, update_columns, insert_columns):
- for stat, value in self.object.getStatistics():
- try:
- col = obj._class._statistics_by_name[stat.name].sql_column
- except KeyError:
- log.debug("Statistic %s is unknown", stat)
-
- continue
-
- if value is not None:
- value = self.transform_value(stat, value)
-
- # Don't write unchanged values
- #
- # XXX This optimization will be obsolete when QMF does it
- # instead
-
- if value != getattr(obj, col.name):
- update_columns.append(col)
-
- insert_columns.append(col)
-
- setattr(obj, col.name, value)
-
-class AgentDelete(Update):
- def __init__(self, model, agent):
- super(AgentDelete, self).__init__(model)
-
- self.agent = agent
-
- def do_process(self, conn, stats):
- print "Ahoy!"
-
- cursor = conn.cursor()
-
- id = self.agent.id
-
- try:
- for pkg in self.model._packages:
- for cls in pkg._classes:
- for obj in cls.get_selection(cursor, _qmf_agent_id=id):
- obj.delete(cursor)
- print "Bam!", obj
- finally:
- cursor.close()
-
-class UpdateException(Exception):
- def __init__(self, name):
- self.name = name
-
- def __str__(self):
- return "%s(%s)" % (self.__class__.__name__, self.name)
-
-class PackageUnknown(UpdateException):
- pass
-
-class ClassUnknown(UpdateException):
- pass
-
-class ObjectUnknown(UpdateException):
- pass
-
-class MappingException(Exception):
- pass
Copied: mgmt/newdata/mint/python/mint/update.py (from rev 3963, mgmt/newdata/mint/python/mint/newupdate.py)
===================================================================
--- mgmt/newdata/mint/python/mint/update.py (rev 0)
+++ mgmt/newdata/mint/python/mint/update.py 2010-05-12 21:50:17 UTC (rev 3966)
@@ -0,0 +1,403 @@
+import pickle
+
+from psycopg2 import IntegrityError, TimestampFromTicks
+from rosemary.model import *
+from util import *
+
+log = logging.getLogger("mint.update")
+
+class UpdateThread(MintDaemonThread):
+ def __init__(self, app):
+ super(UpdateThread, self).__init__(app)
+
+ self.conn = None
+ self.stats = None
+
+ self.updates = ConcurrentQueue()
+
+ self.halt_on_error = True
+
+ def init(self):
+ self.conn = self.app.database.get_connection()
+ self.stats = UpdateStats()
+
+ def enqueue(self, update):
+ update.thread = self
+
+ self.updates.put(update)
+
+ if self.stats:
+ self.stats.enqueued += 1
+
+ # This is an attempt to yield from the enqueueing thread (this
+ # method's caller) to the update thread
+
+ if self.updates.qsize() > 1000:
+ sleep(0.1)
+
+ def run(self):
+ while True:
+ if self.stop_requested:
+ break
+
+ try:
+ update = self.updates.get(True, 1)
+ except Empty:
+ continue
+
+ if self.stats:
+ self.stats.dequeued += 1
+
+ update.process(self.conn, self.stats)
+
+class UpdateStats(object):
+ def __init__(self):
+ self.enqueued = 0
+ self.dequeued = 0
+
+ self.updated = 0
+ self.deleted = 0
+ self.dropped = 0
+
+ self.samples_updated = 0
+ self.samples_expired = 0
+ self.samples_dropped = 0
+
+class Update(object):
+ def __init__(self, model):
+ self.model = model
+
+ def process(self, conn, stats):
+ log.debug("Processing %s", self)
+
+ try:
+ self.do_process(conn, stats)
+
+ conn.commit()
+ except UpdateException, e:
+ log.info("Update could not be completed; %s", e)
+
+ conn.rollback()
+ except:
+ log.exception("Update failed")
+
+ conn.rollback()
+
+ if self.model.app.update_thread.halt_on_error:
+ raise
+
+ def do_process(self, conn, stats):
+ raise Exception("Not implemented")
+
+ def __repr__(self):
+ return self.__class__.__name__
+
+class ObjectUpdate(Update):
+ def __init__(self, model, agent, obj):
+ super(ObjectUpdate, self).__init__(model)
+
+ self.agent = agent
+ self.object = obj
+
+ def do_process(self, conn, stats):
+ cls = self.get_class()
+ obj = self.get_object(cls, self.object.getObjectId().objectName)
+
+ columns = list()
+
+ self.process_headers(obj, columns)
+ self.process_properties(obj, columns)
+
+ cursor = conn.cursor()
+
+ try:
+ obj.save(cursor, columns)
+ finally:
+ cursor.close()
+
+ stats.updated += 1
+
+ def get_class(self):
+ class_key = self.object.getClassKey()
+
+ name = class_key.getPackageName()
+
+ try:
+ pkg = self.model._packages_by_name[name]
+ except KeyError:
+ raise PackageUnknown(name)
+
+ name = class_key.getClassName()
+ name = name[0].upper() + name[1:] # /me shakes fist
+
+ try:
+ cls = pkg._classes_by_name[name]
+ except KeyError:
+ raise ClassUnknown(name)
+
+ return cls
+
+ def get_object(self, cls, object_id):
+ try:
+ return self.agent.objects_by_id[object_id]
+ except KeyError:
+ conn = self.model.app.database.get_connection()
+ cursor = conn.cursor()
+
+ obj = RosemaryObject(cls, None)
+ obj._qmf_agent_id = self.agent.id
+ obj._qmf_object_id = object_id
+
+ try:
+ try:
+ cls.load_object_by_qmf_id(cursor, obj)
+ except RosemaryNotFound:
+ obj._id = cls.get_new_id(cursor)
+ finally:
+ cursor.close()
+
+ self.agent.objects_by_id[object_id] = obj
+
+ return obj
+
+ def process_headers(self, obj, columns):
+ table = obj._class.sql_table
+
+ update_time, create_time, delete_time = self.object.getTimestamps()
+
+ update_time = datetime.fromtimestamp(update_time / 1000000000)
+ create_time = datetime.fromtimestamp(create_time / 1000000000)
+
+ if delete_time:
+ delete_time = datetime.fromtimestamp(delete_time / 1000000000)
+
+ if obj._sync_time:
+ # This object is already in the database
+
+ obj._qmf_update_time = update_time
+ columns.append(table._qmf_update_time)
+
+ # XXX session_id may have changed too?
+ else:
+ obj._qmf_agent_id = self.agent.id
+ obj._qmf_object_id = self.object.getObjectId().objectName
+ obj._qmf_session_id = str(self.object.getObjectId().getSequence())
+ obj._qmf_class_key = str(self.object.getClassKey())
+ obj._qmf_update_time = update_time
+ obj._qmf_create_time = create_time
+
+ columns.append(table._id)
+ columns.append(table._qmf_agent_id)
+ columns.append(table._qmf_object_id)
+ columns.append(table._qmf_session_id)
+ columns.append(table._qmf_class_key)
+ columns.append(table._qmf_update_time)
+ columns.append(table._qmf_create_time)
+
+ def process_properties(self, obj, columns):
+ cls = obj._class
+
+ for prop, value in self.object.getProperties():
+ try:
+ if prop.type == 10:
+ col, nvalue = self.process_reference(cls, prop, value)
+ else:
+ col, nvalue = self.process_value(cls, prop, value)
+ except MappingException, e:
+ log.debug(e)
+ continue
+
+ # XXX This optimization will be obsolete when QMF does it
+ # instead
+
+ if nvalue == getattr(obj, col.name):
+ continue
+
+ setattr(obj, col.name, nvalue)
+ columns.append(col)
+
+ def process_reference(self, cls, prop, value):
+ try:
+ ref = cls._references_by_name[prop.name]
+ except KeyError:
+ raise MappingException("Reference %s is unknown" % prop.name)
+
+ if not ref.sql_column:
+ raise MappingException("Reference %s has no column" % ref.name)
+
+ col = ref.sql_column
+
+ if value:
+ try:
+ that_id = str(value.objectName)
+ except:
+ raise MappingException("XXX ref isn't an oid")
+
+ that = self.get_object(ref.that_cls, that_id)
+
+ if not that._sync_time:
+ msg = "Referenced object %s hasn't appeared yet"
+ raise MappingException(msg % that)
+
+ value = that._id
+
+ return col, value
+
+ def process_value(self, cls, prop, value):
+ try:
+ col = cls._properties_by_name[prop.name].sql_column
+ except KeyError:
+ raise MappingException("Property %s is unknown" % prop)
+
+ if value is not None:
+ value = self.transform_value(prop, value)
+
+ return col, value
+
+ def transform_value(self, attr, value):
+ if attr.type == 8: # absTime
+ if value == 0:
+ value = None
+ else:
+ value = datetime.fromtimestamp(value / 1000000000)
+ # XXX value = TimestampFromTicks(value / 1000000000)
+ elif attr.type == 15: # map
+ value = pickle.dumps(value)
+ elif attr.type == 10: # objId
+ value = str(value)
+ elif attr.type == 14: # uuid
+ value = str(value)
+
+ return value
+
+ def __repr__(self):
+ name = self.__class__.__name__
+ cls = self.object.getClassKey().getClassName()
+ id = self.object.getObjectId().objectName
+
+ return "%s(%s,%s,%s)" % (name, self.agent.id, cls, id)
+
+class ObjectDelete(ObjectUpdate):
+ def do_process(self, conn, stats):
+ cls = self.get_class()
+ obj = self.get_object(cls, self.object.getObjectId().objectName)
+
+ cursor = conn.cursor()
+
+ try:
+ cls.sql_delete.execute(cursor, (), obj.__dict__)
+ finally:
+ cursor.close()
+
+ try:
+ del self.agent.objects_by_id[self.object.getObjectId().objectName]
+ except KeyError:
+ pass
+
+ stats.deleted += 1
+
+class ObjectAddSample(ObjectUpdate):
+ def do_process(self, conn, stats):
+ cls = self.get_class()
+ obj = self.get_object(cls, self.object.getObjectId().objectName)
+
+ if not cls._statistics:
+ stats.samples_dropped += 1; return
+
+ if not obj._sync_time:
+ stats.samples_dropped += 1; return
+
+ if stats.enqueued - stats.dequeued > 100:
+ if obj._qmf_update_time > datetime.now() - timedelta(seconds=60):
+ stats.samples_dropped += 1; return
+
+ update_time, create_time, delete_time = self.object.getTimestamps()
+
+ update_time = datetime.fromtimestamp(update_time / 1000000000)
+
+ update_columns = list()
+ update_columns.append(cls.sql_table._qmf_update_time)
+
+ insert_columns = list()
+ insert_columns.append(cls.sql_samples_table._qmf_update_time)
+
+ obj._qmf_update_time = update_time
+
+ self.process_samples(obj, update_columns, insert_columns)
+
+ cursor = conn.cursor()
+
+ try:
+ obj.save(cursor, update_columns)
+
+ cls.sql_samples_insert.execute \
+ (cursor, insert_columns, obj.__dict__)
+ finally:
+ cursor.close()
+
+ stats.samples_updated += 1
+
+ def process_samples(self, obj, update_columns, insert_columns):
+ for stat, value in self.object.getStatistics():
+ try:
+ col = obj._class._statistics_by_name[stat.name].sql_column
+ except KeyError:
+ log.debug("Statistic %s is unknown", stat)
+
+ continue
+
+ if value is not None:
+ value = self.transform_value(stat, value)
+
+ # Don't write unchanged values
+ #
+ # XXX This optimization will be obsolete when QMF does it
+ # instead
+
+ if value != getattr(obj, col.name):
+ update_columns.append(col)
+
+ insert_columns.append(col)
+
+ setattr(obj, col.name, value)
+
+class AgentDelete(Update):
+ def __init__(self, model, agent):
+ super(AgentDelete, self).__init__(model)
+
+ self.agent = agent
+
+ def do_process(self, conn, stats):
+ print "Ahoy!"
+
+ cursor = conn.cursor()
+
+ id = self.agent.id
+
+ try:
+ for pkg in self.model._packages:
+ for cls in pkg._classes:
+ for obj in cls.get_selection(cursor, _qmf_agent_id=id):
+ obj.delete(cursor)
+ print "Bam!", obj
+ finally:
+ cursor.close()
+
+class UpdateException(Exception):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.name)
+
+class PackageUnknown(UpdateException):
+ pass
+
+class ClassUnknown(UpdateException):
+ pass
+
+class ObjectUnknown(UpdateException):
+ pass
+
+class MappingException(Exception):
+ pass
Modified: mgmt/newdata/mint/python/mint/vacuum.py
===================================================================
--- mgmt/newdata/mint/python/mint/vacuum.py 2010-05-12 19:43:32 UTC (rev 3965)
+++ mgmt/newdata/mint/python/mint/vacuum.py 2010-05-12 21:50:17 UTC (rev 3966)
@@ -1,4 +1,4 @@
-from newupdate import *
+from update import *
from util import *
log = logging.getLogger("mint.vacuum")
14 years, 7 months
rhmessaging commits: r3965 - mgmt/newdata/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2010-05-12 15:43:32 -0400 (Wed, 12 May 2010)
New Revision: 3965
Modified:
mgmt/newdata/cumin/python/cumin/parameters.py
Log:
Removed unused argument on ObjectAssociatedAttribute
Modified: mgmt/newdata/cumin/python/cumin/parameters.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/parameters.py 2010-05-12 19:15:57 UTC (rev 3964)
+++ mgmt/newdata/cumin/python/cumin/parameters.py 2010-05-12 19:43:32 UTC (rev 3965)
@@ -11,7 +11,7 @@
return login.user
class ObjectAssociateAttribute(Attribute):
- def __init__(self, app, name, object, cls):
+ def __init__(self, app, name, object):
super(ObjectAssociateAttribute, self).__init__(app, name)
self.object = object
14 years, 7 months
rhmessaging commits: r3964 - in mgmt/newdata/cumin/python/cumin: grid and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2010-05-12 15:15:57 -0400 (Wed, 12 May 2010)
New Revision: 3964
Modified:
mgmt/newdata/cumin/python/cumin/grid/pool.py
mgmt/newdata/cumin/python/cumin/grid/slot.py
mgmt/newdata/cumin/python/cumin/grid/slot.strings
mgmt/newdata/cumin/python/cumin/parameters.py
Log:
Converted pool/slot overview and visualization to qmf v2.
Modified: mgmt/newdata/cumin/python/cumin/grid/pool.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/pool.py 2010-05-11 20:55:59 UTC (rev 3963)
+++ mgmt/newdata/cumin/python/cumin/grid/pool.py 2010-05-12 19:15:57 UTC (rev 3964)
@@ -100,18 +100,15 @@
def do_process(self, session):
#self.limits.limit_count.process(session)
- super(PoolView, self).do_process(session)
+ super(PoolFrame, self).do_process(session)
class PoolOverview(Widget):
- def __init__(self, app, name, pool):
+ def __init__(self, app, name, collector):
super(PoolOverview, self).__init__(app, name)
- self.pool = pool
+ self.collector = collector
- self.collector = PoolCollectorAttribute(app, "collector", self.pool)
- self.add_attribute(self.collector)
-
- self.grid = PoolGridAttribute(app, "grid", self.pool)
+ self.grid = CollectorGridAttribute(app, "grid", self.collector)
self.add_attribute(self.grid)
stats = CollectorGeneralStatSet(app, "collector_stats", self.collector)
@@ -120,10 +117,10 @@
stats = GridStats(app, "grid_stats", self.grid)
self.add_child(stats)
- slot_map = PoolSlotMap(app, "slot_png", self.pool)
+ slot_map = PoolSlotMap(app, "slot_png", self.collector)
self.add_child(slot_map)
- slot_vis = PoolSlotFlashVis(app, "slot_map", self.pool)
+ slot_vis = PoolSlotFlashVis(app, "slot_map", self.collector)
self.add_child(slot_vis)
chart = self.JobStackedChart(app, "jobs", self.collector)
@@ -204,50 +201,52 @@
return "Grid submits"
class PoolSlotMap(SlotMap):
- def __init__(self, app, name, pool):
+ def __init__(self, app, name, collector):
super(PoolSlotMap, self).__init__(app, name)
- self.pool = pool
+ self.collector = collector
def do_process(self, session):
super(PoolSlotMap, self).do_process(session)
- pool = self.pool.get(session)
+ collector = self.collector.get(session)
- self.slots.add_where_expr(session, "s.pool = '%s'", pool.id)
+ self.slots.add_where_expr(session, "\"Pool\" = '%s'", collector.Pool)
def render_image_href(self, session):
- pool = self.pool.get(session)
+ collector = self.collector.get(session)
- page = main.module.pool_slots_page
+ page = self.app.grid.pool_slots_page
sess = Session(page)
- page.pool.set(sess, pool)
+ page.collector.set(sess, collector)
return sess.marshal()
def get_scheduler_select(self, session):
- pool = self.pool.get(session)
- return "pool='%s'" % pool.id
+ pass
+ #pool = self.pool.get(session)
class PoolSlotMapPage(SlotMapPage):
def __init__(self, app, name):
- self.pool = PoolParameter(app, "id")
- super(PoolSlotMapPage, self).__init__(app, name, self.pool, "Pool")
+ cls = app.model.mrg_grid.Collector
+ self.collector = RosemaryObjectParameter(app, "id", cls)
- self.add_parameter(self.pool)
+ super(PoolSlotMapPage, self).__init__(app, name, self.collector, "Pool")
+ self.add_parameter(self.collector)
+
def do_process(self, session):
super(PoolSlotMapPage, self).do_process(session)
- pool = self.pool.get(session)
+ collector = self.collector.get(session)
# XXX The conditional is necessary because this page is
# overloaded to generate little dots, in which case it has no
# pool
- if pool:
- self.slots.add_where_expr(session, "s.pool = '%s'", pool.id)
+ if collector:
+ self.slots.add_where_expr(session, "\"Pool\" = '%s'", collector.Pool)
class PoolSlotFlashVis(PoolSlotMap):
def __init__(self, app, name, object):
@@ -259,34 +258,34 @@
self.fullpageable = True
def render_slots_href(self, session):
- pool = self.pool.get(session)
+ collector = self.collector.get(session)
- page = main.module.pool_slots_page
+ page = self.app.grid.pool_slots_page
sess = Session(page)
- page.pool.set(sess, pool)
+ page.collector.set(sess, collector)
page.json.set(sess, "slots")
page.groups.set(sess, [self.group_by.get(session)])
return sess.marshal()
def render_image_href(self, session):
- pool = self.pool.get(session)
+ collector = self.collector.get(session)
- page = main.module.pool_slots_page
+ page = self.app.grid.pool_slots_page
sess = Session(page)
- page.pool.set(sess, pool)
+ page.collector.set(sess, collector)
return sess.marshal()
def render_ctrl_href(self, session):
- pool = self.pool.get(session)
+ collector = self.collector.get(session)
- page = main.module.pool_slots_page
+ page = self.app.grid.pool_slots_page
sess = Session(page)
- page.pool.set(sess, pool)
+ page.collector.set(sess, collector)
page.json.set(sess, "ctrl")
page.groups.set(sess, [self.group_by.get(session)])
@@ -305,12 +304,12 @@
return self.fullpageable and "fullpageable" or ""
def render_fullpage_href(self, session):
- pool = self.pool.get(session)
+ collector = self.collector.get(session)
- page = main.module.pool_slots_fullpage
+ page = self.app.grid.pool_slots_fullpage
sess = Session(page)
- page.pool.set(sess, pool)
+ page.collector.set(sess, collector)
return sess.marshal()
@@ -318,8 +317,8 @@
def __init__(self, app, name):
super(PoolSlotFlashVis.GroupBySwitch, self).__init__(app, name)
- self.add_state("system", "By system")
- self.add_state("accounting_group", "By accounting group")
+ self.add_state("System", "By system")
+ self.add_state("AccountingGroup", "By accounting group")
self.add_state("None", "No Grouping")
def get_click(self, session, state):
@@ -331,10 +330,11 @@
def __init__(self, app, name):
super(PoolSlotFullPage, self).__init__(app, name)
- self.pool = PoolParameter(app, "id")
- self.add_parameter(self.pool)
+ cls = app.model.mrg_grid.Collector
+ self.collector = RosemaryObjectParameter(app, "id", cls)
+ self.add_parameter(self.collector)
- self.flash_chart = PoolSlotFlashVis(app, "chart", self.pool)
+ self.flash_chart = PoolSlotFlashVis(app, "chart", self.collector)
self.flash_chart.fullpageable = False
self.add_child(self.flash_chart)
Modified: mgmt/newdata/cumin/python/cumin/grid/slot.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/slot.py 2010-05-11 20:55:59 UTC (rev 3963)
+++ mgmt/newdata/cumin/python/cumin/grid/slot.py 2010-05-12 19:15:57 UTC (rev 3964)
@@ -10,6 +10,7 @@
from cumin.objectselector import *
from cumin.stat import *
from cumin.widgets import *
+from cumin.OpenFlashChart import *
strings = StringCatalog(__file__)
log = logging.getLogger("cumin.slot")
@@ -43,20 +44,10 @@
super(SlotDataSet, self).__init__(app)
exprs = list()
- exprs.append("s.qmf_update_time > now() - interval '60 minutes'")
+ exprs.append("_qmf_update_time > now() - interval '60 minutes'")
self.where_exprs.default = exprs
-class OldSlotFrame(CuminFrame):
- def __init__(self, app, name):
- super(OldSlotFrame, self).__init__(app, name)
-
- self.object = SlotParameter(app, "id")
- self.add_parameter(self.object)
-
- self.view = SlotView(app, "view", self.object)
- self.add_mode(self.view)
-
class SlotView(CuminView):
def __init__(self, app, name, slot):
super(SlotView, self).__init__(app, name, slot)
@@ -195,8 +186,8 @@
return
columns = [x[0] for x in cursor.description]
- activity = columns.index("activity")
- state = columns.index("state")
+ activity = columns.index("Activity")
+ state = columns.index("State")
interiors = self.interiors.copy()
interiors[None] = interiors["Unknown"]
@@ -226,7 +217,7 @@
d = dict()
for i in plist:
- activityState = "%s:%s" % (records[i]["activity"], records[i]["state"])
+ activityState = "%s:%s" % (records[i]["Activity"], records[i]["State"])
if not activityState in d:
d[activityState] = 0
d[activityState] += 1
@@ -244,7 +235,7 @@
root = Element()
root.name = self.object_description
- root.value = self.object_param.get(session).id
+ root.value = self.object_param.get(session)._id
root.slots = slot_count
root.vis = self.json.get(session)
root.activity_colors = self.interiors
@@ -263,15 +254,15 @@
if leaves:
#for i in sorted(plist, key=lambda x:records[x]["name"]):
for i in sorted(plist, key=lambda x:"%s%s%s" %
- (records[x]["activity"], records[x]["state"], records[x]["name"])):
+ (records[x]["Activity"], records[x]["State"], records[x]["Name"])):
el = Element()
- el.job_id = records[i]["job_id"] and records[i]["job_id"] or ""
- el.activity = records[i]["activity"] and records[i]["activity"] or "Unknown"
- el.state = records[i]["state"] and records[i]["state"] or "Unknown"
- el.value = records[i]["name"] and records[i]["name"] or ""
- el.load_avg = records[i]["load_avg"] and round(records[i]["load_avg"], 2) or 0
+ el.job_id = records[i]["JobId"] and records[i]["JobId"] or ""
+ el.activity = records[i]["Activity"] and records[i]["Activity"] or "Unknown"
+ el.state = records[i]["State"] and records[i]["State"] or "Unknown"
+ el.value = records[i]["Name"] and records[i]["Name"] or ""
+ el.load_avg = records[i]["LoadAvg"] and round(records[i]["LoadAvg"], 2) or 0
el.name = "slot"
- el.slot_id = records[i]["id"]
+ el.slot_id = records[i]["_id"]
level_list.append(el)
else:
# display summary info for all the slots under this grouping
@@ -392,13 +383,13 @@
return state
class SlotInfo(ItemSet):
- display_names = {"job_id": ("Job ID", "", ""),
- "system": ("System", "", ""),
- "machine": ("Machine", "", ""),
- "state": ("State", "", ""),
- "activity": ("Activity", "", ""),
- "name": ("Name", "", ""),
- "id": ("Slot_ID", "slotInfo_id", "hidden_row")}
+ display_names = {"JobId": ("Job ID", "", ""),
+ "System": ("System", "", ""),
+ "Machine": ("Machine", "", ""),
+ "State": ("State", "", ""),
+ "Activity": ("Activity", "", ""),
+ "Name": ("Name", "", ""),
+ "_id": ("Slot_ID", "slotInfo_id", "hidden_row")}
def __init__(self, app, name):
super(SlotMap.SlotInfo, self).__init__(app, name)
Modified: mgmt/newdata/cumin/python/cumin/grid/slot.strings
===================================================================
--- mgmt/newdata/cumin/python/cumin/grid/slot.strings 2010-05-11 20:55:59 UTC (rev 3963)
+++ mgmt/newdata/cumin/python/cumin/grid/slot.strings 2010-05-12 19:15:57 UTC (rev 3964)
@@ -1,19 +1,18 @@
[SlotDataSet.sql]
select
- s.id,
- s.name,
- s.pool,
- s.machine,
- s.system,
- s.job_id,
- s.accounting_group,
- s.op_sys,
- s.arch,
- s.activity,
- s.state,
- c.load_avg
-from slot as s
-left outer join slot_stats as c on c.id = s.stats_curr_id
+ _id,
+ "Name",
+ "Pool",
+ "Machine",
+ "System",
+ "JobId",
+ "AccountingGroup",
+ "OpSys",
+ "Arch",
+ "Activity",
+ "State",
+ "LoadAvg"
+from "mrg.grid"."Slot"
{sql_where}
{sql_order_by}
{sql_limit}
Modified: mgmt/newdata/cumin/python/cumin/parameters.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/parameters.py 2010-05-11 20:55:59 UTC (rev 3963)
+++ mgmt/newdata/cumin/python/cumin/parameters.py 2010-05-12 19:15:57 UTC (rev 3964)
@@ -11,7 +11,7 @@
return login.user
class ObjectAssociateAttribute(Attribute):
- def __init__(self, app, name, object):
+ def __init__(self, app, name, object, cls):
super(ObjectAssociateAttribute, self).__init__(app, name)
self.object = object
@@ -165,20 +165,20 @@
return Pool(string)
def do_marshal(self, pool):
- return str(pool.id)
+ return str(pool._id)
-class PoolCollectorAttribute(ObjectAssociateAttribute):
- def get_associate(self, session, pool):
- return pool.get_collector()
+class CollectorGridAttribute(ObjectAssociateAttribute):
+ def get_associate(self, session, collector):
+ cls = self.app.model.mrg_grid.Grid
+ grid = cls.get_object(session.cursor, Pool=collector.Pool)
+ return grid
-class PoolGridAttribute(ObjectAssociateAttribute):
- def get_associate(self, session, pool):
- return pool.get_grid()
+class CollectorNegotiatorAttribute(ObjectAssociateAttribute):
+ def get_associate(self, session, collector):
+ cls = self.app.model.mrg_grid.Negotiator
+ negotiator = cls.get_object(session.cursor, Pool=collector.Pool)
+ return negotiator
-class PoolNegotiatorAttribute(ObjectAssociateAttribute):
- def get_associate(self, session, pool):
- return pool.get_negotiator()
-
class QueueParameter(Parameter):
def do_unmarshal(self, string):
return Queue.get(int(string))
14 years, 7 months
rhmessaging commits: r3963 - mgmt/newdata/cumin/bin.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2010-05-11 16:55:59 -0400 (Tue, 11 May 2010)
New Revision: 3963
Modified:
mgmt/newdata/cumin/bin/cumin-database
Log:
Exit with non-zero if a check fails
Modified: mgmt/newdata/cumin/bin/cumin-database
===================================================================
--- mgmt/newdata/cumin/bin/cumin-database 2010-05-11 20:42:38 UTC (rev 3962)
+++ mgmt/newdata/cumin/bin/cumin-database 2010-05-11 20:55:59 UTC (rev 3963)
@@ -237,19 +237,24 @@
case "$1" in
check)
echo -n "Checking environment ........ "
- check-environment && echo "OK"
+ check-environment || exit 1
+ echo "OK"
echo -n "Checking initialization ..... "
- check-initialized && echo "OK"
+ check-initialized || exit 1
+ echo "OK"
echo -n "Checking configuration ...... "
- check-configured && echo "OK"
+ check-configured || exit 1
+ echo "OK"
echo -n "Checking server ............. "
- check-started && echo "OK"
+ check-started || exit 1
+ echo "OK"
echo -n "Checking database 'cumin' ... "
- check-created && echo "OK"
+ check-created || exit 1
+ echo "OK"
echo "The database is ready"
;;
14 years, 7 months
rhmessaging commits: r3962 - in mgmt/newdata/cumin: etc and 1 other directory.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2010-05-11 16:42:38 -0400 (Tue, 11 May 2010)
New Revision: 3962
Modified:
mgmt/newdata/cumin/bin/cumin-database
mgmt/newdata/cumin/etc/sysvinit-cumin
Log:
Add a cumin-database install function that just does everything; make the init script check that the database is installed
Modified: mgmt/newdata/cumin/bin/cumin-database
===================================================================
--- mgmt/newdata/cumin/bin/cumin-database 2010-05-11 14:39:05 UTC (rev 3961)
+++ mgmt/newdata/cumin/bin/cumin-database 2010-05-11 20:42:38 UTC (rev 3962)
@@ -5,147 +5,287 @@
exit 2
fi
+# A workaround for running this script from the devel environment
+
+if [[ -d cumin && -f etc/devel.profile ]]; then
+ source etc/devel.profile &> /dev/null
+fi
+
pgdata="/var/lib/pgsql/data"
pglog="${pgdata}/pg_log"
pghbaconf="${pgdata}/pg_hba.conf"
dbname="cumin"
+function format-output {
+ while read line; do
+ echo " | $line"
+ done
+}
+
+function run {
+ echo " | \$ $1"
+
+ if [[ "$2" ]]; then
+ su - postgres -c "$1" | format-output 2>&1
+ else
+ $1 | format-output 2>&1
+ fi
+
+ return ${PIPESTATUS[0]}
+}
+
function check-environment {
which rpm > /dev/null
- rpm -q postgresql-server > /dev/null
+
+ rpm -q postgresql-server &> /dev/null || {
+ echo "Error: postgresql-server is not installed"
+ echo "Hint: Run 'yum install postgresql-server'"
+ return 1
+ }
}
-function check-server {
- # Is it installed?
- # Is it initialized?
- # Is it running?
-
- test -d "$pgdata" || {
- echo "The database is not configured. Run 'cumin-database configure'."
- exit 1
+function check-started {
+ /sbin/service postgresql status &> /dev/null || {
+ echo "Error: The database is not running"
+ echo "Hint: Run 'cumin-database start'"
+ return 1
}
+}
- /sbin/service postgresql status > /dev/null || {
- echo "The database is not running. Run 'cumin-database start'."
- exit 1
+function check-initialized {
+ if [[ ! -d "$pgdata" ]]; then
+ echo "Error: The database is not initialized"
+ echo "Hint: Run 'cumin-database initialize'"
+ return 1
+ fi
+}
+
+function check-configured {
+ grep "$dbname" "$pghbaconf" &> /dev/null || {
+ echo "Error: The database is not configured"
+ echo "Hint: Run 'cumin-database configure'"
+ return 1
}
}
-function check-access {
+function check-created {
psql -d cumin -U cumin -h localhost -c '\q' &> /dev/null || {
- echo "The database is not accessible. Run 'cumin-database create'"
- exit 1
+ echo "Error: The database is not created"
+ echo "Hint: Run 'cumin-database create'"
+ return 1
}
}
-function format-output {
- while read line; do
- echo " | $line"
+function confirm-install {
+ cat <<EOF
+WARNING
+
+This script installs a cumin database into the system postgresql
+instance.
+
+ * It will stop and start the postgresql service.
+
+ * It will initialize the postgresql database cluster if it isn't
+ already initialized.
+
+ * It will alter postgresql configuration files.
+
+If you already have a custom-configured postgresql install, you may
+not want to proceed.
+
+If there are clients that depend on the running postgresql service,
+you probably don't want to proceed.
+
+If, however, none of these changes affect your existing deployment, it
+is safe to proceed.
+
+Enter 'yes' to proceed or Ctrl-C to cancel:
+EOF
+ while read word; do
+ if [[ "$word" == "yes" ]]; then
+ break
+ fi
+
+ echo "Enter 'yes' to proceed or Ctrl-C to cancel:"
done
+
+ install
}
-function run {
- echo " | \$ $1"
+function install {
+ check-environment || exit 1
- if [[ "$2" ]]; then
- su - postgres -c "$1" | format-output 2>&1
- else
- $1 | format-output 2>&1
- fi
+ check-initialized &> /dev/null || initialize
- return ${PIPESTATUS[0]}
+ check-configured &> /dev/null || configure
+
+ check-started &> /dev/null || start > /dev/null
+
+ check-created &> /dev/null || create
}
-case "$1" in
- start)
- run "/sbin/service postgresql start"
- echo "The database server is started."
- ;;
- stop)
- run "/sbin/service postgresql stop"
- echo "The database server is stopped."
- ;;
- configure)
- check-environment
+function start {
+ check-environment || exit 1
+ check-initialized || exit 1
- if grep ${dbname} ${pghbaconf} &> /dev/null; then
- echo "The database server appears to have been configured already."
- exit 1
- fi
+ /sbin/service postgresql start
+}
- if /sbin/service postgresql status > /dev/null; then
- echo "The database server is running. To proceed with"
- echo "configuration, it must be stopped."
- exit 1
- fi
+function stop {
+ check-environment || exit 1
+ check-initialized || exit 1
- if [[ ! -d "$pgdata" ]]; then
- run "initdb --pgdata='$pgdata' --auth='ident sameuser'" postgres
- run "mkdir '$pglog'" postgres
- run "chmod 700 '$pglog'" postgres
+ /sbin/service postgresql stop
+}
- /sbin/restorecon -R "$pgdata"
- fi
+function initialize {
+ check-environment || exit 1
+
+ if check-initialized &> /dev/null; then
+ echo "Error: The database server is already initialized"
+ exit 1
+ fi
- python <<EOF
+ if check-started &> /dev/null; then
+ echo "Error: The database server is running"
+ echo "Hint: Run 'cumin-database stop'"
+ exit 1
+ fi
+
+ run "initdb --pgdata='$pgdata' --auth='ident sameuser'" postgres
+ run "mkdir '$pglog'" postgres
+ run "chmod 700 '$pglog'" postgres
+
+ /sbin/restorecon -R "$pgdata"
+}
+
+function configure {
+ check-environment || exit 1
+ check-initialized || exit 1
+
+ if check-configured &> /dev/null; then
+ echo "Error: The database server is already configured"
+ exit 1
+ fi
+
+ python <<EOF
from cumin.database import modify_pghba_conf
modify_pghba_conf('${pghbaconf}', '${dbname}', 'cumin')
EOF
+}
- echo "The database server is configured."
- ;;
- check)
- echo -n "Checking environment ... "
- check-environment && echo "OK"
+function create {
+ check-environment || exit 1
+ check-started || exit 1
+ check-configured || exit 1
- echo -n "Checking server ........ "
- check-server && echo "OK"
+ if check-created &> /dev/null; then
+ echo "Error: The database is already created"
+ exit 1
+ fi
- echo -n "Checking access ........ "
- check-access && echo "OK"
+ run "createuser --superuser ${dbname}" postgres
+ run "createdb --owner=${dbname} ${dbname}" postgres
- # check-data
+ cumin-admin create-schema > /dev/null
+ # cumin-admin add-role user
+ # cumin-admin add-role admin
+}
- echo "The database is ready."
- ;;
- create)
- check-environment
- check-server
+function drop {
+ check-environment || exit 1
+ check-started || exit 1
- run "createuser --superuser ${dbname}" postgres
- run "createdb --owner=${dbname} ${dbname}" postgres
+ run "dropdb ${dbname}" postgres
+ run "dropuser ${dbname}" postgres
+}
- check-access
+function confirm-annihilate {
+ check-environment || exit 1
- run "cumin-admin create-schema"
- # run "cumin-admin add-role user"
- # run "cumin-admin add-role admin"
+ echo "Really?"
- echo "The database is initialized."
- ;;
- drop)
- check-environment
- check-server
+ while read line; do
+ if [[ "$line" == "really" ]]; then
+ break
+ fi
- run "dropdb ${dbname}" postgres
- run "dropuser ${dbname}" postgres
+ echo "Not good enough"
+ done
- echo "The database is dropped."
+ run "/sbin/service postgresql stop" || :
+ run "rm -rf /var/lib/pgsql/data"
+}
+
+function usage {
+ cat <<EOF
+Control and configure the cumin database
+Usage: cumin-database COMMAND
+Commands:
+ check Check the cumin database
+ install Automated database install
+ start Start the database server
+ stop Stop the database server
+ initialize Create the main database cluster
+ configure Configure the main database cluster
+ create Create the user, database, and schema
+ drop Discard the database user, database, and all data
+EOF
+ exit 1
+}
+
+case "$1" in
+ check)
+ echo -n "Checking environment ........ "
+ check-environment && echo "OK"
+
+ echo -n "Checking initialization ..... "
+ check-initialized && echo "OK"
+
+ echo -n "Checking configuration ...... "
+ check-configured && echo "OK"
+
+ echo -n "Checking server ............. "
+ check-started && echo "OK"
+
+ echo -n "Checking database 'cumin' ... "
+ check-created && echo "OK"
+
+ echo "The database is ready"
;;
+ install)
+ confirm-install
+ echo "The database is installed"
+ ;;
+ start)
+ start
+ echo "The database server is started"
+ ;;
+ stop)
+ stop
+ echo "The database server is stopped"
+ ;;
+ initialize)
+ initialize
+ echo "The database server is initialized"
+ ;;
+ configure)
+ configure
+ echo "The database server is configured"
+ ;;
+ create)
+ create
+ echo "The database is created"
+ ;;
+ drop)
+ drop
+ echo "The database is dropped"
+ ;;
annihilate)
- run "rm -rf /var/lib/pgsql/data"
- echo "Ouch!"
+ confirm-annihilate
+ echo "You devastate me"
;;
*)
- echo "Control and configure the cumin database"
- echo "Usage: cumin-database COMMAND"
- echo "Commands:"
- echo " start Start the database server"
- echo " stop Stop the database server"
- echo " configure Configure the main database cluster"
- echo " check Check the cumin database"
- echo " create Create the user, database, and schema"
- echo " drop Discard the database user, database, and all data"
- exit 1
+ usage
;;
esac
Modified: mgmt/newdata/cumin/etc/sysvinit-cumin
===================================================================
--- mgmt/newdata/cumin/etc/sysvinit-cumin 2010-05-11 14:39:05 UTC (rev 3961)
+++ mgmt/newdata/cumin/etc/sysvinit-cumin 2010-05-11 20:42:38 UTC (rev 3962)
@@ -8,8 +8,15 @@
#
# Sanity checks.
-[ -x /usr/bin/cumin ] || exit 0
+test -x /usr/bin/cumin || exit 1
+test -x /usr/bin/cumin-database || exit 1
+cumin-database check &> /dev/null || {
+ echo "Cumin's database is not yet installed"
+ echo "Run 'cumin-database install' as root"
+ exit 1
+}
+
# Source function library.
. /etc/rc.d/init.d/functions
@@ -20,7 +27,7 @@
RETVAL=0
start() {
- echo -n $"Starting Cumin daemon: "
+ echo -n $"Starting cumin: "
daemon --user cumin --check $servicename $processname \&
RETVAL=$?
echo
@@ -28,7 +35,7 @@
}
stop() {
- echo -n $"Stopping Cumin daemon: "
+ echo -n $"Stopping cumin: "
killproc $servicename -TERM
RETVAL=$?
14 years, 7 months
rhmessaging commits: r3961 - mgmt/newdata/cumin/python/cumin.
by rhmessaging-commits@lists.jboss.org
Author: eallen
Date: 2010-05-11 10:39:05 -0400 (Tue, 11 May 2010)
New Revision: 3961
Modified:
mgmt/newdata/cumin/python/cumin/main.py
mgmt/newdata/cumin/python/cumin/widgets.py
Log:
Added TopObjectAttributeColumn to prevent column highlighting for top tables
Modified: mgmt/newdata/cumin/python/cumin/main.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/main.py 2010-05-10 21:09:25 UTC (rev 3960)
+++ mgmt/newdata/cumin/python/cumin/main.py 2010-05-11 14:39:05 UTC (rev 3961)
@@ -325,8 +325,10 @@
col = ObjectLinkColumn(app, "name", cls.nodeName, cls._id, frame)
self.add_column(col)
- self.add_attribute_column(cls.loadAverage1Min)
- self.sort_col = cls.loadAverage1Min.name
+ attr = cls.loadAverage1Min
+ col = TopObjectAttributeColumn(self.app, attr.name, attr)
+ self.add_column(col)
+ self.sort_col = attr.name
self.header = TopTableHeader(app, "header")
self.replace_child(self.header)
@@ -365,7 +367,7 @@
self.page.main.grid.pool.submission.view.show(session)
return branch.marshal()
- class DurationColumn(ObjectAttributeColumn):
+ class DurationColumn(TopObjectAttributeColumn):
def render_header_content(self, session):
return "Duration"
Modified: mgmt/newdata/cumin/python/cumin/widgets.py
===================================================================
--- mgmt/newdata/cumin/python/cumin/widgets.py 2010-05-10 21:09:25 UTC (rev 3960)
+++ mgmt/newdata/cumin/python/cumin/widgets.py 2010-05-11 14:39:05 UTC (rev 3961)
@@ -1608,6 +1608,10 @@
def render_class(self, session):
return self.parent.name
+class TopObjectAttributeColumn(ObjectAttributeColumn):
+ def render_class(self, session):
+ return self.name
+
class TopTableFooter(Widget):
def render(self, session):
return ""
14 years, 7 months
rhmessaging commits: r3960 - mgmt/newdata/wooly/python/wooly.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2010-05-10 17:09:25 -0400 (Mon, 10 May 2010)
New Revision: 3960
Modified:
mgmt/newdata/wooly/python/wooly/datatable.py
Log:
Python 2.5 doesn't have a start param in enumerate
Modified: mgmt/newdata/wooly/python/wooly/datatable.py
===================================================================
--- mgmt/newdata/wooly/python/wooly/datatable.py 2010-05-10 20:43:39 UTC (rev 3959)
+++ mgmt/newdata/wooly/python/wooly/datatable.py 2010-05-10 21:09:25 UTC (rev 3960)
@@ -238,7 +238,7 @@
count = self.table.count.get(session)
limit = self.table.header.limit.get(session)
- return [(x[1], x[0]) for x in enumerate(range(0, count, limit), 1)]
+ return [(x[1], x[0] + 1) for x in enumerate(range(0, count, limit))]
def render_title(self, session):
return "Page"
14 years, 7 months
rhmessaging commits: r3959 - mgmt/newdata/mint/python/mint.
by rhmessaging-commits@lists.jboss.org
Author: justi9
Date: 2010-05-10 16:43:39 -0400 (Mon, 10 May 2010)
New Revision: 3959
Modified:
mgmt/newdata/mint/python/mint/util.py
Log:
Avoid collision in 'enumerate'
Modified: mgmt/newdata/mint/python/mint/util.py
===================================================================
--- mgmt/newdata/mint/python/mint/util.py 2010-05-10 19:57:40 UTC (rev 3958)
+++ mgmt/newdata/mint/python/mint/util.py 2010-05-10 20:43:39 UTC (rev 3959)
@@ -16,7 +16,7 @@
from parsley.collectionsex import *
from parsley.config import *
from parsley.loggingex import *
-from parsley.threadingex import *
+from parsley.threadingex import print_threads
log = logging.getLogger("mint.util")
14 years, 7 months