[jboss-svn-commits] JBL Code SVN: r28968 - in labs/jbossrules/trunk: drools-core/src/main/java/org/drools/reteoo and 4 other directories.
jboss-svn-commits at lists.jboss.org
Mon Aug 17 23:31:50 EDT 2009
Author: mark.proctor at jboss.com
Date: 2009-08-17 23:31:50 -0400 (Mon, 17 Aug 2009)
New Revision: 28968
Added:
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/ConcurrentRightTuple.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/UnificationNode.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentHashTable.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentRightTupleList.java
labs/jbossrules/trunk/drools-core/src/test/java/org/drools/util/ConcurrentRightTupleIndexHashTableTest.java
Modified:
labs/jbossrules/trunk/drools-compiler/src/main/java/org/drools/rule/builder/QueryBuilder.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/BetaNode.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/JoinNode.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/NotNode.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/RightTuple.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/BuildContext.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/ReteooRuleBuilder.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/rule/Query.java
labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/RightTupleList.java
Log:
JBRULES-2252 Concurrent Right Input Memory
-Initial work on introducing a concurrent right input memory.
-The code now detects when it is building a query, rather than a rule, so that it can create
concurrency-friendly RightTuples and use concurrency-friendly memory implementations.
-Initial work on adapting the ConcurrentHashMap implementation to use RightTupleLists as the keys.
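
The thrust of the change: the build path now knows whether it is wiring a query or a rule, and beta nodes use that knowledge to choose between the existing single-threaded right-tuple memory and new concurrency-friendly variants. A minimal sketch of that selection idea, using hypothetical stand-in types rather than the real Drools classes:

    // Hypothetical sketch only; these are not the Drools classes.
    interface RightMemory {}
    class SingleThreadedRightMemory implements RightMemory {}
    class ConcurrentRightMemory implements RightMemory {}

    class MemorySelector {
        // 'isQuery' would come from the build context (see BuildContext.isQuery() below)
        static RightMemory create(boolean isQuery) {
            return isQuery ? new ConcurrentRightMemory()
                           : new SingleThreadedRightMemory();
        }
    }
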
Modified: labs/jbossrules/trunk/drools-compiler/src/main/java/org/drools/rule/builder/QueryBuilder.java
===================================================================
--- labs/jbossrules/trunk/drools-compiler/src/main/java/org/drools/rule/builder/QueryBuilder.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-compiler/src/main/java/org/drools/rule/builder/QueryBuilder.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -11,6 +11,7 @@
import org.drools.rule.Declaration;
import org.drools.rule.LiteralConstraint;
import org.drools.rule.Pattern;
+import org.drools.rule.Query;
import org.drools.spi.FieldValue;
import org.drools.spi.InternalReadAccessor;
import org.drools.spi.ObjectType;
@@ -43,7 +44,10 @@
String[] params = queryDescr.getParameters();
String[] types = queryDescr.getParameterTypes();
- int i = 0;
+ int i = 0;
+
+ Declaration[] declarations = new Declaration[ params.length ];
+
try {
for ( i = 0; i < params.length; i++ ) {
Declaration declr = pattern.addDeclaration( params[i] );
@@ -55,7 +59,12 @@
PatternBuilder.registerReadAccessor( context, objectType, "arguments", reader );
declr.setReadAccessor( reader );
- }
+
+ declarations[i] = declr;
+ }
+
+ ((Query)context.getRule()).setParameters( declarations );
+
} catch ( ClassNotFoundException e ) {
context.getErrors().add( new DescrBuildError( context.getParentDescr(),
queryDescr,
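
The QueryBuilder change above collects the Declaration built for each query parameter into an array and hands it to the Query, so later stages can address the parameters by position. A stripped-down sketch of the shape of that loop, with stand-in types:

    // Stand-in types; only the collect-then-set shape is mirrored.
    class ParamDeclaration {
        final String name;
        ParamDeclaration(String name) { this.name = name; }
    }

    class QueryParamsSketch {
        ParamDeclaration[] parameters;
        void setParameters(ParamDeclaration[] parameters) { this.parameters = parameters; }
    }

    class QueryBuilderSketch {
        static void bindParameters(QueryParamsSketch query, String[] params) {
            ParamDeclaration[] declarations = new ParamDeclaration[params.length];
            for (int i = 0; i < params.length; i++) {
                declarations[i] = new ParamDeclaration(params[i]); // one declaration per parameter
            }
            query.setParameters(declarations);
        }
    }
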
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/BetaNode.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/BetaNode.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/BetaNode.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -25,6 +25,7 @@
import org.drools.RuleBaseConfiguration;
import org.drools.common.BaseNode;
import org.drools.common.BetaConstraints;
+import org.drools.common.InternalFactHandle;
import org.drools.common.InternalWorkingMemory;
import org.drools.common.NodeMemory;
import org.drools.common.PropagationContextImpl;
@@ -35,6 +36,7 @@
import org.drools.rule.BehaviorManager;
import org.drools.spi.BetaNodeFieldConstraint;
import org.drools.spi.PropagationContext;
+import org.drools.util.ConcurrentRightTupleList;
import org.drools.util.Iterator;
import org.drools.util.LinkedList;
import org.drools.util.LinkedListEntry;
@@ -80,6 +82,7 @@
protected boolean objectMemory = true; // hard coded to true
protected boolean tupleMemoryEnabled;
+ protected boolean concurrentRightTupleMemory = false;
// ------------------------------------------------------------
// Constructors
@@ -128,6 +131,8 @@
nextObjectSinkNode = (ObjectSinkNode) in.readObject();
objectMemory = in.readBoolean();
tupleMemoryEnabled = in.readBoolean();
+ concurrentRightTupleMemory = in.readBoolean();
+
super.readExternal( in );
}
@@ -142,6 +147,8 @@
out.writeObject( nextObjectSinkNode );
out.writeBoolean( objectMemory );
out.writeBoolean( tupleMemoryEnabled );
+ out.writeBoolean( concurrentRightTupleMemory );
+
super.writeExternal( out );
}
@@ -299,8 +306,18 @@
public void setLeftTupleMemoryEnabled(boolean tupleMemoryEnabled) {
this.tupleMemoryEnabled = tupleMemoryEnabled;
}
+
+
- public String toString() {
+ public boolean isConcurrentRightTupleMemory() {
+ return concurrentRightTupleMemory;
+ }
+
+ public void setConcurrentRightTupleMemory(boolean concurrentRightTupleMemory) {
+ this.concurrentRightTupleMemory = concurrentRightTupleMemory;
+ }
+
+ public String toString() {
return "";
}
@@ -414,4 +431,15 @@
this.previousObjectSinkNode = previous;
}
+ public RightTuple createRightTuple(InternalFactHandle handle,
+ RightTupleSink sink) {
+ if ( !this.concurrentRightTupleMemory ) {
+ return new RightTuple(handle,
+ sink);
+ } else {
+ return new ConcurrentRightTuple(handle,
+ sink);
+ }
+ }
+
}
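
With the createRightTuple() factory above, subclasses such as JoinNode and NotNode (below) no longer construct RightTuple directly; a single flag on the node decides which implementation is used everywhere. A compact, self-contained illustration of the factory-method pattern in play, again with stand-in types:

    // Stand-in types; only the factory-method shape of BetaNode is mirrored.
    class Handle {}

    class TupleSketch {
        TupleSketch(Handle handle) {}
    }

    class ConcurrentTupleSketch extends TupleSketch {
        ConcurrentTupleSketch(Handle handle) { super(handle); }
    }

    class BetaNodeSketch {
        private boolean concurrentRightTupleMemory = false;

        void setConcurrentRightTupleMemory(boolean on) { this.concurrentRightTupleMemory = on; }

        // Subclasses call this instead of 'new', so one flag switches the implementation.
        TupleSketch createRightTuple(Handle handle) {
            return concurrentRightTupleMemory ? new ConcurrentTupleSketch(handle)
                                              : new TupleSketch(handle);
        }
    }
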
Added: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/ConcurrentRightTuple.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/ConcurrentRightTuple.java (rev 0)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/ConcurrentRightTuple.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -0,0 +1,87 @@
+package org.drools.reteoo;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.drools.common.InternalFactHandle;
+import org.drools.util.Entry;
+import org.drools.util.RightTupleList;
+
+public class ConcurrentRightTuple extends RightTuple {
+ private RightTupleList memory;
+
+ private AtomicReference<Entry> previous;
+ private AtomicReference<Entry> next;
+
+ public ConcurrentRightTuple() {
+
+ }
+
+ public ConcurrentRightTuple(InternalFactHandle handle,
+ RightTupleSink sink) {
+ this.handle = handle;
+ this.sink = sink;
+
+ this.previous = new AtomicReference<Entry>();
+ this.next = new AtomicReference<Entry>();
+
+ RightTuple currentFirst = handle.getRightTuple();
+ if ( currentFirst != null ) {
+ currentFirst.setHandlePrevious( this );
+ setHandleNext( currentFirst );
+ }
+
+ handle.setRightTuple( this );
+ }
+
+ public RightTupleList getMemory() {
+ return memory;
+ }
+
+ public void setMemory(RightTupleList memory) {
+ this.memory = memory;
+ }
+
+ public Entry getPrevious() {
+ return previous.get();
+ }
+
+ public void setPrevious(Entry previous) {
+ this.previous.set( previous );
+ }
+
+ public Entry getNext() {
+ return next.get();
+ }
+
+ public void setNext(Entry next) {
+ this.next.set( next );
+ }
+
+
+
+ public int hashCode() {
+ return this.handle.hashCode();
+ }
+
+ public String toString() {
+ return this.handle.toString() + "\n";
+ }
+
+ public boolean equals(ConcurrentRightTuple other) {
+ // we know the object is never null and always of the type ConcurrentRightTuple
+ if ( other == this ) {
+ return true;
+ }
+
+ // A ConcurrentRightTuple is only the same if it has the same hashCode and fact handle
+ if ( (other == null) || (hashCode() != other.hashCode()) ) {
+ return false;
+ }
+
+ return this.handle == other.handle;
+ }
+
+ public boolean equals(Object object) {
+ return equals( (ConcurrentRightTuple) object );
+ }
+}
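
ConcurrentRightTuple replaces the plain previous/next fields with AtomicReference cells: set() is a volatile write and get() a volatile read, so a link written by one thread becomes visible to others without locking. A minimal, runnable sketch of that idiom (the Node type is hypothetical, not a Drools class):

    import java.util.concurrent.atomic.AtomicReference;

    // Hypothetical doubly linked node mirroring the AtomicReference link idiom.
    class Node {
        final int value;
        final AtomicReference<Node> prev = new AtomicReference<Node>();
        final AtomicReference<Node> next = new AtomicReference<Node>();

        Node(int value) { this.value = value; }

        // set() publishes the link to concurrent readers; readers traverse with get().
        void append(Node n) {
            n.prev.set(this);
            this.next.set(n);
        }
    }

    class LinkDemo {
        public static void main(String[] args) {
            Node a = new Node(1);
            Node b = new Node(2);
            a.append(b);
            System.out.println(a.next.get().value); // prints 2
        }
    }
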
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/JoinNode.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/JoinNode.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/JoinNode.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -57,7 +57,7 @@
/**
*
*/
- private static final long serialVersionUID = 400L;
+ private static final long serialVersionUID = 400L;
public JoinNode() {
@@ -123,40 +123,40 @@
this.constraints.resetTuple( memory.getContext() );
}
-
-// public void assertLeftTuple(final LeftTuple leftTuple,
-// RightTuple rightTuple,
-// final PropagationContext context,
-// final InternalWorkingMemory workingMemory) {
-// final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this );
-//
-// if ( this.tupleMemoryEnabled ) {
-// memory.getLeftTupleMemory().add( leftTuple );
-// }
-//
-// this.constraints.updateFromTuple( memory.getContext(),
-// workingMemory,
-// leftTuple );
-// if ( rightTuple == null ) {
-// rightTuple = memory.getRightTupleMemory().getFirst( leftTuple );
-// }
-//
-// boolean suspend = false;
-// for ( ;rightTuple != null && !suspend; rightTuple = (RightTuple) rightTuple.getNext() ) {
-// final InternalFactHandle handle = rightTuple.getFactHandle();
-// if ( this.constraints.isAllowedCachedLeft( memory.getContext(),
-// handle ) ) {
-// this.sink.propagateAssertLeftTuple( leftTuple,
-// rightTuple,
-// context,
-// workingMemory,
-// this.tupleMemoryEnabled );
-// }
-// }
-//
-// this.constraints.resetTuple( memory.getContext() );
-// }
+ // public void assertLeftTuple(final LeftTuple leftTuple,
+ // RightTuple rightTuple,
+ // final PropagationContext context,
+ // final InternalWorkingMemory workingMemory) {
+ // final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this );
+ //
+ // if ( this.tupleMemoryEnabled ) {
+ // memory.getLeftTupleMemory().add( leftTuple );
+ // }
+ //
+ // this.constraints.updateFromTuple( memory.getContext(),
+ // workingMemory,
+ // leftTuple );
+ // if ( rightTuple == null ) {
+ // rightTuple = memory.getRightTupleMemory().getFirst( leftTuple );
+ // }
+ //
+ // boolean suspend = false;
+ // for ( ;rightTuple != null && !suspend; rightTuple = (RightTuple) rightTuple.getNext() ) {
+ // final InternalFactHandle handle = rightTuple.getFactHandle();
+ // if ( this.constraints.isAllowedCachedLeft( memory.getContext(),
+ // handle ) ) {
+ // this.sink.propagateAssertLeftTuple( leftTuple,
+ // rightTuple,
+ // context,
+ // workingMemory,
+ // this.tupleMemoryEnabled );
+ // }
+ // }
+ //
+ // this.constraints.resetTuple( memory.getContext() );
+ // }
+
/**
* Assert a new <code>FactHandleImpl</code>. The left input of
* <code>ReteTuple</code>s is iterated and joins attempted, via the
@@ -180,8 +180,8 @@
final InternalWorkingMemory workingMemory) {
final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this );
- RightTuple rightTuple = new RightTuple( factHandle,
- this );
+ RightTuple rightTuple = createRightTuple( factHandle,
+ this );
if ( !behavior.assertRightTuple( memory.getBehaviorContext(),
rightTuple,
@@ -209,7 +209,7 @@
rightTuple,
context,
workingMemory,
- this.tupleMemoryEnabled );
+ this.tupleMemoryEnabled );
}
i++;
}
@@ -287,7 +287,7 @@
sink.assertLeftTuple( new LeftTuple( leftTuple,
rightTuple,
sink,
- this.tupleMemoryEnabled ),
+ this.tupleMemoryEnabled ),
context,
workingMemory );
}
@@ -296,7 +296,7 @@
this.constraints.resetTuple( memory.getContext() );
}
}
-
+
public short getType() {
return NodeTypeEnums.JoinNode;
}
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/NotNode.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/NotNode.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/NotNode.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -42,7 +42,7 @@
*
*/
public class NotNode extends BetaNode {
- private static final long serialVersionUID = 400L;
+ private static final long serialVersionUID = 400L;
static int notAssertObject = 0;
static int notAssertTuple = 0;
@@ -142,8 +142,8 @@
public void assertObject(final InternalFactHandle factHandle,
final PropagationContext context,
final InternalWorkingMemory workingMemory) {
- final RightTuple rightTuple = new RightTuple( factHandle,
- this );
+ final RightTuple rightTuple = createRightTuple( factHandle,
+ this );
final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this );
if ( !behavior.assertRightTuple( memory.getBehaviorContext(),
@@ -298,10 +298,10 @@
workingMemory );
}
}
-
+
public short getType() {
return NodeTypeEnums.NotNode;
- }
+ }
public String toString() {
ObjectSource source = this.rightInput;
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/RightTuple.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/RightTuple.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/RightTuple.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -7,7 +7,7 @@
public class RightTuple
implements
Entry {
- private InternalFactHandle handle;
+ protected InternalFactHandle handle;
private RightTuple handlePrevious;
private RightTuple handleNext;
@@ -21,7 +21,7 @@
private LeftTuple blocked;
- private RightTupleSink sink;
+ protected RightTupleSink sink;
public RightTuple() {
Added: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/UnificationNode.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/UnificationNode.java (rev 0)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/UnificationNode.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -0,0 +1,113 @@
+package org.drools.reteoo;
+
+import org.drools.RuleBaseConfiguration;
+import org.drools.common.BaseNode;
+import org.drools.common.InternalWorkingMemory;
+import org.drools.common.NodeMemory;
+import org.drools.spi.PropagationContext;
+
+public class UnificationNode extends LeftTupleSource
+ implements
+ LeftTupleSinkNode,
+ NodeMemory {
+
+ @Override
+ public void updateSink(LeftTupleSink sink,
+ PropagationContext context,
+ InternalWorkingMemory workingMemory) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void attach() {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void attach(InternalWorkingMemory[] workingMemories) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ protected void doRemove(RuleRemovalContext context,
+ ReteooBuilder builder,
+ BaseNode node,
+ InternalWorkingMemory[] workingMemories) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void networkUpdated() {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public LeftTupleSinkNode getNextLeftTupleSinkNode() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public LeftTupleSinkNode getPreviousLeftTupleSinkNode() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public void setNextLeftTupleSinkNode(LeftTupleSinkNode next) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setPreviousLeftTupleSinkNode(LeftTupleSinkNode previous) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void assertLeftTuple(LeftTuple leftTuple,
+ PropagationContext context,
+ InternalWorkingMemory workingMemory) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public short getType() {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public boolean isLeftTupleMemoryEnabled() {
+ // TODO Auto-generated method stub
+ return false;
+ }
+
+ @Override
+ public void retractLeftTuple(LeftTuple leftTuple,
+ PropagationContext context,
+ InternalWorkingMemory workingMemory) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setLeftTupleMemoryEnabled(boolean tupleMemoryEnabled) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public Object createMemory(RuleBaseConfiguration config) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+}
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/BuildContext.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/BuildContext.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/BuildContext.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -29,6 +29,7 @@
import org.drools.reteoo.ReteooBuilder;
import org.drools.rule.Behavior;
import org.drools.rule.EntryPoint;
+import org.drools.rule.Query;
import org.drools.rule.Rule;
import org.drools.rule.RuleConditionElement;
import org.drools.time.TemporalDependencyMatrix;
@@ -87,6 +88,8 @@
/** This one is slightly different as alphaMemory can be adaptive, only turning on for new rule attachments */
private boolean alphaNodeMemoryAllowed;
+
+ private boolean query;
/** Stores the list of nodes being added that require partitionIds */
private List<BaseNode> nodes;
@@ -356,8 +359,14 @@
public boolean isAlphaMemoryAllowed() {
return this.alphaNodeMemoryAllowed;
}
+
+
- /**
+ public boolean isQuery() {
+ return query;
+ }
+
+ /**
* @return the currentEntryPoint
*/
public EntryPoint getCurrentEntryPoint() {
@@ -431,6 +440,9 @@
public void setRule(Rule rule) {
this.rule = rule;
+ if ( rule instanceof Query) {
+ this.query = true;
+ }
}
}
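
BuildContext now learns that it is building a query at the moment the rule is set, so any node builder downstream can ask isQuery(). Presumably a beta-node builder would then call setConcurrentRightTupleMemory(context.isQuery()) when attaching nodes, though that hookup is not part of this revision. A tiny sketch of the flag, with stand-in types:

    // Stand-in types; only the instanceof-driven flag from BuildContext is mirrored.
    class RuleSketch {}
    class QueryRuleSketch extends RuleSketch {}

    class BuildContextSketch {
        private RuleSketch rule;
        private boolean query;

        void setRule(RuleSketch rule) {
            this.rule = rule;
            if (rule instanceof QueryRuleSketch) { // queries get concurrency-friendly memory
                this.query = true;
            }
        }

        boolean isQuery() { return query; }
    }
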
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/ReteooRuleBuilder.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/ReteooRuleBuilder.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/reteoo/builder/ReteooRuleBuilder.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -89,8 +89,7 @@
*/
public List<TerminalNode> addRule(final Rule rule,
final InternalRuleBase rulebase,
- final ReteooBuilder.IdGenerator idGenerator) throws InvalidPatternException {
-
+ final ReteooBuilder.IdGenerator idGenerator) throws InvalidPatternException {
// the list of terminal nodes
final List<TerminalNode> nodes = new ArrayList<TerminalNode>();
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/rule/Query.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/rule/Query.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/rule/Query.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -1,5 +1,18 @@
package org.drools.rule;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Calendar;
+import java.util.Map;
+
+import org.drools.io.Resource;
+import org.drools.spi.CompiledInvoker;
+import org.drools.spi.Consequence;
+import org.drools.spi.Duration;
+import org.drools.spi.Enabled;
+import org.drools.spi.Salience;
+
/*
* Copyright 2005 JBoss Inc
*
@@ -26,10 +39,22 @@
public Query() {
}
+
+ private Declaration[] parameters;
public Query(final String name) {
super( name );
}
+
+ public void writeExternal(ObjectOutput out) throws IOException {
+ super.writeExternal( out );
+ out.writeObject( parameters );
+ }
+
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ super.readExternal(in);
+ this.parameters = ( Declaration[] ) in.readObject();
+ }
/**
* Override this as Queries will NEVER have a consequence, and it should
@@ -39,4 +64,12 @@
return super.isSemanticallyValid();
}
+ public void setParameters(Declaration[] parameters) {
+ this.parameters = parameters;
+ }
+
+ public Declaration[] getParameters() {
+ return this.parameters;
+ }
+
}
\ No newline at end of file
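
Query now externalizes its parameters array alongside the state written by Rule, keeping serialized rulebases working. A minimal, runnable sketch of the same writeExternal/readExternal round-trip for an Externalizable class with an array field (the Param type is hypothetical, standing in for Declaration):

    import java.io.*;

    public class ExternalizableDemo {
        // Hypothetical serializable parameter; stands in for Declaration.
        static class Param implements Serializable {
            final String name;
            Param(String name) { this.name = name; }
        }

        public static class QuerySketch implements Externalizable {
            private Param[] parameters;

            public QuerySketch() {}                        // required by Externalizable
            QuerySketch(Param[] parameters) { this.parameters = parameters; }

            public void writeExternal(ObjectOutput out) throws IOException {
                out.writeObject(parameters);               // the array is written as one object
            }

            public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
                this.parameters = (Param[]) in.readObject();
            }
        }

        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(buf);
            out.writeObject(new QuerySketch(new Param[]{ new Param("x") }));
            out.close();

            ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()));
            QuerySketch copy = (QuerySketch) in.readObject();
            System.out.println(copy.parameters[0].name);   // prints x
        }
    }
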
Added: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentHashTable.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentHashTable.java (rev 0)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentHashTable.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -0,0 +1,709 @@
+package org.drools.util;
+
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+import java.io.Serializable;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.AbstractCollection;
+import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.Collection;
+import java.util.Set;
+import java.util.Map;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.drools.reteoo.LeftTuple;
+import org.drools.reteoo.RightTuple;
+import org.drools.util.AbstractHashTable.DoubleCompositeIndex;
+import org.drools.util.AbstractHashTable.FieldIndex;
+import org.drools.util.AbstractHashTable.Index;
+import org.drools.util.AbstractHashTable.SingleIndex;
+import org.drools.util.AbstractHashTable.TripleCompositeIndex;
+
+public class ConcurrentHashTable {
+ private static final long serialVersionUID = 7249069246763182397L;
+
+ /*
+ * The basic strategy is to subdivide the table among Segments,
+ * each of which itself is a concurrently readable hash table.
+ */
+
+ /* ---------------- Constants -------------- */
+
+ /**
+ * The default initial capacity for this table,
+ * used when not otherwise specified in a constructor.
+ */
+ static final int DEFAULT_INITIAL_CAPACITY = 16;
+
+ /**
+ * The default load factor for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
+
+ /**
+ * The default concurrency level for this table, used when not
+ * otherwise specified in a constructor.
+ */
+ static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+
+ /**
+ * The maximum capacity, used if a higher value is implicitly
+ * specified by either of the constructors with arguments. MUST
+ * be a power of two <= 1<<30 to ensure that entries are indexable
+ * using ints.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The maximum number of segments to allow; used to bound
+ * constructor arguments.
+ */
+ static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
+
+ /**
+ * Number of unsynchronized retries in size and containsValue
+ * methods before resorting to locking. This is used to avoid
+ * unbounded retries if tables undergo continuous modification
+ * which would make it impossible to obtain an accurate result.
+ */
+ static final int RETRIES_BEFORE_LOCK = 2;
+
+ /* ---------------- Fields -------------- */
+
+ /**
+ * Mask value for indexing into segments. The upper bits of a
+ * key's hash code are used to choose the segment.
+ */
+ final int segmentMask;
+
+ /**
+ * Shift value for indexing within segments.
+ */
+ final int segmentShift;
+
+ /**
+ * The segments, each of which is a specialized hash table
+ */
+ final Segment[] segments;
+
+ private Index index;
+
+ private int startResult;
+
+ /* ---------------- Small Utilities -------------- */
+
+ /**
+ * Applies a supplemental hash function to a given hashCode, which
+ * defends against poor quality hash functions. This is critical
+ * because ConcurrentHashMap uses power-of-two length hash tables,
+ * that otherwise encounter collisions for hashCodes that do not
+ * differ in lower or upper bits.
+ */
+ private static int hash(int h) {
+ // Spread bits to regularize both segment and index locations,
+ // using variant of single-word Wang/Jenkins hash.
+ h += (h << 15) ^ 0xffffcd7d;
+ h ^= (h >>> 10);
+ h += (h << 3);
+ h ^= (h >>> 6);
+ h += (h << 2) + (h << 14);
+ return h ^ (h >>> 16);
+ }
+
+ /**
+ * Returns the segment that should be used for key with given hash
+ * @param hash the hash code for the key
+ * @return the segment
+ */
+ final Segment segmentFor(int hash) {
+ return segments[(hash >>> segmentShift) & segmentMask];
+ }
+
+ /* ---------------- Inner Classes -------------- */
+
+ /**
+ * Segments are specialized versions of hash tables. This
+ * subclasses from ReentrantLock opportunistically, just to
+ * simplify some locking and avoid separate construction.
+ */
+ static final class Segment extends ReentrantLock
+ implements
+ Serializable {
+ /*
+ * Segments maintain a table of entry lists that are ALWAYS
+ * kept in a consistent state, so can be read without locking.
+ * Next fields of nodes are immutable (final). All list
+ * additions are performed at the front of each bin. This
+ * makes it easy to check changes, and also fast to traverse.
+ * When nodes would otherwise be changed, new nodes are
+ * created to replace them. This works well for hash tables
+ * since the bin lists tend to be short. (The average length
+ * is less than two for the default load factor threshold.)
+ *
+ * Read operations can thus proceed without locking, but rely
+ * on selected uses of volatiles to ensure that completed
+ * write operations performed by other threads are
+ * noticed. For most purposes, the "count" field, tracking the
+ * number of elements, serves as that volatile variable
+ * ensuring visibility. This is convenient because this field
+ * needs to be read in many read operations anyway:
+ *
+ * - All (unsynchronized) read operations must first read the
+ * "count" field, and should not look at table entries if
+ * it is 0.
+ *
+ * - All (synchronized) write operations should write to
+ * the "count" field after structurally changing any bin.
+ * The operations must not take any action that could even
+ * momentarily cause a concurrent read operation to see
+ * inconsistent data. This is made easier by the nature of
+ * the read operations in Map. For example, no operation
+ * can reveal that the table has grown but the threshold
+ * has not yet been updated, so there are no atomicity
+ * requirements for this with respect to reads.
+ *
+ * As a guide, all critical volatile reads and writes to the
+ * count field are marked in code comments.
+ */
+
+ private static final long serialVersionUID = 2249069246763182397L;
+
+ /**
+ * The number of elements in this segment's region.
+ */
+ transient volatile int tupleCount;
+
+ transient volatile int keyCount;
+
+ /**
+ * Number of updates that alter the size of the table. This is
+ * used during bulk-read methods to make sure they see a
+ * consistent snapshot: If modCounts change during a traversal
+ * of segments computing size or checking containsValue, then
+ * we might have an inconsistent view of state so (usually)
+ * must retry.
+ */
+ transient int modCount;
+
+ /**
+ * The table is rehashed when its size exceeds this threshold.
+ * (The value of this field is always <tt>(int)(capacity *
+ * loadFactor)</tt>.)
+ */
+ transient int threshold;
+
+ /**
+ * The per-segment table.
+ */
+ transient volatile RightTupleList[] table;
+
+ /**
+ * The load factor for the hash table. Even though this value
+ * is same for all segments, it is replicated to avoid needing
+ * links to outer object.
+ * @serial
+ */
+ final float loadFactor;
+
+ private Index index;
+
+ Segment(Index index,
+ int initialCapacity,
+ float lf) {
+ loadFactor = lf;
+ setTable( new RightTupleList[initialCapacity] );
+ this.index = index;
+ }
+
+ static final Segment[] newArray(int i) {
+ return new Segment[i];
+ }
+
+ /**
+ * Sets table to new HashEntry array.
+ * Call only while holding lock or in constructor.
+ */
+ void setTable(RightTupleList[] newTable) {
+ threshold = (int) (newTable.length * loadFactor);
+ table = newTable;
+ }
+
+ /**
+ * Returns properly casted first entry of bin for given hash.
+ */
+ RightTupleList getFirst(int hash) {
+ RightTupleList[] tab = table;
+ return tab[hash & (tab.length - 1)];
+ }
+
+ /* Specialized implementations of map methods */
+
+ // Object get(Object key, int hash) {
+ // if (count != 0) { // read-volatile
+ // RightTuple e = getFirst(hash);
+ // while (e != null) {
+ // if (e.hash == hash && key.equals(e.key)) {
+ // Object v = e.value;
+ // if (v != null)
+ // return v;
+ // return readValueUnderLock(e); // recheck
+ // }
+ // e = e.next;
+ // }
+ // }
+ // return null;
+ // }
+ //
+ // boolean containsKey(Object key, int hash) {
+ // if (count != 0) { // read-volatile
+ // HashEntry e = getFirst(hash);
+ // while (e != null) {
+ // if (e.hash == hash && key.equals(e.key))
+ // return true;
+ // e = e.next;
+ // }
+ // }
+ // return false;
+ // }
+ void add(final RightTuple rightTuple,
+ int hashCode,
+ Object object) {
+ lock();
+ try {
+ final RightTupleList entry = getOrCreate( hashCode,
+ object );
+ rightTuple.setMemory( entry );
+ entry.add( rightTuple );
+ this.tupleCount++;
+ } finally {
+ unlock();
+ }
+ }
+
+ /**
+ * Removes the given right tuple from its bucket, dropping the bucket once it is empty.
+ */
+ void remove(RightTuple rightTuple,
+ int hashCode,
+ Object object) {
+ lock();
+ try {
+ int c = keyCount - 1;
+
+ RightTupleList[] tab = table;
+ int index = hashCode & (tab.length - 1);
+ RightTupleList first = tab[index];
+ RightTupleList e = first;
+ while ( e != null ) {
+ if ( e.matches( object,
+ hashCode ) ) {
+ break;
+ }
+ e = (RightTupleList) e.next;
+ }
+
+ if ( e == null ) {
+ // the bucket for this tuple is not present; nothing to remove
+ return;
+ }
+
+ e.remove( rightTuple );
+ tupleCount--;
+
+ if ( e.getFirst( null ) == null ) {
+ // list is now empty, so unlink it from the bucket chain
+ RightTupleList newFirst = (RightTupleList) e.getNext();
+ for ( RightTupleList p = first; p != e; p = (RightTupleList) p.getNext() ) {
+ newFirst = new RightTupleList( p,
+ newFirst );
+ }
+ tab[index] = newFirst;
+ keyCount = c; // write-volatile
+ }
+
+ } finally {
+ unlock();
+ }
+ }
+
+ RightTupleList get(final int hashCode,
+ final LeftTuple tuple) {
+ //this.index.setCachedValue( tuple );
+ lock();
+ try {
+ RightTupleList[] tab = table;
+ int index = hashCode & (tab.length - 1);
+ RightTupleList first = tab[index];
+ RightTupleList entry = first;
+
+ while ( entry != null ) {
+ if ( entry.matches( tuple,
+ hashCode ) ) {
+ return entry;
+ }
+ entry = (RightTupleList) entry.getNext();
+ }
+
+ return entry;
+ } finally {
+ unlock();
+ }
+ }
+
+ private RightTupleList getOrCreate(int hashCode,
+ final Object object) {
+ int c = keyCount;
+
+ RightTupleList[] tab = table;
+ int index = hashCode & (tab.length - 1);
+ RightTupleList first = tab[index];
+ RightTupleList e = first;
+ while ( e != null ) {
+ if ( e.matches( object,
+ hashCode ) ) {
+ return e;
+ }
+ e = (RightTupleList) e.next;
+ }
+
+ if ( e == null ) {
+ if ( c++ > threshold ) { // ensure capacity
+ rehash();
+ // rehash() installed a new table, so re-read the bucket
+ tab = table;
+ index = hashCode & (tab.length - 1);
+ first = tab[index];
+ }
+ ++modCount;
+ e = new RightTupleList( this.index,
+ hashCode,
+ first );
+ tab[index] = e;
+ keyCount = c; // write-volatile
+ }
+
+ return e;
+ }
+
+ void rehash() {
+ RightTupleList[] oldTable = table;
+ int oldCapacity = oldTable.length;
+ if ( oldCapacity >= MAXIMUM_CAPACITY ) return;
+
+ /*
+ * Reclassify nodes in each list to new Map. Because we are
+ * using power-of-two expansion, the elements from each bin
+ * must either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next
+ * fields won't change. Statistically, at the default
+ * threshold, only about one-sixth of them need cloning when
+ * a table doubles. The nodes they replace will be garbage
+ * collectable as soon as they are no longer referenced by any
+ * reader thread that may be in the midst of traversing table
+ * right now.
+ */
+
+ RightTupleList[] newTable = new RightTupleList[oldCapacity << 1];
+ threshold = (int) (newTable.length * loadFactor);
+ int sizeMask = newTable.length - 1;
+ for ( int i = 0; i < oldCapacity; i++ ) {
+ // We need to guarantee that any existing reads of old Map can
+ // proceed. So we cannot yet null out each bin.
+ RightTupleList e = oldTable[i];
+
+ if ( e != null ) {
+ RightTupleList next = (RightTupleList) e.getNext();
+ int idx = e.hashCode() & sizeMask;
+
+ // Single node on list
+ if ( next == null ) newTable[idx] = e;
+
+ else {
+ // Reuse trailing consecutive sequence at same slot
+ RightTupleList lastRun = e;
+ int lastIdx = idx;
+ for ( RightTupleList last = next; last != null; last = (RightTupleList) last.getNext() ) {
+ int k = last.hashCode() & sizeMask;
+ if ( k != lastIdx ) {
+ lastIdx = k;
+ lastRun = last;
+ }
+ }
+ newTable[lastIdx] = lastRun;
+
+ // Clone all remaining nodes
+ for ( RightTupleList p = e; p != lastRun; p = (RightTupleList) p.getNext() ) {
+ int k = p.hashCode() & sizeMask;
+ RightTupleList n = newTable[k];
+ newTable[k] = new RightTupleList( p, n );
+ }
+ }
+ }
+ }
+ table = newTable;
+ }
+
+ void clear() {
+ if ( tupleCount != 0 ) {
+ lock();
+ try {
+ RightTupleList[] tab = table;
+ for ( int i = 0; i < tab.length; i++ )
+ tab[i] = null;
+ ++modCount;
+ tupleCount = 0; // write-volatile
+ keyCount = 0;
+ } finally {
+ unlock();
+ }
+ }
+ }
+ }
+
+ /* ---------------- Public operations -------------- */
+
+ /**
+ * Creates a new, empty map with the specified initial
+ * capacity, load factor and concurrency level.
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements.
+ * @param loadFactor the load factor threshold, used to control resizing.
+ * Resizing may be performed when the average number of elements per
+ * bin exceeds this threshold.
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation performs internal sizing
+ * to try to accommodate this many threads.
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive.
+ */
+ public ConcurrentHashTable(final FieldIndex[] index,
+ int initialCapacity,
+ float loadFactor,
+ int concurrencyLevel) {
+ this.startResult = RightTupleIndexHashTable.PRIME;
+ for ( int i = 0, length = index.length; i < length; i++ ) {
+ this.startResult = RightTupleIndexHashTable.PRIME * this.startResult + index[i].getExtractor().getIndex();
+ }
+
+ switch ( index.length ) {
+ case 0 :
+ throw new IllegalArgumentException( "FieldIndexHashTable cannot use an index[] of length 0" );
+ case 1 :
+ this.index = new SingleIndex( index,
+ this.startResult );
+ break;
+ case 2 :
+ this.index = new DoubleCompositeIndex( index,
+ this.startResult );
+ break;
+ case 3 :
+ this.index = new TripleCompositeIndex( index,
+ this.startResult );
+ break;
+ default :
+ throw new IllegalArgumentException( "FieldIndexHashTable cannot use an index[] of length great than 3" );
+ }
+
+ if ( !(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0 ) throw new IllegalArgumentException();
+
+ if ( concurrencyLevel > MAX_SEGMENTS ) concurrencyLevel = MAX_SEGMENTS;
+
+ // Find power-of-two sizes best matching arguments
+ int sshift = 0;
+ int ssize = 1;
+ while ( ssize < concurrencyLevel ) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ segmentShift = 32 - sshift;
+ segmentMask = ssize - 1;
+ this.segments = Segment.newArray( ssize );
+
+ if ( initialCapacity > MAXIMUM_CAPACITY ) initialCapacity = MAXIMUM_CAPACITY;
+ int c = initialCapacity / ssize;
+ if ( c * ssize < initialCapacity ) ++c;
+ int cap = 1;
+ while ( cap < c )
+ cap <<= 1;
+
+ for ( int i = 0; i < this.segments.length; ++i )
+ this.segments[i] = new Segment( this.index,
+ cap,
+ loadFactor );
+ }
+
+ /**
+ * Creates a new, empty map with the specified initial capacity
+ * and load factor and with the default concurrencyLevel (16).
+ *
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @param loadFactor the load factor threshold, used to control resizing.
+ * Resizing may be performed when the average number of elements per
+ * bin exceeds this threshold.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative or the load factor is nonpositive
+ *
+ * @since 1.6
+ */
+ public ConcurrentHashTable(final FieldIndex[] index,
+ int initialCapacity,
+ float loadFactor) {
+ this( index,
+ initialCapacity,
+ loadFactor,
+ DEFAULT_CONCURRENCY_LEVEL );
+ }
+
+ /**
+ * Creates a new, empty map with the specified initial capacity,
+ * and with default load factor (0.75) and concurrencyLevel (16).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative.
+ */
+ public ConcurrentHashTable(final FieldIndex[] index,
+ int initialCapacity) {
+ this( index,
+ initialCapacity,
+ DEFAULT_LOAD_FACTOR,
+ DEFAULT_CONCURRENCY_LEVEL );
+ }
+
+ /**
+ * Creates a new, empty map with a default initial capacity (16),
+ * load factor (0.75) and concurrencyLevel (16).
+ */
+ public ConcurrentHashTable(final FieldIndex[] index) {
+ this( index,
+ DEFAULT_INITIAL_CAPACITY,
+ DEFAULT_LOAD_FACTOR,
+ DEFAULT_CONCURRENCY_LEVEL );
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map contains no key-value mappings.
+ *
+ * @return <tt>true</tt> if this map contains no key-value mappings
+ */
+ public boolean isEmpty() {
+ final Segment[] segments = this.segments;
+ /*
+ * We keep track of per-segment modCounts to avoid ABA
+ * problems in which an element in one segment was added and
+ * in another removed during traversal, in which case the
+ * table was never actually empty at any point. Note the
+ * similar use of modCounts in the size() and containsValue()
+ * methods, which are the only other methods also susceptible
+ * to ABA problems.
+ */
+ int[] mc = new int[segments.length];
+ int mcsum = 0;
+ for ( int i = 0; i < segments.length; ++i ) {
+ if ( segments[i].tupleCount != 0 ) return false;
+ else mcsum += mc[i] = segments[i].modCount;
+ }
+ // If mcsum happens to be zero, then we know we got a snapshot
+ // before any modifications at all were made. This is
+ // probably common enough to bother tracking.
+ if ( mcsum != 0 ) {
+ for ( int i = 0; i < segments.length; ++i ) {
+ if ( segments[i].tupleCount != 0 || mc[i] != segments[i].modCount ) return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns the number of key-value mappings in this map. If the
+ * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
+ * <tt>Integer.MAX_VALUE</tt>.
+ *
+ * @return the number of key-value mappings in this map
+ */
+ public int size() {
+ final Segment[] segments = this.segments;
+ long sum = 0;
+ long check = 0;
+ int[] mc = new int[segments.length];
+ // Try a few times to get accurate count. On failure due to
+ // continuous async changes in table, resort to locking.
+ for ( int k = 0; k < RETRIES_BEFORE_LOCK; ++k ) {
+ check = 0;
+ sum = 0;
+ int mcsum = 0;
+ for ( int i = 0; i < segments.length; ++i ) {
+ sum += segments[i].tupleCount;
+ mcsum += mc[i] = segments[i].modCount;
+ }
+ if ( mcsum != 0 ) {
+ for ( int i = 0; i < segments.length; ++i ) {
+ check += segments[i].tupleCount;
+ if ( mc[i] != segments[i].modCount ) {
+ check = -1; // force retry
+ break;
+ }
+ }
+ }
+ if ( check == sum ) break;
+ }
+ if ( check != sum ) { // Resort to locking all segments
+ sum = 0;
+ for ( int i = 0; i < segments.length; ++i )
+ segments[i].lock();
+ for ( int i = 0; i < segments.length; ++i )
+ sum += segments[i].tupleCount;
+ for ( int i = 0; i < segments.length; ++i )
+ segments[i].unlock();
+ }
+ if ( sum > Integer.MAX_VALUE ) return Integer.MAX_VALUE;
+ else return (int) sum;
+ }
+
+ public void add(final RightTuple rightTuple) {
+ Object object = rightTuple.getFactHandle().getObject();
+ final int hashCode = this.index.hashCodeOf( object );
+ segmentFor( hashCode ).add( rightTuple,
+ hashCode,
+ object );
+ }
+
+ /**
+ * Removes the key (and its corresponding value) from this map.
+ * This method does nothing if the key is not in the map.
+ *
+ * @param key the key that needs to be removed
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>
+ * @throws NullPointerException if the specified key is null
+ */
+ public void remove(final RightTuple rightTuple) {
+ Object object = rightTuple.getFactHandle().getObject();
+ final int hashCode = this.index.hashCodeOf( object );
+ segmentFor( hashCode ).remove( rightTuple,
+ hashCode,
+ object );
+ }
+
+ public RightTupleList get(final LeftTuple tuple) {
+ final int hashCode = this.index.hashCodeOf( tuple );
+ return segmentFor( hashCode ).get( hashCode,
+ tuple );
+ }
+
+ /**
+ * Removes all of the mappings from this map.
+ */
+ public void clear() {
+ for ( int i = 0; i < segments.length; ++i )
+ segments[i].clear();
+ }
+
+}
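
The table above is a trimmed-down port of Doug Lea's ConcurrentHashMap: the upper bits of a supplemental hash pick a Segment, and each Segment is an independently lockable hash table, so writers on different segments never contend. A small, runnable sketch of the segment-selection arithmetic, for 16 segments (segmentShift = 28, segmentMask = 15, as the defaults above produce); the hash spreader is copied from ConcurrentHashTable.hash():

    public class SegmentDemo {
        static int hash(int h) {
            // same single-word Wang/Jenkins variant as ConcurrentHashTable.hash()
            h += (h << 15) ^ 0xffffcd7d;
            h ^= (h >>> 10);
            h += (h << 3);
            h ^= (h >>> 6);
            h += (h << 2) + (h << 14);
            return h ^ (h >>> 16);
        }

        public static void main(String[] args) {
            int segmentShift = 28, segmentMask = 15;
            for (int key = 0; key < 4; key++) {
                int h = hash(key);
                // the upper bits choose the segment, exactly as in segmentFor()
                System.out.println(key + " -> segment " + ((h >>> segmentShift) & segmentMask));
            }
        }
    }
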
Added: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentRightTupleList.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentRightTupleList.java (rev 0)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/ConcurrentRightTupleList.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -0,0 +1,181 @@
+/**
+ *
+ */
+package org.drools.util;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.drools.common.InternalFactHandle;
+import org.drools.reteoo.LeftTuple;
+import org.drools.reteoo.RightTuple;
+import org.drools.reteoo.RightTupleMemory;
+import org.drools.util.AbstractHashTable.Index;
+
+public class ConcurrentRightTupleList
+ implements
+ RightTupleMemory,
+ Entry {
+ private static final long serialVersionUID = 400L;
+
+ public AtomicReference<Entry> previous;
+ public AtomicReference<Entry> next;
+
+ public AtomicReference<RightTuple> first;
+ public AtomicReference<RightTuple> last;
+
+ private final int hashCode;
+ private final Index index;
+
+ public ConcurrentRightTupleList() {
+ // this is not an index bucket
+ this.hashCode = 0;
+ this.index = null;
+
+ this.previous = new AtomicReference<Entry>();
+ this.next = new AtomicReference<Entry>();
+
+ this.first = new AtomicReference<RightTuple>();
+ this.last = new AtomicReference<RightTuple>();
+ }
+
+ public ConcurrentRightTupleList(final Index index,
+ final int hashCode) {
+ this.index = index;
+ this.hashCode = hashCode;
+
+ // the atomic cells must be created here too, or the first add() will NPE
+ this.previous = new AtomicReference<Entry>();
+ this.next = new AtomicReference<Entry>();
+
+ this.first = new AtomicReference<RightTuple>();
+ this.last = new AtomicReference<RightTuple>();
+ }
+
+ public RightTuple getFirst(LeftTuple leftTuple) {
+ return this.first.get();
+ }
+
+ public RightTuple getLast(LeftTuple leftTuple) {
+ return this.last.get();
+ }
+
+ public void add(final RightTuple rightTuple) {
+ if ( this.last.get() != null ) {
+ this.last.get().setNext( rightTuple );
+ rightTuple.setPrevious( this.last.get() );
+ this.last.set( rightTuple );
+ } else {
+ this.first.set( rightTuple );
+ this.last.set( rightTuple );
+ }
+ }
+
+ /**
+ * We assume that this rightTuple is contained in this hash table
+ */
+ public void remove(final RightTuple rightTuple) {
+ RightTuple previous = (RightTuple) rightTuple.getPrevious();
+ RightTuple next = (RightTuple) rightTuple.getNext();
+
+ if ( previous != null && next != null ) {
+ // remove from middle
+ previous.setNext( next );
+ next.setPrevious( previous );
+ } else if ( next != null ) {
+ // remove from first
+ this.first.set( next );
+ next.setPrevious( null );
+ } else if ( previous != null ) {
+ // remove from end
+ this.last.set( previous );
+ previous.setNext( null );
+ } else {
+ // it was the only entry, so clear the list
+ this.last.set( null );
+ this.first.set( null );
+ }
+
+ rightTuple.setPrevious( null );
+ rightTuple.setNext( null );
+ }
+
+ public RightTuple get(final InternalFactHandle handle) {
+ RightTuple current = this.first.get();
+ while ( current != null ) {
+ if ( handle == current.getFactHandle() ) {
+ return current;
+ }
+ current = (RightTuple) current.getNext();
+ }
+ return null;
+ }
+
+ public boolean contains(final InternalFactHandle handle) {
+ return get( handle ) != null;
+ }
+
+ public RightTuple get(final RightTuple rightTuple) {
+ InternalFactHandle handle = rightTuple.getFactHandle();
+ RightTuple current = this.first.get();
+ while ( current != null ) {
+ if ( handle == current.getFactHandle() ) {
+ return current;
+ }
+ current = (RightTuple) current.getNext();
+ }
+ return null;
+ }
+
+ public boolean contains(final RightTuple rightTuple) {
+ return get( rightTuple ) != null;
+ }
+
+ public int size() {
+ int i = 0;
+ RightTuple current = this.first.get();
+ while ( current != null ) {
+ current = (RightTuple) current.getNext();
+ i++;
+ }
+ return i;
+ }
+
+ public Iterator iterator() {
+ throw new UnsupportedOperationException();
+ }
+
+ public boolean matches(final Object object,
+ final int objectHashCode) {
+ return this.hashCode == objectHashCode && this.index.equal( this.first.get().getFactHandle().getObject(),
+ object );
+ }
+
+ public boolean matches(final LeftTuple tuple,
+ final int tupleHashCode) {
+ return this.hashCode == tupleHashCode && this.index.equal( this.first.get().getFactHandle().getObject(),
+ tuple );
+ }
+
+ public int hashCode() {
+ return this.hashCode;
+ }
+
+ public boolean equals(final Object object) {
+ final ConcurrentRightTupleList other = (ConcurrentRightTupleList) object;
+ return this.hashCode == other.hashCode && this.index == other.index;
+ }
+
+ public Entry getNext() {
+ return this.next.get();
+ }
+
+ public void setNext(final Entry next) {
+ this.next.set( next );
+ }
+
+ public boolean isIndexed() {
+ return (this.index != null);
+ }
+
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ for ( RightTuple rightTuple = (RightTuple) this.first.get(); rightTuple != null; rightTuple = (RightTuple) rightTuple.getNext() ) {
+ builder.append( rightTuple );
+ }
+
+ return builder.toString();
+ }
+}
\ No newline at end of file
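
ConcurrentRightTupleList keeps the bucket's first/last pointers in AtomicReference cells so that, in the design the Segment comments describe, readers could traverse a bucket without taking the segment lock while writers mutate it under the Segment's ReentrantLock (the ported get() above still locks for now). A small, runnable sketch of that read-without-locking, write-under-lock split, with hypothetical types:

    import java.util.concurrent.atomic.AtomicReference;
    import java.util.concurrent.locks.ReentrantLock;

    // Hypothetical bucket: writers serialize on a lock, readers only use volatile reads.
    class Bucket {
        static class Item {
            final Object value;
            final AtomicReference<Item> next = new AtomicReference<Item>();
            Item(Object value) { this.value = value; }
        }

        private final ReentrantLock lock = new ReentrantLock();
        private final AtomicReference<Item> first = new AtomicReference<Item>();

        void add(Object value) {            // writer: locked
            lock.lock();
            try {
                Item item = new Item(value);
                item.next.set(first.get()); // link before publishing
                first.set(item);            // the volatile write publishes the item
            } finally {
                lock.unlock();
            }
        }

        boolean contains(Object value) {    // reader: lock-free traversal
            for (Item i = first.get(); i != null; i = i.next.get()) {
                if (i.value.equals(value)) return true;
            }
            return false;
        }
    }

    class BucketDemo {
        public static void main(String[] args) {
            Bucket bucket = new Bucket();
            bucket.add("stilton");
            System.out.println(bucket.contains("stilton")); // prints true
        }
    }
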
Modified: labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/RightTupleList.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/RightTupleList.java 2009-08-17 22:10:13 UTC (rev 28967)
+++ labs/jbossrules/trunk/drools-core/src/main/java/org/drools/util/RightTupleList.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -37,7 +37,23 @@
this.index = index;
this.hashCode = hashCode;
}
+
+ public RightTupleList(final Index index,
+ final int hashCode,
+ final Entry next) {
+ this.index = index;
+ this.hashCode = hashCode;
+ this.next = next;
+ }
+ public RightTupleList(RightTupleList p, final Entry next) {
+ this.index = p.index;
+ this.hashCode = p.hashCode;
+ this.next = next;
+ this.first = p.first;
+ this.last = p.last;
+ }
+
public RightTuple getFirst(LeftTuple leftTuple) {
return this.first;
}
@@ -161,6 +177,7 @@
public boolean matches(final Object object,
final int objectHashCode) {
+ System.out.println( index + " : " + this.first + " : " + object );
return this.hashCode == objectHashCode && this.index.equal( this.first.getFactHandle().getObject(),
object );
}
@@ -191,6 +208,10 @@
public boolean isIndexed() {
return (this.index != null);
}
+
+ public Index getIndex() {
+ return this.index;
+ }
public String toString() {
StringBuilder builder = new StringBuilder();
Added: labs/jbossrules/trunk/drools-core/src/test/java/org/drools/util/ConcurrentRightTupleIndexHashTableTest.java
===================================================================
--- labs/jbossrules/trunk/drools-core/src/test/java/org/drools/util/ConcurrentRightTupleIndexHashTableTest.java (rev 0)
+++ labs/jbossrules/trunk/drools-core/src/test/java/org/drools/util/ConcurrentRightTupleIndexHashTableTest.java 2009-08-18 03:31:50 UTC (rev 28968)
@@ -0,0 +1,601 @@
+package org.drools.util;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import org.drools.Cheese;
+import org.drools.base.ClassFieldAccessorCache;
+import org.drools.base.ClassFieldAccessorStore;
+import org.drools.base.ClassObjectType;
+import org.drools.base.ValueType;
+import org.drools.base.evaluators.EqualityEvaluatorsDefinition;
+import org.drools.base.evaluators.Operator;
+import org.drools.common.DefaultFactHandle;
+import org.drools.common.InternalFactHandle;
+import org.drools.reteoo.LeftTuple;
+import org.drools.reteoo.RightTuple;
+import org.drools.rule.Declaration;
+import org.drools.rule.Pattern;
+import org.drools.spi.InternalReadAccessor;
+import org.drools.util.AbstractHashTable.FieldIndex;
+
+public class ConcurrentRightTupleIndexHashTableTest extends TestCase {
+ EqualityEvaluatorsDefinition equals = new EqualityEvaluatorsDefinition();
+
+ ClassFieldAccessorStore store = new ClassFieldAccessorStore();
+
+ protected void setUp() throws Exception {
+ store.setClassFieldAccessorCache( new ClassFieldAccessorCache( Thread.currentThread().getContextClassLoader() ) );
+ store.setEagerWire( true );
+ }
+
+ public void testSingleEntry() throws Exception {
+ final InternalReadAccessor extractor = store.getReader( Cheese.class,
+ "type",
+ getClass().getClassLoader() );
+
+ final Pattern pattern = new Pattern( 0,
+ new ClassObjectType( Cheese.class ) );
+
+ final Declaration declaration = new Declaration( "typeOfCheese",
+ extractor,
+ pattern );
+
+ final FieldIndex fieldIndex = new FieldIndex( extractor,
+ declaration,
+ equals.getEvaluator( ValueType.STRING_TYPE,
+ Operator.EQUAL ) );
+
+ final ConcurrentHashTable map = new ConcurrentHashTable( new FieldIndex[]{fieldIndex} );
+
+ final Cheese cheddar = new Cheese( "cheddar",
+ 10 );
+ final InternalFactHandle cheddarHandle1 = new DefaultFactHandle( 0,
+ cheddar );
+
+ assertEquals( 0,
+ map.size() );
+ assertNull( map.get( new LeftTuple( cheddarHandle1,
+ null,
+ true ) ) );
+
+ final Cheese stilton1 = new Cheese( "stilton",
+ 35 );
+ RightTuple stiltonRightTuple = new RightTuple( new DefaultFactHandle( 1,
+ stilton1 ),
+ null );
+
+ map.add( stiltonRightTuple );
+
+ assertEquals( 1,
+ map.size() );
+
+ final Cheese stilton2 = new Cheese( "stilton",
+ 80 );
+ final InternalFactHandle stiltonHandle2 = new DefaultFactHandle( 2,
+ stilton2 );
+
+ final RightTupleList list = map.get( new LeftTuple( stiltonHandle2,
+ null,
+ true ) );
+ assertSame( stiltonRightTuple.getFactHandle(),
+ list.first.getFactHandle() );
+ assertNull( list.first.getNext() );
+ }
+
+ public void testTwoDifferentEntries() throws Exception {
+ final InternalReadAccessor extractor = store.getReader( Cheese.class,
+ "type",
+ getClass().getClassLoader() );
+
+ final Pattern pattern = new Pattern( 0,
+ new ClassObjectType( Cheese.class ) );
+
+ final Declaration declaration = new Declaration( "typeOfCheese",
+ extractor,
+ pattern );
+
+ final FieldIndex fieldIndex = new FieldIndex( extractor,
+ declaration,
+ equals.getEvaluator( ValueType.STRING_TYPE,
+ Operator.EQUAL ) );
+
+ final ConcurrentHashTable map = new ConcurrentHashTable( new FieldIndex[]{fieldIndex} );
+
+ assertEquals( 0,
+ map.size() );
+
+ final Cheese stilton1 = new Cheese( "stilton",
+ 35 );
+ final InternalFactHandle stiltonHandle1 = new DefaultFactHandle( 1,
+ stilton1 );
+ map.add( new RightTuple( stiltonHandle1,
+ null ) );
+
+ final Cheese cheddar1 = new Cheese( "cheddar",
+ 35 );
+ final InternalFactHandle cheddarHandle1 = new DefaultFactHandle( 2,
+ cheddar1 );
+ map.add( new RightTuple( cheddarHandle1,
+ null ) );
+
+ assertEquals( 2,
+ map.size() );
+
+ final Cheese stilton2 = new Cheese( "stilton",
+ 77 );
+ final InternalFactHandle stiltonHandle2 = new DefaultFactHandle( 2,
+ stilton2 );
+ RightTupleList list = map.get( new LeftTuple( stiltonHandle2,
+ null,
+ true ) );
+ assertSame( stiltonHandle1,
+ list.first.getFactHandle() );
+ assertNull( list.first.getNext() );
+
+ final Cheese cheddar2 = new Cheese( "cheddar",
+ 5 );
+ final InternalFactHandle cheddarHandle2 = new DefaultFactHandle( 2,
+ cheddar2 );
+ list = map.get( new LeftTuple( cheddarHandle2,
+ null,
+ true ) );
+ assertSame( cheddarHandle1,
+ list.first.getFactHandle() );
+ assertNull( list.first.getNext() );
+ }
+
+ public void testTwoEqualEntries() throws Exception {
+ final InternalReadAccessor extractor = store.getReader( Cheese.class,
+ "type",
+ getClass().getClassLoader() );
+
+ final Pattern pattern = new Pattern( 0,
+ new ClassObjectType( Cheese.class ) );
+
+ final Declaration declaration = new Declaration( "typeOfCheese",
+ extractor,
+ pattern );
+
+ final FieldIndex fieldIndex = new FieldIndex( extractor,
+ declaration,
+ equals.getEvaluator( ValueType.STRING_TYPE,
+ Operator.EQUAL ) );
+
+ final ConcurrentHashTable map = new ConcurrentHashTable( new FieldIndex[]{fieldIndex} );
+
+ assertEquals( 0,
+ map.size() );
+
+ final Cheese stilton1 = new Cheese( "stilton",
+ 35 );
+ final InternalFactHandle stiltonHandle1 = new DefaultFactHandle( 1,
+ stilton1 );
+ map.add( new RightTuple( stiltonHandle1,
+ null ) );
+
+ final Cheese cheddar1 = new Cheese( "cheddar",
+ 35 );
+ final InternalFactHandle cheddarHandle1 = new DefaultFactHandle( 2,
+ cheddar1 );
+ map.add( new RightTuple( cheddarHandle1,
+ null ) );
+
+ final Cheese stilton2 = new Cheese( "stilton",
+ 81 );
+ final InternalFactHandle stiltonHandle2 = new DefaultFactHandle( 3,
+ stilton2 );
+ map.add( new RightTuple( stiltonHandle2,
+ null ) );
+
+ assertEquals( 3,
+ map.size() );
+
+ // Check they are correctly chained to the same FieldIndexEntry
+ final Cheese stilton3 = new Cheese( "stilton",
+ 89 );
+ final InternalFactHandle stiltonHandle3 = new DefaultFactHandle( 4,
+ stilton3 );
+
+ final RightTupleList list = map.get( new LeftTuple( stiltonHandle3,
+ null,
+ true ) );
+ assertSame( stiltonHandle1,
+ list.first.getFactHandle() );
+ assertSame( stiltonHandle2,
+ ((RightTuple) list.first.getNext()).getFactHandle() );
+ }
+
+ public void testTwoDifferentEntriesSameHashCode() throws Exception {
+ final InternalReadAccessor extractor = store.getReader( TestClass.class,
+ "object",
+ getClass().getClassLoader() );
+
+ final Pattern pattern = new Pattern( 0,
+ new ClassObjectType( TestClass.class ) );
+
+ final Declaration declaration = new Declaration( "theObject",
+ extractor,
+ pattern );
+
+ final FieldIndex fieldIndex = new FieldIndex( extractor,
+ declaration,
+ equals.getEvaluator( ValueType.OBJECT_TYPE,
+ Operator.EQUAL ) );
+
+ final ConcurrentHashTable map = new ConcurrentHashTable( new FieldIndex[]{fieldIndex} );
+
+ final TestClass c1 = new TestClass( 0,
+ new TestClass( 20,
+ "stilton" ) );
+
+ final InternalFactHandle ch1 = new DefaultFactHandle( 1,
+ c1 );
+
+ map.add( new RightTuple( ch1,
+ null ) );
+
+ final TestClass c2 = new TestClass( 0,
+ new TestClass( 20,
+ "cheddar" ) );
+ final InternalFactHandle ch2 = new DefaultFactHandle( 2,
+ c2 );
+ map.add( new RightTuple( ch2,
+ null ) );
+
+ // same hashcode, but different values, so it should result in a size of 2
+ assertEquals( 2,
+ map.size() );
+
+ // however both are in the same table bucket
+
+ // this table bucket will have two FieldIndexEntries, as they are actually two different values
+// Entry[] entries = getEntries( map );
+// assertEquals( 1,
+// entries.length );
+// RightTupleList list = (RightTupleList) entries[0];
+// assertSame( ch2,
+// list.first.getFactHandle() );
+// assertNull( list.first.getNext() );
+//
+// assertSame( ch1,
+// ((RightTupleList) list.next).first.getFactHandle() );
+// assertNull( ((RightTupleList) list.next).first.getNext() );
+// assertNull( ((RightTupleList) list.next).next );
+ }
+
+ public void testRemove() throws Exception {
+ final InternalReadAccessor extractor = store.getReader( Cheese.class,
+ "type",
+ getClass().getClassLoader() );
+
+ final Pattern pattern = new Pattern( 0,
+ new ClassObjectType( Cheese.class ) );
+
+ final Declaration declaration = new Declaration( "typeOfCheese",
+ extractor,
+ pattern );
+
+ final FieldIndex fieldIndex = new FieldIndex( extractor,
+ declaration,
+ equals.getEvaluator( ValueType.STRING_TYPE,
+ Operator.EQUAL ) );
+
+ final ConcurrentHashTable map = new ConcurrentHashTable( new FieldIndex[]{fieldIndex} );
+
+ assertEquals( 0,
+ map.size() );
+
+ final Cheese stilton1 = new Cheese( "stilton",
+ 35 );
+ final InternalFactHandle stiltonHandle1 = new DefaultFactHandle( 1,
+ stilton1 );
+ RightTuple stiltonRightTuple1 = new RightTuple( stiltonHandle1,
+ null );
+ map.add( stiltonRightTuple1 );
+
+ final Cheese cheddar1 = new Cheese( "cheddar",
+ 35 );
+ final InternalFactHandle cheddarHandle1 = new DefaultFactHandle( 2,
+ cheddar1 );
+ RightTuple cheddarRightTuple1 = new RightTuple( cheddarHandle1,
+ null );
+ map.add( cheddarRightTuple1 );
+
+ final Cheese stilton2 = new Cheese( "stilton",
+ 81 );
+ final InternalFactHandle stiltonHandle2 = new DefaultFactHandle( 3,
+ stilton2 );
+ RightTuple stiltonRightTuple2 = new RightTuple( stiltonHandle2,
+ null );
+ map.add( stiltonRightTuple2 );
+
+ assertEquals( 3,
+ map.size() );
+// assertEquals( 2,
+// tablePopulationSize( map ) );
+
+ // cheddar is in its own bucket, which should be removed once empty. We cannot have
+ // empty FieldIndexEntries in the Map, as they get their value from the first FactEntry.
+ map.remove( cheddarRightTuple1 );
+ assertEquals( 2,
+ map.size() );
+// assertEquals( 1,
+// tablePopulationSize( map ) );
+
+ // We remove stiltonHandle2, but there is still one more stilton, so the bucket population should stay the same
+ map.remove( stiltonRightTuple2 );
+ assertEquals( 1,
+ map.size() );
+// assertEquals( 1,
+// tablePopulationSize( map ) );
+
+ // No more stiltons, so the table should be empty
+ map.remove( stiltonRightTuple1 );
+ assertEquals( 0,
+ map.size() );
+// assertEquals( 0,
+// tablePopulationSize( map ) );
+ }
+
+ public void testResize() throws Exception {
+ final InternalReadAccessor extractor = store.getReader( Cheese.class,
+ "type",
+ getClass().getClassLoader() );
+
+ final Pattern pattern = new Pattern( 0,
+ new ClassObjectType( Cheese.class ) );
+
+ final Declaration declaration = new Declaration( "typeOfCheese",
+ extractor,
+ pattern );
+
+ final FieldIndex fieldIndex = new FieldIndex( extractor,
+ declaration,
+ equals.getEvaluator( ValueType.STRING_TYPE,
+ Operator.EQUAL ) );
+
+ final ConcurrentHashTable map = new ConcurrentHashTable( new FieldIndex[]{fieldIndex}, 16,
+ 0.75f );
+
+ assertEquals( 0,
+ map.size() );
+
+ final Cheese stilton1 = new Cheese( "stilton",
+ 35 );
+ map.add( newRightTuple( 1,
+ stilton1 ) );
+
+ final Cheese stilton2 = new Cheese( "stilton",
+ 81 );
+ map.add( newRightTuple( 2,
+ stilton2 ) );
+
+ final Cheese cheddar1 = new Cheese( "cheddar",
+ 35 );
+ map.add( newRightTuple( 3,
+ cheddar1 ) );
+
+ final Cheese cheddar2 = new Cheese( "cheddar",
+ 38 );
+ map.add( newRightTuple( 4,
+ cheddar2 ) );
+
+ final Cheese brie = new Cheese( "brie",
+ 293 );
+ map.add( newRightTuple( 5,
+ brie ) );
+
+ final Cheese mozerella = new Cheese( "mozerella",
+ 15 );
+ map.add( newRightTuple( 6,
+ mozerella ) );
+
+ final Cheese dolcelatte = new Cheese( "dolcelatte",
+ 284 );
+ map.add( newRightTuple( 7,
+ dolcelatte ) );
+
+ final Cheese camembert1 = new Cheese( "camembert",
+ 924 );
+ map.add( newRightTuple( 8,
+ camembert1 ) );
+
+ final Cheese camembert2 = new Cheese( "camembert",
+ 765 );
+ map.add( newRightTuple( 9,
+ camembert2 ) );
+
+ final Cheese redLeicestor = new Cheese( "red leicestor",
+ 23 );
+ map.add( newRightTuple( 10,
+ redLeicestor ) );
+
+ final Cheese wensleydale = new Cheese( "wensleydale",
+ 20 );
+ map.add( newRightTuple( 11,
+ wensleydale ) );
+
+ final Cheese edam = new Cheese( "edam",
+ 12 );
+ map.add( newRightTuple( 12,
+ edam ) );
+
+ final Cheese goude1 = new Cheese( "goude",
+ 93 );
+ map.add( newRightTuple( 13,
+ goude1 ) );
+
+ final Cheese goude2 = new Cheese( "goude",
+ 88 );
+ map.add( newRightTuple( 14,
+ goude2 ) );
+
+ final Cheese gruyere = new Cheese( "gruyere",
+ 82 );
+ map.add( newRightTuple( 15,
+ gruyere ) );
+
+ final Cheese emmental = new Cheese( "emmental",
+ 98 );
+ map.add( newRightTuple( 16,
+ emmental ) );
+
+ // At this point we have 16 facts but only 12 different types of cheeses
+ // so there is no table resize and the table length is still 16
+
+ assertEquals( 16,
+ map.size() );
+
+// Entry[] table = map.getTable();
+// assertEquals( 16,
+// table.length );
+
+ final Cheese feta = new Cheese( "feta",
+ 48 );
+ map.add( newRightTuple( 2,
+ feta ) );
+
+ // This adds our 13th type of cheese. The map is created with an initial capacity of 16 and
+ // a load factor of 75%, so after the 12th key it should resize the table to 32.
+ assertEquals( 17,
+ map.size() );
+
+// table = map.getTable();
+// assertEquals( 32,
+// table.length );
+
+ final Cheese haloumi = new Cheese( "haloumi",
+ 48 );
+ map.add( newRightTuple( 2,
+ haloumi ) );
+
+ final Cheese chevre = new Cheese( "chevre",
+ 48 );
+ map.add( newRightTuple( 2,
+ chevre ) );
+
+ }
+
+ private RightTuple newRightTuple(int id,
+ Object object) {
+ return new RightTuple( new DefaultFactHandle( id,
+ object ),
+ null );
+
+ }
+
+ public static class TestClass {
+ private int hashCode;
+ private Object object;
+
+ public TestClass() {
+
+ }
+
+ public TestClass(final int hashCode,
+ final Object object) {
+ this.hashCode = hashCode;
+ this.object = object;
+ }
+
+ public Object getObject() {
+ return this.object;
+ }
+
+ public void setObject(final Object object) {
+ this.object = object;
+ }
+
+ public void setHashCode(final int hashCode) {
+ this.hashCode = hashCode;
+ }
+
+ public int hashCode() {
+ return this.hashCode;
+ }
+
+ public boolean equals(final Object obj) {
+ if ( this == obj ) {
+ return true;
+ }
+ if ( obj == null ) {
+ return false;
+ }
+ if ( getClass() != obj.getClass() ) {
+ return false;
+ }
+ final TestClass other = (TestClass) obj;
+
+ if ( this.object == null ) {
+ if ( other.object != null ) {
+ return false;
+ }
+ } else if ( !this.object.equals( other.object ) ) {
+ return false;
+ }
+ return true;
+ }
+ }
+
+ private int tablePopulationSize(final AbstractHashTable map) throws Exception {
+ final Field field = AbstractHashTable.class.getDeclaredField( "table" );
+ field.setAccessible( true );
+ final Entry[] array = (Entry[]) field.get( map );
+ int size = 0;
+ for ( int i = 0, length = array.length; i < length; i++ ) {
+ if ( array[i] != null ) {
+ size++;
+ }
+ }
+ return size;
+ }
+
+ private Entry[] getEntries(final AbstractHashTable map) throws Exception {
+ final Field field = AbstractHashTable.class.getDeclaredField( "table" );
+ field.setAccessible( true );
+ final List list = new ArrayList();
+
+ final Entry[] array = (Entry[]) field.get( map );
+ for ( int i = 0, length = array.length; i < length; i++ ) {
+ if ( array[i] != null ) {
+ list.add( array[i] );
+ }
+ }
+ return (Entry[]) list.toArray( new Entry[list.size()] );
+ }
+
+ public void testEmptyIterator() {
+ final InternalReadAccessor extractor = store.getReader( Cheese.class,
+ "type",
+ getClass().getClassLoader() );
+
+ final Pattern pattern = new Pattern( 0,
+ new ClassObjectType( Cheese.class ) );
+
+ final Declaration declaration = new Declaration( "typeOfCheese",
+ extractor,
+ pattern );
+
+ final FieldIndex fieldIndex = new FieldIndex( extractor,
+ declaration,
+ equals.getEvaluator( ValueType.STRING_TYPE,
+ Operator.EQUAL ) );
+
+ final RightTupleIndexHashTable map = new RightTupleIndexHashTable( new FieldIndex[]{fieldIndex} );
+
+ final Cheese stilton = new Cheese( "stilton",
+ 55 );
+ final InternalFactHandle stiltonHandle = new DefaultFactHandle( 2,
+ stilton );
+
+ assertNull( map.getFirst( new LeftTuple( stiltonHandle,
+ null,
+ true ) ) );
+ }
+
+
+}