DNA SVN: r1320 - in trunk: dna-graph/src/main/java/org/jboss/dna/graph/property and 17 other directories.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-16 18:25:36 -0500 (Mon, 16 Nov 2009)
New Revision: 1320
Added:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Validator.java
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java
trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/filters/
trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/UuidsQuery.java
trunk/dna-search/src/test/java/org/jboss/dna/search/query/
trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java
Removed:
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java
trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java
trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/property/ValueComparators.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/FullTextSearch.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/CanonicalPlanner.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableColumn.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableTable.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java
trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/FullTextSearchParserTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java
trunk/dna-search/pom.xml
trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java
trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java
trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties
trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
Log:
DNA-467 Continued implementation of the dna-search components. Current status is that the SearchEngine is nearly complete (feature-wise), but has had little testing.
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -133,6 +133,8 @@
public static I18n tableDoesNotExist;
public static I18n columnDoesNotExistOnTable;
public static I18n columnDoesNotExistInQuery;
+ public static I18n columnIsNotFullTextSearchable;
+ public static I18n tableIsNotFullTextSearchable;
public static I18n selectorDoesNotExistInQuery;
public static I18n propertyOnSelectorIsNotUsedInQuery;
public static I18n errorResolvingNodesFromLocationsUsingSourceAndWorkspace;
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/property/ValueComparators.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/property/ValueComparators.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/property/ValueComparators.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -261,6 +261,19 @@
}
};
/**
+ * A comparator of path segment values.
+ */
+ public static final Comparator<Path.Segment> PATH_SEGMENT_COMPARATOR = new Comparator<Path.Segment>() {
+
+ public int compare( Path.Segment o1,
+ Path.Segment o2 ) {
+ if (o1 == o2) return 0;
+ if (o1 == null) return -1;
+ if (o2 == null) return 1;
+ return o1.compareTo(o2);
+ }
+ };
+ /**
* A comparator of URI values.
*/
public static final Comparator<URI> URI_COMPARATOR = new Comparator<URI>() {
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -165,6 +165,24 @@
}
/**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals( Object obj ) {
+ if (obj == this) return true;
+ if (obj instanceof QueryContext) {
+ QueryContext that = (QueryContext)obj;
+ if (!this.context.equals(that.getExecutionContext())) return false;
+ if (!this.schemata.equals(that.getSchemata())) return false;
+ if (!this.variables.equals(that.getVariables())) return false;
+ return true;
+ }
+ return false;
+ }
+
+ /**
* Obtain a copy of this context, except that the copy uses the supplied execution context.
*
* @param context the execution context that should be used in the new query context
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -179,6 +179,7 @@
* {@link QueryResults#getTuples() tuples} in the results, and which can be used to access the individual values in each of
* the tuples.
*/
+ @Immutable
public interface Columns {
/**
* Get the columns.
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/FullTextSearch.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/FullTextSearch.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/FullTextSearch.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -190,25 +190,77 @@
}
/**
+ * A {@link Term} that represents a search term that requires another term to not appear.
+ */
+ public static class NegationTerm implements Term {
+ private final Term negated;
+
+ public NegationTerm( Term negatedTerm ) {
+ assert negatedTerm != null;
+ this.negated = negatedTerm;
+ }
+
+ /**
+ * Get the term that is negated.
+ *
+ * @return the negated term; never null
+ */
+ public Term getNegatedTerm() {
+ return negated;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ return negated.hashCode();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals( Object obj ) {
+ if (obj == this) return true;
+ if (obj instanceof NegationTerm) {
+ NegationTerm that = (NegationTerm)obj;
+ return this.getNegatedTerm().equals(that.getNegatedTerm());
+ }
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return "-" + negated.toString();
+ }
+ }
+
+ /**
* A {@link Term} that represents a single search term. The term may be comprised of multiple words.
*/
public static class SimpleTerm implements Term {
private final String value;
- private final boolean excluded;
private final boolean quoted;
/**
* Create a simple term with the value and whether the term is excluded or included.
*
* @param value the value that makes up the term
- * @param excluded true if the term should not appear, or false if the term is required
*/
- public SimpleTerm( String value,
- boolean excluded ) {
+ public SimpleTerm( String value ) {
assert value != null;
assert value.trim().length() > 0;
this.value = value;
- this.excluded = excluded;
this.quoted = this.value.indexOf(' ') != -1;
}
@@ -223,12 +275,12 @@
}
/**
- * Get whether or not this term is expected to appear in the results.
+ * Get the values of this term if the term is quoted.
*
- * @return true if the term is expected to not appear, or false if the term is expected to appear
+ * @return the array of terms; never null
*/
- public boolean isExcluded() {
- return excluded;
+ public String[] getValues() {
+ return value.split("/w");
}
/**
@@ -260,7 +312,6 @@
if (obj == this) return true;
if (obj instanceof SimpleTerm) {
SimpleTerm that = (SimpleTerm)obj;
- if (this.isExcluded() != that.isExcluded()) return false;
return this.getValue().equals(that.getValue());
}
return false;
@@ -273,8 +324,7 @@
*/
@Override
public String toString() {
- String value = quoted ? "\"" + this.value + "\"" : this.value;
- return excluded ? "-" + value : value;
+ return quoted ? "\"" + this.value + "\"" : this.value;
}
}
@@ -352,7 +402,7 @@
}
/**
- * A set of {@link Term}s that are ANDed together.
+ * A set of {@link Term}s that are ORed together.
*/
public static class Disjunction extends CompoundTerm {
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -511,18 +511,23 @@
}
protected final void enqueue( Visitable objectToBeVisited ) {
- itemQueue.add(objectToBeVisited);
+ if (objectToBeVisited != null) {
+ itemQueue.add(objectToBeVisited);
+ }
}
protected final void enqueue( Iterable<? extends Visitable> objectsToBeVisited ) {
for (Visitable objectToBeVisited : objectsToBeVisited) {
- itemQueue.add(objectToBeVisited);
+ if (objectToBeVisited != null) {
+ itemQueue.add(objectToBeVisited);
+ }
}
}
protected final void visitNext() {
if (!itemQueue.isEmpty()) {
Visitable first = (Visitable)itemQueue.removeFirst();
+ assert first != null;
first.accept(this);
}
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -31,6 +31,7 @@
import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.graph.query.model.FullTextSearch.Conjunction;
import org.jboss.dna.graph.query.model.FullTextSearch.Disjunction;
+import org.jboss.dna.graph.query.model.FullTextSearch.NegationTerm;
import org.jboss.dna.graph.query.model.FullTextSearch.SimpleTerm;
import org.jboss.dna.graph.query.model.FullTextSearch.Term;
@@ -88,10 +89,9 @@
}
protected Term parseTerm( TokenStream tokens ) {
- if (tokens.canConsume('-')) {
- return new SimpleTerm(removeQuotes(tokens.consume()), true);
- }
- return new SimpleTerm(removeQuotes(tokens.consume()), false);
+ boolean negated = tokens.canConsume('-');
+ Term result = new SimpleTerm(removeQuotes(tokens.consume()));
+ return negated ? new NegationTerm(result) : result;
}
/**
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/CanonicalPlanner.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/CanonicalPlanner.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/CanonicalPlanner.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -53,6 +53,7 @@
import org.jboss.dna.graph.query.plan.PlanNode.Property;
import org.jboss.dna.graph.query.plan.PlanNode.Type;
import org.jboss.dna.graph.query.validate.Schemata;
+import org.jboss.dna.graph.query.validate.Validator;
import org.jboss.dna.graph.query.validate.Schemata.Table;
import org.jboss.dna.graph.query.validate.Schemata.View;
@@ -143,10 +144,28 @@
// Process the orderings and limits ...
plan = attachSorting(context, plan, query.getOrderings());
plan = attachLimits(context, plan, query.getLimits());
+
+ // Validate that all the parts of the query are resolvable ...
+ validate(context, query, usedSources);
+
return plan;
}
/**
+ * Validate the supplied query.
+ *
+ * @param context the context in which the query is being planned
+ * @param query the set query to be planned
+ * @param usedSelectors the map of {@link SelectorName}s (aliases or names) used in the query.
+ */
+ protected void validate( QueryContext context,
+ QueryCommand query,
+ Map<SelectorName, Table> usedSelectors ) {
+ // Resolve everything ...
+ Visitors.visitAll(query, new Validator(context, usedSelectors));
+ }
+
+ /**
* Create a canonical query plan for the given set query.
*
* @param context the context in which the query is being planned
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -24,6 +24,7 @@
package org.jboss.dna.graph.query.process;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import org.jboss.dna.graph.Location;
import org.jboss.dna.graph.property.Name;
@@ -31,7 +32,6 @@
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.QueryResults.Columns;
import org.jboss.dna.graph.query.model.AllNodes;
-import org.jboss.dna.graph.query.model.And;
import org.jboss.dna.graph.query.model.Column;
import org.jboss.dna.graph.query.model.Constraint;
import org.jboss.dna.graph.query.model.Limit;
@@ -50,7 +50,7 @@
protected final PlanNode accessNode;
protected final SelectorName sourceName;
protected final List<Column> projectedColumns;
- protected final Constraint constraint;
+ protected final List<Constraint> andedConstraints;
protected final Limit limit;
protected AbstractAccessComponent( QueryContext context,
@@ -94,16 +94,13 @@
}
// Add the criteria ...
- Constraint constraint = null;
+ List<Constraint> andedConstraints = null;
for (PlanNode select : accessNode.findAllAtOrBelow(Type.SELECT)) {
Constraint selectConstraint = select.getProperty(Property.SELECT_CRITERIA, Constraint.class);
- if (constraint != null) {
- constraint = new And(constraint, selectConstraint);
- } else {
- constraint = selectConstraint;
- }
+ if (andedConstraints == null) andedConstraints = new ArrayList<Constraint>();
+ andedConstraints.add(selectConstraint);
}
- this.constraint = constraint;
+ this.andedConstraints = andedConstraints != null ? andedConstraints : Collections.<Constraint>emptyList();
// Find the limit ...
Limit limit = Limit.NONE;
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -51,8 +51,8 @@
/**
* An abstract {@link Processor} implementation that builds a tree of {@link ProcessingComponent} objects to perform the different
* parts of the query processing logic. Subclasses are required to only implement one method: the
- * {@link #createAccessComponent(QueryContext, PlanNode, Columns, Analyzer)} should create a ProcessorComponent object that will
- * perform the (low-level access) query described by the {@link PlanNode plan} given as a parameter.
+ * {@link #createAccessComponent(QueryCommand,QueryContext, PlanNode, Columns, Analyzer)} should create a ProcessorComponent
+ * object that will perform the (low-level access) query described by the {@link PlanNode plan} given as a parameter.
*/
public abstract class QueryProcessor implements Processor {
@@ -81,18 +81,24 @@
// Go through the plan and create the corresponding ProcessingComponents ...
Analyzer analyzer = createAnalyzer(context);
- ProcessingComponent component = createComponent(context, plan, columns, analyzer);
+ ProcessingComponent component = createComponent(command, context, plan, columns, analyzer);
long nanos2 = System.nanoTime();
statistics = statistics.withResultsFormulationTime(nanos2 - nanos);
-
- // Now execute the component ...
nanos = nanos2;
- tuples = component.execute();
+ if (component != null) {
+ // Now execute the component ...
+ tuples = component.execute();
+ } else {
+ // There must have been an error ...
+ assert context.getProblems().hasErrors();
+ tuples = Collections.emptyList();
+ }
+
} finally {
statistics = statistics.withExecutionTime(System.nanoTime() - nanos);
}
- if (tuples == null) tuples = Collections.emptyList();
+ assert tuples != null;
return new org.jboss.dna.graph.query.process.QueryResults(context, command, columns, statistics, tuples);
}
@@ -100,7 +106,7 @@
* Create an {@link Analyzer} implementation that should be used by the non-access {@link ProcessingComponent}s that evaluate
* criteria. By default, this method returns null, which means that any criteria evaluation will likely be pushed down under
* an {@link Type#ACCESS ACCESS} node (and thus handled by an
- * {@link #createAccessComponent(QueryContext, PlanNode, Columns, Analyzer) access component}.
+ * {@link #createAccessComponent(QueryCommand,QueryContext, PlanNode, Columns, Analyzer) access component}.
* <p>
* However, for more simple access components that are not capable of handling joins and other non-trivial criteria, simply
* return an Analyzer implementation that implements the methods using the source.
@@ -116,13 +122,15 @@
/**
* Create the {@link ProcessingComponent} that processes a single {@link Type#ACCESS} branch of a query plan.
*
+ * @param originalQuery the original query that is being executed; never null
* @param context the context in which query is being evaluated; never null
* @param accessNode the node in the query plan that represents the {@link Type#ACCESS} plan; never null
* @param resultColumns the columns that are to be returned; never null
* @param analyzer the criteria analyzer; never null
* @return the processing component; may not be null
*/
- protected abstract ProcessingComponent createAccessComponent( QueryContext context,
+ protected abstract ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+ QueryContext context,
PlanNode accessNode,
Columns resultColumns,
Analyzer analyzer );
@@ -133,17 +141,20 @@
* {@link PlanNode} objects in the optimized query plan, and the method is actually recursive (since the optimized query plan
* forms a tree). However, whenever this call structure reaches the {@link Type#ACCESS ACCESS} nodes in the query plan (which
* each represents a separate atomic low-level query to the underlying system), the
- * {@link #createAccessComponent(QueryContext, PlanNode, Columns, Analyzer)} method is called. Subclasses should create an
- * appropriate ProcessingComponent implementation that performs this atomic low-level query.
+ * {@link #createAccessComponent(QueryCommand,QueryContext, PlanNode, Columns, Analyzer)} method is called. Subclasses should
+ * create an appropriate ProcessingComponent implementation that performs this atomic low-level query.
*
+ * @param originalQuery the original query that is being executed; never null
* @param context the context in which query is being evaluated
* @param node the plan node for which the ProcessingComponent is to be created
* @param columns the definition of the result columns for this portion of the query
* @param analyzer the analyzer (returned from {@link #createAnalyzer(QueryContext)}) that should be used on the components
* that evaluate criteria; may be null if a best-effort should be made for the evaluation
- * @return the processing component for this plan node; never null
+ * @return the processing component for this plan node; or null if there was an error recorded in the
+ * {@link QueryContext#getProblems() problems}
*/
- protected ProcessingComponent createComponent( QueryContext context,
+ protected ProcessingComponent createComponent( QueryCommand originalQuery,
+ QueryContext context,
PlanNode node,
Columns columns,
Analyzer analyzer ) {
@@ -152,14 +163,18 @@
case ACCESS:
// Create the component to handle the ACCESS node ...
assert node.getChildCount() == 1;
- component = createAccessComponent(context, node, columns, analyzer);
+ component = createAccessComponent(originalQuery, context, node, columns, analyzer);
// // Don't do anything special with an access node at the moment ...
// component = createComponent(context, node.getFirstChild(), columns, analyzer);
break;
case DUP_REMOVE:
// Create the component under the DUP_REMOVE ...
assert node.getChildCount() == 1;
- ProcessingComponent distinctDelegate = createComponent(context, node.getFirstChild(), columns, analyzer);
+ ProcessingComponent distinctDelegate = createComponent(originalQuery,
+ context,
+ node.getFirstChild(),
+ columns,
+ analyzer);
component = new DistinctComponent(distinctDelegate);
break;
case GROUP:
@@ -167,8 +182,8 @@
case JOIN:
// Create the components under the JOIN ...
assert node.getChildCount() == 2;
- ProcessingComponent left = createComponent(context, node.getFirstChild(), columns, analyzer);
- ProcessingComponent right = createComponent(context, node.getLastChild(), columns, analyzer);
+ ProcessingComponent left = createComponent(originalQuery, context, node.getFirstChild(), columns, analyzer);
+ ProcessingComponent right = createComponent(originalQuery, context, node.getLastChild(), columns, analyzer);
// Create the join component ...
JoinAlgorithm algorithm = node.getProperty(Property.JOIN_ALGORITHM, JoinAlgorithm.class);
JoinType joinType = node.getProperty(Property.JOIN_TYPE, JoinType.class);
@@ -202,7 +217,11 @@
case LIMIT:
// Create the component under the LIMIT ...
assert node.getChildCount() == 1;
- ProcessingComponent limitDelegate = createComponent(context, node.getFirstChild(), columns, analyzer);
+ ProcessingComponent limitDelegate = createComponent(originalQuery,
+ context,
+ node.getFirstChild(),
+ columns,
+ analyzer);
// Then create the limit component ...
Integer rowLimit = node.getProperty(Property.LIMIT_COUNT, Integer.class);
Integer offset = node.getProperty(Property.LIMIT_OFFSET, Integer.class);
@@ -217,7 +236,11 @@
case PROJECT:
// Create the component under the PROJECT ...
assert node.getChildCount() == 1;
- ProcessingComponent projectDelegate = createComponent(context, node.getFirstChild(), columns, analyzer);
+ ProcessingComponent projectDelegate = createComponent(originalQuery,
+ context,
+ node.getFirstChild(),
+ columns,
+ analyzer);
// Then create the project component ...
List<Column> projectedColumns = node.getPropertyAsList(Property.PROJECT_COLUMNS, Column.class);
component = new ProjectComponent(projectDelegate, projectedColumns);
@@ -225,7 +248,11 @@
case SELECT:
// Create the component under the SELECT ...
assert node.getChildCount() == 1;
- ProcessingComponent selectDelegate = createComponent(context, node.getFirstChild(), columns, analyzer);
+ ProcessingComponent selectDelegate = createComponent(originalQuery,
+ context,
+ node.getFirstChild(),
+ columns,
+ analyzer);
// Then create the select component ...
Constraint constraint = node.getProperty(Property.SELECT_CRITERIA, Constraint.class);
component = new SelectComponent(selectDelegate, constraint, context.getVariables(), analyzer);
@@ -234,7 +261,7 @@
// Create the components under the SET_OPERATION ...
List<ProcessingComponent> setDelegates = new LinkedList<ProcessingComponent>();
for (PlanNode child : node) {
- setDelegates.add(createComponent(context, child, columns, analyzer));
+ setDelegates.add(createComponent(originalQuery, context, child, columns, analyzer));
}
// Then create the select component ...
Operation operation = node.getProperty(Property.SET_OPERATION, Operation.class);
@@ -255,7 +282,11 @@
case SORT:
// Create the component under the SORT ...
assert node.getChildCount() == 1;
- ProcessingComponent sortDelegate = createComponent(context, node.getFirstChild(), columns, analyzer);
+ ProcessingComponent sortDelegate = createComponent(originalQuery,
+ context,
+ node.getFirstChild(),
+ columns,
+ analyzer);
// Then create the sort component ...
List<Object> orderBys = node.getPropertyAsList(Property.SORT_ORDER_BY, Object.class);
if (orderBys.isEmpty()) {
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableColumn.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableColumn.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableColumn.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -29,13 +29,24 @@
@Immutable
class ImmutableColumn implements Column {
+
+ public static final boolean DEFAULT_FULL_TEXT_SEARCHABLE = false;
+
+ private final boolean fullTextSearchable;
private final String name;
private final PropertyType type;
protected ImmutableColumn( String name,
PropertyType type ) {
+ this(name, type, DEFAULT_FULL_TEXT_SEARCHABLE);
+ }
+
+ protected ImmutableColumn( String name,
+ PropertyType type,
+ boolean fullTextSearchable ) {
this.name = name;
this.type = type != null ? type : PropertyType.STRING;
+ this.fullTextSearchable = fullTextSearchable;
}
/**
@@ -59,6 +70,15 @@
/**
* {@inheritDoc}
*
+ * @see org.jboss.dna.graph.query.validate.Schemata.Column#isFullTextSearchable()
+ */
+ public boolean isFullTextSearchable() {
+ return fullTextSearchable;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
* @see java.lang.Object#toString()
*/
@Override
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -328,12 +328,34 @@
CheckArg.isNotEmpty(tableName, "tableName");
CheckArg.isNotEmpty(columnName, "columnName");
CheckArg.isNotNull(type, "type");
+ return addColumn(tableName, columnName, type, ImmutableColumn.DEFAULT_FULL_TEXT_SEARCHABLE);
+ }
+
+ /**
+ * Add a column with the supplied name and type to the named table. Any existing column with that name will be replaced
+ * with the new column. If the table does not yet exist, it will be added.
+ *
+ * @param tableName the name of the new table
+ * @param columnName the names of the column
+ * @param type the type for the column
+ * @param fullTextSearchable true if the column should be full-text searchable, or false if not
+ * @return this builder, for convenience in method chaining; never null
+ * @throws IllegalArgumentException if the table name is null or empty, the column name is null or empty, or if the
+ * property type is null
+ */
+ public Builder addColumn( String tableName,
+ String columnName,
+ PropertyType type,
+ boolean fullTextSearchable ) {
+ CheckArg.isNotEmpty(tableName, "tableName");
+ CheckArg.isNotEmpty(columnName, "columnName");
+ CheckArg.isNotNull(type, "type");
SelectorName selector = new SelectorName(tableName);
ImmutableTable existing = tables.get(selector);
ImmutableTable table = null;
if (existing == null) {
List<Column> columns = new ArrayList<Column>();
- columns.add(new ImmutableColumn(columnName, type));
+ columns.add(new ImmutableColumn(columnName, type, fullTextSearchable));
table = new ImmutableTable(selector, columns);
} else {
table = existing.withColumn(columnName, type);
@@ -343,6 +365,37 @@
}
/**
+ * Make sure the column on the named table is searchable.
+ *
+ * @param tableName the name of the new table
+ * @param columnName the names of the column
+ * @return this builder, for convenience in method chaining; never null
+ * @throws IllegalArgumentException if the table name is null or empty or if the column name is null or empty
+ */
+ public Builder makeSearchable( String tableName,
+ String columnName ) {
+ CheckArg.isNotEmpty(tableName, "tableName");
+ CheckArg.isNotEmpty(columnName, "columnName");
+ SelectorName selector = new SelectorName(tableName);
+ ImmutableTable existing = tables.get(selector);
+ ImmutableTable table = null;
+ if (existing == null) {
+ List<Column> columns = new ArrayList<Column>();
+ columns.add(new ImmutableColumn(columnName, PropertyType.STRING, true));
+ table = new ImmutableTable(selector, columns);
+ } else {
+ Column column = existing.getColumn(columnName);
+ PropertyType type = PropertyType.STRING;
+ if (column != null) {
+ type = column.getPropertyType();
+ }
+ table = existing.withColumn(columnName, type, true);
+ }
+ tables.put(table.getName(), table);
+ return this;
+ }
+
+ /**
* Add to the specified table a key that references the existing named columns.
*
* @param tableName the name of the new table
@@ -419,7 +472,8 @@
"The view references a non-existant column '"
+ column.getColumnName() + "' in '" + source.getName() + "'");
}
- viewColumns.add(new ImmutableColumn(viewColumnName, sourceColumn.getPropertyType()));
+ viewColumns.add(new ImmutableColumn(viewColumnName, sourceColumn.getPropertyType(),
+ sourceColumn.isFullTextSearchable()));
}
if (viewColumns.size() != columns.size()) {
// We weren't able to resolve all of the columns,
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableTable.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableTable.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableTable.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -187,10 +187,18 @@
return new ImmutableTable(getName(), newColumns);
}
+ public ImmutableTable withColumn( String name,
+ PropertyType type,
+ boolean fullTextSearchable ) {
+ List<Column> newColumns = new LinkedList<Column>(columns);
+ newColumns.add(new ImmutableColumn(name, type, fullTextSearchable));
+ return new ImmutableTable(getName(), newColumns);
+ }
+
public ImmutableTable withColumns( Iterable<Column> columns ) {
List<Column> newColumns = new LinkedList<Column>(this.getColumns());
for (Column column : columns) {
- newColumns.add(new ImmutableColumn(column.getName(), column.getPropertyType()));
+ newColumns.add(new ImmutableColumn(column.getName(), column.getPropertyType(), column.isFullTextSearchable()));
}
return new ImmutableTable(getName(), newColumns);
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -160,6 +160,13 @@
* @return the property type; never null
*/
PropertyType getPropertyType();
+
+ /**
+ * Get whether the column can be used in a full-text search.
+ *
+ * @return true if the column is full-text searchable, or false otherwise
+ */
+ boolean isFullTextSearchable();
}
/**
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Validator.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Validator.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Validator.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,339 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.validate;
+
+import java.util.Map;
+import org.jboss.dna.common.collection.Problems;
+import org.jboss.dna.graph.GraphI18n;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.model.AllNodes;
+import org.jboss.dna.graph.query.model.ChildNode;
+import org.jboss.dna.graph.query.model.ChildNodeJoinCondition;
+import org.jboss.dna.graph.query.model.Column;
+import org.jboss.dna.graph.query.model.DescendantNode;
+import org.jboss.dna.graph.query.model.DescendantNodeJoinCondition;
+import org.jboss.dna.graph.query.model.EquiJoinCondition;
+import org.jboss.dna.graph.query.model.FullTextSearch;
+import org.jboss.dna.graph.query.model.FullTextSearchScore;
+import org.jboss.dna.graph.query.model.Length;
+import org.jboss.dna.graph.query.model.LowerCase;
+import org.jboss.dna.graph.query.model.NamedSelector;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.PropertyExistence;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.model.SameNode;
+import org.jboss.dna.graph.query.model.SameNodeJoinCondition;
+import org.jboss.dna.graph.query.model.SelectorName;
+import org.jboss.dna.graph.query.model.Visitor;
+import org.jboss.dna.graph.query.model.Visitors.AbstractVisitor;
+import org.jboss.dna.graph.query.validate.Schemata.Table;
+
+/**
+ * A {@link Visitor} implementation that validates a query's used of a {@link Schemata} and records any problems as errors.
+ */
+public class Validator extends AbstractVisitor {
+
+ private final QueryContext context;
+ private final Problems problems;
+ private final Map<SelectorName, Table> selectorsByName;
+
+ /**
+ * @param context the query context
+ * @param selectorsByName the {@link Table tables} by their name or alias, as defined by the selectors
+ */
+ public Validator( QueryContext context,
+ Map<SelectorName, Table> selectorsByName ) {
+ this.context = context;
+ this.problems = this.context.getProblems();
+ this.selectorsByName = selectorsByName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.AllNodes)
+ */
+ @Override
+ public void visit( AllNodes obj ) {
+ // this table doesn't have to be in the list of selected tables
+ verifyTable(obj.getName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.ChildNode)
+ */
+ @Override
+ public void visit( ChildNode obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.ChildNodeJoinCondition)
+ */
+ @Override
+ public void visit( ChildNodeJoinCondition obj ) {
+ verify(obj.getParentSelectorName());
+ verify(obj.getChildSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.Column)
+ */
+ @Override
+ public void visit( Column obj ) {
+ verify(obj.getSelectorName(), obj.getPropertyName()); // don't care about the alias
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.DescendantNode)
+ */
+ @Override
+ public void visit( DescendantNode obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.DescendantNodeJoinCondition)
+ */
+ @Override
+ public void visit( DescendantNodeJoinCondition obj ) {
+ verify(obj.getAncestorSelectorName());
+ verify(obj.getDescendantSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.EquiJoinCondition)
+ */
+ @Override
+ public void visit( EquiJoinCondition obj ) {
+ verify(obj.getSelector1Name(), obj.getProperty1Name());
+ verify(obj.getSelector2Name(), obj.getProperty2Name());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.FullTextSearch)
+ */
+ @Override
+ public void visit( FullTextSearch obj ) {
+ SelectorName selectorName = obj.getSelectorName();
+ if (obj.getPropertyName() != null) {
+ Schemata.Column column = verify(selectorName, obj.getPropertyName());
+ if (column != null) {
+ // Make sure the column is full-text searchable ...
+ if (!column.isFullTextSearchable()) {
+ problems.addError(GraphI18n.columnIsNotFullTextSearchable, column.getName(), selectorName);
+ }
+ }
+ } else {
+ Table table = verify(selectorName);
+ if (table != null) {
+ // Make sure there is at least one column on the table that is full-text searchable ...
+ boolean searchable = false;
+ for (Schemata.Column column : table.getColumns()) {
+ if (column.isFullTextSearchable()) {
+ searchable = true;
+ break;
+ }
+ }
+ if (!searchable) {
+ problems.addError(GraphI18n.tableIsNotFullTextSearchable, selectorName);
+ }
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.FullTextSearchScore)
+ */
+ @Override
+ public void visit( FullTextSearchScore obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.Length)
+ */
+ @Override
+ public void visit( Length obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.LowerCase)
+ */
+ @Override
+ public void visit( LowerCase obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.NamedSelector)
+ */
+ @Override
+ public void visit( NamedSelector obj ) {
+ verify(obj.getAliasOrName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.NodeDepth)
+ */
+ @Override
+ public void visit( NodeDepth obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.NodeLocalName)
+ */
+ @Override
+ public void visit( NodeLocalName obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.NodeName)
+ */
+ @Override
+ public void visit( NodeName obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.NodePath)
+ */
+ @Override
+ public void visit( NodePath obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.PropertyExistence)
+ */
+ @Override
+ public void visit( PropertyExistence obj ) {
+ verify(obj.getSelectorName(), obj.getPropertyName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.PropertyValue)
+ */
+ @Override
+ public void visit( PropertyValue obj ) {
+ verify(obj.getSelectorName(), obj.getPropertyName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.SameNode)
+ */
+ @Override
+ public void visit( SameNode obj ) {
+ verify(obj.getSelectorName());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitors.AbstractVisitor#visit(org.jboss.dna.graph.query.model.SameNodeJoinCondition)
+ */
+ @Override
+ public void visit( SameNodeJoinCondition obj ) {
+ verify(obj.getSelector1Name());
+ verify(obj.getSelector2Name());
+ }
+
+ protected String string( Name name ) {
+ return context.getExecutionContext().getValueFactories().getStringFactory().create(name);
+ }
+
+ protected Table verify( SelectorName selectorName ) {
+ Table table = selectorsByName.get(selectorName);
+ if (table == null) {
+ problems.addError(GraphI18n.tableDoesNotExist, selectorName.getName());
+ }
+ return table;
+ }
+
+ protected Table verifyTable( SelectorName tableName ) {
+ Table table = selectorsByName.get(tableName);
+ if (table == null) {
+ problems.addError(GraphI18n.tableDoesNotExist, tableName.getName());
+ }
+ return table;
+ }
+
+ protected Schemata.Column verify( SelectorName selectorName,
+ Name propertyName ) {
+ Table table = selectorsByName.get(selectorName);
+ if (table == null) {
+ problems.addError(GraphI18n.tableDoesNotExist, selectorName.getName());
+ return null;
+ }
+ Schemata.Column column = table.getColumn(string(propertyName));
+ if (column == null) {
+ problems.addError(GraphI18n.columnDoesNotExistOnTable, string(propertyName), selectorName.getName());
+ }
+ return column;
+ }
+
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Validator.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties
===================================================================
--- trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties 2009-11-16 23:25:36 UTC (rev 1320)
@@ -121,6 +121,8 @@
tableDoesNotExist = Table '{0}' does not exist
columnDoesNotExistOnTable = Column '{0}' does not exist on the table '{1}'
columnDoesNotExistInQuery = Column '{0}' does not exist in query
+columnIsNotFullTextSearchable = Column '{0}' on the table '{1}' does not support full-text searching
+tableIsNotFullTextSearchable = Table '{0}' has no columns that support full-text searching
selectorDoesNotExistInQuery = Selector '{0}' does not exist in query
propertyOnSelectorIsNotUsedInQuery = Property '{0}' on selector '{1}' is not used in query
errorResolvingNodesFromLocationsUsingSourceAndWorkspace = Error resolving nodes from locations using '{1}' workspace in '{0}'
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -77,6 +77,8 @@
builder.addTable("t1", "c11", "c12", "c13");
builder.addTable("t2", "c21", "c22", "c23");
builder.addTable("all", "a1", "a2", "a3", "a4", "primaryType", "mixins");
+ builder.makeSearchable("all", "a2");
+ builder.makeSearchable("all", "a1");
builder.addKey("all", "a1");
builder.addKey("all", "a3");
builder.addView("v1", "SELECT c11, c12 AS c2 FROM t1 WHERE c13 < CAST('3' AS LONG)");
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/FullTextSearchParserTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/FullTextSearchParserTest.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/FullTextSearchParserTest.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -34,6 +34,7 @@
import org.jboss.dna.graph.query.model.FullTextSearch.CompoundTerm;
import org.jboss.dna.graph.query.model.FullTextSearch.Conjunction;
import org.jboss.dna.graph.query.model.FullTextSearch.Disjunction;
+import org.jboss.dna.graph.query.model.FullTextSearch.NegationTerm;
import org.jboss.dna.graph.query.model.FullTextSearch.SimpleTerm;
import org.jboss.dna.graph.query.model.FullTextSearch.Term;
import org.junit.Before;
@@ -159,9 +160,9 @@
Disjunction disjunction = (Disjunction)result;
assertThat(disjunction.getTerms().size(), is(4));
Conjunction conjunction1 = (Conjunction)disjunction.getTerms().get(0);
- SimpleTerm term3 = (SimpleTerm)disjunction.getTerms().get(1);
- SimpleTerm term4 = (SimpleTerm)disjunction.getTerms().get(2);
- SimpleTerm term5 = (SimpleTerm)disjunction.getTerms().get(3);
+ Term term3 = disjunction.getTerms().get(1);
+ Term term4 = disjunction.getTerms().get(2);
+ Term term5 = disjunction.getTerms().get(3);
assertHasSimpleTerms(conjunction1, "term1", "term2");
assertSimpleTerm(term3, "term3", true, false);
assertSimpleTerm(term4, "term4", true, false);
@@ -172,12 +173,12 @@
String... terms ) {
List<Term> expectedTerms = new ArrayList<Term>();
for (String term : terms) {
- SimpleTerm expected = new SimpleTerm(term, false);
if (term.startsWith("-")) {
term = term.substring(1);
- expected = new SimpleTerm(term, true);
+ expectedTerms.add(new NegationTerm(new SimpleTerm(term)));
+ } else {
+ expectedTerms.add(new SimpleTerm(term));
}
- expectedTerms.add(expected);
}
assertHasTerms(compoundTerm, expectedTerms.toArray(new Term[expectedTerms.size()]));
}
@@ -187,11 +188,20 @@
boolean excluded,
boolean quotingRequired ) {
assertThat(term, is(notNullValue()));
- assertThat(term, is(instanceOf(SimpleTerm.class)));
- SimpleTerm simpleTerm = (SimpleTerm)term;
- assertThat(simpleTerm.getValue(), is(value));
- assertThat(simpleTerm.isExcluded(), is(excluded));
- assertThat(simpleTerm.isQuotingRequired(), is(quotingRequired));
+ if (excluded) {
+ assertThat(term, is(instanceOf(NegationTerm.class)));
+ NegationTerm negationTerm = (NegationTerm)term;
+ Term negated = negationTerm.getNegatedTerm();
+ assertThat(negated, is(instanceOf(SimpleTerm.class)));
+ SimpleTerm simpleTerm = (SimpleTerm)negated;
+ assertThat(simpleTerm.getValue(), is(value));
+ assertThat(simpleTerm.isQuotingRequired(), is(quotingRequired));
+ } else {
+ assertThat(term, is(instanceOf(SimpleTerm.class)));
+ SimpleTerm simpleTerm = (SimpleTerm)term;
+ assertThat(simpleTerm.getValue(), is(value));
+ assertThat(simpleTerm.isQuotingRequired(), is(quotingRequired));
+ }
}
public static void assertHasTerms( CompoundTerm compoundTerm,
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -70,7 +70,7 @@
import org.jboss.dna.graph.query.model.UpperCase;
import org.jboss.dna.graph.query.model.FullTextSearch.Conjunction;
import org.jboss.dna.graph.query.model.FullTextSearch.Disjunction;
-import org.jboss.dna.graph.query.model.FullTextSearch.SimpleTerm;
+import org.jboss.dna.graph.query.model.FullTextSearch.Term;
import org.junit.Before;
import org.junit.Test;
@@ -680,15 +680,15 @@
@Test
public void shouldParseFullTextSearchExpressionFromStringWithValidExpression() {
Position pos = new Position(100, 13);
- FullTextSearch.Term result = parser.parseFullTextSearchExpression("term1 term2 OR -term3 OR -term4 OR term5", pos);
+ Term result = parser.parseFullTextSearchExpression("term1 term2 OR -term3 OR -term4 OR term5", pos);
assertThat(result, is(notNullValue()));
assertThat(result, is(instanceOf(Disjunction.class)));
Disjunction disjunction = (Disjunction)result;
assertThat(disjunction.getTerms().size(), is(4));
Conjunction conjunction1 = (Conjunction)disjunction.getTerms().get(0);
- SimpleTerm term3 = (SimpleTerm)disjunction.getTerms().get(1);
- SimpleTerm term4 = (SimpleTerm)disjunction.getTerms().get(2);
- SimpleTerm term5 = (SimpleTerm)disjunction.getTerms().get(3);
+ Term term3 = disjunction.getTerms().get(1);
+ Term term4 = disjunction.getTerms().get(2);
+ Term term5 = disjunction.getTerms().get(3);
FullTextSearchParserTest.assertHasSimpleTerms(conjunction1, "term1", "term2");
FullTextSearchParserTest.assertSimpleTerm(term3, "term3", true, false);
FullTextSearchParserTest.assertSimpleTerm(term4, "term4", true, false);
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -159,4 +159,66 @@
assertThat(source.getChildCount(), is(0));
}
+ @Test
+ public void shouldProduceErrorWhenFullTextSearchingTableWithNoSearchableColumns() {
+ schemata = schemataBuilder.addTable("someTable", "column1", "column2", "column3").build();
+ // Make sure the query without the search criteria does not have an error
+ query = builder.select("column1", "column2").from("someTable").query();
+ queryContext = new QueryContext(context, schemata, hints, problems);
+ plan = planner.createPlan(queryContext, query);
+ assertThat(problems.hasErrors(), is(false));
+
+ query = builder.select("column1", "column2").from("someTable").where().search("someTable", "term1").end().query();
+ queryContext = new QueryContext(context, schemata, hints, problems);
+ plan = planner.createPlan(queryContext, query);
+ assertThat(problems.hasErrors(), is(true));
+ }
+
+ @Test
+ public void shouldProducePlanWhenFullTextSearchingTableWithAtLeastOneSearchableColumn() {
+ schemata = schemataBuilder.addTable("someTable", "column1", "column2", "column3")
+ .makeSearchable("someTable", "column1")
+ .build();
+ query = builder.select("column1", "column4").from("someTable").where().search("someTable", "term1").end().query();
+ queryContext = new QueryContext(context, schemata, hints, problems);
+ plan = planner.createPlan(queryContext, query);
+ assertThat(problems.hasErrors(), is(true));
+ }
+
+ @Test
+ public void shouldProduceErrorWhenFullTextSearchingColumnThatIsNotSearchable() {
+ schemata = schemataBuilder.addTable("someTable", "column1", "column2", "column3").build();
+ // Make sure the query without the search criteria does not have an error
+ query = builder.select("column1", "column2").from("someTable").query();
+ queryContext = new QueryContext(context, schemata, hints, problems);
+ plan = planner.createPlan(queryContext, query);
+ assertThat(problems.hasErrors(), is(false));
+
+ query = builder.select("column1", "column2")
+ .from("someTable")
+ .where()
+ .search("someTable", "column2", "term1")
+ .end()
+ .query();
+ queryContext = new QueryContext(context, schemata, hints, problems);
+ plan = planner.createPlan(queryContext, query);
+ assertThat(problems.hasErrors(), is(true));
+ }
+
+ @Test
+ public void shouldProducePlanWhenFullTextSearchingColumnThatIsSearchable() {
+ schemata = schemataBuilder.addTable("someTable", "column1", "column2", "column3")
+ .makeSearchable("someTable", "column1")
+ .build();
+ query = builder.select("column1", "column4")
+ .from("someTable")
+ .where()
+ .search("someTable", "column1", "term1")
+ .end()
+ .query();
+ queryContext = new QueryContext(context, schemata, hints, problems);
+ plan = planner.createPlan(queryContext, query);
+ assertThat(problems.hasErrors(), is(true));
+ }
+
}
Modified: trunk/dna-search/pom.xml
===================================================================
--- trunk/dna-search/pom.xml 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/pom.xml 2009-11-16 23:25:36 UTC (rev 1320)
@@ -91,6 +91,20 @@
<artifactId>log4j</artifactId>
<scope>test</scope>
</dependency>
+
+
+
+
+
+ <dependency>
+ <groupId>org.apache.jackrabbit</groupId>
+ <artifactId>jackrabbit-core</artifactId>
+ <version>2.0-beta1</version>
+ </dependency>
+
+
+
+
<!--
Java Concurrency in Practice annotations
-->
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -28,7 +28,8 @@
/**
* Interface used to obtain the Lucene {@link Directory} instance that should be used for a workspace given the name of the
- * workspace. There are several implementations (see {@link DirectoryConfigurations}), but custom implementations can always be used.
+ * workspace. There are several implementations (see {@link DirectoryConfigurations}), but custom implementations can always be
+ * used.
*/
@ThreadSafe
public interface DirectoryConfiguration {
@@ -43,4 +44,16 @@
*/
Directory getDirectory( String workspaceName,
String indexName ) throws SearchEngineException;
+
+ /**
+ * Destroy the {@link Directory} that is used for the workspace with the supplied name.
+ *
+ * @param workspaceName the workspace name
+ * @param indexName the name of the index to be created
+ * @return true if the directory existed and was destroyed, or false if the directory didn't exist
+ * @throws IllegalArgumentException if the workspace name is null
+ * @throws SearchEngineException if there is a problem creating the directory
+ */
+ boolean destroyDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException;
}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -36,6 +36,7 @@
import org.jboss.dna.common.text.NoOpEncoder;
import org.jboss.dna.common.text.TextEncoder;
import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.common.util.FileUtil;
import org.jboss.dna.common.util.HashCode;
/**
@@ -146,6 +147,19 @@
}
/**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.DirectoryConfiguration#destroyDirectory(java.lang.String, java.lang.String)
+ */
+ public boolean destroyDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ IndexId id = new IndexId(workspaceName, indexName);
+ DirectoryType result = directories.remove(id);
+ return result != null ? doDestroy(result) : false;
+ }
+
+ /**
* Method implemented by subclasses to create a new Directory implementation.
*
* @param workspaceName the name of the workspace for which the {@link Directory} is to be created; never null
@@ -155,6 +169,8 @@
*/
protected abstract DirectoryType createDirectory( String workspaceName,
String indexName ) throws SearchEngineException;
+
+ protected abstract boolean doDestroy( DirectoryType directory ) throws SearchEngineException;
}
/**
@@ -172,6 +188,16 @@
String indexName ) {
return new RAMDirectory();
}
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.DirectoryConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
+ */
+ @Override
+ protected boolean doDestroy( RAMDirectory directory ) throws SearchEngineException {
+ return directory != null;
+ }
}
/**
@@ -304,6 +330,20 @@
}
/**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.DirectoryConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
+ */
+ @Override
+ protected boolean doDestroy( FSDirectory directory ) throws SearchEngineException {
+ File file = directory.getFile();
+ if (file.exists()) {
+ return FileUtil.delete(file);
+ }
+ return false;
+ }
+
+ /**
* Override this method to define which subclass of {@link FSDirectory} should be created.
*
* @param directory the file system directory; never null
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,1503 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriter.MaxFieldLength;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.regex.JavaUtilRegexCapabilities;
+import org.apache.lucene.search.regex.RegexQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Version;
+import org.jboss.dna.common.i18n.I18n;
+import org.jboss.dna.common.text.NoOpEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.util.Logger;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
+import org.jboss.dna.graph.property.Binary;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.DateTimeFactory;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.property.PropertyType;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryEngine;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.model.Visitors;
+import org.jboss.dna.graph.query.optimize.Optimizer;
+import org.jboss.dna.graph.query.optimize.OptimizerRule;
+import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
+import org.jboss.dna.graph.query.plan.CanonicalPlanner;
+import org.jboss.dna.graph.query.plan.PlanHints;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.Planner;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.QueryProcessor;
+import org.jboss.dna.graph.request.ChangeRequest;
+import org.jboss.dna.search.IndexRules.Rule;
+import org.jboss.dna.search.query.CompareNameQuery;
+import org.jboss.dna.search.query.ComparePathQuery;
+import org.jboss.dna.search.query.CompareStringQuery;
+import org.jboss.dna.search.query.MatchNoneQuery;
+import org.jboss.dna.search.query.NotQuery;
+import org.jboss.dna.search.query.UuidsQuery;
+
+/**
+ * A simple {@link IndexLayout} implementation that relies upon two separate indexes: one for the node content and a second one
+ * for paths and UUIDs.
+ */
+@ThreadSafe
+public abstract class DualIndexLayout implements IndexLayout {
+
+ protected static final long MIN_DATE = 0;
+ protected static final long MAX_DATE = Long.MAX_VALUE;
+ protected static final long MIN_LONG = Long.MIN_VALUE;
+ protected static final long MAX_LONG = Long.MAX_VALUE;
+ protected static final double MIN_DOUBLE = Double.MIN_VALUE;
+ protected static final double MAX_DOUBLE = Double.MAX_VALUE;
+ protected static final int MIN_DEPTH = 0;
+ protected static final int MAX_DEPTH = 100;
+
+ protected static final String PATHS_INDEX_NAME = "paths";
+ protected static final String CONTENT_INDEX_NAME = "content";
+
+ protected static final String UUID_FIELD = "uuid";
+ protected static final String FULL_TEXT_SUFFIX = "/fs"; // the slash character is not allowed in a property name unescaped
+
+ static class PathIndex {
+ public static final String PATH = "path";
+ public static final String LOCAL_NAME = "name";
+ public static final String SNS_INDEX = "sns";
+ public static final String UUID = UUID_FIELD;
+ public static final String DEPTH = "depth";
+ }
+
+ static class ContentIndex {
+ public static final String UUID = UUID_FIELD;
+ public static final String FULL_TEXT = "fts";
+ }
+
+ /**
+ * The number of results that should be returned when performing queries while deleting entire branches of content. The
+ * current value is {@value} .
+ */
+ protected static final int SIZE_OF_DELETE_BATCHES = 1000;
+
+ private ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
+ @Override
+ protected DateFormat initialValue() {
+ return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
+ }
+ };
+
+ /**
+ * Obtain an immutable {@link FieldSelector} instance that accesses the UUID field.
+ */
+ protected static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return PathIndex.UUID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+ }
+ };
+
+ /**
+ * Get the date formatter that can be reused safely within the current thread.
+ *
+ * @return the date formatter; never null
+ */
+ protected DateFormat dateFormatter() {
+ return dateFormatter.get();
+ }
+
+ /**
+ * Get the text encoder that should be used to encode namespaces in the search index.
+ *
+ * @return the namespace text encoder; never null
+ */
+ protected TextEncoder getNamespaceEncoder() {
+ return new NoOpEncoder();
+ }
+
+ /**
+ * Create a Lucene {@link Analyzer} analyzer that should be used for indexing and searching.
+ *
+ * @return the analyzer; never null
+ */
+ protected Analyzer createAnalyzer() {
+ return new StandardAnalyzer(Version.LUCENE_CURRENT);
+ }
+
+ protected abstract class LuceneSession implements IndexSession {
+ protected final ExecutionContext context;
+ protected final String sourceName;
+ protected final String workspaceName;
+ protected final IndexRules rules;
+ private final QueryEngine queryEngine;
+ private final Analyzer analyzer;
+ private final Directory pathsIndexDirectory;
+ private final Directory contentIndexDirectory;
+ protected final boolean overwrite;
+ protected final boolean readOnly;
+ protected final ValueFactory<String> stringFactory;
+ protected final DateTimeFactory dateFactory;
+ protected final PathFactory pathFactory;
+ private int changeCount;
+ private IndexReader pathsReader;
+ private IndexWriter pathsWriter;
+ private IndexSearcher pathsSearcher;
+ private IndexReader contentReader;
+ private IndexWriter contentWriter;
+ private IndexSearcher contentSearcher;
+
+ protected LuceneSession( ExecutionContext context,
+ String sourceName,
+ String workspaceName,
+ IndexRules rules,
+ Directory pathsIndexDirectory,
+ Directory contentIndexDirectory,
+ boolean overwrite,
+ boolean readOnly ) {
+ this.context = context;
+ this.sourceName = sourceName;
+ this.workspaceName = workspaceName;
+ this.rules = rules;
+ this.overwrite = overwrite;
+ this.readOnly = readOnly;
+ this.pathsIndexDirectory = pathsIndexDirectory;
+ this.contentIndexDirectory = contentIndexDirectory;
+ this.analyzer = createAnalyzer();
+ this.stringFactory = context.getValueFactories().getStringFactory();
+ this.dateFactory = context.getValueFactories().getDateFactory();
+ this.pathFactory = context.getValueFactories().getPathFactory();
+ assert this.context != null;
+ assert this.sourceName != null;
+ assert this.workspaceName != null;
+ assert this.rules != null;
+ assert this.analyzer != null;
+ assert this.pathsIndexDirectory != null;
+ assert this.contentIndexDirectory != null;
+ assert this.stringFactory != null;
+ assert this.dateFactory != null;
+ // do this last ...
+ this.queryEngine = createQueryProcessor();
+ assert this.queryEngine != null;
+ }
+
+ /**
+ * Create the field name that will be used to store the full-text searchable property values.
+ *
+ * @param propertyName the name of the property; may not be null
+ * @return the field name for the full-text searchable property values; never null
+ */
+ protected String fullTextFieldName( String propertyName ) {
+ return propertyName + FULL_TEXT_SUFFIX;
+ }
+
+ protected IndexReader getPathsReader() throws IOException {
+ if (pathsReader == null) {
+ pathsReader = IndexReader.open(pathsIndexDirectory, readOnly);
+ }
+ return pathsReader;
+ }
+
+ protected IndexReader getContentReader() throws IOException {
+ if (contentReader == null) {
+ contentReader = IndexReader.open(contentIndexDirectory, readOnly);
+ }
+ return contentReader;
+ }
+
+ protected IndexWriter getPathsWriter() throws IOException {
+ assert !readOnly;
+ if (pathsWriter == null) {
+ pathsWriter = new IndexWriter(pathsIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
+ }
+ return pathsWriter;
+ }
+
+ protected IndexWriter getContentWriter() throws IOException {
+ assert !readOnly;
+ if (contentWriter == null) {
+ contentWriter = new IndexWriter(contentIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
+ }
+ return contentWriter;
+ }
+
+ protected IndexSearcher getPathsSearcher() throws IOException {
+ if (pathsSearcher == null) {
+ pathsSearcher = new IndexSearcher(getPathsReader());
+ }
+ return pathsSearcher;
+ }
+
+ protected IndexSearcher getContentSearcher() throws IOException {
+ if (contentSearcher == null) {
+ contentSearcher = new IndexSearcher(getContentReader());
+ }
+ return contentSearcher;
+ }
+
+ protected boolean hasWriters() {
+ return pathsWriter != null || contentWriter != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#getContext()
+ */
+ public final ExecutionContext getContext() {
+ return context;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#getSourceName()
+ */
+ public final String getSourceName() {
+ return sourceName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#getWorkspaceName()
+ */
+ public String getWorkspaceName() {
+ return workspaceName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#hasChanges()
+ */
+ public boolean hasChanges() {
+ return changeCount > 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#index(org.jboss.dna.graph.Node)
+ */
+ public void index( Node node ) throws IOException {
+ assert !readOnly;
+ Location location = node.getLocation();
+ UUID uuid = location.getUuid();
+ if (uuid == null) uuid = UUID.randomUUID();
+ Path path = location.getPath();
+ String uuidStr = stringFactory.create(uuid);
+ String pathStr = pathAsString(path, stringFactory);
+ String nameStr = path.isRoot() ? "" : stringFactory.create(path.getLastSegment().getName());
+ int sns = path.isRoot() ? 1 : path.getLastSegment().getIndex();
+
+ Logger logger = Logger.getLogger(getClass());
+ if (logger.isTraceEnabled()) {
+ logger.trace("indexing {0}", pathStr);
+ }
+
+ // Create a separate document for the path, which makes it easier to handle moves since the path can
+ // be changed without changing any other content fields ...
+ Document doc = new Document();
+ doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field(PathIndex.LOCAL_NAME, nameStr, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new NumericField(PathIndex.LOCAL_NAME, Field.Store.YES, true).setIntValue(sns));
+ doc.add(new Field(PathIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new NumericField(PathIndex.DEPTH, Field.Store.YES, true).setIntValue(path.size()));
+ getPathsWriter().addDocument(doc);
+
+ // Create the document for the content (properties) ...
+ doc = new Document();
+ doc.add(new Field(ContentIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ String stringValue = null;
+ StringBuilder fullTextSearchValue = null;
+ for (Property property : node.getProperties()) {
+ Name name = property.getName();
+ Rule rule = rules.getRule(name);
+ if (rule.isSkipped()) continue;
+ String nameString = stringFactory.create(name);
+ if (rule.isDate()) {
+ for (Object value : property) {
+ if (value == null) continue;
+ DateTime dateValue = dateFactory.create(value);
+ // Add a separate field for each property value ...
+ doc.add(new NumericField(nameString, rule.getStoreOption(), true).setLongValue(dateValue.getMillisecondsInUtc()));
+ // Dates are not added to the full-text search field (since this wouldn't make sense)
+ }
+ continue;
+ }
+ for (Object value : property) {
+ if (value == null) continue;
+ if (value instanceof Binary) {
+ // don't include binary values as individual fields but do include them in the full-text search ...
+ // TODO : add to full-text search ...
+ continue;
+ }
+ stringValue = stringFactory.create(value);
+ // Add a separate field for each property value ...
+ doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
+
+ if (rule.isFullText()) {
+ // Add this text to the full-text field ...
+ if (fullTextSearchValue == null) {
+ fullTextSearchValue = new StringBuilder();
+ } else {
+ fullTextSearchValue.append(' ');
+ }
+ fullTextSearchValue.append(stringValue);
+
+ // Also create a full-text-searchable field ...
+ String fullTextNameString = fullTextFieldName(nameString);
+ doc.add(new Field(fullTextNameString, stringValue, Store.NO, Index.ANALYZED));
+ }
+ }
+ }
+ // Add the full-text-search field ...
+ if (fullTextSearchValue != null) {
+ doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO, Field.Index.ANALYZED));
+ }
+ getContentWriter().addDocument(doc);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#optimize()
+ */
+ public void optimize() throws IOException {
+ getContentWriter().optimize();
+ getPathsWriter().optimize();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#apply(java.lang.Iterable)
+ */
+ public int apply( Iterable<ChangeRequest> changes ) /*throws IOException*/{
+ for (ChangeRequest change : changes) {
+ if (change != null) continue;
+ }
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Because this strategy uses multiple indexes, and since there's no correlation between the documents in those indexes,
+ * we need to perform the delete in multiple steps. First, we need to perform a query to find out which nodes exist below
+ * a certain path. Then, we need to delete those nodes from the paths index. Finally, we need to delete the corresponding
+ * documents in the content index that represent those same nodes.
+ * </p>
+ * <p>
+ * Since we don't know how many documents there will be, we perform these steps in batches, where each batch limits the
+ * number of results to a maximum number. We repeat batches as long as we find more results. This approach has the
+ * advantage that we'll never bring in a large number of results, and it allows us to delete the documents from the
+ * content node using a query.
+ * </p>
+ *
+ * @see org.jboss.dna.search.IndexSession#deleteBelow(org.jboss.dna.graph.property.Path)
+ */
+ public int deleteBelow( Path path ) throws IOException {
+ assert !readOnly;
+ // Perform a query using the reader to find those nodes at/below the path ...
+ try {
+ IndexReader pathReader = getPathsReader();
+ IndexSearcher pathSearcher = new IndexSearcher(pathReader);
+ String pathStr = stringFactory.create(path) + "/";
+ PrefixQuery query = new PrefixQuery(new Term(PathIndex.PATH, pathStr));
+ int numberDeleted = 0;
+ while (true) {
+ // Execute the query and get the results ...
+ TopDocs results = pathSearcher.search(query, SIZE_OF_DELETE_BATCHES);
+ int numResultsInBatch = results.scoreDocs.length;
+ // Walk the results, delete the doc, and add to the query that we'll use against the content index ...
+ IndexReader contentReader = getContentReader();
+ for (ScoreDoc result : results.scoreDocs) {
+ int docId = result.doc;
+ // Find the UUID of the node ...
+ Document doc = pathReader.document(docId, UUID_FIELD_SELECTOR);
+ String uuid = doc.get(PathIndex.UUID);
+ // Delete the document from the paths index ...
+ pathReader.deleteDocument(docId);
+ // Delete the corresponding document from the content index ...
+ contentReader.deleteDocuments(new Term(ContentIndex.UUID, uuid));
+ }
+ numberDeleted += numResultsInBatch;
+ if (numResultsInBatch < SIZE_OF_DELETE_BATCHES) break;
+ }
+ return numberDeleted;
+ } catch (FileNotFoundException e) {
+ // There are no index files yet, so nothing to delete ...
+ return 0;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#search(org.jboss.dna.graph.ExecutionContext, java.lang.String, int, int,
+ * java.util.List)
+ */
+ public void search( ExecutionContext context,
+ String fullTextString,
+ int maxResults,
+ int offset,
+ List<Location> results ) throws IOException, ParseException {
+ assert fullTextString != null;
+ assert fullTextString.length() > 0;
+ assert offset >= 0;
+ assert maxResults > 0;
+ assert results != null;
+
+ // Parse the full-text search and search against the 'fts' field ...
+ QueryParser parser = new QueryParser(ContentIndex.FULL_TEXT, createAnalyzer());
+ Query query = parser.parse(fullTextString);
+ TopDocs docs = getContentSearcher().search(query, maxResults + offset);
+
+ // Collect the results ...
+ IndexReader contentReader = getContentReader();
+ IndexReader pathReader = getPathsReader();
+ IndexSearcher pathSearcher = getPathsSearcher();
+ ScoreDoc[] scoreDocs = docs.scoreDocs;
+ int numberOfResults = scoreDocs.length;
+ if (numberOfResults > offset) {
+ // There are enough results to satisfy the offset ...
+ PathFactory pathFactory = context.getValueFactories().getPathFactory();
+ for (int i = offset, num = scoreDocs.length; i != num; ++i) {
+ ScoreDoc result = scoreDocs[i];
+ int docId = result.doc;
+ // Find the UUID of the node (this UUID might be artificial, so we have to find the path) ...
+ Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
+ String uuid = doc.get(ContentIndex.UUID);
+ // Find the path for this node (is there a better way to do this than one search per UUID?) ...
+ TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.UUID, uuid)), 1);
+ if (pathDocs.scoreDocs.length < 1) {
+ // No path record found ...
+ continue;
+ }
+ Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
+ String pathString = pathDoc.get(PathIndex.PATH);
+ Path path = pathFactory.create(pathString);
+ // Now add the location ...
+ results.add(Location.create(path, UUID.fromString(uuid)));
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#query(org.jboss.dna.graph.query.QueryContext,
+ * org.jboss.dna.graph.query.model.QueryCommand)
+ */
+ public QueryResults query( QueryContext queryContext,
+ QueryCommand query ) {
+ return queryEngine.execute(queryContext, query);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#commit()
+ */
+ public void commit() throws IOException {
+ IOException ioError = null;
+ RuntimeException runtimeError = null;
+ if (pathsReader != null) {
+ try {
+ pathsReader.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsReader = null;
+ }
+ }
+ if (contentReader != null) {
+ try {
+ contentReader.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentReader = null;
+ }
+ }
+ if (pathsWriter != null) {
+ try {
+ pathsWriter.commit();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ try {
+ pathsWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsWriter = null;
+ }
+ }
+ }
+ if (contentWriter != null) {
+ try {
+ contentWriter.commit();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ try {
+ contentWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ contentWriter = null;
+ }
+ }
+ }
+ if (ioError != null) throw ioError;
+ if (runtimeError != null) throw runtimeError;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexSession#rollback()
+ */
+ public void rollback() throws IOException {
+ IOException ioError = null;
+ RuntimeException runtimeError = null;
+ if (pathsReader != null) {
+ try {
+ pathsReader.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsReader = null;
+ }
+ }
+ if (contentReader != null) {
+ try {
+ contentReader.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentReader = null;
+ }
+ }
+ if (pathsWriter != null) {
+ try {
+ pathsWriter.rollback();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ try {
+ pathsWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsWriter = null;
+ }
+ }
+ }
+ if (contentWriter != null) {
+ try {
+ contentWriter.rollback();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ try {
+ contentWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ contentWriter = null;
+ }
+ }
+ }
+ if (ioError != null) throw ioError;
+ if (runtimeError != null) throw runtimeError;
+ }
+
+ protected QueryEngine createQueryProcessor() {
+ // Create the query engine ...
+ Planner planner = new CanonicalPlanner();
+ Optimizer optimizer = new RuleBasedOptimizer() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
+ * org.jboss.dna.graph.query.plan.PlanHints)
+ */
+ @Override
+ protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
+ PlanHints hints ) {
+ super.populateRuleStack(ruleStack, hints);
+ // Add any custom rules here, either at the front of the stack or at the end
+ }
+ };
+ QueryProcessor processor = new QueryProcessor() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
+ * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
+ * org.jboss.dna.graph.query.QueryResults.Columns,
+ * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
+ */
+ @Override
+ protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+ QueryContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
+ try {
+ return LuceneSession.this.createAccessComponent(originalQuery,
+ context,
+ accessNode,
+ resultColumns,
+ analyzer);
+ } catch (IOException e) {
+ I18n msg = SearchI18n.errorWhilePerformingQuery;
+ context.getProblems().addError(e,
+ msg,
+ Visitors.readable(originalQuery),
+ getWorkspaceName(),
+ getSourceName(),
+ e.getMessage());
+ return null;
+ }
+ }
+ };
+
+ return new QueryEngine(planner, optimizer, processor);
+ }
+
+ protected abstract ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+ QueryContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer )
+ throws IOException;
+
+ /**
+ * Get the set of UUIDs for the children of the node at the given path.
+ *
+ * @param parentPath the path to the parent node; may not be null
+ * @return the UUIDs of the child nodes; never null but possibly empty
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected Set<UUID> getUuidsForChildrenOf( Path parentPath ) throws IOException {
+ // Find the path of the parent ...
+ String stringifiedPath = pathAsString(parentPath, stringFactory);
+ // Append a '/' to the parent path, so we'll only get decendants ...
+ stringifiedPath = stringifiedPath + '/';
+
+ // Create a query to find all the nodes below the parent path ...
+ Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
+ // Include only the children ...
+ int childrenDepth = parentPath.size() + 1;
+ Query depthQuery = NumericRangeQuery.newIntRange(PathIndex.DEPTH, childrenDepth, childrenDepth, true, true);
+ // And combine ...
+ BooleanQuery combinedQuery = new BooleanQuery();
+ combinedQuery.add(query, Occur.MUST);
+ combinedQuery.add(depthQuery, Occur.MUST);
+ query = combinedQuery;
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return uuidCollector.getUuids();
+ }
+
+ /**
+ * Get the set of UUIDs for the nodes that are descendants of the node at the given path.
+ *
+ * @param parentPath the path to the parent node; may not be null and <i>may not be the root node</i>
+ * @param includeParent true if the parent node should be included in the results, or false if only the descendants should
+ * be included
+ * @return the UUIDs of the nodes; never null but possibly empty
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected Set<UUID> getUuidsForDescendantsOf( Path parentPath,
+ boolean includeParent ) throws IOException {
+ assert !parentPath.isRoot();
+
+ // Find the path of the parent ...
+ String stringifiedPath = pathAsString(parentPath, stringFactory);
+ if (!includeParent) {
+ // Append a '/' to the parent path, and we'll only get decendants ...
+ stringifiedPath = stringifiedPath + '/';
+ }
+
+ // Create a prefix query ...
+ Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return uuidCollector.getUuids();
+ }
+
+ /**
+ * Get the set containing the single UUID for the node at the given path.
+ *
+ * @param path the path to the node; may not be null
+ * @return the UUID of the supplied node; or null if the node cannot be found
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected UUID getUuidFor( Path path ) throws IOException {
+ // Create a query to find all the nodes below the parent path ...
+ IndexSearcher searcher = getPathsSearcher();
+ String stringifiedPath = pathAsString(path, stringFactory);
+ TermQuery query = new TermQuery(new Term(PathIndex.PATH, stringifiedPath));
+
+ // Now execute and collect the UUIDs ...
+ TopDocs topDocs = searcher.search(query, 1);
+ if (topDocs.totalHits == 0) return null;
+ Document pathDoc = getPathsReader().document(topDocs.scoreDocs[0].doc);
+ String uuidString = pathDoc.get(PathIndex.UUID);
+ return UUID.fromString(uuidString);
+ }
+
+ /**
+ * Utility method to create a query to find all of the documents representing nodes with the supplied UUIDs.
+ *
+ * @param uuids the UUIDs of the nodes that are to be found; may not be null
+ * @return the query; never null
+ */
+ protected Query findAllNodesWithUuids( Set<UUID> uuids ) {
+ if (uuids.isEmpty()) {
+ // There are no children, so return a null query ...
+ return new MatchNoneQuery();
+ }
+ if (uuids.size() == 1) {
+ UUID uuid = uuids.iterator().next();
+ if (uuid == null) return new MatchNoneQuery();
+ return new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
+ }
+ if (uuids.size() < 50) {
+ // Create an OR boolean query for all the UUIDs, since this is probably more efficient ...
+ BooleanQuery query = new BooleanQuery();
+ for (UUID uuid : uuids) {
+ Query uuidQuery = new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
+ query.add(uuidQuery, Occur.SHOULD);
+ }
+ return query;
+ }
+ // Returna query that will always find all of the UUIDs ...
+ return new UuidsQuery(ContentIndex.UUID, uuids, getContext().getValueFactories().getUuidFactory());
+ }
+
+ protected Query findAllNodesBelow( Path ancestorPath ) throws IOException {
+ if (ancestorPath.isRoot()) {
+ return new MatchAllDocsQuery();
+ }
+ Set<UUID> uuids = getUuidsForDescendantsOf(ancestorPath, false);
+ return findAllNodesWithUuids(uuids);
+ }
+
+ /**
+ * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
+ * supplied path.
+ *
+ * @param parentPath the path of the parent node.
+ * @return the query; never null
+ * @throws IOException if there is an error finding the UUIDs of the child nodes
+ */
+ protected Query findChildNodes( Path parentPath ) throws IOException {
+ if (parentPath.isRoot()) {
+ return new MatchAllDocsQuery();
+ }
+ Set<UUID> childUuids = getUuidsForChildrenOf(parentPath);
+ return findAllNodesWithUuids(childUuids);
+ }
+
+ /**
+ * Create a query that can be used to find the one document (or node) that exists at the exact path supplied. This method
+ * first queries the {@link PathIndex path index} to find the UUID of the node at the supplied path, and then returns a
+ * query that matches the UUID.
+ *
+ * @param path the path of the node
+ * @return the query; never null
+ * @throws IOException if there is an error finding the UUID for the supplied path
+ */
+ protected Query findNodeAt( Path path ) throws IOException {
+ UUID uuid = getUuidFor(path);
+ if (uuid == null) return null;
+ return new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
+ }
+
+ /**
+ * Create a query that can be used to find documents (or nodes) that have a field value that satisfies the supplied LIKE
+ * expression.
+ *
+ * @param fieldName the name of the document field to search
+ * @param likeExpression the JCR like expression
+ * @return the query; never null
+ */
+ protected Query findNodesLike( String fieldName,
+ String likeExpression ) {
+ assert likeExpression != null;
+ assert likeExpression.length() > 0;
+
+ // '%' matches 0 or more characters
+ // '_' matches any single character
+ // '\x' matches 'x'
+ // all other characters match themselves
+
+ // Wildcard queries are a better match, but they can be slow and should not be used
+ // if the first character of the expression is a '%' or '_' ...
+ char firstChar = likeExpression.charAt(0);
+ if (firstChar != '%' && firstChar != '_') {
+ // Create a wildcard query ...
+ String expression = toWildcardExpression(likeExpression);
+ return new WildcardQuery(new Term(fieldName, expression));
+ }
+ // Create a regex query,
+ String regex = toRegularExpression(likeExpression);
+ RegexQuery query = new RegexQuery(new Term(fieldName, regex));
+ query.setRegexImplementation(new JavaUtilRegexCapabilities());
+ return query;
+ }
+
+ protected Query findNodesWith( PropertyValue propertyValue,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) {
+ String field = stringFactory.create(propertyValue.getPropertyName());
+ PropertyType valueType = PropertyType.discoverType(value);
+ ValueFactories factories = context.getValueFactories();
+ switch (valueType) {
+ case NAME:
+ case PATH:
+ case REFERENCE:
+ case URI:
+ case UUID:
+ case STRING:
+ String stringValue = stringFactory.create(value);
+ if (valueType == PropertyType.PATH) {
+ stringValue = pathAsString(pathFactory.create(value), stringFactory);
+ }
+ if (!caseSensitive) stringValue = stringValue.toLowerCase();
+ switch (operator) {
+ case EQUAL_TO:
+ return new TermQuery(new Term(field, stringValue));
+ case NOT_EQUAL_TO:
+ Query query = new TermQuery(new Term(field, stringValue));
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return CompareStringQuery.createQueryForNodesWithFieldGreaterThan(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case LESS_THAN:
+ return CompareStringQuery.createQueryForNodesWithFieldLessThan(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case LESS_THAN_OR_EQUAL_TO:
+ return CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case LIKE:
+ return findNodesLike(field, stringValue);
+ }
+ break;
+ case DATE:
+ long date = factories.getLongFactory().create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, date, date, true, true);
+ case NOT_EQUAL_TO:
+ Query query = NumericRangeQuery.newLongRange(field, date, date, true, true);
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return NumericRangeQuery.newLongRange(field, date, MAX_DATE, false, true);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, date, MAX_DATE, true, true);
+ case LESS_THAN:
+ return NumericRangeQuery.newLongRange(field, MIN_DATE, date, true, false);
+ case LESS_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, MIN_DATE, date, true, true);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case LONG:
+ long longValue = factories.getLongFactory().create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
+ case NOT_EQUAL_TO:
+ Query query = NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return NumericRangeQuery.newLongRange(field, longValue, MAX_LONG, false, true);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, longValue, MAX_LONG, true, true);
+ case LESS_THAN:
+ return NumericRangeQuery.newLongRange(field, MIN_LONG, longValue, true, false);
+ case LESS_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, MIN_LONG, longValue, true, true);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case DECIMAL:
+ case DOUBLE:
+ double doubleValue = factories.getDoubleFactory().create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
+ case NOT_EQUAL_TO:
+ Query query = NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return NumericRangeQuery.newDoubleRange(field, doubleValue, MAX_DOUBLE, false, true);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newDoubleRange(field, doubleValue, MAX_DOUBLE, true, true);
+ case LESS_THAN:
+ return NumericRangeQuery.newDoubleRange(field, MIN_DOUBLE, doubleValue, true, false);
+ case LESS_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newDoubleRange(field, MIN_DOUBLE, doubleValue, true, true);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case BOOLEAN:
+ boolean booleanValue = factories.getBooleanFactory().create(value);
+ stringValue = stringFactory.create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return new TermQuery(new Term(field, stringValue));
+ case NOT_EQUAL_TO:
+ return new TermQuery(new Term(field, stringFactory.create(!booleanValue)));
+ case GREATER_THAN:
+ if (!booleanValue) {
+ return new TermQuery(new Term(field, stringFactory.create(true)));
+ }
+ // Can't be greater than 'true', per JCR spec
+ return new MatchNoneQuery();
+ case GREATER_THAN_OR_EQUAL_TO:
+ return new TermQuery(new Term(field, stringFactory.create(true)));
+ case LESS_THAN:
+ if (booleanValue) {
+ return new TermQuery(new Term(field, stringFactory.create(false)));
+ }
+ // Can't be less than 'false', per JCR spec
+ return new MatchNoneQuery();
+ case LESS_THAN_OR_EQUAL_TO:
+ return new TermQuery(new Term(field, stringFactory.create(false)));
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case OBJECT:
+ case BINARY:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ return null;
+
+ }
+
+ protected Query findNodesWith( NodePath nodePath,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ if (!caseSensitive) value = stringFactory.create(value).toLowerCase();
+ Path pathValue = operator != Operator.LIKE ? pathFactory.create(value) : null;
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ return findNodeAt(pathValue);
+ case NOT_EQUAL_TO:
+ return new NotQuery(findNodeAt(pathValue));
+ case LIKE:
+ String likeExpression = stringFactory.create(value);
+ return findNodesLike(PathIndex.PATH, likeExpression);
+ case GREATER_THAN:
+ query = ComparePathQuery.createQueryForNodesWithPathGreaterThan(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = ComparePathQuery.createQueryForNodesWithPathGreaterThanOrEqualTo(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = ComparePathQuery.createQueryForNodesWithPathLessThan(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = ComparePathQuery.createQueryForNodesWithPathLessThanOrEqualTo(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ }
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ protected Query findNodesWith( NodeName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ String stringValue = stringFactory.create(value);
+ if (!caseSensitive) stringValue = stringValue.toLowerCase();
+ Path.Segment segment = operator != Operator.LIKE ? pathFactory.createSegment(stringValue) : null;
+ int snsIndex = operator != Operator.LIKE ? segment.getIndex() : 0;
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(new TermQuery(new Term(PathIndex.LOCAL_NAME, stringValue)), Occur.MUST);
+ booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false),
+ Occur.MUST);
+ return booleanQuery;
+ case NOT_EQUAL_TO:
+ booleanQuery = new BooleanQuery();
+ booleanQuery.add(new TermQuery(new Term(PathIndex.LOCAL_NAME, stringValue)), Occur.MUST);
+ booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false),
+ Occur.MUST);
+ return new NotQuery(booleanQuery);
+ case GREATER_THAN:
+ query = CompareNameQuery.createQueryForNodesWithNameGreaterThan(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = CompareNameQuery.createQueryForNodesWithNameGreaterThanOrEqualTo(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = CompareNameQuery.createQueryForNodesWithNameLessThan(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = CompareNameQuery.createQueryForNodesWithNameLessThanOrEqualTo(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LIKE:
+ // See whether the like expression has brackets ...
+ String likeExpression = stringValue;
+ int openBracketIndex = likeExpression.indexOf('[');
+ if (openBracketIndex != -1) {
+ String localNameExpression = likeExpression.substring(0, openBracketIndex);
+ String snsIndexExpression = likeExpression.substring(openBracketIndex);
+ Query localNameQuery = createLocalNameQuery(localNameExpression);
+ Query snsQuery = createSnsIndexQuery(snsIndexExpression);
+ if (localNameQuery == null) {
+ if (snsQuery == null) {
+ query = new MatchNoneQuery();
+ } else {
+ // There is just an SNS part ...
+ query = snsQuery;
+ }
+ } else {
+ // There is a local name part ...
+ if (snsQuery == null) {
+ query = localNameQuery;
+ } else {
+ // There is both a local name part and a SNS part ...
+ booleanQuery = new BooleanQuery();
+ booleanQuery.add(localNameQuery, Occur.MUST);
+ booleanQuery.add(snsQuery, Occur.MUST);
+ query = booleanQuery;
+ }
+ }
+ } else {
+ // There is no SNS expression ...
+ query = createLocalNameQuery(likeExpression);
+ }
+ assert query != null;
+ break;
+ }
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ protected Query findNodesWith( NodeLocalName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ String nameValue = stringFactory.create(value);
+ Query query = null;
+ switch (operator) {
+ case LIKE:
+ String likeExpression = stringFactory.create(value);
+ return findNodesLike(PathIndex.LOCAL_NAME, likeExpression); // already is a query with UUIDs
+ case EQUAL_TO:
+ query = new TermQuery(new Term(PathIndex.LOCAL_NAME, nameValue));
+ break;
+ case NOT_EQUAL_TO:
+ query = new NotQuery(new TermQuery(new Term(PathIndex.LOCAL_NAME, nameValue)));
+ break;
+ case GREATER_THAN:
+ query = CompareStringQuery.createQueryForNodesWithFieldGreaterThan(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = CompareStringQuery.createQueryForNodesWithFieldLessThan(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ }
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ protected Query findNodesWith( NodeDepth depthConstraint,
+ Operator operator,
+ Object value ) throws IOException {
+ int depth = context.getValueFactories().getLongFactory().create(value).intValue();
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
+ break;
+ case NOT_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
+ query = new NotQuery(query);
+ break;
+ case GREATER_THAN:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, false, true);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, true, true);
+ break;
+ case LESS_THAN:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, false);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, true);
+ break;
+ case LIKE:
+ // This is not allowed ...
+ return null;
+ }
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ protected Query createLocalNameQuery( String likeExpression ) {
+ if (likeExpression == null) return null;
+ likeExpression = likeExpression.trim();
+ if (likeExpression.length() == 0) return null;
+ if (likeExpression.indexOf('?') != -1 || likeExpression.indexOf('*') != -1) {
+ // The local name is a like ...
+ return findNodesLike(PathIndex.LOCAL_NAME, likeExpression);
+ }
+ // The local name is an exact match ...
+ return new TermQuery(new Term(PathIndex.LOCAL_NAME, likeExpression));
+ }
+
+ protected Query createSnsIndexQuery( String likeExpression ) {
+ if (likeExpression == null) return null;
+ likeExpression = likeExpression.trim();
+ if (likeExpression.length() == 0) return null;
+
+ // Remove the leading '[' ...
+ assert likeExpression.charAt(0) == '[';
+ likeExpression = likeExpression.substring(1);
+
+ // Remove the trailing ']' if it exists ...
+ int closeBracketIndex = likeExpression.indexOf(']');
+ if (closeBracketIndex != -1) {
+ likeExpression = likeExpression.substring(0, closeBracketIndex);
+ }
+ // If SNS expression contains '?' or '*' ...
+ if (likeExpression.indexOf('?') != -1 || likeExpression.indexOf('*') != -1) {
+ // There is a LIKE expression for the SNS ...
+ return findNodesLike(PathIndex.SNS_INDEX, likeExpression);
+ }
+ // This is not a LIKE expression but an exact value specification and should be a number ...
+ try {
+ // This SNS is just a number ...
+ int sns = Integer.parseInt(likeExpression);
+ return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, sns, sns, true, false);
+ } catch (NumberFormatException e) {
+ // It's not a number but it's in the SNS field, so there will be no results ...
+ return new MatchNoneQuery();
+ }
+ }
+
+ }
+
+ /**
+ * Convert the JCR like expression to a Lucene wildcard expression. The JCR like expression uses '%' to match 0 or more
+ * characters, '_' to match any single character, '\x' to match the 'x' character, and all other characters to match
+ * themselves.
+ *
+ * @param likeExpression the like expression; may not be null
+ * @return the expression that can be used with a WildcardQuery; never null
+ */
+ protected static String toWildcardExpression( String likeExpression ) {
+ assert likeExpression != null;
+ assert likeExpression.length() > 0;
+ return likeExpression.replace('%', '*').replace('_', '?').replaceAll("\\\\(.)", "$1");
+ }
+
+ /**
+ * Convert the JCR like expression to a regular expression. The JCR like expression uses '%' to match 0 or more characters,
+ * '_' to match any single character, '\x' to match the 'x' character, and all other characters to match themselves. Note that
+ * if any regex metacharacters appear in the like expression, they will be escaped within the resulting regular expression.
+ *
+ * @param likeExpression the like expression; may not be null
+ * @return the expression that can be used with a WildcardQuery; never null
+ */
+ protected static String toRegularExpression( String likeExpression ) {
+ assert likeExpression != null;
+ assert likeExpression.length() > 0;
+ // Replace all '\x' with 'x' ...
+ String result = likeExpression.replaceAll("\\\\(.)", "$1");
+ // Escape characters used as metacharacters in regular expressions, including
+ // '[', '^', '\', '$', '.', '|', '?', '*', '+', '(', and ')'
+ result = result.replaceAll("([[^\\\\$.|?*+()])", "\\$1");
+ // Replace '%'->'[.]+' and '_'->'[.]
+ result = likeExpression.replace("%", "[.]+").replace("_", "[.]");
+ return result;
+ }
+
+ protected static String pathAsString( Path path,
+ ValueFactory<String> stringFactory ) {
+ assert path != null;
+ if (path.isRoot()) return "/";
+ String pathStr = stringFactory.create(path);
+ if (!pathStr.endsWith("]")) {
+ pathStr = pathStr + '[' + Path.DEFAULT_INDEX + ']';
+ }
+ return pathStr;
+ }
+
+ /**
+ * A {@link Collector} implementation that only captures the UUID of the documents returned by a query. Score information is
+ * not recorded. This is often used when querying the {@link PathIndex} to collect the UUIDs of a set of nodes satisfying some
+ * path constraint.
+ *
+ * @see DualIndexLayout.LuceneSession#findChildNodes(Path)
+ */
+ protected static class UuidCollector extends Collector {
+ private final Set<UUID> uuids = new HashSet<UUID>();
+ private String[] uuidsByDocId;
+ private int baseDocId;
+
+ protected UuidCollector() {
+ }
+
+ /**
+ * Get the UUIDs that have been collected.
+ *
+ * @return the set of UUIDs; never null
+ */
+ public Set<UUID> getUuids() {
+ return uuids;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
+ */
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
+ */
+ @Override
+ public void setScorer( Scorer scorer ) {
+ // we don't care about scoring
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#collect(int)
+ */
+ @Override
+ public void collect( int doc ) {
+ int index = doc - baseDocId;
+ assert index >= 0;
+ String uuidString = uuidsByDocId[index];
+ assert uuidString != null;
+ uuids.add(UUID.fromString(uuidString));
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public void setNextReader( IndexReader reader,
+ int docBase ) throws IOException {
+ this.uuidsByDocId = FieldCache.DEFAULT.getStrings(reader, UUID_FIELD);
+ this.baseDocId = docBase;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -1,399 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.util.Version;
-import org.jboss.dna.common.text.NoOpEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.common.util.Logger;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.property.Binary;
-import org.jboss.dna.graph.property.DateTime;
-import org.jboss.dna.graph.property.DateTimeFactory;
-import org.jboss.dna.graph.property.Name;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.Property;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryEngine;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.optimize.Optimizer;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
-import org.jboss.dna.graph.query.plan.CanonicalPlanner;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.plan.Planner;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.QueryProcessor;
-import org.jboss.dna.search.IndexRules.Rule;
-
-/**
- * A simple {@link IndexStrategy} implementation that relies upon two separate indexes: one for the node content and a second
- * one for paths and UUIDs.
- */
-@ThreadSafe
-abstract class DualIndexStrategy implements IndexStrategy {
-
- static class PathIndex {
- public static final String PATH = "path";
- public static final String UUID = "uuid";
- }
-
- static class ContentIndex {
- public static final String UUID = PathIndex.UUID;
- public static final String FULL_TEXT = "fts";
- }
-
- /**
- * The number of results that should be returned when performing queries while deleting entire branches of content. The
- * current value is {@value} .
- */
- protected static final int SIZE_OF_DELETE_BATCHES = 1000;
-
- private ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
- @Override
- protected DateFormat initialValue() {
- return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
- }
- };
-
- /**
- * Obtain an immutable {@link FieldSelector} instance that accesses the UUID field.
- */
- protected static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- return PathIndex.UUID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
- }
- };
-
- private final IndexRules rules;
- private final Logger logger;
- private final QueryEngine queryEngine;
-
- /**
- * Create a new indexing strategy instance.
- *
- * @param rules the indexing rules that govern how properties are to be index; may not be null
- */
- protected DualIndexStrategy( IndexRules rules ) {
- assert rules != null;
- this.rules = rules;
- this.logger = Logger.getLogger(getClass());
- // Create the query engine ...
- Planner planner = new CanonicalPlanner();
- Optimizer optimizer = new RuleBasedOptimizer() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
- * org.jboss.dna.graph.query.plan.PlanHints)
- */
- @Override
- protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
- PlanHints hints ) {
- super.populateRuleStack(ruleStack, hints);
- // Add any custom rules here, either at the front of the stack or at the end
- }
- };
- QueryProcessor processor = new QueryProcessor() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.QueryContext,
- * org.jboss.dna.graph.query.plan.PlanNode, org.jboss.dna.graph.query.QueryResults.Columns,
- * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
- return DualIndexStrategy.this.createAccessComponent((SearchContext)context, accessNode, resultColumns, analyzer);
- }
- };
-
- this.queryEngine = new QueryEngine(planner, optimizer, processor);
- }
-
- protected abstract ProcessingComponent createAccessComponent( SearchContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer );
-
- /**
- * Utility method to obtain a {@link DateFormat} instance that can be used safely within a single thread.
- *
- * @return the date formatter; never null
- */
- protected final DateFormat dateFormatter() {
- return dateFormatter.get();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexStrategy#getNamespaceEncoder()
- */
- public TextEncoder getNamespaceEncoder() {
- return new NoOpEncoder();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexStrategy#getChangeCountForAutomaticOptimization()
- */
- public int getChangeCountForAutomaticOptimization() {
- return 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexStrategy#createAnalyzer()
- */
- public Analyzer createAnalyzer() {
- return new StandardAnalyzer(Version.LUCENE_CURRENT);
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * Because this strategy uses multiple indexes, and since there's no correlation between the documents in those indexes, we
- * need to perform the delete in multiple steps. First, we need to perform a query to find out which nodes exist below a
- * certain path. Then, we need to delete those nodes from the paths index. Finally, we need to delete the corresponding
- * documents in the content index that represent those same nodes.
- * </p>
- * <p>
- * Since we don't know how many documents there will be, we perform these steps in batches, where each batch limits the number
- * of results to a maximum number. We repeat batches as long as we find more results. This approach has the advantage that
- * we'll never bring in a large number of results, and it allows us to delete the documents from the content node using a
- * query.
- * </p>
- *
- * @see org.jboss.dna.search.IndexStrategy#deleteBelow(Path, IndexContext)
- */
- public int deleteBelow( Path path,
- IndexContext indexes ) throws IOException {
- // Perform a query using the reader to find those nodes at/below the path ...
- try {
- IndexReader pathReader = indexes.getPathsReader();
- IndexSearcher pathSearcher = new IndexSearcher(pathReader);
- String pathStr = indexes.stringFactory().create(path) + "/";
- PrefixQuery query = new PrefixQuery(new Term(PathIndex.PATH, pathStr));
- int numberDeleted = 0;
- while (true) {
- // Execute the query and get the results ...
- TopDocs results = pathSearcher.search(query, SIZE_OF_DELETE_BATCHES);
- int numResultsInBatch = results.scoreDocs.length;
- // Walk the results, delete the doc, and add to the query that we'll use against the content index ...
- IndexReader contentReader = indexes.getContentReader();
- for (ScoreDoc result : results.scoreDocs) {
- int docId = result.doc;
- // Find the UUID of the node ...
- Document doc = pathReader.document(docId, UUID_FIELD_SELECTOR);
- String uuid = doc.get(PathIndex.UUID);
- // Delete the document from the paths index ...
- pathReader.deleteDocument(docId);
- // Delete the corresponding document from the content index ...
- contentReader.deleteDocuments(new Term(ContentIndex.UUID, uuid));
- }
- numberDeleted += numResultsInBatch;
- if (numResultsInBatch < SIZE_OF_DELETE_BATCHES) break;
- }
- indexes.commit();
- return numberDeleted;
- } catch (FileNotFoundException e) {
- // There are no index files yet, so nothing to delete ...
- return 0;
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexStrategy#index(Node, IndexContext)
- */
- public void index( Node node,
- IndexContext indexes ) throws IOException {
- ValueFactory<String> strings = indexes.stringFactory();
- Location location = node.getLocation();
- UUID uuid = location.getUuid();
- if (uuid == null) uuid = UUID.randomUUID();
- Path path = location.getPath();
- String pathStr = path.isRoot() ? "/" : strings.create(location.getPath()) + "/";
- String uuidStr = uuid.toString();
-
- if (logger.isTraceEnabled()) {
- logger.trace("indexing {0}", pathStr);
- }
-
- // Create a separate document for the path, which makes it easier to handle moves since the path can
- // be changed without changing any other content fields ...
- Document doc = new Document();
- doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field(PathIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- indexes.getPathsWriter().addDocument(doc);
-
- // Create the document for the content (properties) ...
- doc = new Document();
- doc.add(new Field(ContentIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- String stringValue = null;
- StringBuilder fullTextSearchValue = null;
- for (Property property : node.getProperties()) {
- Name name = property.getName();
- Rule rule = rules.getRule(name);
- if (rule.isSkipped()) continue;
- String nameString = strings.create(name);
- if (rule.isDate()) {
- DateTimeFactory dateFactory = indexes.dateFactory();
- for (Object value : property) {
- if (value == null) continue;
- DateTime dateValue = dateFactory.create(value);
- stringValue = dateFormatter().format(dateValue.toDate());
- // Add a separate field for each property value ...
- doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
- // Dates are not added to the full-text search field (since this wouldn't make sense)
- }
- continue;
- }
- for (Object value : property) {
- if (value == null) continue;
- if (value instanceof Binary) {
- // don't include binary values as individual fields but do include them in the full-text search ...
- // TODO : add to full-text search ...
- continue;
- }
- stringValue = strings.create(value);
- // Add a separate field for each property value ...
- doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
- // And add to the full-text field ...
- if (rule.isFullText()) {
- if (fullTextSearchValue == null) {
- fullTextSearchValue = new StringBuilder();
- } else {
- fullTextSearchValue.append(' ');
- }
- fullTextSearchValue.append(stringValue);
- }
- }
- }
- // Add the full-text-search field ...
- if (fullTextSearchValue != null) {
- doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO, Field.Index.ANALYZED));
- }
- indexes.getContentWriter().addDocument(doc);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexStrategy#search(String, int, int, IndexContext, List)
- */
- public void search( String fullTextString,
- int maxResults,
- int offset,
- IndexContext indexes,
- List<Location> results ) throws IOException, ParseException {
- assert fullTextString != null;
- assert fullTextString.length() > 0;
- assert offset >= 0;
- assert maxResults > 0;
- assert indexes != null;
- assert results != null;
-
- // Parse the full-text search and search against the 'fts' field ...
- QueryParser parser = new QueryParser(ContentIndex.FULL_TEXT, createAnalyzer());
- Query query = parser.parse(fullTextString);
- TopDocs docs = indexes.getContentSearcher().search(query, maxResults + offset);
-
- // Collect the results ...
- IndexReader contentReader = indexes.getContentReader();
- IndexReader pathReader = indexes.getPathsReader();
- IndexSearcher pathSearcher = indexes.getPathsSearcher();
- ScoreDoc[] scoreDocs = docs.scoreDocs;
- int numberOfResults = scoreDocs.length;
- if (numberOfResults > offset) {
- // There are enough results to satisfy the offset ...
- for (int i = offset, num = scoreDocs.length; i != num; ++i) {
- ScoreDoc result = scoreDocs[i];
- int docId = result.doc;
- // Find the UUID of the node (this UUID might be artificial, so we have to find the path) ...
- Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
- String uuid = doc.get(ContentIndex.UUID);
- // Find the path for this node (is there a better way to do this than one search per UUID?) ...
- TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.UUID, uuid)), 1);
- if (pathDocs.scoreDocs.length < 1) {
- // No path record found ...
- continue;
- }
- Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
- Path path = indexes.pathFactory().create(pathDoc.get(PathIndex.PATH));
- // Now add the location ...
- results.add(Location.create(path, UUID.fromString(uuid)));
- }
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexStrategy#query(org.jboss.dna.search.SearchContext,
- * org.jboss.dna.graph.query.model.QueryCommand)
- */
- public QueryResults query( SearchContext context,
- QueryCommand query ) {
- return this.queryEngine.execute(context, query);
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -1,294 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import net.jcip.annotations.NotThreadSafe;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriter.MaxFieldLength;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.Directory;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.property.DateTimeFactory;
-import org.jboss.dna.graph.property.PathFactory;
-import org.jboss.dna.graph.property.ValueFactory;
-
-/**
- * A context for working with the index readers and writers.
- */
-@NotThreadSafe
-final class IndexContext {
-
- private final ExecutionContext context;
- private final Directory pathsIndexDirectory;
- private final Directory contentIndexDirectory;
- private final Analyzer analyzer;
- private final boolean overwrite;
- private final boolean readOnly;
- private final ValueFactory<String> stringFactory;
- private final DateTimeFactory dateFactory;
- private IndexReader pathsReader;
- private IndexWriter pathsWriter;
- private IndexSearcher pathsSearcher;
- private IndexReader contentReader;
- private IndexWriter contentWriter;
- private IndexSearcher contentSearcher;
-
- IndexContext( ExecutionContext context,
- Directory pathsIndexDirectory,
- Directory contentIndexDirectory,
- Analyzer analyzer,
- boolean overwrite,
- boolean readOnly ) {
- assert context != null;
- assert pathsIndexDirectory != null;
- assert contentIndexDirectory != null;
- this.context = context;
- this.pathsIndexDirectory = pathsIndexDirectory;
- this.contentIndexDirectory = contentIndexDirectory;
- this.analyzer = analyzer;
- this.overwrite = overwrite;
- this.stringFactory = context.getValueFactories().getStringFactory();
- this.dateFactory = context.getValueFactories().getDateFactory();
- this.readOnly = readOnly;
- }
-
- /**
- * @return context
- */
- public ExecutionContext context() {
- return context;
- }
-
- /**
- * @return stringFactory
- */
- public ValueFactory<String> stringFactory() {
- return stringFactory;
- }
-
- public DateTimeFactory dateFactory() {
- return dateFactory;
- }
-
- public PathFactory pathFactory() {
- return context.getValueFactories().getPathFactory();
- }
-
- public IndexReader getPathsReader() throws IOException {
- if (pathsReader == null) {
- pathsReader = IndexReader.open(pathsIndexDirectory, readOnly);
- }
- return pathsReader;
- }
-
- public IndexReader getContentReader() throws IOException {
- if (contentReader == null) {
- contentReader = IndexReader.open(contentIndexDirectory, readOnly);
- }
- return contentReader;
- }
-
- public IndexWriter getPathsWriter() throws IOException {
- if (pathsWriter == null) {
- pathsWriter = new IndexWriter(pathsIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
- }
- return pathsWriter;
- }
-
- public IndexWriter getContentWriter() throws IOException {
- if (contentWriter == null) {
- contentWriter = new IndexWriter(contentIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
- }
- return contentWriter;
- }
-
- public IndexSearcher getPathsSearcher() throws IOException {
- if (pathsSearcher == null) {
- pathsSearcher = new IndexSearcher(getPathsReader());
- }
- return pathsSearcher;
- }
-
- public IndexSearcher getContentSearcher() throws IOException {
- if (contentSearcher == null) {
- contentSearcher = new IndexSearcher(getContentReader());
- }
- return contentSearcher;
- }
-
- public boolean hasWriters() {
- return pathsWriter != null || contentWriter != null;
- }
-
- public void commit() throws IOException {
- IOException ioError = null;
- RuntimeException runtimeError = null;
- if (pathsReader != null) {
- try {
- pathsReader.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsReader = null;
- }
- }
- if (contentReader != null) {
- try {
- contentReader.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- contentReader = null;
- }
- }
- if (pathsWriter != null) {
- try {
- pathsWriter.commit();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- try {
- pathsWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsWriter = null;
- }
- }
- }
- if (contentWriter != null) {
- try {
- contentWriter.commit();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- try {
- contentWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- contentWriter = null;
- }
- }
- }
- if (ioError != null) throw ioError;
- if (runtimeError != null) throw runtimeError;
- }
-
- public void rollback() throws IOException {
- IOException ioError = null;
- RuntimeException runtimeError = null;
- if (pathsReader != null) {
- try {
- pathsReader.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsReader = null;
- }
- }
- if (contentReader != null) {
- try {
- contentReader.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- contentReader = null;
- }
- }
- if (pathsWriter != null) {
- try {
- pathsWriter.rollback();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- try {
- pathsWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsWriter = null;
- }
- }
- }
- if (contentWriter != null) {
- try {
- contentWriter.rollback();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- try {
- contentWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- contentWriter = null;
- }
- }
- }
- if (ioError != null) throw ioError;
- if (runtimeError != null) throw runtimeError;
- }
-
- /**
- * Create a copy of this index context, except that it uses the supplied execution context.
- *
- * @param context the new execution context that should be used in the copy
- * @return the new context; never null
- * @throws IllegalArgumentException if the context is null
- */
- public IndexContext with( ExecutionContext context ) {
- CheckArg.isNotNull(context, "context");
- return new IndexContext(context, pathsIndexDirectory, contentIndexDirectory, analyzer, overwrite, readOnly);
- }
-
-}
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,66 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.IOException;
+import net.jcip.annotations.ThreadSafe;
+import org.jboss.dna.graph.ExecutionContext;
+
+/**
+ * The representation of a single layout of one or more Lucene indexes.
+ */
+@ThreadSafe
+public interface IndexLayout {
+
+ /**
+ * Create a new session to the indexes.
+ *
+ * @param context the execution context for which this session is to be established; may not be null
+ * @param sourceName the name of the source; may not be null
+ * @param workspaceName the name of the workspace; may not be null
+ * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
+ * @param readOnly true if the resulting session can be optimized for use in read-only situations, or false if the session
+ * needs to allow calling the write methods
+ * @return the session to the indexes; never null
+ */
+ IndexSession createSession( ExecutionContext context,
+ String sourceName,
+ String workspaceName,
+ boolean overwrite,
+ boolean readOnly );
+
+ /**
+ * Destroy the indexes for the workspace with the supplied name.
+ *
+ * @param context the execution context in which the destruction should be performed; may not be null
+ * @param sourceName the name of the source; may not be null
+ * @param workspaceName the name of the workspace; may not be null
+ * @return true if the indexes for the workspace were destroyed, or false if there was no such workspace index
+ * @throws IOException if there is a problem destroying the indexes
+ */
+ boolean destroyIndexes( ExecutionContext context,
+ String sourceName,
+ String workspaceName ) throws IOException;
+
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -39,7 +39,7 @@
* The set of rules that dictate how properties should be indexed.
*/
@Immutable
-class IndexRules {
+public class IndexRules {
public static final int INDEX = 2 << 0;
public static final int ANALYZE = 2 << 1;
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java (from rev 1319, trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,162 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.search.IndexSearcher;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
+import org.jboss.dna.graph.connector.RepositorySource;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.validate.Schemata;
+import org.jboss.dna.graph.request.ChangeRequest;
+
+/**
+ * A stateful session that maintains {@link IndexReader}, {@link IndexWriter} and {@link IndexSearcher} resources to the indexes
+ * of a particular source and workspace.
+ */
+public interface IndexSession {
+
+ /**
+ * Get the name of the {@link RepositorySource repository source} for which this session exists. A session instance will
+ * always return the same name.
+ *
+ * @return the source name; never null
+ */
+ String getSourceName();
+
+ /**
+ * Get the name of the workspace for which this session exists. A session instance will always return the same name.
+ *
+ * @return the workspace name; never null
+ */
+ String getWorkspaceName();
+
+ /**
+ * Get the execution context in which this session is operating.
+ *
+ * @return the execution context; never null
+ */
+ ExecutionContext getContext();
+
+ /**
+ * Return whether this session made changes to the indexed state.
+ *
+ * @return true if change were made, or false otherwise
+ */
+ boolean hasChanges();
+
+ /**
+ * Perform a full-text search given the supplied query.
+ *
+ * @param context the context in which the search should be executed; may not be null
+ * @param fullTextString the full-text query; never null or blank
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
+ * @param results the list where the results should be accumulated; never null
+ * @throws IOException if there is a problem indexing or using the indexes
+ * @throws ParseException if there is a problem parsing the query
+ */
+ void search( ExecutionContext context,
+ String fullTextString,
+ int maxResults,
+ int offset,
+ List<Location> results ) throws IOException, ParseException;
+
+ /**
+ * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model, with
+ * the {@link Schemata} that defines the tables and views that are available to the query, and the set of index readers (and
+ * writers) that should be used.
+ *
+ * @param queryContext the context in which the query should be executed; may not be null
+ * @param query the query; never null
+ * @return the results of the query; never null
+ * @throws IOException if there is a problem indexing or using the indexes
+ * @throws ParseException if there is a problem parsing the query
+ */
+ QueryResults query( QueryContext queryContext,
+ QueryCommand query ) throws IOException, ParseException;
+
+ /**
+ * Index the node given the index writers. Note that implementors should simply just use the writers to add documents to the
+ * index(es), and should never call any of the writer lifecycle methods (e.g., {@link IndexWriter#commit()},
+ * {@link IndexWriter#rollback()}, etc.).
+ *
+ * @param node the node to be indexed; never null
+ * @throws IOException if there is a problem indexing or using the writers
+ */
+ void index( Node node ) throws IOException;
+
+ /**
+ * Update the indexes to reflect the supplied changes to the graph content. Note that implementors should simply just use the
+ * writers to add documents to the index(es), and should never call any of the writer lifecycle methods (e.g.,
+ * {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
+ *
+ * @param changes the set of changes to the content
+ * @return the (approximate) number of nodes that were affected by the changes
+ * @throws IOException if there is a problem indexing or using the writers
+ */
+ int apply( Iterable<ChangeRequest> changes ) throws IOException;
+
+ /**
+ * Remove from the index(es) all of the information pertaining to the nodes at or below the supplied path. Note that
+ * implementors should simply just use the writers to add documents to the index(es), and should never call any of the writer
+ * lifecycle methods (e.g., {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
+ *
+ * @param path the path identifying the graph content that is to be removed; never null
+ * @return the (approximate) number of nodes that were affected by the changes
+ * @throws IOException if there is a problem indexing or using the writers
+ */
+ int deleteBelow( Path path ) throws IOException;
+
+ /**
+ * Optimize the indexes, if required.
+ *
+ * @throws IOException if there is a problem optimizing
+ */
+ void optimize() throws IOException;
+
+ /**
+ * Close this session by committing all of the changes. This session is no longer usable after this method is called.
+ *
+ * @throws IOException if there is a problem committing
+ */
+ void commit() throws IOException;
+
+ /**
+ * Close this session by rolling back all of the changes that have been made. This session is no longer usable after this
+ * method is called.
+ *
+ * @throws IOException if there is a problem rolling back
+ */
+ void rollback() throws IOException;
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -1,133 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.List;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.queryParser.ParseException;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.ChangeRequest;
-
-/**
- * Interface defining the behaviors associated with indexing graph content.
- */
-interface IndexStrategy {
-
- /**
- * Get the number of changes that are allowed before optimization is automatically run.
- *
- * @return a positive number denoting the minimum number of changes between automatic optimization operations, or a
- * non-positive number if automatic optimization should never be run
- */
- int getChangeCountForAutomaticOptimization();
-
- /**
- * Get the {@link TextEncoder} that should be used to encode the namespace URIs.
- *
- * @return the encoder; may not be null
- */
- TextEncoder getNamespaceEncoder();
-
- /**
- * Index the node given the index writers. Note that implementors should simply just use the writers to add documents to the
- * index(es), and should never call any of the writer lifecycle methods (e.g., {@link IndexWriter#commit()},
- * {@link IndexWriter#rollback()}, etc.).
- *
- * @param node the node to be indexed; never null
- * @param indexes the set of index readers and writers; never null
- * @throws IOException if there is a problem indexing or using the writers
- */
- void index( Node node,
- IndexContext indexes ) throws IOException;
-
- /**
- * Update the indexes to reflect the supplied changes to the graph content. Note that implementors should simply just use the
- * writers to add documents to the index(es), and should never call any of the writer lifecycle methods (e.g.,
- * {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
- *
- * @param changes the set of changes to the content
- * @param indexes the set of index readers and writers; never null
- * @return the (approximate) number of nodes that were affected by the changes
- * @throws IOException if there is a problem indexing or using the writers
- */
- int apply( Iterable<ChangeRequest> changes,
- IndexContext indexes ) throws IOException;
-
- /**
- * Remove from the index(es) all of the information pertaining to the nodes at or below the supplied path. Note that
- * implementors should simply just use the writers to add documents to the index(es), and should never call any of the writer
- * lifecycle methods (e.g., {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
- *
- * @param path the path identifying the graph content that is to be removed; never null
- * @param indexes the set of index readers and writers; never null
- * @return the (approximate) number of nodes that were affected by the changes
- * @throws IOException if there is a problem indexing or using the writers
- */
- int deleteBelow( Path path,
- IndexContext indexes ) throws IOException;
-
- /**
- * Create the analyzer that is used for reading and updating the indexes.
- *
- * @return the analyzer; may not be null
- */
- Analyzer createAnalyzer();
-
- /**
- * Perform a full-text search given the supplied query.
- *
- * @param fullTextString the full-text query; never null or blank
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @param indexes the set of index readers and writers; never null
- * @param results the list where the results should be accumulated; never null
- * @throws IOException if there is a problem indexing or using the writers
- * @throws ParseException if there is a problem parsing the query
- */
- void search( String fullTextString,
- int maxResults,
- int offset,
- IndexContext indexes,
- List<Location> results ) throws IOException, ParseException;
-
- /**
- * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model, with
- * the {@link Schemata} that defines the tables and views that are available to the query, and the set of index readers (and
- * writers) that should be used.
- *
- * @param context the context in which the query should be executed; never null
- * @param query the query; never null
- * @return the results of the query
- */
- QueryResults query( SearchContext context,
- QueryCommand query );
-}
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,656 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.jboss.dna.common.i18n.I18n;
+import org.jboss.dna.graph.DnaLexicon;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.property.Binary;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.BindVariableName;
+import org.jboss.dna.graph.query.model.ChildNode;
+import org.jboss.dna.graph.query.model.Comparison;
+import org.jboss.dna.graph.query.model.Constraint;
+import org.jboss.dna.graph.query.model.DescendantNode;
+import org.jboss.dna.graph.query.model.DynamicOperand;
+import org.jboss.dna.graph.query.model.FullTextSearch;
+import org.jboss.dna.graph.query.model.FullTextSearchScore;
+import org.jboss.dna.graph.query.model.Length;
+import org.jboss.dna.graph.query.model.Literal;
+import org.jboss.dna.graph.query.model.LowerCase;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.Not;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.Or;
+import org.jboss.dna.graph.query.model.PropertyExistence;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.model.SameNode;
+import org.jboss.dna.graph.query.model.SelectorName;
+import org.jboss.dna.graph.query.model.SetCriteria;
+import org.jboss.dna.graph.query.model.StaticOperand;
+import org.jboss.dna.graph.query.model.UpperCase;
+import org.jboss.dna.graph.query.model.Visitors;
+import org.jboss.dna.graph.query.model.FullTextSearch.NegationTerm;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.process.AbstractAccessComponent;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.SelectComponent;
+import org.jboss.dna.graph.query.process.SelectComponent.Analyzer;
+
+/**
+ * An {@link IndexLayout} implementation that stores all content within a set of two indexes: one for the node content and a
+ * second one for paths and UUIDs.
+ */
+@ThreadSafe
+public class KitchenSinkIndexLayout extends DualIndexLayout {
+
+ /**
+ * The default set of {@link IndexRules} used by {@link KitchenSinkIndexLayout} instances when no rules are provided.
+ */
+ public static final IndexRules DEFAULT_RULES;
+
+ static {
+ IndexRules.Builder builder = IndexRules.createBuilder();
+ // Configure the default behavior ...
+ builder.defaultTo(IndexRules.INDEX | IndexRules.ANALYZE);
+ // Configure the UUID properties to be just indexed (not stored, not analyzed, not included in full-text) ...
+ builder.index(JcrLexicon.UUID, DnaLexicon.UUID);
+ // Configure the properties that we'll treat as dates ...
+ builder.treatAsDates(JcrLexicon.CREATED, JcrLexicon.LAST_MODIFIED);
+ DEFAULT_RULES = builder.build();
+ }
+
+ private final IndexRules rules;
+ private final DirectoryConfiguration directoryConfiguration;
+
+ public KitchenSinkIndexLayout( DirectoryConfiguration directoryConfiguration ) {
+ this.rules = DEFAULT_RULES;
+ this.directoryConfiguration = directoryConfiguration;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexLayout#createSession(org.jboss.dna.graph.ExecutionContext, java.lang.String,
+ * java.lang.String, boolean, boolean)
+ */
+ public IndexSession createSession( ExecutionContext context,
+ String sourceName,
+ String workspaceName,
+ boolean overwrite,
+ boolean readOnly ) {
+ Directory pathIndexDirectory = directoryConfiguration.getDirectory(workspaceName, PATHS_INDEX_NAME);
+ Directory contentIndexDirectory = directoryConfiguration.getDirectory(workspaceName, CONTENT_INDEX_NAME);
+ assert pathIndexDirectory != null;
+ assert contentIndexDirectory != null;
+ return new Session(context, sourceName, workspaceName, rules, pathIndexDirectory, contentIndexDirectory, overwrite,
+ readOnly);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexLayout#destroyIndexes(org.jboss.dna.graph.ExecutionContext, java.lang.String,
+ * java.lang.String)
+ */
+ public boolean destroyIndexes( ExecutionContext context,
+ String sourceName,
+ String workspaceName ) {
+ directoryConfiguration.destroyDirectory(workspaceName, PATHS_INDEX_NAME);
+ directoryConfiguration.destroyDirectory(workspaceName, CONTENT_INDEX_NAME);
+ return true;
+ }
+
+ protected class Session extends LuceneSession {
+
+ protected Session( ExecutionContext context,
+ String sourceName,
+ String workspaceName,
+ IndexRules rules,
+ Directory pathsIndexDirectory,
+ Directory contentIndexDirectory,
+ boolean overwrite,
+ boolean readOnly ) {
+ super(context, sourceName, workspaceName, rules, pathsIndexDirectory, contentIndexDirectory, overwrite, readOnly);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.DualIndexLayout.LuceneSession#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
+ * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
+ * org.jboss.dna.graph.query.QueryResults.Columns, org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
+ */
+ @Override
+ protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+ QueryContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ Analyzer analyzer ) throws IOException {
+ // Create a processing component for this access query ...
+ return new LuceneQueryComponent(this, originalQuery, context, resultColumns, accessNode, analyzer, sourceName,
+ workspaceName);
+ }
+
+ }
+
+ /**
+ * The {@link ProcessingComponent} implementation that executes a single atomic access query against the Lucene indexes.
+ */
+ protected static class LuceneQueryComponent extends AbstractAccessComponent {
+ private final QueryCommand originalQuery;
+ private final Session session;
+ private final IndexReader pathIndexReader;
+ private final Analyzer analyzer;
+ private final String sourceName;
+ private final String workspaceName;
+
+ protected LuceneQueryComponent( Session session,
+ QueryCommand originalQuery,
+ QueryContext context,
+ Columns columns,
+ PlanNode accessNode,
+ Analyzer analyzer,
+ String sourceName,
+ String workspaceName ) throws IOException {
+ super(context, columns, accessNode);
+ this.originalQuery = originalQuery;
+ this.analyzer = analyzer;
+ this.session = session;
+ this.pathIndexReader = session.getPathsReader();
+ this.sourceName = sourceName;
+ this.workspaceName = workspaceName;
+ }
+
+ protected String fieldNameFor( Name name ) {
+ return session.stringFactory.create(name);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.process.ProcessingComponent#execute()
+ */
+ @Override
+ public List<Object[]> execute() {
+
+ // Some kinds of constraints are not easily pushed down to Lucene as are of a Lucene Query, and
+ // instead are applied by filtering the results. For example, a FullTextSearchScore applies
+ // to the score of the tuple, which cannot be (easily?) applied as a Query.
+ //
+ // Therefore, each of the AND-ed constraints of the query are evaluated separately. After all,
+ // each of the tuples returned by the planned query must satisfy all of the AND-ed constraints.
+ // Or, to put it another way, if a tuple does not satisfy one of the AND-ed constraints, the
+ // tuple should not be included in the query results.
+ //
+ // Logically, any AND-ed criteria that cannot be pushed down to Lucene can of course be applied
+ // as a filter on the results. Thus, each AND-ed constraint is processed to first determine if
+ // it can be represented as a Lucene query; all other AND-ed constraints must be handled as
+ // a results filter. Since most queries will likely use one or more simple constraints AND-ed
+ // together, this approach will likely work very well.
+ //
+ // The only hairy case is when any AND-ed constraint is actually an OR-ed combination of multiple
+ // constraints of which at least one cannot be pushed down to Lucene. In this case, the entire
+ // AND-ed constraint must be treated as a results filter (even if many of those constraints that
+ // make up the OR-ed constraint can be pushed down). Hopefully, this will not be a common case
+ // in actual queries.
+
+ // For each of the AND-ed constraints ...
+ Query pushDownQuery = null;
+ Constraint postProcessConstraint = null;
+ try {
+ for (Constraint andedConstraint : this.andedConstraints) {
+ // Determine if it can be represented as a Lucene query ...
+ Query constraintQuery = createQuery(andedConstraint);
+ if (constraintQuery != null) {
+ // The AND-ed constraint _can_ be represented as a push-down Lucene query ...
+ if (pushDownQuery == null) {
+ // This must be the first query ...
+ pushDownQuery = constraintQuery;
+ } else if (pushDownQuery instanceof BooleanQuery) {
+ // We have to add the constraint query to the existing boolean ...
+ BooleanQuery booleanQuery = (BooleanQuery)pushDownQuery;
+ booleanQuery.add(constraintQuery, Occur.MUST);
+ } else {
+ // This is the second push-down query, so create a BooleanQuery ...
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(pushDownQuery, Occur.MUST);
+ booleanQuery.add(constraintQuery, Occur.MUST);
+ pushDownQuery = booleanQuery;
+ }
+ } else {
+ // The AND-ed constraint _cannot_ be represented as a push-down Lucene query ...
+ if (postProcessConstraint == null) {
+ postProcessConstraint = andedConstraint;
+ } else {
+ postProcessConstraint = new And(postProcessConstraint, andedConstraint);
+ }
+ }
+ }
+ } catch (IOException e) {
+ // There was a error working with the constraints (such as a ValueFormatException) ...
+ QueryContext context = getContext();
+ I18n msg = SearchI18n.errorWhilePerformingQuery;
+ String origQueryString = Visitors.readable(originalQuery, context.getExecutionContext());
+ context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
+ return emptyTuples();
+ } catch (RuntimeException e) {
+ // There was a error working with the constraints (such as a ValueFormatException) ...
+ QueryContext context = getContext();
+ I18n msg = SearchI18n.errorWhilePerformingQuery;
+ String origQueryString = Visitors.readable(originalQuery, context.getExecutionContext());
+ context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
+ return emptyTuples();
+ }
+
+ if (pushDownQuery == null) {
+ // There are no constraints that can be pushed down, so return _all_ the nodes ...
+ pushDownQuery = new MatchAllDocsQuery();
+ }
+
+ // Get the results from Lucene ...
+ List<Object[]> tuples = null;
+ final Columns columns = getColumns();
+ final QueryContext context = getContext();
+ final ExecutionContext execContext = context.getExecutionContext();
+ try {
+ // Execute the query against the content indexes ...
+ IndexSearcher searcher = session.getContentSearcher();
+ TupleCollector collector = new TupleCollector(columns, execContext.getValueFactories().getUuidFactory());
+ searcher.search(pushDownQuery, collector);
+ tuples = collector.getTuples();
+ } catch (IOException e) {
+ // There was a problem executing the Lucene query ...
+ I18n msg = SearchI18n.errorWhilePerformingLuceneQuery;
+ String origQueryString = Visitors.readable(originalQuery, execContext);
+ context.getProblems().addError(e, msg, pushDownQuery, origQueryString, workspaceName, sourceName, e.getMessage());
+ return emptyTuples();
+ }
+
+ if (postProcessConstraint != null && !tuples.isEmpty()) {
+ // Create a delegate processing component that will return the tuples we've already found ...
+ final List<Object[]> allTuples = tuples;
+ ProcessingComponent tuplesProcessor = new ProcessingComponent(context, columns) {
+ @Override
+ public List<Object[]> execute() {
+ return allTuples;
+ }
+ };
+ // Create a processing component that will apply these constraints to the tuples we already found ...
+ return new SelectComponent(tuplesProcessor, postProcessConstraint, context.getVariables()).execute();
+ }
+ return tuples;
+ }
+
+ protected Query createQuery( Constraint constraint ) throws IOException {
+ if (constraint instanceof And) {
+ And and = (And)constraint;
+ Query leftQuery = createQuery(and.getLeft());
+ Query rightQuery = createQuery(and.getRight());
+ if (leftQuery == null || rightQuery == null) return null;
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(createQuery(and.getLeft()), Occur.MUST);
+ booleanQuery.add(createQuery(and.getRight()), Occur.MUST);
+ return booleanQuery;
+ }
+ if (constraint instanceof Or) {
+ Or or = (Or)constraint;
+ Query leftQuery = createQuery(or.getLeft());
+ Query rightQuery = createQuery(or.getRight());
+ if (leftQuery == null) {
+ return rightQuery != null ? rightQuery : null;
+ } else if (rightQuery == null) {
+ return leftQuery;
+ }
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(createQuery(or.getLeft()), Occur.SHOULD);
+ booleanQuery.add(createQuery(or.getRight()), Occur.SHOULD);
+ return booleanQuery;
+ }
+ if (constraint instanceof Not) {
+ Not not = (Not)constraint;
+ Query notted = createQuery(not.getConstraint());
+ if (notted == null) return new MatchAllDocsQuery();
+ }
+ if (constraint instanceof SetCriteria) {
+ SetCriteria setCriteria = (SetCriteria)constraint;
+ DynamicOperand left = setCriteria.getLeftOperand();
+ int numRightOperands = setCriteria.getRightOperands().size();
+ assert numRightOperands > 0;
+ if (numRightOperands == 1) {
+ return createQuery(left, Operator.EQUAL_TO, setCriteria.getRightOperands().iterator().next());
+ }
+ BooleanQuery setQuery = new BooleanQuery();
+ for (StaticOperand right : setCriteria.getRightOperands()) {
+ Query rightQuery = createQuery(left, Operator.EQUAL_TO, right);
+ if (rightQuery == null) return null;
+ setQuery.add(rightQuery, Occur.SHOULD);
+ }
+ return setQuery;
+ }
+ if (constraint instanceof PropertyExistence) {
+ PropertyExistence existence = (PropertyExistence)constraint;
+ return createQuery(existence.getSelectorName(), existence.getPropertyName());
+ }
+ if (constraint instanceof Comparison) {
+ Comparison comparison = (Comparison)constraint;
+ return createQuery(comparison.getOperand1(), comparison.getOperator(), comparison.getOperand2());
+ }
+ if (constraint instanceof FullTextSearch) {
+ FullTextSearch search = (FullTextSearch)constraint;
+ String fieldName = ContentIndex.FULL_TEXT;
+ Name propertyName = search.getPropertyName();
+ if (propertyName != null) {
+ fieldName = session.fullTextFieldName(fieldNameFor(propertyName));
+ }
+ return createQuery(fieldName, search.getTerm());
+ }
+ try {
+ if (constraint instanceof SameNode) {
+ SameNode sameNode = (SameNode)constraint;
+ return session.findNodeAt(sameNode.getPath());
+ }
+ if (constraint instanceof ChildNode) {
+ ChildNode childNode = (ChildNode)constraint;
+ return session.findChildNodes(childNode.getParentPath());
+ }
+ if (constraint instanceof DescendantNode) {
+ DescendantNode descendantNode = (DescendantNode)constraint;
+ return session.findAllNodesBelow(descendantNode.getAncestorPath());
+ }
+ } catch (IOException e) {
+ I18n msg = SearchI18n.errorWhilePerformingQuery;
+ getContext().getProblems().addError(e,
+ msg,
+ Visitors.readable(originalQuery),
+ workspaceName,
+ sourceName,
+ e.getMessage());
+ return null;
+ }
+ // Should not get here ...
+ assert false;
+ return null;
+ }
+
+ protected Query createQuery( DynamicOperand left,
+ Operator operator,
+ StaticOperand right ) throws IOException {
+ return createQuery(left, operator, right, true);
+ }
+
+ protected Query createQuery( DynamicOperand left,
+ Operator operator,
+ StaticOperand right,
+ boolean caseSensitive ) throws IOException {
+ // Handle the static operand ...
+ Object value = null;
+ if (right instanceof Literal) {
+ Literal literal = (Literal)right;
+ value = literal.getValue();
+ if (!caseSensitive) value = lowerCase(value);
+ } else if (right instanceof BindVariableName) {
+ BindVariableName variable = (BindVariableName)right;
+ String variableName = variable.getVariableName();
+ value = getContext().getVariables().get(variableName);
+ if (!caseSensitive) value = lowerCase(value);
+ } else {
+ assert false;
+ return null;
+ }
+
+ // Address the dynamic operand ...
+ if (left instanceof FullTextSearchScore) {
+ // This can only be represented as a filter ...
+ return null;
+ } else if (left instanceof PropertyValue) {
+ return session.findNodesWith((PropertyValue)left, operator, value, caseSensitive);
+ } else if (left instanceof Length) {
+ Length length = (Length)left;
+ PropertyValue nested = length.getPropertyValue();
+
+ return null;
+ } else if (left instanceof LowerCase) {
+ LowerCase lowercase = (LowerCase)left;
+ return createQuery(lowercase.getOperand(), operator, right, false);
+ } else if (left instanceof UpperCase) {
+ UpperCase lowercase = (UpperCase)left;
+ return createQuery(lowercase.getOperand(), operator, right, false);
+ } else if (left instanceof NodeDepth) {
+ assert operator != Operator.LIKE;
+ // Could be represented as a result filter, but let's do this now ...
+ return session.findNodesWith((NodeDepth)left, operator, value);
+ } else if (left instanceof NodePath) {
+ return session.findNodesWith((NodePath)left, operator, value, caseSensitive);
+ } else if (left instanceof NodeName) {
+ return session.findNodesWith((NodeName)left, operator, value, caseSensitive);
+ } else if (left instanceof NodeLocalName) {
+ return session.findNodesWith((NodeLocalName)left, operator, value, caseSensitive);
+ } else {
+ assert false;
+ return null;
+ }
+ }
+
+ protected Object lowerCase( Object value ) {
+ if (value instanceof String) {
+ return ((String)value).toLowerCase();
+ }
+ assert !(value instanceof Binary);
+ ValueFactory<String> stringFactory = getContext().getExecutionContext().getValueFactories().getStringFactory();
+ ValueFactory<?> valueFactory = getContext().getExecutionContext().getValueFactories().getValueFactory(value);
+ return valueFactory.create(stringFactory.create(value).toLowerCase());
+ }
+
+ protected Query createQuery( SelectorName selectorName,
+ Name propertyName ) {
+ Term term = new Term(fieldNameFor(propertyName));
+ return new TermQuery(term);
+ }
+
+ protected Query createQuery( String fieldName,
+ FullTextSearch.Term term ) {
+ if (term instanceof FullTextSearch.Conjunction) {
+ FullTextSearch.Conjunction conjunction = (FullTextSearch.Conjunction)term;
+ BooleanQuery query = new BooleanQuery();
+ for (FullTextSearch.Term nested : conjunction) {
+ if (nested instanceof NegationTerm) {
+ query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
+ } else {
+ query.add(createQuery(fieldName, nested), Occur.MUST);
+ }
+ }
+ return query;
+ }
+ if (term instanceof FullTextSearch.Disjunction) {
+ FullTextSearch.Disjunction disjunction = (FullTextSearch.Disjunction)term;
+ BooleanQuery query = new BooleanQuery();
+ for (FullTextSearch.Term nested : disjunction) {
+ if (nested instanceof NegationTerm) {
+ query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
+ } else {
+ query.add(createQuery(fieldName, nested), Occur.SHOULD);
+ }
+ }
+ return query;
+ }
+ if (term instanceof FullTextSearch.SimpleTerm) {
+ FullTextSearch.SimpleTerm simple = (FullTextSearch.SimpleTerm)term;
+ if (simple.isQuotingRequired()) {
+ PhraseQuery query = new PhraseQuery();
+ query.setSlop(0); // terms must be adjacent
+ for (String value : simple.getValues()) {
+ query.add(new Term(fieldName, value));
+ }
+ return query;
+ }
+ return new TermQuery(new Term(fieldName, simple.getValue()));
+ }
+ // Should not get here ...
+ assert false;
+ return null;
+ }
+ }
+
+ /**
+ * This collector is responsible for loading the value for each of the columns into each tuple array.
+ */
+ protected static class TupleCollector extends Collector {
+ private final LinkedList<Object[]> tuples = new LinkedList<Object[]>();
+ private final Columns columns;
+ private final int numValues;
+ private final boolean recordScore;
+ private final int scoreIndex;
+ private final FieldSelector fieldSelector;
+ private final int locationIndex;
+ private final ValueFactory<UUID> uuidFactory;
+ private Scorer scorer;
+ private IndexReader currentReader;
+ private int docOffset;
+
+ protected TupleCollector( Columns columns,
+ ValueFactory<UUID> uuidFactory ) {
+ this.columns = columns;
+ this.uuidFactory = uuidFactory;
+ assert this.columns != null;
+ assert this.uuidFactory != null;
+ this.numValues = this.columns.getTupleSize();
+ assert this.numValues >= 0;
+ assert this.columns.getSelectorNames().size() == 1;
+ final String selectorName = this.columns.getSelectorNames().get(0);
+ this.locationIndex = this.columns.getLocationIndex(selectorName);
+ this.recordScore = this.columns.hasFullTextSearchScores();
+ this.scoreIndex = this.recordScore ? this.columns.getFullTextSearchScoreIndexFor(selectorName) : -1;
+ final Set<String> columnNames = new HashSet<String>(this.columns.getColumnNames());
+ columnNames.add(ContentIndex.UUID); // add the UUID, which we'll put into the Location ...
+ this.fieldSelector = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return columnNames.contains(fieldName) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
+ }
+ };
+ }
+
+ /**
+ * @return tuples
+ */
+ public LinkedList<Object[]> getTuples() {
+ return tuples;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
+ */
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public void setNextReader( IndexReader reader,
+ int docBase ) {
+ this.currentReader = reader;
+ this.docOffset = docBase;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
+ */
+ @Override
+ public void setScorer( Scorer scorer ) {
+ this.scorer = scorer;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#collect(int)
+ */
+ @Override
+ public void collect( int doc ) throws IOException {
+ int docId = doc + docOffset;
+ Object[] tuple = new Object[numValues];
+ Document document = currentReader.document(docId, fieldSelector);
+ for (String columnName : columns.getColumnNames()) {
+ int index = columns.getColumnIndexForName(columnName);
+ // We just need to retrieve the first value if there is more than one ...
+ tuple[index] = document.get(columnName);
+ }
+
+ // Set the score column if required ...
+ if (recordScore) {
+ assert scorer != null;
+ tuple[scoreIndex] = scorer.score();
+ }
+
+ // Load the UUID into a Location object ...
+ UUID uuid = uuidFactory.create(document.get(ContentIndex.UUID));
+ tuple[locationIndex] = Location.create(uuid);
+ tuples.add(tuple);
+ }
+ }
+
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -1,115 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.util.List;
-import net.jcip.annotations.ThreadSafe;
-import org.jboss.dna.graph.DnaLexicon;
-import org.jboss.dna.graph.JcrLexicon;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.process.AbstractAccessComponent;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.SelectComponent.Analyzer;
-import org.jboss.dna.graph.request.ChangeRequest;
-
-/**
- * An {@link IndexStrategy} implementation that stores all content within a set of two indexes: one for the node content and a
- * second one for paths and UUIDs.
- */
-@ThreadSafe
-class KitchenSinkIndexStrategy extends DualIndexStrategy {
-
- /**
- * The default set of {@link IndexRules} used by {@link KitchenSinkIndexStrategy} instances when no rules are provided.
- */
- public static final IndexRules DEFAULT_RULES;
-
- static {
- IndexRules.Builder builder = IndexRules.createBuilder();
- // Configure the default behavior ...
- builder.defaultTo(IndexRules.INDEX | IndexRules.ANALYZE);
- // Configure the UUID properties to be just indexed (not stored, not analyzed, not included in full-text) ...
- builder.index(JcrLexicon.UUID, DnaLexicon.UUID);
- // Configure the properties that we'll treat as dates ...
- builder.treatAsDates(JcrLexicon.CREATED, JcrLexicon.LAST_MODIFIED);
- DEFAULT_RULES = builder.build();
- }
-
- /**
- * Create a new indexing strategy instance uses the {@link #DEFAULT_RULES default indexing rules}.
- */
- public KitchenSinkIndexStrategy() {
- this(null);
- }
-
- /**
- * Create a new indexing strategy instance.
- *
- * @param rules the indexing rules that govern how properties are to be index, or null if the {@link #DEFAULT_RULES default
- * rules} are to be used
- */
- public KitchenSinkIndexStrategy( IndexRules rules ) {
- super(rules != null ? rules : DEFAULT_RULES);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.DualIndexStrategy#createAccessComponent(org.jboss.dna.search.SearchContext,
- * org.jboss.dna.graph.query.plan.PlanNode, org.jboss.dna.graph.query.QueryResults.Columns,
- * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( final SearchContext context,
- PlanNode accessNode,
- Columns resultColumns,
- Analyzer analyzer ) {
- // Create a processing component for this access query ...
- return new AbstractAccessComponent(context, resultColumns, accessNode) {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.ProcessingComponent#execute()
- */
- @Override
- public List<Object[]> execute() {
- return null;
- }
- };
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexStrategy#apply(Iterable, IndexContext)
- */
- public int apply( Iterable<ChangeRequest> changes,
- IndexContext indexes ) /*throws IOException*/{
- for (ChangeRequest change : changes) {
- if (change != null) continue;
- }
- return 0;
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -1,161 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.util.Map;
-import org.jboss.dna.common.collection.Problems;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.validate.Schemata;
-
-/**
- *
- */
-class SearchContext extends QueryContext {
-
- private final IndexContext indexes;
-
- /**
- * Create a new context for searching and querying.
- *
- * @param indexes the indexes that should be used
- * @param schemata the definition of the tables available to this query
- */
- public SearchContext( IndexContext indexes,
- Schemata schemata ) {
- super(indexes.context(), schemata);
- this.indexes = indexes;
- assert this.indexes != null;
- }
-
- /**
- * Create a new context for searching and querying.
- *
- * @param queryContext
- * @param indexes
- */
- public SearchContext( QueryContext queryContext,
- IndexContext indexes ) {
- super(queryContext.getExecutionContext(), queryContext.getSchemata(), queryContext.getHints(),
- queryContext.getProblems(), queryContext.getVariables());
- this.indexes = indexes;
- assert this.indexes != null;
- }
-
- /**
- * Create a new context for searching and querying.
- *
- * @param context the execution context
- * @param schemata the schemata
- * @param hints the hints, or null if there are no hints
- * @param problems the problems container, or null if a new problems container should be created
- * @param variables the mapping of variables and values, or null if there are no such variables
- * @throws IllegalArgumentException if the context or schmata are null
- */
- public SearchContext( IndexContext context,
- Schemata schemata,
- PlanHints hints,
- Problems problems,
- Map<String, Object> variables ) {
- super(context.context(), schemata, hints, problems, variables);
- this.indexes = context;
- assert this.indexes != null;
- }
-
- /**
- * Get the {@link IndexContext} for this query context.
- *
- * @return the index context; never null
- */
- public IndexContext getIndexes() {
- return indexes;
- }
-
- /**
- * Obtain a copy of this context, except that the copy uses the supplied index context.
- *
- * @param context the index context that should be used in the new query context
- * @return the new context; never null
- * @throws IllegalArgumentException if the index context reference is null
- */
- public SearchContext with( IndexContext context ) {
- CheckArg.isNotNull(context, "context");
- return new SearchContext(context, getSchemata(), getHints(), getProblems(), getVariables());
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.graph.ExecutionContext)
- */
- @Override
- public SearchContext with( ExecutionContext context ) {
- CheckArg.isNotNull(context, "context");
- return new SearchContext(indexes.with(context), getSchemata(), getHints(), getProblems(), getVariables());
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.graph.query.validate.Schemata)
- */
- @Override
- public SearchContext with( Schemata schemata ) {
- CheckArg.isNotNull(schemata, "schemata");
- return new SearchContext(indexes, schemata, getHints(), getProblems(), getVariables());
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.graph.query.plan.PlanHints)
- */
- @Override
- public SearchContext with( PlanHints hints ) {
- CheckArg.isNotNull(hints, "hints");
- return new SearchContext(indexes, getSchemata(), hints, getProblems(), getVariables());
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.common.collection.Problems)
- */
- @Override
- public SearchContext with( Problems problems ) {
- return new SearchContext(indexes, getSchemata(), getHints(), problems, getVariables());
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.QueryContext#with(java.util.Map)
- */
- @Override
- public SearchContext with( Map<String, Object> variables ) {
- return new SearchContext(indexes, getSchemata(), getHints(), getProblems(), variables);
- }
-}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -23,26 +23,33 @@
*/
package org.jboss.dna.search;
+import java.io.IOException;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import net.jcip.annotations.GuardedBy;
import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.store.Directory;
+import org.apache.lucene.queryParser.ParseException;
+import org.jboss.dna.common.i18n.I18n;
import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.Graph;
import org.jboss.dna.graph.GraphI18n;
import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Subgraph;
+import org.jboss.dna.graph.SubgraphNode;
import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
import org.jboss.dna.graph.connector.RepositorySource;
import org.jboss.dna.graph.connector.RepositorySourceException;
import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.QueryResults;
import org.jboss.dna.graph.query.model.QueryCommand;
import org.jboss.dna.graph.query.validate.Schemata;
@@ -57,45 +64,68 @@
@ThreadSafe
public class SearchEngine {
- private final ExecutionContext context;
+ /**
+ * The default maximum number of changes that can be made to an index before the indexes are automatically optimized is * * *
+ * * {@value}
+ */
+ public static final int DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION = 0;
+
+ protected final ExecutionContext context;
private final String sourceName;
private final RepositoryConnectionFactory connectionFactory;
- private final DirectoryConfiguration directoryFactory;
- private final IndexStrategy indexStrategy;
- private final PathFactory pathFactory;
- @GuardedBy( "workspaceEngineLock" )
- private final Map<String, WorkspaceSearchEngine> workspaceEnginesByName;
- private final ReadWriteLock workspaceEngineLock = new ReentrantReadWriteLock();
+ protected final IndexLayout indexLayout;
+ private final int maxChangesBeforeAutomaticOptimization;
+ @GuardedBy( "workspacesLock" )
+ private final Map<String, Workspace> workspacesByName = new HashMap<String, Workspace>();
+ private final ReadWriteLock workspacesLock = new ReentrantReadWriteLock();
/**
* Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
* {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
* {@link DirectoryConfiguration directory factory} that defines where each workspace's indexes should be placed.
*
- * @param context the execution context in which all indexing operations should be performed
+ * @param context the execution context for indexing and optimization operations
* @param sourceName the name of the {@link RepositorySource}
* @param connectionFactory the connection factory
- * @param directoryFactory the factory for Lucene {@link Directory directories}
+ * @param indexLayout the specification of the Lucene index layout
+ * @param maxChangesBeforeAutomaticOptimization the number of changes that can be made to the index before the indexes are
+ * automatically optimized; may be 0 or a negative number if no automatic optimization should be done
* @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
*/
public SearchEngine( ExecutionContext context,
String sourceName,
RepositoryConnectionFactory connectionFactory,
- DirectoryConfiguration directoryFactory ) {
+ IndexLayout indexLayout,
+ int maxChangesBeforeAutomaticOptimization ) {
CheckArg.isNotNull(context, "context");
CheckArg.isNotNull(sourceName, "sourceName");
CheckArg.isNotNull(connectionFactory, "connectionFactory");
- CheckArg.isNotNull(directoryFactory, "directoryFactory");
this.sourceName = sourceName;
this.connectionFactory = connectionFactory;
- this.directoryFactory = directoryFactory;
+ this.indexLayout = indexLayout;
this.context = context;
- this.pathFactory = context.getValueFactories().getPathFactory();
- this.workspaceEnginesByName = new HashMap<String, WorkspaceSearchEngine>();
- this.indexStrategy = new KitchenSinkIndexStrategy();
+ this.maxChangesBeforeAutomaticOptimization = maxChangesBeforeAutomaticOptimization < 0 ? 0 : maxChangesBeforeAutomaticOptimization;
}
/**
+ * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
+ * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
+ * {@link DirectoryConfiguration directory factory} that defines where each workspace's indexes should be placed.
+ *
+ * @param context the execution context for indexing and optimization operations
+ * @param sourceName the name of the {@link RepositorySource}
+ * @param connectionFactory the connection factory
+ * @param indexLayout the specification of the Lucene index layout
+ * @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
+ */
+ public SearchEngine( ExecutionContext context,
+ String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ IndexLayout indexLayout ) {
+ this(context, sourceName, connectionFactory, indexLayout, DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION);
+ }
+
+ /**
* Get the name of the RepositorySource that this engine is to use.
*
* @return the source name; never null
@@ -114,6 +144,13 @@
}
/**
+ * @return maxChangesBeforeAutomaticOptimization
+ */
+ public int getMaxChangesBeforeAutomaticOptimization() {
+ return maxChangesBeforeAutomaticOptimization;
+ }
+
+ /**
* Utility to create a Graph for the source.
*
* @return the graph instance; never null
@@ -123,46 +160,58 @@
}
/**
- * Get the search engine for the workspace with the supplied name.
+ * Utility to obtain the root path.
*
+ * @return the root path; never null
+ */
+ final Path rootPath() {
+ return context.getValueFactories().getPathFactory().createRootPath();
+ }
+
+ final String readable( Path path ) {
+ return context.getValueFactories().getStringFactory().create(path);
+ }
+
+ /**
+ * Index all of the content at or below the supplied path in the named workspace within the {@link #getSourceName() source}.
+ * If the starting point is the root node, then this method will drop the existing index(es) and rebuild from the content in
+ * the workspace and source.
+ * <p>
+ * This method operates synchronously and returns when the requested indexing is completed.
+ * </p>
+ *
* @param workspaceName the name of the workspace
- * @return the workspace's search engine
+ * @param startingPoint the location that represents the content to be indexed; must have a path
+ * @param depthPerRead the depth of each subgraph read operation
+ * @throws IllegalArgumentException if the workspace name or location are null
+ * @throws RepositorySourceException if there is a problem accessing the content
+ * @throws SearchEngineException if there is a problem updating the indexes
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- protected WorkspaceSearchEngine getWorkspaceEngine( String workspaceName ) {
- WorkspaceSearchEngine engine = null;
- try {
- workspaceEngineLock.readLock().lock();
- engine = workspaceEnginesByName.get(workspaceName);
- } finally {
- workspaceEngineLock.readLock().unlock();
- }
+ public void index( String workspaceName,
+ Location startingPoint,
+ int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ CheckArg.isNotNull(startingPoint, "startingPoint");
+ assert startingPoint.hasPath();
- if (engine == null) {
- // Verify the workspace does exist ...
- if (!graph().getWorkspaces().contains(workspaceName)) {
- String msg = GraphI18n.workspaceDoesNotExistInRepository.text(workspaceName, getSourceName());
- throw new InvalidWorkspaceException(msg);
- }
- try {
- workspaceEngineLock.writeLock().lock();
- // Check whether another thread got in and created the engine while we waited ...
- engine = workspaceEnginesByName.get(workspaceName);
- if (engine == null) {
- // Create the engine and register it ...
- engine = new WorkspaceSearchEngine(context, directoryFactory, indexStrategy, sourceName, workspaceName,
- connectionFactory);
- workspaceEnginesByName.put(workspaceName, engine);
- }
- } finally {
- workspaceEngineLock.writeLock().unlock();
- }
+ Workspace workspace = getWorkspace(workspaceName);
+ if (startingPoint.getPath().isRoot()) {
+ // More efficient to just start over with a new index ...
+ workspace.execute(true, addContent(startingPoint, depthPerRead));
+ } else {
+ // Have to first remove the content below the starting point, then add it again ...
+ workspace.execute(false, removeContent(startingPoint), addContent(startingPoint, depthPerRead));
}
- return engine;
}
/**
* Index all of the content at or below the supplied path in the named workspace within the {@link #getSourceName() source}.
+ * If the starting point is the root node, then this method will drop the existing index(es) and rebuild from the content in
+ * the workspace and source.
+ * <p>
+ * This method operates synchronously and returns when the requested indexing is completed.
+ * </p>
*
* @param workspaceName the name of the workspace
* @param startingPoint the path that represents the content to be indexed
@@ -172,16 +221,17 @@
* @throws SearchEngineException if there is a problem updating the indexes
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- public void indexContent( String workspaceName,
- Path startingPoint,
- int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ public void index( String workspaceName,
+ Path startingPoint,
+ int depthPerRead ) throws RepositorySourceException, SearchEngineException {
CheckArg.isNotNull(workspaceName, "workspaceName");
CheckArg.isNotNull(startingPoint, "startingPoint");
- getWorkspaceEngine(workspaceName).indexContent(startingPoint, depthPerRead);
+ index(workspaceName, Location.create(startingPoint), depthPerRead);
}
/**
- * Index all of the content in the named workspace within the {@link #getSourceName() source}.
+ * Index all of the content in the named workspace within the {@link #getSourceName() source}. This method operates
+ * synchronously and returns when the requested indexing is completed.
*
* @param workspaceName the name of the workspace
* @param depthPerRead the depth of each subgraph read operation
@@ -190,23 +240,24 @@
* @throws SearchEngineException if there is a problem updating the indexes
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- public void indexContent( String workspaceName,
- int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ public void index( String workspaceName,
+ int depthPerRead ) throws RepositorySourceException, SearchEngineException {
CheckArg.isNotNull(workspaceName, "workspaceName");
- indexContent(workspaceName, pathFactory.createRootPath(), depthPerRead);
+ index(workspaceName, rootPath(), depthPerRead);
}
/**
- * Index (or re-index) all of the content in all of the workspaces within the source.
+ * Index (or re-index) all of the content in all of the workspaces within the source. This method operates synchronously and
+ * returns when the requested indexing is completed.
*
* @param depthPerRead the depth of each subgraph read operation
* @throws RepositorySourceException if there is a problem accessing the content
* @throws SearchEngineException if there is a problem updating the indexes
*/
- public void indexContent( int depthPerRead ) throws RepositorySourceException, SearchEngineException {
- Path rootPath = pathFactory.createRootPath();
+ public void index( int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ Path rootPath = rootPath();
for (String workspaceName : graph().getWorkspaces()) {
- getWorkspaceEngine(workspaceName).indexContent(rootPath, depthPerRead);
+ index(workspaceName, rootPath, depthPerRead);
}
}
@@ -218,7 +269,7 @@
* @throws RepositorySourceException if there is a problem accessing the content
* @throws SearchEngineException if there is a problem updating the indexes
*/
- public void indexChanges( final Iterable<ChangeRequest> changes ) throws SearchEngineException {
+ public void index( final Iterable<ChangeRequest> changes ) throws SearchEngineException {
// First break up all the changes into different collections, one collection per workspace ...
Map<String, Collection<ChangeRequest>> changesByWorkspace = new HashMap<String, Collection<ChangeRequest>>();
for (ChangeRequest request : changes) {
@@ -235,7 +286,7 @@
for (Map.Entry<String, Collection<ChangeRequest>> entry : changesByWorkspace.entrySet()) {
String workspaceName = entry.getKey();
Collection<ChangeRequest> changesForWorkspace = entry.getValue();
- getWorkspaceEngine(workspaceName).indexChanges(changesForWorkspace);
+ getWorkspace(workspaceName).execute(false, updateContent(changesForWorkspace));
}
}
@@ -247,7 +298,7 @@
*/
public void optimize() throws SearchEngineException {
for (String workspaceName : graph().getWorkspaces()) {
- getWorkspaceEngine(workspaceName).optimize();
+ getWorkspace(workspaceName).execute(false, optimizeContent());
}
}
@@ -262,46 +313,498 @@
*/
public void optimize( String workspaceName ) throws SearchEngineException {
CheckArg.isNotNull(workspaceName, "workspaceName");
- getWorkspaceEngine(workspaceName).optimize();
+ getWorkspace(workspaceName).execute(false, optimizeContent());
}
/**
* Perform a full-text search of the content in the named workspace, given the maximum number of results and the offset
* defining the first result the caller is interested in.
*
+ * @param context the execution context in which the search is to take place; may not be null
* @param workspaceName the name of the workspace
* @param fullTextSearch the full-text search to be performed; may not be null
* @param maxResults the maximum number of results that are to be returned; always positive
* @param offset the number of initial results to skip, or 0 if the first results are to be returned
* @return the activity that will perform the work
- * @throws IllegalArgumentException if the workspace name is null
+ * @throws IllegalArgumentException if the execution context or workspace name are null
* @throws SearchEngineException if there is a problem during optimization
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- public List<Location> fullTextSearch( String workspaceName,
+ public List<Location> fullTextSearch( ExecutionContext context,
+ String workspaceName,
String fullTextSearch,
int maxResults,
int offset ) {
+ CheckArg.isNotNull(context, "context");
CheckArg.isNotNull(workspaceName, "workspaceName");
- return getWorkspaceEngine(workspaceName).fullTextSearch(fullTextSearch, maxResults, offset);
+ Search searchActivity = searchContent(context, fullTextSearch, maxResults, offset);
+ getWorkspace(workspaceName).execute(false, searchActivity);
+ return searchActivity.getResults();
}
/**
* Perform a query of the content in the named workspace, given the Abstract Query Model representation of the query.
*
+ * @param context the execution context in which the search is to take place; may not be null
* @param workspaceName the name of the workspace
* @param query the query that is to be executed, in the form of the Abstract Query Model
* @param schemata the definition of the tables and views that can be used in the query; may not be null
* @return the query results; never null
* @throws IllegalArgumentException if the context, query, or schemata references are null
*/
- public QueryResults execute( String workspaceName,
- QueryCommand query,
- Schemata schemata ) {
+ public QueryResults query( ExecutionContext context,
+ String workspaceName,
+ QueryCommand query,
+ Schemata schemata ) {
+ CheckArg.isNotNull(context, "context");
CheckArg.isNotNull(workspaceName, "workspaceName");
CheckArg.isNotNull(query, "query");
CheckArg.isNotNull(schemata, "schemata");
- return getWorkspaceEngine(workspaceName).execute(query, schemata);
+ QueryContext queryContext = new QueryContext(context, schemata);
+ Query queryActivity = queryContent(queryContext, query);
+ getWorkspace(workspaceName).execute(false, queryActivity);
+ return queryActivity.getResults();
}
+ /**
+ * Remove the supplied index from the search engine. This is typically done when the workspace has been deleted from the
+ * source, or when
+ *
+ * @param workspaceName the name of the workspace
+ * @throws IllegalArgumentException if the workspace name is null
+ * @throws SearchEngineException if there is a problem removing the workspace
+ */
+ public void removeWorkspace( String workspaceName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ try {
+ workspacesLock.writeLock().lock();
+ // Check whether another thread got in and created the engine while we waited ...
+ Workspace workspace = workspacesByName.remove(workspaceName);
+ if (workspace != null) {
+ indexLayout.destroyIndexes(context, getSourceName(), workspaceName);
+ }
+ } catch (IOException e) {
+ String message = SearchI18n.errorWhileRemovingIndexesForWorkspace.text(sourceName, workspaceName, e.getMessage());
+ throw new SearchEngineException(message, e);
+ } finally {
+ workspacesLock.writeLock().unlock();
+ }
+ }
+
+ /**
+ * Remove from the search engine all workspace-related indexes, thereby cleaning up any resources used by this search engine.
+ *
+ * @throws SearchEngineException if there is a problem removing any of the workspace
+ */
+ public void removeWorkspaces() throws SearchEngineException {
+ try {
+ workspacesLock.writeLock().lock();
+ for (String workspaceName : new HashSet<String>(workspacesByName.keySet())) {
+ removeWorkspace(workspaceName);
+ }
+ } finally {
+ workspacesLock.writeLock().unlock();
+ }
+ }
+
+ /**
+ * Get the search engine for the workspace with the supplied name.
+ *
+ * @param workspaceName the name of the workspace
+ * @return the workspace's search engine
+ * @throws InvalidWorkspaceException if the workspace does not exist
+ */
+ protected Workspace getWorkspace( String workspaceName ) {
+ Workspace workspace = null;
+ try {
+ workspacesLock.readLock().lock();
+ workspace = workspacesByName.get(workspaceName);
+ } finally {
+ workspacesLock.readLock().unlock();
+ }
+
+ if (workspace == null) {
+ // Verify the workspace does exist ...
+ if (!graph().getWorkspaces().contains(workspaceName)) {
+ String msg = GraphI18n.workspaceDoesNotExistInRepository.text(workspaceName, getSourceName());
+ throw new InvalidWorkspaceException(msg);
+ }
+ try {
+ workspacesLock.writeLock().lock();
+ // Check whether another thread got in and created the engine while we waited ...
+ workspace = workspacesByName.get(workspaceName);
+ if (workspace == null) {
+ // Create the engine and register it ...
+ workspace = new Workspace(workspaceName);
+ workspacesByName.put(workspaceName, workspace);
+ }
+ } finally {
+ workspacesLock.writeLock().unlock();
+ }
+ }
+ return workspace;
+ }
+
+ protected class Workspace {
+ private final String sourceName;
+ private final String workspaceName;
+ protected final AtomicInteger modifiedNodesSinceLastOptimize = new AtomicInteger(0);
+
+ protected Workspace( String workspaceName ) {
+ this.workspaceName = workspaceName;
+ this.sourceName = getSourceName();
+ }
+
+ /**
+ * Get the workspace name.
+ *
+ * @return the workspace name; never null
+ */
+ public String getWorkspaceName() {
+ return workspaceName;
+ }
+
+ /**
+ * Execute the supplied activities against the indexes.
+ *
+ * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
+ * @param activities the activities to execute
+ * @throws SearchEngineException if there is a problem performing the activities
+ */
+ protected final void execute( boolean overwrite,
+ Activity... activities ) throws SearchEngineException {
+ // Determine if the activities are readonly ...
+ boolean readOnly = true;
+ for (Activity activity : activities) {
+ if (!(activity instanceof ReadOnlyActivity)) {
+ readOnly = false;
+ break;
+ }
+ }
+
+ // Create a session ...
+ IndexSession session = indexLayout.createSession(context, sourceName, workspaceName, overwrite, readOnly);
+ assert session != null;
+
+ // Execute the various activities ...
+ Throwable error = null;
+ try {
+ int numChanges = 0;
+ for (Activity activity : activities) {
+ try {
+ numChanges += activity.execute(session);
+ } catch (IOException e) {
+ error = e;
+ throw new SearchEngineException(activity.messageFor(e, sourceName, workspaceName), e);
+ } catch (ParseException e) {
+ error = e;
+ throw new SearchEngineException(activity.messageFor(e, sourceName, workspaceName), e);
+ } catch (RuntimeException e) {
+ error = e;
+ throw e;
+ }
+ }
+ if (numChanges > 0) {
+ numChanges = this.modifiedNodesSinceLastOptimize.addAndGet(numChanges);
+ // Determine if there have been enough changes made to run the optimizer ...
+ int maxChanges = getMaxChangesBeforeAutomaticOptimization();
+ if (maxChanges > 0 && numChanges >= maxChanges) {
+ Activity optimizer = optimizeContent();
+ try {
+ optimizer.execute(session);
+ } catch (ParseException e) {
+ error = e;
+ throw new SearchEngineException(optimizer.messageFor(e, sourceName, workspaceName), e);
+ } catch (IOException e) {
+ error = e;
+ throw new SearchEngineException(optimizer.messageFor(e, sourceName, workspaceName), e);
+ } catch (RuntimeException e) {
+ error = e;
+ throw e;
+ }
+ }
+ }
+ } finally {
+ try {
+ if (error == null) {
+ session.commit();
+ } else {
+ session.rollback();
+ }
+ } catch (IOException e2) {
+ // We don't want to lose the existing error, if there is one ...
+ if (error == null) {
+ I18n msg = SearchI18n.errorWhileCommittingIndexChanges;
+ throw new SearchEngineException(msg.text(workspaceName, sourceName, e2.getMessage()), e2);
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Create an activity that will optimize the indexes.
+ *
+ * @return the activity that will perform the work
+ */
+ protected Activity optimizeContent() {
+ return new Activity() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.SearchEngine.Activity#execute(org.jboss.dna.search.IndexSession)
+ */
+ public int execute( IndexSession indexSession ) throws IOException {
+ indexSession.optimize();
+ return 0; // no lines changed
+ }
+
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return SearchI18n.errorWhileOptimizingIndexes.text(sourceName, workspaceName, error.getMessage());
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will read from the source the content at the supplied location and add the content to the search
+ * index.
+ *
+ * @param location the location of the content to read; may not be null
+ * @param depthPerRead the depth of each read operation; always positive
+ * @return the activity that will perform the work
+ */
+ protected Activity addContent( final Location location,
+ final int depthPerRead ) {
+ return new Activity() {
+ public int execute( IndexSession indexSession ) throws IOException {
+
+ // Create a queue that we'll use to walk the content ...
+ LinkedList<Location> locationsToRead = new LinkedList<Location>();
+ locationsToRead.add(location);
+ int count = 0;
+
+ // Now read and index the content ...
+ Graph graph = graph();
+ graph.useWorkspace(indexSession.getWorkspaceName());
+ while (!locationsToRead.isEmpty()) {
+ Location location = locationsToRead.poll();
+ if (location == null) continue;
+ Subgraph subgraph = graph.getSubgraphOfDepth(depthPerRead).at(location);
+ // Index all of the nodes within this subgraph ...
+ for (SubgraphNode node : subgraph) {
+ // Index the node ...
+ indexSession.index(node);
+ ++count;
+
+ // Process the children ...
+ for (Location child : node.getChildren()) {
+ if (!subgraph.includes(child)) {
+ // Record this location as needing to be read ...
+ locationsToRead.add(child);
+ }
+ }
+ }
+ }
+ return count;
+ }
+
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ String path = readable(location.getPath());
+ return SearchI18n.errorWhileIndexingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will remove from the indexes all documents that represent content at or below the specified
+ * location.
+ *
+ * @param location the location of the content to removed; may not be null
+ * @return the activity that will perform the work
+ */
+ protected Activity removeContent( final Location location ) {
+ return new Activity() {
+
+ public int execute( IndexSession indexSession ) throws IOException {
+ // Delete the content at/below the path ...
+ return indexSession.deleteBelow(location.getPath());
+ }
+
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ String path = readable(location.getPath());
+ return SearchI18n.errorWhileRemovingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will update the indexes with changes that were already made to the content.
+ *
+ * @param changes the changes that have been made to the content; may not be null
+ * @return the activity that will perform the work
+ */
+ protected Activity updateContent( final Iterable<ChangeRequest> changes ) {
+ return new Activity() {
+
+ public int execute( IndexSession indexSession ) throws IOException {
+ return indexSession.apply(changes);
+ }
+
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return SearchI18n.errorWhileUpdatingContent.text(workspaceName, sourceName, error.getMessage());
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will perform a full-text search given the supplied query.
+ *
+ * @param context the context in which the search is to be performed; may not be null
+ * @param fullTextSearch the full-text search to be performed; may not be null
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
+ * @return the activity that will perform the work; never null
+ */
+ protected Search searchContent( final ExecutionContext context,
+ final String fullTextSearch,
+ final int maxResults,
+ final int offset ) {
+ final List<Location> results = new ArrayList<Location>(maxResults);
+ return new Search() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.SearchEngine.Activity#execute(org.jboss.dna.search.IndexSession)
+ */
+ public int execute( IndexSession session ) throws IOException, ParseException {
+ session.search(context, fullTextSearch, maxResults, offset, results);
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
+ * java.lang.String)
+ */
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return SearchI18n.errorWhilePerformingSearch.text(fullTextSearch, workspaceName, sourceName, error.getMessage());
+ }
+
+ public List<Location> getResults() {
+ return results;
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will perform a query against the index.
+ *
+ * @param context the context in which the search is to be performed; may not be null
+ * @param query the query to be performed; may not be null
+ * @return the activity that will perform the query; never null
+ */
+ protected Query queryContent( final QueryContext context,
+ final QueryCommand query ) {
+ return new Query() {
+ private QueryResults results = null;
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.SearchEngine.Activity#execute(org.jboss.dna.search.IndexSession)
+ */
+ public int execute( IndexSession session ) throws IOException, ParseException {
+ results = session.query(context, query);
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
+ * java.lang.String)
+ */
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return SearchI18n.errorWhilePerformingQuery.text(query, workspaceName, sourceName, error.getMessage());
+ }
+
+ public QueryResults getResults() {
+ return results;
+ }
+ };
+ }
+
+ /**
+ * Interface for activities that will be executed against a workspace. These activities don't have to commit or roll back the
+ * writer, nor do they have to translate the exceptions, since this is done by the
+ * {@link Workspace#execute(boolean, Activity...)} method.
+ */
+ protected interface Activity {
+
+ /**
+ * Perform the activity by using the index writer.
+ *
+ * @param indexSession the index session that should be used by the activity; never null
+ * @return the number of changes that were made by this activity
+ * @throws IOException if there is an error using the writer
+ * @throws ParseException if there is an error due to parsing
+ */
+ int execute( IndexSession indexSession ) throws IOException, ParseException;
+
+ /**
+ * Translate an exception obtained during {@link #execute(IndexSession) execution} into a single message.
+ *
+ * @param t the exception
+ * @param sourceName the name of the source
+ * @param workspaceName the name of the workspace
+ * @return the error message
+ */
+ String messageFor( Throwable t,
+ String sourceName,
+ String workspaceName );
+ }
+
+ /**
+ * A read-only activity.
+ */
+ protected interface ReadOnlyActivity extends Activity {
+ }
+
+ /**
+ * A search activity.
+ */
+ protected interface Search extends ReadOnlyActivity {
+ /**
+ * Get the results of the search.
+ *
+ * @return the list of {@link Location} objects for each node satisfying the results; never null
+ */
+ List<Location> getResults();
+ }
+
+ /**
+ * A query activity.
+ */
+ protected interface Query extends ReadOnlyActivity {
+ /**
+ * Get the results of the query.
+ *
+ * @return the results of a query; never null
+ */
+ QueryResults getResults();
+ }
+
}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -40,11 +40,14 @@
public static I18n errorWhileRemovingContentAtPath;
public static I18n errorWhileUpdatingContent;
public static I18n errorWhileCommittingIndexChanges;
+ public static I18n errorWhileRollingBackIndexChanges;
public static I18n errorCreatingIndexWriter;
public static I18n errorWhileOptimizingIndexes;
public static I18n errorWhilePerformingSearch;
public static I18n errorWhilePerformingQuery;
public static I18n errorWhileInitializingSearchEngine;
+ public static I18n errorWhileRemovingIndexesForWorkspace;
+ public static I18n errorWhilePerformingLuceneQuery;
static {
try {
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -1,569 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriter.MaxFieldLength;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.store.Directory;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Graph;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Subgraph;
-import org.jboss.dna.graph.SubgraphNode;
-import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
-import org.jboss.dna.graph.connector.RepositorySource;
-import org.jboss.dna.graph.connector.RepositorySourceException;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.ChangeRequest;
-
-/**
- * A search engine dedicated to a single workspace.
- */
-@ThreadSafe
-class WorkspaceSearchEngine {
-
- protected static final String PATHS_INDEX_NAME = "paths";
- protected static final String CONTENT_INDEX_NAME = "content";
-
- private final Directory pathsDirectory;
- private final Directory contentDirectory;
- private final ExecutionContext context;
- private final ExecutionContext encodedContext;
- private final String sourceName;
- private final String workspaceName;
- private final RepositoryConnectionFactory connectionFactory;
- private final IndexStrategy indexingStrategy;
- protected final AtomicInteger modifiedNodesSinceLastOptimize = new AtomicInteger(0);
-
- /**
- * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
- * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
- * {@link DirectoryConfiguration directory factory} that defines where each workspace's indexes should be placed.
- *
- * @param context the execution context in which all indexing operations should be performed
- * @param directoryFactory the factory from which can be obtained the Lucene directory where the indexes should be persisted
- * @param indexingStrategy the indexing strategy that governs how properties are to be indexed; may not be null
- * @param sourceName the name of the {@link RepositorySource}
- * @param workspaceName the name of the workspace
- * @param connectionFactory the connection factory
- * @throws IllegalArgumentException if any of the parameters are null
- * @throws SearchEngineException if there is a problem initializing this engine
- */
- protected WorkspaceSearchEngine( ExecutionContext context,
- DirectoryConfiguration directoryFactory,
- IndexStrategy indexingStrategy,
- String sourceName,
- String workspaceName,
- RepositoryConnectionFactory connectionFactory ) throws SearchEngineException {
- CheckArg.isNotNull(context, "context");
- CheckArg.isNotNull(directoryFactory, "directoryFactory");
- CheckArg.isNotNull(sourceName, "sourceName");
- CheckArg.isNotNull(workspaceName, "workspaceName");
- CheckArg.isNotNull(connectionFactory, "connectionFactory");
- CheckArg.isNotNull(indexingStrategy, "indexingStrategy");
- this.sourceName = sourceName;
- this.workspaceName = workspaceName;
- this.connectionFactory = connectionFactory;
- this.context = context;
- this.indexingStrategy = indexingStrategy;
- this.encodedContext = context.with(new EncodingNamespaceRegistry(context.getNamespaceRegistry(),
- this.indexingStrategy.getNamespaceEncoder()));
- this.pathsDirectory = directoryFactory.getDirectory(workspaceName, PATHS_INDEX_NAME);
- this.contentDirectory = directoryFactory.getDirectory(workspaceName, CONTENT_INDEX_NAME);
-
- initialize();
- }
-
- protected void initialize() throws SearchEngineException {
- // Always create the index if not there ...
- try {
- Analyzer analyzer = this.indexingStrategy.createAnalyzer();
- ensureIndexesExist(this.pathsDirectory, analyzer);
- ensureIndexesExist(this.contentDirectory, analyzer);
- } catch (IOException e) {
- String msg = SearchI18n.errorWhileInitializingSearchEngine.text(workspaceName, sourceName, e.getMessage());
- throw new SearchEngineException(msg, e);
- }
- }
-
- private static void ensureIndexesExist( Directory directory,
- Analyzer analyzer ) throws IOException {
- IndexWriter writer = null;
- Throwable error = null;
- try {
- writer = new IndexWriter(directory, analyzer, false, MaxFieldLength.UNLIMITED);
- } catch (FileNotFoundException e) {
- // The index files don't yet exist, so we need to create them ...
- try {
- writer = new IndexWriter(directory, analyzer, true, MaxFieldLength.UNLIMITED);
- } catch (Throwable t) {
- error = t;
- }
- } catch (Throwable t) {
- error = t;
- } finally {
- if (writer != null) {
- // Either way, make sure we close the writer that we created ...
- try {
- writer.close();
- } catch (IOException e) {
- if (error == null) throw e;
- } catch (RuntimeException e) {
- if (error == null) throw e;
- }
- }
- }
- }
-
- final Graph graph() {
- Graph graph = Graph.create(sourceName, connectionFactory, context);
- graph.useWorkspace(workspaceName);
- return graph;
- }
-
- final String workspaceName() {
- return workspaceName;
- }
-
- final String sourceName() {
- return sourceName;
- }
-
- final String readable( Path path ) {
- return context.getValueFactories().getStringFactory().create(path);
- }
-
- final IndexStrategy strategy() {
- return indexingStrategy;
- }
-
- /**
- * Index all of the content at or below the supplied path.
- *
- * @param startingPoint the path that represents the content to be indexed
- * @param depthPerBatch the depth of each subgraph read operation
- * @throws IllegalArgumentException if the path is null or the depth is not positive
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- */
- public void indexContent( Path startingPoint,
- int depthPerBatch ) throws RepositorySourceException, SearchEngineException {
- CheckArg.isNotNull(startingPoint, "startingPoint");
- indexContent(Location.create(startingPoint), depthPerBatch);
- }
-
- /**
- * Index all of the content at or below the supplied location.
- *
- * @param startingPoint the location that represents the content to be indexed
- * @param depthPerRead the depth of each subgraph read operation
- * @throws IllegalArgumentException if the location is null or the depth is not positive
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- */
- public void indexContent( Location startingPoint,
- int depthPerRead ) throws RepositorySourceException, SearchEngineException {
- CheckArg.isNotNull(startingPoint, "startingPoint");
- CheckArg.isPositive(depthPerRead, "depthPerBatch");
- assert startingPoint.hasPath();
-
- if (startingPoint.getPath().isRoot()) {
- // More efficient to just start over with a new index ...
- execute(true, addContent(startingPoint, depthPerRead));
- } else {
- // Have to first remove the content below the starting point, then add it again ...
- execute(false, removeContent(startingPoint), addContent(startingPoint, depthPerRead));
- }
- }
-
- /**
- * Update the indexes with the supplied set of changes to the content.
- *
- * @param changes the set of changes to the content
- * @throws IllegalArgumentException if the path is null
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- */
- public void indexChanges( final Iterable<ChangeRequest> changes ) throws SearchEngineException {
- CheckArg.isNotNull(changes, "changes");
- execute(false, updateContent(changes));
- }
-
- /**
- * Invoke the engine's garbage collection on all indexes used by this workspace. This method reclaims space and optimizes the
- * index. This should be done on a periodic basis after changes are made to the engine's indexes.
- *
- * @throws SearchEngineException if there is a problem during optimization
- */
- public void optimize() throws SearchEngineException {
- execute(false, optimizeContent());
- }
-
- /**
- * Create an activity that will perform a full-text search given the supplied query.
- *
- * @param fullTextSearch the full-text search to be performed; may not be null
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @return the activity that will perform the work
- */
- public List<Location> fullTextSearch( final String fullTextSearch,
- final int maxResults,
- final int offset ) {
- return execute(false, searchContent(fullTextSearch, maxResults, offset)).getResults();
- }
-
- /**
- * Create an activity that will perform a query of the content in this workspace, given the Abstract Query Model
- * representation of the query.
- *
- * @param query the query that is to be executed, in the form of the Abstract Query Model
- * @param schemata the definition of the tables available for the query; may not be null
- * @return the query results; never null
- * @throws IllegalArgumentException if the context or query references are null
- */
- public QueryResults execute( QueryCommand query,
- Schemata schemata ) {
- return execute(false, queryContent(query, schemata)).getResults();
- }
-
- /**
- * Execute the supplied activities against the indexes.
- *
- * @param <ActivityType> the type of activity
- * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
- * @param activity the activity to execute
- * @return the same activity that was supplied as a parameter, returned as a convenience
- * @throws SearchEngineException if there is a problem performing the activities
- */
- protected final <ActivityType extends Activity> ActivityType execute( boolean overwrite,
- ActivityType activity ) throws SearchEngineException {
- execute(overwrite, new Activity[] {activity});
- return activity;
- }
-
- /**
- * Execute the supplied activities against the indexes.
- *
- * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
- * @param activities the activities to execute
- * @throws SearchEngineException if there is a problem performing the activities
- */
- protected final void execute( boolean overwrite,
- Activity... activities ) throws SearchEngineException {
- // Determine if the activities are readonly ...
- boolean readOnly = true;
- for (Activity activity : activities) {
- if (!(activity instanceof ReadOnlyActivity)) {
- readOnly = false;
- break;
- }
- }
-
- Analyzer analyzer = this.indexingStrategy.createAnalyzer();
- IndexContext indexes = new IndexContext(encodedContext, pathsDirectory, contentDirectory, analyzer, overwrite, readOnly);
-
- // Execute the various activities ...
- Throwable error = null;
- try {
- for (Activity activity : activities) {
- try {
- activity.execute(indexes);
- } catch (IOException e) {
- error = e;
- throw new SearchEngineException(activity.messageFor(e), e);
- } catch (ParseException e) {
- error = e;
- throw new SearchEngineException(activity.messageFor(e), e);
- } catch (RuntimeException e) {
- error = e;
- throw e;
- }
- }
- if (indexes.hasWriters()) {
- // Determine if there have been enough changes made to run the optimizer ...
- int maxChanges = this.indexingStrategy.getChangeCountForAutomaticOptimization();
- if (maxChanges > 0 && this.modifiedNodesSinceLastOptimize.get() >= maxChanges) {
- Activity optimizer = optimizeContent();
- try {
- optimizer.execute(indexes);
- } catch (ParseException e) {
- error = e;
- throw new SearchEngineException(optimizer.messageFor(e), e);
- } catch (IOException e) {
- error = e;
- throw new SearchEngineException(optimizer.messageFor(e), e);
- } catch (RuntimeException e) {
- error = e;
- throw e;
- }
- }
- }
- } finally {
- try {
- if (error == null) {
- indexes.commit();
- } else {
- indexes.rollback();
- }
- } catch (IOException e2) {
- // We don't want to lose the existing error, if there is one ...
- if (error == null) {
- I18n msg = SearchI18n.errorWhileCommittingIndexChanges;
- throw new SearchEngineException(msg.text(workspaceName(), sourceName(), e2.getMessage()), e2);
- }
- }
- }
- }
-
- /**
- * Interface for activities that will be executed against the set of indexes. These activities don't have to commit or roll
- * back the writer, nor do they have to translate the exceptions, since this is done by the
- * {@link WorkspaceSearchEngine#execute(boolean, Activity...)} method.
- *
- * @see ReadOnlyActivity
- */
- protected interface Activity {
-
- /**
- * Perform the activity by using the index writer.
- *
- * @param indexes the set of indexes to use; never null
- * @throws IOException if there is an error using the writer
- * @throws ParseException if there is an error due to parsing
- */
- void execute( IndexContext indexes ) throws IOException, ParseException;
-
- /**
- * Translate an exception obtained during {@link #execute(IndexContext) execution} into a single message.
- *
- * @param t the exception
- * @return the error message
- */
- String messageFor( Throwable t );
- }
-
- protected interface ReadOnlyActivity extends Activity {
- }
-
- protected interface Search extends ReadOnlyActivity {
- /**
- * Get the results of the search.
- *
- * @return the list of {@link Location} objects for each node satisfying the results; never null
- */
- List<Location> getResults();
- }
-
- protected interface Query extends ReadOnlyActivity {
- /**
- * Get the results of the query.
- *
- * @return the results of a query; never null
- */
- QueryResults getResults();
- }
-
- /**
- * Create an activity that will read from the source the content at the supplied location and add the content to the search
- * index.
- *
- * @param location the location of the content to read; may not be null
- * @param depthPerRead the depth of each read operation; always positive
- * @return the activity that will perform the work
- */
- protected Activity addContent( final Location location,
- final int depthPerRead ) {
- return new Activity() {
- public void execute( IndexContext indexes ) throws IOException {
-
- // Create a queue that we'll use to walk the content ...
- LinkedList<Location> locationsToRead = new LinkedList<Location>();
- locationsToRead.add(location);
- int count = 0;
-
- // Now read and index the content ...
- Graph graph = graph();
- while (!locationsToRead.isEmpty()) {
- Location location = locationsToRead.poll();
- if (location == null) continue;
- Subgraph subgraph = graph.getSubgraphOfDepth(depthPerRead).at(location);
- // Index all of the nodes within this subgraph ...
- for (SubgraphNode node : subgraph) {
- // Index the node ...
- strategy().index(node, indexes);
- ++count;
-
- // Process the children ...
- for (Location child : node.getChildren()) {
- if (!subgraph.includes(child)) {
- // Record this location as needing to be read ...
- locationsToRead.add(child);
- }
- }
- }
- }
- modifiedNodesSinceLastOptimize.addAndGet(count);
- }
-
- public String messageFor( Throwable error ) {
- String path = readable(location.getPath());
- return SearchI18n.errorWhileIndexingContentAtPath.text(path, workspaceName(), sourceName(), error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will remove from the indexes all documents that represent content at or below the specified
- * location.
- *
- * @param location the location of the content to removed; may not be null
- * @return the activity that will perform the work
- */
- protected Activity removeContent( final Location location ) {
- return new Activity() {
-
- public void execute( IndexContext indexes ) throws IOException {
- // Delete the content at/below the path ...
- modifiedNodesSinceLastOptimize.addAndGet(strategy().deleteBelow(location.getPath(), indexes));
- }
-
- public String messageFor( Throwable error ) {
- String path = readable(location.getPath());
- return SearchI18n.errorWhileRemovingContentAtPath.text(path, workspaceName(), sourceName(), error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will optimize the indexes.
- *
- * @return the activity that will perform the work
- */
- protected Activity optimizeContent() {
- return new Activity() {
- public void execute( IndexContext indexes ) throws IOException {
- // Don't block ...
- indexes.getContentWriter().optimize();
- indexes.getPathsWriter().optimize();
- }
-
- public String messageFor( Throwable error ) {
- return SearchI18n.errorWhileOptimizingIndexes.text(workspaceName(), sourceName(), error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will update the indexes with changes that were already made to the content.
- *
- * @param changes the changes that have been made to the content; may not be null
- * @return the activity that will perform the work
- */
- protected Activity updateContent( final Iterable<ChangeRequest> changes ) {
- return new Activity() {
-
- public void execute( IndexContext indexes ) throws IOException {
- // Iterate over the changes ...
- modifiedNodesSinceLastOptimize.addAndGet(strategy().apply(changes, indexes));
- }
-
- public String messageFor( Throwable error ) {
- return SearchI18n.errorWhileUpdatingContent.text(workspaceName(), sourceName(), error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will perform a full-text search given the supplied query.
- *
- * @param fullTextSearch the full-text search to be performed; may not be null
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @return the activity that will perform the work
- */
- protected Search searchContent( final String fullTextSearch,
- final int maxResults,
- final int offset ) {
- final List<Location> results = new ArrayList<Location>(maxResults);
- return new Search() {
- public void execute( IndexContext indexes ) throws IOException, ParseException {
- strategy().search(fullTextSearch, maxResults, offset, indexes, results);
- }
-
- public String messageFor( Throwable error ) {
- return SearchI18n.errorWhilePerformingSearch.text(fullTextSearch,
- workspaceName(),
- sourceName(),
- error.getMessage());
- }
-
- public List<Location> getResults() {
- return results;
- }
- };
- }
-
- /**
- * Create an activity that will perform a query against the index.
- *
- * @param query the query to be performed; may not be null
- * @param schemata the definition of the tables being used in the query; may not be null
- * @return the activity that will perform the work
- */
- protected Query queryContent( final QueryCommand query,
- final Schemata schemata ) {
- return new Query() {
- private QueryResults results = null;
- private SearchContext context = null;
-
- public void execute( IndexContext indexes ) {
- context = new SearchContext(indexes, schemata);
- results = strategy().query(context, query);
- }
-
- public String messageFor( Throwable error ) {
- return SearchI18n.errorWhilePerformingQuery.text(query, workspaceName(), sourceName(), error.getMessage());
- }
-
- public QueryResults getResults() {
- return results;
- }
- };
- }
-}
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,36 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.filters;
+
+import org.jboss.dna.graph.query.QueryResults.Columns;
+
+/**
+ *
+ */
+public interface ResultFilter {
+
+ boolean allows( Object[] tuple,
+ Columns columns );
+
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,258 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.property.ValueComparators;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the name of nodes. This
+ * query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that correspond to nodes with Names that satisfy the constraint.
+ */
+public class CompareNameQuery extends CompareQuery<Path.Segment> {
+
+ private static final long serialVersionUID = 1L;
+ protected static final Evaluator<Path.Segment> IS_LESS_THAN = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) < 0;
+ }
+
+ @Override
+ public String toString() {
+ return " < ";
+ }
+ };
+ protected static final Evaluator<Path.Segment> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) <= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " <= ";
+ }
+ };
+ protected static final Evaluator<Path.Segment> IS_GREATER_THAN = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) > 0;
+ }
+
+ @Override
+ public String toString() {
+ return " > ";
+ }
+ };
+ protected static final Evaluator<Path.Segment> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) >= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " >= ";
+ }
+ };
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
+ * that is greater than the supplied constraint name.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareNameQuery createQueryForNodesWithNameGreaterThan( Path.Segment constraintValue,
+ String localNameField,
+ String snsIndexFieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+ factories.getStringFactory(), factories.getLongFactory(), IS_GREATER_THAN, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
+ * that is greater than or equal to the supplied constraint name.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareNameQuery createQueryForNodesWithNameGreaterThanOrEqualTo( Path.Segment constraintValue,
+ String localNameField,
+ String snsIndexFieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+ factories.getStringFactory(), factories.getLongFactory(), IS_GREATER_THAN_OR_EQUAL_TO,
+ caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
+ * that is less than the supplied constraint name.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareNameQuery createQueryForNodesWithNameLessThan( Path.Segment constraintValue,
+ String localNameField,
+ String snsIndexFieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+ factories.getStringFactory(), factories.getLongFactory(), IS_LESS_THAN, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
+ * that is less than or equal to the supplied constraint name.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareNameQuery createQueryForNodesWithNameLessThanOrEqualTo( Path.Segment constraintValue,
+ String localNameField,
+ String snsIndexFieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+ factories.getStringFactory(), factories.getLongFactory(), IS_LESS_THAN_OR_EQUAL_TO,
+ caseSensitive);
+ }
+
+ private final String snsIndexFieldName;
+ private final ValueFactory<Long> longFactory;
+ private final PathFactory pathFactory;
+ private final boolean caseSensitive;
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param constraintValue the constraint path; may not be null
+ * @param pathFactory the path factory that can be used during the scoring; may not be null
+ * @param stringFactory the string factory that can be used during the scoring; may not be null
+ * @param longFactory the long factory that can be used during the scoring; may not be null
+ * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
+ * constraint; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ */
+ protected CompareNameQuery( final String localNameField,
+ final String snsIndexFieldName,
+ Path.Segment constraintValue,
+ PathFactory pathFactory,
+ ValueFactory<String> stringFactory,
+ ValueFactory<Long> longFactory,
+ Evaluator<Path.Segment> evaluator,
+ boolean caseSensitive ) {
+ super(localNameField, constraintValue, null, stringFactory, evaluator, new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ if (fieldName.equals(localNameField)) return FieldSelectorResult.LOAD;
+ if (fieldName.equals(snsIndexFieldName)) return FieldSelectorResult.LOAD;
+ return FieldSelectorResult.NO_LOAD;
+ }
+ });
+ this.snsIndexFieldName = snsIndexFieldName;
+ this.longFactory = longFactory;
+ this.pathFactory = pathFactory;
+ this.caseSensitive = caseSensitive;
+ assert this.snsIndexFieldName != null;
+ assert this.longFactory != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ protected Path.Segment readFromDocument( IndexReader reader,
+ int docId ) throws IOException {
+ Document doc = reader.document(docId, fieldSelector);
+ String localName = doc.get(fieldName);
+ if (!caseSensitive) localName = localName.toLowerCase();
+ int sns = longFactory.create(doc.get(snsIndexFieldName)).intValue();
+ return pathFactory.createSegment(localName, sns);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new CompareNameQuery(fieldName, snsIndexFieldName, constraintValue, pathFactory, stringFactory, longFactory,
+ evaluator, caseSensitive);
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,223 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.ValueComparators;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the Path of nodes. This
+ * query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that correspond to nodes with Paths that satisfy the constraint.
+ */
+public class ComparePathQuery extends CompareQuery<Path> {
+
+ private static final long serialVersionUID = 1L;
+ protected static final Evaluator<Path> PATH_IS_LESS_THAN = new Evaluator<Path>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path nodePath,
+ Path constraintPath ) {
+ return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) < 0;
+ }
+
+ @Override
+ public String toString() {
+ return " < ";
+ }
+ };
+ protected static final Evaluator<Path> PATH_IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Path>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path nodePath,
+ Path constraintPath ) {
+ return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) <= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " <= ";
+ }
+ };
+ protected static final Evaluator<Path> PATH_IS_GREATER_THAN = new Evaluator<Path>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path nodePath,
+ Path constraintPath ) {
+ return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) > 0;
+ }
+
+ @Override
+ public String toString() {
+ return " > ";
+ }
+ };
+ protected static final Evaluator<Path> PATH_IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Path>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path nodePath,
+ Path constraintPath ) {
+ return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) >= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " >= ";
+ }
+ };
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
+ * that is greater than the supplied constraint path.
+ *
+ * @param constraintPath the constraint path; may not be null
+ * @param fieldName the name of the document field containing the path value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the path query; never null
+ */
+ public static ComparePathQuery createQueryForNodesWithPathGreaterThan( Path constraintPath,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
+ PATH_IS_GREATER_THAN, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
+ * that is greater than or equal to the supplied constraint path.
+ *
+ * @param constraintPath the constraint path; may not be null
+ * @param fieldName the name of the document field containing the path value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the path query; never null
+ */
+ public static ComparePathQuery createQueryForNodesWithPathGreaterThanOrEqualTo( Path constraintPath,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
+ PATH_IS_GREATER_THAN_OR_EQUAL_TO, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
+ * that is less than the supplied constraint path.
+ *
+ * @param constraintPath the constraint path; may not be null
+ * @param fieldName the name of the document field containing the path value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the path query; never null
+ */
+ public static ComparePathQuery createQueryForNodesWithPathLessThan( Path constraintPath,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
+ PATH_IS_LESS_THAN, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
+ * that is less than or equal to the supplied constraint path.
+ *
+ * @param constraintPath the constraint path; may not be null
+ * @param fieldName the name of the document field containing the path value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the path query; never null
+ */
+ public static ComparePathQuery createQueryForNodesWithPathLessThanOrEqualTo( Path constraintPath,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
+ PATH_IS_LESS_THAN_OR_EQUAL_TO, caseSensitive);
+ }
+
+ private final boolean caseSensitive;
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param fieldName the name of the document field containing the path value; may not be null
+ * @param constraintPath the constraint path; may not be null
+ * @param pathFactory the value factory that can be used during the scoring; may not be null
+ * @param stringFactory the string factory that can be used during the scoring; may not be null
+ * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
+ * constraint; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ */
+ protected ComparePathQuery( String fieldName,
+ Path constraintPath,
+ ValueFactory<Path> pathFactory,
+ ValueFactory<String> stringFactory,
+ Evaluator<Path> evaluator,
+ boolean caseSensitive ) {
+ super(fieldName, constraintPath, pathFactory, stringFactory, evaluator);
+ this.caseSensitive = caseSensitive;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ protected Path readFromDocument( IndexReader reader,
+ int docId ) throws IOException {
+ Document doc = reader.document(docId, fieldSelector);
+ String valueString = doc.get(fieldName);
+ if (!caseSensitive) valueString = valueString.toLowerCase();
+ return valueTypeFactory.create(valueString);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new ComparePathQuery(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, caseSensitive);
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,308 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import java.io.Serializable;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the Path of nodes. This
+ * query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that correspond to nodes with Paths that satisfy the constraint.
+ *
+ * @param <ValueType>
+ */
+public abstract class CompareQuery<ValueType> extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ protected static interface Evaluator<ValueType> extends Serializable {
+ boolean satisfiesConstraint( ValueType nodeValue,
+ ValueType constraintValue );
+ }
+
+ /**
+ * The operand that is being negated by this query.
+ */
+ protected final String fieldName;
+ protected final FieldSelector fieldSelector;
+ protected final ValueType constraintValue;
+ protected final Evaluator<ValueType> evaluator;
+ protected final ValueFactory<ValueType> valueTypeFactory;
+ protected final ValueFactory<String> stringFactory;
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param constraintValue the constraint value; may not be null
+ * @param valueTypeFactory the value factory that can be used during the scoring; may not be null
+ * @param stringFactory the string factory that can be used during the scoring; may not be null
+ * @param evaluator the {@link Evaluator} implementation that returns whether the node value satisfies the constraint; may not
+ * be null
+ */
+ protected CompareQuery( String fieldName,
+ ValueType constraintValue,
+ ValueFactory<ValueType> valueTypeFactory,
+ ValueFactory<String> stringFactory,
+ Evaluator<ValueType> evaluator ) {
+ this(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, null);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param constraintValue the constraint value; may not be null
+ * @param valueTypeFactory the value factory that can be used during the scoring; may not be null unless
+ * {@link #readFromDocument(IndexReader, int)} is overloaded to not use it
+ * @param stringFactory the string factory that can be used during the scoring; may not be null
+ * @param evaluator the {@link Evaluator} implementation that returns whether the node value satisfies the constraint; may not
+ * be null
+ * @param fieldSelector the field selector that should load the fields needed to recover the value; may be null if the field
+ * selector should be generated automatically
+ */
+ protected CompareQuery( String fieldName,
+ ValueType constraintValue,
+ ValueFactory<ValueType> valueTypeFactory,
+ ValueFactory<String> stringFactory,
+ Evaluator<ValueType> evaluator,
+ FieldSelector fieldSelector ) {
+ this.fieldName = fieldName;
+ this.constraintValue = constraintValue;
+ this.valueTypeFactory = valueTypeFactory;
+ this.stringFactory = stringFactory;
+ this.evaluator = evaluator;
+ assert this.fieldName != null;
+ assert this.constraintValue != null;
+ assert this.evaluator != null;
+ this.fieldSelector = fieldSelector != null ? fieldSelector : new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return fieldName.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+ }
+ };
+ }
+
+ protected ValueType readFromDocument( IndexReader reader,
+ int docId ) throws IOException {
+ Document doc = reader.document(docId, fieldSelector);
+ String valueString = doc.get(fieldName);
+ return valueTypeFactory.create(valueString);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new NotWeight(searcher);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return fieldName + " " + evaluator.toString() + " " + stringFactory != null ? stringFactory.create(constraintValue) : constraintValue.toString();
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class NotWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+ private final Searcher searcher;
+
+ protected NotWeight( Searcher searcher ) {
+ this.searcher = searcher;
+ assert this.searcher != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return CompareQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) {
+ // Return a custom scorer ...
+ return new NotScorer(reader);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) {
+ return new Explanation(getValue(), getQuery().toString());
+ }
+ }
+
+ /**
+ * A scorer for the Path query.
+ */
+ protected class NotScorer extends Scorer {
+ private int docId = -1;
+ private final int maxDocId;
+ private final IndexReader reader;
+
+ protected NotScorer( IndexReader reader ) {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ this.reader = reader;
+ assert this.reader != null;
+ this.maxDocId = this.reader.maxDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() throws IOException {
+ do {
+ ++docId;
+ if (reader.isDeleted(docId)) {
+ // We should skip this document ...
+ continue;
+ }
+ ValueType value = readFromDocument(reader, docId);
+ if (evaluator.satisfiesConstraint(value, constraintValue)) return docId;
+ } while (docId < maxDocId);
+ return Scorer.NO_MORE_DOCS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) throws IOException {
+ if (target == Scorer.NO_MORE_DOCS) return target;
+ while (true) {
+ int doc = nextDoc();
+ if (doc >= target) return doc;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,219 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.ValueComparators;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against a string field. This query
+ * implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * with string fields that satisfy the constraint.
+ */
+public class CompareStringQuery extends CompareQuery<String> {
+
+ private static final long serialVersionUID = 1L;
+ protected static final Evaluator<String> IS_LESS_THAN = new Evaluator<String>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( String nodeValue,
+ String constraintValue ) {
+ return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) < 0;
+ }
+
+ @Override
+ public String toString() {
+ return " < ";
+ }
+ };
+ protected static final Evaluator<String> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<String>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( String nodeValue,
+ String constraintValue ) {
+ return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) <= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " <= ";
+ }
+ };
+ protected static final Evaluator<String> IS_GREATER_THAN = new Evaluator<String>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( String nodeValue,
+ String constraintValue ) {
+ return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) > 0;
+ }
+
+ @Override
+ public String toString() {
+ return " > ";
+ }
+ };
+ protected static final Evaluator<String> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<String>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( String nodeValue,
+ String constraintValue ) {
+ return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) >= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " >= ";
+ }
+ };
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a string field value that is greater than the supplied
+ * constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareStringQuery createQueryForNodesWithFieldGreaterThan( String constraintValue,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+ IS_GREATER_THAN, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a string field value that is greater than or equal to
+ * the supplied constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareStringQuery createQueryForNodesWithFieldGreaterThanOrEqualTo( String constraintValue,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+ IS_GREATER_THAN_OR_EQUAL_TO, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a string field value that is less than the supplied
+ * constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareStringQuery createQueryForNodesWithFieldLessThan( String constraintValue,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+ IS_LESS_THAN, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a string field value that is less than or equal to the
+ * supplied constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareStringQuery createQueryForNodesWithFieldLessThanOrEqualTo( String constraintValue,
+ String fieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+ IS_LESS_THAN_OR_EQUAL_TO, caseSensitive);
+ }
+
+ private final boolean caseSensitive;
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param constraintValue the constraint value; may not be null
+ * @param valueFactory the value factory that can be used during the scoring; may not be null
+ * @param stringFactory the string factory that can be used during the scoring; may not be null
+ * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
+ * constraint; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ */
+ protected CompareStringQuery( String fieldName,
+ String constraintValue,
+ ValueFactory<String> valueFactory,
+ ValueFactory<String> stringFactory,
+ Evaluator<String> evaluator,
+ boolean caseSensitive ) {
+ super(fieldName, caseSensitive ? constraintValue : constraintValue.toLowerCase(), valueFactory, stringFactory, evaluator);
+ this.caseSensitive = caseSensitive;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ protected String readFromDocument( IndexReader reader,
+ int docId ) throws IOException {
+ String result = super.readFromDocument(reader, docId);
+ return caseSensitive ? result : result.toLowerCase();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new CompareStringQuery(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, caseSensitive);
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,216 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+
+/**
+ * A Lucene {@link Query} implementation that always matches no documents.
+ */
+public class MatchNoneQuery extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Construct a query that always returns no documents.
+ */
+ public MatchNoneQuery() {
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new NoneWeight();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new MatchNoneQuery();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return "NO DOCS";
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class NoneWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+
+ protected NoneWeight() {
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return MatchNoneQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) {
+ return new NoneScorer();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) {
+ return new Explanation(getValue(), "NO VALUES");
+ }
+ }
+
+ /**
+ * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
+ * for the operand of the NOT.
+ */
+ protected static class NoneScorer extends Scorer {
+ private int docId = -1;
+
+ protected NoneScorer() {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() {
+ docId = Scorer.NO_MORE_DOCS;
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) {
+ return Scorer.NO_MORE_DOCS;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,274 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+
+/**
+ * A Lucene {@link Query} implementation that is used to represent a NOT expression of another wrapped Query object. This query
+ * implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that were <i>not</i> scored by the wrapped query. In other words, if the wrapped query ended up scoring any document, that
+ * document is <i>not</i> scored (i.e., skipped) by this query.
+ */
+public class NotQuery extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The operand that is being negated by this query.
+ */
+ protected final Query operand;
+
+ /**
+ * Construct a NOT(x) constraint where the 'x' operand is supplied.
+ *
+ * @param operand the operand being negated
+ */
+ public NotQuery( Query operand ) {
+ this.operand = operand;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new NotWeight(searcher);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new NotQuery(operand);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return "NOT(" + operand.toString(field) + ")";
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class NotWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+ private final Searcher searcher;
+
+ protected NotWeight( Searcher searcher ) {
+ this.searcher = searcher;
+ assert this.searcher != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return NotQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) throws IOException {
+ // Get the operand's score, and set this on the NOT query
+ Scorer operandScorer = operand.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
+ // Return a custom scorer ...
+ return new NotScorer(operandScorer, reader);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) throws IOException {
+ Explanation operandExplanation = operand.weight(searcher).explain(reader, doc);
+ String desc = operandExplanation.getDescription();
+ return new Explanation(getValue(), "NOT(" + desc + ")");
+ }
+ }
+
+ /**
+ * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
+ * for the operand of the NOT.
+ */
+ protected static class NotScorer extends Scorer {
+ private int docId = -1;
+ private int nextScoredDocId = -1;
+ private final Scorer operandScorer;
+ private final IndexReader reader;
+ private final int pastMaxDocId;
+
+ /**
+ * @param operandScorer the scorer that is used to score the documents based upon the operand of the NOT; may not be null
+ * @param reader the reader that has access to all the docs ...
+ */
+ protected NotScorer( Scorer operandScorer,
+ IndexReader reader ) {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ this.operandScorer = operandScorer;
+ this.reader = reader;
+ assert this.operandScorer != null;
+ assert this.reader != null;
+ this.pastMaxDocId = this.reader.maxDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() throws IOException {
+ if (nextScoredDocId == -1) {
+ // Find the first document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ }
+ do {
+ ++docId;
+ if (docId == pastMaxDocId) {
+ // We're aleady to the end of the documents in the index, so return no more docs
+ return Scorer.NO_MORE_DOCS;
+ }
+ if (docId == nextScoredDocId) {
+ // Find the next document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ continue;
+ }
+ if (reader.isDeleted(docId)) {
+ // We should skip this document ...
+ continue;
+ }
+ return docId;
+ } while (true);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) throws IOException {
+ if (target == Scorer.NO_MORE_DOCS) return target;
+ while (true) {
+ int doc = nextDoc();
+ if (doc >= target) return doc;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,275 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.query.model.FullTextSearchScore;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link FullTextSearchScore} criteria a NOT expression of another
+ * wrapped Query object. This query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that were <i>not</i> scored by the wrapped query. In other words, if the wrapped query ended up scoring any document, that
+ * document is <i>not</i> scored (i.e., skipped) by this query.
+ */
+public class ScoreQuery extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The operand that is being negated by this query.
+ */
+ protected final Query operand;
+
+ /**
+ * Construct a NOT(x) constraint where the 'x' operand is supplied.
+ *
+ * @param operand the operand being negated
+ */
+ public ScoreQuery( Query operand ) {
+ this.operand = operand;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new NotWeight(searcher);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new ScoreQuery(operand);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return "NOT(" + operand.toString(field) + ")";
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class NotWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+ private final Searcher searcher;
+
+ protected NotWeight( Searcher searcher ) {
+ this.searcher = searcher;
+ assert this.searcher != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return ScoreQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) throws IOException {
+ // Get the operand's score, and set this on the NOT query
+ Scorer operandScorer = operand.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
+ // Return a custom scorer ...
+ return new NotScorer(operandScorer, reader);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) throws IOException {
+ Explanation operandExplanation = operand.weight(searcher).explain(reader, doc);
+ String desc = operandExplanation.getDescription();
+ return new Explanation(getValue(), "NOT(" + desc + ")");
+ }
+ }
+
+ /**
+ * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
+ * for the operand of the NOT.
+ */
+ protected static class NotScorer extends Scorer {
+ private int docId = -1;
+ private int nextScoredDocId = -1;
+ private final Scorer operandScorer;
+ private final IndexReader reader;
+ private final int pastMaxDocId;
+
+ /**
+ * @param operandScorer the scorer that is used to score the documents based upon the operand of the NOT; may not be null
+ * @param reader the reader that has access to all the docs ...
+ */
+ protected NotScorer( Scorer operandScorer,
+ IndexReader reader ) {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ this.operandScorer = operandScorer;
+ this.reader = reader;
+ assert this.operandScorer != null;
+ assert this.reader != null;
+ this.pastMaxDocId = this.reader.maxDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() throws IOException {
+ if (nextScoredDocId == -1) {
+ // Find the first document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ }
+ do {
+ ++docId;
+ if (docId == pastMaxDocId) {
+ // We're aleady to the end of the documents in the index, so return no more docs
+ return Scorer.NO_MORE_DOCS;
+ }
+ if (docId == nextScoredDocId) {
+ // Find the next document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ continue;
+ }
+ if (reader.isDeleted(docId)) {
+ // We should skip this document ...
+ continue;
+ }
+ return docId;
+ } while (true);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) throws IOException {
+ if (target == Scorer.NO_MORE_DOCS) return target;
+ while (true) {
+ int doc = nextDoc();
+ if (doc >= target) return doc;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/UuidsQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/UuidsQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/UuidsQuery.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,268 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.UUID;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.ValueFactory;
+
+/**
+ * A Lucene {@link Query} implementation that is used to score positively those documents that have a UUID in the supplied set.
+ * This works for large sets of UUIDs; in smaller numbers, it may be more efficient to create a boolean query that checks for each
+ * fo the UUIDs.
+ */
+public class UuidsQuery extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The operand that is being negated by this query.
+ */
+ protected final Set<UUID> uuids;
+ protected final FieldSelector fieldSelector;
+ protected final String fieldName;
+ protected final ValueFactory<UUID> uuidFactory;
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param uuids the set of UUID values; may not be null
+ * @param uuidFactory the factory to create UUID values; may not be null
+ */
+ public UuidsQuery( String fieldName,
+ Set<UUID> uuids,
+ ValueFactory<UUID> uuidFactory ) {
+ this.fieldName = fieldName;
+ this.uuids = uuids;
+ this.uuidFactory = uuidFactory;
+ assert this.fieldName != null;
+ assert this.uuids != null;
+ assert this.uuidFactory != null;
+ this.fieldSelector = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return fieldName.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+ }
+ };
+ }
+
+ protected boolean includeDocument( IndexReader reader,
+ int docId ) throws IOException {
+ Document doc = reader.document(docId, fieldSelector);
+ String valueString = doc.get(fieldName);
+ return uuids.contains(uuidFactory.create(valueString));
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new UuidSetWeight(searcher);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return fieldName + " IN UUIDs";
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class UuidSetWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+ private final Searcher searcher;
+
+ protected UuidSetWeight( Searcher searcher ) {
+ this.searcher = searcher;
+ assert this.searcher != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return UuidsQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) {
+ // Return a custom scorer ...
+ return new UuidScorer(reader);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) {
+ return new Explanation(getValue(), getQuery().toString());
+ }
+ }
+
+ /**
+ * A scorer for the Path query.
+ */
+ protected class UuidScorer extends Scorer {
+ private int docId = -1;
+ private final int maxDocId;
+ private final IndexReader reader;
+
+ protected UuidScorer( IndexReader reader ) {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ this.reader = reader;
+ assert this.reader != null;
+ this.maxDocId = this.reader.maxDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() throws IOException {
+ do {
+ ++docId;
+ if (reader.isDeleted(docId)) {
+ // We should skip this document ...
+ continue;
+ }
+ if (includeDocument(reader, docId)) return docId;
+ } while (docId < maxDocId);
+ return Scorer.NO_MORE_DOCS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) throws IOException {
+ if (target == Scorer.NO_MORE_DOCS) return target;
+ while (true) {
+ int doc = nextDoc();
+ if (doc >= target) return doc;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/UuidsQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties
===================================================================
--- trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties 2009-11-16 23:25:36 UTC (rev 1320)
@@ -30,8 +30,11 @@
errorWhileRemovingContentAtPath = Error while removing the content at/below "{0}" in the "{1}" workspace of the "{2}" source: {3}
errorWhileUpdatingContent = Error while updating content in the "{0}" workspace of the "{1}" source: {2}
errorWhileCommittingIndexChanges = Error while committing changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
+errorWhileRollingBackIndexChanges = Error while rolling back changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
errorCreatingIndexWriter = Error attempting to create an index writer for the "{0}" index for the "{1}" workspace of the "{2}" source: {3}
errorWhileOptimizingIndexes = Error while optimizing the indexes for the "{0}" workspace of the "{1}" source: {2}
errorWhilePerformingSearch = Error while searching for "{0}" in the "{1}" workspace of the "{2}" source: {3}
errorWhilePerformingQuery = Error while performing the query "{0}" against the content in the "{1}" workspace of the "{2}" source: {3}
-errorWhileInitializingSearchEngine = Error while initializing the search engine for the "{0}" workspace of the "{1}" source: {2}
\ No newline at end of file
+errorWhileInitializingSearchEngine = Error while initializing the search engine for the "{0}" workspace of the "{1}" source: {2}
+errorWhileRemovingIndexesForWorkspace = Error while removing the indexes for the "{0}" workspace of the "{1}" source: {2}
+errorWhilePerformingLuceneQuery = Error while performing the Lucene query "{0}" as part of the "{1}" query against the "{2}" workspace of the "{3}" source: {4}
\ No newline at end of file
Modified: trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -25,16 +25,32 @@
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNull.notNullValue;
-import static org.hamcrest.core.IsSame.sameInstance;
import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.stub;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import java.util.List;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.Graph;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
import org.jboss.dna.graph.connector.RepositoryConnection;
import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
import org.jboss.dna.graph.connector.RepositorySourceException;
import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.model.Query;
+import org.jboss.dna.graph.query.validate.Schemata;
import org.jboss.dna.graph.request.InvalidWorkspaceException;
+import org.jboss.dna.search.IndexLayout;
+import org.jboss.dna.search.IndexSession;
+import org.jboss.dna.search.SearchEngine;
import org.junit.Before;
import org.junit.Test;
@@ -47,7 +63,10 @@
private String workspaceName2;
private InMemoryRepositorySource source;
private RepositoryConnectionFactory connectionFactory;
- private DirectoryConfiguration directoryFactory;
+ private IndexLayout layout;
+ private IndexSession sessionWs1;
+ private IndexSession sessionWs2;
+ private IndexSession sessionDefault;
private Graph content;
@Before
@@ -66,12 +85,6 @@
content.createWorkspace().named(workspaceName1);
content.createWorkspace().named(workspaceName2);
- // Load some content ...
- content.useWorkspace(workspaceName1);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
- content.useWorkspace(workspaceName2);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("aircraft.xml")).into("/");
-
// Set up the connection factory ...
connectionFactory = new RepositoryConnectionFactory() {
@SuppressWarnings( "synthetic-access" )
@@ -80,67 +93,261 @@
}
};
+ // Set up the index layout ...
+ layout = mock(IndexLayout.class);
+ sessionWs1 = mockSession(layout, workspaceName1);
+ sessionWs2 = mockSession(layout, workspaceName2);
+ sessionDefault = mockSession(layout, "");
+
// Now set up the search engine ...
- directoryFactory = DirectoryConfigurations.inMemory();
- engine = new SearchEngine(context, sourceName, connectionFactory, directoryFactory);
+ engine = new SearchEngine(context, sourceName, connectionFactory, layout);
}
- protected Path path( String string ) {
- return context.getValueFactories().getPathFactory().create(string);
+ protected IndexSession mockSession( IndexLayout mockLayout,
+ String workspaceName ) {
+ IndexSession session = mock(IndexSession.class);
+ stub(layout.createSession(context, sourceName, workspaceName, false, false)).toReturn(session);
+ stub(layout.createSession(context, sourceName, workspaceName, false, true)).toReturn(session);
+ stub(layout.createSession(context, sourceName, workspaceName, true, false)).toReturn(session);
+ stub(layout.createSession(context, sourceName, workspaceName, true, true)).toReturn(session);
+ stub(session.getWorkspaceName()).toReturn(workspaceName);
+ stub(session.getSourceName()).toReturn(sourceName);
+ return session;
}
- @Test
- public void shouldHaveLoadedTestContentIntoRepositorySource() {
+ protected Path path( String path ) {
+ return context.getValueFactories().getPathFactory().create(path);
+ }
+
+ protected void loadContent() throws Exception {
+ // Load some content ...
content.useWorkspace(workspaceName1);
- assertThat(content.getNodeAt("/Cars/Hybrid/Toyota Prius").getProperty("msrp").getFirstValue(), is((Object)"$21,500"));
+ content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
content.useWorkspace(workspaceName2);
- assertThat(content.getNodeAt("/Aircraft/Commercial/Boeing 787").getProperty("range").getFirstValue(),
- is((Object)"3050nm"));
+ content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("aircraft.xml")).into("/");
}
@Test
- public void shouldHaveExecutionContext() {
- assertThat(engine.getContext(), is(sameInstance(context)));
+ public void shouldReturnSearchWorkspaceForExistingWorkspaceInSource() {
+ SearchEngine.Workspace workspace = engine.getWorkspace(workspaceName1);
+ assertThat(workspace, is(notNullValue()));
+ assertThat(workspace.modifiedNodesSinceLastOptimize.get(), is(0));
+ assertThat(workspace.getWorkspaceName(), is(workspaceName1));
}
+ @Test( expected = InvalidWorkspaceException.class )
+ public void shouldFailToReturnSearchWorkspaceForNonExistantWorkspaceInSource() {
+ engine.getWorkspace(workspaceName1 + "foobar");
+ }
+
@Test
- public void shouldHaveSourceName() {
- assertThat(engine.getSourceName(), is(sourceName));
+ public void shouldDoNothingDuringRemoveWorkspaceIfWorkspaceHasNotBeenLoaded() throws Exception {
+ engine.removeWorkspace(workspaceName1);
+ verifyZeroInteractions(layout);
}
@Test
- public void shouldFindExistingWorkspaces() {
- assertThat(engine.getWorkspaceEngine(workspaceName1), is(notNullValue()));
- assertThat(engine.getWorkspaceEngine(workspaceName2), is(notNullValue()));
+ public void shouldForwardRemoveWorkspaceToIndexLayout() throws Exception {
+ engine.getWorkspace(workspaceName1);
+ engine.removeWorkspace(workspaceName1);
+ verify(layout).destroyIndexes(context, sourceName, workspaceName1);
+ verifyNoMoreInteractions(layout);
}
- @Test( expected = InvalidWorkspaceException.class )
- public void shouldNotFindNonExistingWorkspaces() {
- engine.getWorkspaceEngine("Non-existant workspace");
+ @Test
+ public void shouldForwardRemoveWorkspaceToIndexLayoutForEachWorkspaceThatWasLoaded() throws Exception {
+ engine.getWorkspace(workspaceName1);
+ engine.removeWorkspaces();
+ verify(layout).destroyIndexes(context, sourceName, workspaceName1);
+ verifyZeroInteractions(layout);
}
@Test
- public void shouldIndexAllContentInRepositorySource() {
- engine.indexContent(3);
+ public void shouldForwardRemoveWorkspaceToIndexLayoutForAllWorkspacesThatWereLoaded() throws Exception {
+ engine.getWorkspace(workspaceName1);
+ engine.getWorkspace(workspaceName2);
+ engine.removeWorkspaces();
+ verify(layout).destroyIndexes(context, sourceName, workspaceName1);
+ verify(layout).destroyIndexes(context, sourceName, workspaceName2);
+ verifyNoMoreInteractions(layout);
}
+ @Test( expected = IllegalArgumentException.class )
+ public void shouldFailIfNullWorkspaceNamePassedToRemoveWorkspace() throws Exception {
+ engine.removeWorkspace(null);
+ }
+
@Test
- public void shouldIndexAllContentInWorkspace() {
- engine.indexContent(workspaceName1, 3);
- engine.indexContent(workspaceName2, 5);
+ public void shouldForwardOptimizeOfWorkspaceToIndexSession() throws Exception {
+ engine.optimize(workspaceName1);
+ verify(sessionWs1).optimize();
+ verify(sessionWs1).commit();
+ verifyNoMoreInteractions(sessionWs1);
}
@Test
- public void shouldIndexAllContentInWorkspaceBelowPath() {
- engine.indexContent(workspaceName1, path("/Cars/Hybrid"), 3);
- engine.indexContent(workspaceName2, path("/Aircraft/Commercial"), 5);
+ public void shouldForwardOptimizeOfAllWorkspacesToEachIndexSession() throws Exception {
+ engine.optimize(); // will find all three workspaces
+ verify(sessionWs1).optimize();
+ verify(sessionWs1).commit();
+ verifyNoMoreInteractions(sessionWs1);
+ verify(sessionWs2).optimize();
+ verify(sessionWs2).commit();
+ verifyNoMoreInteractions(sessionWs2);
+ verify(sessionDefault).optimize();
+ verify(sessionDefault).commit();
+ verifyNoMoreInteractions(sessionDefault);
}
@Test
- public void shouldReIndexAllContentInWorkspaceBelowPath() {
- for (int i = 0; i != 0; i++) {
- engine.indexContent(workspaceName1, path("/Cars/Hybrid"), 3);
- engine.indexContent(workspaceName2, path("/Aircraft/Commercial"), 5);
- }
+ public void shouldForwardIndexOfWorkspaceToIndexSession() throws Exception {
+ loadContent();
+ engine.index(workspaceName1, 3);
+ verify(sessionWs1, times(18)).index((Node)anyObject());
+ verify(sessionWs1).commit();
}
+
+ @Test
+ public void shouldForwardIndexOfSubgraphInWorkspaceToIndexSession() throws Exception {
+ loadContent();
+ engine.index(workspaceName1, path("/Cars"), 3);
+ verify(sessionWs1).deleteBelow(path("/Cars"));
+ verify(sessionWs1, times(17)).index((Node)anyObject());
+ verify(sessionWs1).commit();
+ }
+
+ @Test
+ public void shouldForwardIndexEntireWorkspaceToIndexSession() throws Exception {
+ loadContent();
+ engine.index(workspaceName1, path("/"), 3);
+ verify(sessionWs1, times(18)).index((Node)anyObject());
+ verify(sessionWs1).commit();
+ }
+
+ @Test
+ public void shouldForwardIndexOfAllWorkspacesToEachIndexSession() throws Exception {
+ loadContent();
+ engine.index(3); // will find all three workspaces
+ verify(sessionWs1, times(18)).index((Node)anyObject());
+ verify(sessionWs1).commit();
+ verify(sessionWs2, times(24)).index((Node)anyObject());
+ verify(sessionWs2).commit();
+ verify(sessionDefault, times(1)).index((Node)anyObject());
+ verify(sessionDefault).commit();
+ }
+
+ @SuppressWarnings( "unchecked" )
+ @Test
+ public void shouldForwardSearchToIndexSession() throws Exception {
+ String query = "term1 term2";
+ engine.fullTextSearch(context, workspaceName1, query, 3, 0);
+ verify(sessionWs1).search(eq(context), eq(query), eq(3), eq(0), (List<Location>)anyObject());
+ verify(sessionWs1).commit();
+ }
+
+ @Test
+ public void shouldForwardQueryToIndexSession() throws Exception {
+ Query query = mock(Query.class);
+ Schemata schemata = mock(Schemata.class);
+ engine.query(context, workspaceName1, query, schemata);
+ verify(sessionWs1).query(eq(new QueryContext(context, schemata)), eq(query));
+ verify(sessionWs1).commit();
+ }
+
+ // These tests expect there to be some real IndexLayout ...
+
+ // @Test
+ // public void shouldIndexAllContentInRepositorySource() throws Exception {
+ // loadContent();
+ // engine.index(3);
+ // }
+ //
+ // @Test
+ // public void shouldIndexAllContentInWorkspace() throws Exception {
+ // loadContent();
+ // engine.index(workspaceName1, 3);
+ // engine.index(workspaceName2, 5);
+ // }
+ //
+ // @Test
+ // public void shouldIndexAllContentInWorkspaceBelowPath() throws Exception {
+ // loadContent();
+ // engine.index(workspaceName1, path("/Cars/Hybrid"), 3);
+ // engine.index(workspaceName2, path("/Aircraft/Commercial"), 5);
+ // }
+ //
+ // @Test
+ // public void shouldReIndexAllContentInWorkspaceBelowPath() throws Exception {
+ // loadContent();
+ // for (int i = 0; i != 0; i++) {
+ // engine.index(workspaceName1, path("/Cars/Hybrid"), 3);
+ // engine.index(workspaceName2, path("/Aircraft/Commercial"), 5);
+ // }
+ // }
+ //
+ // @Test
+ // public void shouldHaveLoadedTestContentIntoRepositorySource() {
+ // assertThat(content.getNodeAt("/Cars/Hybrid/Toyota Prius").getProperty("msrp").getFirstValue(), is((Object)"$21,500"));
+ // }
+ //
+ // @Test
+ // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfOne() {
+ // engine.index(workspaceName1, path("/"), 1);
+ // }
+ //
+ // @Test
+ // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTwo() {
+ // engine.index(workspaceName1, path("/"), 2);
+ // }
+ //
+ // @Test
+ // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfThree() {
+ // engine.index(workspaceName1, path("/"), 3);
+ // }
+ //
+ // @Test
+ // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfFour() {
+ // engine.index(workspaceName1, path("/"), 4);
+ // }
+ //
+ // @Test
+ // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTen() {
+ // engine.index(workspaceName1, path("/"), 10);
+ // }
+ //
+ // @Test
+ // public void shouldIndexRepositoryContentStartingAtNonRootNode() {
+ // engine.index(workspaceName1, path("/Cars"), 10);
+ // }
+ //
+ // @Test
+ // public void shouldReIndexRepositoryContentStartingAtNonRootNode() {
+ // for (int i = 0; i != 3; ++i) {
+ // engine.index(workspaceName1, path("/Cars"), 10);
+ // }
+ // }
+ //
+ // @Test
+ // public void shouldFindNodesByFullTextSearch() {
+ // engine.index(workspaceName1, path("/"), 100);
+ // List<Location> results = engine.fullTextSearch(context, workspaceName1, "Toyota Prius", 10, 0);
+ // assertThat(results, is(notNullValue()));
+ // assertThat(results.size(), is(2));
+ // assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
+ // assertThat(results.get(1).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
+ // }
+ //
+ // @Test
+ // public void shouldFindNodesByFullTextSearchWithOffset() {
+ // engine.index(workspaceName1, path("/"), 100);
+ // List<Location> results = engine.fullTextSearch(context, workspaceName1, "toyota prius", 1, 0);
+ // assertThat(results, is(notNullValue()));
+ // assertThat(results.size(), is(1));
+ // assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
+ //
+ // results = engine.fullTextSearch(context, workspaceName1, "+Toyota", 1, 1);
+ // assertThat(results, is(notNullValue()));
+ // assertThat(results.size(), is(1));
+ // assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
+ // }
}
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java 2009-11-16 23:24:06 UTC (rev 1319)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -1,171 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNull.notNullValue;
-import static org.junit.Assert.assertThat;
-import java.util.List;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Graph;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.connector.RepositoryConnection;
-import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
-import org.jboss.dna.graph.connector.RepositorySourceException;
-import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
-import org.jboss.dna.graph.property.Path;
-import org.junit.Before;
-import org.junit.Test;
-
-public class WorkspaceSearchEngineTest {
-
- private WorkspaceSearchEngine engine;
- private ExecutionContext context;
- private String sourceName;
- private String workspaceName;
- private InMemoryRepositorySource source;
- private RepositoryConnectionFactory connectionFactory;
- private DirectoryConfiguration directoryFactory;
- private IndexStrategy indexingStrategy;
- private Graph content;
-
- @Before
- public void beforeEach() throws Exception {
- context = new ExecutionContext();
- sourceName = "sourceA";
- workspaceName = "workspace1";
-
- // Set up the source and graph instance ...
- source = new InMemoryRepositorySource();
- source.setName(sourceName);
- source.setDefaultWorkspaceName(workspaceName);
- content = Graph.create(source, context);
-
- // Load some content ...
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
-
- // Set up the connection factory ...
- connectionFactory = new RepositoryConnectionFactory() {
- @SuppressWarnings( "synthetic-access" )
- public RepositoryConnection createConnection( String sourceName ) throws RepositorySourceException {
- return source.getConnection();
- }
- };
-
- // Set up the indexing strategy ...
- IndexRules rules = IndexRules.createBuilder(KitchenSinkIndexStrategy.DEFAULT_RULES)
- .defaultTo(IndexRules.INDEX | IndexRules.ANALYZE | IndexRules.FULL_TEXT)
- .build();
- indexingStrategy = new KitchenSinkIndexStrategy(rules);
-
- // Now set up the search engine ...
- directoryFactory = DirectoryConfigurations.inMemory();
- engine = new WorkspaceSearchEngine(context, directoryFactory, indexingStrategy, sourceName, workspaceName,
- connectionFactory);
- }
-
- protected Path path( String string ) {
- return context.getValueFactories().getPathFactory().create(string);
- }
-
- protected void assertSearchResults( String fullTextSearch,
- Path... expectedPaths ) {
- int numExpected = expectedPaths.length;
- List<Location> results = engine.fullTextSearch(fullTextSearch, numExpected, 0);
- int numFound = results.size();
- assertThat("Different number of results were found", numExpected, is(numFound));
- Path[] actualPaths = new Path[numFound];
- int i = 0;
- for (Location actual : results) {
- actualPaths[i++] = actual.getPath();
- }
- assertThat(expectedPaths, is(actualPaths));
- }
-
- @Test
- public void shouldHaveLoadedTestContentIntoRepositorySource() {
- assertThat(content.getNodeAt("/Cars/Hybrid/Toyota Prius").getProperty("msrp").getFirstValue(), is((Object)"$21,500"));
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfOne() {
- engine.indexContent(path("/"), 1);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTwo() {
- engine.indexContent(path("/"), 2);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfThree() {
- engine.indexContent(path("/"), 3);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfFour() {
- engine.indexContent(path("/"), 4);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTen() {
- engine.indexContent(path("/"), 10);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtNonRootNode() {
- engine.indexContent(path("/Cars"), 10);
- }
-
- @Test
- public void shouldReIndexRepositoryContentStartingAtNonRootNode() {
- for (int i = 0; i != 3; ++i) {
- engine.indexContent(path("/Cars"), 10);
- }
- }
-
- @Test
- public void shouldFindNodesByFullTextSearch() {
- engine.indexContent(path("/"), 100);
- List<Location> results = engine.fullTextSearch("Toyota Prius", 10, 0);
- assertThat(results, is(notNullValue()));
- assertThat(results.size(), is(2));
- assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
- assertThat(results.get(1).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
- }
-
- @Test
- public void shouldFindNodesByFullTextSearchWithOffset() {
- engine.indexContent(path("/"), 100);
- List<Location> results = engine.fullTextSearch("toyota prius", 1, 0);
- assertThat(results, is(notNullValue()));
- assertThat(results.size(), is(1));
- assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
-
- results = engine.fullTextSearch("+Toyota", 1, 1);
- assertThat(results, is(notNullValue()));
- assertThat(results.size(), is(1));
- assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
- }
-}
Added: trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java (rev 0)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java 2009-11-16 23:25:36 UTC (rev 1320)
@@ -0,0 +1,126 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.stub;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Similarity;
+import org.jboss.dna.search.query.NotQuery;
+import org.junit.Test;
+
+public class LuceneNotQueryTest {
+
+ @Test
+ public void scorerShouldSkipAdjacentDocsIfScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(0, 1, 2, 3, 4);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 5, 6, 7, 8, 9);
+ }
+
+ @Test
+ public void scorerShouldSkipDocsAtEndIfScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(8, 9);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 2, 3, 4, 5, 6, 7);
+ }
+
+ @Test
+ public void scorerShouldScoreFirstDocsIfNotScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(2, 3, 4);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 5, 6, 7, 8, 9);
+ }
+
+ @Test
+ public void scorerShouldScoreNonAdjacentDocsNotScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(2, 4, 8);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 3, 5, 6, 7, 9);
+ }
+
+ protected void assertScores( Scorer scorer,
+ int... docIds ) throws IOException {
+ for (int docId : docIds) {
+ assertThat(scorer.nextDoc(), is(docId));
+ assertThat(scorer.score(), is(1.0f));
+ }
+ assertThat(scorer.nextDoc(), is(Scorer.NO_MORE_DOCS));
+ }
+
+ protected static class MockScorer extends Scorer {
+ private final Iterator<Integer> docIds;
+
+ protected MockScorer( int... docIds ) {
+ super(Similarity.getDefault());
+ List<Integer> ids = new ArrayList<Integer>();
+ for (int docId : docIds) {
+ ids.add(new Integer(docId));
+ }
+ this.docIds = ids.iterator();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() {
+ if (docIds.hasNext()) return docIds.next();
+ return Scorer.NO_MORE_DOCS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ throw new UnsupportedOperationException("Should not be called");
+ }
+ }
+
+}
Property changes on: trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
14 years, 5 months
DNA SVN: r1319 - in trunk: dna-graph/src/main/java/org/jboss/dna/graph/query/process and 6 other directories.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-16 18:24:06 -0500 (Mon, 16 Nov 2009)
New Revision: 1319
Added:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java
Removed:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ValueCache.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingRules.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java
trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/AddAccessNodesTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ChooseJoinAlgorithmTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RightOuterToLeftOuterJoinsTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/DistinctComponentTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortLocationsComponentTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortValuesComponentTest.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java
trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java
trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java
Log:
DNA-467 Additional refactoring to move toward a working search engine.
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryContext.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -26,6 +26,7 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import net.jcip.annotations.Immutable;
import org.jboss.dna.common.collection.Problems;
import org.jboss.dna.common.collection.SimpleProblems;
import org.jboss.dna.common.util.CheckArg;
@@ -35,64 +36,120 @@
import org.jboss.dna.graph.query.validate.Schemata;
/**
- *
+ * An immutable context in which queries are to be executed. Each query context defines the information that is available during
+ * query execution.
*/
-public final class QueryContext {
+@Immutable
+public class QueryContext {
private final ExecutionContext context;
private final PlanHints hints;
private final Schemata schemata;
private final Problems problems;
private final Map<String, Object> variables;
+ /**
+ * Create a new context for query execution.
+ *
+ * @param context the execution context
+ * @param schemata the schemata
+ * @param hints the hints, or null if there are no hints
+ * @param problems the problems container, or null if a new problems container should be created
+ * @param variables the mapping of variables and values, or null if there are no such variables
+ * @throws IllegalArgumentException if the context or schmata are null
+ */
public QueryContext( ExecutionContext context,
+ Schemata schemata,
PlanHints hints,
- Schemata schemata,
Problems problems,
Map<String, Object> variables ) {
CheckArg.isNotNull(context, "context");
+ CheckArg.isNotNull(schemata, "schemata");
this.context = context;
this.hints = hints != null ? hints : new PlanHints();
this.schemata = schemata;
this.problems = problems != null ? problems : new SimpleProblems();
this.variables = variables != null ? Collections.<String, Object>unmodifiableMap(new HashMap<String, Object>(variables)) : Collections.<String, Object>emptyMap();
+ assert this.context != null;
+ assert this.hints != null;
+ assert this.schemata != null;
+ assert this.problems != null;
+ assert this.variables != null;
}
+ /**
+ * Create a new context for query execution.
+ *
+ * @param context the execution context
+ * @param schemata the schemata
+ * @param hints the hints, or null if there are no hints
+ * @param problems the problems container, or null if a new problems container should be created
+ * @throws IllegalArgumentException if the context or schmata are null
+ */
public QueryContext( ExecutionContext context,
+ Schemata schemata,
PlanHints hints,
- Schemata schemata,
Problems problems ) {
- this(context, hints, schemata, problems, null);
+ this(context, schemata, hints, problems, null);
}
+ /**
+ * Create a new context for query execution.
+ *
+ * @param context the execution context
+ * @param schemata the schemata
+ * @param hints the hints, or null if there are no hints
+ * @throws IllegalArgumentException if the context or schmata are null
+ */
public QueryContext( ExecutionContext context,
- PlanHints hints,
+ Schemata schemata,
+ PlanHints hints ) {
+ this(context, schemata, hints, null, null);
+ }
+
+ /**
+ * Create a new context for query execution.
+ *
+ * @param context the execution context
+ * @param schemata the schemata
+ * @throws IllegalArgumentException if the context or schmata are null
+ */
+ public QueryContext( ExecutionContext context,
Schemata schemata ) {
- this(context, hints, schemata, null, null);
+ this(context, schemata, null, null, null);
}
/**
- * @return context
+ * Get the execution context available to this query context.
+ *
+ * @return the execution context; never null
*/
public final ExecutionContext getExecutionContext() {
return context;
}
/**
- * @return hints
+ * Get the plan hints.
+ *
+ * @return the plan hints; never null
*/
public final PlanHints getHints() {
return hints;
}
/**
- * @return problems
+ * Get the problem container used by this query context. Any problems that have been encountered will be accumlated in this
+ * container.
+ *
+ * @return the problem container; never null
*/
public final Problems getProblems() {
return problems;
}
/**
- * @return schemata
+ * Get the definition of the tables available within this query context.
+ *
+ * @return the schemata; never null
*/
public Schemata getSchemata() {
return schemata;
@@ -101,10 +158,66 @@
/**
* Get the variables that are to be substituted into the {@link BindVariableName} used in the query.
*
- * @return immutable map of variable values keyed by their name; never null
+ * @return immutable map of variable values keyed by their name; never null but possibly empty
*/
public Map<String, Object> getVariables() {
return variables;
}
+ /**
+ * Obtain a copy of this context, except that the copy uses the supplied execution context.
+ *
+ * @param context the execution context that should be used in the new query context
+ * @return the new context; never null
+ * @throws IllegalArgumentException if the execution context reference is null
+ */
+ public QueryContext with( ExecutionContext context ) {
+ CheckArg.isNotNull(context, "context");
+ return new QueryContext(context, schemata, hints, problems, variables);
+ }
+
+ /**
+ * Obtain a copy of this context, except that the copy uses the supplied schemata.
+ *
+ * @param schemata the schemata that should be used in the new context
+ * @return the new context; never null
+ * @throws IllegalArgumentException if the schemata reference is null
+ */
+ public QueryContext with( Schemata schemata ) {
+ CheckArg.isNotNull(schemata, "schemata");
+ return new QueryContext(context, schemata, hints, problems, variables);
+ }
+
+ /**
+ * Obtain a copy of this context, except that the copy uses the supplied hints.
+ *
+ * @param hints the hints that should be used in the new context
+ * @return the new context; never null
+ * @throws IllegalArgumentException if the hints reference is null
+ */
+ public QueryContext with( PlanHints hints ) {
+ CheckArg.isNotNull(hints, "hints");
+ return new QueryContext(context, schemata, hints, problems, variables);
+ }
+
+ /**
+ * Obtain a copy of this context, except that the copy uses the supplied problem container.
+ *
+ * @param problems the problems that should be used in the new context; may be null if a new problem container should be used
+ * @return the new context; never null
+ */
+ public QueryContext with( Problems problems ) {
+ return new QueryContext(context, schemata, hints, problems, variables);
+ }
+
+ /**
+ * Obtain a copy of this context, except that the copy uses the supplied variables.
+ *
+ * @param variables the variables that should be used in the new context; may be null if there are no such variables
+ * @return the new context; never null
+ */
+ public QueryContext with( Map<String, Object> variables ) {
+ return new QueryContext(context, schemata, hints, problems, variables);
+ }
+
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -27,18 +27,15 @@
import java.util.concurrent.atomic.AtomicBoolean;
import net.jcip.annotations.ThreadSafe;
import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.query.QueryResults.Statistics;
import org.jboss.dna.graph.query.model.Column;
import org.jboss.dna.graph.query.model.Constraint;
import org.jboss.dna.graph.query.model.FullTextSearch;
import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.model.SelectorName;
import org.jboss.dna.graph.query.model.Visitors;
import org.jboss.dna.graph.query.optimize.Optimizer;
import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
import org.jboss.dna.graph.query.plan.CanonicalPlanner;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.plan.PlanNode;
import org.jboss.dna.graph.query.plan.Planner;
import org.jboss.dna.graph.query.plan.PlanNode.Property;
@@ -54,17 +51,6 @@
@ThreadSafe
public class QueryEngine implements Queryable {
- /**
- * A {@link Schemata} implementation that always returns null, meaning the table does not exist.
- */
- private static final Schemata DEFAULT_SCHEMATA = new Schemata() {
- public Table getTable( SelectorName name ) {
- // This won't allow the query engine to do anything (or much of anything),
- // but it is legal and will result in meaningful problems
- return null;
- }
- };
-
private final Planner planner;
private final Optimizer optimizer;
private final Processor processor;
@@ -92,42 +78,25 @@
/**
* {@inheritDoc}
*
- * @see org.jboss.dna.graph.query.Queryable#execute(org.jboss.dna.graph.ExecutionContext,
- * org.jboss.dna.graph.query.model.QueryCommand, org.jboss.dna.graph.query.validate.Schemata)
+ * @see org.jboss.dna.graph.query.Queryable#execute(org.jboss.dna.graph.query.QueryContext,
+ * org.jboss.dna.graph.query.model.QueryCommand)
*/
- public QueryResults execute( ExecutionContext context,
- QueryCommand query,
- Schemata schemata ) {
- return execute(context, query, schemata, new PlanHints());
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.Queryable#execute(org.jboss.dna.graph.ExecutionContext,
- * org.jboss.dna.graph.query.model.QueryCommand, org.jboss.dna.graph.query.validate.Schemata,
- * org.jboss.dna.graph.query.plan.PlanHints)
- */
- public QueryResults execute( ExecutionContext context,
- QueryCommand query,
- Schemata schemata,
- PlanHints hints ) {
+ public QueryResults execute( QueryContext context,
+ QueryCommand query ) {
CheckArg.isNotNull(context, "context");
CheckArg.isNotNull(query, "query");
- if (schemata == null) schemata = DEFAULT_SCHEMATA;
- QueryContext queryContext = new QueryContext(context, hints, schemata);
// Create the canonical plan ...
long start = System.nanoTime();
- PlanNode plan = planner.createPlan(queryContext, query);
+ PlanNode plan = planner.createPlan(context, query);
long duration = System.nanoTime() - start;
Statistics stats = new Statistics(duration);
QueryResultColumns resultColumns = QueryResultColumns.empty();
- if (!queryContext.getProblems().hasErrors()) {
+ if (!context.getProblems().hasErrors()) {
// Optimize the plan ...
start = System.nanoTime();
- PlanNode optimizedPlan = optimizer.optimize(queryContext, plan);
+ PlanNode optimizedPlan = optimizer.optimize(context, plan);
duration = System.nanoTime() - start;
stats = stats.withOptimizationTime(duration);
@@ -137,11 +106,11 @@
duration = System.nanoTime() - start;
stats = stats.withOptimizationTime(duration);
- if (!queryContext.getProblems().hasErrors()) {
+ if (!context.getProblems().hasErrors()) {
// Execute the plan ...
try {
start = System.nanoTime();
- return processor.execute(queryContext, query, stats, optimizedPlan);
+ return processor.execute(context, query, stats, optimizedPlan);
} finally {
duration = System.nanoTime() - start;
stats = stats.withOptimizationTime(duration);
@@ -149,7 +118,7 @@
}
}
// There were problems somewhere ...
- return new org.jboss.dna.graph.query.process.QueryResults(queryContext, query, resultColumns, stats);
+ return new org.jboss.dna.graph.query.process.QueryResults(context, query, resultColumns, stats);
}
protected QueryResultColumns determineQueryResultColumns( PlanNode optimizedPlan ) {
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -23,10 +23,7 @@
*/
package org.jboss.dna.graph.query;
-import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.validate.Schemata;
/**
* An interface defining the ability to submit a query and obtain results.
@@ -38,26 +35,9 @@
*
* @param context the context in which the query should be executed
* @param query the query that is to be executed
- * @param schemata the schemata that should be used to validate the query
* @return the query results; never null
* @throws IllegalArgumentException if the context or query references are null
*/
- QueryResults execute( ExecutionContext context,
- QueryCommand query,
- Schemata schemata );
-
- /**
- * Execute the supplied query by planning, optimizing, and then processing it.
- *
- * @param context the context in which the query should be executed
- * @param query the query that is to be executed
- * @param schemata the schemata that should be used to validate the query
- * @param hints the hints for the execution; may be null if there are no hints
- * @return the query results; never null
- * @throws IllegalArgumentException if the context or query references are null
- */
- QueryResults execute( ExecutionContext context,
- QueryCommand query,
- Schemata schemata,
- PlanHints hints );
+ QueryResults execute( QueryContext context,
+ QueryCommand query );
}
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -0,0 +1,120 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.process;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.NameFactory;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.AllNodes;
+import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Column;
+import org.jboss.dna.graph.query.model.Constraint;
+import org.jboss.dna.graph.query.model.Limit;
+import org.jboss.dna.graph.query.model.SelectorName;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.PlanNode.Property;
+import org.jboss.dna.graph.query.plan.PlanNode.Type;
+import org.jboss.dna.graph.query.validate.Schemata;
+
+/**
+ * A reusable base class for {@link ProcessingComponent} implementations that does everything except obtain the correct
+ * {@link Location} objects for the query results.
+ */
+public abstract class AbstractAccessComponent extends ProcessingComponent {
+
+ protected final PlanNode accessNode;
+ protected final SelectorName sourceName;
+ protected final List<Column> projectedColumns;
+ protected final Constraint constraint;
+ protected final Limit limit;
+
+ protected AbstractAccessComponent( QueryContext context,
+ Columns columns,
+ PlanNode accessNode ) {
+ super(context, columns);
+ this.accessNode = accessNode;
+
+ // Find the table name; should be
+ PlanNode source = accessNode.findAtOrBelow(Type.SOURCE);
+ if (source != null) {
+ this.sourceName = source.getProperty(Property.SOURCE_NAME, SelectorName.class);
+ if (!AllNodes.ALL_NODES_NAME.equals(this.sourceName)) {
+ throw new IllegalArgumentException();
+ }
+ } else {
+ throw new IllegalArgumentException();
+ }
+
+ // Find the project ...
+ PlanNode project = accessNode.findAtOrBelow(Type.PROJECT);
+ if (project != null) {
+ List<Column> projectedColumns = project.getPropertyAsList(Property.PROJECT_COLUMNS, Column.class);
+ if (projectedColumns != null) {
+ this.projectedColumns = projectedColumns;
+ } else {
+ // Get the columns from the source columns ...
+ List<Schemata.Column> schemataColumns = source.getPropertyAsList(Property.SOURCE_COLUMNS, Schemata.Column.class);
+ this.projectedColumns = new ArrayList<Column>(schemataColumns.size());
+ NameFactory nameFactory = context.getExecutionContext().getValueFactories().getNameFactory();
+ for (Schemata.Column schemataColumn : schemataColumns) {
+ String columnName = schemataColumn.getName();
+ // PropertyType type = schemataColumn.getPropertyType();
+ Name propertyName = nameFactory.create(columnName);
+ Column column = new Column(sourceName, propertyName, columnName);
+ this.projectedColumns.add(column);
+ }
+ }
+ } else {
+ throw new IllegalArgumentException();
+ }
+
+ // Add the criteria ...
+ Constraint constraint = null;
+ for (PlanNode select : accessNode.findAllAtOrBelow(Type.SELECT)) {
+ Constraint selectConstraint = select.getProperty(Property.SELECT_CRITERIA, Constraint.class);
+ if (constraint != null) {
+ constraint = new And(constraint, selectConstraint);
+ } else {
+ constraint = selectConstraint;
+ }
+ }
+ this.constraint = constraint;
+
+ // Find the limit ...
+ Limit limit = Limit.NONE;
+ PlanNode limitNode = accessNode.findAtOrBelow(Type.LIMIT);
+ if (limitNode != null) {
+ Integer count = limitNode.getProperty(Property.LIMIT_COUNT, Integer.class);
+ if (count != null) limit = limit.withRowLimit(count.intValue());
+ Integer offset = limitNode.getProperty(Property.LIMIT_OFFSET, Integer.class);
+ if (offset != null) limit = limit.withOffset(offset.intValue());
+ }
+ this.limit = limit;
+ }
+
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/AbstractAccessComponent.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ValueCache.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ValueCache.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ValueCache.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -1,44 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.graph.query.process;
-
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.property.Name;
-import org.jboss.dna.graph.query.QueryResults;
-
-/**
- * Interface representing a cache of property values used in a {@link QueryResults}.
- */
-public interface ValueCache {
-
- /**
- * Get the value of the named property on the node at the supplied location.
- *
- * @param location the location of the node; may not be null
- * @param name the property name
- * @return the value of the property, or null if there is no such value
- */
- Object getProperty( Location location,
- Name name );
-}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/ImmutableSchemata.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -45,7 +45,6 @@
import org.jboss.dna.graph.query.parse.InvalidQueryException;
import org.jboss.dna.graph.query.parse.SqlQueryParser;
import org.jboss.dna.graph.query.plan.CanonicalPlanner;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.plan.PlanNode;
import org.jboss.dna.graph.query.plan.PlanNode.Property;
import org.jboss.dna.graph.query.plan.PlanNode.Type;
@@ -394,7 +393,7 @@
for (SelectorName name : viewNames) {
QueryCommand command = definitions.get(name);
// Create the canonical plan for the definition ...
- QueryContext queryContext = new QueryContext(context, new PlanHints(), schemata);
+ QueryContext queryContext = new QueryContext(context, schemata);
CanonicalPlanner planner = new CanonicalPlanner();
PlanNode plan = planner.createPlan(queryContext, command);
if (queryContext.getProblems().hasErrors()) continue;
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/AddAccessNodesTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/AddAccessNodesTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/AddAccessNodesTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -31,9 +31,6 @@
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.query.AbstractQueryTest;
import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.optimize.AddAccessNodes;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.plan.PlanNode;
import org.jboss.dna.graph.query.plan.PlanNode.Type;
import org.jboss.dna.graph.query.validate.Schemata;
@@ -50,7 +47,7 @@
@Before
public void beforeEach() {
- context = new QueryContext(new ExecutionContext(), new PlanHints(), mock(Schemata.class));
+ context = new QueryContext(new ExecutionContext(), mock(Schemata.class));
rule = AddAccessNodes.INSTANCE;
}
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ChooseJoinAlgorithmTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ChooseJoinAlgorithmTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ChooseJoinAlgorithmTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -35,10 +35,7 @@
import org.jboss.dna.graph.query.model.DescendantNodeJoinCondition;
import org.jboss.dna.graph.query.model.JoinCondition;
import org.jboss.dna.graph.query.model.JoinType;
-import org.jboss.dna.graph.query.optimize.ChooseJoinAlgorithm;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
import org.jboss.dna.graph.query.plan.JoinAlgorithm;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.plan.PlanNode;
import org.jboss.dna.graph.query.plan.PlanNode.Property;
import org.jboss.dna.graph.query.plan.PlanNode.Type;
@@ -57,7 +54,7 @@
@Before
public void beforeEach() {
- context = new QueryContext(new ExecutionContext(), new PlanHints(), mock(Schemata.class));
+ context = new QueryContext(new ExecutionContext(), mock(Schemata.class));
bestRule = ChooseJoinAlgorithm.USE_BEST_JOIN_ALGORITHM;
nestedRule = ChooseJoinAlgorithm.USE_ONLY_NESTED_JOIN_ALGORITHM;
}
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -32,9 +32,6 @@
import org.jboss.dna.graph.query.AbstractQueryTest;
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.model.JoinType;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.optimize.PushSelectCriteria;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.plan.PlanNode;
import org.jboss.dna.graph.query.plan.PlanNode.Property;
import org.jboss.dna.graph.query.plan.PlanNode.Type;
@@ -52,7 +49,7 @@
@Before
public void beforeEach() {
- context = new QueryContext(new ExecutionContext(), new PlanHints(), mock(Schemata.class));
+ context = new QueryContext(new ExecutionContext(), mock(Schemata.class));
rule = PushSelectCriteria.INSTANCE;
}
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -38,7 +38,6 @@
import org.jboss.dna.graph.query.model.Operator;
import org.jboss.dna.graph.query.model.PropertyValue;
import org.jboss.dna.graph.query.model.SelectorName;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.plan.PlanNode;
import org.jboss.dna.graph.query.plan.PlanNode.Property;
import org.jboss.dna.graph.query.plan.PlanNode.Type;
@@ -67,7 +66,7 @@
builder.addView("v1", "SELECT c11, c12 FROM t1 WHERE c13 < CAST('3' AS LONG)");
builder.addView("v2", "SELECT t1.c11, t1.c12, t2.c23 FROM t1 JOIN t2 ON t1.c11 = t2.c21");
schemata = builder.build();
- context = new QueryContext(execContext, new PlanHints(), schemata);
+ context = new QueryContext(execContext, schemata);
}
/**
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RightOuterToLeftOuterJoinsTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RightOuterToLeftOuterJoinsTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RightOuterToLeftOuterJoinsTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -32,9 +32,6 @@
import org.jboss.dna.graph.query.AbstractQueryTest;
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.model.JoinType;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.optimize.RightOuterToLeftOuterJoins;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.plan.PlanNode;
import org.jboss.dna.graph.query.plan.PlanNode.Property;
import org.jboss.dna.graph.query.plan.PlanNode.Type;
@@ -52,7 +49,7 @@
@Before
public void beforeEach() {
- context = new QueryContext(new ExecutionContext(), new PlanHints(), mock(Schemata.class));
+ context = new QueryContext(new ExecutionContext(), mock(Schemata.class));
rule = RightOuterToLeftOuterJoins.INSTANCE;
}
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizerTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -86,7 +86,7 @@
builder.addView("type2",
"SELECT all.a3, all.a4 FROM all WHERE all.primaryType IN ('t2','t0') AND all.mixins IN ('t4','t5')");
Schemata schemata = builder.build();
- context = new QueryContext(execContext, new PlanHints(), schemata);
+ context = new QueryContext(execContext, schemata);
node = new PlanNode(Type.ACCESS);
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/plan/CanonicalPlannerTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -112,7 +112,7 @@
public void shouldProducePlanForSelectStarFromTable() {
schemata = schemataBuilder.addTable("__ALLNODES__", "column1", "column2", "column3").build();
query = builder.selectStar().fromAllNodes().query();
- queryContext = new QueryContext(context, hints, schemata, problems);
+ queryContext = new QueryContext(context, schemata, hints, problems);
plan = planner.createPlan(queryContext, query);
assertThat(problems.isEmpty(), is(true));
assertProjectNode(plan, "column1", "column2", "column3");
@@ -128,7 +128,7 @@
public void shouldProduceErrorWhenSelectingNonExistantTable() {
schemata = schemataBuilder.addTable("someTable", "column1", "column2", "column3").build();
query = builder.selectStar().fromAllNodes().query();
- queryContext = new QueryContext(context, hints, schemata, problems);
+ queryContext = new QueryContext(context, schemata, hints, problems);
plan = planner.createPlan(queryContext, query);
assertThat(problems.hasErrors(), is(true));
}
@@ -137,7 +137,7 @@
public void shouldProduceErrorWhenSelectingNonExistantColumnOnExistingTable() {
schemata = schemataBuilder.addTable("someTable", "column1", "column2", "column3").build();
query = builder.select("column1", "column4").from("someTable").query();
- queryContext = new QueryContext(context, hints, schemata, problems);
+ queryContext = new QueryContext(context, schemata, hints, problems);
plan = planner.createPlan(queryContext, query);
assertThat(problems.hasErrors(), is(true));
}
@@ -146,7 +146,7 @@
public void shouldProducePlanWhenSelectingAllColumnsOnExistingTable() {
schemata = schemataBuilder.addTable("someTable", "column1", "column2", "column3").build();
query = builder.selectStar().from("someTable").query();
- queryContext = new QueryContext(context, hints, schemata, problems);
+ queryContext = new QueryContext(context, schemata, hints, problems);
plan = planner.createPlan(queryContext, query);
System.out.println(plan);
assertThat(problems.hasErrors(), is(false));
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/DistinctComponentTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/DistinctComponentTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/DistinctComponentTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -31,9 +31,6 @@
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.process.DistinctComponent;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
import org.jboss.dna.graph.query.validate.Schemata;
import org.junit.Before;
import org.junit.Test;
@@ -50,7 +47,7 @@
@Before
public void beforeEach() {
- context = new QueryContext(new ExecutionContext(), new PlanHints(), mock(Schemata.class));
+ context = new QueryContext(new ExecutionContext(), mock(Schemata.class));
inputTuples = new ArrayList<Object[]>();
// Define the columns for the results ...
columns = resultColumns("Selector1", "ColA", "ColB", "ColC");
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -37,9 +37,6 @@
import org.jboss.dna.graph.query.QueryResults.Statistics;
import org.jboss.dna.graph.query.model.Column;
import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.process.QueryResultColumns;
-import org.jboss.dna.graph.query.process.QueryResults;
import org.jboss.dna.graph.query.validate.Schemata;
import org.junit.Before;
import org.junit.Test;
@@ -66,7 +63,7 @@
@Before
public void beforeEach() {
MockitoAnnotations.initMocks(this);
- context = new QueryContext(executionContext, new PlanHints(), schemata);
+ context = new QueryContext(executionContext, schemata);
columnList = new ArrayList<Column>();
columnList.add(new Column(selector("table1"), name("colA"), "colA"));
columnList.add(new Column(selector("table1"), name("colB"), "colB"));
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortLocationsComponentTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortLocationsComponentTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortLocationsComponentTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -31,9 +31,6 @@
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.SortLocationsComponent;
import org.jboss.dna.graph.query.validate.Schemata;
import org.junit.Before;
import org.junit.Test;
@@ -50,7 +47,7 @@
@Before
public void beforeEach() {
- context = new QueryContext(new ExecutionContext(), new PlanHints(), mock(Schemata.class));
+ context = new QueryContext(new ExecutionContext(), mock(Schemata.class));
inputTuples = new ArrayList<Object[]>();
// Define the columns for the results ...
columns = resultColumns("Selector1", "ColA", "ColB", "ColC");
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortValuesComponentTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortValuesComponentTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/SortValuesComponentTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -32,7 +32,6 @@
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.QueryResults.Columns;
import org.jboss.dna.graph.query.model.Ordering;
-import org.jboss.dna.graph.query.plan.PlanHints;
import org.jboss.dna.graph.query.validate.Schemata;
import org.junit.Before;
import org.junit.Test;
@@ -56,7 +55,7 @@
columns = resultColumns("Selector1", "ColA", "ColB", "ColC");
schemata = schemataFor(columns, PropertyType.STRING, PropertyType.LONG, PropertyType.STRING);
// Define the context ...
- context = new QueryContext(new ExecutionContext(), new PlanHints(), schemata);
+ context = new QueryContext(new ExecutionContext(), schemata);
inputTuples = new ArrayList<Object[]>();
// And define the delegating component ...
delegate = new ProcessingComponent(context, columns) {
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java (from rev 1318, trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -0,0 +1,399 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.util.Version;
+import org.jboss.dna.common.text.NoOpEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.util.Logger;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
+import org.jboss.dna.graph.property.Binary;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.DateTimeFactory;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryEngine;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.optimize.Optimizer;
+import org.jboss.dna.graph.query.optimize.OptimizerRule;
+import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
+import org.jboss.dna.graph.query.plan.CanonicalPlanner;
+import org.jboss.dna.graph.query.plan.PlanHints;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.Planner;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.QueryProcessor;
+import org.jboss.dna.search.IndexRules.Rule;
+
+/**
+ * A simple {@link IndexStrategy} implementation that relies upon two separate indexes: one for the node content and a second
+ * one for paths and UUIDs.
+ */
+@ThreadSafe
+abstract class DualIndexStrategy implements IndexStrategy {
+
+ static class PathIndex {
+ public static final String PATH = "path";
+ public static final String UUID = "uuid";
+ }
+
+ static class ContentIndex {
+ public static final String UUID = PathIndex.UUID;
+ public static final String FULL_TEXT = "fts";
+ }
+
+ /**
+ * The number of results that should be returned when performing queries while deleting entire branches of content. The
+ * current value is {@value} .
+ */
+ protected static final int SIZE_OF_DELETE_BATCHES = 1000;
+
+ private ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
+ @Override
+ protected DateFormat initialValue() {
+ return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
+ }
+ };
+
+ /**
+ * Obtain an immutable {@link FieldSelector} instance that accesses the UUID field.
+ */
+ protected static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return PathIndex.UUID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+ }
+ };
+
+ private final IndexRules rules;
+ private final Logger logger;
+ private final QueryEngine queryEngine;
+
+ /**
+ * Create a new indexing strategy instance.
+ *
+ * @param rules the indexing rules that govern how properties are to be index; may not be null
+ */
+ protected DualIndexStrategy( IndexRules rules ) {
+ assert rules != null;
+ this.rules = rules;
+ this.logger = Logger.getLogger(getClass());
+ // Create the query engine ...
+ Planner planner = new CanonicalPlanner();
+ Optimizer optimizer = new RuleBasedOptimizer() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
+ * org.jboss.dna.graph.query.plan.PlanHints)
+ */
+ @Override
+ protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
+ PlanHints hints ) {
+ super.populateRuleStack(ruleStack, hints);
+ // Add any custom rules here, either at the front of the stack or at the end
+ }
+ };
+ QueryProcessor processor = new QueryProcessor() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.QueryContext,
+ * org.jboss.dna.graph.query.plan.PlanNode, org.jboss.dna.graph.query.QueryResults.Columns,
+ * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
+ */
+ @Override
+ protected ProcessingComponent createAccessComponent( QueryContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
+ return DualIndexStrategy.this.createAccessComponent((SearchContext)context, accessNode, resultColumns, analyzer);
+ }
+ };
+
+ this.queryEngine = new QueryEngine(planner, optimizer, processor);
+ }
+
+ protected abstract ProcessingComponent createAccessComponent( SearchContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer );
+
+ /**
+ * Utility method to obtain a {@link DateFormat} instance that can be used safely within a single thread.
+ *
+ * @return the date formatter; never null
+ */
+ protected final DateFormat dateFormatter() {
+ return dateFormatter.get();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexStrategy#getNamespaceEncoder()
+ */
+ public TextEncoder getNamespaceEncoder() {
+ return new NoOpEncoder();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexStrategy#getChangeCountForAutomaticOptimization()
+ */
+ public int getChangeCountForAutomaticOptimization() {
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexStrategy#createAnalyzer()
+ */
+ public Analyzer createAnalyzer() {
+ return new StandardAnalyzer(Version.LUCENE_CURRENT);
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Because this strategy uses multiple indexes, and since there's no correlation between the documents in those indexes, we
+ * need to perform the delete in multiple steps. First, we need to perform a query to find out which nodes exist below a
+ * certain path. Then, we need to delete those nodes from the paths index. Finally, we need to delete the corresponding
+ * documents in the content index that represent those same nodes.
+ * </p>
+ * <p>
+ * Since we don't know how many documents there will be, we perform these steps in batches, where each batch limits the number
+ * of results to a maximum number. We repeat batches as long as we find more results. This approach has the advantage that
+ * we'll never bring in a large number of results, and it allows us to delete the documents from the content node using a
+ * query.
+ * </p>
+ *
+ * @see org.jboss.dna.search.IndexStrategy#deleteBelow(Path, IndexContext)
+ */
+ public int deleteBelow( Path path,
+ IndexContext indexes ) throws IOException {
+ // Perform a query using the reader to find those nodes at/below the path ...
+ try {
+ IndexReader pathReader = indexes.getPathsReader();
+ IndexSearcher pathSearcher = new IndexSearcher(pathReader);
+ String pathStr = indexes.stringFactory().create(path) + "/";
+ PrefixQuery query = new PrefixQuery(new Term(PathIndex.PATH, pathStr));
+ int numberDeleted = 0;
+ while (true) {
+ // Execute the query and get the results ...
+ TopDocs results = pathSearcher.search(query, SIZE_OF_DELETE_BATCHES);
+ int numResultsInBatch = results.scoreDocs.length;
+ // Walk the results, delete the doc, and add to the query that we'll use against the content index ...
+ IndexReader contentReader = indexes.getContentReader();
+ for (ScoreDoc result : results.scoreDocs) {
+ int docId = result.doc;
+ // Find the UUID of the node ...
+ Document doc = pathReader.document(docId, UUID_FIELD_SELECTOR);
+ String uuid = doc.get(PathIndex.UUID);
+ // Delete the document from the paths index ...
+ pathReader.deleteDocument(docId);
+ // Delete the corresponding document from the content index ...
+ contentReader.deleteDocuments(new Term(ContentIndex.UUID, uuid));
+ }
+ numberDeleted += numResultsInBatch;
+ if (numResultsInBatch < SIZE_OF_DELETE_BATCHES) break;
+ }
+ indexes.commit();
+ return numberDeleted;
+ } catch (FileNotFoundException e) {
+ // There are no index files yet, so nothing to delete ...
+ return 0;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexStrategy#index(Node, IndexContext)
+ */
+ public void index( Node node,
+ IndexContext indexes ) throws IOException {
+ ValueFactory<String> strings = indexes.stringFactory();
+ Location location = node.getLocation();
+ UUID uuid = location.getUuid();
+ if (uuid == null) uuid = UUID.randomUUID();
+ Path path = location.getPath();
+ String pathStr = path.isRoot() ? "/" : strings.create(location.getPath()) + "/";
+ String uuidStr = uuid.toString();
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("indexing {0}", pathStr);
+ }
+
+ // Create a separate document for the path, which makes it easier to handle moves since the path can
+ // be changed without changing any other content fields ...
+ Document doc = new Document();
+ doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field(PathIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ indexes.getPathsWriter().addDocument(doc);
+
+ // Create the document for the content (properties) ...
+ doc = new Document();
+ doc.add(new Field(ContentIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ String stringValue = null;
+ StringBuilder fullTextSearchValue = null;
+ for (Property property : node.getProperties()) {
+ Name name = property.getName();
+ Rule rule = rules.getRule(name);
+ if (rule.isSkipped()) continue;
+ String nameString = strings.create(name);
+ if (rule.isDate()) {
+ DateTimeFactory dateFactory = indexes.dateFactory();
+ for (Object value : property) {
+ if (value == null) continue;
+ DateTime dateValue = dateFactory.create(value);
+ stringValue = dateFormatter().format(dateValue.toDate());
+ // Add a separate field for each property value ...
+ doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
+ // Dates are not added to the full-text search field (since this wouldn't make sense)
+ }
+ continue;
+ }
+ for (Object value : property) {
+ if (value == null) continue;
+ if (value instanceof Binary) {
+ // don't include binary values as individual fields but do include them in the full-text search ...
+ // TODO : add to full-text search ...
+ continue;
+ }
+ stringValue = strings.create(value);
+ // Add a separate field for each property value ...
+ doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
+ // And add to the full-text field ...
+ if (rule.isFullText()) {
+ if (fullTextSearchValue == null) {
+ fullTextSearchValue = new StringBuilder();
+ } else {
+ fullTextSearchValue.append(' ');
+ }
+ fullTextSearchValue.append(stringValue);
+ }
+ }
+ }
+ // Add the full-text-search field ...
+ if (fullTextSearchValue != null) {
+ doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO, Field.Index.ANALYZED));
+ }
+ indexes.getContentWriter().addDocument(doc);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexStrategy#search(String, int, int, IndexContext, List)
+ */
+ public void search( String fullTextString,
+ int maxResults,
+ int offset,
+ IndexContext indexes,
+ List<Location> results ) throws IOException, ParseException {
+ assert fullTextString != null;
+ assert fullTextString.length() > 0;
+ assert offset >= 0;
+ assert maxResults > 0;
+ assert indexes != null;
+ assert results != null;
+
+ // Parse the full-text search and search against the 'fts' field ...
+ QueryParser parser = new QueryParser(ContentIndex.FULL_TEXT, createAnalyzer());
+ Query query = parser.parse(fullTextString);
+ TopDocs docs = indexes.getContentSearcher().search(query, maxResults + offset);
+
+ // Collect the results ...
+ IndexReader contentReader = indexes.getContentReader();
+ IndexReader pathReader = indexes.getPathsReader();
+ IndexSearcher pathSearcher = indexes.getPathsSearcher();
+ ScoreDoc[] scoreDocs = docs.scoreDocs;
+ int numberOfResults = scoreDocs.length;
+ if (numberOfResults > offset) {
+ // There are enough results to satisfy the offset ...
+ for (int i = offset, num = scoreDocs.length; i != num; ++i) {
+ ScoreDoc result = scoreDocs[i];
+ int docId = result.doc;
+ // Find the UUID of the node (this UUID might be artificial, so we have to find the path) ...
+ Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
+ String uuid = doc.get(ContentIndex.UUID);
+ // Find the path for this node (is there a better way to do this than one search per UUID?) ...
+ TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.UUID, uuid)), 1);
+ if (pathDocs.scoreDocs.length < 1) {
+ // No path record found ...
+ continue;
+ }
+ Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
+ Path path = indexes.pathFactory().create(pathDoc.get(PathIndex.PATH));
+ // Now add the location ...
+ results.add(Location.create(path, UUID.fromString(uuid)));
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexStrategy#query(org.jboss.dna.search.SearchContext,
+ * org.jboss.dna.graph.query.model.QueryCommand)
+ */
+ public QueryResults query( SearchContext context,
+ QueryCommand query ) {
+ return this.queryEngine.execute(context, query);
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexStrategy.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -31,16 +31,17 @@
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
+import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.property.DateTimeFactory;
import org.jboss.dna.graph.property.PathFactory;
import org.jboss.dna.graph.property.ValueFactory;
/**
- * A set of index readers and writers.
+ * A context for working with the index readers and writers.
*/
@NotThreadSafe
-public final class IndexContext {
+final class IndexContext {
private final ExecutionContext context;
private final Directory pathsIndexDirectory;
@@ -278,4 +279,16 @@
if (runtimeError != null) throw runtimeError;
}
+ /**
+ * Create a copy of this index context, except that it uses the supplied execution context.
+ *
+ * @param context the new execution context that should be used in the copy
+ * @return the new context; never null
+ * @throws IllegalArgumentException if the context is null
+ */
+ public IndexContext with( ExecutionContext context ) {
+ CheckArg.isNotNull(context, "context");
+ return new IndexContext(context, pathsIndexDirectory, contentIndexDirectory, analyzer, overwrite, readOnly);
+ }
+
}
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java (from rev 1318, trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingRules.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -0,0 +1,628 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import net.jcip.annotations.Immutable;
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.graph.property.Name;
+
+/**
+ * The set of rules that dictate how properties should be indexed.
+ */
+@Immutable
+class IndexRules {
+
+ public static final int INDEX = 2 << 0;
+ public static final int ANALYZE = 2 << 1;
+ public static final int STORE = 2 << 2;
+ public static final int STORE_COMPRESSED = 2 << 3;
+ public static final int ANALYZED_WITHOUT_NORMS = 2 << 4;
+ public static final int FULL_TEXT = 2 << 5;
+ public static final int TREAT_AS_DATE = 2 << 6;
+
+ /**
+ * A single rule that dictates how a single property should be indexed.
+ *
+ * @see IndexRules#getRule(Name)
+ */
+ @Immutable
+ public static interface Rule {
+ boolean isIncluded();
+
+ boolean isSkipped();
+
+ boolean isAnalyzed();
+
+ boolean isAnalyzedWithoutNorms();
+
+ boolean isStored();
+
+ boolean isStoredCompressed();
+
+ boolean isFullText();
+
+ boolean isDate();
+
+ int getMask();
+
+ Field.Store getStoreOption();
+
+ Field.Index getIndexOption();
+ }
+
+ public static final Rule SKIP = new SkipRule();
+
+ @Immutable
+ protected static class SkipRule implements Rule {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#getMask()
+ */
+ public int getMask() {
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isAnalyzed()
+ */
+ public boolean isAnalyzed() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isAnalyzedWithoutNorms()
+ */
+ public boolean isAnalyzedWithoutNorms() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isFullText()
+ */
+ public boolean isFullText() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isIncluded()
+ */
+ public boolean isIncluded() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isSkipped()
+ */
+ public boolean isSkipped() {
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isStored()
+ */
+ public boolean isStored() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isStoredCompressed()
+ */
+ public boolean isStoredCompressed() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isDate()
+ */
+ public boolean isDate() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#getIndexOption()
+ */
+ public Index getIndexOption() {
+ return Field.Index.NO;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#getStoreOption()
+ */
+ public Store getStoreOption() {
+ return Field.Store.NO;
+ }
+ }
+
+ @Immutable
+ public static final class GeneralRule implements Rule {
+ private final int value;
+ private final Field.Store store;
+ private final Field.Index index;
+
+ protected GeneralRule( int value ) {
+ this.value = value;
+ this.index = isAnalyzed() ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED;
+ this.store = isStored() ? Field.Store.YES : Field.Store.NO;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#getMask()
+ */
+ public int getMask() {
+ return value;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isAnalyzed()
+ */
+ public boolean isAnalyzed() {
+ return (value & ANALYZE) == ANALYZE;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isAnalyzedWithoutNorms()
+ */
+ public boolean isAnalyzedWithoutNorms() {
+ return (value & ANALYZED_WITHOUT_NORMS) == ANALYZED_WITHOUT_NORMS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isFullText()
+ */
+ public boolean isFullText() {
+ return (value & FULL_TEXT) == FULL_TEXT;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isIncluded()
+ */
+ public boolean isIncluded() {
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isSkipped()
+ */
+ public boolean isSkipped() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isStored()
+ */
+ public boolean isStored() {
+ return (value & STORE) == STORE;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isStoredCompressed()
+ */
+ public boolean isStoredCompressed() {
+ return (value & STORE_COMPRESSED) == STORE_COMPRESSED;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#isDate()
+ */
+ public boolean isDate() {
+ return (value & TREAT_AS_DATE) == TREAT_AS_DATE;
+ }
+
+ protected Rule with( int options ) {
+ return createRule(value | options);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#getIndexOption()
+ */
+ public Index getIndexOption() {
+ return index;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexRules.Rule#getStoreOption()
+ */
+ public Store getStoreOption() {
+ return store;
+ }
+ }
+
+ private static final ConcurrentHashMap<Integer, Rule> CACHE = new ConcurrentHashMap<Integer, Rule>();
+
+ protected static Rule createRule( int value ) {
+ if (value <= 0) {
+ return SKIP;
+ }
+ Integer key = new Integer(value);
+ Rule rule = CACHE.get(key);
+ if (rule == null) {
+ Rule newRule = new GeneralRule(value);
+ rule = CACHE.putIfAbsent(value, newRule);
+ if (rule == null) rule = newRule;
+ }
+ return rule;
+ }
+
+ private final Map<Name, Rule> rulesByName;
+ private final Rule defaultRule;
+
+ protected IndexRules( Map<Name, Rule> rulesByName,
+ Rule defaultRule ) {
+ this.rulesByName = rulesByName;
+ this.defaultRule = defaultRule != null ? defaultRule : SKIP;
+ assert this.defaultRule != null;
+ }
+
+ /**
+ * Get the rule associated with the given property name.
+ *
+ * @param name the property name, or null if the default rule is to be returned
+ * @return the rule; never null
+ */
+ public Rule getRule( Name name ) {
+ Rule result = rulesByName.get(name);
+ return result != null ? result : this.defaultRule;
+ }
+
+ /**
+ * Return a new builder that can be used to create {@link IndexRules} objects.
+ *
+ * @return a builder; never null
+ */
+ public static Builder createBuilder() {
+ return new Builder(new HashMap<Name, Rule>());
+ }
+
+ /**
+ * Return a new builder that can be used to create {@link IndexRules} objects.
+ *
+ * @param initialRules the rules that the builder should start with
+ * @return a builder; never null
+ * @throws IllegalArgumentException if the initial rules reference is null
+ */
+ public static Builder createBuilder( IndexRules initialRules ) {
+ CheckArg.isNotNull(initialRules, "initialRules");
+ return new Builder(initialRules.rulesByName).defaultTo(initialRules.defaultRule);
+ }
+
+ /**
+ * A builder of immutable {@link IndexRules} objects.
+ */
+ @NotThreadSafe
+ public static class Builder {
+ private final Map<Name, Rule> rulesByName;
+ private Rule defaultRule;
+
+ Builder( Map<Name, Rule> rulesByName ) {
+ assert rulesByName != null;
+ this.rulesByName = rulesByName;
+ }
+
+ /**
+ * Set the default rules.
+ *
+ * @param rule the default rule to use
+ * @return this builder for convenience and method chaining; never null
+ * @throws IllegalArgumentException if the rule mask is negative
+ */
+ public Builder defaultTo( Rule rule ) {
+ CheckArg.isNotNull(rule, "rule");
+ defaultRule = rule;
+ return this;
+ }
+
+ /**
+ * Set the default rules.
+ *
+ * @param ruleMask the bitmask of rule to use
+ * @return this builder for convenience and method chaining; never null
+ * @throws IllegalArgumentException if the rule mask is negative
+ */
+ public Builder defaultTo( int ruleMask ) {
+ CheckArg.isNonNegative(ruleMask, "options");
+ if (ruleMask == 0) {
+ defaultRule = SKIP;
+ } else {
+ // Make sure the index flag is set ...
+ ruleMask |= INDEX;
+ defaultRule = createRule(ruleMask);
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be skipped from indexing.
+ *
+ * @param namesToIndex the names of the properties that are to be skipped
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder skip( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ rulesByName.put(name, SKIP);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Set the properties with the supplied names to use the supplied rules.
+ *
+ * @param ruleMask the bitmask of rules to use
+ * @param namesToIndex the names of the properties that are to be skipped
+ * @return this builder for convenience and method chaining; never null
+ * @throws IllegalArgumentException if the rule mask is negative
+ */
+ public Builder set( int ruleMask,
+ Name... namesToIndex ) {
+ CheckArg.isNonNegative(ruleMask, "options");
+ if (namesToIndex != null) {
+ if (ruleMask > 0) {
+ skip(namesToIndex);
+ } else {
+ // Make sure the index flag is set ...
+ ruleMask |= INDEX;
+ Rule rule = createRule(ruleMask);
+ for (Name name : namesToIndex) {
+ rulesByName.put(name, rule);
+ }
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to use the supplied rule mask. This does not remove any other rules for
+ * these properties.
+ *
+ * @param ruleMask the bitmask of rules to add
+ * @param namesToIndex the names of the properties that are to be skipped
+ * @return this builder for convenience and method chaining; never null
+ * @throws IllegalArgumentException if the rule mask is negative
+ */
+ public Builder add( int ruleMask,
+ Name... namesToIndex ) {
+ CheckArg.isNonNegative(ruleMask, "options");
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, ruleMask);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be indexed. This does not remove any other rules for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be indexed
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder index( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, INDEX);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be analyzed (and obviously indexed). This does not remove any other
+ * rules for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be analyzed
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder analyze( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, ANALYZE | INDEX);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be stored (and obviously indexed). This does not remove any other rules
+ * for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be stored
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder store( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, STORE | INDEX);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be included in full-text searches (and obviously indexed). This does not
+ * remove any other rules for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be included in full-text searches
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder fullText( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, FULL_TEXT | INDEX);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be treated as dates (and obviously indexed). This does not remove any
+ * other rules for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be included in full-text searches
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder treatAsDates( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, TREAT_AS_DATE | INDEX);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be indexed, analyzed and stored. This does not remove any other rules
+ * for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be indexed, analyzed and stored
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder analyzeAndStore( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, INDEX | ANALYZE | STORE);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be indexed, analyzed, stored and included in full-text searches. This
+ * does not remove any other rules for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be indexed, analyzed, stored and included in full-text
+ * searches
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder analyzeAndStoreAndFullText( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, INDEX | ANALYZE | STORE | FULL_TEXT);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be indexed, analyzed and included in full-text searches. This does not
+ * remove any other rules for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be indexed, analyzed and included in full-text searches
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder analyzeAndFullText( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, INDEX | ANALYZE | FULL_TEXT);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be indexed, stored and included in full-text searches. This does not
+ * remove any other rules for these properties.
+ *
+ * @param namesToIndex the names of the properties that are to be indexed, stored and included in full-text searches
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder storeAndFullText( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ add(name, INDEX | STORE | FULL_TEXT);
+ }
+ }
+ return this;
+ }
+
+ protected void add( Name name,
+ int option ) {
+ Rule rule = rulesByName.get(name);
+ if (rule != null) {
+ option |= rule.getMask();
+ }
+ rulesByName.put(name, createRule(option));
+ }
+
+ /**
+ * Build the indexing rules.
+ *
+ * @return the immutable indexing rules.
+ */
+ public IndexRules build() {
+ return new IndexRules(Collections.unmodifiableMap(new HashMap<Name, Rule>(rulesByName)), defaultRule);
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java (from rev 1318, trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -0,0 +1,133 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queryParser.ParseException;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.validate.Schemata;
+import org.jboss.dna.graph.request.ChangeRequest;
+
+/**
+ * Interface defining the behaviors associated with indexing graph content.
+ */
+interface IndexStrategy {
+
+ /**
+ * Get the number of changes that are allowed before optimization is automatically run.
+ *
+ * @return a positive number denoting the minimum number of changes between automatic optimization operations, or a
+ * non-positive number if automatic optimization should never be run
+ */
+ int getChangeCountForAutomaticOptimization();
+
+ /**
+ * Get the {@link TextEncoder} that should be used to encode the namespace URIs.
+ *
+ * @return the encoder; may not be null
+ */
+ TextEncoder getNamespaceEncoder();
+
+ /**
+ * Index the node given the index writers. Note that implementors should simply just use the writers to add documents to the
+ * index(es), and should never call any of the writer lifecycle methods (e.g., {@link IndexWriter#commit()},
+ * {@link IndexWriter#rollback()}, etc.).
+ *
+ * @param node the node to be indexed; never null
+ * @param indexes the set of index readers and writers; never null
+ * @throws IOException if there is a problem indexing or using the writers
+ */
+ void index( Node node,
+ IndexContext indexes ) throws IOException;
+
+ /**
+ * Update the indexes to reflect the supplied changes to the graph content. Note that implementors should simply just use the
+ * writers to add documents to the index(es), and should never call any of the writer lifecycle methods (e.g.,
+ * {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
+ *
+ * @param changes the set of changes to the content
+ * @param indexes the set of index readers and writers; never null
+ * @return the (approximate) number of nodes that were affected by the changes
+ * @throws IOException if there is a problem indexing or using the writers
+ */
+ int apply( Iterable<ChangeRequest> changes,
+ IndexContext indexes ) throws IOException;
+
+ /**
+ * Remove from the index(es) all of the information pertaining to the nodes at or below the supplied path. Note that
+ * implementors should simply just use the writers to add documents to the index(es), and should never call any of the writer
+ * lifecycle methods (e.g., {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
+ *
+ * @param path the path identifying the graph content that is to be removed; never null
+ * @param indexes the set of index readers and writers; never null
+ * @return the (approximate) number of nodes that were affected by the changes
+ * @throws IOException if there is a problem indexing or using the writers
+ */
+ int deleteBelow( Path path,
+ IndexContext indexes ) throws IOException;
+
+ /**
+ * Create the analyzer that is used for reading and updating the indexes.
+ *
+ * @return the analyzer; may not be null
+ */
+ Analyzer createAnalyzer();
+
+ /**
+ * Perform a full-text search given the supplied query.
+ *
+ * @param fullTextString the full-text query; never null or blank
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
+ * @param indexes the set of index readers and writers; never null
+ * @param results the list where the results should be accumulated; never null
+ * @throws IOException if there is a problem indexing or using the writers
+ * @throws ParseException if there is a problem parsing the query
+ */
+ void search( String fullTextString,
+ int maxResults,
+ int offset,
+ IndexContext indexes,
+ List<Location> results ) throws IOException, ParseException;
+
+ /**
+ * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model, with
+ * the {@link Schemata} that defines the tables and views that are available to the query, and the set of index readers (and
+ * writers) that should be used.
+ *
+ * @param context the context in which the query should be executed; never null
+ * @param query the query; never null
+ * @return the results of the query
+ */
+ QueryResults query( SearchContext context,
+ QueryCommand query );
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexStrategy.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingRules.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingRules.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingRules.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -1,628 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import net.jcip.annotations.Immutable;
-import net.jcip.annotations.NotThreadSafe;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.graph.property.Name;
-
-/**
- * The set of rules that dictate how properties should be indexed.
- */
-@Immutable
-public class IndexingRules {
-
- public static final int INDEX = 2 << 0;
- public static final int ANALYZE = 2 << 1;
- public static final int STORE = 2 << 2;
- public static final int STORE_COMPRESSED = 2 << 3;
- public static final int ANALYZED_WITHOUT_NORMS = 2 << 4;
- public static final int FULL_TEXT = 2 << 5;
- public static final int TREAT_AS_DATE = 2 << 6;
-
- /**
- * A single rule that dictates how a single property should be indexed.
- *
- * @see IndexingRules#getRule(Name)
- */
- @Immutable
- public static interface Rule {
- boolean isIncluded();
-
- boolean isSkipped();
-
- boolean isAnalyzed();
-
- boolean isAnalyzedWithoutNorms();
-
- boolean isStored();
-
- boolean isStoredCompressed();
-
- boolean isFullText();
-
- boolean isDate();
-
- int getMask();
-
- Field.Store getStoreOption();
-
- Field.Index getIndexOption();
- }
-
- public static final Rule SKIP = new SkipRule();
-
- @Immutable
- protected static class SkipRule implements Rule {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#getMask()
- */
- public int getMask() {
- return 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isAnalyzed()
- */
- public boolean isAnalyzed() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isAnalyzedWithoutNorms()
- */
- public boolean isAnalyzedWithoutNorms() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isFullText()
- */
- public boolean isFullText() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isIncluded()
- */
- public boolean isIncluded() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isSkipped()
- */
- public boolean isSkipped() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isStored()
- */
- public boolean isStored() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isStoredCompressed()
- */
- public boolean isStoredCompressed() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isDate()
- */
- public boolean isDate() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#getIndexOption()
- */
- public Index getIndexOption() {
- return Field.Index.NO;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#getStoreOption()
- */
- public Store getStoreOption() {
- return Field.Store.NO;
- }
- }
-
- @Immutable
- public static final class GeneralRule implements Rule {
- private final int value;
- private final Field.Store store;
- private final Field.Index index;
-
- protected GeneralRule( int value ) {
- this.value = value;
- this.index = isAnalyzed() ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED;
- this.store = isStored() ? Field.Store.YES : Field.Store.NO;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#getMask()
- */
- public int getMask() {
- return value;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isAnalyzed()
- */
- public boolean isAnalyzed() {
- return (value & ANALYZE) == ANALYZE;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isAnalyzedWithoutNorms()
- */
- public boolean isAnalyzedWithoutNorms() {
- return (value & ANALYZED_WITHOUT_NORMS) == ANALYZED_WITHOUT_NORMS;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isFullText()
- */
- public boolean isFullText() {
- return (value & FULL_TEXT) == FULL_TEXT;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isIncluded()
- */
- public boolean isIncluded() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isSkipped()
- */
- public boolean isSkipped() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isStored()
- */
- public boolean isStored() {
- return (value & STORE) == STORE;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isStoredCompressed()
- */
- public boolean isStoredCompressed() {
- return (value & STORE_COMPRESSED) == STORE_COMPRESSED;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#isDate()
- */
- public boolean isDate() {
- return (value & TREAT_AS_DATE) == TREAT_AS_DATE;
- }
-
- protected Rule with( int options ) {
- return createRule(value | options);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#getIndexOption()
- */
- public Index getIndexOption() {
- return index;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingRules.Rule#getStoreOption()
- */
- public Store getStoreOption() {
- return store;
- }
- }
-
- private static final ConcurrentHashMap<Integer, Rule> CACHE = new ConcurrentHashMap<Integer, Rule>();
-
- protected static Rule createRule( int value ) {
- if (value <= 0) {
- return SKIP;
- }
- Integer key = new Integer(value);
- Rule rule = CACHE.get(key);
- if (rule == null) {
- Rule newRule = new GeneralRule(value);
- rule = CACHE.putIfAbsent(value, newRule);
- if (rule == null) rule = newRule;
- }
- return rule;
- }
-
- private final Map<Name, Rule> rulesByName;
- private final Rule defaultRule;
-
- protected IndexingRules( Map<Name, Rule> rulesByName,
- Rule defaultRule ) {
- this.rulesByName = rulesByName;
- this.defaultRule = defaultRule != null ? defaultRule : SKIP;
- assert this.defaultRule != null;
- }
-
- /**
- * Get the rule associated with the given property name.
- *
- * @param name the property name, or null if the default rule is to be returned
- * @return the rule; never null
- */
- public Rule getRule( Name name ) {
- Rule result = rulesByName.get(name);
- return result != null ? result : this.defaultRule;
- }
-
- /**
- * Return a new builder that can be used to create {@link IndexingRules} objects.
- *
- * @return a builder; never null
- */
- public static Builder createBuilder() {
- return new Builder(new HashMap<Name, Rule>());
- }
-
- /**
- * Return a new builder that can be used to create {@link IndexingRules} objects.
- *
- * @param initialRules the rules that the builder should start with
- * @return a builder; never null
- * @throws IllegalArgumentException if the initial rules reference is null
- */
- public static Builder createBuilder( IndexingRules initialRules ) {
- CheckArg.isNotNull(initialRules, "initialRules");
- return new Builder(initialRules.rulesByName).defaultTo(initialRules.defaultRule);
- }
-
- /**
- * A builder of immutable {@link IndexingRules} objects.
- */
- @NotThreadSafe
- public static class Builder {
- private final Map<Name, Rule> rulesByName;
- private Rule defaultRule;
-
- Builder( Map<Name, Rule> rulesByName ) {
- assert rulesByName != null;
- this.rulesByName = rulesByName;
- }
-
- /**
- * Set the default rules.
- *
- * @param rule the default rule to use
- * @return this builder for convenience and method chaining; never null
- * @throws IllegalArgumentException if the rule mask is negative
- */
- public Builder defaultTo( Rule rule ) {
- CheckArg.isNotNull(rule, "rule");
- defaultRule = rule;
- return this;
- }
-
- /**
- * Set the default rules.
- *
- * @param ruleMask the bitmask of rule to use
- * @return this builder for convenience and method chaining; never null
- * @throws IllegalArgumentException if the rule mask is negative
- */
- public Builder defaultTo( int ruleMask ) {
- CheckArg.isNonNegative(ruleMask, "options");
- if (ruleMask == 0) {
- defaultRule = SKIP;
- } else {
- // Make sure the index flag is set ...
- ruleMask |= INDEX;
- defaultRule = createRule(ruleMask);
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be skipped from indexing.
- *
- * @param namesToIndex the names of the properties that are to be skipped
- * @return this builder for convenience and method chaining; never null
- */
- public Builder skip( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- rulesByName.put(name, SKIP);
- }
- }
- return this;
- }
-
- /**
- * Set the properties with the supplied names to use the supplied rules.
- *
- * @param ruleMask the bitmask of rules to use
- * @param namesToIndex the names of the properties that are to be skipped
- * @return this builder for convenience and method chaining; never null
- * @throws IllegalArgumentException if the rule mask is negative
- */
- public Builder set( int ruleMask,
- Name... namesToIndex ) {
- CheckArg.isNonNegative(ruleMask, "options");
- if (namesToIndex != null) {
- if (ruleMask > 0) {
- skip(namesToIndex);
- } else {
- // Make sure the index flag is set ...
- ruleMask |= INDEX;
- Rule rule = createRule(ruleMask);
- for (Name name : namesToIndex) {
- rulesByName.put(name, rule);
- }
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to use the supplied rule mask. This does not remove any other rules for
- * these properties.
- *
- * @param ruleMask the bitmask of rules to add
- * @param namesToIndex the names of the properties that are to be skipped
- * @return this builder for convenience and method chaining; never null
- * @throws IllegalArgumentException if the rule mask is negative
- */
- public Builder add( int ruleMask,
- Name... namesToIndex ) {
- CheckArg.isNonNegative(ruleMask, "options");
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, ruleMask);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be indexed. This does not remove any other rules for these properties.
- *
- * @param namesToIndex the names of the properties that are to be indexed
- * @return this builder for convenience and method chaining; never null
- */
- public Builder index( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, INDEX);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be analyzed (and obviously indexed). This does not remove any other
- * rules for these properties.
- *
- * @param namesToIndex the names of the properties that are to be analyzed
- * @return this builder for convenience and method chaining; never null
- */
- public Builder analyze( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, ANALYZE | INDEX);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be stored (and obviously indexed). This does not remove any other rules
- * for these properties.
- *
- * @param namesToIndex the names of the properties that are to be stored
- * @return this builder for convenience and method chaining; never null
- */
- public Builder store( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, STORE | INDEX);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be included in full-text searches (and obviously indexed). This does not
- * remove any other rules for these properties.
- *
- * @param namesToIndex the names of the properties that are to be included in full-text searches
- * @return this builder for convenience and method chaining; never null
- */
- public Builder fullText( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, FULL_TEXT | INDEX);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be treated as dates (and obviously indexed). This does not remove any
- * other rules for these properties.
- *
- * @param namesToIndex the names of the properties that are to be included in full-text searches
- * @return this builder for convenience and method chaining; never null
- */
- public Builder treatAsDates( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, TREAT_AS_DATE | INDEX);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be indexed, analyzed and stored. This does not remove any other rules
- * for these properties.
- *
- * @param namesToIndex the names of the properties that are to be indexed, analyzed and stored
- * @return this builder for convenience and method chaining; never null
- */
- public Builder analyzeAndStore( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, INDEX | ANALYZE | STORE);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be indexed, analyzed, stored and included in full-text searches. This
- * does not remove any other rules for these properties.
- *
- * @param namesToIndex the names of the properties that are to be indexed, analyzed, stored and included in full-text
- * searches
- * @return this builder for convenience and method chaining; never null
- */
- public Builder analyzeAndStoreAndFullText( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, INDEX | ANALYZE | STORE | FULL_TEXT);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be indexed, analyzed and included in full-text searches. This does not
- * remove any other rules for these properties.
- *
- * @param namesToIndex the names of the properties that are to be indexed, analyzed and included in full-text searches
- * @return this builder for convenience and method chaining; never null
- */
- public Builder analyzeAndFullText( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, INDEX | ANALYZE | FULL_TEXT);
- }
- }
- return this;
- }
-
- /**
- * Mark the properties with the supplied names to be indexed, stored and included in full-text searches. This does not
- * remove any other rules for these properties.
- *
- * @param namesToIndex the names of the properties that are to be indexed, stored and included in full-text searches
- * @return this builder for convenience and method chaining; never null
- */
- public Builder storeAndFullText( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- add(name, INDEX | STORE | FULL_TEXT);
- }
- }
- return this;
- }
-
- protected void add( Name name,
- int option ) {
- Rule rule = rulesByName.get(name);
- if (rule != null) {
- option |= rule.getMask();
- }
- rulesByName.put(name, createRule(option));
- }
-
- /**
- * Build the indexing rules.
- *
- * @return the immutable indexing rules.
- */
- public IndexingRules build() {
- return new IndexingRules(Collections.unmodifiableMap(new HashMap<Name, Rule>(rulesByName)), defaultRule);
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -1,126 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.List;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.queryParser.ParseException;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.ChangeRequest;
-
-/**
- * Interface defining the behaviors associated with indexing graph content.
- */
-public interface IndexingStrategy {
-
- int getChangeCountForAutomaticOptimization();
-
- TextEncoder getNamespaceEncoder();
-
- /**
- * Index the node given the index writers. Note that implementors should simply just use the writers to add documents to the
- * index(es), and should never call any of the writer lifecycle methods (e.g., {@link IndexWriter#commit()},
- * {@link IndexWriter#rollback()}, etc.).
- *
- * @param node the node to be indexed; never null
- * @param indexes the set of index readers and writers; never null
- * @throws IOException if there is a problem indexing or using the writers
- */
- void index( Node node,
- IndexContext indexes ) throws IOException;
-
- /**
- * Update the indexes to reflect the supplied changes to the graph content. Note that implementors should simply just use the
- * writers to add documents to the index(es), and should never call any of the writer lifecycle methods (e.g.,
- * {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
- *
- * @param changes the set of changes to the content
- * @param indexes the set of index readers and writers; never null
- * @return the (approximate) number of nodes that were affected by the changes
- * @throws IOException if there is a problem indexing or using the writers
- */
- int apply( Iterable<ChangeRequest> changes,
- IndexContext indexes ) throws IOException;
-
- /**
- * Remove from the index(es) all of the information pertaining to the nodes at or below the supplied path. Note that
- * implementors should simply just use the writers to add documents to the index(es), and should never call any of the writer
- * lifecycle methods (e.g., {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
- *
- * @param path the path identifying the graph content that is to be removed; never null
- * @param indexes the set of index readers and writers; never null
- * @return the (approximate) number of nodes that were affected by the changes
- * @throws IOException if there is a problem indexing or using the writers
- */
- int deleteBelow( Path path,
- IndexContext indexes ) throws IOException;
-
- /**
- * Create the analyzer that is used for reading and updating the indexes.
- *
- * @return the analyzer; may not be null
- */
- Analyzer createAnalyzer();
-
- /**
- * Perform a full-text search given the supplied query.
- *
- * @param fullTextString the full-text query; never null or blank
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @param indexes the set of index readers and writers; never null
- * @param results the list where the results should be accumulated; never null
- * @throws IOException if there is a problem indexing or using the writers
- * @throws ParseException if there is a problem parsing the query
- */
- void performQuery( String fullTextString,
- int maxResults,
- int offset,
- IndexContext indexes,
- List<Location> results ) throws IOException, ParseException;
-
- /**
- * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model, with
- * the {@link Schemata} that defines the tables and views that are available to the query, and the set of index readers (and
- * writers) that should be used.
- *
- * @param query the query; never null
- * @param schemata the definition of the tables used in the query; never null
- * @param indexes the set of index readers and writers; never null
- * @return the results of the query
- * @throws IOException if there is a problem indexing or using the writers
- * @throws ParseException if there is a problem parsing the query
- */
- QueryResults performQuery( QueryCommand query,
- Schemata schemata,
- IndexContext indexes ) throws IOException, ParseException;
-}
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -0,0 +1,115 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.util.List;
+import net.jcip.annotations.ThreadSafe;
+import org.jboss.dna.graph.DnaLexicon;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.process.AbstractAccessComponent;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.SelectComponent.Analyzer;
+import org.jboss.dna.graph.request.ChangeRequest;
+
+/**
+ * An {@link IndexStrategy} implementation that stores all content within a set of two indexes: one for the node content and a
+ * second one for paths and UUIDs.
+ */
+@ThreadSafe
+class KitchenSinkIndexStrategy extends DualIndexStrategy {
+
+ /**
+ * The default set of {@link IndexRules} used by {@link KitchenSinkIndexStrategy} instances when no rules are provided.
+ */
+ public static final IndexRules DEFAULT_RULES;
+
+ static {
+ IndexRules.Builder builder = IndexRules.createBuilder();
+ // Configure the default behavior ...
+ builder.defaultTo(IndexRules.INDEX | IndexRules.ANALYZE);
+ // Configure the UUID properties to be just indexed (not stored, not analyzed, not included in full-text) ...
+ builder.index(JcrLexicon.UUID, DnaLexicon.UUID);
+ // Configure the properties that we'll treat as dates ...
+ builder.treatAsDates(JcrLexicon.CREATED, JcrLexicon.LAST_MODIFIED);
+ DEFAULT_RULES = builder.build();
+ }
+
+ /**
+ * Create a new indexing strategy instance uses the {@link #DEFAULT_RULES default indexing rules}.
+ */
+ public KitchenSinkIndexStrategy() {
+ this(null);
+ }
+
+ /**
+ * Create a new indexing strategy instance.
+ *
+ * @param rules the indexing rules that govern how properties are to be index, or null if the {@link #DEFAULT_RULES default
+ * rules} are to be used
+ */
+ public KitchenSinkIndexStrategy( IndexRules rules ) {
+ super(rules != null ? rules : DEFAULT_RULES);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.DualIndexStrategy#createAccessComponent(org.jboss.dna.search.SearchContext,
+ * org.jboss.dna.graph.query.plan.PlanNode, org.jboss.dna.graph.query.QueryResults.Columns,
+ * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
+ */
+ @Override
+ protected ProcessingComponent createAccessComponent( final SearchContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ Analyzer analyzer ) {
+ // Create a processing component for this access query ...
+ return new AbstractAccessComponent(context, resultColumns, accessNode) {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.process.ProcessingComponent#execute()
+ */
+ @Override
+ public List<Object[]> execute() {
+ return null;
+ }
+ };
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.IndexStrategy#apply(Iterable, IndexContext)
+ */
+ public int apply( Iterable<ChangeRequest> changes,
+ IndexContext indexes ) /*throws IOException*/{
+ for (ChangeRequest change : changes) {
+ if (change != null) continue;
+ }
+ return 0;
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexStrategy.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -1,57 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.util.List;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-
-/**
- * A {@link ProcessingComponent} implementation that is used by the {@link LuceneQueryEngine.LuceneProcessor} to perform atomic
- * queries against the Lucene indexes.
- */
-class LuceneQueryComponent extends ProcessingComponent {
-
- private final PlanNode accessNode;
-
- LuceneQueryComponent( QueryContext context,
- Columns columns,
- PlanNode accessNode ) {
- super(context, columns);
- this.accessNode = accessNode;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.ProcessingComponent#execute()
- */
- @Override
- public List<Object[]> execute() {
- if (accessNode != null) return null;
- return null;
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -1,114 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import org.apache.lucene.queryParser.ParseException;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryEngine;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.optimize.Optimizer;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
-import org.jboss.dna.graph.query.plan.CanonicalPlanner;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.QueryProcessor;
-import org.jboss.dna.graph.query.process.SelectComponent.Analyzer;
-import org.jboss.dna.graph.query.validate.Schemata;
-
-/**
- *
- */
-class LuceneQueryEngine {
-
- private QueryEngine engine;
-
- public LuceneQueryEngine() {
- engine = new QueryEngine(new CanonicalPlanner(), new LuceneOptimizer(), new LuceneProcessor());
- }
-
- /**
- * Execute the supplied query by planning, optimizing, and then processing it.
- *
- * @param query the query that is to be executed
- * @param schemata the schemata that defines the tables used in the query
- * @param indexes the indexes that should be used to execute the query; never null
- * @return the query results; never null
- * @throws IllegalArgumentException if the context or query references are null
- * @throws IOException if there is a problem indexing or using the writers
- * @throws ParseException if there is a problem parsing the query
- */
- public QueryResults execute( QueryCommand query,
- Schemata schemata,
- IndexContext indexes ) throws IOException, ParseException {
- return engine.execute(indexes.context(), query, schemata, new PlanHints());
- }
-
- /**
- * An {@link Optimizer} implementation that specializes the {@link RuleBasedOptimizer} by using custom rules.
- */
- protected static class LuceneOptimizer extends RuleBasedOptimizer {
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
- * org.jboss.dna.graph.query.plan.PlanHints)
- */
- @Override
- protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
- PlanHints hints ) {
- super.populateRuleStack(ruleStack, hints);
- // Add any custom rules here, either at the front of the stack or at the end
- }
- }
-
- /**
- * A query processor that operates against Lucene indexes. All functionality is inherited from the {@link QueryProcessor},
- * except for the creation of the {@link ProcessingComponent} that does the low-level atomic queries (against the Lucene
- * indexes).
- */
- protected static class LuceneProcessor extends QueryProcessor {
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.QueryContext,
- * org.jboss.dna.graph.query.plan.PlanNode, org.jboss.dna.graph.query.QueryResults.Columns,
- * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- Analyzer analyzer ) {
- return new LuceneQueryComponent(context, resultColumns, accessNode);
- }
- }
-}
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -0,0 +1,161 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.util.Map;
+import org.jboss.dna.common.collection.Problems;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.plan.PlanHints;
+import org.jboss.dna.graph.query.validate.Schemata;
+
+/**
+ *
+ */
+class SearchContext extends QueryContext {
+
+ private final IndexContext indexes;
+
+ /**
+ * Create a new context for searching and querying.
+ *
+ * @param indexes the indexes that should be used
+ * @param schemata the definition of the tables available to this query
+ */
+ public SearchContext( IndexContext indexes,
+ Schemata schemata ) {
+ super(indexes.context(), schemata);
+ this.indexes = indexes;
+ assert this.indexes != null;
+ }
+
+ /**
+ * Create a new context for searching and querying.
+ *
+ * @param queryContext
+ * @param indexes
+ */
+ public SearchContext( QueryContext queryContext,
+ IndexContext indexes ) {
+ super(queryContext.getExecutionContext(), queryContext.getSchemata(), queryContext.getHints(),
+ queryContext.getProblems(), queryContext.getVariables());
+ this.indexes = indexes;
+ assert this.indexes != null;
+ }
+
+ /**
+ * Create a new context for searching and querying.
+ *
+ * @param context the execution context
+ * @param schemata the schemata
+ * @param hints the hints, or null if there are no hints
+ * @param problems the problems container, or null if a new problems container should be created
+ * @param variables the mapping of variables and values, or null if there are no such variables
+ * @throws IllegalArgumentException if the context or schmata are null
+ */
+ public SearchContext( IndexContext context,
+ Schemata schemata,
+ PlanHints hints,
+ Problems problems,
+ Map<String, Object> variables ) {
+ super(context.context(), schemata, hints, problems, variables);
+ this.indexes = context;
+ assert this.indexes != null;
+ }
+
+ /**
+ * Get the {@link IndexContext} for this query context.
+ *
+ * @return the index context; never null
+ */
+ public IndexContext getIndexes() {
+ return indexes;
+ }
+
+ /**
+ * Obtain a copy of this context, except that the copy uses the supplied index context.
+ *
+ * @param context the index context that should be used in the new query context
+ * @return the new context; never null
+ * @throws IllegalArgumentException if the index context reference is null
+ */
+ public SearchContext with( IndexContext context ) {
+ CheckArg.isNotNull(context, "context");
+ return new SearchContext(context, getSchemata(), getHints(), getProblems(), getVariables());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.graph.ExecutionContext)
+ */
+ @Override
+ public SearchContext with( ExecutionContext context ) {
+ CheckArg.isNotNull(context, "context");
+ return new SearchContext(indexes.with(context), getSchemata(), getHints(), getProblems(), getVariables());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.graph.query.validate.Schemata)
+ */
+ @Override
+ public SearchContext with( Schemata schemata ) {
+ CheckArg.isNotNull(schemata, "schemata");
+ return new SearchContext(indexes, schemata, getHints(), getProblems(), getVariables());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.graph.query.plan.PlanHints)
+ */
+ @Override
+ public SearchContext with( PlanHints hints ) {
+ CheckArg.isNotNull(hints, "hints");
+ return new SearchContext(indexes, getSchemata(), hints, getProblems(), getVariables());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.QueryContext#with(org.jboss.dna.common.collection.Problems)
+ */
+ @Override
+ public SearchContext with( Problems problems ) {
+ return new SearchContext(indexes, getSchemata(), getHints(), problems, getVariables());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.QueryContext#with(java.util.Map)
+ */
+ @Override
+ public SearchContext with( Map<String, Object> variables ) {
+ return new SearchContext(indexes, getSchemata(), getHints(), getProblems(), variables);
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchContext.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -61,7 +61,7 @@
private final String sourceName;
private final RepositoryConnectionFactory connectionFactory;
private final DirectoryConfiguration directoryFactory;
- private final IndexingStrategy indexingStrategy;
+ private final IndexStrategy indexStrategy;
private final PathFactory pathFactory;
@GuardedBy( "workspaceEngineLock" )
private final Map<String, WorkspaceSearchEngine> workspaceEnginesByName;
@@ -76,15 +76,12 @@
* @param sourceName the name of the {@link RepositorySource}
* @param connectionFactory the connection factory
* @param directoryFactory the factory for Lucene {@link Directory directories}
- * @param indexingStrategy the indexing strategy that governs how properties are to be indexed; or null if the default
- * strategy should be used
* @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
*/
public SearchEngine( ExecutionContext context,
String sourceName,
RepositoryConnectionFactory connectionFactory,
- DirectoryConfiguration directoryFactory,
- IndexingStrategy indexingStrategy ) {
+ DirectoryConfiguration directoryFactory ) {
CheckArg.isNotNull(context, "context");
CheckArg.isNotNull(sourceName, "sourceName");
CheckArg.isNotNull(connectionFactory, "connectionFactory");
@@ -95,7 +92,7 @@
this.context = context;
this.pathFactory = context.getValueFactories().getPathFactory();
this.workspaceEnginesByName = new HashMap<String, WorkspaceSearchEngine>();
- this.indexingStrategy = indexingStrategy != null ? indexingStrategy : new StoreLittleIndexingStrategy();
+ this.indexStrategy = new KitchenSinkIndexStrategy();
}
/**
@@ -153,7 +150,7 @@
engine = workspaceEnginesByName.get(workspaceName);
if (engine == null) {
// Create the engine and register it ...
- engine = new WorkspaceSearchEngine(context, directoryFactory, indexingStrategy, sourceName, workspaceName,
+ engine = new WorkspaceSearchEngine(context, directoryFactory, indexStrategy, sourceName, workspaceName,
connectionFactory);
workspaceEnginesByName.put(workspaceName, engine);
}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -1,372 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.List;
-import java.util.UUID;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.util.Version;
-import org.jboss.dna.common.text.NoOpEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.common.util.Logger;
-import org.jboss.dna.graph.DnaLexicon;
-import org.jboss.dna.graph.JcrLexicon;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.property.Binary;
-import org.jboss.dna.graph.property.DateTime;
-import org.jboss.dna.graph.property.DateTimeFactory;
-import org.jboss.dna.graph.property.Name;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.Property;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.ChangeRequest;
-import org.jboss.dna.search.IndexingRules.Rule;
-
-/**
- * A simple {@link IndexingStrategy} implementation that relies upon very few fields to be stored in the indexes.
- */
-@ThreadSafe
-class StoreLittleIndexingStrategy implements IndexingStrategy {
-
- static class PathIndex {
- public static final String PATH = "path";
- public static final String UUID = "uuid";
- }
-
- static class ContentIndex {
- public static final String UUID = PathIndex.UUID;
- public static final String FULL_TEXT = "fts";
- }
-
- public static final int SIZE_OF_DELETE_BATCHES = 100;
-
- private ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
- @Override
- protected DateFormat initialValue() {
- return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
- }
- };
-
- private static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- return PathIndex.UUID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
- }
- };
-
- /**
- * The default set of {@link IndexingRules} used by {@link StoreLittleIndexingStrategy} instances when no rules are provided.
- */
- public static final IndexingRules DEFAULT_RULES;
-
- static {
- IndexingRules.Builder builder = IndexingRules.createBuilder();
- // Configure the default behavior ...
- builder.defaultTo(IndexingRules.INDEX | IndexingRules.ANALYZE);
- // Configure the UUID properties to be just indexed (not stored, not analyzed, not included in full-text) ...
- builder.index(JcrLexicon.UUID, DnaLexicon.UUID);
- // Configure the properties that we'll treat as dates ...
- builder.treatAsDates(JcrLexicon.CREATED, JcrLexicon.LAST_MODIFIED);
- DEFAULT_RULES = builder.build();
- }
-
- private final IndexingRules rules;
- private final Logger logger;
- private final LuceneQueryEngine queryEngine;
-
- /**
- * Create a new indexing strategy instance that does not support queries.
- */
- public StoreLittleIndexingStrategy() {
- this(null);
- }
-
- /**
- * Create a new indexing strategy instance.
- *
- * @param rules the indexing rules that govern how properties are to be index, or null if the {@link #DEFAULT_RULES default
- * rules} are to be used
- */
- public StoreLittleIndexingStrategy( IndexingRules rules ) {
- this.rules = rules != null ? rules : DEFAULT_RULES;
- this.logger = Logger.getLogger(getClass());
- this.queryEngine = new LuceneQueryEngine();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingStrategy#getNamespaceEncoder()
- */
- public TextEncoder getNamespaceEncoder() {
- return new NoOpEncoder();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingStrategy#getChangeCountForAutomaticOptimization()
- */
- public int getChangeCountForAutomaticOptimization() {
- return 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingStrategy#createAnalyzer()
- */
- public Analyzer createAnalyzer() {
- return new StandardAnalyzer(Version.LUCENE_CURRENT);
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * Because this strategy uses multiple indexes, and since there's no correlation between the documents in those indexes, we
- * need to perform the delete in multiple steps. First, we need to perform a query to find out which nodes exist below a
- * certain path. Then, we need to delete those nodes from the paths index. Finally, we need to delete the corresponding
- * documents in the content index that represent those same nodes.
- * </p>
- * <p>
- * Since we don't know how many documents there will be, we perform these steps in batches, where each batch limits the number
- * of results to a maximum number. We repeat batches as long as we find more results. This approach has the advantage that
- * we'll never bring in a large number of results, and it allows us to delete the documents from the content node using a
- * query.
- * </p>
- *
- * @see org.jboss.dna.search.IndexingStrategy#deleteBelow(Path, IndexContext)
- */
- public int deleteBelow( Path path,
- IndexContext indexes ) throws IOException {
- // Perform a query using the reader to find those nodes at/below the path ...
- try {
- IndexReader pathReader = indexes.getPathsReader();
- IndexSearcher pathSearcher = new IndexSearcher(pathReader);
- String pathStr = indexes.stringFactory().create(path) + "/";
- PrefixQuery query = new PrefixQuery(new Term(PathIndex.PATH, pathStr));
- int numberDeleted = 0;
- while (true) {
- // Execute the query and get the results ...
- TopDocs results = pathSearcher.search(query, SIZE_OF_DELETE_BATCHES);
- int numResultsInBatch = results.scoreDocs.length;
- // Walk the results, delete the doc, and add to the query that we'll use against the content index ...
- IndexReader contentReader = indexes.getContentReader();
- for (ScoreDoc result : results.scoreDocs) {
- int docId = result.doc;
- // Find the UUID of the node ...
- Document doc = pathReader.document(docId, UUID_FIELD_SELECTOR);
- String uuid = doc.get(PathIndex.UUID);
- // Delete the document from the paths index ...
- pathReader.deleteDocument(docId);
- // Delete the corresponding document from the content index ...
- contentReader.deleteDocuments(new Term(ContentIndex.UUID, uuid));
- }
- numberDeleted += numResultsInBatch;
- if (numResultsInBatch < SIZE_OF_DELETE_BATCHES) break;
- }
- indexes.commit();
- return numberDeleted;
- } catch (FileNotFoundException e) {
- // There are no index files yet, so nothing to delete ...
- return 0;
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingStrategy#index(Node, IndexContext)
- */
- public void index( Node node,
- IndexContext indexes ) throws IOException {
- ValueFactory<String> strings = indexes.stringFactory();
- Location location = node.getLocation();
- UUID uuid = location.getUuid();
- if (uuid == null) uuid = UUID.randomUUID();
- Path path = location.getPath();
- String pathStr = path.isRoot() ? "/" : strings.create(location.getPath()) + "/";
- String uuidStr = uuid.toString();
-
- if (logger.isTraceEnabled()) {
- logger.trace("indexing {0}", pathStr);
- }
-
- // Create a separate document for the path, which makes it easier to handle moves since the path can
- // be changed without changing any other content fields ...
- Document doc = new Document();
- doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field(PathIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- indexes.getPathsWriter().addDocument(doc);
-
- // Create the document for the content (properties) ...
- doc = new Document();
- doc.add(new Field(ContentIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- String stringValue = null;
- StringBuilder fullTextSearchValue = null;
- for (Property property : node.getProperties()) {
- Name name = property.getName();
- Rule rule = rules.getRule(name);
- if (rule.isSkipped()) continue;
- String nameString = strings.create(name);
- if (rule.isDate()) {
- DateTimeFactory dateFactory = indexes.dateFactory();
- for (Object value : property) {
- if (value == null) continue;
- DateTime dateValue = dateFactory.create(value);
- stringValue = dateFormatter.get().format(dateValue.toDate());
- // Add a separate field for each property value ...
- doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
- // Dates are not added to the full-text search field (since this wouldn't make sense)
- }
- continue;
- }
- for (Object value : property) {
- if (value == null) continue;
- if (value instanceof Binary) {
- // don't include binary values as individual fields but do include them in the full-text search ...
- // TODO : add to full-text search ...
- continue;
- }
- stringValue = strings.create(value);
- // Add a separate field for each property value ...
- doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
- // And add to the full-text field ...
- if (rule.isFullText()) {
- if (fullTextSearchValue == null) {
- fullTextSearchValue = new StringBuilder();
- } else {
- fullTextSearchValue.append(' ');
- }
- fullTextSearchValue.append(stringValue);
- }
- }
- }
- // Add the full-text-search field ...
- if (fullTextSearchValue != null) {
- doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO, Field.Index.ANALYZED));
- }
- indexes.getContentWriter().addDocument(doc);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingStrategy#performQuery(String, int, int, IndexContext, List)
- */
- public void performQuery( String fullTextString,
- int maxResults,
- int offset,
- IndexContext indexes,
- List<Location> results ) throws IOException, ParseException {
- assert fullTextString != null;
- assert fullTextString.length() > 0;
- assert offset >= 0;
- assert maxResults > 0;
- assert indexes != null;
- assert results != null;
-
- // Parse the full-text search and search against the 'fts' field ...
- QueryParser parser = new QueryParser(ContentIndex.FULL_TEXT, createAnalyzer());
- Query query = parser.parse(fullTextString);
- TopDocs docs = indexes.getContentSearcher().search(query, maxResults + offset);
-
- // Collect the results ...
- IndexReader contentReader = indexes.getContentReader();
- IndexReader pathReader = indexes.getPathsReader();
- IndexSearcher pathSearcher = indexes.getPathsSearcher();
- ScoreDoc[] scoreDocs = docs.scoreDocs;
- int numberOfResults = scoreDocs.length;
- if (numberOfResults > offset) {
- // There are enough results to satisfy the offset ...
- for (int i = offset, num = scoreDocs.length; i != num; ++i) {
- ScoreDoc result = scoreDocs[i];
- int docId = result.doc;
- // Find the UUID of the node (this UUID might be artificial, so we have to find the path) ...
- Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
- String uuid = doc.get(ContentIndex.UUID);
- // Find the path for this node (is there a better way to do this than one search per UUID?) ...
- TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.UUID, uuid)), 1);
- if (pathDocs.scoreDocs.length < 1) {
- // No path record found ...
- continue;
- }
- Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
- Path path = indexes.pathFactory().create(pathDoc.get(PathIndex.PATH));
- // Now add the location ...
- results.add(Location.create(path, UUID.fromString(uuid)));
- }
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingStrategy#performQuery(org.jboss.dna.graph.query.model.QueryCommand,
- * org.jboss.dna.graph.query.validate.Schemata, org.jboss.dna.search.IndexContext)
- */
- public QueryResults performQuery( QueryCommand query,
- Schemata schemata,
- IndexContext indexes ) throws IOException, ParseException {
- return this.queryEngine.execute(query, schemata, indexes);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexingStrategy#apply(Iterable, IndexContext)
- */
- public int apply( Iterable<ChangeRequest> changes,
- IndexContext indexes ) /*throws IOException*/{
- for (ChangeRequest change : changes) {
- if (change != null) continue;
- }
- return 0;
- }
-}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -55,7 +55,7 @@
* A search engine dedicated to a single workspace.
*/
@ThreadSafe
-public class WorkspaceSearchEngine {
+class WorkspaceSearchEngine {
protected static final String PATHS_INDEX_NAME = "paths";
protected static final String CONTENT_INDEX_NAME = "content";
@@ -67,7 +67,7 @@
private final String sourceName;
private final String workspaceName;
private final RepositoryConnectionFactory connectionFactory;
- private final IndexingStrategy indexingStrategy;
+ private final IndexStrategy indexingStrategy;
protected final AtomicInteger modifiedNodesSinceLastOptimize = new AtomicInteger(0);
/**
@@ -86,7 +86,7 @@
*/
protected WorkspaceSearchEngine( ExecutionContext context,
DirectoryConfiguration directoryFactory,
- IndexingStrategy indexingStrategy,
+ IndexStrategy indexingStrategy,
String sourceName,
String workspaceName,
RepositoryConnectionFactory connectionFactory ) throws SearchEngineException {
@@ -168,7 +168,7 @@
return context.getValueFactories().getStringFactory().create(path);
}
- final IndexingStrategy strategy() {
+ final IndexStrategy strategy() {
return indexingStrategy;
}
@@ -523,7 +523,7 @@
final List<Location> results = new ArrayList<Location>(maxResults);
return new Search() {
public void execute( IndexContext indexes ) throws IOException, ParseException {
- strategy().performQuery(fullTextSearch, maxResults, offset, indexes, results);
+ strategy().search(fullTextSearch, maxResults, offset, indexes, results);
}
public String messageFor( Throwable error ) {
@@ -550,9 +550,11 @@
final Schemata schemata ) {
return new Query() {
private QueryResults results = null;
+ private SearchContext context = null;
- public void execute( IndexContext indexes ) throws IOException, ParseException {
- results = strategy().performQuery(query, schemata, indexes);
+ public void execute( IndexContext indexes ) {
+ context = new SearchContext(indexes, schemata);
+ results = strategy().query(context, query);
}
public String messageFor( Throwable error ) {
Modified: trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -25,7 +25,7 @@
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
-import org.jboss.dna.search.IndexingRules.Builder;
+import org.jboss.dna.search.IndexRules.Builder;
import org.junit.Before;
import org.junit.Test;
@@ -35,23 +35,23 @@
public class IndexingRulesTest {
private Builder builder;
- private IndexingRules rules;
+ private IndexRules rules;
@Before
public void beforeEach() {
- builder = IndexingRules.createBuilder();
+ builder = IndexRules.createBuilder();
rules = builder.build();
}
@Test
public void shouldBuildValidRulesFromBuilderThatIsNotInvoked() {
- builder = IndexingRules.createBuilder();
+ builder = IndexRules.createBuilder();
rules = builder.build();
}
@Test
public void shouldBuildValidRulesFromBuilderAfterJustSettingDefaultRules() {
- builder.defaultTo(IndexingRules.FULL_TEXT);
+ builder.defaultTo(IndexRules.FULL_TEXT);
rules = builder.build();
assertThat(rules.getRule(null).isFullText(), is(true));
}
Modified: trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -48,7 +48,6 @@
private InMemoryRepositorySource source;
private RepositoryConnectionFactory connectionFactory;
private DirectoryConfiguration directoryFactory;
- private IndexingStrategy indexingStrategy;
private Graph content;
@Before
@@ -81,15 +80,9 @@
}
};
- // Set up the indexing strategy ...
- IndexingRules rules = IndexingRules.createBuilder(StoreLittleIndexingStrategy.DEFAULT_RULES)
- .defaultTo(IndexingRules.INDEX | IndexingRules.ANALYZE | IndexingRules.FULL_TEXT)
- .build();
- indexingStrategy = new StoreLittleIndexingStrategy(rules);
-
// Now set up the search engine ...
directoryFactory = DirectoryConfigurations.inMemory();
- engine = new SearchEngine(context, sourceName, connectionFactory, directoryFactory, indexingStrategy);
+ engine = new SearchEngine(context, sourceName, connectionFactory, directoryFactory);
}
protected Path path( String string ) {
Modified: trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java 2009-11-16 23:23:02 UTC (rev 1318)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java 2009-11-16 23:24:06 UTC (rev 1319)
@@ -47,7 +47,7 @@
private InMemoryRepositorySource source;
private RepositoryConnectionFactory connectionFactory;
private DirectoryConfiguration directoryFactory;
- private IndexingStrategy indexingStrategy;
+ private IndexStrategy indexingStrategy;
private Graph content;
@Before
@@ -74,10 +74,10 @@
};
// Set up the indexing strategy ...
- IndexingRules rules = IndexingRules.createBuilder(StoreLittleIndexingStrategy.DEFAULT_RULES)
- .defaultTo(IndexingRules.INDEX | IndexingRules.ANALYZE | IndexingRules.FULL_TEXT)
+ IndexRules rules = IndexRules.createBuilder(KitchenSinkIndexStrategy.DEFAULT_RULES)
+ .defaultTo(IndexRules.INDEX | IndexRules.ANALYZE | IndexRules.FULL_TEXT)
.build();
- indexingStrategy = new StoreLittleIndexingStrategy(rules);
+ indexingStrategy = new KitchenSinkIndexStrategy(rules);
// Now set up the search engine ...
directoryFactory = DirectoryConfigurations.inMemory();
14 years, 5 months
DNA SVN: r1318 - in trunk: dna-graph/src/main/java/org/jboss/dna/graph/query/validate and 2 other directories.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-16 18:23:02 -0500 (Mon, 16 Nov 2009)
New Revision: 1318
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java
trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java
trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java
Log:
DNA-467 Changed how the Queryable and QueryEngine use Schemata. Before, the schemata instance was passed into the engine, and used for all queries. But this meant that the schemata instance could change the Table objects it returns. This would be more difficult to implement than if a different Schemata were passed with each query, since that Schemata instance can be a reflection of the tables at the time the query is implemented. And, Schemata can then be made to be immutable, not only simplifying the implementation but also ensuring that the schema information doesn't change during the processing of a query.
So, the Queryable and QueryEngine were changed to always pass a Schemata in with each QueryCommand. This rippled down to the SearchEngine, but this also cleaned things up a bit there, too.
It also allows different Schemata instances to be used for different languages (if that would ever make sense).
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -54,10 +54,20 @@
@ThreadSafe
public class QueryEngine implements Queryable {
+ /**
+ * A {@link Schemata} implementation that always returns null, meaning the table does not exist.
+ */
+ private static final Schemata DEFAULT_SCHEMATA = new Schemata() {
+ public Table getTable( SelectorName name ) {
+ // This won't allow the query engine to do anything (or much of anything),
+ // but it is legal and will result in meaningful problems
+ return null;
+ }
+ };
+
private final Planner planner;
private final Optimizer optimizer;
private final Processor processor;
- private final Schemata schemata;
/**
* Create a new query engine given the {@link Planner planner}, {@link Optimizer optimizer}, {@link Processor processor}, and
@@ -68,49 +78,43 @@
* @param optimizer the optimizer that should be used to optimize the canonical query plan; may be null if the
* {@link RuleBasedOptimizer} should be used
* @param processor the processor implementation that should be used to process the planned query and return the results
- * @param schemata the schemata implementation, or null if an empty schema should be used (resulting in errors when named
- * tables are queried)
* @throws IllegalArgumentException if the processor reference is null
*/
public QueryEngine( Planner planner,
Optimizer optimizer,
- Processor processor,
- Schemata schemata ) {
+ Processor processor ) {
CheckArg.isNotNull(processor, "processor");
this.planner = planner != null ? planner : new CanonicalPlanner();
this.optimizer = optimizer != null ? optimizer : new RuleBasedOptimizer();
this.processor = processor;
- this.schemata = schemata != null ? schemata : new Schemata() {
- public Table getTable( SelectorName name ) {
- // This won't allow the query engine to do anything (or much of anything),
- // but it is legal and will result in meaningful problems
- return null;
- }
- };
}
/**
* {@inheritDoc}
*
* @see org.jboss.dna.graph.query.Queryable#execute(org.jboss.dna.graph.ExecutionContext,
- * org.jboss.dna.graph.query.model.QueryCommand)
+ * org.jboss.dna.graph.query.model.QueryCommand, org.jboss.dna.graph.query.validate.Schemata)
*/
public QueryResults execute( ExecutionContext context,
- QueryCommand query ) {
- return execute(context, query, new PlanHints());
+ QueryCommand query,
+ Schemata schemata ) {
+ return execute(context, query, schemata, new PlanHints());
}
/**
* {@inheritDoc}
*
* @see org.jboss.dna.graph.query.Queryable#execute(org.jboss.dna.graph.ExecutionContext,
- * org.jboss.dna.graph.query.model.QueryCommand, org.jboss.dna.graph.query.plan.PlanHints)
+ * org.jboss.dna.graph.query.model.QueryCommand, org.jboss.dna.graph.query.validate.Schemata,
+ * org.jboss.dna.graph.query.plan.PlanHints)
*/
public QueryResults execute( ExecutionContext context,
QueryCommand query,
+ Schemata schemata,
PlanHints hints ) {
CheckArg.isNotNull(context, "context");
CheckArg.isNotNull(query, "query");
+ if (schemata == null) schemata = DEFAULT_SCHEMATA;
QueryContext queryContext = new QueryContext(context, hints, schemata);
// Create the canonical plan ...
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/Queryable.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -26,6 +26,7 @@
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.query.model.QueryCommand;
import org.jboss.dna.graph.query.plan.PlanHints;
+import org.jboss.dna.graph.query.validate.Schemata;
/**
* An interface defining the ability to submit a query and obtain results.
@@ -37,22 +38,26 @@
*
* @param context the context in which the query should be executed
* @param query the query that is to be executed
+ * @param schemata the schemata that should be used to validate the query
* @return the query results; never null
* @throws IllegalArgumentException if the context or query references are null
*/
QueryResults execute( ExecutionContext context,
- QueryCommand query );
+ QueryCommand query,
+ Schemata schemata );
/**
* Execute the supplied query by planning, optimizing, and then processing it.
*
* @param context the context in which the query should be executed
* @param query the query that is to be executed
+ * @param schemata the schemata that should be used to validate the query
* @param hints the hints for the execution; may be null if there are no hints
* @return the query results; never null
* @throws IllegalArgumentException if the context or query references are null
*/
QueryResults execute( ExecutionContext context,
QueryCommand query,
+ Schemata schemata,
PlanHints hints );
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/validate/Schemata.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -27,6 +27,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import net.jcip.annotations.Immutable;
import org.jboss.dna.graph.property.PropertyType;
import org.jboss.dna.graph.query.model.QueryCommand;
import org.jboss.dna.graph.query.model.SelectorName;
@@ -34,13 +35,13 @@
/**
* The interface used to access the structure being queried and validate a query.
*/
+@Immutable
public interface Schemata {
/**
* Get the information for the table or view with the supplied name within this schema.
* <p>
- * The resulting definition is immutable, though subsequent calls to this method with the same argument may result in a
- * different definition.
+ * The resulting definition is immutable.
* </p>
*
* @param name the table or view name; may not be null
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexContext.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -40,7 +40,7 @@
* A set of index readers and writers.
*/
@NotThreadSafe
-final class IndexContext {
+public final class IndexContext {
private final ExecutionContext context;
private final Directory pathsIndexDirectory;
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexingStrategy.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -34,12 +34,13 @@
import org.jboss.dna.graph.property.Path;
import org.jboss.dna.graph.query.QueryResults;
import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.validate.Schemata;
import org.jboss.dna.graph.request.ChangeRequest;
/**
* Interface defining the behaviors associated with indexing graph content.
*/
-interface IndexingStrategy {
+public interface IndexingStrategy {
int getChangeCountForAutomaticOptimization();
@@ -108,14 +109,18 @@
List<Location> results ) throws IOException, ParseException;
/**
- * Perform a query of the content.
+ * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model, with
+ * the {@link Schemata} that defines the tables and views that are available to the query, and the set of index readers (and
+ * writers) that should be used.
*
* @param query the query; never null
+ * @param schemata the definition of the tables used in the query; never null
* @param indexes the set of index readers and writers; never null
* @return the results of the query
* @throws IOException if there is a problem indexing or using the writers
* @throws ParseException if there is a problem parsing the query
*/
QueryResults performQuery( QueryCommand query,
+ Schemata schemata,
IndexContext indexes ) throws IOException, ParseException;
}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -49,14 +49,15 @@
private QueryEngine engine;
- public LuceneQueryEngine( Schemata schemata ) {
- engine = new QueryEngine(new CanonicalPlanner(), new LuceneOptimizer(), new LuceneProcessor(), schemata);
+ public LuceneQueryEngine() {
+ engine = new QueryEngine(new CanonicalPlanner(), new LuceneOptimizer(), new LuceneProcessor());
}
/**
* Execute the supplied query by planning, optimizing, and then processing it.
*
* @param query the query that is to be executed
+ * @param schemata the schemata that defines the tables used in the query
* @param indexes the indexes that should be used to execute the query; never null
* @return the query results; never null
* @throws IllegalArgumentException if the context or query references are null
@@ -64,8 +65,9 @@
* @throws ParseException if there is a problem parsing the query
*/
public QueryResults execute( QueryCommand query,
+ Schemata schemata,
IndexContext indexes ) throws IOException, ParseException {
- return engine.execute(indexes.context(), query, new PlanHints());
+ return engine.execute(indexes.context(), query, schemata, new PlanHints());
}
/**
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -45,6 +45,7 @@
import org.jboss.dna.graph.property.PathFactory;
import org.jboss.dna.graph.query.QueryResults;
import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.validate.Schemata;
import org.jboss.dna.graph.request.ChangeRequest;
import org.jboss.dna.graph.request.InvalidWorkspaceException;
@@ -293,13 +294,17 @@
*
* @param workspaceName the name of the workspace
* @param query the query that is to be executed, in the form of the Abstract Query Model
+ * @param schemata the definition of the tables and views that can be used in the query; may not be null
* @return the query results; never null
- * @throws IllegalArgumentException if the context or query references are null
+ * @throws IllegalArgumentException if the context, query, or schemata references are null
*/
public QueryResults execute( String workspaceName,
- QueryCommand query ) {
+ QueryCommand query,
+ Schemata schemata ) {
CheckArg.isNotNull(workspaceName, "workspaceName");
- return getWorkspaceEngine(workspaceName).execute(query);
+ CheckArg.isNotNull(query, "query");
+ CheckArg.isNotNull(schemata, "schemata");
+ return getWorkspaceEngine(workspaceName).execute(query, schemata);
}
}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/StoreLittleIndexingStrategy.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -124,22 +124,19 @@
* Create a new indexing strategy instance that does not support queries.
*/
public StoreLittleIndexingStrategy() {
- this(null, null);
+ this(null);
}
/**
* Create a new indexing strategy instance.
*
- * @param schemata the schemata that defines the structure that can be queried; may be null if queries are not going to be
- * used
* @param rules the indexing rules that govern how properties are to be index, or null if the {@link #DEFAULT_RULES default
* rules} are to be used
*/
- public StoreLittleIndexingStrategy( Schemata schemata,
- IndexingRules rules ) {
+ public StoreLittleIndexingStrategy( IndexingRules rules ) {
this.rules = rules != null ? rules : DEFAULT_RULES;
this.logger = Logger.getLogger(getClass());
- this.queryEngine = new LuceneQueryEngine(schemata);
+ this.queryEngine = new LuceneQueryEngine();
}
/**
@@ -351,11 +348,13 @@
/**
* {@inheritDoc}
*
- * @see org.jboss.dna.search.IndexingStrategy#performQuery(QueryCommand, IndexContext)
+ * @see org.jboss.dna.search.IndexingStrategy#performQuery(org.jboss.dna.graph.query.model.QueryCommand,
+ * org.jboss.dna.graph.query.validate.Schemata, org.jboss.dna.search.IndexContext)
*/
public QueryResults performQuery( QueryCommand query,
+ Schemata schemata,
IndexContext indexes ) throws IOException, ParseException {
- return this.queryEngine.execute(query, indexes);
+ return this.queryEngine.execute(query, schemata, indexes);
}
/**
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/WorkspaceSearchEngine.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -48,6 +48,7 @@
import org.jboss.dna.graph.property.Path;
import org.jboss.dna.graph.query.QueryResults;
import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.validate.Schemata;
import org.jboss.dna.graph.request.ChangeRequest;
/**
@@ -252,11 +253,13 @@
* representation of the query.
*
* @param query the query that is to be executed, in the form of the Abstract Query Model
+ * @param schemata the definition of the tables available for the query; may not be null
* @return the query results; never null
* @throws IllegalArgumentException if the context or query references are null
*/
- public QueryResults execute( QueryCommand query ) {
- return execute(false, queryContent(query)).getResults();
+ public QueryResults execute( QueryCommand query,
+ Schemata schemata ) {
+ return execute(false, queryContent(query, schemata)).getResults();
}
/**
@@ -540,14 +543,16 @@
* Create an activity that will perform a query against the index.
*
* @param query the query to be performed; may not be null
+ * @param schemata the definition of the tables being used in the query; may not be null
* @return the activity that will perform the work
*/
- protected Query queryContent( final QueryCommand query ) {
+ protected Query queryContent( final QueryCommand query,
+ final Schemata schemata ) {
return new Query() {
private QueryResults results = null;
public void execute( IndexContext indexes ) throws IOException, ParseException {
- results = strategy().performQuery(query, indexes);
+ results = strategy().performQuery(query, schemata, indexes);
}
public String messageFor( Throwable error ) {
Modified: trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -27,7 +27,6 @@
import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsSame.sameInstance;
import static org.junit.Assert.assertThat;
-import static org.mockito.Mockito.mock;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.Graph;
import org.jboss.dna.graph.connector.RepositoryConnection;
@@ -35,7 +34,6 @@
import org.jboss.dna.graph.connector.RepositorySourceException;
import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.validate.Schemata;
import org.jboss.dna.graph.request.InvalidWorkspaceException;
import org.junit.Before;
import org.junit.Test;
@@ -51,7 +49,6 @@
private RepositoryConnectionFactory connectionFactory;
private DirectoryConfiguration directoryFactory;
private IndexingStrategy indexingStrategy;
- private Schemata schemata;
private Graph content;
@Before
@@ -84,14 +81,11 @@
}
};
- // Set up the schemata for the queries ...
- schemata = mock(Schemata.class);
-
// Set up the indexing strategy ...
IndexingRules rules = IndexingRules.createBuilder(StoreLittleIndexingStrategy.DEFAULT_RULES)
.defaultTo(IndexingRules.INDEX | IndexingRules.ANALYZE | IndexingRules.FULL_TEXT)
.build();
- indexingStrategy = new StoreLittleIndexingStrategy(schemata, rules);
+ indexingStrategy = new StoreLittleIndexingStrategy(rules);
// Now set up the search engine ...
directoryFactory = DirectoryConfigurations.inMemory();
Modified: trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java 2009-11-09 19:29:50 UTC (rev 1317)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/WorkspaceSearchEngineTest.java 2009-11-16 23:23:02 UTC (rev 1318)
@@ -26,7 +26,6 @@
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNull.notNullValue;
import static org.junit.Assert.assertThat;
-import static org.mockito.Mockito.mock;
import java.util.List;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.Graph;
@@ -36,7 +35,6 @@
import org.jboss.dna.graph.connector.RepositorySourceException;
import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.validate.Schemata;
import org.junit.Before;
import org.junit.Test;
@@ -51,7 +49,6 @@
private DirectoryConfiguration directoryFactory;
private IndexingStrategy indexingStrategy;
private Graph content;
- private Schemata schemata;
@Before
public void beforeEach() throws Exception {
@@ -76,14 +73,11 @@
}
};
- // Set up the schemata for the queries ...
- schemata = mock(Schemata.class);
-
// Set up the indexing strategy ...
IndexingRules rules = IndexingRules.createBuilder(StoreLittleIndexingStrategy.DEFAULT_RULES)
.defaultTo(IndexingRules.INDEX | IndexingRules.ANALYZE | IndexingRules.FULL_TEXT)
.build();
- indexingStrategy = new StoreLittleIndexingStrategy(schemata, rules);
+ indexingStrategy = new StoreLittleIndexingStrategy(rules);
// Now set up the search engine ...
directoryFactory = DirectoryConfigurations.inMemory();
14 years, 5 months
DNA SVN: r1317 - in trunk/dna-graph/src: test/java/org/jboss/dna/graph and 1 other directory.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-09 14:29:50 -0500 (Mon, 09 Nov 2009)
New Revision: 1317
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java
Log:
DNA-550 Added methods to Graph to allow reading of one or more properties on multiple nodes, all in a single operation
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java 2009-10-30 21:34:28 UTC (rev 1316)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java 2009-11-09 19:29:50 UTC (rev 1317)
@@ -2062,8 +2062,9 @@
* @param name the name of the property that is to be read
* @return the object that is used to specified the node whose property is to be read, and which will return the property
*/
- public On<Property> getProperty( final Name name ) {
- return new On<Property>() {
+ public OnMultiple<Property> getProperty( final Name name ) {
+ CheckArg.isNotNull(name, "name");
+ return new OnMultiple<Property>() {
public Property on( String path ) {
return on(Location.create(createPath(path)));
}
@@ -2092,10 +2093,237 @@
public Property on( Location at ) {
return requests.readProperty(at, getCurrentWorkspaceName(), name).getProperty();
}
+
+ public Map<Location, Property> on( Collection<Location> locations ) {
+ CheckArg.isNotNull(locations, "locations");
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ for (Location location : locations) {
+ requests.add(new ReadPropertyRequest(location, workspace, name));
+ }
+ return execute(requests);
+ }
+
+ public Map<Location, Property> on( Location first,
+ Location... additional ) {
+ CheckArg.isNotNull(first, "first");
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ requests.add(new ReadPropertyRequest(first, workspace, name));
+ for (Location location : additional) {
+ requests.add(new ReadPropertyRequest(location, workspace, name));
+ }
+ return execute(requests);
+ }
+
+ public Map<Location, Property> on( String first,
+ String... additional ) {
+ CheckArg.isNotNull(first, "first");
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ requests.add(new ReadPropertyRequest(Location.create(createPath(first)), workspace, name));
+ for (String path : additional) {
+ requests.add(new ReadPropertyRequest(Location.create(createPath(path)), workspace, name));
+ }
+ return execute(requests);
+ }
+
+ public Map<Location, Property> on( Path first,
+ Path... additional ) {
+ CheckArg.isNotNull(first, "first");
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ requests.add(new ReadPropertyRequest(Location.create(first), workspace, name));
+ for (Path path : additional) {
+ requests.add(new ReadPropertyRequest(Location.create(path), workspace, name));
+ }
+ return execute(requests);
+ }
+
+ public Map<Location, Property> on( UUID first,
+ UUID... additional ) {
+ CheckArg.isNotNull(first, "first");
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ requests.add(new ReadPropertyRequest(Location.create(first), workspace, name));
+ for (UUID uuid : additional) {
+ requests.add(new ReadPropertyRequest(Location.create(uuid), workspace, name));
+ }
+ return execute(requests);
+ }
+
+ protected Map<Location, Property> execute( List<ReadPropertyRequest> requests ) {
+ // Create a composite request ...
+ Request composite = CompositeRequest.with(requests);
+ Graph.this.execute(composite);
+ Map<Location, Property> results = new HashMap<Location, Property>();
+ for (ReadPropertyRequest request : requests) {
+ Property property = request.getProperty();
+ Location location = request.getActualLocationOfNode();
+ results.put(location, property);
+ }
+ return results;
+ }
};
}
/**
+ * Request that the properties with the given names be read on the node defined via the <code>on(...)</code> method on the
+ * returned {@link On} object. Once the location is specified, the {@link Property property} is read and then returned.
+ *
+ * @param names the name of the property that are to be read
+ * @return the object that is used to specified the node whose properties are to be read, and which will return the map of
+ * properties keyed by their name; never null
+ */
+ public OnMultiple<Map<Name, Property>> getProperties( final Name... names ) {
+ return new OnMultiple<Map<Name, Property>>() {
+ public Map<Name, Property> on( String path ) {
+ return on(Location.create(createPath(path)));
+ }
+
+ public Map<Name, Property> on( Path path ) {
+ return on(Location.create(path));
+ }
+
+ public Map<Name, Property> on( Property idProperty ) {
+ return on(Location.create(idProperty));
+ }
+
+ public Map<Name, Property> on( Property firstIdProperty,
+ Property... additionalIdProperties ) {
+ return on(Location.create(firstIdProperty, additionalIdProperties));
+ }
+
+ public Map<Name, Property> on( Iterable<Property> idProperties ) {
+ return on(Location.create(idProperties));
+ }
+
+ public Map<Name, Property> on( UUID uuid ) {
+ return on(Location.create(uuid));
+ }
+
+ public Map<Name, Property> on( Location at ) {
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ for (Name propertyName : names) {
+ requests.add(new ReadPropertyRequest(at, workspace, propertyName));
+ }
+ // Create a composite request ...
+ Request composite = CompositeRequest.with(requests);
+ Graph.this.execute(composite);
+ Map<Name, Property> results = new HashMap<Name, Property>();
+ for (ReadPropertyRequest request : requests) {
+ Property property = request.getProperty();
+ results.put(property.getName(), property);
+ }
+ return results;
+ }
+
+ public Map<Location, Map<Name, Property>> on( Collection<Location> locations ) {
+ CheckArg.isNotNull(locations, "locations");
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ for (Location location : locations) {
+ if (location == null) continue;
+ for (Name propertyName : names) {
+ if (propertyName == null) continue;
+ requests.add(new ReadPropertyRequest(location, workspace, propertyName));
+ }
+ }
+ return execute(requests);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.Graph.OnMultiple#on(org.jboss.dna.graph.Location, org.jboss.dna.graph.Location[])
+ */
+ public Map<Location, Map<Name, Property>> on( Location first,
+ Location... additional ) {
+ CheckArg.isNotNull(first, "first");
+ final List<ReadPropertyRequest> requests = new LinkedList<ReadPropertyRequest>();
+ String workspace = getCurrentWorkspaceName();
+ for (Location location : additional) {
+ if (location == null) continue;
+ for (Name propertyName : names) {
+ if (propertyName == null) continue;
+ requests.add(new ReadPropertyRequest(first, workspace, propertyName));
+ requests.add(new ReadPropertyRequest(location, workspace, propertyName));
+ }
+ }
+ return execute(requests);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.Graph.OnMultiple#on(org.jboss.dna.graph.property.Path,
+ * org.jboss.dna.graph.property.Path[])
+ */
+ public Map<Location, Map<Name, Property>> on( Path first,
+ Path... additional ) {
+ CheckArg.isNotNull(first, "first");
+ List<Location> locations = new LinkedList<Location>();
+ locations.add(Location.create(first));
+ for (Path path : additional) {
+ if (path != null) locations.add(Location.create(path));
+ }
+ return on(locations);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.Graph.OnMultiple#on(java.lang.String, java.lang.String[])
+ */
+ public Map<Location, Map<Name, Property>> on( String first,
+ String... additional ) {
+ CheckArg.isNotNull(first, "first");
+ List<Location> locations = new LinkedList<Location>();
+ locations.add(Location.create(createPath(first)));
+ for (String path : additional) {
+ if (path != null) locations.add(Location.create(createPath(path)));
+ }
+ return on(locations);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.Graph.OnMultiple#on(java.util.UUID, java.util.UUID[])
+ */
+ public Map<Location, Map<Name, Property>> on( UUID first,
+ UUID... additional ) {
+ CheckArg.isNotNull(first, "first");
+ List<Location> locations = new LinkedList<Location>();
+ locations.add(Location.create(first));
+ for (UUID uuid : additional) {
+ if (uuid != null) locations.add(Location.create(uuid));
+ }
+ return on(locations);
+ }
+
+ protected Map<Location, Map<Name, Property>> execute( List<ReadPropertyRequest> requests ) {
+ // Create a composite request ...
+ Request composite = CompositeRequest.with(requests);
+ Graph.this.execute(composite);
+ Map<Location, Map<Name, Property>> results = new HashMap<Location, Map<Name, Property>>();
+ for (ReadPropertyRequest request : requests) {
+ Property property = request.getProperty();
+ Location location = request.getActualLocationOfNode();
+ Map<Name, Property> properties = results.get(location);
+ if (properties == null) {
+ properties = new HashMap<Name, Property>();
+ results.put(location, properties);
+ }
+ properties.put(property.getName(), property);
+ }
+ return results;
+ }
+ };
+ }
+
+ /**
* Request to read the node with the supplied UUID.
*
* @param uuid the UUID of the node that is to be read
@@ -2395,8 +2623,6 @@
* Interface for creating multiple requests to perform various operations. Note that all the requests are accumulated until
* the {@link #execute()} method is called. The results of all the operations are then available in the {@link Results} object
* returned by the {@link #execute()}.
- *
- * @author Randall Hauch
*/
@Immutable
public final class Batch implements Executable<Node> {
@@ -4259,7 +4485,6 @@
/**
* A interface used to execute the accumulated {@link Batch requests}.
*
- * @author Randall Hauch
* @param <NodeType> the type of node that is returned
*/
public interface Executable<NodeType extends Node> {
@@ -4281,7 +4506,6 @@
* A interface that can be used to finish the current request and start another.
*
* @param <Next> the interface that will be used to start another request
- * @author Randall Hauch
*/
public interface Conjunction<Next> {
/**
@@ -4296,7 +4520,6 @@
* A component that defines the location into which a node should be copied or moved.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface Into<Next> {
/**
@@ -4376,7 +4599,6 @@
* or copied node as the last child of the new parent.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface Before<Next> {
/**
@@ -4480,7 +4702,6 @@
* A component that defines the location to which a node should be copied or moved.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface To<Next> {
/**
@@ -4555,7 +4776,6 @@
* A component that defines a new name for a node.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface AsName<Next> {
/**
@@ -4579,7 +4799,6 @@
* A interface that is used to add more locations that are to be copied/moved.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface And<Next> {
/**
@@ -4647,7 +4866,6 @@
* The interface for defining additional nodes to be moved and the parent into which the node(s) are to be moved.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface Move<Next> extends AsName<Into<Next>>, Into<Next>, Before<Next>, And<Move<Next>> {
}
@@ -4659,7 +4877,6 @@
* is to be placed, which will assume the new copy will have the same name as the original.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface Copy<Next> extends FromWorkspace<CopyTarget<Next>>, CopyTarget<Next>, And<Copy<Next>> {
}
@@ -4780,7 +4997,6 @@
* The interface for defining additional properties on a new node.
*
* @param <Next> The interface that is to be returned when this create request is completed
- * @author Randall Hauch
*/
public interface Create<Next> extends Conjunction<Next> {
/**
@@ -4929,7 +5145,6 @@
* The interface for defining additional properties on a new node.
*
* @param <Next> The interface that is to be returned when this create request is completed
- * @author Randall Hauch
*/
public interface CreateAt<Next> extends Conjunction<Next> {
/**
@@ -5060,7 +5275,6 @@
* The interface for defining the node upon which a request operates.
*
* @param <Next> The interface that is to be returned when the request is completed
- * @author Randall Hauch
*/
public interface On<Next> {
/**
@@ -5125,10 +5339,64 @@
}
/**
+ * The interface for defining the node upon which a request operates, including a method that accepts multiple locations.
+ *
+ * @param <Next> The interface that is to be returned when the request is completed
+ */
+ public interface OnMultiple<Next> extends On<Next> {
+ /**
+ * Specify the location of each node upon which the requests are to operate.
+ *
+ * @param to the locations
+ * @return the interface for additional requests or actions
+ */
+ Map<Location, Next> on( Collection<Location> to );
+
+ /**
+ * Specify the location of each node upon which the requests are to operate.
+ *
+ * @param firstTo the first location
+ * @param additional the additional location
+ * @return the interface for additional requests or actions
+ */
+ Map<Location, Next> on( Location firstTo,
+ Location... additional );
+
+ /**
+ * Specify the path of each node upon which the requests are to operate.
+ *
+ * @param firstPath the first path
+ * @param additional the additional path
+ * @return the interface for additional requests or actions
+ */
+ Map<Location, Next> on( String firstPath,
+ String... additional );
+
+ /**
+ * Specify the path of each node upon which the requests are to operate.
+ *
+ * @param firstPath the first path
+ * @param additional the additional path
+ * @return the interface for additional requests or actions
+ */
+ Map<Location, Next> on( Path firstPath,
+ Path... additional );
+
+ /**
+ * Specify the UUID of each node upon which the requests are to operate.
+ *
+ * @param firstPath the first UUID of the node
+ * @param additional the additional UUIDs
+ * @return the interface for additional requests or actions
+ */
+ Map<Location, Next> on( UUID firstPath,
+ UUID... additional );
+ }
+
+ /**
* The interface for defining the node upon which a request operates.
*
* @param <Next> The interface that is to be returned when the request is completed
- * @author Randall Hauch
*/
public interface Of<Next> {
/**
@@ -5196,7 +5464,6 @@
* The interface for defining the node upon which which a request operates.
*
* @param <Next> The interface that is to be returned when the request is completed
- * @author Randall Hauch
*/
public interface At<Next> {
/**
@@ -5267,7 +5534,6 @@
* the {@link BlockOfChildren block size and parent}.
*
* @param <Next>
- * @author Randall Hauch
*/
public interface Children<Next> extends Of<Next> {
/**
@@ -5284,7 +5550,6 @@
* {@link #startingAfter(Location) after a previous sibling}.
*
* @param <Next>
- * @author Randall Hauch
*/
public interface BlockOfChildren<Next> {
/**
@@ -5364,7 +5629,6 @@
* The interface for defining the node under which which a request operates.
*
* @param <Next> The interface that is to be returned when the request is completed
- * @author Randall Hauch
*/
public interface Under<Next> {
/**
@@ -5587,7 +5851,6 @@
* A component used to set the values on a property.
*
* @param <Next> the next command
- * @author Randall Hauch
*/
public interface SetValues<Next> extends On<SetValuesTo<Next>>, SetValuesTo<On<Next>> {
}
@@ -5596,7 +5859,6 @@
* A component used to set the values on a property.
*
* @param <Next>
- * @author Randall Hauch
*/
public interface SetValuesTo<Next> {
@@ -5842,7 +6104,6 @@
* A component that defines a node that is to be created.
*
* @param <Next> The interface that is to be returned to complete the create request
- * @author Randall Hauch
*/
public interface CreateNode<Next> {
/**
@@ -5880,7 +6141,6 @@
* A component that defines a node that is to be created.
*
* @param <Next> The interface that is to be returned to complete the create request
- * @author Randall Hauch
*/
public interface CreateNodeNamed<Next> {
/**
@@ -5904,7 +6164,6 @@
* A component that defines the location into which a node should be copied or moved.
*
* @param <Next> The interface that is to be returned when this request is completed
- * @author Randall Hauch
*/
public interface ImportInto<Next> {
/**
@@ -7183,7 +7442,6 @@
* A set of nodes returned from a {@link Graph graph}, with methods to access the properties and children of the nodes in the
* result. The {@link #iterator()} method can be used to iterate all over the nodes in the result.
*
- * @author Randall Hauch
* @param <NodeType> the type of node that tis results deals with
*/
@Immutable
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java 2009-10-30 21:34:28 UTC (rev 1316)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java 2009-11-09 19:29:50 UTC (rev 1317)
@@ -32,6 +32,7 @@
import static org.junit.matchers.JUnitMatchers.hasItems;
import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.stub;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
@@ -1112,6 +1113,64 @@
}
// ----------------------------------------------------------------------------------------------------------------
+ // Read set number of properties on multiple nodes ...
+ // ----------------------------------------------------------------------------------------------------------------
+
+ @Test
+ public void shouldReadOnePropertyOnMultipleNodes() {
+ List<Location> locations = new ArrayList<Location>();
+ locations.add(Location.create(createPath("/x/y/a")));
+ locations.add(Location.create(createPath("/x/y/b")));
+ locations.add(Location.create(createPath("/x/y/c")));
+ for (Location location : locations) {
+ Property prop1 = context.getPropertyFactory().create(validName, "1");
+ Property prop2 = context.getPropertyFactory().create(createName("otherName"), "2");
+ setPropertiesToReadOn(location, prop1, prop2);
+ }
+
+ Map<Location, Property> propertiesByLocation = graph.getProperty(validName).on(locations);
+ assertThat(numberOfExecutions, is(1));
+ extractRequestsFromComposite();
+ for (Location location : locations) {
+ Property prop = this.properties.get(location).iterator().next();
+ assertNextRequestReadProperty(location, prop);
+ assertThat(propertiesByLocation.get(location), is(prop));
+ }
+ assertNoMoreRequests();
+ }
+
+ @Test
+ public void shouldReadMultiplePropertiesOnMultipleNodes() {
+ List<Location> locations = new ArrayList<Location>();
+ locations.add(Location.create(createPath("/x/y/a")));
+ locations.add(Location.create(createPath("/x/y/b")));
+ locations.add(Location.create(createPath("/x/y/c")));
+ Name name1 = createName("name1");
+ Name name2 = createName("name2");
+ for (Location location : locations) {
+ Property prop1 = context.getPropertyFactory().create(name1, "1");
+ Property prop2 = context.getPropertyFactory().create(name2, "2");
+ setPropertiesToReadOn(location, prop1, prop2);
+ }
+ Map<Location, Map<Name, Property>> propertiesByLocation = graph.getProperties(name1, name2).on(locations);
+ assertThat(numberOfExecutions, is(1));
+ extractRequestsFromComposite();
+ for (Location location : locations) {
+ Map<Name, Property> expectedProps = new HashMap<Name, Property>();
+ for (Property prop : this.properties.get(location)) {
+ expectedProps.put(prop.getName(), prop);
+ }
+ Property prop1 = expectedProps.get(name1);
+ Property prop2 = expectedProps.get(name2);
+ assertNextRequestReadProperty(location, prop1);
+ assertNextRequestReadProperty(location, prop2);
+ assertThat(propertiesByLocation.get(location).get(name1), is(prop1));
+ assertThat(propertiesByLocation.get(location).get(name2), is(prop2));
+ }
+ assertNoMoreRequests();
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
// Implementation of RepositoryConnection and RequestProcessor for tests
// ----------------------------------------------------------------------------------------------------------------
@@ -1148,7 +1207,7 @@
if (request.into().hasPath()) {
Name childName = request.desiredName();
if (childName == null) childName = request.desiredSegment().getName();
-
+
Path childPath = context.getValueFactories().getPathFactory().create(request.into().getPath(), childName);
Location newChild = actualLocationOf(Location.create(childPath));
// Just update the actual location
14 years, 6 months