DNA SVN: r1419 - trunk/extensions/dna-search-lucene.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-12-09 14:49:30 -0500 (Wed, 09 Dec 2009)
New Revision: 1419
Modified:
trunk/extensions/dna-search-lucene/pom.xml
Log:
DNA-467 Corrected the 'dna-search-lucene' pom.xml file.
Modified: trunk/extensions/dna-search-lucene/pom.xml
===================================================================
--- trunk/extensions/dna-search-lucene/pom.xml 2009-12-09 19:36:29 UTC (rev 1418)
+++ trunk/extensions/dna-search-lucene/pom.xml 2009-12-09 19:49:30 UTC (rev 1419)
@@ -8,10 +8,10 @@
<relativePath>../..</relativePath>
</parent>
<!-- The groupId and version values are inherited from parent -->
- <artifactId>dna-search-provider</artifactId>
+ <artifactId>dna-search-lucene</artifactId>
<packaging>jar</packaging>
- <name>JBoss DNA Search Provider for Lucene</name>
- <description>JBoss DNA Search Provider that uses Lucene.</description>
+ <name>JBoss DNA Search Engine for Lucene</name>
+ <description>JBoss DNA Search Engine that uses Lucene.</description>
<url>http://labs.jboss.org/dna</url>
<!--
Define the dependencies. Note that all version and scopes default to those defined in the dependencyManagement section of the
14 years, 5 months
DNA SVN: r1418 - in trunk: dna-graph/src/main/java/org/jboss/dna/graph and 46 other directories.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-12-09 14:36:29 -0500 (Wed, 09 Dec 2009)
New Revision: 1418
Added:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumns.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequestChannel.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineProcessor.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineWorkspace.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumnsTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/request/CompositeRequestChannelTest.java
trunk/extensions/dna-search-lucene/
trunk/extensions/dna-search-lucene/.classpath
trunk/extensions/dna-search-lucene/.project
trunk/extensions/dna-search-lucene/bin/
trunk/extensions/dna-search-lucene/bin/.project
trunk/extensions/dna-search-lucene/bin/pom.xml
trunk/extensions/dna-search-lucene/pom.xml
trunk/extensions/dna-search-lucene/src/
trunk/extensions/dna-search-lucene/src/main/
trunk/extensions/dna-search-lucene/src/main/java/
trunk/extensions/dna-search-lucene/src/main/java/org/
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngine.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistry.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/IndexRules.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfiguration.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfigurations.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneException.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneI18n.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchEngine.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchProcessor.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchSession.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchWorkspace.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareLengthQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareNameQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ComparePathQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareStringQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/IdsQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/MatchNoneQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/NotQuery.java
trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ScoreQuery.java
trunk/extensions/dna-search-lucene/src/main/resources/
trunk/extensions/dna-search-lucene/src/main/resources/org/
trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/
trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/dna/
trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/dna/search/
trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/dna/search/lucene/
trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/dna/search/lucene/LuceneI18n.properties
trunk/extensions/dna-search-lucene/src/test/
trunk/extensions/dna-search-lucene/src/test/java/
trunk/extensions/dna-search-lucene/src/test/java/org/
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngineTest.java
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistryTest.java
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/IndexingRulesTest.java
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneConfigurationsTest.java
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneI18nTest.java
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneSearchEngineTest.java
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/query/
trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/query/NotQueryTest.java
trunk/extensions/dna-search-lucene/src/test/resources/
trunk/extensions/dna-search-lucene/src/test/resources/aircraft.xml
trunk/extensions/dna-search-lucene/src/test/resources/cars.xml
trunk/extensions/dna-search-lucene/src/test/resources/log4j.properties
Removed:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessorChannelTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java
trunk/dna-search/.classpath
trunk/dna-search/.project
trunk/dna-search/pom.xml
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java
trunk/dna-search/src/main/java/org/jboss/dna/search/EncodingNamespaceRegistry.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/IdsQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java
trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java
trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties
trunk/dna-search/src/test/java/org/jboss/dna/search/EncodingNamespaceRegistryTest.java
trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java
trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
trunk/dna-search/src/test/java/org/jboss/dna/search/SearchI18nTest.java
trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java
trunk/dna-search/src/test/resources/aircraft.xml
trunk/dna-search/src/test/resources/cars.xml
trunk/dna-search/src/test/resources/log4j.properties
Modified:
trunk/.gitignore
trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessor.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/map/MapRequestProcessor.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ProcessingComponent.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResultColumns.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResults.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SelectComponent.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SortValuesComponent.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/AccessQueryRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/ChangeRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneBranchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneWorkspaceRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CopyBranchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateNodeRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateWorkspaceRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteBranchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteChildrenRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DestroyWorkspaceRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/FullTextSearchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/LockBranchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/MoveBranchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RemovePropertyRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RenameNodeRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RequestBuilder.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SearchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SetPropertyRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UnlockBranchRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdatePropertiesRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdateValuesRequest.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrQueryManager.java
trunk/pom.xml
Log:
DNA-467 Restructured the SearchEngine functionality to simplify it, and to change over to have implementations be extensions. To specialize, an implementation now simply subclasses the SearchEngine, and this is instantiated directly.
The SearchEngine is also now oriented around a RequestProcessor. It's possible to obtain a RequestProcessor to issue multiple requests within a single connection, and this will make it significantly easier to reuse within a connector implementation (as well as within the 'dna-repository' module). Also, the SearchEngine's abstract RequestProcessor implementation provides default implementations for crawling and indexing entire subgraphs to generate the required UpdatePropertiesRequest and CreateNodeRequest stream. Specializations are responsible for fully-implementing the RequestProcessor to handle all the different kinds of requests, including AccessQueryRequest and FullTextSearchRequest.
Thus, the LuceneSearchEngine can be instantiated directly, as it is now a subclass of SearchEngine. It also exists in a new 'extensions/dna-search-lucene' project, and continues to use the two-index design that was implemented previously. This new implementation uses a customized RequestProcessor implementation. This also should make it easier to process Changes via the request processor's methods.
Modified: trunk/.gitignore
===================================================================
--- trunk/.gitignore 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/.gitignore 2009-12-09 19:36:29 UTC (rev 1418)
@@ -43,6 +43,7 @@
/utils/dna-jpa-ddl-gen/target/
+/extensions/dna-search-lucene/target
/extensions/dna-classloader-maven/target
/extensions/dna-common-jdbc/target
/extensions/dna-connector-federation/target
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/Graph.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -2334,10 +2334,10 @@
Map<Location, Map<Name, Property>> results = new HashMap<Location, Map<Name, Property>>();
for (ReadPropertyRequest request : requests) {
Property property = request.getProperty();
-
+
// property was requested but doesn't exist
if (property == null) continue;
-
+
Location location = request.getActualLocationOfNode();
Map<Name, Property> properties = results.get(location);
if (properties == null) {
@@ -2482,12 +2482,18 @@
* Search the current workspace using the supplied full-text search expression.
*
* @param fullTextSearchExpression the full-text search expression
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
* @return the results of the search; never null
* @throws IllegalArgumentException if the expression is null
*/
- public QueryResults search( final String fullTextSearchExpression ) {
- FullTextSearchRequest request = requests.search(getCurrentWorkspaceName(), fullTextSearchExpression);
- return request.getResults();
+ public QueryResults search( final String fullTextSearchExpression,
+ int maxResults,
+ int offset ) {
+ FullTextSearchRequest request = requests.search(getCurrentWorkspaceName(), fullTextSearchExpression, maxResults, offset);
+ QueryResults results = new org.jboss.dna.graph.query.process.QueryResults(request.getResultColumns(),
+ request.getStatistics(), request.getTuples());
+ return results;
}
/**
@@ -2634,7 +2640,7 @@
super(context, columns, accessNode);
this.graphSourceName = graphSourceName;
accessRequest = new AccessQueryRequest(workspaceName, sourceName, getColumns(), andedConstraints, limit,
- context.getVariables());
+ context.getSchemata(), context.getVariables());
((GraphQueryContext)context).getBatch().requestQueue.submit(accessRequest);
}
@@ -2663,7 +2669,7 @@
graphSourceName);
return emptyTuples();
}
- return accessRequest.getResults().getTuples();
+ return accessRequest.getTuples();
}
}
@@ -6970,7 +6976,7 @@
@Override
public String toString() {
- return "Subgraph\n" + getToString(context); //ExecutionContext.DEFAULT_CONTEXT);//getLocation().toString();
+ return "Subgraph\n" + getToString(context); // ExecutionContext.DEFAULT_CONTEXT);//getLocation().toString();
}
/**
@@ -6984,7 +6990,7 @@
getRecursiveString(context, getRoot(), sb, 0);
return sb.toString();
}
-
+
private void getRecursiveString( ExecutionContext context,
SubgraphNode node,
StringBuilder str,
@@ -6998,9 +7004,9 @@
for (Location nextLoc : node.getChildren()) {
SubgraphNode childNode = getNode(nextLoc);
// child node location may exist, but the subgraph may not have
- // been constructed deep enough to instantiate the subnode, so
+ // been constructed deep enough to instantiate the subnode, so
// check for null
- if( childNode != null ) {
+ if (childNode != null) {
getRecursiveString(context, childNode, str, indentLevel + 1);
}
}
@@ -7103,52 +7109,52 @@
}
private String getNodeString( ExecutionContext context,
- Location location) {
+ Location location ) {
StringBuilder sb = new StringBuilder();
sb.append('<'); // Bracket the node
ValueFactory<String> strings = context.getValueFactories().getStringFactory();
-
+
String name = "";
- if( location.getPath().getLastSegment() != null ) {
+ if (location.getPath().getLastSegment() != null) {
name = strings.create(location.getPath().getLastSegment());
} else {
name = strings.create(location.getPath());
}
-
- if( name.startsWith("{")) {
+
+ if (name.startsWith("{")) {
// Remove {xxxx} namespace prefix
int end = name.indexOf('}');
- name = name.substring(end+1, name.length());
+ name = name.substring(end + 1, name.length());
}
-
+
// Surround name in double quotes
sb.append("name = ").append('\"').append(name).append('\"').append(" ");
boolean first = true;
- if (getProperties() != null) {
- for ( Property entry : getProperties()) {
-
- if( first ) {
+ if (getProperties() != null) {
+ for (Property entry : getProperties()) {
+
+ if (first) {
first = false;
} else sb.append(" ");
sb.append(getPropertyString(entry));
}
}
sb.append(">\n");
-
+
return sb.toString();
}
-
- private String getPropertyString(Property property) {
+
+ private String getPropertyString( Property property ) {
// Surround property value in double quotes so final property looks like:
- // color = "blue" (single valued property)
- // colors = ["blue", "red", "green"] (multi-valued property)
-
+ // color = "blue" (single valued property)
+ // colors = ["blue", "red", "green"] (multi-valued property)
+
StringBuilder sb = new StringBuilder();
sb.append(property.getName().getLocalName());
sb.append(" = ");
if (property.isEmpty()) {
sb.append("null");
- } else if( property.isSingle() ) {
+ } else if (property.isSingle()) {
String valueStr = getContext().getValueFactories().getStringFactory().create(property.getValues().next());
sb.append('\"').append(valueStr).append('\"');
} else {
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessor.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessor.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessor.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -25,21 +25,14 @@
import java.util.ArrayList;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.NoSuchElementException;
import java.util.Queue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicBoolean;
import net.jcip.annotations.NotThreadSafe;
import org.jboss.dna.common.i18n.I18n;
import org.jboss.dna.graph.ExecutionContext;
@@ -58,6 +51,7 @@
import org.jboss.dna.graph.request.CloneBranchRequest;
import org.jboss.dna.graph.request.CloneWorkspaceRequest;
import org.jboss.dna.graph.request.CompositeRequest;
+import org.jboss.dna.graph.request.CompositeRequestChannel;
import org.jboss.dna.graph.request.CopyBranchRequest;
import org.jboss.dna.graph.request.CreateNodeRequest;
import org.jboss.dna.graph.request.CreateWorkspaceRequest;
@@ -109,372 +103,10 @@
*/
@NotThreadSafe
class ForkRequestProcessor extends RequestProcessor {
-
- /**
- * A psuedo Request that is used by {@link Channel} to insert into a request queue so that the queue's iterator knows when
- * there are no more requests to process.
- */
- protected static class LastRequest extends Request {
- private static final long serialVersionUID = 1L;
-
- @Override
- public boolean isReadOnly() {
- return false;
- }
- }
-
- /**
- * Represents the channel for a specific source into which this processor submits the requests for that source. To use, create
- * a Channel, {@link Channel#start(ExecutorService, ExecutionContext, RepositoryConnectionFactory) start it}, and then
- * {@link Channel#add(Request) add} requests (optionally with a {@link Channel#add(Request, CountDownLatch) latch} or via a
- * {@link Channel#addAndAwait(Request) add and await}). Finally, call {@link Channel#done()} when there are no more requests.
- * <p>
- * When the channel is {@link Channel#start(ExecutorService, ExecutionContext, RepositoryConnectionFactory) started}, it
- * creates a {@link Callable} and submits it to the supplied {@link ExecutorService}. (The resulting {@link Future} is then
- * captured so that the channel can be {@link Channel#cancel(boolean) cancelled}.) The Callable obtains a
- * {@link RepositoryConnection connection} to the channel's source, and then has the connection process a single
- * {@link CompositeRequest} that fronts the queue of Request instances added to this channel. Because a blocking queue is
- * used, the CompositeRequest's {@link CompositeRequest#iterator() iterator} blocks (on {@link Iterator#hasNext()}) until the
- * next request is available. When {@link Channel#done()} is called, the iterator stops blocking and completes.
- * </p>
- */
- protected static class Channel {
- protected final String sourceName;
- /** The list of all requests that are or have been processed as part of this channel */
- protected final LinkedList<Request> allRequests = new LinkedList<Request>();
- /** The queue of requests that remain unprocessed */
- private final BlockingQueue<Request> queue = new LinkedBlockingQueue<Request>();
- /** The CompositeRequest that is submitted to the underlying processor */
- protected final CompositeRequest composite;
- /** The Future that is submitted to the ExecutorService to do the processing */
- protected Future<String> future;
- /** Flag that defines whether the channel has processed all requests */
- protected final AtomicBoolean done = new AtomicBoolean(false);
- protected Throwable compositeError = null;
-
- /**
- * Create a new channel that operates against the supplied source.
- *
- * @param sourceName the name of the repository source used to execute this channel's {@link #allRequests() requests}; may
- * not be null or empty
- */
- protected Channel( final String sourceName ) {
- assert sourceName != null;
- this.sourceName = sourceName;
- this.composite = new CompositeRequest(false) {
- private static final long serialVersionUID = 1L;
- private final LinkedList<Request> allRequests = Channel.this.allRequests;
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.request.CompositeRequest#iterator()
- */
- @Override
- public Iterator<Request> iterator() {
- return createIterator();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.request.CompositeRequest#getRequests()
- */
- @Override
- public List<Request> getRequests() {
- return allRequests;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.request.CompositeRequest#size()
- */
- @Override
- public int size() {
- return done.get() ? allRequests.size() : CompositeRequest.UNKNOWN_NUMBER_OF_REQUESTS;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.request.Request#cancel()
- */
- @Override
- public void cancel() {
- done.set(true);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.request.Request#setError(java.lang.Throwable)
- */
- @Override
- public void setError( Throwable error ) {
- compositeError = error;
- super.setError(error);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.request.Request#hasError()
- */
- @Override
- public boolean hasError() {
- return compositeError != null || super.hasError();
- }
- };
- }
-
- /**
- * Utility method to create an iterator over the requests in this channel. This really should be called once
- *
- * @return the iterator over the channels
- */
- protected Iterator<Request> createIterator() {
- final BlockingQueue<Request> queue = this.queue;
- return new Iterator<Request>() {
- private Request next;
-
- public boolean hasNext() {
- // If next still has a request, then 'hasNext()' has been called multiple times in a row
- if (next != null) return true;
-
- // Now, block for a next item (this blocks) ...
- try {
- next = queue.take();
- } catch (InterruptedException e) {
- // This happens when the federated connector has been told to shutdown now, and it shuts down
- // its executor (the worker pool) immediately by interrupting each in-use thread.
- // In this case, we should consider there to be more more requests ...
- try {
- return false;
- } finally {
- // reset the interrupted status ...
- Thread.interrupted();
- }
- }
- if (next instanceof LastRequest) {
- return false;
- }
- return next != null;
- }
-
- public Request next() {
- if (next == null) {
- // Must have been called without first calling 'hasNext()' ...
- try {
- next = queue.take();
- } catch (InterruptedException e) {
- // This happens when the federated connector has been told to shutdown now, and it shuts down
- // its executor (the worker pool) immediately by interrupting each in-use thread.
- // In this case, we should consider there to be more more requests (again, this case
- // is when 'next()' has been called without calling 'hasNext()') ...
- try {
- throw new NoSuchElementException();
- } finally {
- // reset the interrupted status ...
- Thread.interrupted();
- }
- }
- }
- if (next == null) {
- throw new NoSuchElementException();
- }
- Request result = next;
- next = null;
- return result;
- }
-
- public void remove() {
- throw new UnsupportedOperationException();
- }
- };
- }
-
- /**
- * Begins processing any requests that have been {@link #add(Request) added} to this channel. Processing is done by
- * submitting the channel to the supplied executor.
- *
- * @param executor the executor that is to do the work; may not be null
- * @param context the execution context in which the work is to be performed; may not be null
- * @param connectionFactory the connection factory that should be used to create connections; may not be null
- * @throws IllegalStateException if this channel has already been started
- */
- protected void start( final ExecutorService executor,
- final ExecutionContext context,
- final RepositoryConnectionFactory connectionFactory ) {
- assert executor != null;
- assert context != null;
- assert connectionFactory != null;
- if (this.future != null) {
- throw new IllegalStateException();
- }
- this.future = executor.submit(new Callable<String>() {
- /**
- * {@inheritDoc}
- *
- * @see java.util.concurrent.Callable#call()
- */
- public String call() throws Exception {
- final RepositoryConnection connection = connectionFactory.createConnection(sourceName);
- assert connection != null;
- try {
- connection.execute(context, composite);
- } finally {
- connection.close();
- }
- return sourceName;
- }
- });
- }
-
- /**
- * Add the request to this channel for asynchronous processing. This method is called by the
- * {@link ForkRequestProcessor#submit(Request, String)} method.
- *
- * @param request the request to be submitted; may not be null
- * @throws IllegalStateException if this channel has already been marked as {@link #done()}
- */
- protected void add( Request request ) {
- if (done.get()) {
- throw new IllegalStateException(GraphI18n.unableToAddRequestToChannelThatIsDone.text(sourceName, request));
- }
- assert request != null;
- this.allRequests.add(request);
- this.queue.add(request);
- }
-
- /**
- * Add the request to this channel for asynchronous processing, and supply a {@link CountDownLatch count-down latch} that
- * should be {@link CountDownLatch#countDown() decremented} when this request is completed.
- *
- * @param request the request to be submitted; may not be null
- * @param latch the count-down latch that should be decremented when <code>request</code> has been completed; may not be
- * null
- * @return the same latch that was supplied, for method chaining purposes; never null
- * @throws IllegalStateException if this channel has already been marked as {@link #done()}
- */
- protected CountDownLatch add( Request request,
- CountDownLatch latch ) {
- if (done.get()) {
- throw new IllegalStateException(GraphI18n.unableToAddRequestToChannelThatIsDone.text(sourceName, request));
- }
- assert request != null;
- assert latch != null;
- // Submit the request for processing ...
- this.allRequests.add(request);
- request.setLatchForFreezing(latch);
- this.queue.add(request);
- return latch;
- }
-
- /**
- * Add the request to this channel for asynchronous processing, and supply a {@link CountDownLatch count-down latch} that
- * should be {@link CountDownLatch#countDown() decremented} when this request is completed. This method is called by the
- * {@link ForkRequestProcessor#submitAndAwait(Request, String)} method.
- *
- * @param request the request to be submitted; may not be null
- * @throws InterruptedException if the current thread is interrupted while waiting
- */
- protected void addAndAwait( Request request ) throws InterruptedException {
- // Add the request with a latch, then block until the request has completed ...
- add(request, new CountDownLatch(1)).await();
- }
-
- /**
- * Mark this source as having no more requests to process.
- */
- protected void done() {
- this.done.set(true);
- this.queue.add(new LastRequest());
- }
-
- /**
- * Return whether this channel has been {@link #done() marked as done}.
- *
- * @return true if the channel was marked as done, or false otherwise
- */
- protected boolean isDone() {
- return done.get();
- }
-
- /**
- * Cancel this forked channel, stopping work as soon as possible. If the channel has not yet been started, this method
- *
- * @param mayInterruptIfRunning true if the channel is still being worked on, and the thread on which its being worked on
- * may be interrupted, or false if the channel should be allowed to finish if it is already in work.
- */
- public void cancel( boolean mayInterruptIfRunning ) {
- if (this.future == null || this.future.isDone() || this.future.isCancelled()) return;
-
- // Mark the composite as cancelled first, so that the next composed request will be marked as
- // cancelled.
- this.composite.cancel();
-
- // Now mark the channel as being done ...
- done();
-
- // Now, mark the channel as being cancelled (do allow interrupting the worker thread) ...
- this.future.cancel(mayInterruptIfRunning);
- }
-
- /**
- * Return whether this channel has been {@link #start(ExecutorService, ExecutionContext, RepositoryConnectionFactory)
- * started}.
- *
- * @return true if this channel was started, or false otherwise
- */
- public boolean isStarted() {
- return this.future != null;
- }
-
- /**
- * Return whether this channel has completed all of its work.
- *
- * @return true if the channel was started and is complete, or false otherwise
- */
- public boolean isComplete() {
- return this.future != null && this.future.isDone();
- }
-
- /**
- * Await until this channel has completed.
- *
- * @throws CancellationException if the channel was cancelled
- * @throws ExecutionException if the channel execution threw an exception
- * @throws InterruptedException if the current thread is interrupted while waiting
- */
- protected void await() throws ExecutionException, InterruptedException, CancellationException {
- this.future.get();
- }
-
- /**
- * Get all the requests that were submitted to this queue. The resulting list is the actual list that is appended when
- * requests are added, and may change until the channel is marked as {@link #done() done}.
- *
- * @return all of the requests that were submitted to this channel; never null
- */
- protected List<Request> allRequests() {
- return allRequests;
- }
-
- /**
- * Get the name of the source that this channel uses.
- *
- * @return the source name; never null
- */
- protected String sourceName() {
- return sourceName;
- }
- }
-
private final FederatedRepository repository;
private final ExecutorService executor;
private final RepositoryConnectionFactory connectionFactory;
- private final Map<String, Channel> channelBySourceName = new HashMap<String, Channel>();
+ private final Map<String, CompositeRequestChannel> channelBySourceName = new HashMap<String, CompositeRequestChannel>();
private final Queue<FederatedRequest> federatedRequestQueue;
/**
@@ -512,9 +144,9 @@
protected void submit( Request request,
String sourceName ) {
assert request != null;
- Channel channel = channelBySourceName.get(sourceName);
+ CompositeRequestChannel channel = channelBySourceName.get(sourceName);
if (channel == null) {
- channel = new Channel(sourceName);
+ channel = new CompositeRequestChannel(sourceName);
channelBySourceName.put(sourceName, channel);
channel.start(executor, getExecutionContext(), connectionFactory);
}
@@ -536,9 +168,9 @@
protected void submitAndAwait( Request request,
String sourceName ) throws InterruptedException {
assert request != null;
- Channel channel = channelBySourceName.get(sourceName);
+ CompositeRequestChannel channel = channelBySourceName.get(sourceName);
if (channel == null) {
- channel = new Channel(sourceName);
+ channel = new CompositeRequestChannel(sourceName);
channelBySourceName.put(sourceName, channel);
channel.start(executor, getExecutionContext(), connectionFactory);
}
@@ -565,9 +197,9 @@
String sourceName,
CountDownLatch latch ) {
assert request != null;
- Channel channel = channelBySourceName.get(sourceName);
+ CompositeRequestChannel channel = channelBySourceName.get(sourceName);
if (channel == null) {
- channel = new Channel(sourceName);
+ channel = new CompositeRequestChannel(sourceName);
channelBySourceName.put(sourceName, channel);
channel.start(executor, getExecutionContext(), connectionFactory);
}
@@ -582,7 +214,7 @@
* @throws InterruptedException if the current thread is interrupted while waiting
*/
public void await() throws ExecutionException, InterruptedException, CancellationException {
- for (Channel channel : channelBySourceName.values()) {
+ for (CompositeRequestChannel channel : channelBySourceName.values()) {
channel.await();
}
}
@@ -1736,13 +1368,13 @@
@Override
public void close() {
super.close();
- for (Channel channel : channelBySourceName.values()) {
- channel.done();
+ for (CompositeRequestChannel channel : channelBySourceName.values()) {
+ channel.close();
}
}
protected void cancel( boolean mayInterruptIfRunning ) {
- for (Channel channel : channelBySourceName.values()) {
+ for (CompositeRequestChannel channel : channelBySourceName.values()) {
channel.cancel(mayInterruptIfRunning);
}
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/map/MapRequestProcessor.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/map/MapRequestProcessor.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/connector/map/MapRequestProcessor.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -504,7 +504,7 @@
final ExecutionContext context = getExecutionContext();
QueryResults results = workspace.query(context, request);
if (results != null) {
- request.setResults(results);
+ request.setResults(results.getTuples(), results.getStatistics());
} else {
super.processUnknownRequest(request);
}
@@ -522,7 +522,7 @@
final ExecutionContext context = getExecutionContext();
QueryResults results = workspace.search(context, request.expression());
if (results != null) {
- request.setResults(results);
+ request.setResults(results.getColumns(), results.getTuples(), results.getStatistics());
} else {
super.processUnknownRequest(request);
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryEngine.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -24,15 +24,12 @@
package org.jboss.dna.graph.query;
import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
import net.jcip.annotations.ThreadSafe;
import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.graph.query.QueryResults.Statistics;
import org.jboss.dna.graph.query.model.Column;
import org.jboss.dna.graph.query.model.Constraint;
-import org.jboss.dna.graph.query.model.FullTextSearch;
import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.model.Visitors;
import org.jboss.dna.graph.query.optimize.Optimizer;
import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
import org.jboss.dna.graph.query.plan.CanonicalPlanner;
@@ -118,7 +115,7 @@
}
}
// There were problems somewhere ...
- return new org.jboss.dna.graph.query.process.QueryResults(context, query, resultColumns, stats);
+ return new org.jboss.dna.graph.query.process.QueryResults(resultColumns, stats, context.getProblems());
}
protected QueryResultColumns determineQueryResultColumns( PlanNode optimizedPlan ) {
@@ -127,20 +124,15 @@
if (project != null) {
List<Column> columns = project.getPropertyAsList(Property.PROJECT_COLUMNS, Column.class);
// Determine whether to include the full-text search scores in the results ...
- final AtomicBoolean includeFullTextSearchScores = new AtomicBoolean(false);
+ boolean includeFullTextSearchScores = false;
for (PlanNode select : optimizedPlan.findAllAtOrBelow(Type.SELECT)) {
Constraint constraint = select.getProperty(Property.SELECT_CRITERIA, Constraint.class);
- if (constraint != null) {
- Visitors.visitAll(constraint, new Visitors.AbstractVisitor() {
- @Override
- public void visit( FullTextSearch obj ) {
- includeFullTextSearchScores.set(true);
- }
- });
+ if (QueryResultColumns.includeFullTextScores(constraint)) {
+ includeFullTextSearchScores = true;
+ break;
}
- if (includeFullTextSearchScores.get()) break;
}
- return new QueryResultColumns(columns, includeFullTextSearchScores.get());
+ return new QueryResultColumns(columns, includeFullTextSearchScores);
}
return QueryResultColumns.empty();
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryResults.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -33,7 +33,6 @@
import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.graph.Location;
import org.jboss.dna.graph.query.model.Column;
-import org.jboss.dna.graph.query.model.QueryCommand;
/**
* The resulting output of a query.
@@ -42,13 +41,6 @@
public interface QueryResults extends Serializable {
/**
- * Get the original query command.
- *
- * @return the query; never null
- */
- public QueryCommand getCommand();
-
- /**
* Get the description of the columns contained in these results. These columns can be used to discover the indexes of the
* corresponding values from the arrays representing the {@link #getTuples() tuples}.
*
@@ -369,18 +361,18 @@
private final long resultFormulationNanos;
private final long executionNanos;
- private Statistics() {
+ public Statistics() {
this(0L, 0L, 0L, 0L);
}
- protected Statistics( long planningNanos ) {
+ public Statistics( long planningNanos ) {
this(planningNanos, 0L, 0L, 0L);
}
- protected Statistics( long planningNanos,
- long optimizationNanos,
- long resultFormulationNanos,
- long executionNanos ) {
+ public Statistics( long planningNanos,
+ long optimizationNanos,
+ long resultFormulationNanos,
+ long executionNanos ) {
this.planningNanos = planningNanos;
this.optimizationNanos = optimizationNanos;
this.resultFormulationNanos = resultFormulationNanos;
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumns.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumns.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumns.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,86 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.process;
+
+import java.util.List;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.query.model.Column;
+
+/**
+ * A specialization of {@link QueryResultColumns} that can be used to represent results containing only the {@link Location} of
+ * the node and the full-text search score for that node.
+ */
+public class FullTextSearchResultColumns extends QueryResultColumns {
+
+ private static final long serialVersionUID = 1L;
+
+ public static final QueryResultColumns INSTANCE = new FullTextSearchResultColumns();
+
+ /**
+ * Create a new definition for the query results containing just the locations and the full-text search scores.
+ */
+ public FullTextSearchResultColumns() {
+ super(true, NO_COLUMNS);
+ }
+
+ /**
+ * Create a new definition for the query results given the supplied columns.
+ *
+ * @param columns the columns that define the results; should never be modified directly
+ */
+ public FullTextSearchResultColumns( List<Column> columns ) {
+ super(true, columns != null ? columns : NO_COLUMNS);
+ CheckArg.isNotEmpty(columns, "columns");
+ }
+
+ /**
+ * Get the index of the tuple value containing the {@link Location} of the node.
+ *
+ * @return the index of the tuple value containing the Location; never negative
+ */
+ public int getLocationIndex() {
+ return getLocationIndex(DEFAULT_SELECTOR_NAME);
+ }
+
+ /**
+ * Get the index of the tuple value containing the full-text search score for the node.
+ *
+ * @return the index that corresponds to the {@link Double} full-text search score, or -1 if there is no full-text search
+ * score for the named selector
+ */
+ public int getFullTextSearchScoreIndex() {
+ return getFullTextSearchScoreIndexFor(DEFAULT_SELECTOR_NAME);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.process.QueryResultColumns#hasFullTextSearchScores()
+ */
+ @Override
+ public boolean hasFullTextSearchScores() {
+ return true;
+ }
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumns.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ProcessingComponent.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ProcessingComponent.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/ProcessingComponent.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -44,6 +44,7 @@
import org.jboss.dna.graph.query.model.TypeSystem;
import org.jboss.dna.graph.query.model.UpperCase;
import org.jboss.dna.graph.query.model.TypeSystem.TypeFactory;
+import org.jboss.dna.graph.query.validate.Schemata;
import org.jboss.dna.graph.query.validate.Schemata.Column;
import org.jboss.dna.graph.query.validate.Schemata.Table;
@@ -138,12 +139,14 @@
/**
* Create a {@link DynamicOperation} instance that is able to evaluate the supplied {@link DynamicOperand}.
*
- * @param context the context in which the query is being evaluated; may not be null
+ * @param typeSystem the type system; may not be null
+ * @param schemata the schemata; may not be null
* @param columns the definition of the result columns and the tuples; may not be null
* @param operand the dynamic operand that is to be evaluated by the returned object; may not be null
* @return the dynamic operand operation; never null
*/
- protected DynamicOperation createDynamicOperation( QueryContext context,
+ protected DynamicOperation createDynamicOperation( final TypeSystem typeSystem,
+ Schemata schemata,
Columns columns,
DynamicOperand operand ) {
assert operand != null;
@@ -155,7 +158,7 @@
String selectorName = propValue.getSelectorName().getName();
final int index = columns.getColumnIndexForProperty(selectorName, propertyName);
// Find the expected property type of the value ...
- Table table = context.getSchemata().getTable(propValue.getSelectorName());
+ Table table = schemata.getTable(propValue.getSelectorName());
Column schemaColumn = table.getColumn(propertyName);
final String expectedType = schemaColumn.getPropertyType();
return new DynamicOperation() {
@@ -168,7 +171,6 @@
}
};
}
- final TypeSystem typeSystem = context.getTypeSystem();
final TypeFactory<String> stringFactory = typeSystem.getStringFactory();
if (operand instanceof Length) {
Length length = (Length)operand;
@@ -194,7 +196,7 @@
}
if (operand instanceof LowerCase) {
LowerCase lowerCase = (LowerCase)operand;
- final DynamicOperation delegate = createDynamicOperation(context, columns, lowerCase.getOperand());
+ final DynamicOperation delegate = createDynamicOperation(typeSystem, schemata, columns, lowerCase.getOperand());
return new DynamicOperation() {
public String getExpectedType() {
return stringFactory.getTypeName();
@@ -208,7 +210,7 @@
}
if (operand instanceof UpperCase) {
UpperCase upperCase = (UpperCase)operand;
- final DynamicOperation delegate = createDynamicOperation(context, columns, upperCase.getOperand());
+ final DynamicOperation delegate = createDynamicOperation(typeSystem, schemata, columns, upperCase.getOperand());
return new DynamicOperation() {
public String getExpectedType() {
return stringFactory.getTypeName();
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -100,7 +100,7 @@
statistics = statistics.withExecutionTime(System.nanoTime() - nanos);
}
assert tuples != null;
- return new org.jboss.dna.graph.query.process.QueryResults(context, command, columns, statistics, tuples);
+ return new org.jboss.dna.graph.query.process.QueryResults(columns, statistics, tuples, context.getProblems());
}
/**
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResultColumns.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResultColumns.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResultColumns.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -32,21 +32,29 @@
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
import net.jcip.annotations.Immutable;
+import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.graph.GraphI18n;
import org.jboss.dna.graph.Location;
import org.jboss.dna.graph.query.QueryResults.Columns;
import org.jboss.dna.graph.query.model.Column;
+import org.jboss.dna.graph.query.model.Constraint;
+import org.jboss.dna.graph.query.model.FullTextSearch;
+import org.jboss.dna.graph.query.model.Visitors;
/**
* Defines the columns associated with the results of a query. This definition allows the values to be accessed
*/
@Immutable
-public final class QueryResultColumns implements Columns {
+public class QueryResultColumns implements Columns {
private static final long serialVersionUID = 1L;
- private static final QueryResultColumns EMPTY = new QueryResultColumns(Collections.<Column>emptyList(), false);
+ protected static final List<Column> NO_COLUMNS = Collections.<Column>emptyList();
+ protected static final QueryResultColumns EMPTY = new QueryResultColumns(false, null);
+ protected static final String DEFAULT_SELECTOR_NAME = "Results";
+
/**
* Get an empty results column definition.
*
@@ -78,12 +86,24 @@
*/
public QueryResultColumns( List<Column> columns,
boolean includeFullTextSearchScores ) {
- assert columns != null;
- this.columns = Collections.unmodifiableList(columns);
+ this(includeFullTextSearchScores, columns);
+ CheckArg.isNotEmpty(columns, "columns");
+ }
+
+ /**
+ * Create a new definition for the query results given the supplied columns.
+ *
+ * @param includeFullTextSearchScores true if room should be made in the tuples for the full-text search scores for each
+ * {@link Location}, or false otherwise
+ * @param columns the columns that define the results; should never be modified directly
+ */
+ protected QueryResultColumns( boolean includeFullTextSearchScores,
+ List<Column> columns ) {
+ this.columns = columns != null ? Collections.<Column>unmodifiableList(columns) : NO_COLUMNS;
this.columnsByName = new HashMap<String, Column>();
this.columnIndexByColumnName = new HashMap<String, Integer>();
Set<String> selectors = new HashSet<String>();
- final int columnCount = columns.size();
+ final int columnCount = this.columns.size();
Integer selectorIndex = new Integer(columnCount - 1);
this.locationIndexBySelectorName = new HashMap<String, Integer>();
this.locationIndexByColumnIndex = new HashMap<Integer, Integer>();
@@ -117,6 +137,11 @@
}
byPropertyName.put(column.getPropertyName(), new Integer(i));
}
+ if (columns != null && selectorNames.isEmpty()) {
+ String selectorName = DEFAULT_SELECTOR_NAME;
+ selectorNames.add(selectorName);
+ locationIndexBySelectorName.put(selectorName, 0);
+ }
this.selectorNames = Collections.unmodifiableList(selectorNames);
this.columnNames = Collections.unmodifiableList(names);
if (includeFullTextSearchScores) {
@@ -132,6 +157,26 @@
}
}
+ public static boolean includeFullTextScores( Iterable<Constraint> constraints ) {
+ for (Constraint constraint : constraints) {
+ if (includeFullTextScores(constraint)) return true;
+ }
+ return false;
+ }
+
+ public static boolean includeFullTextScores( Constraint constraint ) {
+ final AtomicBoolean includeFullTextScores = new AtomicBoolean(false);
+ if (constraint != null) {
+ Visitors.visitAll(constraint, new Visitors.AbstractVisitor() {
+ @Override
+ public void visit( FullTextSearch obj ) {
+ includeFullTextScores.set(true);
+ }
+ });
+ }
+ return includeFullTextScores.get();
+ }
+
/**
* {@inheritDoc}
*
@@ -187,6 +232,11 @@
}
byPropertyName.put(column.getPropertyName(), columnIndex);
}
+ if (selectorNames.isEmpty()) {
+ String selectorName = DEFAULT_SELECTOR_NAME;
+ selectorNames.add(selectorName);
+ locationIndexBySelectorName.put(selectorName, 0);
+ }
this.columnNames = Collections.unmodifiableList(names);
if (wrappedAround.fullTextSearchScoreIndexBySelectorName != null) {
this.fullTextSearchScoreIndexBySelectorName = new HashMap<String, Integer>();
@@ -289,6 +339,7 @@
* @see org.jboss.dna.graph.query.QueryResults.Columns#getLocationIndexForColumn(int)
*/
public int getLocationIndexForColumn( int columnIndex ) {
+ if (locationIndexByColumnIndex.isEmpty()) return 0;
Integer result = locationIndexByColumnIndex.get(new Integer(columnIndex));
if (result == null) {
throw new IndexOutOfBoundsException(GraphI18n.columnDoesNotExistInQuery.text(columnIndex));
@@ -302,6 +353,7 @@
* @see org.jboss.dna.graph.query.QueryResults.Columns#getLocationIndexForColumn(java.lang.String)
*/
public int getLocationIndexForColumn( String columnName ) {
+ if (locationIndexByColumnName.isEmpty()) return 0;
Integer result = locationIndexByColumnName.get(columnName);
if (result == null) {
throw new NoSuchElementException(GraphI18n.columnDoesNotExistInQuery.text(columnName));
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResults.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResults.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryResults.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -27,12 +27,13 @@
import java.util.Iterator;
import java.util.List;
import net.jcip.annotations.Immutable;
+import org.jboss.dna.common.collection.ImmutableProblems;
import org.jboss.dna.common.collection.Problems;
+import org.jboss.dna.common.collection.SimpleProblems;
import org.jboss.dna.common.util.StringUtil;
import org.jboss.dna.graph.GraphI18n;
import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.model.TypeSystem;
import org.jboss.dna.graph.query.model.TypeSystem.TypeFactory;
/**
@@ -40,10 +41,11 @@
*/
@Immutable
public class QueryResults implements org.jboss.dna.graph.query.QueryResults {
+ private static final Problems NO_PROBLEMS = new ImmutableProblems(new SimpleProblems());
+
private static final long serialVersionUID = 1L;
- private final QueryContext context;
- private final QueryCommand command;
+ private final Problems problems;
private final Columns columns;
private final List<Object[]> tuples;
private final Statistics statistics;
@@ -51,50 +53,58 @@
/**
* Create a results object for the supplied context, command, and result columns and with the supplied tuples.
*
- * @param context the context in which the query was executed
- * @param command the query command
* @param columns the definition of the query result columns
* @param statistics the statistics for this query; may not be null
* @param tuples the tuples
+ * @param problems the problems; may be null if there are no problems
*/
- public QueryResults( QueryContext context,
- QueryCommand command,
- Columns columns,
+ public QueryResults( Columns columns,
Statistics statistics,
- List<Object[]> tuples ) {
- assert context != null;
- assert command != null;
+ List<Object[]> tuples,
+ Problems problems ) {
assert columns != null;
assert statistics != null;
- this.context = context;
- this.command = command;
+ this.problems = problems != null ? problems : NO_PROBLEMS;
this.columns = columns;
this.tuples = tuples;
this.statistics = statistics;
}
/**
+ * Create a results object for the supplied context, command, and result columns and with the supplied tuples.
+ *
+ * @param columns the definition of the query result columns
+ * @param statistics the statistics for this query; may not be null
+ * @param tuples the tuples
+ */
+ public QueryResults( Columns columns,
+ Statistics statistics,
+ List<Object[]> tuples ) {
+ this(columns, statistics, tuples, NO_PROBLEMS);
+ }
+
+ /**
* Create an empty {@link QueryResults} object for the supplied context, command, and result columns.
*
- * @param context the context in which the query was executed
- * @param command the query command
* @param columns the definition of the query result columns
* @param statistics the statistics for this query; may not be null
+ * @param problems the problems; may be null if there are no problems
*/
- public QueryResults( QueryContext context,
- QueryCommand command,
- Columns columns,
- Statistics statistics ) {
- this(context, command, columns, statistics, Collections.<Object[]>emptyList());
+ public QueryResults( Columns columns,
+ Statistics statistics,
+ Problems problems ) {
+ this(columns, statistics, Collections.<Object[]>emptyList(), problems);
}
/**
- * {@inheritDoc}
+ * Create an empty {@link QueryResults} object for the supplied context, command, and result columns.
*
- * @see org.jboss.dna.graph.query.QueryResults#getCommand()
+ * @param columns the definition of the query result columns
+ * @param statistics the statistics for this query; may not be null
*/
- public QueryCommand getCommand() {
- return command;
+ public QueryResults( Columns columns,
+ Statistics statistics ) {
+ this(columns, statistics, Collections.<Object[]>emptyList(), null);
}
/**
@@ -139,7 +149,7 @@
* @see org.jboss.dna.graph.query.QueryResults#getProblems()
*/
public Problems getProblems() {
- return context.getProblems();
+ return problems;
}
/**
@@ -176,54 +186,66 @@
*/
@Override
public String toString() {
- return toString(Integer.MAX_VALUE);
+ return toString(null, Integer.MAX_VALUE);
}
/**
* Get a string representation of this result object, with a maximum number of tuples to include.
*
+ * @param typeSystem the type system that can be used to convert the values to a string; may be null if
+ * {@link Object#toString()} should be used
* @param maxTuples the maximum number of tuples to print, or {@link Integer#MAX_VALUE} if all the tuples are to be printed
* @return the string representation; never null
*/
- public String toString( int maxTuples ) {
+ public String toString( TypeSystem typeSystem,
+ int maxTuples ) {
StringBuilder sb = new StringBuilder();
- toString(sb, maxTuples);
+ toString(typeSystem, sb, maxTuples);
return sb.toString();
}
/**
* Get a string representation of this result object.
*
+ * @param typeSystem the type system that can be used to convert the values to a string; may be null if
+ * {@link Object#toString()} should be used
* @param sb the string builder to which the results should be written; may not be null
*/
- public void toString( StringBuilder sb ) {
- toString(sb, Integer.MAX_VALUE);
+ public void toString( TypeSystem typeSystem,
+ StringBuilder sb ) {
+ toString(typeSystem, sb, Integer.MAX_VALUE);
}
/**
* Get a string representation of this result object, with a maximum number of tuples to include.
*
+ * @param typeSystem the type system that can be used to convert the values to a string; may be null if
+ * {@link Object#toString()} should be used
* @param sb the string builder to which the results should be written; may not be null
* @param maxTuples the maximum number of tuples to print, or {@link Integer#MAX_VALUE} if all the tuples are to be printed
*/
- public void toString( StringBuilder sb,
+ public void toString( TypeSystem typeSystem,
+ StringBuilder sb,
int maxTuples ) {
- int[] columnWidths = determineColumnWidths(Integer.MAX_VALUE, true);
+ int[] columnWidths = determineColumnWidths(typeSystem, Integer.MAX_VALUE, true);
printDelimiterLine(sb, columnWidths, true);
printHeader(sb, columnWidths);
printDelimiterLine(sb, columnWidths, true);
- printLines(sb, columnWidths, maxTuples);
+ printLines(typeSystem, sb, columnWidths, maxTuples);
printDelimiterLine(sb, columnWidths, false);
}
/**
* Determine the width of each column.
*
+ * @param typeSystem the type system that can be used to convert the values to a string; may be null if
+ * {@link Object#toString()} should be used
* @param maxWidth the maximum width; must be positive
* @param useData true if the data should be used to compute the length, or false if just the column names should be used
* @return the array of widths for each column, excluding any decorating characters; never null
*/
- protected int[] determineColumnWidths( int maxWidth,
+ protected int[] determineColumnWidths( TypeSystem typeSystem,
+ int maxWidth,
boolean useData ) {
assert maxWidth > 0;
int tupleLength = columns.getTupleSize();
@@ -245,7 +267,7 @@
if (useData) {
for (Object[] tuple : getTuples()) {
for (int i = 0, j = 1; i != tupleLength; ++i, ++j) {
- String valueStr = stringOf(tuple[i]);
+ String valueStr = stringOf(typeSystem, tuple[i]);
if (valueStr == null) continue;
columnWidths[j] = Math.max(Math.min(maxWidth, valueStr.length()), columnWidths[j]);
}
@@ -254,9 +276,11 @@
return columnWidths;
}
- protected String stringOf( Object value ) {
+ protected String stringOf( TypeSystem typeSystem,
+ Object value ) {
if (value == null) return null;
- TypeFactory<?> typeFactory = context.getTypeSystem().getTypeFactory(value);
+ if (typeSystem == null) return value.toString();
+ TypeFactory<?> typeFactory = typeSystem.getTypeFactory(value);
return typeFactory.asReadableString(value);
}
@@ -276,7 +300,8 @@
sb.append('\n');
}
- protected void printLines( StringBuilder sb,
+ protected void printLines( TypeSystem typeSystem,
+ StringBuilder sb,
int[] columnWidths,
int maxRowsToPrint ) {
int rowNumber = 1;
@@ -285,13 +310,13 @@
if (maxRowsToPrint > tuples.size()) {
// Print all tuples ...
for (Object[] tuple : getTuples()) {
- printTuple(sb, columnWidths, rowNumber, tupleLength, tuple);
+ printTuple(typeSystem, sb, columnWidths, rowNumber, tupleLength, tuple);
++rowNumber;
}
} else {
// Print max number of rows ...
for (Object[] tuple : getTuples()) {
- printTuple(sb, columnWidths, rowNumber, tupleLength, tuple);
+ printTuple(typeSystem, sb, columnWidths, rowNumber, tupleLength, tuple);
if (rowNumber >= maxRowsToPrint) break;
++rowNumber;
}
@@ -299,14 +324,8 @@
}
- /**
- * @param sb
- * @param columnWidths
- * @param rowNumber
- * @param tupleLength
- * @param tuple
- */
- private final void printTuple( StringBuilder sb,
+ private final void printTuple( TypeSystem typeSystem,
+ StringBuilder sb,
int[] columnWidths,
int rowNumber,
int tupleLength,
@@ -315,7 +334,7 @@
sb.append("| ").append(StringUtil.justifyLeft(Integer.toString(rowNumber), columnWidths[0], ' ')).append(' ');
// Print the remaining columns ...
for (int i = 0, j = 1; i != tupleLength; ++i, ++j) {
- String valueStr = stringOf(tuple[i]);
+ String valueStr = stringOf(typeSystem, tuple[i]);
valueStr = StringUtil.justifyLeft(valueStr, columnWidths[j], ' ');
sb.append('|').append(' ').append(valueStr).append(' ');
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SelectComponent.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SelectComponent.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SelectComponent.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -30,7 +30,6 @@
import java.util.Map;
import java.util.regex.Pattern;
import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.QueryResults.Columns;
import org.jboss.dna.graph.query.model.And;
import org.jboss.dna.graph.query.model.BindVariableName;
@@ -48,6 +47,7 @@
import org.jboss.dna.graph.query.model.StaticOperand;
import org.jboss.dna.graph.query.model.TypeSystem;
import org.jboss.dna.graph.query.model.TypeSystem.TypeFactory;
+import org.jboss.dna.graph.query.validate.Schemata;
/**
*/
@@ -102,7 +102,9 @@
super(delegate);
this.constraint = constraint;
this.variables = variables != null ? variables : Collections.<String, Object>emptyMap();
- this.checker = createChecker(delegate.getContext(), delegate.getColumns(), this.constraint, this.variables, analyzer);
+ TypeSystem types = delegate.getContext().getTypeSystem();
+ Schemata schemata = delegate.getContext().getSchemata();
+ this.checker = createChecker(types, schemata, delegate.getColumns(), this.constraint, this.variables, analyzer);
}
/**
@@ -128,7 +130,7 @@
/**
* Interface used to determine whether a tuple satisfies all of the constraints applied to the SELECT node.
*/
- protected static interface ConstraintChecker {
+ public static interface ConstraintChecker {
/**
* Return true if the tuple satisfies all of the constraints.
*
@@ -221,7 +223,8 @@
* Create the constraint evaluator that is used by the {@link SelectComponent} to evaluate the supplied {@link Constraint
* criteria}. For the most correct behavior, specify an {@link Analyzer} implementation.
*
- * @param context the context in which the query is being evaluated; may not be null
+ * @param types the type system; may not be null
+ * @param schemata the schemata; may not be null
* @param columns the definition of the result columns and the tuples; may not be null
* @param constraint the criteria that this {@link SelectComponent} is to evaluate
* @param variables the variables that are to be substituted for the various {@link BindVariableName} {@link StaticOperand
@@ -232,15 +235,16 @@
* @return the constraint evaluator; never null
*/
@SuppressWarnings( "unchecked" )
- protected ConstraintChecker createChecker( QueryContext context,
+ protected ConstraintChecker createChecker( final TypeSystem types,
+ Schemata schemata,
Columns columns,
Constraint constraint,
Map<String, Object> variables,
final Analyzer analyzer ) {
if (constraint instanceof Or) {
Or orConstraint = (Or)constraint;
- final ConstraintChecker left = createChecker(context, columns, orConstraint.getLeft(), variables, analyzer);
- final ConstraintChecker right = createChecker(context, columns, orConstraint.getRight(), variables, analyzer);
+ final ConstraintChecker left = createChecker(types, schemata, columns, orConstraint.getLeft(), variables, analyzer);
+ final ConstraintChecker right = createChecker(types, schemata, columns, orConstraint.getRight(), variables, analyzer);
return new ConstraintChecker() {
public boolean satisfiesConstraints( Object[] tuple ) {
return left.satisfiesConstraints(tuple) || right.satisfiesConstraints(tuple);
@@ -249,7 +253,12 @@
}
if (constraint instanceof Not) {
Not notConstraint = (Not)constraint;
- final ConstraintChecker original = createChecker(context, columns, notConstraint.getConstraint(), variables, analyzer);
+ final ConstraintChecker original = createChecker(types,
+ schemata,
+ columns,
+ notConstraint.getConstraint(),
+ variables,
+ analyzer);
return new ConstraintChecker() {
public boolean satisfiesConstraints( Object[] tuple ) {
return !original.satisfiesConstraints(tuple);
@@ -258,8 +267,8 @@
}
if (constraint instanceof And) {
And andConstraint = (And)constraint;
- final ConstraintChecker left = createChecker(context, columns, andConstraint.getLeft(), variables, analyzer);
- final ConstraintChecker right = createChecker(context, columns, andConstraint.getRight(), variables, analyzer);
+ final ConstraintChecker left = createChecker(types, schemata, columns, andConstraint.getLeft(), variables, analyzer);
+ final ConstraintChecker right = createChecker(types, schemata, columns, andConstraint.getRight(), variables, analyzer);
return new ConstraintChecker() {
public boolean satisfiesConstraints( Object[] tuple ) {
return left.satisfiesConstraints(tuple) && right.satisfiesConstraints(tuple);
@@ -387,7 +396,7 @@
Comparison comparison = (Comparison)constraint;
// Create the correct dynamic operation ...
- final DynamicOperation dynamicOperation = createDynamicOperation(context, columns, comparison.getOperand1());
+ final DynamicOperation dynamicOperation = createDynamicOperation(types, schemata, columns, comparison.getOperand1());
final String expectedType = dynamicOperation.getExpectedType();
// Determine the literal value ...
@@ -402,13 +411,12 @@
literalValue = literal.getValue();
}
// Create the correct comparator ...
- final TypeSystem typeSystem = context.getTypeSystem();
- final TypeFactory<?> typeFactory = typeSystem.getTypeFactory(expectedType);
+ final TypeFactory<?> typeFactory = types.getTypeFactory(expectedType);
assert typeFactory != null;
final Comparator<Object> comparator = (Comparator<Object>)typeFactory.getComparator();
assert comparator != null;
// Create the correct operation ...
- final TypeFactory<?> literalFactory = typeSystem.getTypeFactory(expectedType);
+ final TypeFactory<?> literalFactory = types.getTypeFactory(expectedType);
final Object rhs = literalFactory.create(literalValue);
switch (comparison.getOperator()) {
case EQUAL_TO:
@@ -449,12 +457,12 @@
};
case LIKE:
// Convert the LIKE expression to a regular expression
- final Pattern pattern = createRegexFromLikeExpression(typeSystem.asString(rhs));
+ final Pattern pattern = createRegexFromLikeExpression(types.asString(rhs));
return new ConstraintChecker() {
public boolean satisfiesConstraints( Object[] tuples ) {
Object tupleValue = dynamicOperation.evaluate(tuples);
if (tupleValue == null) return false;
- String value = typeSystem.asString(tupleValue);
+ String value = types.asString(tupleValue);
return pattern.matcher(value).matches();
}
};
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SortValuesComponent.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SortValuesComponent.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/SortValuesComponent.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -106,7 +106,10 @@
Ordering ordering ) {
assert context != null;
assert ordering != null;
- final DynamicOperation operation = createDynamicOperation(context, columns, ordering.getOperand());
+ final DynamicOperation operation = createDynamicOperation(context.getTypeSystem(),
+ context.getSchemata(),
+ columns,
+ ordering.getOperand());
final TypeSystem typeSystem = context.getTypeSystem();
final TypeFactory<?> typeFactory = typeSystem.getTypeFactory(operation.getExpectedType());
assert typeFactory != null;
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/AccessQueryRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/AccessQueryRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/AccessQueryRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -29,11 +29,13 @@
import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.common.util.HashCode;
import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.QueryResults.Statistics;
import org.jboss.dna.graph.query.model.Column;
import org.jboss.dna.graph.query.model.Constraint;
import org.jboss.dna.graph.query.model.Limit;
import org.jboss.dna.graph.query.model.SelectorName;
import org.jboss.dna.graph.query.model.Visitors;
+import org.jboss.dna.graph.query.validate.Schemata;
/**
* A {@link Request} to issue an access query a graph, where an access query is a low-level atomic query that is part of a large,
@@ -49,8 +51,8 @@
private final SelectorName tableName;
private final List<Constraint> andedConstraints;
private final Limit limit;
- private final Columns resultColumns;
private final Map<String, Object> variables;
+ private final Schemata schemata;
private final int hc;
/**
@@ -61,6 +63,7 @@
* @param resultColumns the specification of the expected columns in the result tuples
* @param andedConstraints the list of AND-ed constraints; may be empty or null if there are no constraints
* @param limit the limit on the results; may be null if there is no limit
+ * @param schemata the schemata that defines the table and columns being queried; may not be null
* @param variables the variables that are available to be substituted upon execution; may be null if there are no variables
* @throws IllegalArgumentException if the query or workspace name is null
*/
@@ -69,16 +72,18 @@
Columns resultColumns,
List<Constraint> andedConstraints,
Limit limit,
+ Schemata schemata,
Map<String, Object> variables ) {
CheckArg.isNotNull(workspace, "workspace");
CheckArg.isNotNull(tableName, "tableName");
CheckArg.isNotNull(resultColumns, "resultColumns");
this.workspaceName = workspace;
this.tableName = tableName;
- this.resultColumns = resultColumns;
this.andedConstraints = andedConstraints != null ? andedConstraints : Collections.<Constraint>emptyList();
this.variables = variables != null ? variables : EMPTY_VARIABLES;
this.limit = limit != null ? limit : Limit.NONE;
+ this.schemata = schemata;
+ this.doSetResults(resultColumns, null, null);
this.hc = HashCode.compute(workspaceName, tableName, resultColumns);
}
@@ -101,16 +106,16 @@
}
/**
- * Get the specification of the columns for the {@link #getResults() results}.
+ * Get the specification of the columns for the {@link #getTuples() results}.
*
* @return the column specifications; never null
*/
public Columns resultColumns() {
- return resultColumns;
+ return super.columns();
}
/**
- * Get the immutable list of constraints that are AND-ed together in this query. Every tuple in the {@link #getResults()
+ * Get the immutable list of constraints that are AND-ed together in this query. Every {@link #getTuples() tuple in the
* results} must satisfy <i>all</i> of these constraints.
*
* @return the AND-ed constraints; never null but possibly empty if there are no constraints
@@ -129,6 +134,15 @@
}
/**
+ * Get the schemata that defines the table structure and columns definitions available to this query.
+ *
+ * @return the schemata; never null
+ */
+ public Schemata schemata() {
+ return schemata;
+ }
+
+ /**
* Get the limit of the result tuples, which can specify a {@link Limit#getRowLimit() maximum number of rows} as well as an
* {@link Limit#getOffset() initial offset} for the first row.
*
@@ -139,6 +153,17 @@
}
/**
+ * Set the results for this request.
+ *
+ * @param tuples the result values
+ * @param statistics the statistics, or null if there are none
+ */
+ public void setResults( List<Object[]> tuples,
+ Statistics statistics ) {
+ super.doSetResults(columns(), tuples, statistics);
+ }
+
+ /**
* {@inheritDoc}
*
* @see java.lang.Object#hashCode()
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/ChangeRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/ChangeRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/ChangeRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -29,7 +29,7 @@
/**
* A Request to make changes in a graph.
*/
-public abstract class ChangeRequest extends Request {
+public abstract class ChangeRequest extends Request implements Cloneable {
private static final long serialVersionUID = 1L;
@@ -60,4 +60,15 @@
* @return the name of the workspace changed by this request
*/
public abstract String changedWorkspace();
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see java.lang.Object#clone()
+ */
+ @Override
+ public abstract ChangeRequest clone();
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneBranchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneBranchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneBranchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -328,4 +328,22 @@
return "clone branch " + from() + " in the \"" + fromWorkspace + "\" workspace into " + into() + " in the \""
+ intoWorkspace + "\" workspace as child " + desiredSegment();
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public CloneBranchRequest clone() {
+ CloneBranchRequest result = new CloneBranchRequest(actualFromLocation != null ? actualFromLocation : from, fromWorkspace,
+ actualIntoLocation != null ? actualIntoLocation : into, intoWorkspace,
+ desiredName, desiredSegment, removeExisting);
+ result.setRemovedNodes(removedExistingNodes);
+ result.setActualLocations(actualFromLocation, actualIntoLocation);
+ return result;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneWorkspaceRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneWorkspaceRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CloneWorkspaceRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -260,4 +260,23 @@
Path path ) {
return actualWorkspaceName != null && actualWorkspaceName.equals(workspace);
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public CloneWorkspaceRequest clone() {
+ CloneWorkspaceRequest request = new CloneWorkspaceRequest(
+ nameOfWorkspaceToBeCloned,
+ actualWorkspaceName != null ? actualWorkspaceName : desiredNameOfTargetWorkspace,
+ createConflictBehavior, cloneConflictBehavior);
+ request.setActualRootLocation(actualLocationOfRoot);
+ request.setActualWorkspaceName(actualWorkspaceName);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -339,5 +339,4 @@
buff.append("]");
return buff.toString();
}
-
}
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequestChannel.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequestChannel.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequestChannel.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,454 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.request;
+
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.GraphI18n;
+import org.jboss.dna.graph.connector.RepositoryConnection;
+import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
+import org.jboss.dna.graph.request.processor.RequestProcessor;
+
+/**
+ * A channel for Request objects that can be submitted to a consumer (typically a {@link RequestProcessor} or
+ * {@link RepositoryConnection}) while allowing the channel owner to continue adding more Request objects into the channel.
+ * <p>
+ * The owner of this channel is responsible for starting the processing using one of the two <code>start(...)</code> methods,
+ * adding {@link Request}s via <code>add(...)</code> methods, {@link #close() closing} the channel when there are no more requests
+ * to be added, and finally {@link #await() awaiting} until all of the submitted requests have been processed. Note that the owner
+ * can optionally pre-fill the channel with Request objects before calling <code>start(...)</code>.
+ * </p>
+ * <p>
+ * The consumer will be handed a {@link CompositeRequest}, and should use the {@link CompositeRequest#iterator()} method to obtain
+ * an Iterator<Request>. The {@link Iterator#hasNext()} method will block until there is another Request available in the
+ * channel, or until the channel is closed (at which point {@link Iterator#hasNext()} will return false). (Notice that the
+ * {@link Iterator#next()} method will also block if it is not preceded by a {@link Iterator#hasNext()}, but will throw a
+ * {@link NoSuchElementException} when there are no more Request objects and the channel is closed.)
+ * </p>
+ * <p>
+ * Because the CompositeRequest's iterator will block, the consumer will block while processing the request. Therefore, this
+ * channel submits the CompositeRequest to the consumer asynchronously, via an {@link ExecutorService} supplied in one of the two
+ * {@link #start(ExecutorService, ExecutionContext, RepositoryConnectionFactory) start}
+ * {@link #start(ExecutorService, RequestProcessor, boolean) methods}.
+ * </p>
+ */
+public class CompositeRequestChannel {
+
+ protected final String sourceName;
+ /** The list of all requests that are or have been processed as part of this channel */
+ protected final LinkedList<Request> allRequests = new LinkedList<Request>();
+ /** The queue of requests that remain unprocessed */
+ private final BlockingQueue<Request> queue = new LinkedBlockingQueue<Request>();
+ /** The CompositeRequest that is submitted to the underlying processor */
+ protected final CompositeRequest composite;
+ /**
+ * The Future that is submitted to the ExecutorService to do the processing, which is used to {@link #await()} until the
+ * processing is completed or {@link #cancel(boolean) cancel} the work
+ */
+ protected Future<String> future;
+ /** Flag that defines whether the channel has processed all requests */
+ protected final AtomicBoolean closed = new AtomicBoolean(false);
+ protected Throwable compositeError = null;
+
+ /**
+ * Create a new channel with the supplied channel name.
+ *
+ * @param sourceName the name of the repository source used to execute this channel's {@link #allRequests() requests}; may not
+ * be null or empty
+ */
+ public CompositeRequestChannel( final String sourceName ) {
+ assert sourceName != null;
+ this.sourceName = sourceName;
+ this.composite = new ChannelCompositeRequest();
+ }
+
+ /**
+ * Utility method to create an iterator over the requests in this channel. This should only be called once, since the
+ * returned iterator consumes the requests from the channel's queue.
+ *
+ * @return the iterator over the requests
+ */
+ protected Iterator<Request> createIterator() {
+ final BlockingQueue<Request> queue = this.queue;
+ return new Iterator<Request>() {
+ private Request next;
+
+ public boolean hasNext() {
+ // If next still has a request, then 'hasNext()' has been called multiple times in a row
+ if (next != null) return true;
+
+ // Now, block for a next item (this blocks) ...
+ try {
+ next = queue.take();
+ } catch (InterruptedException e) {
+ // This happens when the federated connector has been told to shutdown now, and it shuts down
+ // its executor (the worker pool) immediately by interrupting each in-use thread.
+ // In this case, we should consider there to be no more requests ...
+ try {
+ return false;
+ } finally {
+ // reset the interrupted status ...
+ Thread.interrupted();
+ }
+ }
+ if (next instanceof LastRequest) {
+ return false;
+ }
+ return next != null;
+ }
+
+ public Request next() {
+ if (next == null) {
+ // Must have been called without first calling 'hasNext()' ...
+ try {
+ next = queue.take();
+ } catch (InterruptedException e) {
+ // This happens when the federated connector has been told to shutdown now, and it shuts down
+ // its executor (the worker pool) immediately by interrupting each in-use thread.
+ // In this case, we should consider there to be no more requests (again, this case
+ // is when 'next()' has been called without calling 'hasNext()') ...
+ try {
+ throw new NoSuchElementException();
+ } finally {
+ // reset the interrupted status ...
+ Thread.interrupted();
+ }
+ }
+ }
+ if (next == null) {
+ throw new NoSuchElementException();
+ }
+ Request result = next;
+ next = null;
+ return result;
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+
+ /**
+ * Begins processing any requests that have been {@link #add(Request) added} to this channel. Processing is done by submitting
+ * the channel to the supplied executor.
+ *
+ * @param executor the executor that is to do the work; may not be null
+ * @param context the execution context in which the work is to be performed; may not be null
+ * @param connectionFactory the connection factory that should be used to create connections; may not be null
+ * @throws IllegalStateException if this channel has already been started
+ */
+ public void start( final ExecutorService executor,
+ final ExecutionContext context,
+ final RepositoryConnectionFactory connectionFactory ) {
+ assert executor != null;
+ assert context != null;
+ assert connectionFactory != null;
+ assert sourceName != null;
+ if (this.future != null) {
+ throw new IllegalStateException();
+ }
+ this.future = executor.submit(new Callable<String>() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.util.concurrent.Callable#call()
+ */
+ public String call() throws Exception {
+ final RepositoryConnection connection = connectionFactory.createConnection(sourceName);
+ assert connection != null;
+ try {
+ connection.execute(context, composite);
+ } finally {
+ connection.close();
+ }
+ return sourceName;
+ }
+ });
+ }
+
+ /**
+ * Begins processing any requests that have been {@link #add(Request) added} to this channel. Processing is done by submitting
+ * the channel to the supplied executor.
+ *
+ * @param executor the executor that is to do the work; may not be null
+ * @param processor the request processor that will be used to execute the requests; may not be null
+ * @param closeProcessorWhenCompleted true if this method should call {@link RequestProcessor#close()} when the channel is
+ * completed, or false if the caller is responsible for doing this
+ * @throws IllegalStateException if this channel has already been started
+ */
+ public void start( final ExecutorService executor,
+ final RequestProcessor processor,
+ final boolean closeProcessorWhenCompleted ) {
+ assert executor != null;
+ assert processor != null;
+ if (this.future != null) {
+ throw new IllegalStateException();
+ }
+ this.future = executor.submit(new Callable<String>() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.util.concurrent.Callable#call()
+ */
+ public String call() throws Exception {
+ try {
+ processor.process(composite);
+ } finally {
+ if (closeProcessorWhenCompleted) processor.close();
+ }
+ return sourceName;
+ }
+ });
+ }
+
+ /**
+ * Add the request to this channel for asynchronous processing.
+ *
+ * @param request the request to be submitted; may not be null
+ * @throws IllegalStateException if this channel has already been {@link #close() closed}
+ */
+ public void add( Request request ) {
+ if (closed.get()) {
+ throw new IllegalStateException(GraphI18n.unableToAddRequestToChannelThatIsDone.text(sourceName, request));
+ }
+ assert request != null;
+ this.allRequests.add(request);
+ this.queue.add(request);
+ }
+
+ /**
+ * Add the request to this channel for asynchronous processing, and supply a {@link CountDownLatch count-down latch} that
+ * should be {@link CountDownLatch#countDown() decremented} when this request is completed.
+ *
+ * @param request the request to be submitted; may not be null
+ * @param latch the count-down latch that should be decremented when <code>request</code> has been completed; may not be null
+ * @return the same latch that was supplied, for method chaining purposes; never null
+ * @throws IllegalStateException if this channel has already been {@link #close() closed}
+ */
+ public CountDownLatch add( Request request,
+ CountDownLatch latch ) {
+ if (closed.get()) {
+ throw new IllegalStateException(GraphI18n.unableToAddRequestToChannelThatIsDone.text(sourceName, request));
+ }
+ assert request != null;
+ assert latch != null;
+ // Submit the request for processing ...
+ this.allRequests.add(request);
+ request.setLatchForFreezing(latch);
+ this.queue.add(request);
+ return latch;
+ }
+
+ /**
+ * Add the request to this channel for processing, but wait to return until the request has been processed.
+ *
+ * @param request the request to be submitted; may not be null
+ * @throws InterruptedException if the current thread is interrupted while waiting
+ */
+ public void addAndAwait( Request request ) throws InterruptedException {
+ // Add the request with a latch, then block until the request has completed ...
+ add(request, new CountDownLatch(1)).await();
+ }
+
+ /**
+ * Mark this source as having no more requests to process.
+ */
+ public void close() {
+ this.closed.set(true);
+ this.queue.add(new LastRequest());
+ }
+
+ /**
+ * Return whether this channel has been {@link #close() closed}.
+ *
+ * @return true if the channel was marked as done, or false otherwise
+ */
+ public boolean isClosed() {
+ return closed.get();
+ }
+
+ /**
+ * Cancel this forked channel, stopping work as soon as possible. If the channel has not yet been started, this method
+ * does nothing.
+ *
+ * @param mayInterruptIfRunning true if the channel is still being worked on, and the thread on which it is being worked may
+ * be interrupted, or false if the channel should be allowed to finish if it is already in work.
+ */
+ public void cancel( boolean mayInterruptIfRunning ) {
+ if (this.future == null || this.future.isDone() || this.future.isCancelled()) return;
+
+ // Mark the composite as cancelled first, so that the next composed request will be marked as
+ // cancelled.
+ this.composite.cancel();
+
+ // Now mark the channel as being done ...
+ close();
+
+ // Now, mark the channel as being cancelled (do allow interrupting the worker thread) ...
+ this.future.cancel(mayInterruptIfRunning);
+ }
+
+ /**
+ * Return whether this channel has been {@link #start(ExecutorService, ExecutionContext, RepositoryConnectionFactory) started}
+ * .
+ *
+ * @return true if this channel was started, or false otherwise
+ */
+ public boolean isStarted() {
+ return this.future != null;
+ }
+
+ /**
+ * Return whether this channel has completed all of its work.
+ *
+ * @return true if the channel was started and is complete, or false otherwise
+ */
+ public boolean isComplete() {
+ return this.future != null && this.future.isDone();
+ }
+
+ /**
+ * Await until this channel has completed.
+ *
+ * @throws CancellationException if the channel was cancelled
+ * @throws ExecutionException if the channel execution threw an exception
+ * @throws InterruptedException if the current thread is interrupted while waiting
+ */
+ public void await() throws ExecutionException, InterruptedException, CancellationException {
+ this.future.get();
+ }
+
+ /**
+ * Get all the requests that were submitted to this queue. The resulting list is the actual list that is appended when
+ * requests are added, and may change until the channel is {@link #close() closed}.
+ *
+ * @return all of the requests that were submitted to this channel; never null
+ */
+ public List<Request> allRequests() {
+ return allRequests;
+ }
+
+ /**
+ * Get the name of the source that this channel uses.
+ *
+ * @return the source name; never null
+ */
+ public String sourceName() {
+ return sourceName;
+ }
+
+ protected class ChannelCompositeRequest extends CompositeRequest {
+ private static final long serialVersionUID = 1L;
+ private final LinkedList<Request> allRequests = CompositeRequestChannel.this.allRequests;
+
+ protected ChannelCompositeRequest() {
+ super(false);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.CompositeRequest#iterator()
+ */
+ @Override
+ public Iterator<Request> iterator() {
+ return createIterator();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.CompositeRequest#getRequests()
+ */
+ @Override
+ public List<Request> getRequests() {
+ return allRequests;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.CompositeRequest#size()
+ */
+ @Override
+ public int size() {
+ return closed.get() ? allRequests.size() : CompositeRequest.UNKNOWN_NUMBER_OF_REQUESTS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.Request#cancel()
+ */
+ @Override
+ public void cancel() {
+ closed.set(true);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.Request#setError(java.lang.Throwable)
+ */
+ @Override
+ public void setError( Throwable error ) {
+ compositeError = error;
+ super.setError(error);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.Request#hasError()
+ */
+ @Override
+ public boolean hasError() {
+ return compositeError != null || super.hasError();
+ }
+ }
+
+ /**
+ * A pseudo Request that is used by the {@link CompositeRequestChannel} to insert into a request queue so that the queue's
+ * iterator knows when there are no more requests to process.
+ */
+ protected static class LastRequest extends Request {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public boolean isReadOnly() {
+ return false;
+ }
+ }
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CompositeRequestChannel.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CopyBranchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CopyBranchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CopyBranchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -343,4 +343,21 @@
return "copy branch " + from() + " in the \"" + fromWorkspace + "\" workspace into " + into() + " in the \""
+ intoWorkspace + "\" workspace";
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public CopyBranchRequest clone() {
+ CopyBranchRequest result = new CopyBranchRequest(actualFromLocation != null ? actualFromLocation : from, fromWorkspace,
+ actualIntoLocation != null ? actualIntoLocation : into, intoWorkspace,
+ desiredNameForCopy, nodeConflictBehavior);
+ result.setActualLocations(actualFromLocation, actualIntoLocation);
+ return result;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateNodeRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateNodeRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateNodeRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -388,4 +388,18 @@
+ properties();
}
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public CreateNodeRequest clone() {
+ CreateNodeRequest request = new CreateNodeRequest(under, workspaceName, childName, conflictBehavior, properties);
+ request.setActualLocationOfNode(actualLocation);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateWorkspaceRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateWorkspaceRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/CreateWorkspaceRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -221,4 +221,22 @@
Path path ) {
return actualWorkspaceName != null && actualWorkspaceName.equals(workspace);
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public CreateWorkspaceRequest clone() {
+ CreateWorkspaceRequest request = new CreateWorkspaceRequest(
+ actualWorkspaceName != null ? actualWorkspaceName : desiredNameOfNewWorkspace,
+ createConflictBehavior);
+ request.setActualWorkspaceName(actualWorkspaceName);
+ request.setActualRootLocation(actualLocationOfRoot);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteBranchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteBranchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteBranchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -191,4 +191,19 @@
public String toString() {
return "delete branch " + at() + " in the \"" + workspaceName + "\" workspace";
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public DeleteBranchRequest clone() {
+ DeleteBranchRequest request = new DeleteBranchRequest(actualLocation != null ? actualLocation : at, workspaceName);
+ request.setActualLocationOfNode(actualLocation);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteChildrenRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteChildrenRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DeleteChildrenRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -256,4 +256,17 @@
public String toString() {
return "delete nodes below " + at() + " in the \"" + workspaceName + "\" workspace";
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public DeleteChildrenRequest clone() {
+ return new DeleteChildrenRequest(at, workspaceName);
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DestroyWorkspaceRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DestroyWorkspaceRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/DestroyWorkspaceRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -153,4 +153,17 @@
Path path ) {
return workspaceName().equals(workspace);
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public DestroyWorkspaceRequest clone() {
+ return new DestroyWorkspaceRequest(workspaceName);
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/FullTextSearchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/FullTextSearchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/FullTextSearchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -23,7 +23,10 @@
*/
package org.jboss.dna.graph.request;
+import java.util.List;
import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.QueryResults.Statistics;
/**
* A {@link Request} to perform a full-text search on a graph.
@@ -34,20 +37,31 @@
private final String expression;
private final String workspaceName;
+ private final int maxResults;
+ private final int offset;
/**
* Create a new request to execute the supplied query against the name workspace.
*
* @param fullTextSearch the full-text search to be performed; may not be null
* @param workspace the name of the workspace to be queried
- * @throws IllegalArgumentException if the query or workspace name is null
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
+ * @throws IllegalArgumentException if the query or workspace name is null, if the maxResults is not positive, or if the
+ * offset is negative
*/
public FullTextSearchRequest( String fullTextSearch,
- String workspace ) {
+ String workspace,
+ int maxResults,
+ int offset ) {
CheckArg.isNotEmpty(fullTextSearch, "fullTextSearch");
CheckArg.isNotNull(workspace, "workspace");
+ CheckArg.isPositive(maxResults, "maxResults");
+ CheckArg.isNonNegative(offset, "offset");
this.expression = fullTextSearch;
this.workspaceName = workspace;
+ this.maxResults = maxResults;
+ this.offset = offset;
}
/**
@@ -69,6 +83,25 @@
}
/**
+ * Get the maximum number of results that should be returned.
+ *
+ * @return the maximum number of results that are to be returned; always positive
+ */
+ public int maxResults() {
+ return maxResults;
+ }
+
+ /**
+ * Get the number of initial search results that should be excluded from the {@link #getTuples() tuples} included on this
+ * request.
+ *
+ * @return the number of initial results to skip, or 0 if the first results are to be returned
+ */
+ public int offset() {
+ return offset;
+ }
+
+ /**
* {@inheritDoc}
*
* @see java.lang.Object#hashCode()
@@ -79,6 +112,28 @@
}
/**
+ * Get the specification of the columns for the {@link #getTuples() results}.
+ *
+ * @return the column specifications; never null
+ */
+ public Columns getResultColumns() {
+ return super.columns();
+ }
+
+ /**
+ * Set the results for this request.
+ *
+ * @param resultColumns the definition of the result columns
+ * @param tuples the result values
+ * @param statistics the statistics, or null if there are none
+ */
+ public void setResults( Columns resultColumns,
+ List<Object[]> tuples,
+ Statistics statistics ) {
+ super.doSetResults(resultColumns, tuples, statistics);
+ }
+
+ /**
* {@inheritDoc}
*
* @see java.lang.Object#equals(java.lang.Object)
@@ -90,6 +145,8 @@
FullTextSearchRequest that = (FullTextSearchRequest)obj;
if (!this.expression().equals(that.expression())) return false;
if (!this.workspace().equals(that.workspace())) return false;
+ if (this.offset() != that.offset()) return false;
+ if (this.maxResults() != that.maxResults()) return false;
return true;
}
return false;
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/LockBranchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/LockBranchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/LockBranchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -229,4 +229,19 @@
+ " in the \"" + workspaceName + "\" workspace";
}
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public LockBranchRequest clone() {
+ LockBranchRequest request = new LockBranchRequest(actualLocation != null ? actualLocation : at, workspaceName, isDeep,
+ lockTimeoutInMillis);
+ request.setActualLocation(actualLocation);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/MoveBranchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/MoveBranchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/MoveBranchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -372,4 +372,20 @@
return "move branch " + from() + " in the \"" + inWorkspace() + "\" workspace into "
+ (into() == null ? "before " + before() : "into " + into());
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public MoveBranchRequest clone() {
+ MoveBranchRequest request = new MoveBranchRequest(actualOldLocation != null ? actualOldLocation : from, into, before,
+ workspaceName, desiredNameForNode, conflictBehavior);
+ request.setActualLocations(actualOldLocation, actualNewLocation);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RemovePropertyRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RemovePropertyRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RemovePropertyRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -208,4 +208,20 @@
public String toString() {
return "remove property " + propertyName() + " from " + from() + " in the \"" + workspaceName + "\" workspace";
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public RemovePropertyRequest clone() {
+ RemovePropertyRequest request = new RemovePropertyRequest(actualLocation != null ? actualLocation : from, workspaceName,
+ propertyName);
+ request.setActualLocationOfNode(actualLocation);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RenameNodeRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RenameNodeRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RenameNodeRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -243,4 +243,20 @@
return "rename node at " + at() + " in the \"" + workspaceName + "\" workspace to " + toName();
}
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public RenameNodeRequest clone() {
+ RenameNodeRequest request = new RenameNodeRequest(actualOldLocation != null ? actualOldLocation : at, workspaceName,
+ newName);
+ request.setActualLocations(actualOldLocation, actualNewLocation);
+ return request;
+ }
+
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RequestBuilder.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RequestBuilder.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/RequestBuilder.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -657,11 +657,15 @@
*
* @param workspaceName the name of the workspace containing the node
* @param fullTextSearchExpression the full-text search expression
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
* @return the request; never null
* @throws IllegalArgumentException if any of the parameters are null or if the expression is empty
*/
public FullTextSearchRequest search( String workspaceName,
- String fullTextSearchExpression ) {
- return process(new FullTextSearchRequest(fullTextSearchExpression, workspaceName));
+ String fullTextSearchExpression,
+ int maxResults,
+ int offset ) {
+ return process(new FullTextSearchRequest(fullTextSearchExpression, workspaceName, maxResults, offset));
}
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SearchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SearchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SearchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -23,7 +23,9 @@
*/
package org.jboss.dna.graph.request;
-import org.jboss.dna.graph.query.QueryResults;
+import java.util.List;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.QueryResults.Statistics;
/**
* A {@link Request} to search or query a graph.
@@ -32,7 +34,9 @@
private static final long serialVersionUID = 1L;
- private QueryResults results;
+ private Columns columns;
+ private List<Object[]> tuples;
+ private Statistics statistics;
/**
* {@inheritDoc}
@@ -47,22 +51,46 @@
/**
* Set the results for this request.
*
- * @param results the results
+ * @param resultColumns the definition of the result columns
+ * @param tuples the result values
+ * @param statistics the statistics, or null if there are none
*/
- public void setResults( QueryResults results ) {
- this.results = results;
+ protected void doSetResults( Columns resultColumns,
+ List<Object[]> tuples,
+ Statistics statistics ) {
+ this.columns = resultColumns;
+ this.tuples = tuples;
+ this.statistics = statistics;
}
/**
+ * Get the specification of the columns for the results.
+ *
+ * @return the column specifications; never null
+ */
+ protected Columns columns() {
+ return columns;
+ }
+
+ /**
* Get the results of this query.
*
* @return the results of the query, or null if this request has not been processed
*/
- public QueryResults getResults() {
- return results;
+ public List<Object[]> getTuples() {
+ return tuples;
}
/**
+ * Get the statistics that describe the time metrics for this query.
+ *
+ * @return the statistics; may be null if there are no statistics
+ */
+ public Statistics getStatistics() {
+ return statistics;
+ }
+
+ /**
* {@inheritDoc}
*
* @see org.jboss.dna.graph.request.Request#cancel()
@@ -70,7 +98,8 @@
@Override
public void cancel() {
super.cancel();
- this.results = null;
+ this.tuples = null;
+ this.statistics = null;
}
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SetPropertyRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SetPropertyRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/SetPropertyRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -233,4 +233,20 @@
return "set property " + property().getName() + " on " + on() + " in the \"" + workspaceName + "\" workspace to "
+ (values == null ? "null" : Arrays.asList(values).toString());
}
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public SetPropertyRequest clone() {
+ SetPropertyRequest request = new SetPropertyRequest(actualLocation != null ? actualLocation : on, workspaceName, property);
+ request.setActualLocationOfNode(actualLocation);
+ request.setNewProperty(actualCreation);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UnlockBranchRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UnlockBranchRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UnlockBranchRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -192,4 +192,18 @@
return "unlock branch at " + at() + " in the \"" + workspaceName + "\" workspace";
}
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public UnlockBranchRequest clone() {
+ UnlockBranchRequest request = new UnlockBranchRequest(actualLocation != null ? actualLocation : at, workspaceName);
+ request.setActualLocation(actualLocation);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdatePropertiesRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdatePropertiesRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdatePropertiesRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -60,6 +60,7 @@
private final Location on;
private final String workspaceName;
private final Map<Name, Property> properties;
+ private final boolean removeOtherProperties;
private Set<Name> createdPropertyNames;
private Location actualLocation;
@@ -74,12 +75,29 @@
public UpdatePropertiesRequest( Location on,
String workspaceName,
Map<Name, Property> properties ) {
+ this(on, workspaceName, properties, false);
+ }
+
+ /**
+ * Create a request to update the properties on the node at the supplied location.
+ *
+ * @param on the location of the node to be read
+ * @param workspaceName the name of the workspace containing the node
+ * @param properties the map of properties (keyed by their name), which is reused without copying
+ * @param removeOtherProperties if any properties not being updated should be removed
+ * @throws IllegalArgumentException if the location or workspace name is null or if there are no properties to update
+ */
+ public UpdatePropertiesRequest( Location on,
+ String workspaceName,
+ Map<Name, Property> properties,
+ boolean removeOtherProperties ) {
CheckArg.isNotNull(on, "on");
CheckArg.isNotEmpty(properties, "properties");
CheckArg.isNotNull(workspaceName, "workspaceName");
this.workspaceName = workspaceName;
this.on = on;
this.properties = Collections.unmodifiableMap(properties);
+ this.removeOtherProperties = removeOtherProperties;
}
/**
@@ -121,6 +139,16 @@
}
/**
+ * Return whether any properties not being updated should be removed.
+ *
+ * @return true if the node's existing properties that are not updated by this request should be removed, or false if
+ * this request should leave the other properties unchanged
+ */
+ public boolean removeOtherProperties() {
+ return removeOtherProperties;
+ }
+
+ /**
* Sets the actual and complete location of the node being updated. This method must be called when processing the request,
* and the actual location must have a {@link Location#getPath() path}.
*
@@ -346,7 +374,28 @@
*/
@Override
public String toString() {
+ if (removeOtherProperties) {
+ return "update (and remove other) properties on " + on() + " in the \"" + workspaceName + "\" workspace to "
+ + properties();
+ }
return "update properties on " + on() + " in the \"" + workspaceName + "\" workspace to " + properties();
}
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public UpdatePropertiesRequest clone() {
+ UpdatePropertiesRequest request = new UpdatePropertiesRequest(actualLocation != null ? actualLocation : on,
+ workspaceName, properties, removeOtherProperties);
+ request.setActualLocationOfNode(actualLocation);
+ request.setNewProperties(createdPropertyNames);
+ return request;
+ }
+
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdateValuesRequest.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdateValuesRequest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/request/UpdateValuesRequest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -29,7 +29,7 @@
public class UpdateValuesRequest extends ChangeRequest {
private static final long serialVersionUID = 1L;
-
+
private final String workspaceName;
private final Location on;
private final Name propertyName;
@@ -39,19 +39,18 @@
private Location actualLocation;
private List<Object> actualAddedValues;
private List<Object> actualRemovedValues;
-
-
+
public UpdateValuesRequest( String workspaceName,
Location on,
Name propertyName,
List<Object> addedValues,
List<Object> removedValues ) {
super();
-
+
assert workspaceName != null;
assert on != null;
assert propertyName != null;
-
+
this.workspaceName = workspaceName;
this.on = on;
this.propertyName = propertyName;
@@ -85,7 +84,7 @@
public String inWorkspace() {
return workspaceName;
}
-
+
/**
* Get the list of values to be added.
*
@@ -94,7 +93,7 @@
public List<Object> addedValues() {
return addedValues;
}
-
+
/**
* Get the list of values to be removed.
*
@@ -103,7 +102,7 @@
public List<Object> removedValues() {
return removedValues;
}
-
+
@Override
public Location changedLocation() {
return on;
@@ -125,7 +124,9 @@
return addedValues.isEmpty() && removedValues.isEmpty();
}
- public void setActualLocation(Location actual, List<Object> actualAddedValues, List<Object> actualRemovedValues) {
+ public void setActualLocation( Location actual,
+ List<Object> actualAddedValues,
+ List<Object> actualRemovedValues ) {
checkNotFrozen();
if (!on.equals(actual)) { // not same if actual is null
throw new IllegalArgumentException(GraphI18n.actualLocationNotEqualToInputLocation.text(actual, on));
@@ -141,11 +142,11 @@
assert actualAddedValues.size() <= addedValues.size();
assert actualRemovedValues != null;
assert actualRemovedValues.size() <= actualRemovedValues.size();
-
+
this.actualAddedValues = actualAddedValues;
this.actualRemovedValues = actualRemovedValues;
}
-
+
/**
* Get the actual location of the node that was updated.
*
@@ -172,5 +173,21 @@
*/
public List<Object> getActualRemovedValues() {
return actualRemovedValues;
- }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method does not clone the results.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.ChangeRequest#clone()
+ */
+ @Override
+ public UpdateValuesRequest clone() {
+ UpdateValuesRequest request = new UpdateValuesRequest(workspaceName, actualLocation != null ? actualLocation : on,
+ propertyName, addedValues, removedValues);
+ request.setActualLocation(actualLocation, actualAddedValues, actualRemovedValues);
+ return request;
+ }
}
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -23,169 +23,165 @@
*/
package org.jboss.dna.graph.search;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import net.jcip.annotations.GuardedBy;
import net.jcip.annotations.ThreadSafe;
-import org.jboss.dna.common.i18n.I18n;
import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.Graph;
import org.jboss.dna.graph.GraphI18n;
import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Subgraph;
-import org.jboss.dna.graph.SubgraphNode;
import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
import org.jboss.dna.graph.connector.RepositorySource;
import org.jboss.dna.graph.connector.RepositorySourceException;
+import org.jboss.dna.graph.observe.Observer;
import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
import org.jboss.dna.graph.request.ChangeRequest;
import org.jboss.dna.graph.request.InvalidWorkspaceException;
-import org.jboss.dna.graph.search.SearchProvider.Session;
/**
* A component that acts as a search engine for the content within a single {@link RepositorySource}. This engine manages a set of
* indexes and provides search functionality for each of the workspaces within the source, and provides various methods to
* (re)index the content contained with source's workspaces and keep the indexes up-to-date via changes.
+ *
+ * @param <WorkspaceType> the workspace type
+ * @param <ProcessorType> the processor type
*/
@ThreadSafe
-public class SearchEngine {
+public abstract class SearchEngine<WorkspaceType extends SearchEngineWorkspace, ProcessorType extends SearchEngineProcessor<WorkspaceType>> {
- /**
- * The default maximum number of changes that can be made to an index before the indexes are automatically optimized is * * *
- * * {@value}
- */
- public static final int DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION = 0;
+ public static final boolean DEFAULT_VERIFY_WORKSPACE_IN_SOURCE = true;
- protected final ExecutionContext context;
- private final String sourceName;
+ private final boolean verifyWorkspaceInSource;
private final RepositoryConnectionFactory connectionFactory;
- protected final SearchProvider indexLayout;
- private final int maxChangesBeforeAutomaticOptimization;
- @GuardedBy( "workspacesLock" )
- private final Map<String, Workspace> workspacesByName = new HashMap<String, Workspace>();
- private final ReadWriteLock workspacesLock = new ReentrantReadWriteLock();
+ private final String sourceName;
+ private volatile Workspaces<WorkspaceType> workspaces;
/**
- * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
- * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
- * {@link SearchProvider search provider}.
+ * Create a new provider instance that can be used to manage the indexes for the workspaces in a single source.
*
- * @param context the execution context for indexing and optimization operations
- * @param sourceName the name of the {@link RepositorySource}
- * @param connectionFactory the connection factory
- * @param indexLayout the specification of the Lucene index layout
- * @param maxChangesBeforeAutomaticOptimization the number of changes that can be made to the index before the indexes are
- * automatically optimized; may be 0 or a negative number if no automatic optimization should be done
- * @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
+ * @param sourceName the name of the source that can be searched; never null
+ * @param connectionFactory the connection factory; may be null if the engine can operate without connecting to the source
*/
- public SearchEngine( ExecutionContext context,
- String sourceName,
- RepositoryConnectionFactory connectionFactory,
- SearchProvider indexLayout,
- int maxChangesBeforeAutomaticOptimization ) {
- CheckArg.isNotNull(context, "context");
- CheckArg.isNotNull(sourceName, "sourceName");
- CheckArg.isNotNull(connectionFactory, "connectionFactory");
- this.sourceName = sourceName;
- this.connectionFactory = connectionFactory;
- this.indexLayout = indexLayout;
- this.context = context;
- this.maxChangesBeforeAutomaticOptimization = maxChangesBeforeAutomaticOptimization < 0 ? 0 : maxChangesBeforeAutomaticOptimization;
+ protected SearchEngine( String sourceName,
+ RepositoryConnectionFactory connectionFactory ) {
+ this(sourceName, connectionFactory, DEFAULT_VERIFY_WORKSPACE_IN_SOURCE);
}
/**
- * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
- * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
- * {@link SearchProvider search provider} that defines where each workspace's indexes should be placed.
+ * Create a new provider instance that can be used to manage the indexes for the workspaces in a single source.
*
- * @param context the execution context for indexing and optimization operations
- * @param sourceName the name of the {@link RepositorySource}
- * @param connectionFactory the connection factory
- * @param indexLayout the specification of the Lucene index layout
- * @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
+ * @param sourceName the name of the source that can be searched; never null
+ * @param connectionFactory the connection factory; may be null if the engine can operate without connecting to the source
+ * @param verifyWorkspaceInSource true if the workspaces are to be verified by checking the original source
+ * @throws IllegalArgumentException if any of the parameters are null
*/
- public SearchEngine( ExecutionContext context,
- String sourceName,
- RepositoryConnectionFactory connectionFactory,
- SearchProvider indexLayout ) {
- this(context, sourceName, connectionFactory, indexLayout, DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION);
+ protected SearchEngine( String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ boolean verifyWorkspaceInSource ) {
+ CheckArg.isNotNull(sourceName, "sourceName");
+ CheckArg.isNotNull(connectionFactory, "connectionFactory");
+ this.sourceName = sourceName;
+ this.connectionFactory = connectionFactory;
+ this.verifyWorkspaceInSource = verifyWorkspaceInSource;
+ this.workspaces = new SearchWorkspaces(connectionFactory);
}
/**
- * Get the name of the RepositorySource that this engine is to use.
+ * Get the name of the source that can be searched with an engine that uses this provider.
*
- * @return the source name; never null
+ * @return the name of the source that is to be searchable; never null
*/
public String getSourceName() {
return sourceName;
}
/**
- * Get the context in which all indexing operations execute.
+ * Determine whether the workspaces should be verified with the original source before creating indexes for them.
*
- * @return the execution context; never null
+ * @return true if verification should be performed, or false otherwise
*/
- public ExecutionContext getContext() {
- return context;
+ public boolean isVerifyWorkspaceInSource() {
+ return verifyWorkspaceInSource;
}
/**
- * @return maxChangesBeforeAutomaticOptimization
+ * Obtain a graph to the source for which this engine exists.
+ *
+ * @param context the context in which the graph operations should be performed; never null
+ * @return the graph; never null
+ * @throws RepositorySourceException if a connection to the source cannot be established
*/
- public int getMaxChangesBeforeAutomaticOptimization() {
- return maxChangesBeforeAutomaticOptimization;
+ protected Graph graph( ExecutionContext context ) {
+ assert context != null;
+ return Graph.create(sourceName, connectionFactory, context);
}
/**
- * Utility to create a Graph for the source.
+ * Create the index(es) required for the named workspace.
*
- * @return the graph instance; never null
+ * @param context the context in which the operation is to be performed; may not be null
+ * @param workspaceName the name of the workspace; may not be null
+ * @return the workspace; never null
+ * @throws SearchEngineException if there is a problem creating the workspace.
*/
- final Graph graph() {
- return Graph.create(sourceName, connectionFactory, context);
- }
+ protected abstract WorkspaceType createWorkspace( ExecutionContext context,
+ String workspaceName ) throws SearchEngineException;
/**
- * Utility to obtain the root path.
+ * Create the {@link SearchEngineProcessor} implementation that can be used to operate against the
+ * {@link SearchEngineWorkspace} instances.
+ * <p>
+ * Note that the resulting processor must be {@link SearchEngineProcessor#close() closed} by the caller when completed.
+ * </p>
*
- * @return the root path; never null
+ * @param context the context in which the processor is to be used; never null
+ * @param workspaces the set of existing search workspaces; never null
+ * @param observer the observer of any events created by the processor; may be null
+ * @param readOnly true if the processor will only be reading or searching, or false if the processor will be used to update
+ * the workspaces
+ * @return the processor; may not be null
*/
- final Path rootPath() {
- return context.getValueFactories().getPathFactory().createRootPath();
- }
+ protected abstract ProcessorType createProcessor( ExecutionContext context,
+ Workspaces<WorkspaceType> workspaces,
+ Observer observer,
+ boolean readOnly );
/**
- * Utility to obtain a readable string representation of the supplied path.
+ * Create the {@link SearchEngineProcessor} implementation that can be used to operate against the
+ * {@link SearchEngineWorkspace} instances.
+ * <p>
+ * Note that the resulting processor must be {@link SearchEngineProcessor#close() closed} by the caller when completed.
+ * </p>
*
- * @param path the path
- * @return the readable string representation; may be null if path is null
+ * @param context the context in which the processor is to be used; never null
+ * @param observer the observer of any events created by the processor; may be null
+ * @param readOnly true if the processor will only be reading or searching, or false if the processor will be used to update
+ * the workspaces
+ * @return the processor; may not be null
*/
- final String readable( Path path ) {
- return context.getValueFactories().getStringFactory().create(path);
+ public ProcessorType createProcessor( ExecutionContext context,
+ Observer observer,
+ boolean readOnly ) {
+ return createProcessor(context, workspaces, observer, readOnly);
}
/**
- * Index all of the content at or below the supplied path in the named workspace within the {@link #getSourceName() source}.
- * If the starting point is the root node, then this method will drop the existing index(es) and rebuild from the content in
- * the workspace and source.
+ * Utility method to index all of the content at or below the supplied path in the named workspace within the
+ * {@link #getSourceName() source}. If the starting point is the root node, then this method will drop the existing index(es)
+ * and rebuild from the content in the workspace and source.
* <p>
* This method operates synchronously and returns when the requested indexing is completed.
* </p>
*
+ * @param context the context in which the operation is to be performed; may not be null
* @param workspaceName the name of the workspace
* @param startingPoint the location that represents the content to be indexed; must have a path
* @param depthPerRead the depth of each subgraph read operation
@@ -194,20 +190,19 @@
* @throws SearchEngineException if there is a problem updating the indexes
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- public void index( String workspaceName,
+ public void index( ExecutionContext context,
+ String workspaceName,
Location startingPoint,
int depthPerRead ) throws RepositorySourceException, SearchEngineException {
CheckArg.isNotNull(workspaceName, "workspaceName");
CheckArg.isNotNull(startingPoint, "startingPoint");
assert startingPoint.hasPath();
-
- Workspace workspace = getWorkspace(workspaceName);
- if (startingPoint.getPath().isRoot()) {
- // More efficient to just start over with a new index ...
- workspace.execute(true, addContent(startingPoint, depthPerRead));
- } else {
- // Have to first remove the content below the starting point, then add it again ...
- workspace.execute(false, removeContent(startingPoint), addContent(startingPoint, depthPerRead));
+ workspaces.getWorkspace(context, workspaceName, true);
+ ProcessorType processor = createProcessor(context, workspaces, null, false);
+ try {
+ processor.crawl(workspaceName, startingPoint, depthPerRead);
+ } finally {
+ processor.close();
}
}
@@ -219,6 +214,7 @@
* This method operates synchronously and returns when the requested indexing is completed.
* </p>
*
+ * @param context the context in which the operation is to be performed; may not be null
* @param workspaceName the name of the workspace
* @param startingPoint the path that represents the content to be indexed
* @param depthPerRead the depth of each subgraph read operation
@@ -227,18 +223,20 @@
* @throws SearchEngineException if there is a problem updating the indexes
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- public void index( String workspaceName,
+ public void index( ExecutionContext context,
+ String workspaceName,
Path startingPoint,
int depthPerRead ) throws RepositorySourceException, SearchEngineException {
CheckArg.isNotNull(workspaceName, "workspaceName");
CheckArg.isNotNull(startingPoint, "startingPoint");
- index(workspaceName, Location.create(startingPoint), depthPerRead);
+ index(context, workspaceName, Location.create(startingPoint), depthPerRead);
}
/**
* Index all of the content in the named workspace within the {@link #getSourceName() source}. This method operates
* synchronously and returns when the requested indexing is completed.
*
+ * @param context the context in which the operation is to be performed; may not be null
* @param workspaceName the name of the workspace
* @param depthPerRead the depth of each subgraph read operation
* @throws IllegalArgumentException if the workspace name is null
@@ -246,65 +244,76 @@
* @throws SearchEngineException if there is a problem updating the indexes
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- public void index( String workspaceName,
+ public void index( ExecutionContext context,
+ String workspaceName,
int depthPerRead ) throws RepositorySourceException, SearchEngineException {
CheckArg.isNotNull(workspaceName, "workspaceName");
- index(workspaceName, rootPath(), depthPerRead);
+ Path rootPath = context.getValueFactories().getPathFactory().createRootPath();
+ index(context, workspaceName, Location.create(rootPath), depthPerRead);
}
/**
* Index (or re-index) all of the content in all of the workspaces within the source. This method operates synchronously and
* returns when the requested indexing is completed.
*
+ * @param context the context in which the operation is to be performed; may not be null
* @param depthPerRead the depth of each subgraph read operation
* @throws RepositorySourceException if there is a problem accessing the content
* @throws SearchEngineException if there is a problem updating the indexes
*/
- public void index( int depthPerRead ) throws RepositorySourceException, SearchEngineException {
- Path rootPath = rootPath();
- for (String workspaceName : graph().getWorkspaces()) {
- index(workspaceName, rootPath, depthPerRead);
+ public void index( ExecutionContext context,
+ int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ Path rootPath = context.getValueFactories().getPathFactory().createRootPath();
+ Location rootLocation = Location.create(rootPath);
+ for (String workspaceName : graph(context).getWorkspaces()) {
+ index(context, workspaceName, rootLocation, depthPerRead);
}
}
/**
* Update the indexes with the supplied set of changes to the content.
*
+ * @param context the execution context for which this session is to be established; may not be null
* @param changes the set of changes to the content
+ * @return the actual changes that were made and which record any problems or errors; never null
* @throws IllegalArgumentException if the path is null
* @throws RepositorySourceException if there is a problem accessing the content
* @throws SearchEngineException if there is a problem updating the indexes
*/
- public void index( final Iterable<ChangeRequest> changes ) throws SearchEngineException {
- // First break up all the changes into different collections, one collection per workspace ...
- Map<String, Collection<ChangeRequest>> changesByWorkspace = new HashMap<String, Collection<ChangeRequest>>();
- for (ChangeRequest request : changes) {
- String workspaceName = request.changedWorkspace();
- Collection<ChangeRequest> changesForWorkspace = changesByWorkspace.get(workspaceName);
- if (changesForWorkspace == null) {
- changesForWorkspace = new LinkedList<ChangeRequest>();
- changesByWorkspace.put(workspaceName, changesForWorkspace);
+ public List<ChangeRequest> index( ExecutionContext context,
+ final Iterable<ChangeRequest> changes ) throws SearchEngineException {
+ List<ChangeRequest> requests = new LinkedList<ChangeRequest>();
+ ProcessorType processor = createProcessor(context, workspaces, null, false);
+ try {
+ boolean submit = true;
+ for (ChangeRequest request : changes) {
+ ChangeRequest clone = request.clone();
+ if (submit) {
+ processor.process(clone);
+ if (clone.hasError()) submit = false;
+ }
+ requests.add(clone);
}
- changesForWorkspace.add(request);
+ } finally {
+ processor.close();
}
- // Now update the indexes for each workspace (serially). This minimizes the time that each workspace
- // locks its indexes for writing.
- for (Map.Entry<String, Collection<ChangeRequest>> entry : changesByWorkspace.entrySet()) {
- String workspaceName = entry.getKey();
- Collection<ChangeRequest> changesForWorkspace = entry.getValue();
- getWorkspace(workspaceName).execute(false, updateContent(changesForWorkspace));
- }
+ return requests;
}
/**
* Invoke the engine's garbage collection on all indexes used by all workspaces in the source. This method reclaims space and
* optimizes the index. This should be done on a periodic basis after changes are made to the engine's indexes.
*
+ * @param context the context in which the operation is to be performed; may not be null
+ * @return true if an optimization was performed, or false if there was no need
* @throws SearchEngineException if there is a problem during optimization
*/
- public void optimize() throws SearchEngineException {
- for (String workspaceName : graph().getWorkspaces()) {
- getWorkspace(workspaceName).execute(false, optimizeContent());
+ public boolean optimize( ExecutionContext context ) throws SearchEngineException {
+ ProcessorType processor = createProcessor(context, workspaces, null, true);
+ try {
+ return processor.optimize();
+ } finally {
+ processor.close();
}
}
@@ -313,495 +322,175 @@
* and optimizes the index. This should be done on a periodic basis after changes are made to the engine's indexes.
*
* @param workspaceName the name of the workspace
+ * @param context the context in which the operation is to be performed; may not be null
+ * @return true if an optimization was performed, or false if there was no need
* @throws IllegalArgumentException if the workspace name is null
* @throws SearchEngineException if there is a problem during optimization
* @throws InvalidWorkspaceException if the workspace does not exist
*/
- public void optimize( String workspaceName ) throws SearchEngineException {
+ public boolean optimize( ExecutionContext context,
+ String workspaceName ) throws SearchEngineException {
CheckArg.isNotNull(workspaceName, "workspaceName");
- getWorkspace(workspaceName).execute(false, optimizeContent());
- }
-
- /**
- * Perform a full-text search of the content in the named workspace, given the maximum number of results and the offset
- * defining the first result the caller is interested in.
- *
- * @param context the execution context in which the search is to take place; may not be null
- * @param workspaceName the name of the workspace
- * @param fullTextSearch the full-text search to be performed; may not be null
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @return the activity that will perform the work
- * @throws IllegalArgumentException if the execution context or workspace name are null
- * @throws SearchEngineException if there is a problem during optimization
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- public List<Location> fullTextSearch( ExecutionContext context,
- String workspaceName,
- String fullTextSearch,
- int maxResults,
- int offset ) {
- CheckArg.isNotNull(context, "context");
- CheckArg.isNotNull(workspaceName, "workspaceName");
- Search searchActivity = searchContent(context, fullTextSearch, maxResults, offset);
- getWorkspace(workspaceName).execute(false, searchActivity);
- return searchActivity.getResults();
- }
-
- /**
- * Perform a query of the content in the named workspace, given the Abstract Query Model representation of the query.
- *
- * @param context the execution context in which the search is to take place; may not be null
- * @param workspaceName the name of the workspace
- * @param query the query that is to be executed, in the form of the Abstract Query Model
- * @param schemata the definition of the tables and views that can be used in the query; may not be null
- * @return the query results; never null
- * @throws IllegalArgumentException if the context, query, or schemata references are null
- */
- public QueryResults query( ExecutionContext context,
- String workspaceName,
- QueryCommand query,
- Schemata schemata ) {
- CheckArg.isNotNull(context, "context");
- CheckArg.isNotNull(workspaceName, "workspaceName");
- CheckArg.isNotNull(query, "query");
- CheckArg.isNotNull(schemata, "schemata");
- QueryContext queryContext = new QueryContext(schemata, context.getValueFactories().getTypeSystem());
- Query queryActivity = queryContent(queryContext, query);
- getWorkspace(workspaceName).execute(false, queryActivity);
- return queryActivity.getResults();
- }
-
- /**
- * Remove the supplied index from the search engine. This is typically done when the workspace has been deleted from the
- * source, or when
- *
- * @param workspaceName the name of the workspace
- * @throws IllegalArgumentException if the workspace name is null
- * @throws SearchEngineException if there is a problem removing the workspace
- */
- public void removeWorkspace( String workspaceName ) throws SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
+ ProcessorType processor = createProcessor(context, workspaces, null, true);
try {
- workspacesLock.writeLock().lock();
- // Check whether another thread got in and created the engine while we waited ...
- Workspace workspace = workspacesByName.remove(workspaceName);
- if (workspace != null) {
- indexLayout.destroyIndexes(context, getSourceName(), workspaceName);
- }
- } catch (IOException e) {
- String message = GraphI18n.errorWhileRemovingIndexesForWorkspace.text(sourceName, workspaceName, e.getMessage());
- throw new SearchEngineException(message, e);
+ return processor.optimize(workspaceName);
} finally {
- workspacesLock.writeLock().unlock();
+ processor.close();
}
}
- /**
- * Remove from the search engine all workspace-related indexes, thereby cleaning up any resources used by this search engine.
- *
- * @throws SearchEngineException if there is a problem removing any of the workspace
- */
- public void removeWorkspaces() throws SearchEngineException {
- try {
- workspacesLock.writeLock().lock();
- for (String workspaceName : new HashSet<String>(workspacesByName.keySet())) {
- removeWorkspace(workspaceName);
- }
- } finally {
- workspacesLock.writeLock().unlock();
- }
- }
+ public interface Workspaces<WorkspaceType extends SearchEngineWorkspace> {
+ /**
+ * Get the connection factory for repository sources.
+ *
+ * @return the connection factory; never null
+ */
+ RepositoryConnectionFactory getRepositoryConnectionFactory();
- /**
- * Get the search engine for the workspace with the supplied name.
- *
- * @param workspaceName the name of the workspace
- * @return the workspace's search engine
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- protected Workspace getWorkspace( String workspaceName ) {
- Workspace workspace = null;
- try {
- workspacesLock.readLock().lock();
- workspace = workspacesByName.get(workspaceName);
- } finally {
- workspacesLock.readLock().unlock();
- }
+ /**
+ * Get the search engine for the workspace with the supplied name.
+ *
+ * @param context the execution context; never null
+ * @param workspaceName the name of the workspace; never null
+ * @param createIfMissing true if the workspace should be created if missing, or false otherwise
+ * @return the workspace's search engine
+ * @throws InvalidWorkspaceException if the workspace does not exist
+ */
+ WorkspaceType getWorkspace( ExecutionContext context,
+ String workspaceName,
+ boolean createIfMissing );
- if (workspace == null) {
- // Verify the workspace does exist ...
- if (!graph().getWorkspaces().contains(workspaceName)) {
- String msg = GraphI18n.workspaceDoesNotExistInRepository.text(workspaceName, getSourceName());
- throw new InvalidWorkspaceException(msg);
- }
- try {
- workspacesLock.writeLock().lock();
- // Check whether another thread got in and created the engine while we waited ...
- workspace = workspacesByName.get(workspaceName);
- if (workspace == null) {
- // Create the engine and register it ...
- workspace = new Workspace(workspaceName);
- workspacesByName.put(workspaceName, workspace);
- }
- } finally {
- workspacesLock.writeLock().unlock();
- }
- }
- return workspace;
+ /**
+ * Get the existing workspaces.
+ *
+ * @return the workspaces
+ */
+ Collection<WorkspaceType> getWorkspaces();
+
+ /**
+ * Remove the supplied workspace from the search engine. This is typically done when the workspace is being deleted. Note
+ * that the resulting Workspace needs to then be cleaned up by the caller.
+ *
+ * @param workspaceName the name of the workspace
+ * @return the workspace that was removed, or null if there was no workspace with the supplied name
+ */
+ WorkspaceType removeWorkspace( String workspaceName );
+
+ /**
+ * Remove from the search engine all workspace-related indexes, thereby cleaning up any resources used by this search
+ * engine.
+ *
+ * @return the mutable map containing the {@link SearchEngineWorkspace} objects keyed by their name; never null but
+ * possibly empty
+ */
+ Map<String, WorkspaceType> removeAllWorkspaces();
}
- protected class Workspace {
- private final String sourceName;
- private final String workspaceName;
- protected final AtomicInteger modifiedNodesSinceLastOptimize = new AtomicInteger(0);
+ protected class SearchWorkspaces implements Workspaces<WorkspaceType> {
+ private final ReadWriteLock workspacesLock = new ReentrantReadWriteLock();
+ @GuardedBy( "workspacesLock" )
+ private final Map<String, WorkspaceType> workspacesByName = new HashMap<String, WorkspaceType>();
+ private final RepositoryConnectionFactory connectionFactory;
- protected Workspace( String workspaceName ) {
- this.workspaceName = workspaceName;
- this.sourceName = getSourceName();
+ protected SearchWorkspaces( RepositoryConnectionFactory connectionFactory ) {
+ this.connectionFactory = connectionFactory;
}
/**
- * Get the workspace name.
+ * {@inheritDoc}
*
- * @return the workspace name; never null
+ * @see org.jboss.dna.graph.search.SearchEngine.Workspaces#getRepositoryConnectionFactory()
*/
- public String getWorkspaceName() {
- return workspaceName;
+ public RepositoryConnectionFactory getRepositoryConnectionFactory() {
+ return connectionFactory;
}
/**
- * Execute the supplied activities against the indexes.
+ * {@inheritDoc}
*
- * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
- * @param activities the activities to execute
- * @throws SearchEngineException if there is a problem performing the activities
+ * @see org.jboss.dna.graph.search.SearchEngine.Workspaces#getWorkspace(org.jboss.dna.graph.ExecutionContext,
+ * java.lang.String, boolean)
*/
- protected final void execute( boolean overwrite,
- Activity... activities ) throws SearchEngineException {
- // Determine if the activities are readonly ...
- boolean readOnly = true;
- for (Activity activity : activities) {
- if (!(activity instanceof ReadOnlyActivity)) {
- readOnly = false;
- break;
- }
+ public WorkspaceType getWorkspace( ExecutionContext context,
+ String workspaceName,
+ boolean createIfMissing ) {
+ assert context != null;
+ assert workspaceName != null;
+ WorkspaceType workspace = null;
+ try {
+ workspacesLock.readLock().lock();
+ workspace = workspacesByName.get(workspaceName);
+ } finally {
+ workspacesLock.readLock().unlock();
}
- // Create a session ...
- Session session = indexLayout.createSession(context, sourceName, workspaceName, overwrite, readOnly);
- assert session != null;
-
- // Execute the various activities ...
- Throwable error = null;
- try {
- int numChanges = 0;
- for (Activity activity : activities) {
- try {
- numChanges += activity.execute(session);
- } catch (RuntimeException e) {
- error = e;
- throw e;
- }
+ if (workspace == null) {
+ // Verify the workspace does exist ...
+ if (isVerifyWorkspaceInSource() && connectionFactory != null
+ && !graph(context).getWorkspaces().contains(workspaceName)) {
+ String msg = GraphI18n.workspaceDoesNotExistInRepository.text(workspaceName, getSourceName());
+ throw new InvalidWorkspaceException(msg);
}
- if (numChanges > 0) {
- numChanges = this.modifiedNodesSinceLastOptimize.addAndGet(numChanges);
- // Determine if there have been enough changes made to run the optimizer ...
- int maxChanges = getMaxChangesBeforeAutomaticOptimization();
- if (maxChanges > 0 && numChanges >= maxChanges) {
- Activity optimizer = optimizeContent();
- try {
- optimizer.execute(session);
- } catch (RuntimeException e) {
- error = e;
- throw e;
- }
- }
- }
- } finally {
try {
- if (error == null) {
- session.commit();
- } else {
- session.rollback();
+ workspacesLock.writeLock().lock();
+ // Check whether another thread got in and created the engine while we waited ...
+ workspace = workspacesByName.get(workspaceName);
+ if (workspace == null) {
+ // Create the engine and register it ...
+ workspace = createWorkspace(context, workspaceName);
+ workspacesByName.put(workspaceName, workspace);
}
- } catch (RuntimeException e2) {
- // We don't want to lose the existing error, if there is one ...
- if (error == null) {
- I18n msg = GraphI18n.errorWhileCommittingIndexChanges;
- throw new SearchEngineException(msg.text(workspaceName, sourceName, e2.getMessage()), e2);
- }
+ } finally {
+ workspacesLock.writeLock().unlock();
}
}
+ return workspace;
}
- }
- /**
- * Create an activity that will optimize the indexes.
- *
- * @return the activity that will perform the work
- */
- protected Activity optimizeContent() {
- return new Activity() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
- */
- public int execute( Session session ) {
- session.optimize();
- return 0; // no lines changed
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return GraphI18n.errorWhileOptimizingIndexes.text(sourceName, workspaceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will read from the source the content at the supplied location and add the content to the search
- * index.
- *
- * @param location the location of the content to read; may not be null
- * @param depthPerRead the depth of each read operation; always positive
- * @return the activity that will perform the work
- */
- protected Activity addContent( final Location location,
- final int depthPerRead ) {
- return new Activity() {
- public int execute( Session session ) {
-
- // Create a queue that we'll use to walk the content ...
- LinkedList<Location> locationsToRead = new LinkedList<Location>();
- locationsToRead.add(location);
- int count = 0;
-
- // Now read and index the content ...
- Graph graph = graph();
- graph.useWorkspace(session.getWorkspaceName());
- while (!locationsToRead.isEmpty()) {
- Location location = locationsToRead.poll();
- if (location == null) continue;
- Subgraph subgraph = graph.getSubgraphOfDepth(depthPerRead).at(location);
- // Index all of the nodes within this subgraph ...
- for (SubgraphNode node : subgraph) {
- // Index the node ...
- session.index(node);
- ++count;
-
- // Process the children ...
- for (Location child : node.getChildren()) {
- if (!subgraph.includes(child)) {
- // Record this location as needing to be read ...
- locationsToRead.add(child);
- }
- }
- }
- }
- return count;
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- String path = readable(location.getPath());
- return GraphI18n.errorWhileIndexingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will remove from the indexes all documents that represent content at or below the specified
- * location.
- *
- * @param location the location of the content to removed; may not be null
- * @return the activity that will perform the work
- */
- protected Activity removeContent( final Location location ) {
- return new Activity() {
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
- */
- public int execute( Session session ) {
- // Delete the content at/below the path ...
- return session.deleteBelow(location.getPath());
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- String path = readable(location.getPath());
- return GraphI18n.errorWhileRemovingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will update the indexes with changes that were already made to the content.
- *
- * @param changes the changes that have been made to the content; may not be null
- * @return the activity that will perform the work
- */
- protected Activity updateContent( final Iterable<ChangeRequest> changes ) {
- return new Activity() {
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
- */
- public int execute( Session session ) {
- return session.apply(changes);
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return GraphI18n.errorWhileUpdatingContent.text(workspaceName, sourceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will perform a full-text search given the supplied query.
- *
- * @param context the context in which the search is to be performed; may not be null
- * @param fullTextSearch the full-text search to be performed; may not be null
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @return the activity that will perform the work; never null
- */
- protected Search searchContent( final ExecutionContext context,
- final String fullTextSearch,
- final int maxResults,
- final int offset ) {
- final List<Location> results = new ArrayList<Location>(maxResults);
- return new Search() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
- */
- public int execute( Session session ) {
- session.search(context, fullTextSearch, maxResults, offset, results);
- return 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
- * java.lang.String)
- */
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return GraphI18n.errorWhilePerformingSearch.text(fullTextSearch, workspaceName, sourceName, error.getMessage());
- }
-
- public List<Location> getResults() {
- return results;
- }
- };
- }
-
- /**
- * Create an activity that will perform a query against the index.
- *
- * @param context the context in which the search is to be performed; may not be null
- * @param query the query to be performed; may not be null
- * @return the activity that will perform the query; never null
- */
- protected Query queryContent( final QueryContext context,
- final QueryCommand query ) {
- return new Query() {
- private QueryResults results = null;
-
- public int execute( Session session ) throws SearchException {
- results = session.query(context, query);
- return 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
- * java.lang.String)
- */
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return GraphI18n.errorWhilePerformingQuery.text(query, workspaceName, sourceName, error.getMessage());
- }
-
- public QueryResults getResults() {
- return results;
- }
- };
- }
-
- /**
- * Interface for activities that will be executed against a workspace. These activities don't have to commit or roll back the
- * writer, nor do they have to translate the exceptions, since this is done by the
- * {@link Workspace#execute(boolean, Activity...)} method.
- */
- protected interface Activity {
-
/**
- * Perform the activity by using the index writer.
+ * {@inheritDoc}
*
- * @param indexSession the index session that should be used by the activity; never null
- * @return the number of changes that were made by this activity
+ * @see org.jboss.dna.graph.search.SearchEngine.Workspaces#getWorkspaces()
*/
- int execute( Session indexSession );
+ public Collection<WorkspaceType> getWorkspaces() {
+ try {
+ workspacesLock.writeLock().lock();
+ return new ArrayList<WorkspaceType>(workspacesByName.values());
+ } finally {
+ workspacesByName.clear();
+ workspacesLock.writeLock().unlock();
+ }
+ }
/**
- * Translate an exception obtained during {@link #execute(Session) execution} into a single message.
+ * {@inheritDoc}
*
- * @param t the exception
- * @param sourceName the name of the source
- * @param workspaceName the name of the workspace
- * @return the error message
+ * @see org.jboss.dna.graph.search.SearchEngine.Workspaces#removeWorkspace(java.lang.String)
*/
- String messageFor( Throwable t,
- String sourceName,
- String workspaceName );
- }
+ public WorkspaceType removeWorkspace( String workspaceName ) {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ try {
+ workspacesLock.writeLock().lock();
+ // Check whether another thread got in and created the engine while we waited ...
+ return workspacesByName.remove(workspaceName);
+ } finally {
+ workspacesLock.writeLock().unlock();
+ }
+ }
- /**
- * A read-only activity.
- */
- protected interface ReadOnlyActivity extends Activity {
- }
-
- /**
- * A search activity.
- */
- protected interface Search extends ReadOnlyActivity {
/**
- * Get the results of the search.
+ * {@inheritDoc}
*
- * @return the list of {@link Location} objects for each node satisfying the results; never null
+ * @see org.jboss.dna.graph.search.SearchEngine.Workspaces#removeAllWorkspaces()
*/
- List<Location> getResults();
+ public Map<String, WorkspaceType> removeAllWorkspaces() {
+ try {
+ workspacesLock.writeLock().lock();
+ return new HashMap<String, WorkspaceType>(workspacesByName);
+ } finally {
+ workspacesByName.clear();
+ workspacesLock.writeLock().unlock();
+ }
+ }
}
-
- /**
- * A query activity.
- */
- protected interface Query extends ReadOnlyActivity {
- /**
- * Get the results of the query.
- *
- * @return the results of a query; never null
- */
- QueryResults getResults();
- }
-
}
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineProcessor.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineProcessor.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineProcessor.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,288 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.search;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.connector.RepositorySourceException;
+import org.jboss.dna.graph.observe.Observer;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.InvalidPathException;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.request.CompositeRequestChannel;
+import org.jboss.dna.graph.request.CreateNodeRequest;
+import org.jboss.dna.graph.request.DeleteBranchRequest;
+import org.jboss.dna.graph.request.DeleteChildrenRequest;
+import org.jboss.dna.graph.request.ReadBranchRequest;
+import org.jboss.dna.graph.request.Request;
+import org.jboss.dna.graph.request.UpdatePropertiesRequest;
+import org.jboss.dna.graph.request.processor.RequestProcessor;
+import org.jboss.dna.graph.search.SearchEngine.Workspaces;
+
+/**
+ * The processor that is created by the provider whenever a logical set of activities needs to be performed.
+ *
+ * @param <WorkspaceType> the type of workspace
+ */
+public abstract class SearchEngineProcessor<WorkspaceType extends SearchEngineWorkspace> extends RequestProcessor {
+
+ protected boolean rollback = false;
+ protected final Workspaces<WorkspaceType> workspaces;
+
+ /**
+ * @param sourceName
+ * @param context
+ * @param workspaces
+ * @param observer
+ * @param now
+ */
+ protected SearchEngineProcessor( String sourceName,
+ ExecutionContext context,
+ Workspaces<WorkspaceType> workspaces,
+ Observer observer,
+ DateTime now ) {
+ super(sourceName, context, observer, now);
+ this.workspaces = workspaces;
+ assert this.workspaces != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#close()
+ */
+ @Override
+ public void close() {
+ try {
+ if (rollback) rollback();
+ else commit();
+ } finally {
+ // publish any changes to the observer ...
+ super.close();
+ }
+ }
+
+ /**
+ * Subclasses should implement this method to throw away any work that has been done with this processor.
+ */
+ protected abstract void rollback();
+
+ /**
+ * Subclasses should implement this method to commit and save any work that has been done with this processor.
+ */
+ protected abstract void commit();
+
+ /**
+ * Optimize the indexes for all workspaces, if required.
+ *
+ * @return true if an optimization was performed, or false if there was no need
+ */
+ public boolean optimize() {
+ // do nothing by default
+ return false;
+ }
+
+ /**
+ * Optimize the indexes for the named workspace, if required.
+ *
+ * @param workspaceName the name of the workspace to be optimized; never null
+ * @return true if an optimization was performed, or false if there was no need
+ */
+ public boolean optimize( String workspaceName ) {
+ // do nothing by default
+ return false;
+ }
+
+ /**
+ * Utility method to index all of the content at or below the supplied path in the named workspace within the
+ * {@link #getSourceName() source}. If the starting point is the root node, then this method will drop the existing index(es)
+ * and rebuild from the content in the workspace of the source.
+ * <p>
+ * This method works by reading the graph and constructing and {@link #process(Request) processing} the corresponding
+ * {@link CreateNodeRequest}s (and possibly a single {@link UpdatePropertiesRequest} for the top-level node) that result in
+ * the same subgraph being 'created' in the index.
+ * </p>
+ *
+ * @param workspaceName the name of the workspace to be crawled; may not be null
+ * @param startingPoint the location that represents the content to be indexed; must have a path
+ * @param depthPerRead the depth of each subgraph read operation
+ * @return the number of nodes that were indexed
+ * @throws RepositorySourceException if there is a problem accessing the content
+ */
+ protected int crawl( String workspaceName,
+ Location startingPoint,
+ int depthPerRead ) {
+ CompositeRequestChannel channel = new CompositeRequestChannel(getSourceName());
+ ExecutorService service = Executors.newSingleThreadExecutor();
+ channel.start(service, getExecutionContext(), workspaces.getRepositoryConnectionFactory());
+ try {
+ return crawl(workspaceName, startingPoint, depthPerRead, channel);
+ } catch (InterruptedException err) {
+ // Clear the interrupted status of the thread ...
+ Thread.interrupted();
+ } finally {
+ // Close the channel ...
+ try {
+ channel.close();
+ } finally {
+ // And shut down the service ...
+ service.shutdown();
+ try {
+ service.awaitTermination(5, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ // Clear the interrupted status of the thread ...
+ Thread.interrupted();
+ }
+ }
+ }
+ return 0;
+ }
+
+ /**
+ * Utility method to index all of the content at or below the supplied path in the named workspace within the
+ * {@link #getSourceName() source}. If the starting point is the root node, then this method will drop the existing index(es)
+ * and rebuild from the content in the workspace of the source.
+ * <p>
+ * This method works by reading the graph and constructing and {@link #process(Request) processing} the corresponding
+ * {@link CreateNodeRequest}s (and possibly a single {@link UpdatePropertiesRequest} for the top-level node) that result in
+ * the same subgraph being 'created' in the index.
+ * </p>
+ *
+ * @param workspaceName the name of the workspace to be crawled; may not be null
+ * @param startingPoint the location that represents the content to be indexed; must have a path
+ * @param depthPerRead the depth of each subgraph read operation
+ * @param channel the channel that has been opened (and started) and that should be used to add requests to the underlying
+ * source; may not be null
+ * @return the number of nodes that were indexed
+ * @throws RepositorySourceException if there is a problem accessing the content
+ * @throws InterruptedException if the channel thread was interrupted
+ */
+ protected int crawl( String workspaceName,
+ Location startingPoint,
+ int depthPerRead,
+ CompositeRequestChannel channel ) throws InterruptedException {
+ List<Request> requests = new LinkedList<Request>();
+
+ // Read the first subgraph ...
+ try {
+ ReadBranchRequest readSubgraph = new ReadBranchRequest(startingPoint, workspaceName, depthPerRead);
+ channel.addAndAwait(readSubgraph);
+ if (readSubgraph.hasError()) {
+ channel.cancel(false);
+ Throwable t = readSubgraph.getError();
+ if (t instanceof RuntimeException) throw (RuntimeException)t;
+ throw new RepositorySourceException(getSourceName(), t);
+ }
+ Iterator<Location> locationIter = readSubgraph.iterator();
+ assert locationIter.hasNext();
+ int count = 0;
+
+ // Destroy the nodes at the supplied location ...
+ if (startingPoint.getPath().isRoot()) {
+ // Just delete the whole content ...
+ process(new DeleteBranchRequest(startingPoint, workspaceName));
+ } else {
+ // We can't delete the node, since later same-name-siblings might be changed. So delete the children ...
+ process(new DeleteChildrenRequest(startingPoint, workspaceName));
+ }
+
+ // Now update all of the properties, removing any that are no longer needed ...
+ Location topNode = locationIter.next();
+ Map<Name, Property> properties = readSubgraph.getPropertiesFor(topNode);
+ boolean removeOtherProperties = true;
+ UpdatePropertiesRequest request = new UpdatePropertiesRequest(startingPoint, workspaceName, properties,
+ removeOtherProperties);
+ request.setActualLocationOfNode(topNode);
+ process(request);
+ if (request.isCancelled() || request.hasError()) {
+ rollback = true;
+ return count;
+ }
+ ++count;
+
+ // Create a queue that we'll use to walk the content ...
+ LinkedList<Location> locationsToRead = new LinkedList<Location>();
+
+ // Now walk the remaining nodes in the subgraph ...
+ while (true) {
+ while (locationIter.hasNext()) {
+
+ // Index the node ...
+ Location location = locationIter.next();
+ Path path = location.getPath();
+ Location parent = readSubgraph.getLocationFor(path.getParent());
+ Name childName = path.getLastSegment().getName();
+ Collection<Property> nodePoperties = readSubgraph.getPropertiesFor(location).values();
+ CreateNodeRequest create = new CreateNodeRequest(parent, workspaceName, childName, nodePoperties);
+ create.setActualLocationOfNode(location); // set this so we don't have to figure it out
+ process(create);
+ if (create.isCancelled() || create.hasError()) {
+ rollback = true;
+ return count;
+ }
+ ++count;
+
+ // Process the children ...
+ for (Location child : readSubgraph.getChildren(location)) {
+ if (!readSubgraph.includes(child)) {
+ // Record this location as needing to be read ...
+ locationsToRead.add(child);
+ }
+ }
+ }
+
+ if (locationsToRead.isEmpty()) break;
+ Location location = locationsToRead.poll();
+ assert location != null;
+
+ readSubgraph = new ReadBranchRequest(location, workspaceName, depthPerRead);
+ channel.addAndAwait(readSubgraph);
+ if (readSubgraph.hasError()) {
+ if (readSubgraph.hasError()) {
+ channel.cancel(false);
+ Throwable t = readSubgraph.getError();
+ if (t instanceof RuntimeException) throw (RuntimeException)t;
+ throw new RepositorySourceException(getSourceName(), t);
+ }
+ }
+ }
+ return count;
+
+ } catch (InvalidPathException e) {
+ // The node must no longer exist, so delete it from the indexes ...
+ requests.add(new DeleteBranchRequest(startingPoint, workspaceName));
+ }
+ return 0;
+ }
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineProcessor.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineWorkspace.java (from rev 1417, trunk/dna-search/src/test/java/org/jboss/dna/search/SearchI18nTest.java)
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineWorkspace.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineWorkspace.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,48 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.search;
+
+import java.io.IOException;
+import org.jboss.dna.graph.ExecutionContext;
+
+/**
+ * The representation of the persisted state of the indexes needed for searching.
+ */
+public interface SearchEngineWorkspace {
+
+ /**
+ * Get the name of the workspace.
+ *
+ * @return the workspace name; never null
+ */
+ String getWorkspaceName();
+
+ /**
+ * Destroy the indexes for the workspace with the supplied name.
+ *
+ * @param context the execution context in which the destruction should be performed; may not be null
+ * @throws IOException if there is a problem destroying the indexes
+ */
+ void destroy( ExecutionContext context ) throws IOException;
+}
Deleted: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,97 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.graph.search;
-
-/**
- * An exception signalling an error during a search.
- */
-public class SearchException extends RuntimeException {
-
- /**
- */
- private static final long serialVersionUID = 1L;
-
- private final String expression;
-
- /**
- * Create an exception with the search expression.
- *
- * @param expression the search expression
- */
- public SearchException( String expression ) {
- super();
- this.expression = expression;
- }
-
- /**
- * Create an exception with the search expression and a message.
- *
- * @param expression the search expression
- * @param message the exception message
- */
- public SearchException( String expression,
- String message ) {
- super(message);
- assert expression != null;
- this.expression = expression;
- }
-
- /**
- * Construct a system failure exception with another exception that is the cause of the failure.
- *
- * @param expression the search expression
- * @param cause the original cause of the failure
- */
- public SearchException( String expression,
- Throwable cause ) {
- super(cause);
- assert expression != null;
- this.expression = expression;
- }
-
- /**
- * Construct a system failure exception with a single message and another exception that is the cause of the failure.
- *
- * @param expression the search expression
- * @param message the message describing the failure
- * @param cause the original cause of the failure
- */
- public SearchException( String expression,
- String message,
- Throwable cause ) {
- super(message, cause);
- assert expression != null;
- this.expression = expression;
- }
-
- /**
- * Get the search expression.
- *
- * @return the search expression; never null
- */
- public String getSearchExpression() {
- return expression;
- }
-
-}
Deleted: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,178 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.graph.search;
-
-import java.io.IOException;
-import java.util.List;
-import net.jcip.annotations.ThreadSafe;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.connector.RepositorySource;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.ChangeRequest;
-
-/**
- * The representation of a single layout of one or more Lucene indexes.
- */
-@ThreadSafe
-public interface SearchProvider {
-
- /**
- * Create a new session to the indexes.
- *
- * @param context the execution context for which this session is to be established; may not be null
- * @param sourceName the name of the source; may not be null
- * @param workspaceName the name of the workspace; may not be null
- * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
- * @param readOnly true if the resulting session can be optimized for use in read-only situations, or false if the session
- * needs to allow calling the write methods
- * @return the session to the indexes; never null
- */
- Session createSession( ExecutionContext context,
- String sourceName,
- String workspaceName,
- boolean overwrite,
- boolean readOnly );
-
- /**
- * Destroy the indexes for the workspace with the supplied name.
- *
- * @param context the execution context in which the destruction should be performed; may not be null
- * @param sourceName the name of the source; may not be null
- * @param workspaceName the name of the workspace; may not be null
- * @return true if the indexes for the workspace were destroyed, or false if there was no such workspace index
- * @throws IOException if there is a problem destroying the indexes
- */
- boolean destroyIndexes( ExecutionContext context,
- String sourceName,
- String workspaceName ) throws IOException;
-
- /**
- * A stateful session that is used to interact with the search provider to search a particular source and workspace.
- */
- public interface Session {
-
- /**
- * Get the name of the {@link RepositorySource repository source} for which this session exists. A session instance will
- * always return the same name.
- *
- * @return the source name; never null
- */
- String getSourceName();
-
- /**
- * Get the name of the workspace for which this session exists. A session instance will always return the same name.
- *
- * @return the workspace name; never null
- */
- String getWorkspaceName();
-
- /**
- * Get the execution context in which this session is operating.
- *
- * @return the execution context; never null
- */
- ExecutionContext getContext();
-
- /**
- * Return whether this session made changes to the indexed state.
- *
- * @return true if change were made, or false otherwise
- */
- boolean hasChanges();
-
- /**
- * Perform a full-text search given the supplied query.
- *
- * @param context the context in which the search should be executed; may not be null
- * @param fullTextString the full-text query; never null or blank
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @param results the list where the results should be accumulated; never null
- */
- void search( ExecutionContext context,
- String fullTextString,
- int maxResults,
- int offset,
- List<Location> results );
-
- /**
- * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model,
- * with the {@link Schemata} that defines the tables and views that are available to the query, and the set of index
- * readers (and writers) that should be used.
- *
- * @param queryContext the context in which the query should be executed; may not be null
- * @param query the query; never null
- * @return the results of the query; never null
- */
- QueryResults query( QueryContext queryContext,
- QueryCommand query );
-
- /**
- * Index the node. Changes are recorded only when {@link #commit()} is called.
- *
- * @param node the node to be indexed; never null
- */
- void index( Node node );
-
- /**
- * Update the indexes to reflect the supplied changes to the graph content.
- *
- * @param changes the set of changes to the content
- * @return the (approximate) number of nodes that were affected by the changes
- * @throws SearchEngineException if there is a problem executing the query
- */
- int apply( Iterable<ChangeRequest> changes );
-
- /**
- * Remove from the index(es) all of the information pertaining to the nodes at or below the supplied path.
- *
- * @param path the path identifying the graph content that is to be removed; never null
- * @return the (approximate) number of nodes that were affected by the changes
- */
- int deleteBelow( Path path );
-
- /**
- * Optimize the indexes, if required.
- */
- void optimize();
-
- /**
- * Close this session by committing all of the changes. This session is no longer usable after this method is called.
- */
- void commit();
-
- /**
- * Close this session by rolling back all of the changes that have been made. This session is no longer usable after this
- * method is called.
- */
- void rollback();
- }
-
-}
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/GraphTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -58,6 +58,7 @@
import org.jboss.dna.graph.property.Property;
import org.jboss.dna.graph.query.QueryResults;
import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.QueryResults.Statistics;
import org.jboss.dna.graph.query.model.Column;
import org.jboss.dna.graph.query.model.Constraint;
import org.jboss.dna.graph.query.model.Limit;
@@ -123,7 +124,9 @@
private String sourceName;
private MockRepositoryConnection connection;
private LinkedList<Request> executedRequests;
- private QueryResults nextQueryResults;
+ private Columns nextColumns;
+ private List<Object[]> nextTuples;
+ private Statistics nextStatistics;
private int numberOfExecutions;
/** Populate this with the properties (by location) that are to be read */
private Map<Location, Collection<Property>> properties;
@@ -155,7 +158,9 @@
properties = new HashMap<Location, Collection<Property>>();
children = new HashMap<Location, List<Location>>();
- nextQueryResults = null;
+ nextColumns = null;
+ nextTuples = null;
+ nextStatistics = null;
}
static class IsAnyRequest extends ArgumentMatcher<Request> {
@@ -877,8 +882,8 @@
assertThat(node3.getChildren().isEmpty(), is(true));
assertThat(node3.getProperties().isEmpty(), is(true));
}
-
- //@Test
+
+ // @Test
public void shouldConstructValidSubgraphToString() {
Location child1 = Location.create(createPath(validPath, "x"));
Location child2 = Location.create(createPath(validPath, "y"));
@@ -900,18 +905,15 @@
setPropertiesToReadOn(child12, validIdProperty2);
setPropertiesToReadOn(child121, validIdProperty1);
setPropertiesToReadOn(child122, validIdProperty2);
-
+
Subgraph subgraph = graph.getSubgraphOfDepth(2).at(validPath);
assertThat(subgraph, is(notNullValue()));
assertThat(subgraph.getMaximumDepth(), is(2));
-
- String expectedToStringValue =
- "Subgraph\n"
- + "<name = \"c\" id2 = \"2\" id1 = \"1\">\n"
- + " <name = \"x\" id1 = \"1\">\n"
- + " <name = \"y\" id2 = \"2\">\n"
- + " <name = \"z\" >\n";
-
+
+ String expectedToStringValue = "Subgraph\n" + "<name = \"c\" id2 = \"2\" id1 = \"1\">\n"
+ + " <name = \"x\" id1 = \"1\">\n" + " <name = \"y\" id2 = \"2\">\n"
+ + " <name = \"z\" >\n";
+
// Get nodes by relative path ...
Node root = subgraph.getNode("./");
assertThat(root.getChildren(), hasItems(child1, child2, child3));
@@ -1247,52 +1249,68 @@
@Test
public void shouldPerformSearchWhenConnectorSupportsQueries() {
// Set the expected results that will be returned from the connector ...
- QueryResults expected = mock(QueryResults.class);
- nextQueryResults = expected;
+ Columns columns = mock(Columns.class);
+ List<Object[]> tuples = Collections.emptyList();
+ Statistics stats = mock(Statistics.class);
+ nextColumns = columns;
+ nextTuples = tuples;
+ nextStatistics = stats;
// Execute the search, and verify the results were consumed by the processor ...
String fullTextSearchExpression = "term1 term2";
- QueryResults results = graph.search(fullTextSearchExpression);
- assertThat(nextQueryResults, is(nullValue()));
+ QueryResults results = graph.search(fullTextSearchExpression, 10, 0);
+ assertThat(nextColumns, is(nullValue()));
+ assertThat(nextTuples, is(nullValue()));
+ assertThat(nextStatistics, is(nullValue()));
// The actual results should be what the processor returned ...
- assertThat(results, is(sameInstance(expected)));
+ assertThat(results.getColumns(), is(sameInstance(columns)));
+ assertThat(results.getTuples(), is(sameInstance(tuples)));
+ assertThat(results.getStatistics(), is(sameInstance(stats)));
}
@Test( expected = InvalidRequestException.class )
public void shouldFailToPerformSearchWhenConnectorDoesNotSupportsQueries() {
// Set the expected results that will be returned from the connector ...
- nextQueryResults = null;
+ nextColumns = null;
+ nextTuples = null;
+ nextStatistics = null;
+
+ // Execute the search, and verify the results were consumed by the processor ...
String fullTextSearchExpression = "term1 term2";
- graph.search(fullTextSearchExpression);
+ graph.search(fullTextSearchExpression, 10, 0);
}
@Test
public void shouldPerformQueryWhenConnectorSupportsQueries() {
// Set the expected results that will be returned from the connector ...
- QueryResults expected = mock(QueryResults.class);
List<Object[]> tuples = Collections.singletonList(new Object[] {"v1", "v2", "v3"});
- stub(expected.getTuples()).toReturn(tuples);
- nextQueryResults = expected;
+ Statistics statistics = mock(Statistics.class);
+ nextTuples = tuples;
+ nextStatistics = statistics;
// Execute the query, and verify the results were consumed by the processor ...
TypeSystem typeSystem = context.getValueFactories().getTypeSystem();
Schemata schemata = ImmutableSchemata.createBuilder(typeSystem).addTable("t1", "c1", "c2", "c3").build();
QueryCommand query = new SqlQueryParser().parseQuery("SELECT * FROM t1", typeSystem);
QueryResults results = graph.query(query, schemata).execute();
- assertThat(nextQueryResults, is(nullValue()));
+ assertThat(nextColumns, is(nullValue()));
+ assertThat(nextTuples, is(nullValue()));
+ assertThat(nextStatistics, is(nullValue()));
// The actual results should be what the processor returned ...
- List<Object[]> actualTuples = results.getTuples();
- assertThat(actualTuples, is(tuples));
+ assertThat(results.getColumns(), is(notNullValue()));
+ assertThat(results.getTuples(), is(tuples));
+ assertThat(results.getStatistics(), is(notNullValue()));
assertNextRequestAccessQuery(graph.getCurrentWorkspaceName(), "t1", columns("t1", "c1", "c2", "c3"), Limit.NONE);
}
@Test( expected = InvalidRequestException.class )
public void shouldFailToPerformQueryWhenConnectorDoesNotSupportsQueries() {
// Set the expected results that will be returned from the connector ...
- nextQueryResults = null;
+ nextColumns = null;
+ nextTuples = null;
+ nextStatistics = null;
// Execute the query, and verify the results were consumed by the processor ...
TypeSystem typeSystem = context.getValueFactories().getTypeSystem();
@@ -1501,11 +1519,13 @@
*/
@Override
public void process( AccessQueryRequest request ) {
- if (nextQueryResults == null) {
+ if (nextTuples == null) {
super.process(request); // should result in error
}
- request.setResults(nextQueryResults);
- nextQueryResults = null;
+ request.setResults(nextTuples, nextStatistics);
+ nextColumns = null;
+ nextTuples = null;
+ nextStatistics = null;
}
/**
@@ -1515,11 +1535,13 @@
*/
@Override
public void process( FullTextSearchRequest request ) {
- if (nextQueryResults == null) {
+ if (nextTuples == null) {
super.process(request); // should result in error
}
- request.setResults(nextQueryResults);
- nextQueryResults = null;
+ request.setResults(nextColumns, nextTuples, nextStatistics);
+ nextColumns = null;
+ nextTuples = null;
+ nextStatistics = null;
}
private Location actualLocationOf( Location location ) {
Deleted: trunk/dna-graph/src/test/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessorChannelTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessorChannelTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessorChannelTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,225 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.graph.connector.federation;
-
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsSame.sameInstance;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.stub;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.connector.MockRepositoryConnection;
-import org.jboss.dna.graph.connector.RepositoryConnection;
-import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
-import org.jboss.dna.graph.connector.federation.ForkRequestProcessor.Channel;
-import org.jboss.dna.graph.request.Request;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.MockitoAnnotations;
-import org.mockito.MockitoAnnotations.Mock;
-
-/**
- *
- */
-public class ForkRequestProcessorChannelTest {
-
- private ExecutionContext context;
- private String sourceName;
- private Channel channel;
- private List<Request> requests;
- private ExecutorService executor;
- private LinkedList<Request> executedRequests;
- private RepositoryConnection connection;
- @Mock
- private RepositoryConnectionFactory connectionFactory;
-
- @Before
- public void beforeEach() {
- MockitoAnnotations.initMocks(this);
- context = new ExecutionContext();
- sourceName = "SourceA";
- channel = new ForkRequestProcessor.Channel(sourceName);
- requests = new ArrayList<Request>();
- requests.add(new MockRequest());
- requests.add(new MockRequest());
- requests.add(new MockRequest());
- requests.add(new MockRequest());
-
- // Create the mock connection ...
- executedRequests = new LinkedList<Request>(); // this is where requests submitted to the connection will go
- connection = new MockRepositoryConnection(sourceName, executedRequests);
-
- // Stub the connection factory ...
- stub(connectionFactory.createConnection(sourceName)).toReturn(connection);
-
- // Create the executor ...
- executor = Executors.newSingleThreadExecutor();
- }
-
- protected static class MockRequest extends Request {
- private static final long serialVersionUID = 1L;
-
- @Override
- public boolean isReadOnly() {
- return false;
- }
- }
-
- @Test
- public void shouldCreateEmptyIteratorIfDoneCalledBeforeObtainingIterator() {
- channel.done();
- Iterator<Request> iter = channel.createIterator();
- assertThat(iter.hasNext(), is(false));
- }
-
- @Test
- public void shouldCreateEmptyIteratorIfDoneCalledAfterObtainingIterator() {
- Iterator<Request> iter = channel.createIterator();
- channel.done();
- assertThat(iter.hasNext(), is(false));
- }
-
- @Test
- public void shouldCreateIteratorOverRequestsAddedToChannelAfterObtainingIterator() {
- Iterator<Request> iter = channel.createIterator();
- // Add the requests ...
- for (Request request : requests) {
- channel.add(request);
- }
- // Call done ...
- channel.done();
- // Start iterating ...
- for (Request expected : requests) {
- assertThat(iter.hasNext(), is(true));
- assertThat(iter.next(), is(sameInstance(expected)));
- }
- assertThat(iter.hasNext(), is(false));
- }
-
- @Test
- public void shouldCreateIteratorOverRequestsAddedToChannelAfterBeginningIteration() {
- Iterator<Request> iter = channel.createIterator();
- // Add the requests in a separate thread ...
- new Thread(new AddRequestsRunnable(channel, requests, 100)).start();
- // Start iterating ...
- for (Request expected : requests) {
- assertThat(iter.hasNext(), is(true)); // blocks
- assertThat(iter.next(), is(sameInstance(expected)));
- }
- assertThat(iter.hasNext(), is(false));
- }
-
- @Test
- public void shouldSubmitRequestsToConnection() throws Exception {
- // Start the channel ...
- channel.start(executor, context, connectionFactory);
-
- // Submit the requests to the channel ...
- for (Request request : requests) {
- channel.add(request);
- }
-
- // Mark the channel as done ...
- channel.done();
-
- // Wait until the channel has completed ...
- channel.await();
-
- // Verify that all the requests to the channel were processed ...
- Iterator<Request> iter = executedRequests.iterator();
- for (Request expected : requests) {
- assertThat(iter.hasNext(), is(true));
- assertThat(iter.next(), is(sameInstance(expected)));
- }
- assertThat(iter.hasNext(), is(false));
- }
-
- @Test
- public void shouldSubmitBlockedRequestsToConnection() throws Exception {
- // Start the channel ...
- channel.start(executor, context, connectionFactory);
-
- // Submit the requests to the channel ...
- List<CountDownLatch> latches = new ArrayList<CountDownLatch>();
- for (Request request : requests) {
- CountDownLatch latch = new CountDownLatch(1);
- latches.add(latch);
- channel.add(request, latch);
- }
-
- // Mark the channel as done ...
- channel.done();
-
- // Wait until the channel has completed ...
- channel.await();
-
- // Verify that all of the latches were decremented ...
- for (CountDownLatch latch : latches) {
- latch.await();
- assertThat(latch.getCount(), is(0L));
- }
-
- // Verify that all the requests to the channel were processed ...
- Iterator<Request> iter = executedRequests.iterator();
- for (Request expected : requests) {
- assertThat(iter.hasNext(), is(true));
- assertThat(iter.next(), is(sameInstance(expected)));
- }
- assertThat(iter.hasNext(), is(false));
- }
-
- protected static class AddRequestsRunnable implements Runnable {
- private final Channel channel;
- private final Iterator<Request> requests;
- private final int intervalInMillis;
-
- protected AddRequestsRunnable( Channel channel,
- List<Request> requests,
- int intervalInMillis ) {
- this.requests = requests.iterator();
- this.intervalInMillis = intervalInMillis;
- this.channel = channel;
- }
-
- public void run() {
- while (requests.hasNext()) {
- try {
- Thread.sleep(intervalInMillis);
- } catch (InterruptedException err) {
- fail(err.getMessage());
- }
- channel.add(requests.next());
- }
- // Call done ...
- channel.done();
- }
- }
-}
Copied: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumnsTest.java (from rev 1417, trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java)
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumnsTest.java (rev 0)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/FullTextSearchResultColumnsTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,47 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.process;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class FullTextSearchResultColumnsTest {
+
+ @Test
+ public void shouldHaveLocationAndScoresButNoColumns() {
+ FullTextSearchResultColumns columns = new FullTextSearchResultColumns();
+ assertThat(columns.getColumnCount(), is(0));
+ assertThat(columns.getLocationCount(), is(1));
+ assertThat(columns.getTupleSize(), is(2));
+ assertThat(columns.hasFullTextSearchScores(), is(true));
+ assertThat(columns.getFullTextSearchScoreIndex(), is(1));
+ assertThat(columns.getLocationIndexForColumn(0), is(0));
+ assertThat(columns.getLocationIndex(), is(0));
+ }
+
+}
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/process/QueryResultColumnsTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -36,7 +36,6 @@
import org.jboss.dna.graph.query.QueryResults.Cursor;
import org.jboss.dna.graph.query.QueryResults.Statistics;
import org.jboss.dna.graph.query.model.Column;
-import org.jboss.dna.graph.query.model.QueryCommand;
import org.jboss.dna.graph.query.validate.Schemata;
import org.junit.Before;
import org.junit.Test;
@@ -56,8 +55,6 @@
@Mock
private Schemata schemata;
@Mock
- private QueryCommand command;
- @Mock
private Statistics statistics;
@Before
@@ -75,7 +72,7 @@
tuples = new ArrayList<Object[]>();
tuples.add(tuple(columns, new String[] {"/a/b/c", "/a/x/y"}, 1, 2, 3, "2a", "2b", "x"));
tuples.add(tuple(columns, new String[] {"/a/b/d", "/a/x/y"}, 4, 5, 6, "2a", "2b", "x"));
- results = new QueryResults(context, command, columns, statistics);
+ results = new QueryResults(columns, statistics, context.getProblems());
}
@Test
@@ -89,18 +86,13 @@
}
@Test
- public void shouldReturnSameQueryCommandPassedIntoConstructor() {
- assertThat(results.getCommand(), is(sameInstance(command)));
- }
-
- @Test
public void shouldReturnSameProblemsObjectAsInQueryContext() {
assertThat(results.getProblems(), is(sameInstance(context.getProblems())));
}
@Test
public void shouldReturnSameTuplesListPassedIntoConstructor() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
assertThat(results.getTuples(), is(sameInstance(tuples)));
}
@@ -113,14 +105,14 @@
@Test
public void shouldHaveNoTuplesIfConstructedWithEmptyTuplesList() {
tuples.clear();
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
assertThat(results.getTuples().isEmpty(), is(true));
assertThat(results.getCursor().hasNext(), is(false));
}
@Test
public void shouldReturnMutableTuplesList() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
assertThat(results.getTuples().isEmpty(), is(false));
results.getTuples().clear();
assertThat(results.getTuples().isEmpty(), is(true));
@@ -129,7 +121,7 @@
@Test
public void shouldReturnCursorThatAccessesTuples() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
Cursor cursor = results.getCursor();
Iterator<Object[]> expectedIter = tuples.iterator();
int rowNumber = 0;
@@ -163,7 +155,7 @@
@Test( expected = IllegalStateException.class )
public void shouldRequireNextOnCursorToBeCalledBeforeGettingValueUsingColumnIndex() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
Cursor cursor = results.getCursor();
assertThat(cursor.hasNext(), is(true));
cursor.getValue(0);
@@ -171,7 +163,7 @@
@Test( expected = IllegalStateException.class )
public void shouldRequireNextOnCursorToBeCalledBeforeGettingValueUsingColumnName() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
Cursor cursor = results.getCursor();
assertThat(cursor.hasNext(), is(true));
cursor.getValue("colA");
@@ -179,16 +171,16 @@
@Test
public void shouldPrintToStringAllResults() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
List<String> lines = StringUtil.splitLines(results.toString());
assertThat(lines.size(), is(tuples.size() + 4)); // = delim + header + delim + (...lines...) + delim
}
@Test
public void shouldPrintToStringBuilderAllResults() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
StringBuilder sb = new StringBuilder();
- results.toString(sb);
+ results.toString(typeSystem, sb);
List<String> lines = StringUtil.splitLines(sb.toString());
assertThat(lines.size(), is(tuples.size() + 4)); // = delim + header + delim + (...lines...) + delim
}
@@ -196,18 +188,18 @@
@Test
public void shouldPrintToStringBuilderAllResultsEvenWhenNoTuples() {
tuples.clear();
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
StringBuilder sb = new StringBuilder();
- results.toString(sb);
+ results.toString(typeSystem, sb);
List<String> lines = StringUtil.splitLines(sb.toString());
assertThat(lines.size(), is(4)); // = delim + header + delim + (...lines...) + delim
}
@Test
public void shouldPrintToStringBuilderOnlyFirstLinesOfResults() {
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
StringBuilder sb = new StringBuilder();
- results.toString(sb, 1);
+ results.toString(typeSystem, sb, 1);
List<String> lines = StringUtil.splitLines(sb.toString());
assertThat(lines.size(), is(1 + 4)); // = delim + header + delim + (...lines...) + delim
}
@@ -215,9 +207,9 @@
@Test
public void shouldPrintToStringBuilderOnlyFirstLinesOfResultsEvenWhenNoTuples() {
tuples.clear();
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
StringBuilder sb = new StringBuilder();
- results.toString(sb, 3);
+ results.toString(typeSystem, sb, 3);
List<String> lines = StringUtil.splitLines(sb.toString());
assertThat(lines.size(), is(4)); // = delim + header + delim + (...lines...) + delim
}
@@ -225,9 +217,9 @@
@Test
public void shouldPrintToStringBuilderAllResultsWhenMaxRowParameterIsLargerThanNumberOfTuples() {
tuples.clear();
- results = new QueryResults(context, command, columns, statistics, tuples);
+ results = new QueryResults(columns, statistics, tuples, context.getProblems());
StringBuilder sb = new StringBuilder();
- results.toString(sb, 3);
+ results.toString(typeSystem, sb, 3);
List<String> lines = StringUtil.splitLines(sb.toString());
assertThat(lines.size(), is(tuples.size() + 4)); // = delim + header + delim + (...lines...) + delim
}
Copied: trunk/dna-graph/src/test/java/org/jboss/dna/graph/request/CompositeRequestChannelTest.java (from rev 1417, trunk/dna-graph/src/test/java/org/jboss/dna/graph/connector/federation/ForkRequestProcessorChannelTest.java)
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/request/CompositeRequestChannelTest.java (rev 0)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/request/CompositeRequestChannelTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,223 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.request;
+
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsSame.sameInstance;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.stub;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.connector.MockRepositoryConnection;
+import org.jboss.dna.graph.connector.RepositoryConnection;
+import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.MockitoAnnotations;
+import org.mockito.MockitoAnnotations.Mock;
+
+/**
+ *
+ */
+public class CompositeRequestChannelTest {
+
+ private ExecutionContext context;
+ private String sourceName;
+ private CompositeRequestChannel channel;
+ private List<Request> requests;
+ private ExecutorService executor;
+ private LinkedList<Request> executedRequests;
+ private RepositoryConnection connection;
+ @Mock
+ private RepositoryConnectionFactory connectionFactory;
+
+ @Before
+ public void beforeEach() {
+ MockitoAnnotations.initMocks(this);
+ context = new ExecutionContext();
+ sourceName = "SourceA";
+ channel = new CompositeRequestChannel(sourceName);
+ requests = new ArrayList<Request>();
+ requests.add(new MockRequest());
+ requests.add(new MockRequest());
+ requests.add(new MockRequest());
+ requests.add(new MockRequest());
+
+ // Create the mock connection ...
+ executedRequests = new LinkedList<Request>(); // this is where requests submitted to the connection will go
+ connection = new MockRepositoryConnection(sourceName, executedRequests);
+
+ // Stub the connection factory ...
+ stub(connectionFactory.createConnection(sourceName)).toReturn(connection);
+
+ // Create the executor ...
+ executor = Executors.newSingleThreadExecutor();
+ }
+
+ protected static class MockRequest extends Request {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public boolean isReadOnly() {
+ return false;
+ }
+ }
+
+ @Test
+ public void shouldCreateEmptyIteratorIfDoneCalledBeforeObtainingIterator() {
+ channel.close();
+ Iterator<Request> iter = channel.createIterator();
+ assertThat(iter.hasNext(), is(false));
+ }
+
+ @Test
+ public void shouldCreateEmptyIteratorIfDoneCalledAfterObtainingIterator() {
+ Iterator<Request> iter = channel.createIterator();
+ channel.close();
+ assertThat(iter.hasNext(), is(false));
+ }
+
+ @Test
+ public void shouldCreateIteratorOverRequestsAddedToChannelAfterObtainingIterator() {
+ Iterator<Request> iter = channel.createIterator();
+ // Add the requests ...
+ for (Request request : requests) {
+ channel.add(request);
+ }
+ // Call done ...
+ channel.close();
+ // Start iterating ...
+ for (Request expected : requests) {
+ assertThat(iter.hasNext(), is(true));
+ assertThat(iter.next(), is(sameInstance(expected)));
+ }
+ assertThat(iter.hasNext(), is(false));
+ }
+
+ @Test
+ public void shouldCreateIteratorOverRequestsAddedToChannelAfterBeginningIteration() {
+ Iterator<Request> iter = channel.createIterator();
+ // Add the requests in a separate thread ...
+ new Thread(new AddRequestsRunnable(channel, requests, 100)).start();
+ // Start iterating ...
+ for (Request expected : requests) {
+ assertThat(iter.hasNext(), is(true)); // blocks
+ assertThat(iter.next(), is(sameInstance(expected)));
+ }
+ assertThat(iter.hasNext(), is(false));
+ }
+
+ @Test
+ public void shouldSubmitRequestsToConnection() throws Exception {
+ // Start the channel ...
+ channel.start(executor, context, connectionFactory);
+
+ // Submit the requests to the channel ...
+ for (Request request : requests) {
+ channel.add(request);
+ }
+
+ // Mark the channel as done ...
+ channel.close();
+
+ // Wait until the channel has completed ...
+ channel.await();
+
+ // Verify that all the requests to the channel were processed ...
+ Iterator<Request> iter = executedRequests.iterator();
+ for (Request expected : requests) {
+ assertThat(iter.hasNext(), is(true));
+ assertThat(iter.next(), is(sameInstance(expected)));
+ }
+ assertThat(iter.hasNext(), is(false));
+ }
+
+ @Test
+ public void shouldSubmitBlockedRequestsToConnection() throws Exception {
+ // Start the channel ...
+ channel.start(executor, context, connectionFactory);
+
+ // Submit the requests to the channel ...
+ List<CountDownLatch> latches = new ArrayList<CountDownLatch>();
+ for (Request request : requests) {
+ CountDownLatch latch = new CountDownLatch(1);
+ latches.add(latch);
+ channel.add(request, latch);
+ }
+
+ // Mark the channel as done ...
+ channel.close();
+
+ // Wait until the channel has completed ...
+ channel.await();
+
+ // Verify that all of the latches were decremented ...
+ for (CountDownLatch latch : latches) {
+ latch.await();
+ assertThat(latch.getCount(), is(0L));
+ }
+
+ // Verify that all the requests to the channel were processed ...
+ Iterator<Request> iter = executedRequests.iterator();
+ for (Request expected : requests) {
+ assertThat(iter.hasNext(), is(true));
+ assertThat(iter.next(), is(sameInstance(expected)));
+ }
+ assertThat(iter.hasNext(), is(false));
+ }
+
+ protected static class AddRequestsRunnable implements Runnable {
+ private final CompositeRequestChannel channel;
+ private final Iterator<Request> requests;
+ private final int intervalInMillis;
+
+ protected AddRequestsRunnable( CompositeRequestChannel channel,
+ List<Request> requests,
+ int intervalInMillis ) {
+ this.requests = requests.iterator();
+ this.intervalInMillis = intervalInMillis;
+ this.channel = channel;
+ }
+
+ public void run() {
+ while (requests.hasNext()) {
+ try {
+ Thread.sleep(intervalInMillis);
+ } catch (InterruptedException err) {
+ fail(err.getMessage());
+ }
+ channel.add(requests.next());
+ }
+ // Call done ...
+ channel.close();
+ }
+ }
+}
Property changes on: trunk/dna-graph/src/test/java/org/jboss/dna/graph/request/CompositeRequestChannelTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,279 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.graph.search;
-
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNull.notNullValue;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.stub;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.verifyZeroInteractions;
-import java.util.List;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Graph;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.Subgraph;
-import org.jboss.dna.graph.connector.RepositoryConnection;
-import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
-import org.jboss.dna.graph.connector.RepositoryContext;
-import org.jboss.dna.graph.connector.RepositorySourceException;
-import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
-import org.jboss.dna.graph.observe.Observer;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.model.Query;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.InvalidWorkspaceException;
-import org.jboss.dna.graph.search.SearchProvider.Session;
-import org.junit.Before;
-import org.junit.Test;
-
-public class SearchEngineTest {
-
- private SearchEngine engine;
- private ExecutionContext context;
- private String sourceName;
- private String workspaceName1;
- private String workspaceName2;
- private InMemoryRepositorySource source;
- private RepositoryConnectionFactory connectionFactory;
- private SearchProvider provider;
- private SearchProvider.Session sessionWs1;
- private SearchProvider.Session sessionWs2;
- private SearchProvider.Session sessionDefault;
- private Graph content;
-
- @Before
- public void beforeEach() throws Exception {
- context = new ExecutionContext();
- sourceName = "sourceA";
- workspaceName1 = "workspace1";
- workspaceName2 = "workspace2";
-
- // Set up the source and graph instance ...
- source = new InMemoryRepositorySource();
- source.setName(sourceName);
- RepositoryContext repositoryContext = new RepositoryContext() {
- @SuppressWarnings( "synthetic-access" )
- public ExecutionContext getExecutionContext() {
- return context;
- }
-
- public Observer getObserver() {
- return null;
- }
-
- public RepositoryConnectionFactory getRepositoryConnectionFactory() {
- return null;
- }
-
- @SuppressWarnings( "synthetic-access" )
- public Subgraph getConfiguration( int depth ) {
- Graph result = Graph.create(source, context);
- result.useWorkspace("configSpace");
- return result.getSubgraphOfDepth(depth).at("/");
- }
- };
- source.initialize(repositoryContext);
- content = Graph.create(source, context);
-
- // Create the workspaces ...
- content.createWorkspace().named(workspaceName1);
- content.createWorkspace().named(workspaceName2);
-
- // Set up the connection factory ...
- connectionFactory = new RepositoryConnectionFactory() {
- @SuppressWarnings( "synthetic-access" )
- public RepositoryConnection createConnection( String sourceName ) throws RepositorySourceException {
- return source.getConnection();
- }
- };
-
- // Set up the index layout ...
- provider = mock(SearchProvider.class);
- sessionWs1 = mockSession(provider, workspaceName1);
- sessionWs2 = mockSession(provider, workspaceName2);
- sessionDefault = mockSession(provider, "");
-
- // Now set up the search engine ...
- engine = new SearchEngine(context, sourceName, connectionFactory, provider);
- }
-
- protected Session mockSession( SearchProvider mockProvider,
- String workspaceName ) {
- Session session = mock(Session.class);
- stub(mockProvider.createSession(context, sourceName, workspaceName, false, false)).toReturn(session);
- stub(mockProvider.createSession(context, sourceName, workspaceName, false, true)).toReturn(session);
- stub(mockProvider.createSession(context, sourceName, workspaceName, true, false)).toReturn(session);
- stub(mockProvider.createSession(context, sourceName, workspaceName, true, true)).toReturn(session);
- stub(session.getWorkspaceName()).toReturn(workspaceName);
- stub(session.getSourceName()).toReturn(sourceName);
- return session;
- }
-
- protected Path path( String path ) {
- return context.getValueFactories().getPathFactory().create(path);
- }
-
- protected void loadContent() throws Exception {
- // Load some content ...
- content.useWorkspace(workspaceName1);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
- content.useWorkspace(workspaceName2);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("aircraft.xml")).into("/");
- }
-
- @Test
- public void shouldReturnSearchWorkspaceForExistingWorkspaceInSource() {
- SearchEngine.Workspace workspace = engine.getWorkspace(workspaceName1);
- assertThat(workspace, is(notNullValue()));
- assertThat(workspace.modifiedNodesSinceLastOptimize.get(), is(0));
- assertThat(workspace.getWorkspaceName(), is(workspaceName1));
- }
-
- @Test( expected = InvalidWorkspaceException.class )
- public void shouldFailToReturnSearchWorkspaceForNonExistantWorkspaceInSource() {
- engine.getWorkspace(workspaceName1 + "foobar");
- }
-
- @Test
- public void shouldDoNothingDuringRemoveWorkspaceIfWorkspaceHasNotBeenLoaded() throws Exception {
- engine.removeWorkspace(workspaceName1);
- verifyZeroInteractions(provider);
- }
-
- @Test
- public void shouldForwardRemoveWorkspaceToIndexLayout() throws Exception {
- engine.getWorkspace(workspaceName1);
- engine.removeWorkspace(workspaceName1);
- verify(provider).destroyIndexes(context, sourceName, workspaceName1);
- verifyNoMoreInteractions(provider);
- }
-
- @Test
- public void shouldForwardRemoveWorkspaceToIndexLayoutForEachWorkspaceThatWasLoaded() throws Exception {
- engine.getWorkspace(workspaceName1);
- engine.removeWorkspaces();
- verify(provider).destroyIndexes(context, sourceName, workspaceName1);
- verifyZeroInteractions(provider);
- }
-
- @Test
- public void shouldForwardRemoveWorkspaceToIndexLayoutForAllWorkspacesThatWereLoaded() throws Exception {
- engine.getWorkspace(workspaceName1);
- engine.getWorkspace(workspaceName2);
- engine.removeWorkspaces();
- verify(provider).destroyIndexes(context, sourceName, workspaceName1);
- verify(provider).destroyIndexes(context, sourceName, workspaceName2);
- verifyNoMoreInteractions(provider);
- }
-
- @Test( expected = IllegalArgumentException.class )
- public void shouldFailIfNullWorkspaceNamePassedToRemoveWorkspace() throws Exception {
- engine.removeWorkspace(null);
- }
-
- @Test
- public void shouldForwardOptimizeOfWorkspaceToIndexSession() throws Exception {
- engine.optimize(workspaceName1);
- verify(sessionWs1).optimize();
- verify(sessionWs1).commit();
- verifyNoMoreInteractions(sessionWs1);
- }
-
- @Test
- public void shouldForwardOptimizeOfAllWorkspacesToEachIndexSession() throws Exception {
- engine.optimize(); // will find all three workspaces
- verify(sessionWs1).optimize();
- verify(sessionWs1).commit();
- verifyNoMoreInteractions(sessionWs1);
- verify(sessionWs2).optimize();
- verify(sessionWs2).commit();
- verifyNoMoreInteractions(sessionWs2);
- verify(sessionDefault).optimize();
- verify(sessionDefault).commit();
- verifyNoMoreInteractions(sessionDefault);
- }
-
- @Test
- public void shouldForwardIndexOfWorkspaceToIndexSession() throws Exception {
- loadContent();
- engine.index(workspaceName1, 3);
- verify(sessionWs1, times(18)).index((Node)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardIndexOfSubgraphInWorkspaceToIndexSession() throws Exception {
- loadContent();
- engine.index(workspaceName1, path("/Cars"), 3);
- verify(sessionWs1).deleteBelow(path("/Cars"));
- verify(sessionWs1, times(17)).index((Node)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardIndexEntireWorkspaceToIndexSession() throws Exception {
- loadContent();
- engine.index(workspaceName1, path("/"), 3);
- verify(sessionWs1, times(18)).index((Node)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardIndexOfAllWorkspacesToEachIndexSession() throws Exception {
- loadContent();
- engine.index(3); // will find all three workspaces
- verify(sessionWs1, times(18)).index((Node)anyObject());
- verify(sessionWs1).commit();
- verify(sessionWs2, times(24)).index((Node)anyObject());
- verify(sessionWs2).commit();
- verify(sessionDefault, times(1)).index((Node)anyObject());
- verify(sessionDefault).commit();
- }
-
- @SuppressWarnings( "unchecked" )
- @Test
- public void shouldForwardSearchToIndexSession() throws Exception {
- String query = "term1 term2";
- engine.fullTextSearch(context, workspaceName1, query, 3, 0);
- verify(sessionWs1).search(eq(context), eq(query), eq(3), eq(0), (List<Location>)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardQueryToIndexSession() throws Exception {
- Query query = mock(Query.class);
- Schemata schemata = mock(Schemata.class);
- engine.query(context, workspaceName1, query, schemata);
- verify(sessionWs1).query(eq(new QueryContext(schemata, context.getValueFactories().getTypeSystem())), eq(query));
- verify(sessionWs1).commit();
- }
-}
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrQueryManager.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrQueryManager.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrQueryManager.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -60,6 +60,8 @@
@Immutable
class JcrQueryManager implements QueryManager {
+ public static final int MAXIMUM_RESULTS_FOR_FULL_TEXT_SEARCH_QUERIES = Integer.MAX_VALUE;
+
private final JcrSession session;
JcrQueryManager( JcrSession session ) {
@@ -317,7 +319,7 @@
*/
public QueryResult execute() {
// Submit immediately to the workspace graph ...
- QueryResults result = session.workspace().graph().search(statement);
+ QueryResults result = session.workspace().graph().search(statement, MAXIMUM_RESULTS_FOR_FULL_TEXT_SEARCH_QUERIES, 0);
return new JcrQueryResult(session, result);
}
}
Deleted: trunk/dna-search/.classpath
===================================================================
--- trunk/dna-search/.classpath 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/.classpath 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,10 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<classpath>
- <classpathentry kind="src" path="src/main/java"/>
- <classpathentry kind="src" path="src/main/resources"/>
- <classpathentry kind="src" output="target/test-classes" path="src/test/java"/>
- <classpathentry kind="src" output="target/test-classes" path="src/test/resources"/>
- <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
- <classpathentry kind="con" path="org.maven.ide.eclipse.MAVEN2_CLASSPATH_CONTAINER"/>
- <classpathentry kind="output" path="target/classes"/>
-</classpath>
Deleted: trunk/dna-search/.project
===================================================================
--- trunk/dna-search/.project 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/.project 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
- <name>dna-search</name>
- <comment></comment>
- <projects>
- </projects>
- <buildSpec>
- <buildCommand>
- <name>org.maven.ide.eclipse.maven2Builder</name>
- <arguments>
- </arguments>
- </buildCommand>
- <buildCommand>
- <name>org.eclipse.jdt.core.javabuilder</name>
- <arguments>
- </arguments>
- </buildCommand>
- </buildSpec>
- <natures>
- <nature>org.eclipse.jdt.core.javanature</nature>
- <nature>org.maven.ide.eclipse.maven2Nature</nature>
- </natures>
-</projectDescription>
Deleted: trunk/dna-search/pom.xml
===================================================================
--- trunk/dna-search/pom.xml 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/pom.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,102 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.jboss.dna</groupId>
- <artifactId>dna</artifactId>
- <version>0.7-SNAPSHOT</version>
- </parent>
- <!-- The groupId and version values are inherited from parent -->
- <artifactId>dna-search</artifactId>
- <packaging>jar</packaging>
- <name>JBoss DNA Search</name>
- <description>JBoss DNA search and query engine library</description>
- <url>http://labs.jboss.org/dna</url>
-
- <!--
- Define the dependencies. Note that all version and scopes default to those
- defined in the dependencyManagement section of the parent pom.
- -->
- <dependencies>
- <dependency>
- <groupId>org.jboss.dna</groupId>
- <artifactId>dna-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.jboss.dna</groupId>
- <artifactId>dna-common</artifactId>
- <version>${project.version}</version>
- <type>test-jar</type>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.jboss.dna</groupId>
- <artifactId>dna-graph</artifactId>
- </dependency>
- <dependency>
- <groupId>org.jboss.dna</groupId>
- <artifactId>dna-graph</artifactId>
- <version>${project.version}</version>
- <type>test-jar</type>
- <scope>test</scope>
- </dependency>
- <!--
- Lucene
- -->
- <dependency>
- <groupId>org.apache.lucene</groupId>
- <artifactId>lucene-core</artifactId>
- <version>3.0.0</version>
- </dependency>
- <dependency>
- <groupId>org.apache.lucene</groupId>
- <artifactId>lucene-analyzers</artifactId>
- <version>3.0.0</version>
- </dependency>
- <dependency>
- <groupId>org.apache.lucene</groupId>
- <artifactId>lucene-snowball</artifactId>
- <version>3.0.0</version>
- </dependency>
- <dependency>
- <groupId>org.apache.lucene</groupId>
- <artifactId>lucene-regex</artifactId>
- <version>3.0.0</version>
- </dependency>
- <!--
- Testing (note the scope)
- -->
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- </dependency>
- <!--
- Logging (require SLF4J API for compiling, but use Log4J and its SLF4J binding for testing)
- -->
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-log4j12</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- <scope>test</scope>
- </dependency>
- <!--
- Java Concurrency in Practice annotations
- -->
- <dependency>
- <groupId>net.jcip</groupId>
- <artifactId>jcip-annotations</artifactId>
- </dependency>
- </dependencies>
-</project>
\ No newline at end of file
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,1974 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.document.NumericField;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter.MaxFieldLength;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeQuery;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
-import org.jboss.dna.common.text.NoOpEncoder;
-import org.jboss.dna.common.text.SecureHashTextEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.common.util.Logger;
-import org.jboss.dna.common.util.SecureHash.Algorithm;
-import org.jboss.dna.graph.DnaLexicon;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.JcrLexicon;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.property.DateTime;
-import org.jboss.dna.graph.property.Name;
-import org.jboss.dna.graph.property.NamespaceRegistry;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.Property;
-import org.jboss.dna.graph.property.ValueFactories;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.property.basic.JodaDateTime;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryEngine;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.model.Length;
-import org.jboss.dna.graph.query.model.NodeDepth;
-import org.jboss.dna.graph.query.model.NodeLocalName;
-import org.jboss.dna.graph.query.model.NodeName;
-import org.jboss.dna.graph.query.model.NodePath;
-import org.jboss.dna.graph.query.model.Operator;
-import org.jboss.dna.graph.query.model.PropertyValue;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.optimize.Optimizer;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
-import org.jboss.dna.graph.query.plan.CanonicalPlanner;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.plan.Planner;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.QueryProcessor;
-import org.jboss.dna.graph.request.ChangeRequest;
-import org.jboss.dna.graph.search.SearchException;
-import org.jboss.dna.graph.search.SearchProvider;
-import org.jboss.dna.search.IndexRules.FieldType;
-import org.jboss.dna.search.IndexRules.NumericRule;
-import org.jboss.dna.search.IndexRules.Rule;
-import org.jboss.dna.search.LuceneSession.TupleCollector;
-import org.jboss.dna.search.query.CompareLengthQuery;
-import org.jboss.dna.search.query.CompareNameQuery;
-import org.jboss.dna.search.query.ComparePathQuery;
-import org.jboss.dna.search.query.CompareStringQuery;
-import org.jboss.dna.search.query.IdsQuery;
-import org.jboss.dna.search.query.MatchNoneQuery;
-import org.jboss.dna.search.query.NotQuery;
-
-/**
- * A simple {@link SearchProvider} implementation that relies upon two separate indexes: one for the node content and a second one
- * for paths and UUIDs.
- */
-@ThreadSafe
-public class DualIndexSearchProvider implements SearchProvider {
-
- /**
- * The default set of {@link IndexRules} used by {@link DualIndexSearchProvider} instances when no rules are provided. These
- * rules default to index and analyze all properties, and to index the {@link DnaLexicon#UUID dna:uuid} and
- * {@link JcrLexicon#UUID jcr:uuid} properties to be indexed and stored only (not analyzed and not included in full-text
- * search. The rules also treat {@link JcrLexicon#CREATED jcr:created} and {@link JcrLexicon#LAST_MODIFIED jcr:lastModified}
- * properties as dates.
- */
- public static final IndexRules DEFAULT_RULES;
-
- static {
- // We know that the earliest creation/modified dates cannot be before November 1 2009,
- // which is before this feature was implemented
- long earliestChangeDate = new JodaDateTime(2009, 11, 01, 0, 0, 0, 0).getMilliseconds();
-
- IndexRules.Builder builder = IndexRules.createBuilder();
- // Configure the default behavior ...
- builder.defaultTo(Field.Store.YES, Field.Index.ANALYZED);
- // Configure the UUID properties to be just indexed and stored (not analyzed, not included in full-text) ...
- builder.stringField(JcrLexicon.UUID, Field.Store.YES, Field.Index.NOT_ANALYZED);
- builder.stringField(DnaLexicon.UUID, Field.Store.YES, Field.Index.NOT_ANALYZED);
- // Configure the properties that we'll treat as dates ...
- builder.dateField(JcrLexicon.CREATED, Field.Store.YES, Field.Index.NOT_ANALYZED, earliestChangeDate);
- builder.dateField(JcrLexicon.LAST_MODIFIED, Field.Store.YES, Field.Index.NOT_ANALYZED, earliestChangeDate);
- DEFAULT_RULES = builder.build();
- }
-
- protected static final int MIN_DEPTH = 0;
- protected static final int MAX_DEPTH = 100;
- protected static final int MIN_SNS_INDEX = 1;
- protected static final int MAX_SNS_INDEX = 1000; // assume there won't be more than 1000 same-name-siblings
-
- protected static final String PATHS_INDEX_NAME = "paths";
- protected static final String CONTENT_INDEX_NAME = "content";
-
- /**
- * Given the name of a property field of the form "<namespace>:<local>" (where <namespace> can be zero-length), this
- * provider also stores the value(s) for free-text searching in a field named ":ft:<namespace>:<local>". Thus, even if
- * the namespace is zero-length, the free-text search field will be named ":ft::<local>" and will not clash with any other
- * property name.
- */
- protected static final String FULL_TEXT_PREFIX = ":ft:";
-
- /**
- * This index stores only these fields, so we can use the most obvious names and not worry about clashes.
- */
- static class PathIndex {
- public static final String PATH = "pth";
- public static final String NODE_NAME = "nam";
- public static final String LOCAL_NAME = "loc";
- public static final String SNS_INDEX = "sns";
- public static final String LOCATION_ID_PROPERTIES = "idp";
- public static final String ID = ContentIndex.ID;
- public static final String DEPTH = "dep";
- }
-
- /**
- * This index stores these two fields <i>plus</i> all properties. Therefore, we have to worry about name clashes, which is why
- * these field names are prefixed with '::', which is something that does appear in property names as they are serialized.
- */
- static class ContentIndex {
- public static final String ID = "::id";
- public static final String FULL_TEXT = "::fts";
- }
-
- /**
- * The number of results that should be returned when performing queries while deleting entire branches of content. The
- * current value is {@value} .
- */
- protected static final int SIZE_OF_DELETE_BATCHES = 1000;
-
- private ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
- @Override
- protected DateFormat initialValue() {
- return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
- }
- };
-
- /**
- * Obtain an immutable {@link FieldSelector} instance that accesses the UUID field.
- */
- protected static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- return PathIndex.ID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
- }
- };
-
- private final IndexRules rules;
- private final LuceneConfiguration directoryConfiguration;
- private final TextEncoder namespaceEncoder;
-
- public DualIndexSearchProvider( LuceneConfiguration directoryConfiguration,
- IndexRules rules ) {
- assert directoryConfiguration != null;
- assert rules != null;
- this.rules = rules;
- this.directoryConfiguration = directoryConfiguration;
- this.namespaceEncoder = new SecureHashTextEncoder(Algorithm.SHA_1, 10);
- }
-
- public DualIndexSearchProvider( LuceneConfiguration directoryConfiguration ) {
- this(directoryConfiguration, DEFAULT_RULES);
- }
-
- /**
- * Get the date formatter that can be reused safely within the current thread.
- *
- * @return the date formatter; never null
- */
- protected DateFormat dateFormatter() {
- return dateFormatter.get();
- }
-
- /**
- * Get the text encoder that should be used to encode namespaces in the search index.
- *
- * @return the namespace text encoder; never null
- */
- protected TextEncoder getNamespaceEncoder() {
- return new NoOpEncoder();
- }
-
- /**
- * Create a Lucene {@link Analyzer} analyzer that should be used for indexing and searching.
- *
- * @return the analyzer; never null
- */
- protected Analyzer createAnalyzer() {
- return new StandardAnalyzer(Version.LUCENE_CURRENT);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider#createSession(org.jboss.dna.graph.ExecutionContext, java.lang.String,
- * java.lang.String, boolean, boolean)
- */
- public SearchProvider.Session createSession( ExecutionContext context,
- String sourceName,
- String workspaceName,
- boolean overwrite,
- boolean readOnly ) {
- Directory pathIndexDirectory = directoryConfiguration.getDirectory(workspaceName, PATHS_INDEX_NAME);
- Directory contentIndexDirectory = directoryConfiguration.getDirectory(workspaceName, CONTENT_INDEX_NAME);
- assert pathIndexDirectory != null;
- assert contentIndexDirectory != null;
- Analyzer analyzer = createAnalyzer();
- assert analyzer != null;
- NamespaceRegistry encodingRegistry = new EncodingNamespaceRegistry(context.getNamespaceRegistry(), namespaceEncoder);
- ExecutionContext encodingContext = context.with(encodingRegistry);
- return new DualIndexSession(encodingContext, sourceName, workspaceName, rules, pathIndexDirectory, contentIndexDirectory,
- analyzer, overwrite, readOnly);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider#destroyIndexes(org.jboss.dna.graph.ExecutionContext, java.lang.String,
- * java.lang.String)
- */
- public boolean destroyIndexes( ExecutionContext context,
- String sourceName,
- String workspaceName ) {
- directoryConfiguration.destroyDirectory(workspaceName, PATHS_INDEX_NAME);
- directoryConfiguration.destroyDirectory(workspaceName, CONTENT_INDEX_NAME);
- return true;
- }
-
- protected class DualIndexSession extends LuceneSession {
- private final Directory pathsIndexDirectory;
- private final Directory contentIndexDirectory;
- private IndexReader pathsReader;
- private IndexWriter pathsWriter;
- private IndexSearcher pathsSearcher;
- private IndexReader contentReader;
- private IndexWriter contentWriter;
- private IndexSearcher contentSearcher;
-
- protected DualIndexSession( ExecutionContext context,
- String sourceName,
- String workspaceName,
- IndexRules rules,
- Directory pathsIndexDirectory,
- Directory contentIndexDirectory,
- Analyzer analyzer,
- boolean overwrite,
- boolean readOnly ) {
- super(context, sourceName, workspaceName, rules, analyzer, overwrite, readOnly);
- this.pathsIndexDirectory = pathsIndexDirectory;
- this.contentIndexDirectory = contentIndexDirectory;
- assert this.pathsIndexDirectory != null;
- assert this.contentIndexDirectory != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneSession#fullTextFieldName(java.lang.String)
- */
- @Override
- protected String fullTextFieldName( String propertyName ) {
- return FULL_TEXT_PREFIX + propertyName;
- }
-
- protected void addIdProperties( Location location,
- Document doc ) {
- if (!location.hasIdProperties()) return;
- for (Property idProp : location.getIdProperties()) {
- String fieldValue = serializeProperty(idProp);
- doc.add(new Field(PathIndex.LOCATION_ID_PROPERTIES, fieldValue, Field.Store.YES, Field.Index.NOT_ANALYZED));
- }
- }
-
- protected Location readLocation( Document doc ) {
- // Read the path ...
- String pathString = doc.get(PathIndex.PATH);
- Path path = pathFactory.create(pathString);
- // Look for the Location's ID properties ...
- String[] idProps = doc.getValues(PathIndex.LOCATION_ID_PROPERTIES);
- if (idProps.length == 0) {
- return Location.create(path);
- }
- if (idProps.length == 1) {
- Property idProp = deserializeProperty(idProps[0]);
- if (idProp == null) return Location.create(path);
- if (idProp.isSingle() && (idProp.getName().equals(JcrLexicon.UUID) || idProp.getName().equals(DnaLexicon.UUID))) {
- return Location.create(path, (UUID)idProp.getFirstValue()); // know that deserialize returns UUID value
- }
- return Location.create(path, idProp);
- }
- List<Property> properties = new LinkedList<Property>();
- for (String idProp : doc.getValues(PathIndex.LOCATION_ID_PROPERTIES)) {
- Property prop = deserializeProperty(idProp);
- if (prop != null) properties.add(prop);
- }
- return properties.isEmpty() ? Location.create(path) : Location.create(path, properties);
-
- }
-
- protected final String serializeProperty( Property property ) {
- StringBuilder sb = new StringBuilder();
- sb.append(stringFactory.create(property.getName()));
- sb.append('=');
- Iterator<?> iter = property.getValues();
- if (iter.hasNext()) {
- sb.append(stringFactory.create(iter.next()));
- }
- while (iter.hasNext()) {
- sb.append('\n');
- sb.append(stringFactory.create(iter.next()));
- }
- return sb.toString();
- }
-
- protected final Property deserializeProperty( String propertyString ) {
- int index = propertyString.indexOf('=');
- assert index > -1;
- if (index == propertyString.length() - 1) return null;
- Name propName = nameFactory.create(propertyString.substring(0, index));
- String valueString = propertyString.substring(index + 1);
- // Break into multiple values if multiple lines ...
- String[] values = valueString.split("\\n");
- if (values.length == 0) return null;
- if (values.length == 1) {
- Object value = values[0];
- if (DnaLexicon.UUID.equals(propName) || JcrLexicon.UUID.equals(propName)) {
- value = uuidFactory.create(value);
- }
- return context.getPropertyFactory().create(propName, value);
- }
- List<String> propValues = new LinkedList<String>();
- for (String value : values) {
- propValues.add(value);
- }
- return context.getPropertyFactory().create(propName, propValues);
- }
-
- protected IndexReader getPathsReader() throws IOException {
- if (pathsReader == null) {
- pathsReader = IndexReader.open(pathsIndexDirectory, readOnly);
- }
- return pathsReader;
- }
-
- protected IndexReader getContentReader() throws IOException {
- if (contentReader == null) {
- contentReader = IndexReader.open(contentIndexDirectory, readOnly);
- }
- return contentReader;
- }
-
- protected IndexWriter getPathsWriter() throws IOException {
- assert !readOnly;
- if (pathsWriter == null) {
- if (overwrite) {
- // Always overwrite it ...
- pathsWriter = new IndexWriter(pathsIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
- } else {
- // Don't overwrite, but create if missing ...
- pathsWriter = new IndexWriter(pathsIndexDirectory, analyzer, MaxFieldLength.UNLIMITED);
- }
- }
- return pathsWriter;
- }
-
- protected IndexWriter getContentWriter() throws IOException {
- assert !readOnly;
- if (contentWriter == null) {
- if (overwrite) {
- // Always overwrite it ...
- contentWriter = new IndexWriter(contentIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
- } else {
- // Don't overwrite, but create if missing ...
- contentWriter = new IndexWriter(contentIndexDirectory, analyzer, MaxFieldLength.UNLIMITED);
- }
- }
- return contentWriter;
- }
-
- protected IndexSearcher getPathsSearcher() throws IOException {
- if (pathsSearcher == null) {
- pathsSearcher = new IndexSearcher(getPathsReader());
- }
- return pathsSearcher;
- }
-
- @Override
- public IndexSearcher getContentSearcher() throws IOException {
- if (contentSearcher == null) {
- contentSearcher = new IndexSearcher(getContentReader());
- }
- return contentSearcher;
- }
-
- protected boolean hasWriters() {
- return pathsWriter != null || contentWriter != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#index(org.jboss.dna.graph.Node)
- */
- public void index( Node node ) {
- assert !readOnly;
- Location location = node.getLocation();
- UUID uuid = location.getUuid();
- if (uuid == null) uuid = UUID.randomUUID();
- Path path = location.getPath();
- String idStr = stringFactory.create(uuid);
- String pathStr = pathAsString(path, stringFactory);
- String nameStr = path.isRoot() ? "" : stringFactory.create(path.getLastSegment().getName());
- String localNameStr = path.isRoot() ? "" : path.getLastSegment().getName().getLocalName();
- int sns = path.isRoot() ? 1 : path.getLastSegment().getIndex();
-
- Logger logger = Logger.getLogger(getClass());
- if (logger.isTraceEnabled()) {
- logger.trace("indexing {0}", pathStr);
- }
-
- try {
-
- // Create a separate document for the path, which makes it easier to handle moves since the path can
- // be changed without changing any other content fields ...
- Document doc = new Document();
- doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field(PathIndex.NODE_NAME, nameStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field(PathIndex.LOCAL_NAME, localNameStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new NumericField(PathIndex.SNS_INDEX, Field.Store.YES, true).setIntValue(sns));
- doc.add(new Field(PathIndex.ID, idStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new NumericField(PathIndex.DEPTH, Field.Store.YES, true).setIntValue(path.size()));
- addIdProperties(location, doc);
- getPathsWriter().addDocument(doc);
-
- // Create the document for the content (properties) ...
- doc = new Document();
- doc.add(new Field(ContentIndex.ID, idStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- String stringValue = null;
- StringBuilder fullTextSearchValue = null;
- for (Property property : node.getProperties()) {
- Name name = property.getName();
- Rule rule = rules.getRule(name);
- if (rule.isSkipped()) continue;
- String nameString = stringFactory.create(name);
- FieldType type = rule.getType();
- if (type == FieldType.DATE) {
- boolean index = rule.getIndexOption() != Field.Index.NO;
- for (Object value : property) {
- if (value == null) continue;
- // Add a separate field for each property value ...
- DateTime dateValue = dateFactory.create(value);
- long longValue = dateValue.getMillisecondsInUtc();
- doc.add(new NumericField(nameString, rule.getStoreOption(), index).setLongValue(longValue));
- }
- continue;
- }
- if (type == FieldType.INT) {
- ValueFactory<Long> longFactory = context.getValueFactories().getLongFactory();
- boolean index = rule.getIndexOption() != Field.Index.NO;
- for (Object value : property) {
- if (value == null) continue;
- // Add a separate field for each property value ...
- int intValue = longFactory.create(value).intValue();
- doc.add(new NumericField(nameString, rule.getStoreOption(), index).setIntValue(intValue));
- }
- continue;
- }
- if (type == FieldType.DOUBLE) {
- ValueFactory<Double> doubleFactory = context.getValueFactories().getDoubleFactory();
- boolean index = rule.getIndexOption() != Field.Index.NO;
- for (Object value : property) {
- if (value == null) continue;
- // Add a separate field for each property value ...
- double dValue = doubleFactory.create(value);
- doc.add(new NumericField(nameString, rule.getStoreOption(), index).setDoubleValue(dValue));
- }
- continue;
- }
- if (type == FieldType.FLOAT) {
- ValueFactory<Double> doubleFactory = context.getValueFactories().getDoubleFactory();
- boolean index = rule.getIndexOption() != Field.Index.NO;
- for (Object value : property) {
- if (value == null) continue;
- // Add a separate field for each property value ...
- float fValue = doubleFactory.create(value).floatValue();
- doc.add(new NumericField(nameString, rule.getStoreOption(), index).setFloatValue(fValue));
- }
- continue;
- }
- if (type == FieldType.BINARY) {
- // TODO : add to full-text search ...
- continue;
- }
- assert type == FieldType.STRING;
- for (Object value : property) {
- if (value == null) continue;
- stringValue = stringFactory.create(value);
- // Add a separate field for each property value ...
- doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
-
- if (rule.getIndexOption() != Field.Index.NO) {
- // This field is to be full-text searchable ...
- if (fullTextSearchValue == null) {
- fullTextSearchValue = new StringBuilder();
- } else {
- fullTextSearchValue.append(' ');
- }
- fullTextSearchValue.append(stringValue);
-
- // Also create a full-text-searchable field ...
- String fullTextNameString = fullTextFieldName(nameString);
- doc.add(new Field(fullTextNameString, stringValue, Store.NO, Index.ANALYZED));
- }
- }
- }
- // Add the full-text-search field ...
- if (fullTextSearchValue != null && fullTextSearchValue.length() != 0) {
- doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO,
- Field.Index.ANALYZED));
- }
- getContentWriter().addDocument(doc);
- } catch (IOException e) {
- throw new LuceneException(e);
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#optimize()
- */
- public void optimize() {
- try {
- getContentWriter().optimize();
- getPathsWriter().optimize();
- } catch (IOException e) {
- throw new LuceneException(e);
-
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#apply(java.lang.Iterable)
- */
- public int apply( Iterable<ChangeRequest> changes ) {
- for (ChangeRequest change : changes) {
- if (change != null) continue;
- }
- return 0;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * Because this strategy uses multiple indexes, and since there's no correlation between the documents in those indexes,
- * we need to perform the delete in multiple steps. First, we need to perform a query to find out which nodes exist below
- * a certain path. Then, we need to delete those nodes from the paths index. Finally, we need to delete the corresponding
- * documents in the content index that represent those same nodes.
- * </p>
- * <p>
- * Since we don't know how many documents there will be, we perform these steps in batches, where each batch limits the
- * number of results to a maximum number. We repeat batches as long as we find more results. This approach has the
- * advantage that we'll never bring in a large number of results, and it allows us to delete the documents from the
- * content node using a query.
- * </p>
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#deleteBelow(org.jboss.dna.graph.property.Path)
- */
- public int deleteBelow( Path path ) {
- assert !readOnly;
- try {
- // Create a query to find all the nodes at or below the specified path ...
- Set<String> ids = getIdsForDescendantsOf(path, true);
- Query uuidQuery = findAllNodesWithIds(ids);
- // Now delete the documents from each index using this query, which we can reuse ...
- getPathsWriter().deleteDocuments(uuidQuery);
- getContentWriter().deleteDocuments(uuidQuery);
- return ids.size();
- } catch (FileNotFoundException e) {
- // There are no index files yet, so nothing to delete ...
- return 0;
- } catch (IOException e) {
- throw new LuceneException(e);
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#search(org.jboss.dna.graph.ExecutionContext, java.lang.String,
- * int, int, java.util.List)
- */
- public void search( ExecutionContext context,
- String fullTextString,
- int maxResults,
- int offset,
- List<Location> results ) {
- assert fullTextString != null;
- assert fullTextString.length() > 0;
- assert offset >= 0;
- assert maxResults > 0;
- assert results != null;
-
- try {
- // Parse the full-text search and search against the 'fts' field ...
- QueryParser parser = new QueryParser(Version.LUCENE_29, ContentIndex.FULL_TEXT, createAnalyzer());
- Query query = parser.parse(fullTextString);
- TopDocs docs = getContentSearcher().search(query, maxResults + offset);
-
- // Collect the results ...
- IndexReader contentReader = getContentReader();
- IndexReader pathReader = getPathsReader();
- IndexSearcher pathSearcher = getPathsSearcher();
- ScoreDoc[] scoreDocs = docs.scoreDocs;
- int numberOfResults = scoreDocs.length;
- if (numberOfResults > offset) {
- // There are enough results to satisfy the offset ...
- for (int i = offset, num = scoreDocs.length; i != num; ++i) {
- ScoreDoc result = scoreDocs[i];
- int docId = result.doc;
- // Find the UUID of the node (this UUID might be artificial, so we have to find the path) ...
- Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
- String id = doc.get(ContentIndex.ID);
- Location location = getLocationForDocument(id, pathReader, pathSearcher);
- if (location == null) {
- // No path record found ...
- continue;
- }
- // Now add the location ...
- results.add(location);
- }
- }
- } catch (ParseException e) {
- String msg = SearchI18n.errorWhilePerformingSearch.text(workspaceName, sourceName, fullTextString, e.getMessage());
- throw new SearchException(fullTextString, msg, e);
- } catch (IOException e) {
- throw new LuceneException(e);
- }
- }
-
- protected Location getLocationForDocument( String id,
- IndexReader pathReader,
- IndexSearcher pathSearcher ) throws IOException {
- // Find the path for this node (is there a better way to do this than one search per ID?) ...
- TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.ID, id)), 1);
- if (pathDocs.scoreDocs.length < 1) {
- // No path record found ...
- return null;
- }
- Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
- return readLocation(pathDoc);
- }
-
- protected UUID getUuid( Document document,
- Name name ) {
- String nameString = stringFactory.create(name);
- String value = document.get(nameString);
- return value != null ? uuidFactory.create(value) : null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#query(org.jboss.dna.graph.query.QueryContext,
- * org.jboss.dna.graph.query.model.QueryCommand)
- */
- public QueryResults query( QueryContext queryContext,
- QueryCommand query ) {
- return queryEngine().execute(queryContext, query);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#commit()
- */
- public void commit() {
- IOException ioError = null;
- RuntimeException runtimeError = null;
- if (pathsReader != null) {
- try {
- pathsReader.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsReader = null;
- }
- }
- if (contentReader != null) {
- try {
- contentReader.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- contentReader = null;
- }
- }
- if (pathsWriter != null) {
- // try {
- // pathsWriter.commit();
- // } catch (IOException e) {
- // if (ioError == null) ioError = e;
- // } catch (RuntimeException e) {
- // if (runtimeError == null) runtimeError = e;
- // } finally {
- try {
- pathsWriter.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- pathsWriter = null;
- }
- // }
- }
- if (contentWriter != null) {
- // try {
- // contentWriter.commit();
- // } catch (IOException e) {
- // if (ioError == null) ioError = e;
- // } catch (RuntimeException e) {
- // if (runtimeError == null) runtimeError = e;
- // } finally {
- try {
- contentWriter.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- contentWriter = null;
- }
- // }
- }
- if (ioError != null) {
- String msg = SearchI18n.errorWhileCommittingIndexChanges.text(workspaceName, sourceName, ioError.getMessage());
- throw new LuceneException(msg, ioError);
- }
- if (runtimeError != null) throw runtimeError;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#rollback()
- */
- public void rollback() {
- IOException ioError = null;
- RuntimeException runtimeError = null;
- if (pathsReader != null) {
- try {
- pathsReader.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsReader = null;
- }
- }
- if (contentReader != null) {
- try {
- contentReader.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- contentReader = null;
- }
- }
- if (pathsWriter != null) {
- try {
- pathsWriter.rollback();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- try {
- pathsWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsWriter = null;
- }
- }
- }
- if (contentWriter != null) {
- try {
- contentWriter.rollback();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- try {
- contentWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- contentWriter = null;
- }
- }
- }
- if (ioError != null) {
- String msg = SearchI18n.errorWhileRollingBackIndexChanges.text(workspaceName, sourceName, ioError.getMessage());
- throw new LuceneException(msg, ioError);
- }
- if (runtimeError != null) throw runtimeError;
- }
-
- protected QueryEngine createQueryProcessor() {
- // Create the query engine ...
- Planner planner = new CanonicalPlanner();
- Optimizer optimizer = new RuleBasedOptimizer() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
- * org.jboss.dna.graph.query.plan.PlanHints)
- */
- @Override
- protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
- PlanHints hints ) {
- super.populateRuleStack(ruleStack, hints);
- // Add any custom rules here, either at the front of the stack or at the end
- }
- };
- QueryProcessor processor = new QueryProcessor() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
- * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
- * org.jboss.dna.graph.query.QueryResults.Columns,
- * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
- QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
- return DualIndexSession.this.createAccessComponent(originalQuery,
- context,
- accessNode,
- resultColumns,
- analyzer);
- }
- };
-
- return new QueryEngine(planner, optimizer, processor);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneSession#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
- * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
- * org.jboss.dna.graph.query.QueryResults.Columns, org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
- QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
- // Create a processing component for this access query ...
- return new LuceneQueryComponent(this, originalQuery, context, resultColumns, accessNode, analyzer, sourceName,
- workspaceName);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneSession#createTupleCollector(Columns)
- */
- @Override
- public TupleCollector createTupleCollector( Columns columns ) {
- return new DualIndexTupleCollector(this, columns);
- }
-
- /**
- * Get the set of IDs for the children of the node at the given path.
- *
- * @param parentPath the path to the parent node; may not be null
- * @return the doc IDs of the child nodes; never null but possibly empty
- * @throws IOException if there is an error accessing the indexes
- */
- protected Set<String> getIdsForChildrenOf( Path parentPath ) throws IOException {
- // Find the path of the parent ...
- String stringifiedPath = pathAsString(parentPath, stringFactory);
- * Append a '/' to the parent path, so we'll only get descendants ...
- stringifiedPath = stringifiedPath + '/';
-
- // Create a query to find all the nodes below the parent path ...
- Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
- // Include only the children ...
- int childrenDepth = parentPath.size() + 1;
- Query depthQuery = NumericRangeQuery.newIntRange(PathIndex.DEPTH, childrenDepth, childrenDepth, true, true);
- // And combine ...
- BooleanQuery combinedQuery = new BooleanQuery();
- combinedQuery.add(query, Occur.MUST);
- combinedQuery.add(depthQuery, Occur.MUST);
- query = combinedQuery;
-
- // Now execute and collect the IDs ...
- IdCollector idCollector = new IdCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, idCollector);
- return idCollector.getIds();
- }
-
- /**
- * Get the set of IDs for the nodes that are descendants of the node at the given path.
- *
- * @param parentPath the path to the parent node; may not be null and <i>may not be the root node</i>
- * @param includeParent true if the parent node should be included in the results, or false if only the descendants should
- * be included
- * @return the IDs of the nodes; never null but possibly empty
- * @throws IOException if there is an error accessing the indexes
- */
- protected Set<String> getIdsForDescendantsOf( Path parentPath,
- boolean includeParent ) throws IOException {
- assert !parentPath.isRoot();
-
- // Find the path of the parent ...
- String stringifiedPath = pathAsString(parentPath, stringFactory);
- if (!includeParent) {
- * Append a '/' to the parent path, and we'll only get descendants ...
- stringifiedPath = stringifiedPath + '/';
- }
-
- // Create a prefix query ...
- Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
-
- // Now execute and collect the IDs ...
- IdCollector idCollector = new IdCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, idCollector);
- return idCollector.getIds();
- }
-
- /**
- * Get the set containing the single ID for the node at the given path.
- *
- * @param path the path to the node; may not be null
- * @return the ID of the supplied node; or null if the node cannot be found
- * @throws IOException if there is an error accessing the indexes
- */
- protected String getIdFor( Path path ) throws IOException {
- // Create a query to find all the nodes below the parent path ...
- IndexSearcher searcher = getPathsSearcher();
- String stringifiedPath = pathAsString(path, stringFactory);
- TermQuery query = new TermQuery(new Term(PathIndex.PATH, stringifiedPath));
-
- // Now execute and collect the UUIDs ...
- TopDocs topDocs = searcher.search(query, 1);
- if (topDocs.totalHits == 0) return null;
- Document pathDoc = getPathsReader().document(topDocs.scoreDocs[0].doc);
- String idString = pathDoc.get(PathIndex.ID);
- assert idString != null;
- return idString;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneSession#findAllNodesWithIds(java.util.Set)
- */
- @Override
- public Query findAllNodesWithIds( Set<String> ids ) {
- if (ids.isEmpty()) {
- // There are no children, so return a null query ...
- return new MatchNoneQuery();
- }
- if (ids.size() == 1) {
- String id = ids.iterator().next();
- if (id == null) return new MatchNoneQuery();
- return new TermQuery(new Term(ContentIndex.ID, id));
- }
- if (ids.size() < 50) {
- // Create an OR boolean query for all the UUIDs, since this is probably more efficient ...
- BooleanQuery query = new BooleanQuery();
- for (String id : ids) {
- Query uuidQuery = new TermQuery(new Term(ContentIndex.ID, id));
- query.add(uuidQuery, Occur.SHOULD);
- }
- return query;
- }
- // Return a query that will always find all of the UUIDs ...
- return new IdsQuery(ContentIndex.ID, ids);
- }
-
- @Override
- public Query findAllNodesBelow( Path ancestorPath ) throws IOException {
- if (ancestorPath.isRoot()) {
- return new MatchAllDocsQuery();
- }
- Set<String> ids = getIdsForDescendantsOf(ancestorPath, false);
- return findAllNodesWithIds(ids);
- }
-
- /**
- * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
- * supplied path.
- *
- * @param parentPath the path of the parent node.
- * @return the query; never null
- * @throws IOException if there is an error finding the UUIDs of the child nodes
- */
- @Override
- public Query findChildNodes( Path parentPath ) throws IOException {
- if (parentPath.isRoot()) {
- return new MatchAllDocsQuery();
- }
- Set<String> childIds = getIdsForChildrenOf(parentPath);
- return findAllNodesWithIds(childIds);
- }
-
- /**
- * Create a query that can be used to find the one document (or node) that exists at the exact path supplied. This method
- * first queries the {@link PathIndex path index} to find the ID of the node at the supplied path, and then returns a
- * query that matches the ID.
- *
- * @param path the path of the node
- * @return the query; never null
- * @throws IOException if there is an error finding the ID for the supplied path
- */
- @Override
- public Query findNodeAt( Path path ) throws IOException {
- String id = getIdFor(path);
- if (id == null) return null;
- return new TermQuery(new Term(ContentIndex.ID, id));
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneSession#findNodesLike(java.lang.String, java.lang.String, boolean)
- */
- @Override
- public Query findNodesLike( String fieldName,
- String likeExpression,
- boolean caseSensitive ) {
- ValueFactories factories = context.getValueFactories();
- return CompareStringQuery.createQueryForNodesWithFieldLike(likeExpression, fieldName, factories, caseSensitive);
- }
-
- @Override
- public Query findNodesWith( Length propertyLength,
- Operator operator,
- Object value ) {
- assert propertyLength != null;
- assert value != null;
- PropertyValue propertyValue = propertyLength.getPropertyValue();
- String field = stringFactory.create(propertyValue.getPropertyName());
- ValueFactories factories = context.getValueFactories();
- int length = factories.getLongFactory().create(value).intValue();
- switch (operator) {
- case EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldEqualTo(length, field, factories);
- case NOT_EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldNotEqualTo(length, field, factories);
- case GREATER_THAN:
- return CompareLengthQuery.createQueryForNodesWithFieldGreaterThan(length, field, factories);
- case GREATER_THAN_OR_EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(length, field, factories);
- case LESS_THAN:
- return CompareLengthQuery.createQueryForNodesWithFieldLessThan(length, field, factories);
- case LESS_THAN_OR_EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldLessThanOrEqualTo(length, field, factories);
- case LIKE:
- // This is not allowed ...
- assert false;
- break;
- }
- return null;
- }
-
- @SuppressWarnings( "unchecked" )
- @Override
- public Query findNodesWith( PropertyValue propertyValue,
- Operator operator,
- Object value,
- boolean caseSensitive ) {
- String field = stringFactory.create(propertyValue.getPropertyName());
- Name fieldName = nameFactory.create(propertyValue.getPropertyName());
- ValueFactories factories = context.getValueFactories();
- IndexRules.Rule rule = rules.getRule(fieldName);
- if (rule == null || rule.isSkipped()) return new MatchNoneQuery();
- FieldType type = rule.getType();
- switch (type) {
- case STRING:
- String stringValue = stringFactory.create(value);
- if (value instanceof Path) {
- stringValue = pathAsString(pathFactory.create(value), stringFactory);
- }
- if (!caseSensitive) stringValue = stringValue.toLowerCase();
- switch (operator) {
- case EQUAL_TO:
- return CompareStringQuery.createQueryForNodesWithFieldEqualTo(stringValue,
- field,
- factories,
- caseSensitive);
- case NOT_EQUAL_TO:
- Query query = CompareStringQuery.createQueryForNodesWithFieldEqualTo(stringValue,
- field,
- factories,
- caseSensitive);
- return new NotQuery(query);
- case GREATER_THAN:
- return CompareStringQuery.createQueryForNodesWithFieldGreaterThan(stringValue,
- field,
- factories,
- caseSensitive);
- case GREATER_THAN_OR_EQUAL_TO:
- return CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(stringValue,
- field,
- factories,
- caseSensitive);
- case LESS_THAN:
- return CompareStringQuery.createQueryForNodesWithFieldLessThan(stringValue,
- field,
- factories,
- caseSensitive);
- case LESS_THAN_OR_EQUAL_TO:
- return CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(stringValue,
- field,
- factories,
- caseSensitive);
- case LIKE:
- return findNodesLike(field, stringValue, caseSensitive);
- }
- break;
- case DATE:
- NumericRule<Long> longRule = (NumericRule<Long>)rule;
- long date = factories.getLongFactory().create(value);
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, date, date, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newLongRange(field, date, date, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newLongRange(field, date, longRule.getMaximum(), false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, date, longRule.getMaximum(), true, true);
- case LESS_THAN:
- return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), date, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), date, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case LONG:
- longRule = (NumericRule<Long>)rule;
- long longValue = factories.getLongFactory().create(value);
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newLongRange(field, longValue, longRule.getMaximum(), false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, longValue, longRule.getMaximum(), true, true);
- case LESS_THAN:
- return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), longValue, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), longValue, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case INT:
- NumericRule<Integer> intRule = (NumericRule<Integer>)rule;
- int intValue = factories.getLongFactory().create(value).intValue();
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newIntRange(field, intValue, intValue, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newIntRange(field, intValue, intValue, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newIntRange(field, intValue, intRule.getMaximum(), false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newIntRange(field, intValue, intRule.getMaximum(), true, true);
- case LESS_THAN:
- return NumericRangeQuery.newIntRange(field, intRule.getMinimum(), intValue, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newIntRange(field, intRule.getMinimum(), intValue, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case DOUBLE:
- NumericRule<Double> dRule = (NumericRule<Double>)rule;
- double doubleValue = factories.getDoubleFactory().create(value);
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newDoubleRange(field, doubleValue, dRule.getMaximum(), false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newDoubleRange(field, doubleValue, dRule.getMaximum(), true, true);
- case LESS_THAN:
- return NumericRangeQuery.newDoubleRange(field, dRule.getMinimum(), doubleValue, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newDoubleRange(field, dRule.getMinimum(), doubleValue, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case FLOAT:
- NumericRule<Float> fRule = (NumericRule<Float>)rule;
- float floatValue = factories.getDoubleFactory().create(value).floatValue();
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newFloatRange(field, floatValue, floatValue, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newFloatRange(field, floatValue, floatValue, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newFloatRange(field, floatValue, fRule.getMaximum(), false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newFloatRange(field, floatValue, fRule.getMaximum(), true, true);
- case LESS_THAN:
- return NumericRangeQuery.newFloatRange(field, fRule.getMinimum(), floatValue, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newFloatRange(field, fRule.getMinimum(), floatValue, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case BOOLEAN:
- boolean booleanValue = factories.getBooleanFactory().create(value);
- stringValue = stringFactory.create(value);
- switch (operator) {
- case EQUAL_TO:
- return new TermQuery(new Term(field, stringValue));
- case NOT_EQUAL_TO:
- return new TermQuery(new Term(field, stringFactory.create(!booleanValue)));
- case GREATER_THAN:
- if (!booleanValue) {
- return new TermQuery(new Term(field, stringFactory.create(true)));
- }
- // Can't be greater than 'true', per JCR spec
- return new MatchNoneQuery();
- case GREATER_THAN_OR_EQUAL_TO:
- return new TermQuery(new Term(field, stringFactory.create(true)));
- case LESS_THAN:
- if (booleanValue) {
- return new TermQuery(new Term(field, stringFactory.create(false)));
- }
- // Can't be less than 'false', per JCR spec
- return new MatchNoneQuery();
- case LESS_THAN_OR_EQUAL_TO:
- return new TermQuery(new Term(field, stringFactory.create(false)));
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case BINARY:
- // This is not allowed ...
- assert false;
- return null;
- }
- return null;
- }
-
- @Override
- public Query findNodesWithNumericRange( PropertyValue propertyValue,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) {
- String field = stringFactory.create(propertyValue.getPropertyName());
- return findNodesWithNumericRange(field, lowerValue, upperValue, includesLower, includesUpper);
- }
-
- @Override
- public Query findNodesWithNumericRange( NodeDepth depth,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) {
- return findNodesWithNumericRange(PathIndex.DEPTH, lowerValue, upperValue, includesLower, includesUpper);
- }
-
- protected Query findNodesWithNumericRange( String field,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) {
- Name fieldName = nameFactory.create(field);
- IndexRules.Rule rule = rules.getRule(fieldName);
- if (rule == null || rule.isSkipped()) return new MatchNoneQuery();
- FieldType type = rule.getType();
- ValueFactories factories = context.getValueFactories();
- switch (type) {
- case DATE:
- long lowerDate = factories.getLongFactory().create(lowerValue);
- long upperDate = factories.getLongFactory().create(upperValue);
- return NumericRangeQuery.newLongRange(field, lowerDate, upperDate, includesLower, includesUpper);
- case LONG:
- long lowerLong = factories.getLongFactory().create(lowerValue);
- long upperLong = factories.getLongFactory().create(upperValue);
- return NumericRangeQuery.newLongRange(field, lowerLong, upperLong, includesLower, includesUpper);
- case DOUBLE:
- double lowerDouble = factories.getDoubleFactory().create(lowerValue);
- double upperDouble = factories.getDoubleFactory().create(upperValue);
- return NumericRangeQuery.newDoubleRange(field, lowerDouble, upperDouble, includesLower, includesUpper);
- case FLOAT:
- float lowerFloat = factories.getDoubleFactory().create(lowerValue).floatValue();
- float upperFloat = factories.getDoubleFactory().create(upperValue).floatValue();
- return NumericRangeQuery.newFloatRange(field, lowerFloat, upperFloat, includesLower, includesUpper);
- case INT:
- int lowerInt = factories.getLongFactory().create(lowerValue).intValue();
- int upperInt = factories.getLongFactory().create(upperValue).intValue();
- return NumericRangeQuery.newIntRange(field, lowerInt, upperInt, includesLower, includesUpper);
- case BOOLEAN:
- lowerInt = factories.getBooleanFactory().create(lowerValue).booleanValue() ? 1 : 0;
- upperInt = factories.getBooleanFactory().create(upperValue).booleanValue() ? 1 : 0;
- return NumericRangeQuery.newIntRange(field, lowerInt, upperInt, includesLower, includesUpper);
- case STRING:
- case BINARY:
- assert false;
- }
- return new MatchNoneQuery();
- }
-
- @Override
- public Query findNodesWith( NodePath nodePath,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException {
- if (!caseSensitive) value = stringFactory.create(value).toLowerCase();
- Path pathValue = operator != Operator.LIKE ? pathFactory.create(value) : null;
- Query query = null;
- switch (operator) {
- case EQUAL_TO:
- return findNodeAt(pathValue);
- case NOT_EQUAL_TO:
- return new NotQuery(findNodeAt(pathValue));
- case LIKE:
- String likeExpression = stringFactory.create(value);
- query = findNodesLike(PathIndex.PATH, likeExpression, caseSensitive);
- break;
- case GREATER_THAN:
- query = ComparePathQuery.createQueryForNodesWithPathGreaterThan(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = ComparePathQuery.createQueryForNodesWithPathGreaterThanOrEqualTo(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN:
- query = ComparePathQuery.createQueryForNodesWithPathLessThan(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = ComparePathQuery.createQueryForNodesWithPathLessThanOrEqualTo(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- }
- // Now execute and collect the IDs ...
- IdCollector idCollector = new IdCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, idCollector);
- return findAllNodesWithIds(idCollector.getIds());
- }
-
- @Override
- public Query findNodesWith( NodeName nodeName,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException {
- ValueFactories factories = getContext().getValueFactories();
- String stringValue = stringFactory.create(value);
- if (!caseSensitive) stringValue = stringValue.toLowerCase();
- Path.Segment segment = operator != Operator.LIKE ? pathFactory.createSegment(stringValue) : null;
- int snsIndex = operator != Operator.LIKE ? segment.getIndex() : 0;
- Query query = null;
- switch (operator) {
- case EQUAL_TO:
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(new TermQuery(new Term(PathIndex.NODE_NAME, stringValue)), Occur.MUST);
- booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false),
- Occur.MUST);
- return booleanQuery;
- case NOT_EQUAL_TO:
- booleanQuery = new BooleanQuery();
- booleanQuery.add(new TermQuery(new Term(PathIndex.NODE_NAME, stringValue)), Occur.MUST);
- booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false),
- Occur.MUST);
- return new NotQuery(booleanQuery);
- case GREATER_THAN:
- query = CompareNameQuery.createQueryForNodesWithNameGreaterThan(segment,
- PathIndex.NODE_NAME,
- PathIndex.SNS_INDEX,
- factories,
- caseSensitive);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = CompareNameQuery.createQueryForNodesWithNameGreaterThanOrEqualTo(segment,
- PathIndex.NODE_NAME,
- PathIndex.SNS_INDEX,
- factories,
- caseSensitive);
- break;
- case LESS_THAN:
- query = CompareNameQuery.createQueryForNodesWithNameLessThan(segment,
- PathIndex.NODE_NAME,
- PathIndex.SNS_INDEX,
- factories,
- caseSensitive);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = CompareNameQuery.createQueryForNodesWithNameLessThanOrEqualTo(segment,
- PathIndex.NODE_NAME,
- PathIndex.SNS_INDEX,
- factories,
- caseSensitive);
- break;
- case LIKE:
- // See whether the like expression has brackets ...
- String likeExpression = stringValue;
- int openBracketIndex = likeExpression.indexOf('[');
- if (openBracketIndex != -1) {
- String localNameExpression = likeExpression.substring(0, openBracketIndex);
- String snsIndexExpression = likeExpression.substring(openBracketIndex);
- Query localNameQuery = CompareStringQuery.createQueryForNodesWithFieldLike(localNameExpression,
- PathIndex.NODE_NAME,
- factories,
- caseSensitive);
- Query snsQuery = createSnsIndexQuery(snsIndexExpression);
- if (localNameQuery == null) {
- if (snsQuery == null) {
- query = new MatchNoneQuery();
- } else {
- // There is just an SNS part ...
- query = snsQuery;
- }
- } else {
- // There is a local name part ...
- if (snsQuery == null) {
- query = localNameQuery;
- } else {
- // There is both a local name part and a SNS part ...
- booleanQuery = new BooleanQuery();
- booleanQuery.add(localNameQuery, Occur.MUST);
- booleanQuery.add(snsQuery, Occur.MUST);
- query = booleanQuery;
- }
- }
- } else {
- // There is no SNS expression ...
- query = CompareStringQuery.createQueryForNodesWithFieldLike(likeExpression,
- PathIndex.NODE_NAME,
- factories,
- caseSensitive);
- }
- assert query != null;
- break;
- }
-
- // Now execute and collect the IDs ...
- IdCollector idCollector = new IdCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, idCollector);
- return findAllNodesWithIds(idCollector.getIds());
- }
-
- @Override
- public Query findNodesWith( NodeLocalName nodeName,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException {
- String nameValue = stringFactory.create(value);
- Query query = null;
- switch (operator) {
- case LIKE:
- String likeExpression = stringFactory.create(value);
- query = findNodesLike(PathIndex.LOCAL_NAME, likeExpression, caseSensitive);
- break;
- case EQUAL_TO:
- query = CompareStringQuery.createQueryForNodesWithFieldEqualTo(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- case NOT_EQUAL_TO:
- query = CompareStringQuery.createQueryForNodesWithFieldEqualTo(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- query = new NotQuery(query);
- break;
- case GREATER_THAN:
- query = CompareStringQuery.createQueryForNodesWithFieldGreaterThan(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN:
- query = CompareStringQuery.createQueryForNodesWithFieldLessThan(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- }
-
- // Now execute and collect the IDs ...
- IdCollector idCollector = new IdCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, idCollector);
- return findAllNodesWithIds(idCollector.getIds());
- }
-
- @Override
- public Query findNodesWith( NodeDepth depthConstraint,
- Operator operator,
- Object value ) throws IOException {
- int depth = context.getValueFactories().getLongFactory().create(value).intValue();
- Query query = null;
- switch (operator) {
- case EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
- break;
- case NOT_EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
- query = new NotQuery(query);
- break;
- case GREATER_THAN:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, false, true);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, true, true);
- break;
- case LESS_THAN:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, false);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, true);
- break;
- case LIKE:
- // This is not allowed ...
- return null;
- }
-
- // Now execute and collect the IDs ...
- IdCollector idCollector = new IdCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, idCollector);
- return findAllNodesWithIds(idCollector.getIds());
- }
-
- protected Query createLocalNameQuery( String likeExpression,
- boolean caseSensitive ) {
- if (likeExpression == null) return null;
- ValueFactories factories = getContext().getValueFactories();
- return CompareStringQuery.createQueryForNodesWithFieldLike(likeExpression,
- PathIndex.LOCAL_NAME,
- factories,
- caseSensitive);
- }
-
- /**
- * Utility method to generate a query against the SNS indexes. This method attempts to generate a query that works most
- * efficiently, depending upon the supplied expression. For example, if the supplied expression is just "[3]", then a
- * range query is used to find all values matching '3'. However, if "[3_]" is used (where '_' matches any
- * single-character, or digit in this case), then a range query is used to find all values between '30' and '39'.
- * Similarly, if "[3%]" is used, then a regular expression query is used.
- *
- * @param likeExpression the expression that uses the JCR 2.0 LIKE representation, and which includes the leading '[' and
- * trailing ']' characters
- * @return the query, or null if the expression cannot be represented as a query
- */
- protected Query createSnsIndexQuery( String likeExpression ) {
- if (likeExpression == null) return null;
- likeExpression = likeExpression.trim();
- if (likeExpression.length() == 0) return null;
-
- // Remove the leading '[' ...
- assert likeExpression.charAt(0) == '[';
- likeExpression = likeExpression.substring(1);
-
- // Remove the trailing ']' if it exists ...
- int closeBracketIndex = likeExpression.indexOf(']');
- if (closeBracketIndex != -1) {
- likeExpression = likeExpression.substring(0, closeBracketIndex);
- }
- if (likeExpression.equals("_")) {
- // The SNS expression can only be one digit ...
- return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, MIN_SNS_INDEX, 9, true, true);
- }
- if (likeExpression.equals("%")) {
- // The SNS expression can be any digits ...
- return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, MIN_SNS_INDEX, MAX_SNS_INDEX, true, true);
- }
- if (likeExpression.indexOf('_') != -1) {
- if (likeExpression.indexOf('%') != -1) {
- // Contains both ...
- return findNodesLike(PathIndex.SNS_INDEX, likeExpression, true);
- }
- // It presumably contains some numbers and at least one '_' character ...
- int firstWildcardChar = likeExpression.indexOf('_');
- if (firstWildcardChar + 1 < likeExpression.length()) {
- // There's at least some characters after the first '_' ...
- int secondWildcardChar = likeExpression.indexOf('_', firstWildcardChar + 1);
- if (secondWildcardChar != -1) {
- // There are multiple '_' characters ...
- return findNodesLike(PathIndex.SNS_INDEX, likeExpression, true);
- }
- }
- // There's only one '_', so parse the lowermost value and uppermost value ...
- String lowerExpression = likeExpression.replace('_', '0');
- String upperExpression = likeExpression.replace('_', '9');
- try {
- // This SNS is just a number ...
- int lowerSns = Integer.parseInt(lowerExpression);
- int upperSns = Integer.parseInt(upperExpression);
- return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, lowerSns, upperSns, true, true);
- } catch (NumberFormatException e) {
- // It's not a number but it's in the SNS field, so there will be no results ...
- return new MatchNoneQuery();
- }
- }
- if (likeExpression.indexOf('%') != -1) {
- // It presumably contains some numbers and at least one '%' character ...
- return findNodesLike(PathIndex.SNS_INDEX, likeExpression, true);
- }
- // This is not a LIKE expression but an exact value specification and should be a number ...
- try {
- // This SNS is just a number ...
- int sns = Integer.parseInt(likeExpression);
- return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, sns, sns, true, true);
- } catch (NumberFormatException e) {
- // It's not a number but it's in the SNS field, so there will be no results ...
- return new MatchNoneQuery();
- }
- }
-
- }
-
- /**
- * A {@link Collector} implementation that only captures the UUID of the documents returned by a query. Score information is
- * not recorded. This is often used when querying the {@link PathIndex} to collect the UUIDs of a set of nodes satisfying some
- * path constraint.
- *
- * @see DualIndexSearchProvider.DualIndexSession#findChildNodes(Path)
- */
- protected static class IdCollector extends Collector {
- private final Set<String> ids = new HashSet<String>();
- private String[] idsByDocId;
-
- // private int baseDocId;
-
- protected IdCollector() {
- }
-
- /**
- * Get the UUIDs that have been collected.
- *
- * @return the set of UUIDs; never null
- */
- public Set<String> getIds() {
- return ids;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
- */
- @Override
- public boolean acceptsDocsOutOfOrder() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
- */
- @Override
- public void setScorer( Scorer scorer ) {
- // we don't care about scoring
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#collect(int)
- */
- @Override
- public void collect( int docId ) {
- assert docId >= 0;
- String idString = idsByDocId[docId];
- assert idString != null;
- ids.add(idString);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public void setNextReader( IndexReader reader,
- int docBase ) throws IOException {
- this.idsByDocId = FieldCache.DEFAULT.getStrings(reader, ContentIndex.ID); // same value as PathIndex.ID
- // this.baseDocId = docBase;
- }
- }
-
- /**
- * This collector is responsible for loading the value for each of the columns into each tuple array.
- */
- protected class DualIndexTupleCollector extends TupleCollector {
- private final DualIndexSession session;
- private final LinkedList<Object[]> tuples = new LinkedList<Object[]>();
- private final Columns columns;
- private final int numValues;
- private final boolean recordScore;
- private final int scoreIndex;
- private final FieldSelector fieldSelector;
- private final int locationIndex;
- private Scorer scorer;
- private IndexReader currentReader;
- private int docOffset;
- private boolean resolvedLocations = false;
-
- protected DualIndexTupleCollector( DualIndexSession session,
- Columns columns ) {
- this.session = session;
- this.columns = columns;
- assert this.session != null;
- assert this.columns != null;
- this.numValues = this.columns.getTupleSize();
- assert this.numValues >= 0;
- assert this.columns.getSelectorNames().size() == 1;
- final String selectorName = this.columns.getSelectorNames().get(0);
- this.locationIndex = this.columns.getLocationIndex(selectorName);
- this.recordScore = this.columns.hasFullTextSearchScores();
- this.scoreIndex = this.recordScore ? this.columns.getFullTextSearchScoreIndexFor(selectorName) : -1;
- final Set<String> columnNames = new HashSet<String>(this.columns.getColumnNames());
- columnNames.add(ContentIndex.ID); // add the UUID, which we'll put into the Location ...
- this.fieldSelector = new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- return columnNames.contains(fieldName) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
- }
- };
- }
-
- /**
- * @return tuples
- */
- @Override
- public LinkedList<Object[]> getTuples() {
- resolveLocations();
- return tuples;
- }
-
- protected void resolveLocations() {
- if (resolvedLocations) return;
- try {
- // The Location field in the tuples all contain the ID of the document, so we need to replace these
- // with the appropriate Location objects, using the content from the PathIndex ...
- IndexReader pathReader = session.getPathsReader();
- IndexSearcher pathSearcher = session.getPathsSearcher();
- for (Object[] tuple : tuples) {
- String id = (String)tuple[locationIndex];
- assert id != null;
- Location location = session.getLocationForDocument(id, pathReader, pathSearcher);
- assert location != null;
- tuple[locationIndex] = location;
- }
- resolvedLocations = true;
- } catch (IOException e) {
- throw new LuceneException(e);
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
- */
- @Override
- public boolean acceptsDocsOutOfOrder() {
- return true;
- }
-
- /**
- * Get the location index.
- *
- * @return locationIndex
- */
- public int getLocationIndex() {
- return locationIndex;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public void setNextReader( IndexReader reader,
- int docBase ) {
- this.currentReader = reader;
- this.docOffset = docBase;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
- */
- @Override
- public void setScorer( Scorer scorer ) {
- this.scorer = scorer;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#collect(int)
- */
- @Override
- public void collect( int doc ) throws IOException {
- int docId = doc + docOffset;
- Object[] tuple = new Object[numValues];
- Document document = currentReader.document(docId, fieldSelector);
- for (String columnName : columns.getColumnNames()) {
- int index = columns.getColumnIndexForName(columnName);
- // We just need to retrieve the first value if there is more than one ...
- tuple[index] = document.get(columnName);
- }
-
- // Set the score column if required ...
- if (recordScore) {
- assert scorer != null;
- tuple[scoreIndex] = scorer.score();
- }
-
- // Load the document ID (which is a stringified UUID) into the Location slot,
- // which will be replaced later with a real Location ...
- tuple[locationIndex] = document.get(ContentIndex.ID);
- tuples.add(tuple);
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/EncodingNamespaceRegistry.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/EncodingNamespaceRegistry.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/EncodingNamespaceRegistry.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,233 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import net.jcip.annotations.GuardedBy;
-import net.jcip.annotations.ThreadSafe;
-import org.jboss.dna.common.text.NoOpEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.graph.DnaLexicon;
-import org.jboss.dna.graph.JcrLexicon;
-import org.jboss.dna.graph.JcrMixLexicon;
-import org.jboss.dna.graph.JcrNtLexicon;
-import org.jboss.dna.graph.property.NamespaceRegistry;
-import org.jboss.dna.graph.property.basic.BasicNamespace;
-
-/**
- * A {@link NamespaceRegistry} implementation that uses encoded representations of the namespace URIs for the namespace prefixes.
- */
-@ThreadSafe
-class EncodingNamespaceRegistry implements NamespaceRegistry {
-
- public static final Set<String> DEFAULT_FIXED_NAMESPACES = Collections.unmodifiableSet(new HashSet<String>(
- Arrays.asList(new String[] {
- "",
- DnaLexicon.Namespace.URI,
- JcrLexicon.Namespace.URI,
- JcrNtLexicon.Namespace.URI,
- JcrMixLexicon.Namespace.URI})));
-
- private final NamespaceRegistry registry;
- private final TextEncoder encoder;
- private final ReadWriteLock lock = new ReentrantReadWriteLock();
- @GuardedBy( "lock" )
- private final Map<String, String> uriToEncodedPrefix = new HashMap<String, String>();
- @GuardedBy( "lock" )
- private final Map<String, String> encodedPrefixToUri = new HashMap<String, String>();
- private final Set<String> fixedNamespaceUris;
-
- /**
- * @param registry the original registry
- * @param encoder the encoder; may be null if no encoding should be used
- */
- EncodingNamespaceRegistry( NamespaceRegistry registry,
- TextEncoder encoder ) {
- this(registry, encoder, null);
- }
-
- /**
- * @param registry the original registry
- * @param encoder the encoder; may be null if no encoding should be used
- * @param fixedUris the set of URIs that is to be fixed and not encoded; or null if the default namespaces are to be fixed
- */
- EncodingNamespaceRegistry( NamespaceRegistry registry,
- TextEncoder encoder,
- Set<String> fixedUris ) {
- this.registry = registry;
- this.encoder = encoder != null ? encoder : new NoOpEncoder();
- this.fixedNamespaceUris = fixedUris != null ? Collections.unmodifiableSet(new HashSet<String>(fixedUris)) : DEFAULT_FIXED_NAMESPACES;
- assert this.registry != null;
- assert this.encoder != null;
- assert this.fixedNamespaceUris != null;
- }
-
- /**
- * @return fixedNamespaceUris
- */
- public Set<String> getFixedNamespaceUris() {
- return fixedNamespaceUris;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#getDefaultNamespaceUri()
- */
- public String getDefaultNamespaceUri() {
- return this.registry.getDefaultNamespaceUri();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#getNamespaceForPrefix(java.lang.String)
- */
- public String getNamespaceForPrefix( String prefix ) {
- // First look in the map ...
- String result = null;
- try {
- lock.readLock().lock();
- result = encodedPrefixToUri.get(prefix);
- if (result != null) return result;
- } finally {
- lock.readLock().unlock();
- }
-
- // Make sure we have encoded all the namespaces in the registry ...
- Set<Namespace> namespaces = new HashSet<Namespace>(this.registry.getNamespaces());
- Set<Namespace> encodedNamespaces = this.getNamespaces();
- namespaces.removeAll(encodedNamespaces);
- try {
- lock.writeLock().lock();
- for (Namespace namespace : namespaces) {
- String namespaceUri = namespace.getNamespaceUri();
- String encoded = fixedNamespaceUris.contains(namespaceUri) ? namespace.getPrefix() : encoder.encode(namespaceUri);
- uriToEncodedPrefix.put(namespaceUri, encoded);
- encodedPrefixToUri.put(encoded, namespaceUri);
- if (result == null && encoded.equals(prefix)) result = namespaceUri;
- }
- } finally {
- lock.writeLock().unlock();
- }
- if (result != null) return result;
-
- // There's nothing, so just delegate to the registry ...
- return this.registry.getNamespaceForPrefix(prefix);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#getRegisteredNamespaceUris()
- */
- public Set<String> getRegisteredNamespaceUris() {
- return this.registry.getRegisteredNamespaceUris();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#isRegisteredNamespaceUri(java.lang.String)
- */
- public boolean isRegisteredNamespaceUri( String namespaceUri ) {
- return this.registry.isRegisteredNamespaceUri(namespaceUri);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#getPrefixForNamespaceUri(java.lang.String, boolean)
- */
- public String getPrefixForNamespaceUri( String namespaceUri,
- boolean generateIfMissing ) {
- if (fixedNamespaceUris.contains(namespaceUri)) {
- return this.registry.getPrefixForNamespaceUri(namespaceUri, generateIfMissing);
- }
- String encoded = null;
- try {
- lock.readLock().lock();
- encoded = uriToEncodedPrefix.get(namespaceUri);
- } finally {
- lock.readLock().unlock();
- }
- if (encoded == null) {
- encoded = encoder.encode(namespaceUri);
- try {
- lock.writeLock().lock();
- uriToEncodedPrefix.put(namespaceUri, encoded);
- encodedPrefixToUri.put(encoded, namespaceUri);
- } finally {
- lock.writeLock().unlock();
- }
- }
- return encoded;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#getNamespaces()
- */
- public Set<Namespace> getNamespaces() {
- Set<Namespace> results = new HashSet<Namespace>();
- try {
- lock.readLock().lock();
- for (Map.Entry<String, String> entry : uriToEncodedPrefix.entrySet()) {
- String uri = entry.getKey();
- String prefix = entry.getValue();
- results.add(new BasicNamespace(prefix, uri));
- }
- } finally {
- lock.readLock().unlock();
- }
- return results;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#register(java.lang.String, java.lang.String)
- */
- public String register( String prefix,
- String namespaceUri ) {
- return this.registry.register(prefix, namespaceUri);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.property.NamespaceRegistry#unregister(java.lang.String)
- */
- public boolean unregister( String namespaceUri ) {
- return this.registry.unregister(namespaceUri);
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,639 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import net.jcip.annotations.Immutable;
-import net.jcip.annotations.NotThreadSafe;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.graph.property.Name;
-
-/**
- * The set of rules that dictate how properties should be indexed.
- */
-@Immutable
-public class IndexRules {
-
- public static enum FieldType {
- STRING,
- DOUBLE,
- FLOAT,
- INT,
- BOOLEAN,
- LONG,
- DATE,
- BINARY;
- }
-
- /**
- * A single rule that dictates how a single property should be indexed.
- *
- * @see IndexRules#getRule(Name)
- */
- @Immutable
- public static interface Rule {
-
- boolean isSkipped();
-
- FieldType getType();
-
- Field.Store getStoreOption();
-
- Field.Index getIndexOption();
- }
-
- @Immutable
- public static interface NumericRule<T> extends Rule {
- T getMinimum();
-
- T getMaximum();
- }
-
- public static final Rule SKIP = new SkipRule();
-
- @Immutable
- protected static class SkipRule implements Rule {
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#getType()
- */
- public FieldType getType() {
- return FieldType.STRING;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#isSkipped()
- */
- public boolean isSkipped() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#getIndexOption()
- */
- public Index getIndexOption() {
- return Field.Index.NO;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#getStoreOption()
- */
- public Store getStoreOption() {
- return Field.Store.NO;
- }
- }
-
- @Immutable
- protected static class TypedRule implements Rule {
- protected final FieldType type;
- protected final Field.Store store;
- protected final Field.Index index;
-
- protected TypedRule( FieldType type,
- Field.Store store,
- Field.Index index ) {
- this.type = type;
- this.index = index;
- this.store = store;
- assert this.type != null;
- assert this.index != null;
- assert this.store != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#getType()
- */
- public FieldType getType() {
- return type;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#isSkipped()
- */
- public boolean isSkipped() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#getIndexOption()
- */
- public Index getIndexOption() {
- return index;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.Rule#getStoreOption()
- */
- public Store getStoreOption() {
- return store;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return type.name() + " rule (" + store + "," + index + ")";
- }
- }
-
- @Immutable
- protected static class NumericTypedRule<T> extends TypedRule implements NumericRule<T> {
- protected final T minValue;
- protected final T maxValue;
-
- protected NumericTypedRule( FieldType type,
- Field.Store store,
- Field.Index index,
- T minValue,
- T maxValue ) {
- super(type, store, index);
- this.minValue = minValue;
- this.maxValue = maxValue;
- assert this.minValue != null;
- assert this.maxValue != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.NumericRule#getMaximum()
- */
- public T getMaximum() {
- return maxValue;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexRules.NumericRule#getMinimum()
- */
- public T getMinimum() {
- return minValue;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return super.toString() + " with range [" + minValue + "," + maxValue + "]";
- }
- }
-
- private final Map<Name, Rule> rulesByName;
- private final Rule defaultRule;
-
- protected IndexRules( Map<Name, Rule> rulesByName,
- Rule defaultRule ) {
- this.rulesByName = rulesByName;
- this.defaultRule = defaultRule != null ? defaultRule : SKIP;
- assert this.defaultRule != null;
- }
-
- /**
- * Get the rule associated with the given property name.
- *
- * @param name the property name, or null if the default rule is to be returned
- * @return the rule; never null
- */
- public Rule getRule( Name name ) {
- Rule result = rulesByName.get(name);
- return result != null ? result : this.defaultRule;
- }
-
- /**
- * Return a new builder that can be used to create {@link IndexRules} objects.
- *
- * @return a builder; never null
- */
- public static Builder createBuilder() {
- return new Builder(new HashMap<Name, Rule>(), null);
- }
-
- /**
- * Return a new builder that can be used to create {@link IndexRules} objects.
- *
- * @param initialRules the rules that the builder should start with
- * @return a builder; never null
- * @throws IllegalArgumentException if the initial rules reference is null
- */
- public static Builder createBuilder( IndexRules initialRules ) {
- CheckArg.isNotNull(initialRules, "initialRules");
- return new Builder(new HashMap<Name, Rule>(initialRules.rulesByName), initialRules.defaultRule);
- }
-
- /**
- * A builder of immutable {@link IndexRules} objects.
- */
- @NotThreadSafe
- public static class Builder {
- private final Map<Name, Rule> rulesByName;
- private Rule defaultRule;
-
- Builder( Map<Name, Rule> rulesByName,
- Rule defaultRule ) {
- assert rulesByName != null;
- this.rulesByName = rulesByName;
- this.defaultRule = defaultRule;
- }
-
- /**
- * Mark the properties with the supplied names to be skipped from indexing.
- *
- * @param namesToIndex the names of the properties that are to be skipped
- * @return this builder for convenience and method chaining; never null
- */
- public Builder skip( Name... namesToIndex ) {
- if (namesToIndex != null) {
- for (Name name : namesToIndex) {
- rulesByName.put(name, SKIP);
- }
- }
- return this;
- }
-
- /**
- * Define a string-based field as the default.
- *
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder defaultTo( Field.Store store,
- Field.Index index ) {
- if (store == null) store = Field.Store.YES;
- if (index == null) index = Field.Index.NOT_ANALYZED;
- defaultRule = new TypedRule(FieldType.STRING, store, index);
- return this;
- }
-
- /**
- * Define a string-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder stringField( Name name,
- Field.Store store,
- Field.Index index ) {
- if (store == null) store = Field.Store.YES;
- if (index == null) index = Field.Index.NOT_ANALYZED;
- Rule rule = new TypedRule(FieldType.STRING, store, index);
- rulesByName.put(name, rule);
- return this;
- }
-
- /**
- * Define a binary-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder binaryField( Name name,
- Field.Store store,
- Field.Index index ) {
- if (store == null) store = Field.Store.YES;
- if (index == null) index = Field.Index.NOT_ANALYZED;
- Rule rule = new TypedRule(FieldType.BINARY, store, index);
- rulesByName.put(name, rule);
- return this;
- }
-
- protected <T> Builder numericField( Name name,
- FieldType type,
- Field.Store store,
- Field.Index index,
- T minValue,
- T maxValue ) {
- if (store == null) store = Field.Store.YES;
- if (index == null) index = Field.Index.NOT_ANALYZED;
- Rule rule = new NumericTypedRule<T>(type, store, index, minValue, maxValue);
- rulesByName.put(name, rule);
- return this;
- }
-
- /**
- * Define a boolean-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder booleanField( Name name,
- Field.Store store,
- Field.Index index ) {
- return numericField(name, FieldType.BOOLEAN, store, index, Boolean.FALSE, Boolean.TRUE);
- }
-
- /**
- * Define a integer-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @param maxValue the maximum value for this field, or null if there is no maximum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder integerField( Name name,
- Field.Store store,
- Field.Index index,
- Integer minValue,
- Integer maxValue ) {
- if (minValue == null) minValue = Integer.MIN_VALUE;
- if (maxValue == null) maxValue = Integer.MAX_VALUE;
- return numericField(name, FieldType.INT, store, index, minValue, maxValue);
- }
-
- /**
- * Define a long-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @param maxValue the maximum value for this field, or null if there is no maximum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder longField( Name name,
- Field.Store store,
- Field.Index index,
- Long minValue,
- Long maxValue ) {
- if (minValue == null) minValue = Long.MIN_VALUE;
- if (maxValue == null) maxValue = Long.MAX_VALUE;
- return numericField(name, FieldType.LONG, store, index, minValue, maxValue);
- }
-
- /**
- * Define a date-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @param maxValue the maximum value for this field, or null if there is no maximum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder dateField( Name name,
- Field.Store store,
- Field.Index index,
- Long minValue,
- Long maxValue ) {
- if (minValue == null) minValue = 0L;
- if (maxValue == null) maxValue = Long.MAX_VALUE;
- return numericField(name, FieldType.DATE, store, index, minValue, maxValue);
- }
-
- /**
- * Define a float-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @param maxValue the maximum value for this field, or null if there is no maximum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder floatField( Name name,
- Field.Store store,
- Field.Index index,
- Float minValue,
- Float maxValue ) {
- if (minValue == null) minValue = Float.MIN_VALUE;
- if (maxValue == null) maxValue = Float.MAX_VALUE;
- return numericField(name, FieldType.FLOAT, store, index, minValue, maxValue);
- }
-
- /**
- * Define a double-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @param maxValue the maximum value for this field, or null if there is no maximum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder doubleField( Name name,
- Field.Store store,
- Field.Index index,
- Double minValue,
- Double maxValue ) {
- if (minValue == null) minValue = Double.MIN_VALUE;
- if (maxValue == null) maxValue = Double.MAX_VALUE;
- return numericField(name, FieldType.DOUBLE, store, index, minValue, maxValue);
- }
-
- /**
- * Define a integer-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder integerField( Name name,
- Field.Store store,
- Field.Index index,
- Integer minValue ) {
- return integerField(name, store, index, minValue, null);
- }
-
- /**
- * Define a long-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder longField( Name name,
- Field.Store store,
- Field.Index index,
- Long minValue ) {
- return longField(name, store, index, minValue, null);
- }
-
- /**
- * Define a date-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder dateField( Name name,
- Field.Store store,
- Field.Index index,
- Long minValue ) {
- return dateField(name, store, index, minValue, null);
- }
-
- /**
- * Define a float-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder floatField( Name name,
- Field.Store store,
- Field.Index index,
- Float minValue ) {
- return floatField(name, store, index, minValue, null);
- }
-
- /**
- * Define a double-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @param minValue the minimum value for this field, or null if there is no minimum value
- * @return this builder for convenience and method chaining; never null
- */
- public Builder doubleField( Name name,
- Field.Store store,
- Field.Index index,
- Double minValue ) {
- return doubleField(name, store, index, minValue, null);
- }
-
- /**
- * Define a integer-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder integerField( Name name,
- Field.Store store,
- Field.Index index ) {
- return integerField(name, store, index, null, null);
- }
-
- /**
- * Define a long-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder longField( Name name,
- Field.Store store,
- Field.Index index ) {
- return longField(name, store, index, null, null);
- }
-
- /**
- * Define a date-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder dateField( Name name,
- Field.Store store,
- Field.Index index ) {
- return dateField(name, store, index, null, null);
- }
-
- /**
- * Define a float-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder floatField( Name name,
- Field.Store store,
- Field.Index index ) {
- return floatField(name, store, index, null, null);
- }
-
- /**
- * Define a double-based field in the indexes. This method will overwrite any existing definition in this builder.
- *
- * @param name the name of the field
- * @param store the storage setting, or null if the field should be {@link Store#YES stored}
- * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
- * @return this builder for convenience and method chaining; never null
- */
- public Builder doubleField( Name name,
- Field.Store store,
- Field.Index index ) {
- return doubleField(name, store, index, null, null);
- }
-
- /**
- * Build the indexing rules.
- *
- * @return the immutable indexing rules.
- */
- public IndexRules build() {
- return new IndexRules(Collections.unmodifiableMap(new HashMap<Name, Rule>(rulesByName)), defaultRule);
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,60 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.store.Directory;
-import org.jboss.dna.graph.search.SearchEngineException;
-
-/**
- * Interface used to obtain the Lucene {@link Directory} instance that should be used for a workspace given the name of the
- * workspace. There are several implementations (see {@link LuceneConfigurations}), but custom implementations can always be
- * used.
- */
-@ThreadSafe
-public interface LuceneConfiguration {
- /**
- * Get the {@link Directory} that should be used for the workspace with the supplied name.
- *
- * @param workspaceName the workspace name
- * @param indexName the name of the index to be created
- * @return the directory; never null
- * @throws IllegalArgumentException if the workspace name is null
- * @throws SearchEngineException if there is a problem creating the directory
- */
- Directory getDirectory( String workspaceName,
- String indexName ) throws SearchEngineException;
-
- /**
- * Destroy the {@link Directory} that is used for the workspace with the supplied name.
- *
- * @param workspaceName the workspace name
- * @param indexName the name of the index to be created
- * @return true if the directory existed and was destroyed, or false if the directory didn't exist
- * @throws IllegalArgumentException if the workspace name is null
- * @throws SearchEngineException if there is a problem creating the directory
- */
- boolean destroyDirectory( String workspaceName,
- String indexName ) throws SearchEngineException;
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,427 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.ConcurrentHashMap;
-import net.jcip.annotations.Immutable;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.RAMDirectory;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.common.text.NoOpEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.common.util.FileUtil;
-import org.jboss.dna.common.util.HashCode;
-import org.jboss.dna.graph.search.SearchEngineException;
-
-/**
- * A family of {@link LuceneConfiguration} implementations.
- */
-public class LuceneConfigurations {
-
- /**
- * Return a new {@link LuceneConfiguration} that creates in-memory directories.
- *
- * @return the new directory configuration; never null
- */
- public static final LuceneConfiguration inMemory() {
- return new RamDirectoryFactory();
- }
-
- /**
- * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final LuceneConfiguration using( File parent ) {
- return new FileSystemDirectoryFromNameFactory(parent);
- }
-
- /**
- * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final LuceneConfiguration using( File parent,
- LockFactory lockFactory ) {
- return new FileSystemDirectoryFromNameFactory(parent, lockFactory);
- }
-
- /**
- * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final LuceneConfiguration using( File parent,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- return new FileSystemDirectoryFromNameFactory(parent, workspaceNameEncoder, indexNameEncoder);
- }
-
- /**
- * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final LuceneConfiguration using( File parent,
- LockFactory lockFactory,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- return new FileSystemDirectoryFromNameFactory(parent, lockFactory, workspaceNameEncoder, indexNameEncoder);
- }
-
- /**
- * A {@link LuceneConfiguration} implementation that creates {@link Directory} instances of the supplied type for each
- * workspace and pools the results, ensuring that the same {@link Directory} instance is always returned for the same
- * workspace name.
- *
- * @param <DirectoryType> the concrete type of the directory
- */
- @ThreadSafe
- protected static abstract class PoolingDirectoryFactory<DirectoryType extends Directory> implements LuceneConfiguration {
- private final ConcurrentHashMap<IndexId, DirectoryType> directories = new ConcurrentHashMap<IndexId, DirectoryType>();
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneConfiguration#getDirectory(java.lang.String, java.lang.String)
- */
- public Directory getDirectory( String workspaceName,
- String indexName ) throws SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- IndexId id = new IndexId(workspaceName, indexName);
- DirectoryType result = directories.get(id);
- if (result == null) {
- DirectoryType newDirectory = createDirectory(workspaceName, indexName);
- result = directories.putIfAbsent(id, newDirectory);
- if (result == null) result = newDirectory;
- }
- return result;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneConfiguration#destroyDirectory(java.lang.String, java.lang.String)
- */
- public boolean destroyDirectory( String workspaceName,
- String indexName ) throws SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- IndexId id = new IndexId(workspaceName, indexName);
- DirectoryType result = directories.remove(id);
- return result != null ? doDestroy(result) : false;
- }
-
- /**
- * Method implemented by subclasses to create a new Directory implementation.
- *
- * @param workspaceName the name of the workspace for which the {@link Directory} is to be created; never null
- * @param indexName the name of the index to be created
- * @return the new directory; may not be null
- * @throws SearchEngineException if there is a problem creating the directory
- */
- protected abstract DirectoryType createDirectory( String workspaceName,
- String indexName ) throws SearchEngineException;
-
- protected abstract boolean doDestroy( DirectoryType directory ) throws SearchEngineException;
- }
-
- /**
- * A {@link LuceneConfiguration} implementation that creates {@link RAMDirectory} instances for each workspace and index
- * name. Each factory instance maintains a pool of {@link RAMDirectory} instances, ensuring that the same {@link RAMDirectory}
- * is always returned for the same workspace name.
- */
- @ThreadSafe
- public static class RamDirectoryFactory extends PoolingDirectoryFactory<RAMDirectory> {
- protected RamDirectoryFactory() {
- }
-
- @Override
- protected RAMDirectory createDirectory( String workspaceName,
- String indexName ) {
- return new RAMDirectory();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
- */
- @Override
- protected boolean doDestroy( RAMDirectory directory ) throws SearchEngineException {
- return directory != null;
- }
- }
-
- /**
- * A {@link LuceneConfiguration} implementation that creates {@link FSDirectory} instances for each workspace and index
- * name. This factory is created with a parent directory under which all workspace and index directories are created.
- * <p>
- * This uses the supplied encoders to translate the workspace and index names into valid directory names. By default, no
- * encoding is performed, meaning that the workspace and index names are used explicitly as directory names. This default
- * behavior, then, means that not all values of workspace names or index names will work. If you want to be sure that all
- * workspace names work, supply an encoder for the workspace names. (Index names are currently such that they will always be
- * valid directory names, but you can always supply an encoder if you'd like.)
- * </p>
- */
- public static class FileSystemDirectoryFromNameFactory extends PoolingDirectoryFactory<FSDirectory> {
- private final File parentFile;
- private final LockFactory lockFactory;
- private final TextEncoder workspaceNameEncoder;
- private final TextEncoder indexNameEncoder;
-
- /**
- * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent ) {
- this(parent, null, null, null);
- }
-
- /**
- * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent,
- LockFactory lockFactory ) {
- this(parent, lockFactory, null, null);
- }
-
- /**
- * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- this(parent, null, workspaceNameEncoder, indexNameEncoder);
- }
-
- /**
- * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent,
- LockFactory lockFactory,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- CheckArg.isNotNull(parent, "parent");
- this.parentFile = parent;
- this.lockFactory = lockFactory;
- this.workspaceNameEncoder = workspaceNameEncoder != null ? workspaceNameEncoder : new NoOpEncoder();
- this.indexNameEncoder = indexNameEncoder != null ? indexNameEncoder : new NoOpEncoder();
- }
-
- @Override
- protected FSDirectory createDirectory( String workspaceName,
- String indexName ) {
- File workspaceFile = new File(parentFile, workspaceNameEncoder.encode(workspaceName));
- if (!workspaceFile.exists()) {
- workspaceFile.mkdirs();
- } else {
- if (!workspaceFile.isDirectory()) {
- I18n msg = SearchI18n.locationForIndexesIsNotDirectory;
- throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
- }
- if (!workspaceFile.canRead()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeRead;
- throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
- }
- if (!workspaceFile.canWrite()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeWritten;
- throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
- }
- }
- File directory = workspaceFile;
- if (indexName != null) {
- File indexFile = new File(workspaceFile, indexNameEncoder.encode(indexName));
- if (!indexFile.exists()) {
- indexFile.mkdirs();
- } else {
- if (!indexFile.isDirectory()) {
- I18n msg = SearchI18n.locationForIndexesIsNotDirectory;
- throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
- }
- if (!indexFile.canRead()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeRead;
- throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
- }
- if (!indexFile.canWrite()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeWritten;
- throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
- }
- }
- directory = indexFile;
- }
- try {
- return create(directory, lockFactory);
- } catch (IOException e) {
- throw new SearchEngineException(e);
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.LuceneConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
- */
- @Override
- protected boolean doDestroy( FSDirectory directory ) throws SearchEngineException {
- File file = directory.getFile();
- if (file.exists()) {
- return FileUtil.delete(file);
- }
- return false;
- }
-
- /**
- * Override this method to define which subclass of {@link FSDirectory} should be created.
- *
- * @param directory the file system directory; never null
- * @param lockFactory the lock factory; may be null
- * @return the {@link FSDirectory} instance
- * @throws IOException if there is a problem creating the FSDirectory instance
- */
- protected FSDirectory create( File directory,
- LockFactory lockFactory ) throws IOException {
- return FSDirectory.open(directory, lockFactory);
- }
- }
-
- @Immutable
- protected static final class IndexId {
- private final String workspaceName;
- private final String indexName;
- private final int hc;
-
- protected IndexId( String workspaceName,
- String indexName ) {
- assert workspaceName != null;
- this.workspaceName = workspaceName;
- this.indexName = indexName;
- this.hc = HashCode.compute(this.workspaceName, this.indexName);
- }
-
- /**
- * @return indexName
- */
- public String getIndexName() {
- return indexName;
- }
-
- /**
- * @return workspaceName
- */
- public String getWorkspaceName() {
- return workspaceName;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- return hc;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals( Object obj ) {
- if (obj == this) return true;
- if (obj instanceof IndexId) {
- IndexId that = (IndexId)obj;
- if (this.hashCode() != that.hashCode()) return false;
- if (!this.workspaceName.equals(that.workspaceName)) return false;
- if (!this.indexName.equals(that.indexName)) return false;
- return true;
- }
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return indexName != null ? workspaceName + "/" + this.indexName : this.workspaceName;
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,74 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-
-package org.jboss.dna.search;
-
-/**
- * A {@link RuntimeException runtime exception} representing a problem operating against Lucene.
- */
-public class LuceneException extends RuntimeException {
-
- /**
- */
- private static final long serialVersionUID = 8281373010920861138L;
-
- /**
- * Construct a system failure exception with no message.
- */
- public LuceneException() {
- }
-
- /**
- * Construct a system failure exception with a single message.
- *
- * @param message the message describing the failure
- */
- public LuceneException( String message ) {
- super(message);
-
- }
-
- /**
- * Construct a system failure exception with another exception that is the cause of the failure.
- *
- * @param cause the original cause of the failure
- */
- public LuceneException( Throwable cause ) {
- super(cause);
-
- }
-
- /**
- * Construct a system failure exception with a single message and another exception that is the cause of the failure.
- *
- * @param message the message describing the failure
- * @param cause the original cause of the failure
- */
- public LuceneException( String message,
- Throwable cause ) {
- super(message, cause);
-
- }
-
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,495 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.List;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.graph.property.Binary;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.PropertyType;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.model.And;
-import org.jboss.dna.graph.query.model.Between;
-import org.jboss.dna.graph.query.model.BindVariableName;
-import org.jboss.dna.graph.query.model.ChildNode;
-import org.jboss.dna.graph.query.model.Comparison;
-import org.jboss.dna.graph.query.model.Constraint;
-import org.jboss.dna.graph.query.model.DescendantNode;
-import org.jboss.dna.graph.query.model.DynamicOperand;
-import org.jboss.dna.graph.query.model.FullTextSearch;
-import org.jboss.dna.graph.query.model.FullTextSearchScore;
-import org.jboss.dna.graph.query.model.Length;
-import org.jboss.dna.graph.query.model.Literal;
-import org.jboss.dna.graph.query.model.LowerCase;
-import org.jboss.dna.graph.query.model.NodeDepth;
-import org.jboss.dna.graph.query.model.NodeLocalName;
-import org.jboss.dna.graph.query.model.NodeName;
-import org.jboss.dna.graph.query.model.NodePath;
-import org.jboss.dna.graph.query.model.Not;
-import org.jboss.dna.graph.query.model.Operator;
-import org.jboss.dna.graph.query.model.Or;
-import org.jboss.dna.graph.query.model.PropertyExistence;
-import org.jboss.dna.graph.query.model.PropertyValue;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.model.SameNode;
-import org.jboss.dna.graph.query.model.SelectorName;
-import org.jboss.dna.graph.query.model.SetCriteria;
-import org.jboss.dna.graph.query.model.StaticOperand;
-import org.jboss.dna.graph.query.model.TypeSystem;
-import org.jboss.dna.graph.query.model.UpperCase;
-import org.jboss.dna.graph.query.model.Visitors;
-import org.jboss.dna.graph.query.model.FullTextSearch.NegationTerm;
-import org.jboss.dna.graph.query.model.TypeSystem.TypeFactory;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.process.AbstractAccessComponent;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.SelectComponent;
-import org.jboss.dna.graph.query.process.SelectComponent.Analyzer;
-import org.jboss.dna.search.DualIndexSearchProvider.ContentIndex;
-import org.jboss.dna.search.LuceneSession.TupleCollector;
-
-/**
- *
- */
-/**
- * The {@link ProcessingComponent} implementation that executes a single atomic access query against the Lucene indexes.
- */
-public class LuceneQueryComponent extends AbstractAccessComponent {
- private final QueryCommand originalQuery;
- private final LuceneSession session;
- private final String sourceName;
- private final String workspaceName;
-
- protected LuceneQueryComponent( LuceneSession session,
- QueryCommand originalQuery,
- QueryContext context,
- Columns columns,
- PlanNode accessNode,
- Analyzer analyzer,
- String sourceName,
- String workspaceName ) {
- super(context, columns, accessNode);
- this.originalQuery = originalQuery;
- this.session = session;
- this.sourceName = sourceName;
- this.workspaceName = workspaceName;
- }
-
- protected String fieldNameFor( String name ) {
- // Convert to a name and then to a string, so that the namespaces are resolved
- return session.stringFactory.create(session.nameFactory.create(name));
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.ProcessingComponent#execute()
- */
- @Override
- public List<Object[]> execute() {
-
- // Some kinds of constraints are not easily pushed down to Lucene as are of a Lucene Query, and
- // instead are applied by filtering the results. For example, a FullTextSearchScore applies
- // to the score of the tuple, which cannot be (easily?) applied as a Query.
- //
- // Therefore, each of the AND-ed constraints of the query are evaluated separately. After all,
- // each of the tuples returned by the planned query must satisfy all of the AND-ed constraints.
- // Or, to put it another way, if a tuple does not satisfy one of the AND-ed constraints, the
- // tuple should not be included in the query results.
- //
- // Logically, any AND-ed criteria that cannot be pushed down to Lucene can of course be applied
- // as a filter on the results. Thus, each AND-ed constraint is processed to first determine if
- // it can be represented as a Lucene query; all other AND-ed constraints must be handled as
- // a results filter. Since most queries will likely use one or more simple constraints AND-ed
- // together, this approach will likely work very well.
- //
- // The only hairy case is when any AND-ed constraint is actually an OR-ed combination of multiple
- // constraints of which at least one cannot be pushed down to Lucene. In this case, the entire
- // AND-ed constraint must be treated as a results filter (even if many of those constraints that
- // make up the OR-ed constraint can be pushed down). Hopefully, this will not be a common case
- // in actual queries.
-
- // For each of the AND-ed constraints ...
- Query pushDownQuery = null;
- Constraint postProcessConstraint = null;
- try {
- for (Constraint andedConstraint : this.andedConstraints) {
- // Determine if it can be represented as a Lucene query ...
- Query constraintQuery = createQuery(andedConstraint);
- if (constraintQuery != null) {
- // The AND-ed constraint _can_ be represented as a push-down Lucene query ...
- if (pushDownQuery == null) {
- // This must be the first query ...
- pushDownQuery = constraintQuery;
- } else if (pushDownQuery instanceof BooleanQuery) {
- // We have to add the constraint query to the existing boolean ...
- BooleanQuery booleanQuery = (BooleanQuery)pushDownQuery;
- booleanQuery.add(constraintQuery, Occur.MUST);
- } else {
- // This is the second push-down query, so create a BooleanQuery ...
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(pushDownQuery, Occur.MUST);
- booleanQuery.add(constraintQuery, Occur.MUST);
- pushDownQuery = booleanQuery;
- }
- } else {
- // The AND-ed constraint _cannot_ be represented as a push-down Lucene query ...
- if (postProcessConstraint == null) {
- postProcessConstraint = andedConstraint;
- } else {
- postProcessConstraint = new And(postProcessConstraint, andedConstraint);
- }
- }
- }
- } catch (IOException e) {
- // There was a error working with the constraints (such as a ValueFormatException) ...
- QueryContext context = getContext();
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- String origQueryString = Visitors.readable(originalQuery);
- context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
- return emptyTuples();
- } catch (RuntimeException e) {
- // There was a error working with the constraints (such as a ValueFormatException) ...
- QueryContext context = getContext();
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- String origQueryString = Visitors.readable(originalQuery);
- context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
- return emptyTuples();
- }
-
- if (pushDownQuery == null) {
- // There are no constraints that can be pushed down, so return _all_ the nodes ...
- pushDownQuery = new MatchAllDocsQuery();
- }
-
- // Get the results from Lucene ...
- List<Object[]> tuples = null;
- final Columns columns = getColumns();
- final QueryContext context = getContext();
- try {
- // Execute the query against the content indexes ...
- IndexSearcher searcher = session.getContentSearcher();
- TupleCollector collector = session.createTupleCollector(columns);
- searcher.search(pushDownQuery, collector);
- tuples = collector.getTuples();
- } catch (IOException e) {
- // There was a problem executing the Lucene query ...
- I18n msg = SearchI18n.errorWhilePerformingLuceneQuery;
- String origQueryString = Visitors.readable(originalQuery);
- context.getProblems().addError(e, msg, pushDownQuery, origQueryString, workspaceName, sourceName, e.getMessage());
- return emptyTuples();
- }
-
- if (postProcessConstraint != null && !tuples.isEmpty()) {
- // Create a delegate processing component that will return the tuples we've already found ...
- final List<Object[]> allTuples = tuples;
- ProcessingComponent tuplesProcessor = new ProcessingComponent(context, columns) {
- @Override
- public List<Object[]> execute() {
- return allTuples;
- }
- };
- // Create a processing component that will apply these constraints to the tuples we already found ...
- return new SelectComponent(tuplesProcessor, postProcessConstraint, context.getVariables()).execute();
- }
- return tuples;
- }
-
- protected Query createQuery( Constraint constraint ) throws IOException {
- if (constraint instanceof And) {
- And and = (And)constraint;
- Query leftQuery = createQuery(and.getLeft());
- Query rightQuery = createQuery(and.getRight());
- if (leftQuery == null || rightQuery == null) return null;
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(createQuery(and.getLeft()), Occur.MUST);
- booleanQuery.add(createQuery(and.getRight()), Occur.MUST);
- return booleanQuery;
- }
- if (constraint instanceof Or) {
- Or or = (Or)constraint;
- Query leftQuery = createQuery(or.getLeft());
- Query rightQuery = createQuery(or.getRight());
- if (leftQuery == null) {
- return rightQuery != null ? rightQuery : null;
- } else if (rightQuery == null) {
- return leftQuery;
- }
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(createQuery(or.getLeft()), Occur.SHOULD);
- booleanQuery.add(createQuery(or.getRight()), Occur.SHOULD);
- return booleanQuery;
- }
- if (constraint instanceof Not) {
- Not not = (Not)constraint;
- Query notted = createQuery(not.getConstraint());
- if (notted == null) return new MatchAllDocsQuery();
- }
- if (constraint instanceof SetCriteria) {
- SetCriteria setCriteria = (SetCriteria)constraint;
- DynamicOperand left = setCriteria.getLeftOperand();
- int numRightOperands = setCriteria.getRightOperands().size();
- assert numRightOperands > 0;
- if (numRightOperands == 1) {
- return createQuery(left, Operator.EQUAL_TO, setCriteria.getRightOperands().iterator().next());
- }
- BooleanQuery setQuery = new BooleanQuery();
- for (StaticOperand right : setCriteria.getRightOperands()) {
- Query rightQuery = createQuery(left, Operator.EQUAL_TO, right);
- if (rightQuery == null) return null;
- setQuery.add(rightQuery, Occur.SHOULD);
- }
- return setQuery;
- }
- if (constraint instanceof PropertyExistence) {
- PropertyExistence existence = (PropertyExistence)constraint;
- return createQuery(existence.getSelectorName(), existence.getPropertyName());
- }
- if (constraint instanceof Between) {
- Between between = (Between)constraint;
- return createQuery(between);
- }
- if (constraint instanceof Comparison) {
- Comparison comparison = (Comparison)constraint;
- return createQuery(comparison.getOperand1(), comparison.getOperator(), comparison.getOperand2());
- }
- if (constraint instanceof FullTextSearch) {
- FullTextSearch search = (FullTextSearch)constraint;
- String fieldName = ContentIndex.FULL_TEXT;
- String propertyName = search.getPropertyName();
- if (propertyName != null) {
- fieldName = session.fullTextFieldName(fieldNameFor(propertyName));
- }
- return createQuery(fieldName, search.getTerm());
- }
- try {
- if (constraint instanceof SameNode) {
- SameNode sameNode = (SameNode)constraint;
- Path path = session.pathFactory.create(sameNode.getPath());
- return session.findNodeAt(path);
- }
- if (constraint instanceof ChildNode) {
- ChildNode childNode = (ChildNode)constraint;
- Path path = session.pathFactory.create(childNode.getParentPath());
- return session.findChildNodes(path);
- }
- if (constraint instanceof DescendantNode) {
- DescendantNode descendantNode = (DescendantNode)constraint;
- Path path = session.pathFactory.create(descendantNode.getAncestorPath());
- return session.findAllNodesBelow(path);
- }
- } catch (IOException e) {
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- getContext().getProblems().addError(e,
- msg,
- Visitors.readable(originalQuery),
- workspaceName,
- sourceName,
- e.getMessage());
- return null;
- }
- // Should not get here ...
- assert false;
- return null;
- }
-
- protected Query createQuery( DynamicOperand left,
- Operator operator,
- StaticOperand right ) throws IOException {
- return createQuery(left, operator, right, true);
- }
-
- protected Query createQuery( DynamicOperand left,
- Operator operator,
- StaticOperand right,
- boolean caseSensitive ) throws IOException {
- // Handle the static operand ...
- Object value = createOperand(right, caseSensitive);
- assert value != null;
-
- // Address the dynamic operand ...
- if (left instanceof FullTextSearchScore) {
- // This can only be represented as a filter ...
- return null;
- } else if (left instanceof PropertyValue) {
- return session.findNodesWith((PropertyValue)left, operator, value, caseSensitive);
- } else if (left instanceof Length) {
- return session.findNodesWith((Length)left, operator, right);
- } else if (left instanceof LowerCase) {
- LowerCase lowercase = (LowerCase)left;
- return createQuery(lowercase.getOperand(), operator, right, false);
- } else if (left instanceof UpperCase) {
- UpperCase lowercase = (UpperCase)left;
- return createQuery(lowercase.getOperand(), operator, right, false);
- } else if (left instanceof NodeDepth) {
- assert operator != Operator.LIKE;
- // Could be represented as a result filter, but let's do this now ...
- return session.findNodesWith((NodeDepth)left, operator, value);
- } else if (left instanceof NodePath) {
- return session.findNodesWith((NodePath)left, operator, value, caseSensitive);
- } else if (left instanceof NodeName) {
- return session.findNodesWith((NodeName)left, operator, value, caseSensitive);
- } else if (left instanceof NodeLocalName) {
- return session.findNodesWith((NodeLocalName)left, operator, value, caseSensitive);
- } else {
- assert false;
- return null;
- }
- }
-
- protected Object createOperand( StaticOperand operand,
- boolean caseSensitive ) {
- Object value = null;
- if (operand instanceof Literal) {
- Literal literal = (Literal)operand;
- value = literal.getValue();
- if (!caseSensitive) value = lowerCase(value);
- } else if (operand instanceof BindVariableName) {
- BindVariableName variable = (BindVariableName)operand;
- String variableName = variable.getVariableName();
- value = getContext().getVariables().get(variableName);
- if (!caseSensitive) value = lowerCase(value);
- } else {
- assert false;
- }
- return value;
- }
-
- protected Query createQuery( DynamicOperand left,
- StaticOperand lower,
- StaticOperand upper,
- boolean includesLower,
- boolean includesUpper,
- boolean caseSensitive ) throws IOException {
- // Handle the static operands ...
- Object lowerValue = createOperand(lower, caseSensitive);
- Object upperValue = createOperand(upper, caseSensitive);
- assert lowerValue != null;
- assert upperValue != null;
-
- // Only in the case of a PropertyValue and Depth will we need to do something special ...
- if (left instanceof NodeDepth) {
- return session.findNodesWithNumericRange((NodeDepth)left, lowerValue, upperValue, includesLower, includesUpper);
- } else if (left instanceof PropertyValue) {
- PropertyType lowerType = PropertyType.discoverType(lowerValue);
- PropertyType upperType = PropertyType.discoverType(upperValue);
- if (upperType == lowerType) {
- switch (upperType) {
- case DATE:
- case LONG:
- case DOUBLE:
- case DECIMAL:
- return session.findNodesWithNumericRange((PropertyValue)left,
- lowerValue,
- upperValue,
- includesLower,
- includesUpper);
- default:
- // continue on and handle as boolean query ...
- }
- }
- }
-
- // Otherwise, just create a boolean query ...
- BooleanQuery query = new BooleanQuery();
- Operator lowerOp = includesLower ? Operator.GREATER_THAN_OR_EQUAL_TO : Operator.GREATER_THAN;
- Operator upperOp = includesUpper ? Operator.LESS_THAN_OR_EQUAL_TO : Operator.LESS_THAN;
- Query lowerQuery = createQuery(left, lowerOp, lower, caseSensitive);
- Query upperQuery = createQuery(left, upperOp, upper, caseSensitive);
- if (lowerQuery == null || upperQuery == null) return null;
- query.add(lowerQuery, Occur.MUST);
- query.add(upperQuery, Occur.MUST);
- return query;
- }
-
- protected Object lowerCase( Object value ) {
- if (value instanceof String) {
- return ((String)value).toLowerCase();
- }
- assert !(value instanceof Binary);
- TypeSystem typeSystem = getContext().getTypeSystem();
- TypeFactory<String> stringFactory = typeSystem.getStringFactory();
- TypeFactory<?> valueFactory = typeSystem.getTypeFactory(value);
- return valueFactory.create(stringFactory.create(value).toLowerCase());
- }
-
- protected Query createQuery( SelectorName selectorName,
- String propertyName ) {
- Term term = new Term(fieldNameFor(propertyName));
- return new TermQuery(term);
- }
-
- protected Query createQuery( String fieldName,
- FullTextSearch.Term term ) {
- if (term instanceof FullTextSearch.Conjunction) {
- FullTextSearch.Conjunction conjunction = (FullTextSearch.Conjunction)term;
- BooleanQuery query = new BooleanQuery();
- for (FullTextSearch.Term nested : conjunction) {
- if (nested instanceof NegationTerm) {
- query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
- } else {
- query.add(createQuery(fieldName, nested), Occur.MUST);
- }
- }
- return query;
- }
- if (term instanceof FullTextSearch.Disjunction) {
- FullTextSearch.Disjunction disjunction = (FullTextSearch.Disjunction)term;
- BooleanQuery query = new BooleanQuery();
- for (FullTextSearch.Term nested : disjunction) {
- if (nested instanceof NegationTerm) {
- query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
- } else {
- query.add(createQuery(fieldName, nested), Occur.SHOULD);
- }
- }
- return query;
- }
- if (term instanceof FullTextSearch.SimpleTerm) {
- FullTextSearch.SimpleTerm simple = (FullTextSearch.SimpleTerm)term;
- if (simple.isQuotingRequired()) {
- PhraseQuery query = new PhraseQuery();
- query.setSlop(0); // terms must be adjacent
- for (String value : simple.getValues()) {
- query.add(new Term(fieldName, value));
- }
- return query;
- }
- return new TermQuery(new Term(fieldName, simple.getValue()));
- }
- // Should not get here ...
- assert false;
- return null;
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,365 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.Set;
-import net.jcip.annotations.NotThreadSafe;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.property.DateTimeFactory;
-import org.jboss.dna.graph.property.NameFactory;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.PathFactory;
-import org.jboss.dna.graph.property.UuidFactory;
-import org.jboss.dna.graph.property.ValueFactories;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryEngine;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.model.Length;
-import org.jboss.dna.graph.query.model.NodeDepth;
-import org.jboss.dna.graph.query.model.NodeLocalName;
-import org.jboss.dna.graph.query.model.NodeName;
-import org.jboss.dna.graph.query.model.NodePath;
-import org.jboss.dna.graph.query.model.Operator;
-import org.jboss.dna.graph.query.model.PropertyValue;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.model.Visitors;
-import org.jboss.dna.graph.query.optimize.Optimizer;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
-import org.jboss.dna.graph.query.plan.CanonicalPlanner;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.plan.Planner;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.QueryProcessor;
-import org.jboss.dna.graph.search.SearchProvider;
-import org.jboss.dna.search.DualIndexSearchProvider.PathIndex;
-
-/**
- *
- */
-@NotThreadSafe
-public abstract class LuceneSession implements SearchProvider.Session {
- protected final ExecutionContext context;
- protected final String sourceName;
- protected final String workspaceName;
- protected final IndexRules rules;
- protected final Analyzer analyzer;
- protected final boolean overwrite;
- protected final boolean readOnly;
- protected final ValueFactory<String> stringFactory;
- protected final DateTimeFactory dateFactory;
- protected final PathFactory pathFactory;
- protected final UuidFactory uuidFactory;
- protected final NameFactory nameFactory;
- private int changeCount;
- private QueryEngine queryEngine;
-
- protected LuceneSession( ExecutionContext context,
- String sourceName,
- String workspaceName,
- IndexRules rules,
- Analyzer analyzer,
- boolean overwrite,
- boolean readOnly ) {
- this.context = context;
- this.sourceName = sourceName;
- this.workspaceName = workspaceName;
- this.rules = rules;
- this.overwrite = overwrite;
- this.readOnly = readOnly;
- this.analyzer = analyzer;
- ValueFactories factories = context.getValueFactories();
- this.stringFactory = factories.getStringFactory();
- this.dateFactory = factories.getDateFactory();
- this.pathFactory = factories.getPathFactory();
- this.uuidFactory = factories.getUuidFactory();
- this.nameFactory = factories.getNameFactory();
- assert this.context != null;
- assert this.sourceName != null;
- assert this.workspaceName != null;
- assert this.rules != null;
- assert this.analyzer != null;
- assert this.stringFactory != null;
- assert this.dateFactory != null;
- }
-
- /**
- * Create the field name that will be used to store the full-text searchable property values.
- *
- * @param propertyName the name of the property; may not be null
- * @return the field name for the full-text searchable property values; never null
- */
- protected abstract String fullTextFieldName( String propertyName );
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#getContext()
- */
- public final ExecutionContext getContext() {
- return context;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#getSourceName()
- */
- public final String getSourceName() {
- return sourceName;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#getWorkspaceName()
- */
- public String getWorkspaceName() {
- return workspaceName;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.search.SearchProvider.Session#hasChanges()
- */
- public boolean hasChanges() {
- return changeCount > 0;
- }
-
- /**
- * Get the Lucene index searcher that should be used to execute queries.
- *
- * @return the searcher; never null
- * @throws IOException if there is an error obtaining the index searcher
- */
- public abstract IndexSearcher getContentSearcher() throws IOException;
-
- /**
- * Get the query engine for this session.
- *
- * @return the query engine; never null
- */
- protected QueryEngine queryEngine() {
- if (queryEngine == null) {
- // Create the query engine ...
- Planner planner = new CanonicalPlanner();
- Optimizer optimizer = new RuleBasedOptimizer() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
- * org.jboss.dna.graph.query.plan.PlanHints)
- */
- @Override
- protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
- PlanHints hints ) {
- super.populateRuleStack(ruleStack, hints);
- // Add any custom rules here, either at the front of the stack or at the end
- }
- };
- QueryProcessor processor = new QueryProcessor() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
- * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
- * org.jboss.dna.graph.query.QueryResults.Columns,
- * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
- QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
- try {
- return LuceneSession.this.createAccessComponent(originalQuery,
- context,
- accessNode,
- resultColumns,
- analyzer);
- } catch (IOException e) {
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- context.getProblems().addError(e,
- msg,
- Visitors.readable(originalQuery),
- getWorkspaceName(),
- getSourceName(),
- e.getMessage());
- return null;
- }
- }
- };
-
- queryEngine = new QueryEngine(planner, optimizer, processor);
- }
- return queryEngine;
- }
-
- protected abstract ProcessingComponent createAccessComponent( QueryCommand originalQuery,
- QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer )
- throws IOException;
-
- /**
- * Create a {@link TupleCollector} instance that collects the results from the index(es).
- *
- * @param columns the column definitions; never null
- * @return the collector; never null
- */
- public abstract TupleCollector createTupleCollector( Columns columns );
-
- /**
- * Utility method to create a query to find all of the documents representing nodes with the supplied IDs.
- *
- * @param ids the IDs of the nodes that are to be found; may not be null
- * @return the query; never null
- * @throws IOException if there is a problem creating this query
- */
- public abstract Query findAllNodesWithIds( Set<String> ids ) throws IOException;
-
- public abstract Query findAllNodesBelow( Path ancestorPath ) throws IOException;
-
- /**
- * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
- * supplied path.
- *
- * @param parentPath the path of the parent node.
- * @return the query; never null
- * @throws IOException if there is an error creating the query
- */
- public abstract Query findChildNodes( Path parentPath ) throws IOException;
-
- /**
- * Create a query that can be used to find the one document (or node) that exists at the exact path supplied. This method
- * first queries the {@link PathIndex path index} to find the UUID of the node at the supplied path, and then returns a query
- * that matches the UUID.
- *
- * @param path the path of the node
- * @return the query; never null
- * @throws IOException if there is an error creating the query
- */
- public abstract Query findNodeAt( Path path ) throws IOException;
-
- /**
- * Create a query that can be used to find documents (or nodes) that have a field value that satisfies the supplied LIKE
- * expression.
- *
- * @param fieldName the name of the document field to search
- * @param likeExpression the JCR like expression
- * @param caseSensitive true if the evaluation should be performed in a case sensitive manner, or false otherwise
- * @return the query; never null
- * @throws IOException if there is an error creating the query
- */
- public abstract Query findNodesLike( String fieldName,
- String likeExpression,
- boolean caseSensitive ) throws IOException;
-
- public abstract Query findNodesWith( Length propertyLength,
- Operator operator,
- Object value ) throws IOException;
-
- public abstract Query findNodesWith( PropertyValue propertyValue,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException;
-
- public abstract Query findNodesWithNumericRange( PropertyValue propertyValue,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) throws IOException;
-
- public abstract Query findNodesWithNumericRange( NodeDepth depth,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) throws IOException;
-
- // public abstract Query findNodesWithNumericRange( String field,
- // Object lowerValue,
- // Object upperValue,
- // boolean includesLower,
- // boolean includesUpper ) throws IOException;
-
- public abstract Query findNodesWith( NodePath nodePath,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException;
-
- public abstract Query findNodesWith( NodeName nodeName,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException;
-
- public abstract Query findNodesWith( NodeLocalName nodeName,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException;
-
- public abstract Query findNodesWith( NodeDepth depthConstraint,
- Operator operator,
- Object value ) throws IOException;
-
- // public abstract Query createLocalNameQuery( String likeExpression ) throws IOException;
-
- // public abstract Query createSnsIndexQuery( String likeExpression ) throws IOException;
-
- public String pathAsString( Path path,
- ValueFactory<String> stringFactory ) {
- assert path != null;
- if (path.isRoot()) return "/";
- StringBuilder sb = new StringBuilder();
- for (Path.Segment segment : path) {
- sb.append('/');
- sb.append(stringFactory.create(segment.getName()));
- sb.append('[');
- sb.append(segment.getIndex());
- sb.append(']');
- }
- return sb.toString();
- }
-
- public static abstract class TupleCollector extends Collector {
-
- /**
- * Get the tuples.
- *
- * @return the tuples; never null
- */
- public abstract LinkedList<Object[]> getTuples();
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,61 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.util.Locale;
-import java.util.Set;
-import org.jboss.dna.common.CommonI18n;
-import org.jboss.dna.common.i18n.I18n;
-
-public class SearchI18n {
-
- public static I18n locationForIndexesIsNotDirectory;
- public static I18n locationForIndexesCannotBeRead;
- public static I18n locationForIndexesCannotBeWritten;
- public static I18n errorWhileCommittingIndexChanges;
- public static I18n errorWhileRollingBackIndexChanges;
- public static I18n errorWhilePerformingSearch;
- public static I18n errorWhilePerformingQuery;
- public static I18n errorWhilePerformingLuceneQuery;
-
- static {
- try {
- I18n.initialize(SearchI18n.class);
- } catch (final Exception err) {
- System.err.println(err);
- }
- }
-
- public static Set<Locale> getLocalizationProblemLocales() {
- return I18n.getLocalizationProblemLocales(CommonI18n.class);
- }
-
- public static Set<String> getLocalizationProblems() {
- return I18n.getLocalizationProblems(CommonI18n.class);
- }
-
- public static Set<String> getLocalizationProblems( Locale locale ) {
- return I18n.getLocalizationProblems(CommonI18n.class, locale);
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,254 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Weight;
-import org.jboss.dna.graph.property.ValueFactories;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.model.Length;
-
-/**
- * A Lucene {@link Query} implementation that is used to apply a {@link Length} constraint against a string field. This query
- * implementation works by using the {@link Query#weight(Searcher) weight} and
- * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
- * with string fields that satisfy the constraint.
- */
-public class CompareLengthQuery extends CompareQuery<Integer> {
-
- private static final long serialVersionUID = 1L;
- protected static final Evaluator<Integer> EQUAL_TO = new Evaluator<Integer>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Integer nodeValue,
- Integer length ) {
- return nodeValue == length;
- }
-
- @Override
- public String toString() {
- return " = ";
- }
- };
- protected static final Evaluator<Integer> NOT_EQUAL_TO = new Evaluator<Integer>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Integer nodeValue,
- Integer length ) {
- return nodeValue == length;
- }
-
- @Override
- public String toString() {
- return " != ";
- }
- };
- protected static final Evaluator<Integer> IS_LESS_THAN = new Evaluator<Integer>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Integer nodeValue,
- Integer length ) {
- return nodeValue < length;
- }
-
- @Override
- public String toString() {
- return " < ";
- }
- };
- protected static final Evaluator<Integer> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Integer>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Integer nodeValue,
- Integer length ) {
- return nodeValue < length;
- }
-
- @Override
- public String toString() {
- return " <= ";
- }
- };
- protected static final Evaluator<Integer> IS_GREATER_THAN = new Evaluator<Integer>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Integer nodeValue,
- Integer length ) {
- return nodeValue < length;
- }
-
- @Override
- public String toString() {
- return " > ";
- }
- };
- protected static final Evaluator<Integer> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Integer>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Integer nodeValue,
- Integer length ) {
- return nodeValue < length;
- }
-
- @Override
- public String toString() {
- return " >= ";
- }
- };
-
- /**
- * Construct a {@link Query} implementation that scores documents with a field length that is equal to the supplied constraint
- * value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @return the query; never null
- */
- public static CompareLengthQuery createQueryForNodesWithFieldEqualTo( Integer constraintValue,
- String fieldName,
- ValueFactories factories ) {
- return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a field length that is not equal to the supplied
- * constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @return the query; never null
- */
- public static CompareLengthQuery createQueryForNodesWithFieldNotEqualTo( Integer constraintValue,
- String fieldName,
- ValueFactories factories ) {
- return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a field length that is greater than the supplied
- * constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @return the query; never null
- */
- public static CompareLengthQuery createQueryForNodesWithFieldGreaterThan( Integer constraintValue,
- String fieldName,
- ValueFactories factories ) {
- return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a field length that is greater than or equal to the
- * supplied constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @return the query; never null
- */
- public static CompareLengthQuery createQueryForNodesWithFieldGreaterThanOrEqualTo( Integer constraintValue,
- String fieldName,
- ValueFactories factories ) {
- return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN_OR_EQUAL_TO);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a field length that is less than the supplied
- * constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @return the query; never null
- */
- public static CompareLengthQuery createQueryForNodesWithFieldLessThan( Integer constraintValue,
- String fieldName,
- ValueFactories factories ) {
- return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_LESS_THAN);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a field length that is less than or equal to the
- * supplied constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @return the query; never null
- */
- public static CompareLengthQuery createQueryForNodesWithFieldLessThanOrEqualTo( Integer constraintValue,
- String fieldName,
- ValueFactories factories ) {
- return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_LESS_THAN_OR_EQUAL_TO);
- }
-
- /**
- * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
- *
- * @param fieldName the name of the document field containing the value; may not be null
- * @param constraintValue the constraint value; may not be null
- * @param stringFactory the string factory that can be used during the scoring; may not be null
- * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
- * constraint; may not be null
- */
- protected CompareLengthQuery( String fieldName,
- Integer constraintValue,
- ValueFactory<String> stringFactory,
- Evaluator<Integer> evaluator ) {
- super(fieldName, constraintValue, null, stringFactory, evaluator);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- protected Integer readFromDocument( IndexReader reader,
- int docId ) throws IOException {
- // This implementation reads the length of the field ...
- Document doc = reader.document(docId, fieldSelector);
- String valueString = doc.get(fieldName);
- String value = stringFactory.create(valueString);
- return value != null ? value.length() : 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#clone()
- */
- @Override
- public Object clone() {
- return new CompareLengthQuery(fieldName, constraintValue, stringFactory, evaluator);
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,258 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Weight;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.PathFactory;
-import org.jboss.dna.graph.property.ValueComparators;
-import org.jboss.dna.graph.property.ValueFactories;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.model.Comparison;
-
-/**
- * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the name of nodes. This
- * query implementation works by using the {@link Query#weight(Searcher) weight} and
- * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
- * that correspond to nodes with Names that satisfy the constraint.
- */
-public class CompareNameQuery extends CompareQuery<Path.Segment> {
-
- private static final long serialVersionUID = 1L;
- protected static final Evaluator<Path.Segment> IS_LESS_THAN = new Evaluator<Path.Segment>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path.Segment nodeValue,
- Path.Segment constraintValue ) {
- return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) < 0;
- }
-
- @Override
- public String toString() {
- return " < ";
- }
- };
- protected static final Evaluator<Path.Segment> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Path.Segment>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path.Segment nodeValue,
- Path.Segment constraintValue ) {
- return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) <= 0;
- }
-
- @Override
- public String toString() {
- return " <= ";
- }
- };
- protected static final Evaluator<Path.Segment> IS_GREATER_THAN = new Evaluator<Path.Segment>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path.Segment nodeValue,
- Path.Segment constraintValue ) {
- return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) > 0;
- }
-
- @Override
- public String toString() {
- return " > ";
- }
- };
- protected static final Evaluator<Path.Segment> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Path.Segment>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path.Segment nodeValue,
- Path.Segment constraintValue ) {
- return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) >= 0;
- }
-
- @Override
- public String toString() {
- return " >= ";
- }
- };
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
- * that is greater than the supplied constraint name.
- *
- * @param constraintValue the constraint value; may not be null
- * @param localNameField the name of the document field containing the local name value; may not be null
- * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareNameQuery createQueryForNodesWithNameGreaterThan( Path.Segment constraintValue,
- String localNameField,
- String snsIndexFieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
- factories.getStringFactory(), factories.getLongFactory(), IS_GREATER_THAN, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
- * that is greater than or equal to the supplied constraint name.
- *
- * @param constraintValue the constraint value; may not be null
- * @param localNameField the name of the document field containing the local name value; may not be null
- * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareNameQuery createQueryForNodesWithNameGreaterThanOrEqualTo( Path.Segment constraintValue,
- String localNameField,
- String snsIndexFieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
- factories.getStringFactory(), factories.getLongFactory(), IS_GREATER_THAN_OR_EQUAL_TO,
- caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
- * that is less than the supplied constraint name.
- *
- * @param constraintValue the constraint value; may not be null
- * @param localNameField the name of the document field containing the local name value; may not be null
- * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareNameQuery createQueryForNodesWithNameLessThan( Path.Segment constraintValue,
- String localNameField,
- String snsIndexFieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
- factories.getStringFactory(), factories.getLongFactory(), IS_LESS_THAN, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
- * that is less than or equal to the supplied constraint name.
- *
- * @param constraintValue the constraint value; may not be null
- * @param localNameField the name of the document field containing the local name value; may not be null
- * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareNameQuery createQueryForNodesWithNameLessThanOrEqualTo( Path.Segment constraintValue,
- String localNameField,
- String snsIndexFieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
- factories.getStringFactory(), factories.getLongFactory(), IS_LESS_THAN_OR_EQUAL_TO,
- caseSensitive);
- }
-
- private final String snsIndexFieldName;
- private final ValueFactory<Long> longFactory;
- private final PathFactory pathFactory;
- private final boolean caseSensitive;
-
- /**
- * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
- *
- * @param localNameField the name of the document field containing the local name value; may not be null
- * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
- * @param constraintValue the constraint path; may not be null
- * @param pathFactory the path factory that can be used during the scoring; may not be null
- * @param stringFactory the string factory that can be used during the scoring; may not be null
- * @param longFactory the long factory that can be used during the scoring; may not be null
- * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
- * constraint; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- */
- protected CompareNameQuery( final String localNameField,
- final String snsIndexFieldName,
- Path.Segment constraintValue,
- PathFactory pathFactory,
- ValueFactory<String> stringFactory,
- ValueFactory<Long> longFactory,
- Evaluator<Path.Segment> evaluator,
- boolean caseSensitive ) {
- super(localNameField, constraintValue, null, stringFactory, evaluator, new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- if (fieldName.equals(localNameField)) return FieldSelectorResult.LOAD;
- if (fieldName.equals(snsIndexFieldName)) return FieldSelectorResult.LOAD;
- return FieldSelectorResult.NO_LOAD;
- }
- });
- this.snsIndexFieldName = snsIndexFieldName;
- this.longFactory = longFactory;
- this.pathFactory = pathFactory;
- this.caseSensitive = caseSensitive;
- assert this.snsIndexFieldName != null;
- assert this.longFactory != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- protected Path.Segment readFromDocument( IndexReader reader,
- int docId ) throws IOException {
- Document doc = reader.document(docId, fieldSelector);
- String localName = doc.get(fieldName);
- if (!caseSensitive) localName = localName.toLowerCase();
- int sns = longFactory.create(doc.get(snsIndexFieldName)).intValue();
- return pathFactory.createSegment(localName, sns);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#clone()
- */
- @Override
- public Object clone() {
- return new CompareNameQuery(fieldName, snsIndexFieldName, constraintValue, pathFactory, stringFactory, longFactory,
- evaluator, caseSensitive);
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,223 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Weight;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.ValueComparators;
-import org.jboss.dna.graph.property.ValueFactories;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.model.Comparison;
-
-/**
- * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the Path of nodes. This
- * query implementation works by using the {@link Query#weight(Searcher) weight} and
- * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
- * that correspond to nodes with Paths that satisfy the constraint.
- */
-public class ComparePathQuery extends CompareQuery<Path> {
-
- private static final long serialVersionUID = 1L;
- protected static final Evaluator<Path> PATH_IS_LESS_THAN = new Evaluator<Path>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path nodePath,
- Path constraintPath ) {
- return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) < 0;
- }
-
- @Override
- public String toString() {
- return " < ";
- }
- };
- protected static final Evaluator<Path> PATH_IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Path>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path nodePath,
- Path constraintPath ) {
- return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) <= 0;
- }
-
- @Override
- public String toString() {
- return " <= ";
- }
- };
- protected static final Evaluator<Path> PATH_IS_GREATER_THAN = new Evaluator<Path>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path nodePath,
- Path constraintPath ) {
- return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) > 0;
- }
-
- @Override
- public String toString() {
- return " > ";
- }
- };
- protected static final Evaluator<Path> PATH_IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Path>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( Path nodePath,
- Path constraintPath ) {
- return ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath) >= 0;
- }
-
- @Override
- public String toString() {
- return " >= ";
- }
- };
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
- * that is greater than the supplied constraint path.
- *
- * @param constraintPath the constraint path; may not be null
- * @param fieldName the name of the document field containing the path value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the path query; never null
- */
- public static ComparePathQuery createQueryForNodesWithPathGreaterThan( Path constraintPath,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
- PATH_IS_GREATER_THAN, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
- * that is greater than or equal to the supplied constraint path.
- *
- * @param constraintPath the constraint path; may not be null
- * @param fieldName the name of the document field containing the path value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the path query; never null
- */
- public static ComparePathQuery createQueryForNodesWithPathGreaterThanOrEqualTo( Path constraintPath,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
- PATH_IS_GREATER_THAN_OR_EQUAL_TO, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
- * that is less than the supplied constraint path.
- *
- * @param constraintPath the constraint path; may not be null
- * @param fieldName the name of the document field containing the path value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the path query; never null
- */
- public static ComparePathQuery createQueryForNodesWithPathLessThan( Path constraintPath,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
- PATH_IS_LESS_THAN, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a path
- * that is less than or equal to the supplied constraint path.
- *
- * @param constraintPath the constraint path; may not be null
- * @param fieldName the name of the document field containing the path value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the path query; never null
- */
- public static ComparePathQuery createQueryForNodesWithPathLessThanOrEqualTo( Path constraintPath,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new ComparePathQuery(fieldName, constraintPath, factories.getPathFactory(), factories.getStringFactory(),
- PATH_IS_LESS_THAN_OR_EQUAL_TO, caseSensitive);
- }
-
- private final boolean caseSensitive;
-
- /**
- * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
- *
- * @param fieldName the name of the document field containing the path value; may not be null
- * @param constraintPath the constraint path; may not be null
- * @param pathFactory the value factory that can be used during the scoring; may not be null
- * @param stringFactory the string factory that can be used during the scoring; may not be null
- * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
- * constraint; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- */
- protected ComparePathQuery( String fieldName,
- Path constraintPath,
- ValueFactory<Path> pathFactory,
- ValueFactory<String> stringFactory,
- Evaluator<Path> evaluator,
- boolean caseSensitive ) {
- super(fieldName, constraintPath, pathFactory, stringFactory, evaluator);
- this.caseSensitive = caseSensitive;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- protected Path readFromDocument( IndexReader reader,
- int docId ) throws IOException {
- Document doc = reader.document(docId, fieldSelector);
- String valueString = doc.get(fieldName);
- if (!caseSensitive) valueString = valueString.toLowerCase();
- return valueTypeFactory.create(valueString);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#clone()
- */
- @Override
- public Object clone() {
- return new ComparePathQuery(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, caseSensitive);
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,308 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import java.io.Serializable;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Weight;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.model.Comparison;
-
-/**
- * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the Path of nodes. This
- * query implementation works by using the {@link Query#weight(Searcher) weight} and
- * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
- * that correspond to nodes with Paths that satisfy the constraint.
- *
- * @param <ValueType>
- */
-public abstract class CompareQuery<ValueType> extends Query {
-
- private static final long serialVersionUID = 1L;
-
- protected static interface Evaluator<ValueType> extends Serializable {
- boolean satisfiesConstraint( ValueType nodeValue,
- ValueType constraintValue );
- }
-
- /**
- * The operand that is being negated by this query.
- */
- protected final String fieldName;
- protected final FieldSelector fieldSelector;
- protected final ValueType constraintValue;
- protected final Evaluator<ValueType> evaluator;
- protected final ValueFactory<ValueType> valueTypeFactory;
- protected final ValueFactory<String> stringFactory;
-
- /**
- * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
- *
- * @param fieldName the name of the document field containing the value; may not be null
- * @param constraintValue the constraint value; may not be null
- * @param valueTypeFactory the value factory that can be used during the scoring; may not be null
- * @param stringFactory the string factory that can be used during the scoring; may not be null
- * @param evaluator the {@link Evaluator} implementation that returns whether the node value satisfies the constraint; may not
- * be null
- */
- protected CompareQuery( String fieldName,
- ValueType constraintValue,
- ValueFactory<ValueType> valueTypeFactory,
- ValueFactory<String> stringFactory,
- Evaluator<ValueType> evaluator ) {
- this(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, null);
- }
-
- /**
- * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
- *
- * @param fieldName the name of the document field containing the value; may not be null
- * @param constraintValue the constraint value; may not be null
- * @param valueTypeFactory the value factory that can be used during the scoring; may not be null unless
- * {@link #readFromDocument(IndexReader, int)} is overloaded to not use it
- * @param stringFactory the string factory that can be used during the scoring; may not be null
- * @param evaluator the {@link Evaluator} implementation that returns whether the node value satisfies the constraint; may not
- * be null
- * @param fieldSelector the field selector that should load the fields needed to recover the value; may be null if the field
- * selector should be generated automatically
- */
- protected CompareQuery( final String fieldName,
- ValueType constraintValue,
- ValueFactory<ValueType> valueTypeFactory,
- ValueFactory<String> stringFactory,
- Evaluator<ValueType> evaluator,
- FieldSelector fieldSelector ) {
- this.fieldName = fieldName;
- this.constraintValue = constraintValue;
- this.valueTypeFactory = valueTypeFactory;
- this.stringFactory = stringFactory;
- this.evaluator = evaluator;
- assert this.fieldName != null;
- assert this.constraintValue != null;
- assert this.evaluator != null;
- this.fieldSelector = fieldSelector != null ? fieldSelector : new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- return CompareQuery.this.fieldName.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
- }
- };
- }
-
- protected ValueType readFromDocument( IndexReader reader,
- int docId ) throws IOException {
- Document doc = reader.document(docId, fieldSelector);
- String valueString = doc.get(fieldName);
- return valueTypeFactory.create(valueString);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
- */
- @Override
- public Weight createWeight( Searcher searcher ) {
- return new CompareWeight(searcher);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#toString(java.lang.String)
- */
- @Override
- public String toString( String field ) {
- return fieldName + " " + evaluator.toString() + " " + stringFactory != null ? stringFactory.create(constraintValue) : constraintValue.toString();
- }
-
- /**
- * Calculates query weights and builds query scores for our NOT queries.
- */
- protected class CompareWeight extends Weight {
- private static final long serialVersionUID = 1L;
- private final Searcher searcher;
-
- protected CompareWeight( Searcher searcher ) {
- this.searcher = searcher;
- assert this.searcher != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#getQuery()
- */
- @Override
- public Query getQuery() {
- return CompareQuery.this;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a weight factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#getValue()
- */
- @Override
- public float getValue() {
- return 1.0f; // weight factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a normalization factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
- */
- @Override
- public float sumOfSquaredWeights() {
- return 1.0f; // normalization factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always does nothing, as there is nothing to normalize.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#normalize(float)
- */
- @Override
- public void normalize( float norm ) {
- // No need to do anything here
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
- */
- @Override
- public Scorer scorer( IndexReader reader,
- boolean scoreDocsInOrder,
- boolean topScorer ) {
- // Return a custom scorer ...
- return new CompareScorer(reader);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public Explanation explain( IndexReader reader,
- int doc ) {
- return new Explanation(getValue(), getQuery().toString());
- }
- }
-
- /**
- * A scorer for the Path query.
- */
- protected class CompareScorer extends Scorer {
- private int docId = -1;
- private final int maxDocId;
- private final IndexReader reader;
-
- protected CompareScorer( IndexReader reader ) {
- // We don't care which Similarity we have, because we don't use it. So get the default.
- super(Similarity.getDefault());
- this.reader = reader;
- assert this.reader != null;
- this.maxDocId = this.reader.maxDoc() - 1;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#docID()
- */
- @Override
- public int docID() {
- return docId;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
- */
- @Override
- public int nextDoc() throws IOException {
- do {
- ++docId;
- if (reader.isDeleted(docId)) {
- // We should skip this document ...
- continue;
- }
- ValueType value = readFromDocument(reader, docId);
- if (evaluator.satisfiesConstraint(value, constraintValue)) return docId;
- } while (docId < maxDocId);
- return Scorer.NO_MORE_DOCS;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
- */
- @Override
- public int advance( int target ) throws IOException {
- if (target == Scorer.NO_MORE_DOCS) return target;
- while (true) {
- int doc = nextDoc();
- if (doc >= target) return doc;
- }
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
- * scored by this scorer.
- * </p>
- *
- * @see org.apache.lucene.search.Scorer#score()
- */
- @Override
- public float score() {
- return 1.0f;
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,339 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import java.util.regex.Pattern;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.Weight;
-import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.search.regex.JavaUtilRegexCapabilities;
-import org.apache.lucene.search.regex.RegexQuery;
-import org.jboss.dna.graph.property.ValueComparators;
-import org.jboss.dna.graph.property.ValueFactories;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.model.Comparison;
-
-/**
- * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against a string field. This query
- * implementation works by using the {@link Query#weight(Searcher) weight} and
- * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
- * with string fields that satisfy the constraint.
- */
-public class CompareStringQuery extends CompareQuery<String> {
-
- private static final long serialVersionUID = 1L;
- protected static final Evaluator<String> EQUAL_TO = new Evaluator<String>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( String nodeValue,
- String constraintValue ) {
- return constraintValue.equals(nodeValue);
- }
-
- @Override
- public String toString() {
- return " = ";
- }
- };
- protected static final Evaluator<String> IS_LESS_THAN = new Evaluator<String>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( String nodeValue,
- String constraintValue ) {
- return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) < 0;
- }
-
- @Override
- public String toString() {
- return " < ";
- }
- };
- protected static final Evaluator<String> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<String>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( String nodeValue,
- String constraintValue ) {
- return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) <= 0;
- }
-
- @Override
- public String toString() {
- return " <= ";
- }
- };
- protected static final Evaluator<String> IS_GREATER_THAN = new Evaluator<String>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( String nodeValue,
- String constraintValue ) {
- return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) > 0;
- }
-
- @Override
- public String toString() {
- return " > ";
- }
- };
- protected static final Evaluator<String> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<String>() {
- private static final long serialVersionUID = 1L;
-
- public boolean satisfiesConstraint( String nodeValue,
- String constraintValue ) {
- return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) >= 0;
- }
-
- @Override
- public String toString() {
- return " >= ";
- }
- };
-
- /**
- * Construct a {@link Query} implementation that scores documents with a string field value that is equal to the supplied
- * constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static Query createQueryForNodesWithFieldEqualTo( String constraintValue,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- if (caseSensitive) {
- // We can just do a normal TermQuery ...
- return new TermQuery(new Term(fieldName, constraintValue));
- }
- return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
- EQUAL_TO, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a string field value that is greater than the supplied
- * constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareStringQuery createQueryForNodesWithFieldGreaterThan( String constraintValue,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
- IS_GREATER_THAN, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a string field value that is greater than or equal to
- * the supplied constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareStringQuery createQueryForNodesWithFieldGreaterThanOrEqualTo( String constraintValue,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
- IS_GREATER_THAN_OR_EQUAL_TO, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a string field value that is less than the supplied
- * constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareStringQuery createQueryForNodesWithFieldLessThan( String constraintValue,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
- IS_LESS_THAN, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a string field value that is less than or equal to the
- * supplied constraint value.
- *
- * @param constraintValue the constraint value; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static CompareStringQuery createQueryForNodesWithFieldLessThanOrEqualTo( String constraintValue,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
- IS_LESS_THAN_OR_EQUAL_TO, caseSensitive);
- }
-
- /**
- * Construct a {@link Query} implementation that scores documents with a string field value that is LIKE the supplied
- * constraint value.
- *
- * @param likeExpression the LIKE expression; may not be null
- * @param fieldName the name of the document field containing the value; may not be null
- * @param factories the value factories that can be used during the scoring; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- * @return the query; never null
- */
- public static Query createQueryForNodesWithFieldLike( String likeExpression,
- String fieldName,
- ValueFactories factories,
- boolean caseSensitive ) {
- assert likeExpression != null;
- assert likeExpression.length() > 0;
- if (likeExpression.indexOf('%') == -1 && likeExpression.indexOf('_') == -1) {
- // This is not a like expression, so just do an equals ...
- return createQueryForNodesWithFieldEqualTo(likeExpression, fieldName, factories, caseSensitive);
- }
- if (caseSensitive) {
- // We can just do a normal Wildcard or RegEx query ...
-
- // '%' matches 0 or more characters
- // '_' matches any single character
- // '\x' matches 'x'
- // all other characters match themselves
-
- // Wildcard queries are a better match, but they can be slow and should not be used
- // if the first character of the expression is a '%' or '_' ...
- char firstChar = likeExpression.charAt(0);
- if (firstChar != '%' && firstChar != '_') {
- // Create a wildcard query ...
- String expression = toWildcardExpression(likeExpression);
- return new WildcardQuery(new Term(fieldName, expression));
- }
- }
- // Create a regex query (which will be done using the correct case) ...
- String regex = toRegularExpression(likeExpression);
- RegexQuery query = new RegexQuery(new Term(fieldName, regex));
- int flags = caseSensitive ? 0 : Pattern.CASE_INSENSITIVE;
- query.setRegexImplementation(new JavaUtilRegexCapabilities(flags));
- return query;
- }
-
- /**
- * Convert the JCR like expression to a Lucene wildcard expression. The JCR like expression uses '%' to match 0 or more
- * characters, '_' to match any single character, '\x' to match the 'x' character, and all other characters to match
- * themselves.
- *
- * @param likeExpression the like expression; may not be null
- * @return the expression that can be used with a WildcardQuery; never null
- */
- protected static String toWildcardExpression( String likeExpression ) {
- return likeExpression.replace('%', '*').replace('_', '?').replaceAll("\\\\(.)", "$1");
- }
-
- /**
- * Convert the JCR like expression to a regular expression. The JCR like expression uses '%' to match 0 or more characters,
- * '_' to match any single character, '\x' to match the 'x' character, and all other characters to match themselves. Note that
- * if any regex metacharacters appear in the like expression, they will be escaped within the resulting regular expression.
- *
- * @param likeExpression the like expression; may not be null
- * @return the expression that can be used with a WildcardQuery; never null
- */
- protected static String toRegularExpression( String likeExpression ) {
- // Replace all '\x' with 'x' ...
- String result = likeExpression.replaceAll("\\\\(.)", "$1");
- // Escape characters used as metacharacters in regular expressions, including
- // '[', '^', '\', '$', '.', '|', '?', '*', '+', '(', and ')'
- result = result.replaceAll("([\\[^\\\\$.|?*+()])", "\\$1");
- // Replace '%'->'[.]+' and '_'->'[.]
- result = likeExpression.replace("%", ".+").replace("_", ".");
- return result;
- }
-
- private final boolean caseSensitive;
-
- /**
- * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
- *
- * @param fieldName the name of the document field containing the value; may not be null
- * @param constraintValue the constraint value; may not be null
- * @param valueFactory the value factory that can be used during the scoring; may not be null
- * @param stringFactory the string factory that can be used during the scoring; may not be null
- * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
- * constraint; may not be null
- * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
- * case-insensitive
- */
- protected CompareStringQuery( String fieldName,
- String constraintValue,
- ValueFactory<String> valueFactory,
- ValueFactory<String> stringFactory,
- Evaluator<String> evaluator,
- boolean caseSensitive ) {
- super(fieldName, caseSensitive ? constraintValue : constraintValue.toLowerCase(), valueFactory, stringFactory, evaluator);
- this.caseSensitive = caseSensitive;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- protected String readFromDocument( IndexReader reader,
- int docId ) throws IOException {
- String result = super.readFromDocument(reader, docId);
- if (result == null) return null;
- return caseSensitive ? result : result.toLowerCase();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#clone()
- */
- @Override
- public Object clone() {
- return new CompareStringQuery(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, caseSensitive);
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/IdsQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/IdsQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/IdsQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,261 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import java.util.Set;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Weight;
-
-/**
- * A Lucene {@link Query} implementation that is used to score positively those documents that have a ID in the supplied set. This
- * works for large sets of IDs; in smaller numbers, it may be more efficient to create a boolean query that checks for each of the
- * IDs.
- */
-public class IdsQuery extends Query {
-
- private static final long serialVersionUID = 1L;
-
- /**
- * The operand that is being negated by this query.
- */
- protected final Set<String> uuids;
- protected final FieldSelector fieldSelector;
- protected final String fieldName;
-
- /**
- * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
- *
- * @param fieldName the name of the document field containing the value; may not be null
- * @param ids the set of ID values; may not be null
- */
- public IdsQuery( String fieldName,
- Set<String> ids ) {
- this.fieldName = fieldName;
- this.uuids = ids;
- assert this.fieldName != null;
- assert this.uuids != null;
- this.fieldSelector = new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- return fieldName.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
- }
- };
- }
-
- protected boolean includeDocument( IndexReader reader,
- int docId ) throws IOException {
- Document doc = reader.document(docId, fieldSelector);
- String valueString = doc.get(fieldName);
- return uuids.contains(valueString);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
- */
- @Override
- public Weight createWeight( Searcher searcher ) {
- return new IdSetWeight(searcher);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#toString(java.lang.String)
- */
- @Override
- public String toString( String field ) {
- return fieldName + " IN UUIDs";
- }
-
- /**
- * Calculates query weights and builds query scores for our NOT queries.
- */
- protected class IdSetWeight extends Weight {
- private static final long serialVersionUID = 1L;
- private final Searcher searcher;
-
- protected IdSetWeight( Searcher searcher ) {
- this.searcher = searcher;
- assert this.searcher != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#getQuery()
- */
- @Override
- public Query getQuery() {
- return IdsQuery.this;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a weight factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#getValue()
- */
- @Override
- public float getValue() {
- return 1.0f; // weight factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a normalization factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
- */
- @Override
- public float sumOfSquaredWeights() {
- return 1.0f; // normalization factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always does nothing, as there is nothing to normalize.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#normalize(float)
- */
- @Override
- public void normalize( float norm ) {
- // No need to do anything here
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
- */
- @Override
- public Scorer scorer( IndexReader reader,
- boolean scoreDocsInOrder,
- boolean topScorer ) {
- // Return a custom scorer ...
- return new IdScorer(reader);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public Explanation explain( IndexReader reader,
- int doc ) {
- return new Explanation(getValue(), getQuery().toString());
- }
- }
-
- /**
- * A scorer for the Path query.
- */
- protected class IdScorer extends Scorer {
- private int docId = -1;
- private final int maxDocId;
- private final IndexReader reader;
-
- protected IdScorer( IndexReader reader ) {
- // We don't care which Similarity we have, because we don't use it. So get the default.
- super(Similarity.getDefault());
- this.reader = reader;
- assert this.reader != null;
- this.maxDocId = this.reader.maxDoc();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#docID()
- */
- @Override
- public int docID() {
- return docId;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
- */
- @Override
- public int nextDoc() throws IOException {
- do {
- ++docId;
- if (reader.isDeleted(docId)) {
- // We should skip this document ...
- continue;
- }
- if (includeDocument(reader, docId)) return docId;
- } while (docId < maxDocId);
- return Scorer.NO_MORE_DOCS;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
- */
- @Override
- public int advance( int target ) throws IOException {
- if (target == Scorer.NO_MORE_DOCS) return target;
- while (true) {
- int doc = nextDoc();
- if (doc >= target) return doc;
- }
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
- * scored by this scorer.
- * </p>
- *
- * @see org.apache.lucene.search.Scorer#score()
- */
- @Override
- public float score() {
- return 1.0f;
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,216 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Weight;
-
-/**
- * A Lucene {@link Query} implementation that always matches no documents.
- */
-public class MatchNoneQuery extends Query {
-
- private static final long serialVersionUID = 1L;
-
- /**
- * Construct a query that always returns no documents.
- */
- public MatchNoneQuery() {
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
- */
- @Override
- public Weight createWeight( Searcher searcher ) {
- return new NoneWeight();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#clone()
- */
- @Override
- public Object clone() {
- return new MatchNoneQuery();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#toString(java.lang.String)
- */
- @Override
- public String toString( String field ) {
- return "NO DOCS";
- }
-
- /**
- * Calculates query weights and builds query scores for our NOT queries.
- */
- protected class NoneWeight extends Weight {
- private static final long serialVersionUID = 1L;
-
- protected NoneWeight() {
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#getQuery()
- */
- @Override
- public Query getQuery() {
- return MatchNoneQuery.this;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a weight factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#getValue()
- */
- @Override
- public float getValue() {
- return 1.0f; // weight factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a normalization factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
- */
- @Override
- public float sumOfSquaredWeights() {
- return 1.0f; // normalization factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always does nothing, as there is nothing to normalize.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#normalize(float)
- */
- @Override
- public void normalize( float norm ) {
- // No need to do anything here
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
- */
- @Override
- public Scorer scorer( IndexReader reader,
- boolean scoreDocsInOrder,
- boolean topScorer ) {
- return new NoneScorer();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public Explanation explain( IndexReader reader,
- int doc ) {
- return new Explanation(getValue(), "NO VALUES");
- }
- }
-
- /**
- * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
- * for the operand of the NOT.
- */
- protected static class NoneScorer extends Scorer {
- private int docId = -1;
-
- protected NoneScorer() {
- // We don't care which Similarity we have, because we don't use it. So get the default.
- super(Similarity.getDefault());
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#docID()
- */
- @Override
- public int docID() {
- return docId;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
- */
- @Override
- public int nextDoc() {
- docId = Scorer.NO_MORE_DOCS;
- return docId;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
- */
- @Override
- public int advance( int target ) {
- return Scorer.NO_MORE_DOCS;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
- * scored by this scorer.
- * </p>
- *
- * @see org.apache.lucene.search.Scorer#score()
- */
- @Override
- public float score() {
- return 1.0f;
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,274 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Weight;
-
-/**
- * A Lucene {@link Query} implementation that is used to represent a NOT expression of another wrapped Query object. This query
- * implementation works by using the {@link Query#weight(Searcher) weight} and
- * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
- * that were <i>not</i> scored by the wrapped query. In other words, if the wrapped query ended up scoring any document, that
- * document is <i>not</i> scored (i.e., skipped) by this query.
- */
-public class NotQuery extends Query {
-
- private static final long serialVersionUID = 1L;
-
- /**
- * The operand that is being negated by this query.
- */
- protected final Query operand;
-
- /**
- * Construct a NOT(x) constraint where the 'x' operand is supplied.
- *
- * @param operand the operand being negated
- */
- public NotQuery( Query operand ) {
- this.operand = operand;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
- */
- @Override
- public Weight createWeight( Searcher searcher ) {
- return new NotWeight(searcher);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#clone()
- */
- @Override
- public Object clone() {
- return new NotQuery(operand);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#toString(java.lang.String)
- */
- @Override
- public String toString( String field ) {
- return "NOT(" + operand.toString(field) + ")";
- }
-
- /**
- * Calculates query weights and builds query scores for our NOT queries.
- */
- protected class NotWeight extends Weight {
- private static final long serialVersionUID = 1L;
- private final Searcher searcher;
-
- protected NotWeight( Searcher searcher ) {
- this.searcher = searcher;
- assert this.searcher != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#getQuery()
- */
- @Override
- public Query getQuery() {
- return NotQuery.this;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a weight factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#getValue()
- */
- @Override
- public float getValue() {
- return 1.0f; // weight factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a normalization factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
- */
- @Override
- public float sumOfSquaredWeights() {
- return 1.0f; // normalization factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always does nothing, as there is nothing to normalize.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#normalize(float)
- */
- @Override
- public void normalize( float norm ) {
- // No need to do anything here
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
- */
- @Override
- public Scorer scorer( IndexReader reader,
- boolean scoreDocsInOrder,
- boolean topScorer ) throws IOException {
- // Get the operand's score, and set this on the NOT query
- Scorer operandScorer = operand.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
- // Return a custom scorer ...
- return new NotScorer(operandScorer, reader);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public Explanation explain( IndexReader reader,
- int doc ) throws IOException {
- Explanation operandExplanation = operand.weight(searcher).explain(reader, doc);
- String desc = operandExplanation.getDescription();
- return new Explanation(getValue(), "NOT(" + desc + ")");
- }
- }
-
- /**
- * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
- * for the operand of the NOT.
- */
- protected static class NotScorer extends Scorer {
- private int docId = -1;
- private int nextScoredDocId = -1;
- private final Scorer operandScorer;
- private final IndexReader reader;
- private final int pastMaxDocId;
-
- /**
- * @param operandScorer the scorer that is used to score the documents based upon the operand of the NOT; may not be null
- * @param reader the reader that has access to all the docs ...
- */
- protected NotScorer( Scorer operandScorer,
- IndexReader reader ) {
- // We don't care which Similarity we have, because we don't use it. So get the default.
- super(Similarity.getDefault());
- this.operandScorer = operandScorer;
- this.reader = reader;
- assert this.operandScorer != null;
- assert this.reader != null;
- this.pastMaxDocId = this.reader.maxDoc();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#docID()
- */
- @Override
- public int docID() {
- return docId;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
- */
- @Override
- public int nextDoc() throws IOException {
- if (nextScoredDocId == -1) {
- // Find the first document that is scored by the operand's scorer ...
- nextScoredDocId = operandScorer.nextDoc();
- }
- do {
- ++docId;
- if (docId == pastMaxDocId) {
- // We're aleady to the end of the documents in the index, so return no more docs
- return Scorer.NO_MORE_DOCS;
- }
- if (docId == nextScoredDocId) {
- // Find the next document that is scored by the operand's scorer ...
- nextScoredDocId = operandScorer.nextDoc();
- continue;
- }
- if (reader.isDeleted(docId)) {
- // We should skip this document ...
- continue;
- }
- return docId;
- } while (true);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
- */
- @Override
- public int advance( int target ) throws IOException {
- if (target == Scorer.NO_MORE_DOCS) return target;
- while (true) {
- int doc = nextDoc();
- if (doc >= target) return doc;
- }
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
- * scored by this scorer.
- * </p>
- *
- * @see org.apache.lucene.search.Scorer#score()
- */
- @Override
- public float score() {
- return 1.0f;
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,275 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import java.io.IOException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Weight;
-import org.jboss.dna.graph.query.model.FullTextSearchScore;
-
-/**
- * A Lucene {@link Query} implementation that is used to apply a {@link FullTextSearchScore} criteria a NOT expression of another
- * wrapped Query object. This query implementation works by using the {@link Query#weight(Searcher) weight} and
- * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
- * that were <i>not</i> scored by the wrapped query. In other words, if the wrapped query ended up scoring any document, that
- * document is <i>not</i> scored (i.e., skipped) by this query.
- */
-public class ScoreQuery extends Query {
-
- private static final long serialVersionUID = 1L;
-
- /**
- * The operand that is being negated by this query.
- */
- protected final Query operand;
-
- /**
- * Construct a NOT(x) constraint where the 'x' operand is supplied.
- *
- * @param operand the operand being negated
- */
- public ScoreQuery( Query operand ) {
- this.operand = operand;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
- */
- @Override
- public Weight createWeight( Searcher searcher ) {
- return new NotWeight(searcher);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#clone()
- */
- @Override
- public Object clone() {
- return new ScoreQuery(operand);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Query#toString(java.lang.String)
- */
- @Override
- public String toString( String field ) {
- return "NOT(" + operand.toString(field) + ")";
- }
-
- /**
- * Calculates query weights and builds query scores for our NOT queries.
- */
- protected class NotWeight extends Weight {
- private static final long serialVersionUID = 1L;
- private final Searcher searcher;
-
- protected NotWeight( Searcher searcher ) {
- this.searcher = searcher;
- assert this.searcher != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#getQuery()
- */
- @Override
- public Query getQuery() {
- return ScoreQuery.this;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a weight factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#getValue()
- */
- @Override
- public float getValue() {
- return 1.0f; // weight factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always returns a normalization factor of 1.0.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
- */
- @Override
- public float sumOfSquaredWeights() {
- return 1.0f; // normalization factor of 1.0
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation always does nothing, as there is nothing to normalize.
- * </p>
- *
- * @see org.apache.lucene.search.Weight#normalize(float)
- */
- @Override
- public void normalize( float norm ) {
- // No need to do anything here
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
- */
- @Override
- public Scorer scorer( IndexReader reader,
- boolean scoreDocsInOrder,
- boolean topScorer ) throws IOException {
- // Get the operand's score, and set this on the NOT query
- Scorer operandScorer = operand.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
- // Return a custom scorer ...
- return new NotScorer(operandScorer, reader);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public Explanation explain( IndexReader reader,
- int doc ) throws IOException {
- Explanation operandExplanation = operand.weight(searcher).explain(reader, doc);
- String desc = operandExplanation.getDescription();
- return new Explanation(getValue(), "NOT(" + desc + ")");
- }
- }
-
- /**
- * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
- * for the operand of the NOT.
- */
- protected static class NotScorer extends Scorer {
- private int docId = -1;
- private int nextScoredDocId = -1;
- private final Scorer operandScorer;
- private final IndexReader reader;
- private final int pastMaxDocId;
-
- /**
- * @param operandScorer the scorer that is used to score the documents based upon the operand of the NOT; may not be null
- * @param reader the reader that has access to all the docs ...
- */
- protected NotScorer( Scorer operandScorer,
- IndexReader reader ) {
- // We don't care which Similarity we have, because we don't use it. So get the default.
- super(Similarity.getDefault());
- this.operandScorer = operandScorer;
- this.reader = reader;
- assert this.operandScorer != null;
- assert this.reader != null;
- this.pastMaxDocId = this.reader.maxDoc();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#docID()
- */
- @Override
- public int docID() {
- return docId;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
- */
- @Override
- public int nextDoc() throws IOException {
- if (nextScoredDocId == -1) {
- // Find the first document that is scored by the operand's scorer ...
- nextScoredDocId = operandScorer.nextDoc();
- }
- do {
- ++docId;
- if (docId == pastMaxDocId) {
- // We're aleady to the end of the documents in the index, so return no more docs
- return Scorer.NO_MORE_DOCS;
- }
- if (docId == nextScoredDocId) {
- // Find the next document that is scored by the operand's scorer ...
- nextScoredDocId = operandScorer.nextDoc();
- continue;
- }
- if (reader.isDeleted(docId)) {
- // We should skip this document ...
- continue;
- }
- return docId;
- } while (true);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
- */
- @Override
- public int advance( int target ) throws IOException {
- if (target == Scorer.NO_MORE_DOCS) return target;
- while (true) {
- int doc = nextDoc();
- if (doc >= target) return doc;
- }
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
- * scored by this scorer.
- * </p>
- *
- * @see org.apache.lucene.search.Scorer#score()
- */
- @Override
- public float score() {
- return 1.0f;
- }
- }
-}
Deleted: trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties
===================================================================
--- trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,33 +0,0 @@
-#
-# JBoss DNA (http://www.jboss.org/dna)
-# See the COPYRIGHT.txt file distributed with this work for information
-# regarding copyright ownership. Some portions may be licensed
-# to Red Hat, Inc. under one or more contributor license agreements.
-# See the AUTHORS.txt file in the distribution for a full listing of
-# individual contributors.
-#
-# JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
-# is licensed to you under the terms of the GNU Lesser General Public License as
-# published by the Free Software Foundation; either version 2.1 of
-# the License, or (at your option) any later version.
-#
-# JBoss DNA is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this software; if not, write to the Free
-# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
-#
-
-locationForIndexesIsNotDirectory = Location "{0}" cannot be used for search indexes for workspace "{1}" because it is a directory
-locationForIndexesCannotBeRead = Location "{0}" cannot be used for search indexes for workspace "{1}" because it cannot be read
-locationForIndexesCannotBeWritten = Location "{0}" cannot be used for search indexes for workspace "{1}" because its contents cannot be written or updated
-
-errorWhileCommittingIndexChanges = Error while committing changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
-errorWhileRollingBackIndexChanges = Error while rolling back changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
-errorWhilePerformingSearch = Error while searching the "{0}" workspace in the "{1}" source for "{2}": {3}
-errorWhilePerformingQuery = Error while performing the query "{0}" against the content in the "{1}" workspace of the "{2}" source: {3}
-errorWhilePerformingLuceneQuery = Error while performing the Lucene query "{0}" as part of the "{1}" query against the "{2}" workspace of the "{3}" source: {4}
\ No newline at end of file
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/EncodingNamespaceRegistryTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/EncodingNamespaceRegistryTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/EncodingNamespaceRegistryTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,102 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import java.util.Collection;
-import org.jboss.dna.common.text.SecureHashTextEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.common.util.SecureHash.Algorithm;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.property.NamespaceRegistry;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.NamespaceRegistry.Namespace;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- *
- */
-public class EncodingNamespaceRegistryTest {
-
- private ExecutionContext context;
- private NamespaceRegistry registry;
- private EncodingNamespaceRegistry encodedRegistry;
- private TextEncoder encoder;
- private ExecutionContext encodedContext;
-
- @Before
- public void beforeEach() {
- this.context = new ExecutionContext();
- this.registry = this.context.getNamespaceRegistry();
- this.encoder = new SecureHashTextEncoder(Algorithm.SHA_1, 10);
- this.encodedRegistry = new EncodingNamespaceRegistry(registry, encoder);
- this.encodedContext = context.with(encodedRegistry);
- }
-
- @Test
- public void shouldHaveEncodedPrefixesForAllRegisteredNamespacesExceptFixedOnes() {
- Collection<Namespace> namespaces = registry.getNamespaces();
- assertThat(namespaces.size() > 4, is(true));
- for (Namespace namespace : namespaces) {
- String uri = namespace.getNamespaceUri();
- String actualEncodedPrefix = encodedRegistry.getPrefixForNamespaceUri(uri, false);
- if (encodedRegistry.getFixedNamespaceUris().contains(uri)) {
- assertThat(actualEncodedPrefix, is(namespace.getPrefix()));
- } else {
- String expectedEncodedPrefix = encoder.encode(uri);
- assertThat(expectedEncodedPrefix, is(actualEncodedPrefix));
- }
- String actualUri = encodedRegistry.getNamespaceForPrefix(actualEncodedPrefix);
- assertThat(uri, is(actualUri));
- }
- }
-
- @Test
- public void shouldAllowPathConversionToAndFromString() {
- String uri1 = "http://acme.com/wabbler";
- String uri2 = "http://troublemakers.com/contixity";
- String uri3 = "http://example.com/infinitiy";
- String ns1 = "wab";
- String ns2 = "ctx";
- String ns3 = "inf";
- registry.register(ns1, uri1);
- registry.register(ns2, uri2);
- registry.register(ns3, uri3);
- String pathStr = "/wab:part1/wab:part2/ctx:part3/inf:part4/dna:part5";
- Path actualPath = context.getValueFactories().getPathFactory().create(pathStr);
- String actualPathStr = context.getValueFactories().getStringFactory().create(actualPath);
- assertThat(pathStr, is(actualPathStr));
- String encodedPathStr = encodedContext.getValueFactories().getStringFactory().create(actualPath);
- String encodedPrefix1 = encoder.encode(uri1);
- String encodedPrefix2 = encoder.encode(uri2);
- String encodedPrefix3 = encoder.encode(uri3);
- String expectedPathStr = "/" + encodedPrefix1 + ":part1/" + encodedPrefix1 + ":part2/" + encodedPrefix2 + ":part3/"
- + encodedPrefix3 + ":part4/dna:part5";
- assertThat(expectedPathStr, is(encodedPathStr));
- Path actualPath2 = encodedContext.getValueFactories().getPathFactory().create(encodedPathStr);
- assertThat(actualPath, is(actualPath2));
- }
-}
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,60 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import org.apache.lucene.document.Field;
-import org.jboss.dna.search.IndexRules.Builder;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- *
- */
-public class IndexingRulesTest {
-
- private Builder builder;
- private IndexRules rules;
-
- @Before
- public void beforeEach() {
- builder = IndexRules.createBuilder();
- rules = builder.build();
- }
-
- @Test
- public void shouldBuildValidRulesFromBuilderThatIsNotInvoked() {
- builder = IndexRules.createBuilder();
- rules = builder.build();
- }
-
- @Test
- public void shouldBuildValidRulesFromBuilderAfterJustSettingDefaultRules() {
- builder.defaultTo(Field.Store.NO, Field.Index.ANALYZED_NO_NORMS);
- rules = builder.build();
- assertThat(rules.getRule(null).getIndexOption(), is(Field.Index.ANALYZED_NO_NORMS));
- assertThat(rules.getRule(null).getStoreOption(), is(Field.Store.NO));
- }
-}
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,423 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNull.notNullValue;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
-import java.io.IOException;
-import java.util.List;
-import org.apache.lucene.document.Field;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Graph;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.connector.RepositoryConnection;
-import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
-import org.jboss.dna.graph.connector.RepositorySourceException;
-import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
-import org.jboss.dna.graph.property.Name;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.model.TypeSystem;
-import org.jboss.dna.graph.query.parse.SqlQueryParser;
-import org.jboss.dna.graph.query.validate.ImmutableSchemata;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.search.SearchEngine;
-import org.jboss.dna.graph.search.SearchProvider;
-import org.junit.Before;
-import org.junit.Test;
-import org.xml.sax.SAXException;
-
-public class SearchEngineTest {
-
- private SearchEngine engine;
- private SearchProvider provider;
- private ExecutionContext context;
- private TypeSystem typeSystem;
- private String sourceName;
- private String workspaceName1;
- private String workspaceName2;
- private InMemoryRepositorySource source;
- private RepositoryConnectionFactory connectionFactory;
- private Graph content;
- private Schemata schemata;
- private SqlQueryParser sql;
-
- @Before
- public void beforeEach() throws Exception {
- context = new ExecutionContext();
- typeSystem = context.getValueFactories().getTypeSystem();
- sourceName = "sourceA";
- workspaceName1 = "workspace1";
- workspaceName2 = "workspace2";
-
- // Set up the source and graph instance ...
- source = new InMemoryRepositorySource();
- source.setName(sourceName);
- content = Graph.create(source, context);
-
- // Create the workspaces ...
- content.createWorkspace().named(workspaceName1);
- content.createWorkspace().named(workspaceName2);
-
- // Set up the connection factory ...
- connectionFactory = new RepositoryConnectionFactory() {
- @SuppressWarnings( "synthetic-access" )
- public RepositoryConnection createConnection( String sourceName ) throws RepositorySourceException {
- return source.getConnection();
- }
- };
-
- // Set up the provider and the search engine ...
- IndexRules.Builder rulesBuilder = IndexRules.createBuilder(DualIndexSearchProvider.DEFAULT_RULES);
- rulesBuilder.defaultTo(Field.Store.YES, Field.Index.NOT_ANALYZED);
- rulesBuilder.stringField(name("model"), Field.Store.YES, Field.Index.ANALYZED);
- rulesBuilder.integerField(name("year"), Field.Store.YES, Field.Index.NOT_ANALYZED);
- rulesBuilder.floatField(name("userRating"), Field.Store.YES, Field.Index.NOT_ANALYZED, 0.0f, 10.0f);
- rulesBuilder.integerField(name("mpgCity"), Field.Store.YES, Field.Index.NOT_ANALYZED, 0, 50);
- rulesBuilder.integerField(name("mpgHighway"), Field.Store.YES, Field.Index.NOT_ANALYZED, 0, 50);
- // rulesBuilder.analyzeAndStoreAndFullText(name("maker"));
- IndexRules rules = rulesBuilder.build();
- LuceneConfiguration luceneConfig = LuceneConfigurations.inMemory();
- // LuceneConfiguration luceneConfig = LuceneConfigurations.using(new File("target/testIndexes"));
- provider = new DualIndexSearchProvider(luceneConfig, rules);
- engine = new SearchEngine(context, sourceName, connectionFactory, provider);
- loadContent();
-
- // Create the schemata for the workspaces ...
- schemata = ImmutableSchemata.createBuilder(typeSystem)
- .addTable("__ALLNODES__", "maker", "model", "year", "msrp", "mpgHighway", "mpgCity")
- .makeSearchable("__ALLNODES__", "maker")
- .build();
-
- // And create the SQL parser ...
- sql = new SqlQueryParser();
- }
-
- protected Name name( String name ) {
- return context.getValueFactories().getNameFactory().create(name);
- }
-
- protected Path path( String path ) {
- return context.getValueFactories().getPathFactory().create(path);
- }
-
- protected void loadContent() throws IOException, SAXException {
- // Load the content ...
- content.useWorkspace(workspaceName1);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
- content.useWorkspace(workspaceName2);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("aircraft.xml")).into("/");
- }
-
- @Test
- public void shouldIndexAllContentInRepositorySource() throws Exception {
- engine.index(3);
- }
-
- @Test
- public void shouldIndexAllContentInWorkspace() throws Exception {
- engine.index(workspaceName1, 3);
- engine.index(workspaceName2, 5);
- }
-
- @Test
- public void shouldIndexAllContentInWorkspaceBelowPath() throws Exception {
- engine.index(workspaceName1, path("/Cars/Hybrid"), 3);
- engine.index(workspaceName2, path("/Aircraft/Commercial"), 5);
- }
-
- @Test
- public void shouldReIndexAllContentInWorkspaceBelowPath() throws Exception {
- for (int i = 0; i != 0; i++) {
- engine.index(workspaceName1, path("/Cars/Hybrid"), 3);
- engine.index(workspaceName2, path("/Aircraft/Commercial"), 5);
- }
- }
-
- @Test
- public void shouldHaveLoadedTestContentIntoRepositorySource() {
- content.useWorkspace(workspaceName1);
- assertThat(content.getNodeAt("/Cars/Hybrid/Toyota Prius").getProperty("msrp").getFirstValue(), is((Object)"$21,500"));
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfOne() {
- engine.index(workspaceName1, path("/"), 1);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTwo() {
- engine.index(workspaceName1, path("/"), 2);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfThree() {
- engine.index(workspaceName1, path("/"), 3);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfFour() {
- engine.index(workspaceName1, path("/"), 4);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTen() {
- engine.index(workspaceName1, path("/"), 10);
- }
-
- @Test
- public void shouldIndexRepositoryContentStartingAtNonRootNode() {
- engine.index(workspaceName1, path("/Cars"), 10);
- }
-
- @Test
- public void shouldReIndexRepositoryContentStartingAtNonRootNode() {
- engine.index(workspaceName1, path("/Cars"), 10);
- engine.index(workspaceName1, path("/Cars"), 10);
- engine.index(workspaceName1, path("/Cars"), 10);
- }
-
- // ----------------------------------------------------------------------------------------------------------------
- // Full-text search
- // ----------------------------------------------------------------------------------------------------------------
-
- @Test
- public void shouldFindNodesByFullTextSearch() {
- engine.index(workspaceName1, path("/"), 100);
- List<Location> results = engine.fullTextSearch(context, workspaceName1, "Toyota Prius", 10, 0);
- assertThat(results, is(notNullValue()));
- assertThat(results.size(), is(2));
- assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
- assertThat(results.get(1).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
- }
-
- @Test
- public void shouldFindNodesByFullTextSearchWithOffset() {
- engine.index(workspaceName1, path("/"), 100);
- List<Location> results = engine.fullTextSearch(context, workspaceName1, "toyota prius", 1, 0);
- assertThat(results, is(notNullValue()));
- assertThat(results.size(), is(1));
- assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
-
- results = engine.fullTextSearch(context, workspaceName1, "+Toyota", 1, 1);
- assertThat(results, is(notNullValue()));
- assertThat(results.size(), is(1));
- assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
- }
-
- @Test
- public void shouldFindNodesBySimpleXpathQuery() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(18));
- System.out.println(results);
- }
-
- // ----------------------------------------------------------------------------------------------------------------
- // Query
- // ----------------------------------------------------------------------------------------------------------------
-
- @Test
- public void shouldFindNodesBySimpleQuery() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(18));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithEqualityComparisonCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE maker = 'Toyota'", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(2));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithGreaterThanComparisonCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker, mpgHighway, mpgCity FROM __ALLNODES__ WHERE mpgHighway > 20",
- typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(6));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithLowercaseEqualityComparisonCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE LOWER(maker) = 'toyota'", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(2));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithUppercaseEqualityComparisonCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE UPPER(maker) = 'TOYOTA'", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(2));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithLikeComparisonCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE maker LIKE 'Toyo%'", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(2));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithLikeComparisonCriteriaWithLeadingWildcard() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE maker LIKE '%yota'", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(2));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithLowercaseLikeComparisonCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE LOWER(maker) LIKE 'toyo%'", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(2));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithFullTextSearchCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE CONTAINS(maker,'martin')", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(1));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithDepthCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE DEPTH() > 2", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(12));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithLocalNameCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE LOCALNAME() LIKE 'Toyota%' OR LOCALNAME() LIKE 'Land %'",
- typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(4));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithNameCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE NAME() LIKE 'Toyota%[1]' OR NAME() LIKE 'Land %'",
- typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(4));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithNameCriteriaThatMatchesNoNodes() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE NAME() LIKE 'Toyota%[2]'", typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(0));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithPathCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE PATH() LIKE '/Cars[%]/Hy%/Toyota%' OR PATH() LIKE '/Cars[1]/Utility[1]/%'",
- typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(6));
- System.out.println(results);
- }
-
- @Test
- public void shouldFindNodesBySimpleQueryWithDescendantCriteria() {
- engine.index(workspaceName1, path("/"), 100);
- QueryCommand query = sql.parseQuery("SELECT model, maker FROM __ALLNODES__ WHERE ISDESCENDANTNODE('/Cars/Hybrid')",
- typeSystem);
- QueryResults results = engine.query(context, workspaceName1, query, schemata);
- assertNoErrors(results);
- assertThat(results, is(notNullValue()));
- assertThat(results.getRowCount(), is(3));
- System.out.println(results);
- }
-
- protected void assertNoErrors( QueryResults results ) {
- if (results.getProblems().hasErrors()) {
- fail("Found errors: " + results.getProblems());
- }
- assertThat(results.getProblems().hasErrors(), is(false));
- }
-
-}
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/SearchI18nTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/SearchI18nTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/SearchI18nTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,33 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import org.jboss.dna.common.AbstractI18nTest;
-
-public class SearchI18nTest extends AbstractI18nTest {
-
- public SearchI18nTest() {
- super(SearchI18n.class);
- }
-}
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,148 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.stub;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Similarity;
-import org.junit.Test;
-
-public class NotQueryTest {
-
- @Test
- public void scorerShouldSkipAdjacentDocsIfScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(0, 1, 2, 3, 4);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 5, 6, 7, 8, 9);
- }
-
- @Test
- public void scorerShouldSkipDocsAtEndIfScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(8, 9);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 0, 1, 2, 3, 4, 5, 6, 7);
- }
-
- @Test
- public void scorerShouldScoreFirstDocsIfNotScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(2, 3, 4);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 0, 1, 5, 6, 7, 8, 9);
- }
-
- @Test
- public void scorerShouldScoreNonAdjacentDocsNotScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(2, 4, 8);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 0, 1, 3, 5, 6, 7, 9);
- }
-
- protected void assertScores( Scorer scorer,
- int... docIds ) throws IOException {
- for (int docId : docIds) {
- assertThat(scorer.nextDoc(), is(docId));
- assertThat(scorer.score(), is(1.0f));
- }
- assertThat(scorer.nextDoc(), is(Scorer.NO_MORE_DOCS));
- }
-
- protected static class MockScorer extends Scorer {
- private final Iterator<Integer> docIds;
-
- protected MockScorer( int... docIds ) {
- super(Similarity.getDefault());
- List<Integer> ids = new ArrayList<Integer>();
- for (int docId : docIds) {
- ids.add(new Integer(docId));
- }
- this.docIds = ids.iterator();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
- */
- @Override
- public int advance( int target ) {
- int doc;
- while ((doc = nextDoc()) < target) {
- }
- return doc;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#docID()
- */
- @Override
- public int docID() {
- return nextDoc();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
- */
- @Override
- public int nextDoc() {
- if (docIds.hasNext()) return docIds.next();
- return Scorer.NO_MORE_DOCS;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Scorer#score()
- */
- @Override
- public float score() {
- throw new UnsupportedOperationException("Should not be called");
- }
- }
-
-}
Deleted: trunk/dna-search/src/test/resources/aircraft.xml
===================================================================
--- trunk/dna-search/src/test/resources/aircraft.xml 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/resources/aircraft.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ~ JBoss DNA (http://www.jboss.org/dna)
- ~
- ~ See the COPYRIGHT.txt file distributed with this work for information
- ~ regarding copyright ownership. Some portions may be licensed
- ~ to Red Hat, Inc. under one or more contributor license agreements.
- ~ See the AUTHORS.txt file in the distribution for a full listing of
- ~ individual contributors.
- ~
- ~ JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- ~ is licensed to you under the terms of the GNU Lesser General Public License as
- ~ published by the Free Software Foundation; either version 2.1 of
- ~ the License, or (at your option) any later version.
- ~
- ~ JBoss DNA is distributed in the hope that it will be useful,
- ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- ~ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
- ~ for more details.
- ~
- ~ You should have received a copy of the GNU Lesser General Public License
- ~ along with this distribution; if not, write to:
- ~ Free Software Foundation, Inc.
- ~ 51 Franklin Street, Fifth Floor
- ~ Boston, MA 02110-1301 USA
- -->
-<Aircraft xmlns:jcr="http://www.jcp.org/jcr/1.0">
- <Business>
- <aircraft jcr:name="Gulfstream V" maker="Gulfstream" model="G-V" introduced="1995" range="5800nm" cruiseSpeed="488kt" crew="2" emptyWeight="46200lb" url="http://en.wikipedia.org/wiki/Gulfstream_V"/>
- <aircraft jcr:name="Learjet 45" maker="Learjet" model="LJ45" introduced="1995" numberBuilt="264+" crew="2" emptyWeight="13695lb" range="2120nm" cruiseSpeed="457kt" url="http://en.wikipedia.org/wiki/Learjet_45"/>
- </Business>
- <Commercial>
- <aircraft jcr:name="Boeing 777" maker="Boeing" model="777-200LR" introduced="1995" numberBuilt="731+" maxRange="7500nm" emptyWeight="326000lb" cruiseSpeed="560mph" url="http://en.wikipedia.org/wiki/Boeing_777"/>
- <aircraft jcr:name="Boeing 767" maker="Boeing" model="767-200" introduced="1982" numberBuilt="966+" maxRange="3950nm" emptyWeight="176650lb" cruiseSpeed="530mph" url="http://en.wikipedia.org/wiki/Boeing_767"/>
- <aircraft jcr:name="Boeing 787" maker="Boeing" model="787-3" introduced="2009" range="3050nm" emptyWeight="223000lb" cruiseSpeed="561mph" url="http://en.wikipedia.org/wiki/Boeing_787"/>
- <aircraft jcr:name="Boeing 757" maker="Boeing" model="757-200" introduced="1983" numberBuilt="1050" range="3900nm" maxWeight="255000lb" cruiseSpeed="530mph" url="http://en.wikipedia.org/wiki/Boeing_757"/>
- <aircraft jcr:name="Airbus A380" maker="Airbus" model="A380-800" introduced="2007" numberBuilt="18" range="8200nm" maxWeight="1235000lb" cruiseSpeed="647mph" url="http://en.wikipedia.org/wiki/Airbus_a380"/>
- <aircraft jcr:name="Airbus A340" maker="Airbus" model="A340-200" introduced="1993" numberBuilt="354" range="8000nm" maxWeight="606300lb" cruiseSpeed="557mph" url="http://en.wikipedia.org/wiki/Airbus_A-340"/>
- <aircraft jcr:name="Airbus A310" maker="Airbus" model="A310-200" introduced="1983" numberBuilt="255" cruiseSpeed="850km/h" emptyWeight="176312lb" range="3670nm" url="http://en.wikipedia.org/wiki/Airbus_A-310"/>
- <aircraft jcr:name="Embraer RJ-175" maker="Embraer" model="ERJ170-200" introduced="2004" range="3334km" cruiseSpeed="481kt" emptyWeight="21810kg" url="http://en.wikipedia.org/wiki/EMBRAER_170"/>
- </Commercial>
- <Vintage>
- <aircraft jcr:name="Fokker Trimotor" maker="Fokker" model="F.VII" introduced="1925" cruiseSpeed="170km/h" emptyWeight="3050kg" crew="2" url="http://en.wikipedia.org/wiki/Fokker_trimotor"/>
- <aircraft jcr:name="P-38 Lightning" maker="Lockheed" model="P-38" designedBy="Kelly Johnson" introduced="1941" numberBuilt="10037" rateOfClimb="4750ft/min" range="1300mi" emptyWeight="12780lb" crew="1" url="http://en.wikipedia.org/wiki/P-38_Lightning"/>
- <aircraft jcr:name="A6M Zero" maker="Mitsubishi" model="A6M" designedBy="Jiro Horikoshi" introduced="1940" numberBuilt="11000" crew="1" emptyWeight="3704lb" serviceCeiling="33000ft" maxSpeed="331mph" range="1929mi" rateOfClimb="3100ft/min" url="http://en.wikipedia.org/wiki/A6M_Zero"/>
- <aircraft jcr:name="Bf 109" maker="Messerschmitt" model="Bf 109" introduced="1937" url="http://en.wikipedia.org/wiki/BF_109"/>
- <aircraft jcr:name="Wright Flyer" maker="Wright Brothers" introduced="1903" range="852ft" maxSpeed="30mph" emptyWeight="605lb" crew="1"/>
- </Vintage>
- <Homebuilt>
- <aircraft jcr:name="Long-EZ" maker="Rutan Aircraft Factory" model="61" emptyWeight="760lb" fuelCapacity="200L" maxSpeed="185kt" since="1976" range="1200nm" url="http://en.wikipedia.org/wiki/Rutan_Long-EZ"/>
- <aircraft jcr:name="Cirrus VK-30" maker="Cirrus Design" model="VK-30" emptyWeight="2400lb" maxLoad="1200lb" maxSpeed="250mph" rateOfClimb="1500ft/min" range="1300mi" url="http://en.wikipedia.org/wiki/Cirrus_VK-30"/>
- <aircraft jcr:name="Van's RV-4" maker="Van's Aircraft" model="RV-4" introduced="1980" emptyWeight="905lb" maxLoad="500lb" maxSpeed="200mph" rateOfClimb="2450ft/min" range="725mi" url="http://en.wikipedia.org/wiki/Van%27s_Aircraft_RV-4"/>
- </Homebuilt>
-</Aircraft>
\ No newline at end of file
Deleted: trunk/dna-search/src/test/resources/cars.xml
===================================================================
--- trunk/dna-search/src/test/resources/cars.xml 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/resources/cars.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,48 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ~ JBoss DNA (http://www.jboss.org/dna)
- ~
- ~ See the COPYRIGHT.txt file distributed with this work for information
- ~ regarding copyright ownership. Some portions may be licensed
- ~ to Red Hat, Inc. under one or more contributor license agreements.
- ~ See the AUTHORS.txt file in the distribution for a full listing of
- ~ individual contributors.
- ~
- ~ JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- ~ is licensed to you under the terms of the GNU Lesser General Public License as
- ~ published by the Free Software Foundation; either version 2.1 of
- ~ the License, or (at your option) any later version.
- ~
- ~ JBoss DNA is distributed in the hope that it will be useful,
- ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- ~ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
- ~ for more details.
- ~
- ~ You should have received a copy of the GNU Lesser General Public License
- ~ along with this distribution; if not, write to:
- ~ Free Software Foundation, Inc.
- ~ 51 Franklin Street, Fifth Floor
- ~ Boston, MA 02110-1301 USA
- -->
-<Cars xmlns:jcr="http://www.jcp.org/jcr/1.0">
- <Hybrid>
- <car jcr:name="Toyota Prius" maker="Toyota" model="Prius" year="2008" msrp="$21,500" userRating="4.2" valueRating="5" mpgCity="48" mpgHighway="45"/>
- <car jcr:name="Toyota Highlander" maker="Toyota" model="Highlander" year="2008" msrp="$34,200" userRating="4" valueRating="5" mpgCity="27" mpgHighway="25"/>
- <car jcr:name="Nissan Altima" maker="Nissan" model="Altima" year="2008" msrp="$18,260" mpgCity="23" mpgHighway="32"/>
- </Hybrid>
- <Sports>
- <car jcr:name="Aston Martin DB9" maker="Aston Martin" model="DB9" year="2008" msrp="$171,600" userRating="5" mpgCity="12" mpgHighway="19" lengthInInches="185.5" wheelbaseInInches="108.0" engine="5,935 cc 5.9 liters V 12"/>
- <car jcr:name="Infiniti G37" maker="Infiniti" model="G37" year="2008" msrp="$34,900" userRating="3.5" valueRating="4" mpgCity="18" mpgHighway="24" />
- </Sports>
- <Luxury>
- <car jcr:name="Cadillac DTS" maker="Cadillac" model="DTS" year="2008" engine="3.6-liter V6" userRating="0"/>
- <car jcr:name="Bentley Continental" maker="Bentley" model="Continental" year="2008" msrp="$170,990" mpgCity="10" mpgHighway="17" />
- <car jcr:name="Lexus IS350" maker="Lexus" model="IS350" year="2008" msrp="$36,305" mpgCity="18" mpgHighway="25" userRating="4" valueRating="5" />
- </Luxury>
- <Utility>
- <car jcr:name="Land Rover LR2" maker="Land Rover" model="LR2" year="2008" msrp="$33,985" userRating="4.5" valueRating="5" mpgCity="16" mpgHighway="23" />
- <car jcr:name="Land Rover LR3" maker="Land Rover" model="LR3" year="2008" msrp="$48,525" userRating="5" valueRating="2" mpgCity="12" mpgHighway="17" />
- <car jcr:name="Hummer H3" maker="Hummer" model="H3" year="2008" msrp="$30,595" userRating="3.5" valueRating="4" mpgCity="13" mpgHighway="16" />
- <car jcr:name="Ford F-150" maker="Ford" model="F-150" year="2008" msrp="$23,910" userRating="4" valueRating="1" mpgCity="14" mpgHighway="20" />
- </Utility>
-</Cars>
\ No newline at end of file
Deleted: trunk/dna-search/src/test/resources/log4j.properties
===================================================================
--- trunk/dna-search/src/test/resources/log4j.properties 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/dna-search/src/test/resources/log4j.properties 2009-12-09 19:36:29 UTC (rev 1418)
@@ -1,13 +0,0 @@
-# Direct log messages to stdout
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.Target=System.out
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ABSOLUTE} %5p %m%n
-
-# Root logger option
-log4j.rootLogger=INFO, stdout
-
-# Set up the default logging to be INFO level, then override specific units
-log4j.logger.org.jboss.dna=INFO
-#log4j.logger.org.jboss.dna.search.SimpleIndexingStrategy=TRACE
-
Copied: trunk/extensions/dna-search-lucene/.classpath (from rev 1417, trunk/dna-search/.classpath)
===================================================================
--- trunk/extensions/dna-search-lucene/.classpath (rev 0)
+++ trunk/extensions/dna-search-lucene/.classpath 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="src" path="src/main/java"/>
+ <classpathentry kind="src" path="src/main/resources"/>
+ <classpathentry kind="src" output="target/test-classes" path="src/test/java"/>
+ <classpathentry kind="src" output="target/test-classes" path="src/test/resources"/>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+ <classpathentry kind="con" path="org.maven.ide.eclipse.MAVEN2_CLASSPATH_CONTAINER"/>
+ <classpathentry kind="output" path="target/classes"/>
+</classpath>
Property changes on: trunk/extensions/dna-search-lucene/.classpath
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/.project (from rev 1417, trunk/dna-search/.project)
===================================================================
--- trunk/extensions/dna-search-lucene/.project (rev 0)
+++ trunk/extensions/dna-search-lucene/.project 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>dna-search-lucene</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>org.maven.ide.eclipse.maven2Builder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ <nature>org.maven.ide.eclipse.maven2Nature</nature>
+ </natures>
+</projectDescription>
Copied: trunk/extensions/dna-search-lucene/bin/.project (from rev 1417, trunk/dna-search/.project)
===================================================================
--- trunk/extensions/dna-search-lucene/bin/.project (rev 0)
+++ trunk/extensions/dna-search-lucene/bin/.project 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>dna-search-provider-lucene</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>org.maven.ide.eclipse.maven2Builder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ <nature>org.maven.ide.eclipse.maven2Nature</nature>
+ </natures>
+</projectDescription>
Property changes on: trunk/extensions/dna-search-lucene/bin/.project
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/bin/pom.xml (from rev 1417, trunk/dna-search/pom.xml)
===================================================================
--- trunk/extensions/dna-search-lucene/bin/pom.xml (rev 0)
+++ trunk/extensions/dna-search-lucene/bin/pom.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna</artifactId>
+ <version>0.7-SNAPSHOT</version>
+ <relativePath>../..</relativePath>
+ </parent>
+ <!-- The groupId and version values are inherited from parent -->
+ <artifactId>dna-search-provider</artifactId>
+ <packaging>jar</packaging>
+ <name>JBoss DNA Search Provider for Lucene</name>
+ <description>JBoss DNA Search Provider that uses Lucene.</description>
+ <url>http://labs.jboss.org/dna</url>
+ <!--
+ Define the dependencies. Note that all version and scopes default to those defined in the dependencyManagement section of the
+ parent pom.
+ -->
+ <dependencies>
+ <!--
+ JBoss DNA
+ -->
+ <dependency>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna-graph</artifactId>
+ </dependency>
+ <!--
+ Lucene
+ -->
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-core</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-analyzers</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-snowball</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-regex</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <!--
+ Testing (note the scope)
+ -->
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna-common</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna-graph</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <!--
+ Logging (require SLF4J API for compiling, but use Log4J and its SLF4J binding for testing)
+ -->
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ </dependency>
+ <!--
+ Java Concurrency in Practice annotations
+ -->
+ <dependency>
+ <groupId>net.jcip</groupId>
+ <artifactId>jcip-annotations</artifactId>
+ </dependency>
+ </dependencies>
+ <reporting>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-report-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </reporting>
+</project>
\ No newline at end of file
Copied: trunk/extensions/dna-search-lucene/pom.xml (from rev 1417, trunk/dna-search/pom.xml)
===================================================================
--- trunk/extensions/dna-search-lucene/pom.xml (rev 0)
+++ trunk/extensions/dna-search-lucene/pom.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna</artifactId>
+ <version>0.7-SNAPSHOT</version>
+ <relativePath>../..</relativePath>
+ </parent>
+ <!-- The groupId and version values are inherited from parent -->
+ <artifactId>dna-search-provider</artifactId>
+ <packaging>jar</packaging>
+ <name>JBoss DNA Search Provider for Lucene</name>
+ <description>JBoss DNA Search Provider that uses Lucene.</description>
+ <url>http://labs.jboss.org/dna</url>
+ <!--
+ Define the dependencies. Note that all version and scopes default to those defined in the dependencyManagement section of the
+ parent pom.
+ -->
+ <dependencies>
+ <!--
+ JBoss DNA
+ -->
+ <dependency>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna-graph</artifactId>
+ </dependency>
+ <!--
+ Lucene
+ -->
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-core</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-analyzers</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-snowball</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-regex</artifactId>
+ <version>3.0.0</version>
+ </dependency>
+ <!--
+ Testing (note the scope)
+ -->
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna-common</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.jboss.dna</groupId>
+ <artifactId>dna-graph</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <!--
+ Logging (require SLF4J API for compiling, but use Log4J and its SLF4J binding for testing)
+ -->
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ </dependency>
+ <!--
+ Java Concurrency in Practice annotations
+ -->
+ <dependency>
+ <groupId>net.jcip</groupId>
+ <artifactId>jcip-annotations</artifactId>
+ </dependency>
+ </dependencies>
+ <reporting>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-report-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </reporting>
+</project>
\ No newline at end of file
Property changes on: trunk/extensions/dna-search-lucene/pom.xml
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngine.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngine.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngine.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,872 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.jboss.dna.common.collection.SimpleProblems;
+import org.jboss.dna.graph.DnaLexicon;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.GraphI18n;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
+import org.jboss.dna.graph.observe.Observer;
+import org.jboss.dna.graph.property.Binary;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.DateTimeFactory;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.NameFactory;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.property.PropertyFactory;
+import org.jboss.dna.graph.property.PropertyType;
+import org.jboss.dna.graph.property.UuidFactory;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.QueryResults.Statistics;
+import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
+import org.jboss.dna.graph.query.model.BindVariableName;
+import org.jboss.dna.graph.query.model.ChildNode;
+import org.jboss.dna.graph.query.model.Comparison;
+import org.jboss.dna.graph.query.model.Constraint;
+import org.jboss.dna.graph.query.model.DescendantNode;
+import org.jboss.dna.graph.query.model.DynamicOperand;
+import org.jboss.dna.graph.query.model.FullTextSearch;
+import org.jboss.dna.graph.query.model.FullTextSearchScore;
+import org.jboss.dna.graph.query.model.Length;
+import org.jboss.dna.graph.query.model.Literal;
+import org.jboss.dna.graph.query.model.LowerCase;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.Not;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.Or;
+import org.jboss.dna.graph.query.model.PropertyExistence;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.model.SameNode;
+import org.jboss.dna.graph.query.model.SelectorName;
+import org.jboss.dna.graph.query.model.SetCriteria;
+import org.jboss.dna.graph.query.model.StaticOperand;
+import org.jboss.dna.graph.query.model.TypeSystem;
+import org.jboss.dna.graph.query.model.UpperCase;
+import org.jboss.dna.graph.query.model.FullTextSearch.NegationTerm;
+import org.jboss.dna.graph.query.model.TypeSystem.TypeFactory;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.SelectComponent;
+import org.jboss.dna.graph.request.AccessQueryRequest;
+import org.jboss.dna.graph.request.InvalidWorkspaceException;
+import org.jboss.dna.graph.request.Request;
+import org.jboss.dna.graph.search.SearchEngine;
+import org.jboss.dna.graph.search.SearchEngineProcessor;
+import org.jboss.dna.graph.search.SearchEngineWorkspace;
+
+/**
+ * An abstract {@link SearchEngine} implementation that is set up to use the Lucene library. This provides an abstract
+ * {@link SearchEngineProcessor Processor} base class that has some commonly-needed methods, simplifying the implementation.
+ * However, this class does not presume any number or layout of the Lucene indexes, and requires a subclass to do that.
+ *
+ * @param <WorkspaceType> the type of workspace
+ * @param <ProcessorType> the type of processor
+ */
+public abstract class AbstractLuceneSearchEngine<WorkspaceType extends SearchEngineWorkspace, ProcessorType extends SearchEngineProcessor<WorkspaceType>>
+ extends SearchEngine<WorkspaceType, ProcessorType> {
+
+ /**
+ * Create a {@link SearchEngine} instance that uses Lucene.
+ *
+ * @param sourceName the name of the source that this engine will search over
+ * @param connectionFactory the factory for making connections to the source
+ * @param verifyWorkspaceInSource true if the workspaces are to be verified using the source, or false if this engine is used
+ * in a way such that all workspaces are known to exist
+ * @throws IllegalArgumentException if any of the parameters are null
+ */
+ protected AbstractLuceneSearchEngine( String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ boolean verifyWorkspaceInSource ) {
+ super(sourceName, connectionFactory, verifyWorkspaceInSource);
+ }
+
+ /**
+ * Abstract {@link SearchEngineProcessor} implementation for the {@link AbstractLuceneSearchEngine}.
+ *
+ * @param <SessionType> the type of session
+ * @param <WorkspaceType> the type of workspace
+ */
+ protected static abstract class AbstractLuceneProcessor<WorkspaceType extends SearchEngineWorkspace, SessionType extends WorkspaceSession>
+ extends SearchEngineProcessor<WorkspaceType> {
+ private final Map<String, SessionType> workspaceSessions = new HashMap<String, SessionType>();
+ protected final boolean readOnly;
+ protected final ValueFactories valueFactories;
+ protected final ValueFactory<String> stringFactory;
+ protected final DateTimeFactory dateFactory;
+ protected final PathFactory pathFactory;
+ protected final UuidFactory uuidFactory;
+ protected final NameFactory nameFactory;
+ protected final TypeSystem typeSystem;
+ protected final PropertyFactory propertyFactory;
+
+ private int changeCount;
+
+ protected AbstractLuceneProcessor( String sourceName,
+ ExecutionContext context,
+ Workspaces<WorkspaceType> workspaces,
+ Observer observer,
+ DateTime now,
+ boolean readOnly ) {
+ super(sourceName, context, workspaces, observer, now);
+ this.readOnly = readOnly;
+ this.valueFactories = context.getValueFactories();
+ this.stringFactory = valueFactories.getStringFactory();
+ this.dateFactory = valueFactories.getDateFactory();
+ this.pathFactory = valueFactories.getPathFactory();
+ this.uuidFactory = valueFactories.getUuidFactory();
+ this.nameFactory = valueFactories.getNameFactory();
+ this.typeSystem = valueFactories.getTypeSystem();
+ this.propertyFactory = context.getPropertyFactory();
+ assert this.stringFactory != null;
+ assert this.dateFactory != null;
+ }
+
+ protected abstract SessionType createSessionFor( WorkspaceType workspace );
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineProcessor#commit()
+ */
+ @Override
+ protected void commit() {
+ for (SessionType session : getSessions()) {
+ session.commit();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineProcessor#rollback()
+ */
+ @Override
+ protected void rollback() {
+ for (SessionType session : getSessions()) {
+ session.rollback();
+ }
+ }
+
+ protected WorkspaceType getWorkspace( String workspaceName,
+ boolean createIfMissing ) {
+ return getWorkspace(workspaceName, createIfMissing);
+ }
+
+ protected WorkspaceType getWorkspace( Request request,
+ String workspaceName,
+ boolean createIfMissing ) {
+ WorkspaceType workspace = workspaces.getWorkspace(getExecutionContext(), workspaceName, createIfMissing);
+ if (workspace == null) {
+ if (request != null) {
+ String msg = GraphI18n.workspaceDoesNotExistInRepository.text(workspaceName, getSourceName());
+ request.setError(new InvalidWorkspaceException(msg));
+ }
+ return null;
+ }
+ return workspace;
+ }
+
+ protected SessionType getSessionFor( Request request,
+ String workspaceName ) {
+ return getSessionFor(request, workspaceName, true);
+ }
+
+ protected SessionType getSessionFor( Request request,
+ String workspaceName,
+ boolean createIfMissing ) {
+ SessionType result = workspaceSessions.get(workspaceName);
+ if (result == null) {
+ // See if there is a workspace with the supplied name ...
+ WorkspaceType workspace = getWorkspace(request, workspaceName, createIfMissing);
+ if (workspace == null) return null;
+ result = createSessionFor(workspace);
+ workspaceSessions.put(workspaceName, result);
+ }
+ return result;
+ }
+
+ protected Collection<SessionType> getSessions() {
+ return workspaceSessions.values();
+ }
+
+ protected final String serializeProperty( Property property ) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(stringFactory.create(property.getName()));
+ sb.append('=');
+ Iterator<?> iter = property.getValues();
+ if (iter.hasNext()) {
+ sb.append(stringFactory.create(iter.next()));
+ }
+ while (iter.hasNext()) {
+ sb.append('\n');
+ sb.append(stringFactory.create(iter.next()));
+ }
+ return sb.toString();
+ }
+
+ protected final Property deserializeProperty( String propertyString ) {
+ int index = propertyString.indexOf('=');
+ assert index > -1;
+ if (index == propertyString.length() - 1) return null;
+ Name propName = nameFactory.create(propertyString.substring(0, index));
+ String valueString = propertyString.substring(index + 1);
+ // Break into multiple values if multiple lines ...
+ String[] values = valueString.split("\\n");
+ if (values.length == 0) return null;
+ if (values.length == 1) {
+ Object value = values[0];
+ if (DnaLexicon.UUID.equals(propName) || JcrLexicon.UUID.equals(propName)) {
+ value = uuidFactory.create(value);
+ }
+ return propertyFactory.create(propName, value);
+ }
+ List<String> propValues = new LinkedList<String>();
+ for (String value : values) {
+ propValues.add(value);
+ }
+ return propertyFactory.create(propName, propValues);
+ }
+
+ /**
+ * Create the field name that will be used to store the full-text searchable property values.
+ *
+ * @param propertyName the name of the property; may not be null
+ * @return the field name for the full-text searchable property values; never null
+ */
+ protected abstract String fullTextFieldName( String propertyName );
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineProcessor#optimize()
+ */
+ @Override
+ public boolean optimize() {
+ boolean result = false;
+ try {
+ for (WorkspaceSession context : getSessions()) {
+ if (context.optimize()) result = true;
+ }
+ } catch (IOException e) {
+ throw new LuceneException(e);
+ }
+ return result;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineProcessor#optimize(java.lang.String)
+ */
+ @Override
+ public boolean optimize( String workspaceName ) {
+ try {
+ return getSessionFor(null, workspaceName).optimize();
+ } catch (IOException e) {
+ throw new LuceneException(e);
+ }
+ }
+
+ /**
+ * Return whether this session made changes to the indexed state.
+ *
+ * @return true if change were made, or false otherwise
+ */
+ public boolean hasChanges() {
+ return changeCount > 0;
+ }
+
+ public String pathAsString( Path path ) {
+ assert path != null;
+ if (path.isRoot()) return "/";
+ StringBuilder sb = new StringBuilder();
+ for (Path.Segment segment : path) {
+ sb.append('/');
+ sb.append(stringFactory.create(segment.getName()));
+ sb.append('[');
+ sb.append(segment.getIndex());
+ sb.append(']');
+ }
+ return sb.toString();
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Some kinds of constraints are not easily pushed down to Lucene as are of a Lucene Query, and instead are applied by
+ * filtering the results. For example, a FullTextSearchScore applies to the score of the tuple, which cannot be (easily?)
+ * applied as a Query.
+ * </p>
+ * <p>
+ * Therefore, each of the AND-ed constraints of the query are evaluated separately. After all, each of the tuples returned
+ * by the planned query must satisfy all of the AND-ed constraints. Or, to put it another way, if a tuple does not satisfy
+ * one of the AND-ed constraints, the tuple should not be included in the query results.
+ * </p>
+ * <p>
+ * Logically, any AND-ed criteria that cannot be pushed down to Lucene can of course be applied as a filter on the
+ * results. Thus, each AND-ed constraint is processed to first determine if it can be represented as a Lucene query; all
+ * other AND-ed constraints must be handled as a results filter. Since most queries will likely use one or more simple
+ * constraints AND-ed together, this approach will likely work very well.
+ * </p>
+ * <p>
+ * The only hairy case is when any AND-ed constraint is actually an OR-ed combination of multiple constraints of which at
+ * least one cannot be pushed down to Lucene. In this case, the entire AND-ed constraint must be treated as a results
+ * filter (even if many of those constraints that make up the OR-ed constraint can be pushed down). Hopefully, this will
+ * not be a common case in actual queries.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.AccessQueryRequest)
+ */
+ @Override
+ public void process( AccessQueryRequest request ) {
+ SessionType session = getSessionFor(request, request.workspace());
+ if (session == null) return;
+
+ long planningNanos = System.nanoTime();
+ // For each of the AND-ed constraints ...
+ Query pushDownQuery = null;
+ Constraint postProcessConstraint = null;
+ try {
+ for (Constraint andedConstraint : request.andedConstraints()) {
+ // Determine if it can be represented as a Lucene query ...
+ assert andedConstraint != null;
+ Query constraintQuery = queryFactory(session, request.variables()).createQuery(andedConstraint);
+ if (constraintQuery != null) {
+ // The AND-ed constraint _can_ be represented as a push-down Lucene query ...
+ if (pushDownQuery == null) {
+ // This must be the first query ...
+ pushDownQuery = constraintQuery;
+ } else if (pushDownQuery instanceof BooleanQuery) {
+ // We have to add the constraint query to the existing boolean ...
+ BooleanQuery booleanQuery = (BooleanQuery)pushDownQuery;
+ booleanQuery.add(constraintQuery, Occur.MUST);
+ } else {
+ // This is the second push-down query, so create a BooleanQuery ...
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(pushDownQuery, Occur.MUST);
+ booleanQuery.add(constraintQuery, Occur.MUST);
+ pushDownQuery = booleanQuery;
+ }
+ } else {
+ // The AND-ed constraint _cannot_ be represented as a push-down Lucene query ...
+ if (postProcessConstraint == null) {
+ postProcessConstraint = andedConstraint;
+ } else {
+ postProcessConstraint = new And(postProcessConstraint, andedConstraint);
+ }
+ }
+ }
+ } catch (IOException e) {
+ // There was a error working with the constraints (such as a ValueFormatException) ...
+ request.setError(e);
+ return;
+ } catch (RuntimeException e) {
+ // There was a error working with the constraints (such as a ValueFormatException) ...
+ request.setError(e);
+ return;
+ }
+
+ if (pushDownQuery == null) {
+ // There are no constraints that can be pushed down, so return _all_ the nodes ...
+ pushDownQuery = new MatchAllDocsQuery();
+ }
+ long executingNanos = System.nanoTime();
+ planningNanos = executingNanos - planningNanos;
+
+ // Get the results from Lucene ...
+ List<Object[]> tuples = null;
+ final Columns columns = request.resultColumns();
+ try {
+ // Execute the query against the content indexes ...
+ IndexSearcher searcher = session.getContentSearcher();
+ TupleCollector collector = session.createTupleCollector(columns);
+ searcher.search(pushDownQuery, collector);
+ tuples = collector.getTuples();
+ } catch (IOException e) {
+ // There was a problem executing the Lucene query ...
+ request.setError(e);
+ return;
+ }
+
+ if (postProcessConstraint != null && !tuples.isEmpty()) {
+ // Create a delegate processing component that will return the tuples we've already found ...
+ final List<Object[]> allTuples = tuples;
+ QueryContext queryContext = new QueryContext(request.schemata(), typeSystem, null, new SimpleProblems(),
+ request.variables());
+ ProcessingComponent tuplesProcessor = new ProcessingComponent(queryContext, columns) {
+ @Override
+ public List<Object[]> execute() {
+ return allTuples;
+ }
+ };
+ // Create a processing component that will apply these constraints to the tuples we already found ...
+ SelectComponent selector = new SelectComponent(tuplesProcessor, postProcessConstraint, request.variables());
+ tuples = selector.execute();
+ }
+ executingNanos = System.nanoTime() - executingNanos;
+ Statistics stats = new Statistics(planningNanos, 0L, 0L, executingNanos);
+ request.setResults(tuples, stats);
+ }
+
+ protected QueryFactory queryFactory( WorkspaceSession session,
+ Map<String, Object> variables ) {
+ return new QueryFactory(session, variables);
+ }
+
+ protected class QueryFactory {
+ private final WorkspaceSession session;
+ private final Map<String, Object> variables;
+ private final String fullTextFieldName;
+
+ protected QueryFactory( WorkspaceSession session,
+ Map<String, Object> variables ) {
+ this(session, variables, null);
+ }
+
+ protected QueryFactory( WorkspaceSession session,
+ Map<String, Object> variables,
+ String fullTextFieldName ) {
+ this.session = session;
+ this.variables = variables;
+ this.fullTextFieldName = fullTextFieldName;
+ }
+
+ public Query createQuery( Constraint constraint ) throws IOException {
+ if (constraint instanceof And) {
+ And and = (And)constraint;
+ Query leftQuery = createQuery(and.getLeft());
+ Query rightQuery = createQuery(and.getRight());
+ if (leftQuery == null || rightQuery == null) return null;
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(createQuery(and.getLeft()), Occur.MUST);
+ booleanQuery.add(createQuery(and.getRight()), Occur.MUST);
+ return booleanQuery;
+ }
+ if (constraint instanceof Or) {
+ Or or = (Or)constraint;
+ Query leftQuery = createQuery(or.getLeft());
+ Query rightQuery = createQuery(or.getRight());
+ if (leftQuery == null) {
+ return rightQuery != null ? rightQuery : null;
+ } else if (rightQuery == null) {
+ return leftQuery;
+ }
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(createQuery(or.getLeft()), Occur.SHOULD);
+ booleanQuery.add(createQuery(or.getRight()), Occur.SHOULD);
+ return booleanQuery;
+ }
+ if (constraint instanceof Not) {
+ Not not = (Not)constraint;
+ Query notted = createQuery(not.getConstraint());
+ if (notted == null) return new MatchAllDocsQuery();
+ }
+ if (constraint instanceof SetCriteria) {
+ SetCriteria setCriteria = (SetCriteria)constraint;
+ DynamicOperand left = setCriteria.getLeftOperand();
+ int numRightOperands = setCriteria.getRightOperands().size();
+ assert numRightOperands > 0;
+ if (numRightOperands == 1) {
+ return createQuery(left, Operator.EQUAL_TO, setCriteria.getRightOperands().iterator().next());
+ }
+ BooleanQuery setQuery = new BooleanQuery();
+ for (StaticOperand right : setCriteria.getRightOperands()) {
+ Query rightQuery = createQuery(left, Operator.EQUAL_TO, right);
+ if (rightQuery == null) return null;
+ setQuery.add(rightQuery, Occur.SHOULD);
+ }
+ return setQuery;
+ }
+ if (constraint instanceof PropertyExistence) {
+ PropertyExistence existence = (PropertyExistence)constraint;
+ return createQuery(existence.getSelectorName(), existence.getPropertyName());
+ }
+ if (constraint instanceof Between) {
+ Between between = (Between)constraint;
+ return createQuery(between);
+ }
+ if (constraint instanceof Comparison) {
+ Comparison comparison = (Comparison)constraint;
+ return createQuery(comparison.getOperand1(), comparison.getOperator(), comparison.getOperand2());
+ }
+ if (constraint instanceof FullTextSearch) {
+ FullTextSearch search = (FullTextSearch)constraint;
+ String fieldName = this.fullTextFieldName;
+ String propertyName = search.getPropertyName();
+ if (propertyName != null) {
+ fieldName = fullTextFieldName(fieldNameFor(propertyName));
+ }
+ return createQuery(fieldName, search.getTerm());
+ }
+ if (constraint instanceof SameNode) {
+ SameNode sameNode = (SameNode)constraint;
+ Path path = pathFactory.create(sameNode.getPath());
+ return session.findNodeAt(path);
+ }
+ if (constraint instanceof ChildNode) {
+ ChildNode childNode = (ChildNode)constraint;
+ Path path = pathFactory.create(childNode.getParentPath());
+ return session.findChildNodes(path);
+ }
+ if (constraint instanceof DescendantNode) {
+ DescendantNode descendantNode = (DescendantNode)constraint;
+ Path path = pathFactory.create(descendantNode.getAncestorPath());
+ return session.findAllNodesBelow(path);
+ }
+ // Should not get here ...
+ assert false;
+ return null;
+ }
+
+ public Query createQuery( DynamicOperand left,
+ Operator operator,
+ StaticOperand right ) throws IOException {
+ return createQuery(left, operator, right, true);
+ }
+
+ public Query createQuery( DynamicOperand left,
+ Operator operator,
+ StaticOperand right,
+ boolean caseSensitive ) throws IOException {
+ // Handle the static operand ...
+ Object value = createOperand(right, caseSensitive);
+ assert value != null;
+
+ // Address the dynamic operand ...
+ if (left instanceof FullTextSearchScore) {
+ // This can only be represented as a filter ...
+ return null;
+ } else if (left instanceof PropertyValue) {
+ return session.findNodesWith((PropertyValue)left, operator, value, caseSensitive);
+ } else if (left instanceof Length) {
+ return session.findNodesWith((Length)left, operator, right);
+ } else if (left instanceof LowerCase) {
+ LowerCase lowercase = (LowerCase)left;
+ return createQuery(lowercase.getOperand(), operator, right, false);
+ } else if (left instanceof UpperCase) {
+ UpperCase lowercase = (UpperCase)left;
+ return createQuery(lowercase.getOperand(), operator, right, false);
+ } else if (left instanceof NodeDepth) {
+ assert operator != Operator.LIKE;
+ // Could be represented as a result filter, but let's do this now ...
+ return session.findNodesWith((NodeDepth)left, operator, value);
+ } else if (left instanceof NodePath) {
+ return session.findNodesWith((NodePath)left, operator, value, caseSensitive);
+ } else if (left instanceof NodeName) {
+ return session.findNodesWith((NodeName)left, operator, value, caseSensitive);
+ } else if (left instanceof NodeLocalName) {
+ return session.findNodesWith((NodeLocalName)left, operator, value, caseSensitive);
+ } else {
+ assert false;
+ return null;
+ }
+ }
+
+ public Object createOperand( StaticOperand operand,
+ boolean caseSensitive ) {
+ Object value = null;
+ if (operand instanceof Literal) {
+ Literal literal = (Literal)operand;
+ value = literal.getValue();
+ if (!caseSensitive) value = lowerCase(value);
+ } else if (operand instanceof BindVariableName) {
+ BindVariableName variable = (BindVariableName)operand;
+ String variableName = variable.getVariableName();
+ value = variables.get(variableName);
+ if (!caseSensitive) value = lowerCase(value);
+ } else {
+ assert false;
+ }
+ return value;
+ }
+
+ public Query createQuery( DynamicOperand left,
+ StaticOperand lower,
+ StaticOperand upper,
+ boolean includesLower,
+ boolean includesUpper,
+ boolean caseSensitive ) throws IOException {
+ // Handle the static operands ...
+ Object lowerValue = createOperand(lower, caseSensitive);
+ Object upperValue = createOperand(upper, caseSensitive);
+ assert lowerValue != null;
+ assert upperValue != null;
+
+ // Only in the case of a PropertyValue and Depth will we need to do something special ...
+ if (left instanceof NodeDepth) {
+ return session.findNodesWithNumericRange((NodeDepth)left,
+ lowerValue,
+ upperValue,
+ includesLower,
+ includesUpper);
+ } else if (left instanceof PropertyValue) {
+ PropertyType lowerType = PropertyType.discoverType(lowerValue);
+ PropertyType upperType = PropertyType.discoverType(upperValue);
+ if (upperType == lowerType) {
+ switch (upperType) {
+ case DATE:
+ case LONG:
+ case DOUBLE:
+ case DECIMAL:
+ return session.findNodesWithNumericRange((PropertyValue)left,
+ lowerValue,
+ upperValue,
+ includesLower,
+ includesUpper);
+ default:
+ // continue on and handle as boolean query ...
+ }
+ }
+ }
+
+ // Otherwise, just create a boolean query ...
+ BooleanQuery query = new BooleanQuery();
+ Operator lowerOp = includesLower ? Operator.GREATER_THAN_OR_EQUAL_TO : Operator.GREATER_THAN;
+ Operator upperOp = includesUpper ? Operator.LESS_THAN_OR_EQUAL_TO : Operator.LESS_THAN;
+ Query lowerQuery = createQuery(left, lowerOp, lower, caseSensitive);
+ Query upperQuery = createQuery(left, upperOp, upper, caseSensitive);
+ if (lowerQuery == null || upperQuery == null) return null;
+ query.add(lowerQuery, Occur.MUST);
+ query.add(upperQuery, Occur.MUST);
+ return query;
+ }
+
+ public Object lowerCase( Object value ) {
+ if (value instanceof String) {
+ return ((String)value).toLowerCase();
+ }
+ assert !(value instanceof Binary);
+ TypeFactory<String> stringFactory = typeSystem.getStringFactory();
+ TypeFactory<?> valueFactory = typeSystem.getTypeFactory(value);
+ return valueFactory.create(stringFactory.create(value).toLowerCase());
+ }
+
+ public Query createQuery( SelectorName selectorName,
+ String propertyName ) {
+ Term term = new Term(fieldNameFor(propertyName));
+ return new TermQuery(term);
+ }
+
+ public Query createQuery( String fieldName,
+ FullTextSearch.Term term ) {
+ if (term instanceof FullTextSearch.Conjunction) {
+ FullTextSearch.Conjunction conjunction = (FullTextSearch.Conjunction)term;
+ BooleanQuery query = new BooleanQuery();
+ for (FullTextSearch.Term nested : conjunction) {
+ if (nested instanceof NegationTerm) {
+ query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
+ } else {
+ query.add(createQuery(fieldName, nested), Occur.MUST);
+ }
+ }
+ return query;
+ }
+ if (term instanceof FullTextSearch.Disjunction) {
+ FullTextSearch.Disjunction disjunction = (FullTextSearch.Disjunction)term;
+ BooleanQuery query = new BooleanQuery();
+ for (FullTextSearch.Term nested : disjunction) {
+ if (nested instanceof NegationTerm) {
+ query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
+ } else {
+ query.add(createQuery(fieldName, nested), Occur.SHOULD);
+ }
+ }
+ return query;
+ }
+ if (term instanceof FullTextSearch.SimpleTerm) {
+ FullTextSearch.SimpleTerm simple = (FullTextSearch.SimpleTerm)term;
+ if (simple.isQuotingRequired()) {
+ PhraseQuery query = new PhraseQuery();
+ query.setSlop(0); // terms must be adjacent
+ for (String value : simple.getValues()) {
+ query.add(new Term(fieldName, value));
+ }
+ return query;
+ }
+ return new TermQuery(new Term(fieldName, simple.getValue()));
+ }
+ // Should not get here ...
+ assert false;
+ return null;
+ }
+
+ public String fieldNameFor( String name ) {
+ // Convert to a name and then to a string, so that the namespaces are resolved
+ return stringFactory.create(nameFactory.create(name));
+ }
+ }
+ }
+
+ @NotThreadSafe
+ protected static interface WorkspaceSession {
+
+ String getWorkspaceName();
+
+ boolean hasWriters();
+
+ /**
+ * Subclasses should implement this method to throw away any work that has been done with this processor.
+ */
+ void rollback();
+
+ /**
+ * Subclasses should implement this method to commit and save any work that has been done with this processor.
+ */
+ void commit();
+
+ boolean optimize() throws IOException;
+
+ IndexSearcher getContentSearcher() throws IOException;
+
+ /**
+ * Create a {@link TupleCollector} instance that collects the results from the index(es).
+ *
+ * @param columns the column definitions; never null
+ * @return the collector; never null
+ */
+ TupleCollector createTupleCollector( Columns columns );
+
+ /**
+ * Utility method to create a query to find all of the documents representing nodes with the supplied IDs.
+ *
+ * @param ids the IDs of the nodes that are to be found; may not be null
+ * @return the query; never null
+ * @throws IOException if there is a problem creating this query
+ */
+ Query findAllNodesWithIds( Set<String> ids ) throws IOException;
+
+ Query findAllNodesBelow( Path ancestorPath ) throws IOException;
+
+ /**
+ * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
+ * supplied path.
+ *
+ * @param parentPath the path of the parent node.
+ * @return the query; never null
+ * @throws IOException if there is an error creating the query
+ */
+ Query findChildNodes( Path parentPath ) throws IOException;
+
+ /**
+ * Create a query that can be used to find the one document (or node) that exists at the exact path supplied.
+ *
+ * @param path the path of the node
+ * @return the query; never null
+ * @throws IOException if there is an error creating the query
+ */
+ Query findNodeAt( Path path ) throws IOException;
+
+ /**
+ * Create a query that can be used to find documents (or nodes) that have a field value that satisfies the supplied LIKE
+ * expression.
+ *
+ * @param fieldName the name of the document field to search
+ * @param likeExpression the JCR like expression
+ * @param caseSensitive true if the evaluation should be performed in a case sensitive manner, or false otherwise
+ * @return the query; never null
+ * @throws IOException if there is an error creating the query
+ */
+ Query findNodesLike( String fieldName,
+ String likeExpression,
+ boolean caseSensitive ) throws IOException;
+
+ Query findNodesWith( Length propertyLength,
+ Operator operator,
+ Object value ) throws IOException;
+
+ Query findNodesWith( PropertyValue propertyValue,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ Query findNodesWithNumericRange( PropertyValue propertyValue,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) throws IOException;
+
+ Query findNodesWithNumericRange( NodeDepth depth,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) throws IOException;
+
+ Query findNodesWith( NodePath nodePath,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ Query findNodesWith( NodeName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ Query findNodesWith( NodeLocalName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ Query findNodesWith( NodeDepth depthConstraint,
+ Operator operator,
+ Object value ) throws IOException;
+ }
+
+ public static abstract class TupleCollector extends Collector {
+
+ /**
+ * Get the tuples.
+ *
+ * @return the tuples; never null
+ */
+ public abstract LinkedList<Object[]> getTuples();
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngine.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistry.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/EncodingNamespaceRegistry.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistry.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistry.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,233 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import net.jcip.annotations.GuardedBy;
+import net.jcip.annotations.ThreadSafe;
+import org.jboss.dna.common.text.NoOpEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.graph.DnaLexicon;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.JcrMixLexicon;
+import org.jboss.dna.graph.JcrNtLexicon;
+import org.jboss.dna.graph.property.NamespaceRegistry;
+import org.jboss.dna.graph.property.basic.BasicNamespace;
+
+/**
+ * A {@link NamespaceRegistry} implementation that uses encoded representations of the namespace URIs for the namespace prefixes.
+ */
+@ThreadSafe
+class EncodingNamespaceRegistry implements NamespaceRegistry {
+
+ public static final Set<String> DEFAULT_FIXED_NAMESPACES = Collections.unmodifiableSet(new HashSet<String>(
+ Arrays.asList(new String[] {
+ "",
+ DnaLexicon.Namespace.URI,
+ JcrLexicon.Namespace.URI,
+ JcrNtLexicon.Namespace.URI,
+ JcrMixLexicon.Namespace.URI})));
+
+ private final NamespaceRegistry registry;
+ private final TextEncoder encoder;
+ private final ReadWriteLock lock = new ReentrantReadWriteLock();
+ @GuardedBy( "lock" )
+ private final Map<String, String> uriToEncodedPrefix = new HashMap<String, String>();
+ @GuardedBy( "lock" )
+ private final Map<String, String> encodedPrefixToUri = new HashMap<String, String>();
+ private final Set<String> fixedNamespaceUris;
+
+ /**
+ * @param registry the original registry
+ * @param encoder the encoder; may be null if no encoding should be used
+ */
+ EncodingNamespaceRegistry( NamespaceRegistry registry,
+ TextEncoder encoder ) {
+ this(registry, encoder, null);
+ }
+
+ /**
+ * @param registry the original registry
+ * @param encoder the encoder; may be null if no encoding should be used
+ * @param fixedUris the set of URIs that is to be fixed and not encoded; or null if the default namespaces are to be fixed
+ */
+ EncodingNamespaceRegistry( NamespaceRegistry registry,
+ TextEncoder encoder,
+ Set<String> fixedUris ) {
+ this.registry = registry;
+ this.encoder = encoder != null ? encoder : new NoOpEncoder();
+ this.fixedNamespaceUris = fixedUris != null ? Collections.unmodifiableSet(new HashSet<String>(fixedUris)) : DEFAULT_FIXED_NAMESPACES;
+ assert this.registry != null;
+ assert this.encoder != null;
+ assert this.fixedNamespaceUris != null;
+ }
+
+ /**
+ * @return fixedNamespaceUris
+ */
+ public Set<String> getFixedNamespaceUris() {
+ return fixedNamespaceUris;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#getDefaultNamespaceUri()
+ */
+ public String getDefaultNamespaceUri() {
+ return this.registry.getDefaultNamespaceUri();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#getNamespaceForPrefix(java.lang.String)
+ */
+ public String getNamespaceForPrefix( String prefix ) {
+ // First look in the map ...
+ String result = null;
+ try {
+ lock.readLock().lock();
+ result = encodedPrefixToUri.get(prefix);
+ if (result != null) return result;
+ } finally {
+ lock.readLock().unlock();
+ }
+
+ // Make sure we have encoded all the namespaces in the registry ...
+ Set<Namespace> namespaces = new HashSet<Namespace>(this.registry.getNamespaces());
+ Set<Namespace> encodedNamespaces = this.getNamespaces();
+ namespaces.removeAll(encodedNamespaces);
+ try {
+ lock.writeLock().lock();
+ for (Namespace namespace : namespaces) {
+ String namespaceUri = namespace.getNamespaceUri();
+ String encoded = fixedNamespaceUris.contains(namespaceUri) ? namespace.getPrefix() : encoder.encode(namespaceUri);
+ uriToEncodedPrefix.put(namespaceUri, encoded);
+ encodedPrefixToUri.put(encoded, namespaceUri);
+ if (result == null && encoded.equals(prefix)) result = namespaceUri;
+ }
+ } finally {
+ lock.writeLock().unlock();
+ }
+ if (result != null) return result;
+
+ // There's nothing, so just delegate to the registry ...
+ return this.registry.getNamespaceForPrefix(prefix);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#getRegisteredNamespaceUris()
+ */
+ public Set<String> getRegisteredNamespaceUris() {
+ return this.registry.getRegisteredNamespaceUris();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#isRegisteredNamespaceUri(java.lang.String)
+ */
+ public boolean isRegisteredNamespaceUri( String namespaceUri ) {
+ return this.registry.isRegisteredNamespaceUri(namespaceUri);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#getPrefixForNamespaceUri(java.lang.String, boolean)
+ */
+ public String getPrefixForNamespaceUri( String namespaceUri,
+ boolean generateIfMissing ) {
+ if (fixedNamespaceUris.contains(namespaceUri)) {
+ return this.registry.getPrefixForNamespaceUri(namespaceUri, generateIfMissing);
+ }
+ String encoded = null;
+ try {
+ lock.readLock().lock();
+ encoded = uriToEncodedPrefix.get(namespaceUri);
+ } finally {
+ lock.readLock().unlock();
+ }
+ if (encoded == null) {
+ encoded = encoder.encode(namespaceUri);
+ try {
+ lock.writeLock().lock();
+ uriToEncodedPrefix.put(namespaceUri, encoded);
+ encodedPrefixToUri.put(encoded, namespaceUri);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+ return encoded;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#getNamespaces()
+ */
+ public Set<Namespace> getNamespaces() {
+ Set<Namespace> results = new HashSet<Namespace>();
+ try {
+ lock.readLock().lock();
+ for (Map.Entry<String, String> entry : uriToEncodedPrefix.entrySet()) {
+ String uri = entry.getKey();
+ String prefix = entry.getValue();
+ results.add(new BasicNamespace(prefix, uri));
+ }
+ } finally {
+ lock.readLock().unlock();
+ }
+ return results;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#register(java.lang.String, java.lang.String)
+ */
+ public String register( String prefix,
+ String namespaceUri ) {
+ return this.registry.register(prefix, namespaceUri);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.property.NamespaceRegistry#unregister(java.lang.String)
+ */
+ public boolean unregister( String namespaceUri ) {
+ return this.registry.unregister(namespaceUri);
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistry.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/IndexRules.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/IndexRules.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/IndexRules.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,639 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import net.jcip.annotations.Immutable;
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.graph.property.Name;
+
+/**
+ * The set of rules that dictate how properties should be indexed.
+ */
+@Immutable
+public class IndexRules {
+
+ public static enum FieldType {
+ STRING,
+ DOUBLE,
+ FLOAT,
+ INT,
+ BOOLEAN,
+ LONG,
+ DATE,
+ BINARY;
+ }
+
+ /**
+ * A single rule that dictates how a single property should be indexed.
+ *
+ * @see IndexRules#getRule(Name)
+ */
+ @Immutable
+ public static interface Rule {
+
+ boolean isSkipped();
+
+ FieldType getType();
+
+ Field.Store getStoreOption();
+
+ Field.Index getIndexOption();
+ }
+
+ @Immutable
+ public static interface NumericRule<T> extends Rule {
+ T getMinimum();
+
+ T getMaximum();
+ }
+
+ public static final Rule SKIP = new SkipRule();
+
+ @Immutable
+ protected static class SkipRule implements Rule {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see Rule#getType()
+ */
+ public FieldType getType() {
+ return FieldType.STRING;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see Rule#isSkipped()
+ */
+ public boolean isSkipped() {
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see Rule#getIndexOption()
+ */
+ public Index getIndexOption() {
+ return Field.Index.NO;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see Rule#getStoreOption()
+ */
+ public Store getStoreOption() {
+ return Field.Store.NO;
+ }
+ }
+
+ @Immutable
+ protected static class TypedRule implements Rule {
+ protected final FieldType type;
+ protected final Field.Store store;
+ protected final Field.Index index;
+
+ protected TypedRule( FieldType type,
+ Field.Store store,
+ Field.Index index ) {
+ this.type = type;
+ this.index = index;
+ this.store = store;
+ assert this.type != null;
+ assert this.index != null;
+ assert this.store != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see IndexRules.Rule#getType()
+ */
+ public FieldType getType() {
+ return type;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see IndexRules.Rule#isSkipped()
+ */
+ public boolean isSkipped() {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see IndexRules.Rule#getIndexOption()
+ */
+ public Index getIndexOption() {
+ return index;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see IndexRules.Rule#getStoreOption()
+ */
+ public Store getStoreOption() {
+ return store;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return type.name() + " rule (" + store + "," + index + ")";
+ }
+ }
+
+ @Immutable
+ protected static class NumericTypedRule<T> extends TypedRule implements NumericRule<T> {
+ protected final T minValue;
+ protected final T maxValue;
+
+ protected NumericTypedRule( FieldType type,
+ Field.Store store,
+ Field.Index index,
+ T minValue,
+ T maxValue ) {
+ super(type, store, index);
+ this.minValue = minValue;
+ this.maxValue = maxValue;
+ assert this.minValue != null;
+ assert this.maxValue != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see IndexRules.NumericRule#getMaximum()
+ */
+ public T getMaximum() {
+ return maxValue;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see IndexRules.NumericRule#getMinimum()
+ */
+ public T getMinimum() {
+ return minValue;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return super.toString() + " with range [" + minValue + "," + maxValue + "]";
+ }
+ }
+
+ private final Map<Name, Rule> rulesByName;
+ private final Rule defaultRule;
+
+ protected IndexRules( Map<Name, Rule> rulesByName,
+ Rule defaultRule ) {
+ this.rulesByName = rulesByName;
+ this.defaultRule = defaultRule != null ? defaultRule : SKIP;
+ assert this.defaultRule != null;
+ }
+
+ /**
+ * Get the rule associated with the given property name.
+ *
+ * @param name the property name, or null if the default rule is to be returned
+ * @return the rule; never null
+ */
+ public Rule getRule( Name name ) {
+ Rule result = rulesByName.get(name);
+ return result != null ? result : this.defaultRule;
+ }
+
+ /**
+ * Return a new builder that can be used to create {@link IndexRules} objects.
+ *
+ * @return a builder; never null
+ */
+ public static Builder createBuilder() {
+ return new Builder(new HashMap<Name, Rule>(), null);
+ }
+
+ /**
+ * Return a new builder that can be used to create {@link IndexRules} objects.
+ *
+ * @param initialRules the rules that the builder should start with
+ * @return a builder; never null
+ * @throws IllegalArgumentException if the initial rules reference is null
+ */
+ public static Builder createBuilder( IndexRules initialRules ) {
+ CheckArg.isNotNull(initialRules, "initialRules");
+ return new Builder(new HashMap<Name, Rule>(initialRules.rulesByName), initialRules.defaultRule);
+ }
+
+ /**
+ * A builder of immutable {@link IndexRules} objects.
+ */
+ @NotThreadSafe
+ public static class Builder {
+ private final Map<Name, Rule> rulesByName;
+ private Rule defaultRule;
+
+ Builder( Map<Name, Rule> rulesByName,
+ Rule defaultRule ) {
+ assert rulesByName != null;
+ this.rulesByName = rulesByName;
+ this.defaultRule = defaultRule;
+ }
+
+ /**
+ * Mark the properties with the supplied names to be skipped from indexing.
+ *
+ * @param namesToIndex the names of the properties that are to be skipped
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder skip( Name... namesToIndex ) {
+ if (namesToIndex != null) {
+ for (Name name : namesToIndex) {
+ rulesByName.put(name, SKIP);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Define a string-based field as the default.
+ *
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder defaultTo( Field.Store store,
+ Field.Index index ) {
+ if (store == null) store = Field.Store.YES;
+ if (index == null) index = Field.Index.NOT_ANALYZED;
+ defaultRule = new TypedRule(FieldType.STRING, store, index);
+ return this;
+ }
+
+ /**
+ * Define a string-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder stringField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ if (store == null) store = Field.Store.YES;
+ if (index == null) index = Field.Index.NOT_ANALYZED;
+ Rule rule = new TypedRule(FieldType.STRING, store, index);
+ rulesByName.put(name, rule);
+ return this;
+ }
+
+ /**
+ * Define a binary-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder binaryField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ if (store == null) store = Field.Store.YES;
+ if (index == null) index = Field.Index.NOT_ANALYZED;
+ Rule rule = new TypedRule(FieldType.BINARY, store, index);
+ rulesByName.put(name, rule);
+ return this;
+ }
+
+ protected <T> Builder numericField( Name name,
+ FieldType type,
+ Field.Store store,
+ Field.Index index,
+ T minValue,
+ T maxValue ) {
+ if (store == null) store = Field.Store.YES;
+ if (index == null) index = Field.Index.NOT_ANALYZED;
+ Rule rule = new NumericTypedRule<T>(type, store, index, minValue, maxValue);
+ rulesByName.put(name, rule);
+ return this;
+ }
+
+ /**
+ * Define a boolean-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder booleanField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ return numericField(name, FieldType.BOOLEAN, store, index, Boolean.FALSE, Boolean.TRUE);
+ }
+
+ /**
+     * Define an integer-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @param maxValue the maximum value for this field, or null if there is no maximum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder integerField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Integer minValue,
+ Integer maxValue ) {
+ if (minValue == null) minValue = Integer.MIN_VALUE;
+ if (maxValue == null) maxValue = Integer.MAX_VALUE;
+ return numericField(name, FieldType.INT, store, index, minValue, maxValue);
+ }
+
+ /**
+ * Define a long-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @param maxValue the maximum value for this field, or null if there is no maximum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder longField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Long minValue,
+ Long maxValue ) {
+ if (minValue == null) minValue = Long.MIN_VALUE;
+ if (maxValue == null) maxValue = Long.MAX_VALUE;
+ return numericField(name, FieldType.LONG, store, index, minValue, maxValue);
+ }
+
+ /**
+ * Define a date-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @param maxValue the maximum value for this field, or null if there is no maximum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder dateField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Long minValue,
+ Long maxValue ) {
+ if (minValue == null) minValue = 0L;
+ if (maxValue == null) maxValue = Long.MAX_VALUE;
+ return numericField(name, FieldType.DATE, store, index, minValue, maxValue);
+ }
+
+ /**
+ * Define a float-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @param maxValue the maximum value for this field, or null if there is no maximum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder floatField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Float minValue,
+ Float maxValue ) {
+ if (minValue == null) minValue = Float.MIN_VALUE;
+ if (maxValue == null) maxValue = Float.MAX_VALUE;
+ return numericField(name, FieldType.FLOAT, store, index, minValue, maxValue);
+ }
+
+ /**
+ * Define a double-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @param maxValue the maximum value for this field, or null if there is no maximum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder doubleField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Double minValue,
+ Double maxValue ) {
+ if (minValue == null) minValue = Double.MIN_VALUE;
+ if (maxValue == null) maxValue = Double.MAX_VALUE;
+ return numericField(name, FieldType.DOUBLE, store, index, minValue, maxValue);
+ }
+
+ /**
+     * Define an integer-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder integerField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Integer minValue ) {
+ return integerField(name, store, index, minValue, null);
+ }
+
+ /**
+ * Define a long-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder longField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Long minValue ) {
+ return longField(name, store, index, minValue, null);
+ }
+
+ /**
+ * Define a date-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder dateField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Long minValue ) {
+ return dateField(name, store, index, minValue, null);
+ }
+
+ /**
+ * Define a float-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder floatField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Float minValue ) {
+ return floatField(name, store, index, minValue, null);
+ }
+
+ /**
+ * Define a double-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @param minValue the minimum value for this field, or null if there is no minimum value
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder doubleField( Name name,
+ Field.Store store,
+ Field.Index index,
+ Double minValue ) {
+ return doubleField(name, store, index, minValue, null);
+ }
+
+ /**
+     * Define an integer-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder integerField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ return integerField(name, store, index, null, null);
+ }
+
+ /**
+ * Define a long-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder longField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ return longField(name, store, index, null, null);
+ }
+
+ /**
+ * Define a date-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder dateField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ return dateField(name, store, index, null, null);
+ }
+
+ /**
+ * Define a float-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder floatField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ return floatField(name, store, index, null, null);
+ }
+
+ /**
+ * Define a double-based field in the indexes. This method will overwrite any existing definition in this builder.
+ *
+ * @param name the name of the field
+ * @param store the storage setting, or null if the field should be {@link Store#YES stored}
+ * @param index the index setting, or null if the field should be indexed but {@link Index#NOT_ANALYZED not analyzed}
+ * @return this builder for convenience and method chaining; never null
+ */
+ public Builder doubleField( Name name,
+ Field.Store store,
+ Field.Index index ) {
+ return doubleField(name, store, index, null, null);
+ }
+
+ /**
+ * Build the indexing rules.
+ *
+ * @return the immutable indexing rules.
+ */
+ public IndexRules build() {
+ return new IndexRules(Collections.unmodifiableMap(new HashMap<Name, Rule>(rulesByName)), defaultRule);
+ }
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/IndexRules.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfiguration.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfiguration.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfiguration.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,60 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.store.Directory;
+import org.jboss.dna.graph.search.SearchEngineException;
+
+/**
+ * Interface used to obtain the Lucene {@link Directory} instance that should be used for a workspace given the name of the
+ * workspace. There are several implementations (see {@link LuceneConfigurations}), but custom implementations can always be
+ * used.
+ */
+@ThreadSafe
+public interface LuceneConfiguration {
+ /**
+ * Get the {@link Directory} that should be used for the workspace with the supplied name.
+ *
+ * @param workspaceName the workspace name
+ * @param indexName the name of the index to be created
+ * @return the directory; never null
+ * @throws IllegalArgumentException if the workspace name is null
+ * @throws SearchEngineException if there is a problem creating the directory
+ */
+ Directory getDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException;
+
+ /**
+ * Destroy the {@link Directory} that is used for the workspace with the supplied name.
+ *
+ * @param workspaceName the workspace name
+     * @param indexName the name of the index to be destroyed
+ * @return true if the directory existed and was destroyed, or false if the directory didn't exist
+ * @throws IllegalArgumentException if the workspace name is null
+     * @throws SearchEngineException if there is a problem destroying the directory
+ */
+ boolean destroyDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException;
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfiguration.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfigurations.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfigurations.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfigurations.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,431 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.ConcurrentHashMap;
+import net.jcip.annotations.Immutable;
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.RAMDirectory;
+import org.jboss.dna.common.i18n.I18n;
+import org.jboss.dna.common.text.NoOpEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.common.util.FileUtil;
+import org.jboss.dna.common.util.HashCode;
+import org.jboss.dna.graph.search.SearchEngineException;
+
+/**
+ * A family of {@link LuceneConfiguration} implementations.
+ */
+public class LuceneConfigurations {
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates in-memory directories.
+ *
+ * @return the new directory configuration; never null
+ */
+ public static final LuceneConfiguration inMemory() {
+ return new RamDirectoryFactory();
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent ) {
+ CheckArg.isNotNull(parent, "parent");
+ return new FileSystemDirectoryFromNameFactory(parent);
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent,
+ LockFactory lockFactory ) {
+ CheckArg.isNotNull(parent, "parent");
+ return new FileSystemDirectoryFromNameFactory(parent, lockFactory);
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ CheckArg.isNotNull(parent, "parent");
+ return new FileSystemDirectoryFromNameFactory(parent, workspaceNameEncoder, indexNameEncoder);
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent,
+ LockFactory lockFactory,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ CheckArg.isNotNull(parent, "parent");
+ return new FileSystemDirectoryFromNameFactory(parent, lockFactory, workspaceNameEncoder, indexNameEncoder);
+ }
+
+ /**
+ * A {@link LuceneConfiguration} implementation that creates {@link Directory} instances of the supplied type for each
+ * workspace and pools the results, ensuring that the same {@link Directory} instance is always returned for the same
+ * workspace name.
+ *
+ * @param <DirectoryType> the concrete type of the directory
+ */
+ @ThreadSafe
+ protected static abstract class PoolingDirectoryFactory<DirectoryType extends Directory> implements LuceneConfiguration {
+ private final ConcurrentHashMap<IndexId, DirectoryType> directories = new ConcurrentHashMap<IndexId, DirectoryType>();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see LuceneConfiguration#getDirectory(java.lang.String, java.lang.String)
+ */
+ public Directory getDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ IndexId id = new IndexId(workspaceName, indexName);
+ DirectoryType result = directories.get(id);
+ if (result == null) {
+ DirectoryType newDirectory = createDirectory(workspaceName, indexName);
+ result = directories.putIfAbsent(id, newDirectory);
+ if (result == null) result = newDirectory;
+ }
+ return result;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see LuceneConfiguration#destroyDirectory(java.lang.String, java.lang.String)
+ */
+ public boolean destroyDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ IndexId id = new IndexId(workspaceName, indexName);
+ DirectoryType result = directories.remove(id);
+ return result != null ? doDestroy(result) : false;
+ }
+
+ /**
+ * Method implemented by subclasses to create a new Directory implementation.
+ *
+ * @param workspaceName the name of the workspace for which the {@link Directory} is to be created; never null
+ * @param indexName the name of the index to be created
+ * @return the new directory; may not be null
+ * @throws SearchEngineException if there is a problem creating the directory
+ */
+ protected abstract DirectoryType createDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException;
+
+ protected abstract boolean doDestroy( DirectoryType directory ) throws SearchEngineException;
+ }
+
+ /**
+ * A {@link LuceneConfiguration} implementation that creates {@link RAMDirectory} instances for each workspace and index name.
+ * Each factory instance maintains a pool of {@link RAMDirectory} instances, ensuring that the same {@link RAMDirectory} is
+ * always returned for the same workspace name.
+ */
+ @ThreadSafe
+ public static class RamDirectoryFactory extends PoolingDirectoryFactory<RAMDirectory> {
+ protected RamDirectoryFactory() {
+ }
+
+ @Override
+ protected RAMDirectory createDirectory( String workspaceName,
+ String indexName ) {
+ return new RAMDirectory();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see LuceneConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
+ */
+ @Override
+ protected boolean doDestroy( RAMDirectory directory ) throws SearchEngineException {
+ return directory != null;
+ }
+ }
+
+ /**
+ * A {@link LuceneConfiguration} implementation that creates {@link FSDirectory} instances for each workspace and index name.
+ * This factory is created with a parent directory under which all workspace and index directories are created.
+ * <p>
+ * This uses the supplied encoders to translate the workspace and index names into valid directory names. By default, no
+ * encoding is performed, meaning that the workspace and index names are used explicitly as directory names. This default
+ * behavior, then, means that not all values of workspace names or index names will work. If you want to be sure that all
+ * workspace names work, supply an encoder for the workspace names. (Index names are currently such that they will always be
+ * valid directory names, but you can always supply an encoder if you'd like.)
+ * </p>
+ */
+ public static class FileSystemDirectoryFromNameFactory extends PoolingDirectoryFactory<FSDirectory> {
+ private final File parentFile;
+ private final LockFactory lockFactory;
+ private final TextEncoder workspaceNameEncoder;
+ private final TextEncoder indexNameEncoder;
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent ) {
+ this(parent, null, null, null);
+ }
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent,
+ LockFactory lockFactory ) {
+ this(parent, lockFactory, null, null);
+ }
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ this(parent, null, workspaceNameEncoder, indexNameEncoder);
+ }
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent,
+ LockFactory lockFactory,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ CheckArg.isNotNull(parent, "parent");
+ this.parentFile = parent;
+ this.lockFactory = lockFactory;
+ this.workspaceNameEncoder = workspaceNameEncoder != null ? workspaceNameEncoder : new NoOpEncoder();
+ this.indexNameEncoder = indexNameEncoder != null ? indexNameEncoder : new NoOpEncoder();
+ }
+
+ @Override
+ protected FSDirectory createDirectory( String workspaceName,
+ String indexName ) {
+ File workspaceFile = new File(parentFile, workspaceNameEncoder.encode(workspaceName));
+ if (!workspaceFile.exists()) {
+ workspaceFile.mkdirs();
+ } else {
+ if (!workspaceFile.isDirectory()) {
+ I18n msg = LuceneI18n.locationForIndexesIsNotDirectory;
+ throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
+ }
+ if (!workspaceFile.canRead()) {
+ I18n msg = LuceneI18n.locationForIndexesCannotBeRead;
+ throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
+ }
+ if (!workspaceFile.canWrite()) {
+ I18n msg = LuceneI18n.locationForIndexesCannotBeWritten;
+ throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
+ }
+ }
+ File directory = workspaceFile;
+ if (indexName != null) {
+ File indexFile = new File(workspaceFile, indexNameEncoder.encode(indexName));
+ if (!indexFile.exists()) {
+ indexFile.mkdirs();
+ } else {
+ if (!indexFile.isDirectory()) {
+ I18n msg = LuceneI18n.locationForIndexesIsNotDirectory;
+ throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
+ }
+ if (!indexFile.canRead()) {
+ I18n msg = LuceneI18n.locationForIndexesCannotBeRead;
+ throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
+ }
+ if (!indexFile.canWrite()) {
+ I18n msg = LuceneI18n.locationForIndexesCannotBeWritten;
+ throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
+ }
+ }
+ directory = indexFile;
+ }
+ try {
+ return create(directory, lockFactory);
+ } catch (IOException e) {
+ throw new SearchEngineException(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see LuceneConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
+ */
+ @Override
+ protected boolean doDestroy( FSDirectory directory ) throws SearchEngineException {
+ File file = directory.getFile();
+ if (file.exists()) {
+ return FileUtil.delete(file);
+ }
+ return false;
+ }
+
+ /**
+ * Override this method to define which subclass of {@link FSDirectory} should be created.
+ *
+ * @param directory the file system directory; never null
+ * @param lockFactory the lock factory; may be null
+ * @return the {@link FSDirectory} instance
+ * @throws IOException if there is a problem creating the FSDirectory instance
+ */
+ protected FSDirectory create( File directory,
+ LockFactory lockFactory ) throws IOException {
+ return FSDirectory.open(directory, lockFactory);
+ }
+ }
+
+ @Immutable
+ protected static final class IndexId {
+ private final String workspaceName;
+ private final String indexName;
+ private final int hc;
+
+ protected IndexId( String workspaceName,
+ String indexName ) {
+ assert workspaceName != null;
+ this.workspaceName = workspaceName;
+ this.indexName = indexName;
+ this.hc = HashCode.compute(this.workspaceName, this.indexName);
+ }
+
+ /**
+ * @return indexName
+ */
+ public String getIndexName() {
+ return indexName;
+ }
+
+ /**
+ * @return workspaceName
+ */
+ public String getWorkspaceName() {
+ return workspaceName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ return hc;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals( Object obj ) {
+ if (obj == this) return true;
+ if (obj instanceof IndexId) {
+ IndexId that = (IndexId)obj;
+ if (this.hashCode() != that.hashCode()) return false;
+ if (!this.workspaceName.equals(that.workspaceName)) return false;
+ if (!this.indexName.equals(that.indexName)) return false;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return indexName != null ? workspaceName + "/" + this.indexName : this.workspaceName;
+ }
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneConfigurations.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneException.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneException.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneException.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,74 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.dna.search.lucene;
+
+/**
+ * A {@link RuntimeException runtime exception} representing a problem operating against Lucene.
+ */
+public class LuceneException extends RuntimeException {
+
+ /**
+ */
+ private static final long serialVersionUID = 8281373010920861138L;
+
+ /**
+ * Construct a system failure exception with no message.
+ */
+ public LuceneException() {
+ }
+
+ /**
+ * Construct a system failure exception with a single message.
+ *
+ * @param message the message describing the failure
+ */
+ public LuceneException( String message ) {
+ super(message);
+
+ }
+
+ /**
+ * Construct a system failure exception with another exception that is the cause of the failure.
+ *
+ * @param cause the original cause of the failure
+ */
+ public LuceneException( Throwable cause ) {
+ super(cause);
+
+ }
+
+ /**
+ * Construct a system failure exception with a single message and another exception that is the cause of the failure.
+ *
+ * @param message the message describing the failure
+ * @param cause the original cause of the failure
+ */
+ public LuceneException( String message,
+ Throwable cause ) {
+ super(message, cause);
+
+ }
+
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneException.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneI18n.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneI18n.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneI18n.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,58 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.util.Locale;
+import java.util.Set;
+import org.jboss.dna.common.CommonI18n;
+import org.jboss.dna.common.i18n.I18n;
+
+public class LuceneI18n {
+
+ public static I18n locationForIndexesIsNotDirectory;
+ public static I18n locationForIndexesCannotBeRead;
+ public static I18n locationForIndexesCannotBeWritten;
+ public static I18n errorWhileCommittingIndexChanges;
+ public static I18n errorWhileRollingBackIndexChanges;
+
+ static {
+ try {
+ I18n.initialize(LuceneI18n.class);
+ } catch (final Exception err) {
+ System.err.println(err);
+ }
+ }
+
+ public static Set<Locale> getLocalizationProblemLocales() {
+ return I18n.getLocalizationProblemLocales(CommonI18n.class);
+ }
+
+ public static Set<String> getLocalizationProblems() {
+ return I18n.getLocalizationProblems(CommonI18n.class);
+ }
+
+ public static Set<String> getLocalizationProblems( Locale locale ) {
+ return I18n.getLocalizationProblems(CommonI18n.class, locale);
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneI18n.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchEngine.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchEngine.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchEngine.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,207 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.io.File;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.util.Version;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.text.UrlEncoder;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.graph.DnaLexicon;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
+import org.jboss.dna.graph.observe.Observer;
+import org.jboss.dna.graph.property.basic.JodaDateTime;
+import org.jboss.dna.graph.search.SearchEngine;
+import org.jboss.dna.graph.search.SearchEngineException;
+
+/**
+ * A {@link SearchEngine} implementation that relies upon two separate indexes to manage the node properties and the node
+ * structure (path and children). Using two indexes is more efficient when the node content and structure are updated
+ * independently. For example, the structure of the nodes changes whenever same-name-sibling indexes are changed, when sibling
+ * nodes are deleted, or when nodes are moved around; in all of these cases, the properties of the nodes do not change.
+ */
+public class LuceneSearchEngine extends AbstractLuceneSearchEngine<LuceneSearchWorkspace, LuceneSearchProcessor> {
+
+ /**
+ * The default set of {@link IndexRules} used by {@link LuceneSearchEngine} instances when no rules are provided. These rules
+ * default to index and analyze all properties, and to index the {@link DnaLexicon#UUID dna:uuid} and {@link JcrLexicon#UUID
+ * jcr:uuid} properties to be indexed and stored only (not analyzed and not included in full-text search. The rules also treat
+ * {@link JcrLexicon#CREATED jcr:created} and {@link JcrLexicon#LAST_MODIFIED jcr:lastModified} properties as dates.
+ */
+ public static final IndexRules DEFAULT_RULES;
+
+ static {
+ // We know that the earliest creation/modified dates cannot be before November 1 2009,
+ // which is before this feature was implemented
+ long earliestChangeDate = new JodaDateTime(2009, 11, 01, 0, 0, 0, 0).getMilliseconds();
+
+ IndexRules.Builder builder = IndexRules.createBuilder();
+ // Configure the default behavior ...
+ builder.defaultTo(Field.Store.YES, Field.Index.ANALYZED);
+ // Configure the UUID properties to be just indexed and stored (not analyzed, not included in full-text) ...
+ builder.stringField(JcrLexicon.UUID, Field.Store.YES, Field.Index.NOT_ANALYZED);
+ builder.stringField(DnaLexicon.UUID, Field.Store.YES, Field.Index.NOT_ANALYZED);
+ // Configure the properties that we'll treat as dates ...
+ builder.dateField(JcrLexicon.CREATED, Field.Store.YES, Field.Index.NOT_ANALYZED, earliestChangeDate);
+ builder.dateField(JcrLexicon.LAST_MODIFIED, Field.Store.YES, Field.Index.NOT_ANALYZED, earliestChangeDate);
+ DEFAULT_RULES = builder.build();
+ }
+
+ protected static final TextEncoder DEFAULT_ENCODER = new UrlEncoder();
+
+ /** A thread-local DateFormat instance that is thread-safe, since a new instance is created for each thread. */
+ protected ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
+ @Override
+ protected DateFormat initialValue() {
+ return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
+ }
+ };
+
+ private final LuceneConfiguration configuration;
+ private final IndexRules rules;
+ private final Analyzer analyzer;
+
+ /**
+ * Create a new instance of a {@link SearchEngine} that uses Lucene and a two-index design, and that stores the indexes using
+ * the supplied {@link LuceneConfiguration}.
+ *
+ * @param sourceName the name of the source that this engine will search over
+ * @param connectionFactory the factory for making connections to the source
+ * @param verifyWorkspaceInSource true if the workspaces are to be verified using the source, or false if this engine is used
+ * in a way such that all workspaces are known to exist
+ * @param configuration the configuration of the Lucene indexes
+ * @param rules the index rule, or null if the default index rules should be used
+ * @param analyzer the analyzer, or null if the default analyzer should be used
+ * @throws IllegalArgumentException if any of the source name, connection factory, or configuration are null
+ */
+ public LuceneSearchEngine( String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ boolean verifyWorkspaceInSource,
+ LuceneConfiguration configuration,
+ IndexRules rules,
+ Analyzer analyzer ) {
+ super(sourceName, connectionFactory, verifyWorkspaceInSource);
+ CheckArg.isNotNull(configuration, "configuration");
+ this.configuration = configuration;
+ this.analyzer = analyzer != null ? analyzer : new StandardAnalyzer(Version.LUCENE_30);
+ this.rules = rules != null ? rules : DEFAULT_RULES;
+ }
+
+ /**
+ * Create a new instance of a {@link SearchEngine} that uses Lucene and a two-index design, and that stores the indexes in the
+ * supplied directory.
+ * <p>
+ * This is identical to the following:
+ *
+ * <pre>
+ * TextEncoder encoder = new UrlEncoder();
+ * LuceneConfiguration config = LuceneConfigurations.using(indexStorageDirectory, null, encoder, encoder);
+ * new LuceneSearchEngine(sourceName, connectionFactory, verifyWorkspaceInSource, config, rules, analyzer);
+ * </pre>
+ *
+ * where the {@link UrlEncoder} is used to ensure that workspace names and index names can be turned into file system
+ * directory names.
+ * </p>
+ *
+ * @param sourceName the name of the source that this engine will search over
+ * @param connectionFactory the factory for making connections to the source
+ * @param verifyWorkspaceInSource true if the workspaces are to be verified using the source, or false if this engine is used
+ * in a way such that all workspaces are known to exist
+ * @param indexStorageDirectory the file system directory in which the indexes are to be kept
+ * @param rules the index rule, or null if the default index rules should be used
+ * @param analyzer the analyzer, or null if the default analyzer should be used
+ * @throws IllegalArgumentException if any of the source name, connection factory, or directory are null
+ */
+ public LuceneSearchEngine( String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ boolean verifyWorkspaceInSource,
+ File indexStorageDirectory,
+ IndexRules rules,
+ Analyzer analyzer ) {
+ this(sourceName, connectionFactory, verifyWorkspaceInSource, LuceneConfigurations.using(indexStorageDirectory,
+ null,
+ DEFAULT_ENCODER,
+ DEFAULT_ENCODER), null, null);
+ }
+
+ /**
+ * Create a new instance of a {@link SearchEngine} that uses Lucene and a two-index design, and that stores the Lucene indexes
+ * in memory.
+ * <p>
+ * This is identical to the following:
+ *
+ * <pre>
+ * new LuceneSearchEngine(sourceName, connectionFactory, verifyWorkspaceInSource, LuceneConfigurations.inMemory(), rules, analyzer);
+ * </pre>
+ *
+ * </p>
+ *
+ * @param sourceName the name of the source that this engine will search over
+ * @param connectionFactory the factory for making connections to the source
+ * @param verifyWorkspaceInSource true if the workspaces are to be verified using the source, or false if this engine is used
+ * in a way such that all workspaces are known to exist
+ * @param rules the index rule, or null if the default index rules should be used
+ * @param analyzer the analyzer, or null if the default analyzer should be used
+ * @throws IllegalArgumentException if any of the source name or connection factory are null
+ */
+ public LuceneSearchEngine( String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ boolean verifyWorkspaceInSource,
+ IndexRules rules,
+ Analyzer analyzer ) {
+ this(sourceName, connectionFactory, verifyWorkspaceInSource, LuceneConfigurations.inMemory(), null, null);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine#createProcessor(org.jboss.dna.graph.ExecutionContext,
+ * org.jboss.dna.graph.search.SearchEngine.Workspaces, org.jboss.dna.graph.observe.Observer,boolean)
+ */
+ @Override
+ protected LuceneSearchProcessor createProcessor( ExecutionContext context,
+ Workspaces<LuceneSearchWorkspace> workspaces,
+ Observer observer,
+ boolean readOnly ) {
+ return new LuceneSearchProcessor(getSourceName(), context, workspaces, observer, null, readOnly);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine#createWorkspace(org.jboss.dna.graph.ExecutionContext, java.lang.String)
+ */
+ @Override
+ protected LuceneSearchWorkspace createWorkspace( ExecutionContext context,
+ String workspaceName ) throws SearchEngineException {
+ return new LuceneSearchWorkspace(workspaceName, configuration, rules, analyzer, false);
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchEngine.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchProcessor.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchProcessor.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchProcessor.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,415 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.jboss.dna.common.text.SecureHashTextEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.util.Logger;
+import org.jboss.dna.common.util.SecureHash.Algorithm;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.observe.Observer;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.NamespaceRegistry;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.QueryResults.Statistics;
+import org.jboss.dna.graph.query.process.FullTextSearchResultColumns;
+import org.jboss.dna.graph.request.CloneBranchRequest;
+import org.jboss.dna.graph.request.CloneWorkspaceRequest;
+import org.jboss.dna.graph.request.CopyBranchRequest;
+import org.jboss.dna.graph.request.CreateNodeRequest;
+import org.jboss.dna.graph.request.CreateWorkspaceRequest;
+import org.jboss.dna.graph.request.DeleteBranchRequest;
+import org.jboss.dna.graph.request.DestroyWorkspaceRequest;
+import org.jboss.dna.graph.request.FullTextSearchRequest;
+import org.jboss.dna.graph.request.GetWorkspacesRequest;
+import org.jboss.dna.graph.request.LockBranchRequest;
+import org.jboss.dna.graph.request.MoveBranchRequest;
+import org.jboss.dna.graph.request.ReadAllChildrenRequest;
+import org.jboss.dna.graph.request.ReadAllPropertiesRequest;
+import org.jboss.dna.graph.request.UnlockBranchRequest;
+import org.jboss.dna.graph.request.UpdatePropertiesRequest;
+import org.jboss.dna.graph.request.VerifyWorkspaceRequest;
+import org.jboss.dna.graph.search.SearchEngineProcessor;
+import org.jboss.dna.graph.search.SearchEngine.Workspaces;
+import org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.AbstractLuceneProcessor;
+import org.jboss.dna.search.lucene.LuceneSearchWorkspace.PathIndex;
+
+/**
+ * The (concrete) {@link SearchEngineProcessor} implementation for the {@link LuceneSearchEngine}.
+ */
+@NotThreadSafe
+public class LuceneSearchProcessor extends AbstractLuceneProcessor<LuceneSearchWorkspace, LuceneSearchSession> {
+
+ // Encodes namespace URIs as short (10-character) SHA-1 based hashes, keeping encoded names compact.
+ protected static final TextEncoder NAMESPACE_ENCODER = new SecureHashTextEncoder(Algorithm.SHA_1, 10);
+
+ // Wrap the supplied context so that its namespace registry encodes all namespace URIs with NAMESPACE_ENCODER.
+ protected static ExecutionContext contextWithEncodedNamespaces( ExecutionContext context ) {
+ NamespaceRegistry encodingRegistry = new EncodingNamespaceRegistry(context.getNamespaceRegistry(), NAMESPACE_ENCODER);
+ ExecutionContext encodingContext = context.with(encodingRegistry);
+ return encodingContext;
+ }
+
+ // The fixed column structure used for all full-text search results (location plus score).
+ protected static final Columns FULL_TEXT_RESULT_COLUMNS = new FullTextSearchResultColumns();
+
+ /**
+ * Create a processor for the named source.
+ *
+ * @param sourceName the name of the source whose content is indexed and searched
+ * @param context the execution context; it is wrapped so that namespaces are encoded in the indexes
+ * @param workspaces the container of the search workspaces
+ * @param observer the observer of changes, or null if there is none
+ * @param now the time to treat as "now", or null (the superclass presumably substitutes the current time -- TODO confirm)
+ * @param readOnly true if this processor may only read the indexes
+ */
+ protected LuceneSearchProcessor( String sourceName,
+ ExecutionContext context,
+ Workspaces<LuceneSearchWorkspace> workspaces,
+ Observer observer,
+ DateTime now,
+ boolean readOnly ) {
+ super(sourceName, contextWithEncodedNamespaces(context), workspaces, observer, now, readOnly);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.AbstractLuceneProcessor#createSessionFor(org.jboss.dna.graph.search.SearchEngineWorkspace)
+ */
+ @Override
+ protected LuceneSearchSession createSessionFor( LuceneSearchWorkspace workspace ) {
+ return new LuceneSearchSession(workspace, this);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.AbstractLuceneProcessor#fullTextFieldName(java.lang.String)
+ */
+ @Override
+ protected String fullTextFieldName( String propertyName ) {
+ return LuceneSearchWorkspace.FULL_TEXT_PREFIX + propertyName;
+ }
+
+ // Store each of the location's identification properties as a separate serialized field on the document.
+ protected void addIdProperties( Location location,
+ Document doc ) {
+ if (!location.hasIdProperties()) return;
+ for (Property idProp : location.getIdProperties()) {
+ String fieldValue = serializeProperty(idProp);
+ doc.add(new Field(PathIndex.LOCATION_ID_PROPERTIES, fieldValue, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.FullTextSearchRequest)
+ */
+ @Override
+ public void process( FullTextSearchRequest request ) {
+ // A null session means the workspace was not found; presumably getSessionFor has already
+ // recorded an error on the request -- NOTE(review): confirm in AbstractLuceneProcessor.
+ LuceneSearchSession session = getSessionFor(request, request.workspace(), false);
+ if (session == null) return;
+ try {
+ List<Object[]> results = new ArrayList<Object[]>();
+ Statistics statistics = session.search(request.expression(), results, request.maxResults(), request.offset());
+ request.setResults(FULL_TEXT_RESULT_COLUMNS, results, statistics);
+ } catch (ParseException e) {
+ request.setError(e);
+ } catch (IOException e) {
+ request.setError(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.VerifyWorkspaceRequest)
+ */
+ @Override
+ public void process( VerifyWorkspaceRequest request ) {
+ LuceneSearchSession session = getSessionFor(request, request.workspaceName(), false);
+ if (session == null) return;
+ try {
+ request.setActualWorkspaceName(session.getWorkspaceName());
+ request.setActualRootLocation(session.getLocationFor(pathFactory.createRootPath()));
+ } catch (IOException e) {
+ request.setError(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.GetWorkspacesRequest)
+ */
+ @Override
+ public void process( GetWorkspacesRequest request ) {
+ Set<String> names = new HashSet<String>();
+ for (LuceneSearchWorkspace workspace : workspaces.getWorkspaces()) {
+ names.add(workspace.getWorkspaceName());
+ }
+ request.setAvailableWorkspaceNames(names);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CreateNodeRequest)
+ */
+ @Override
+ public void process( CreateNodeRequest request ) {
+ // Get the session to the workspace ...
+ LuceneSearchSession session = getSessionFor(request, request.inWorkspace(), true);
+ if (session == null) return;
+
+ // We make a big assumption here: the CreateNodeRequests created by the SearchEngineProcessor have the
+ // actual locations set ...
+ Location location = request.getActualLocationOfNode();
+ assert location != null;
+
+ // NOTE(review): the Logger lookup could be cached in a field rather than performed per request.
+ Logger logger = Logger.getLogger(getClass());
+ if (logger.isTraceEnabled()) {
+ logger.trace("indexing {0} in workspace \"{1}\"",
+ location.getString(getExecutionContext().getNamespaceRegistry()),
+ request.inWorkspace());
+ }
+
+ try {
+ // Create a separate document for the path, which makes it easier to handle moves since the path can
+ // be changed without changing any other content fields ...
+ Document doc = new Document();
+ String idStr = createPathDocument(location, doc);
+ session.getPathsWriter().addDocument(doc);
+
+ // Now set the content ...
+ session.setOrReplaceProperties(idStr, request.properties());
+ } catch (IOException e) {
+ request.setError(e);
+ }
+ }
+
+ // Populate 'doc' with the path-index fields for 'location' and return the node's ID string.
+ protected String createPathDocument( Location location,
+ Document doc ) {
+ // If the location carries no UUID, generate an artificial one so the document still has a unique ID ...
+ UUID uuid = location.getUuid();
+ if (uuid == null) uuid = UUID.randomUUID();
+ Path path = location.getPath();
+ String idStr = stringFactory.create(uuid);
+ String pathStr = pathAsString(path);
+ String nameStr = path.isRoot() ? "" : stringFactory.create(path.getLastSegment().getName());
+ String localNameStr = path.isRoot() ? "" : path.getLastSegment().getName().getLocalName();
+ int sns = path.isRoot() ? 1 : path.getLastSegment().getIndex();
+
+ // Create a separate document for the path, which makes it easier to handle moves since the path can
+ // be changed without changing any other content fields ...
+ doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field(PathIndex.NODE_NAME, nameStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field(PathIndex.LOCAL_NAME, localNameStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new NumericField(PathIndex.SNS_INDEX, Field.Store.YES, true).setIntValue(sns));
+ doc.add(new Field(PathIndex.ID, idStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new NumericField(PathIndex.DEPTH, Field.Store.YES, true).setIntValue(path.size()));
+ addIdProperties(location, doc);
+ return idStr;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.UpdatePropertiesRequest)
+ */
+ @Override
+ public void process( UpdatePropertiesRequest request ) {
+ // Get the session to the workspace ...
+ LuceneSearchSession session = getSessionFor(request, request.inWorkspace(), true);
+ if (session == null) return;
+
+ Location location = request.getActualLocationOfNode();
+ assert location != null;
+ UUID uuid = location.getUuid();
+
+ try {
+ // If we're updating the root properties, make sure there is a document in the path index ...
+ String idStr = null;
+ if (location.getPath() != null && location.getPath().isRoot()) {
+ Document doc = new Document();
+ idStr = createPathDocument(location, doc);
+ session.getPathsWriter().addDocument(doc);
+ } else if (uuid != null) {
+ idStr = stringFactory.create(uuid);
+ } else {
+ // Need to look up the id string ...
+ idStr = session.getIdFor(location.getPath());
+ }
+
+ // We make a big assumption here: the UpdatePropertiesRequest created by the SearchEngineProcessor have the
+ // actual locations set ...
+ session.setOrReplaceProperties(idStr, request.properties().values());
+ } catch (IOException e) {
+ request.setError(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.DeleteBranchRequest)
+ */
+ @Override
+ public void process( DeleteBranchRequest request ) {
+ LuceneSearchSession session = getSessionFor(request, request.inWorkspace());
+ if (session == null) return;
+
+ Path path = request.at().getPath();
+ assert !readOnly;
+ try {
+ Query query = null;
+ if (path.isRoot()) {
+ query = new MatchAllDocsQuery();
+ } else {
+ // Create a query to find all the nodes at or below the specified path ...
+ Set<String> ids = session.getIdsForDescendantsOf(path, true);
+ query = session.findAllNodesWithIds(ids);
+ }
+ // Now delete the documents from each index using this query, which we can reuse ...
+ session.getPathsWriter().deleteDocuments(query);
+ session.getContentWriter().deleteDocuments(query);
+ } catch (FileNotFoundException e) {
+ // There are no index files yet, so nothing to delete ...
+ } catch (IOException e) {
+ request.setError(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.DestroyWorkspaceRequest)
+ */
+ @Override
+ public void process( DestroyWorkspaceRequest request ) {
+ LuceneSearchWorkspace workspace = getWorkspace(request, request.workspaceName(), false);
+ if (workspace == null) return;
+ try {
+ LuceneSearchSession session = getSessionFor(request, workspace.getWorkspaceName());
+ request.setActualRootLocation(session.getLocationFor(pathFactory.createRootPath()));
+ workspace.destroy(getExecutionContext());
+ } catch (IOException e) {
+ request.setError(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Locks are not represented in the search indexes, so this simply echoes the location back.
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.LockBranchRequest)
+ */
+ @Override
+ public void process( LockBranchRequest request ) {
+ request.setActualLocation(request.at());
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Locks are not represented in the search indexes, so this simply echoes the location back.
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.UnlockBranchRequest)
+ */
+ @Override
+ public void process( UnlockBranchRequest request ) {
+ request.setActualLocation(request.at());
+ }
+
+ // ------------------------------------------------------------------------------------------------
+ // The request types below are not applicable to a search index; each is delegated to
+ // processUnknownRequest, which presumably records an "unsupported request" error on the
+ // request -- NOTE(review): confirm processUnknownRequest semantics in RequestProcessor.
+ // ------------------------------------------------------------------------------------------------
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CloneBranchRequest)
+ */
+ @Override
+ public void process( CloneBranchRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CloneWorkspaceRequest)
+ */
+ @Override
+ public void process( CloneWorkspaceRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CopyBranchRequest)
+ */
+ @Override
+ public void process( CopyBranchRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CreateWorkspaceRequest)
+ */
+ @Override
+ public void process( CreateWorkspaceRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.MoveBranchRequest)
+ */
+ @Override
+ public void process( MoveBranchRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.ReadAllChildrenRequest)
+ */
+ @Override
+ public void process( ReadAllChildrenRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.ReadAllPropertiesRequest)
+ */
+ @Override
+ public void process( ReadAllPropertiesRequest request ) {
+ super.processUnknownRequest(request);
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchProcessor.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchSession.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchSession.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchSession.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,1550 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriter.MaxFieldLength;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Version;
+import org.jboss.dna.graph.DnaLexicon;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.QueryResults.Statistics;
+import org.jboss.dna.graph.query.model.Length;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.TupleCollector;
+import org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.WorkspaceSession;
+import org.jboss.dna.search.lucene.IndexRules.FieldType;
+import org.jboss.dna.search.lucene.IndexRules.NumericRule;
+import org.jboss.dna.search.lucene.IndexRules.Rule;
+import org.jboss.dna.search.lucene.LuceneSearchWorkspace.ContentIndex;
+import org.jboss.dna.search.lucene.LuceneSearchWorkspace.PathIndex;
+import org.jboss.dna.search.lucene.query.CompareLengthQuery;
+import org.jboss.dna.search.lucene.query.CompareNameQuery;
+import org.jboss.dna.search.lucene.query.ComparePathQuery;
+import org.jboss.dna.search.lucene.query.CompareStringQuery;
+import org.jboss.dna.search.lucene.query.IdsQuery;
+import org.jboss.dna.search.lucene.query.MatchNoneQuery;
+import org.jboss.dna.search.lucene.query.NotQuery;
+
+/**
+ * The {@link WorkspaceSession} implementation for the {@link LuceneSearchEngine}.
+ */
+@NotThreadSafe
+public class LuceneSearchSession implements WorkspaceSession {
+
+ /**
+ * An immutable {@link FieldSelector} that loads only the {@link PathIndex#ID} field of a document
+ * (and stops reading any further fields once it has been found).
+ */
+ protected static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return PathIndex.ID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+ }
+ };
+
+ // Bounds presumably used when building numeric range queries over node depth and
+ // same-name-sibling index -- NOTE(review): confirm where these are consumed.
+ protected static final int MIN_DEPTH = 0;
+ protected static final int MAX_DEPTH = 100;
+ protected static final int MIN_SNS_INDEX = 1;
+ protected static final int MAX_SNS_INDEX = 1000; // assume there won't be more than 1000 same-name-siblings
+
+ private final LuceneSearchWorkspace workspace;
+ protected final LuceneSearchProcessor processor;
+ private final Directory pathsIndexDirectory;
+ private final Directory contentIndexDirectory;
+ // The readers, writers and searchers below are opened lazily and are closed by commit()/rollback() ...
+ private IndexReader pathsReader;
+ private IndexWriter pathsWriter;
+ private IndexSearcher pathsSearcher;
+ private IndexReader contentReader;
+ private IndexWriter contentWriter;
+ private IndexSearcher contentSearcher;
+
+ /**
+ * Create a session over the two Lucene directories (paths and content) of the supplied workspace.
+ *
+ * @param workspace the search workspace; may not be null
+ * @param processor the processor that owns this session; may not be null
+ */
+ protected LuceneSearchSession( LuceneSearchWorkspace workspace,
+ LuceneSearchProcessor processor ) {
+ assert workspace != null;
+ assert processor != null;
+ this.workspace = workspace;
+ this.pathsIndexDirectory = workspace.pathDirectory;
+ this.contentIndexDirectory = workspace.contentDirectory;
+ this.processor = processor;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.WorkspaceSession#getWorkspaceName()
+ */
+ public String getWorkspaceName() {
+ return workspace.getWorkspaceName();
+ }
+
+ /**
+ * Get the workspace against which this session operates.
+ *
+ * @return workspace
+ */
+ public LuceneSearchWorkspace getWorkspace() {
+ return workspace;
+ }
+
+ // Lazily open the paths-index reader; it is opened read-only when the processor is read-only ...
+ protected IndexReader getPathsReader() throws IOException {
+ if (pathsReader == null) {
+ pathsReader = IndexReader.open(pathsIndexDirectory, processor.readOnly);
+ }
+ return pathsReader;
+ }
+
+ // Lazily open the content-index reader; it is opened read-only when the processor is read-only ...
+ protected IndexReader getContentReader() throws IOException {
+ if (contentReader == null) {
+ contentReader = IndexReader.open(contentIndexDirectory, processor.readOnly);
+ }
+ return contentReader;
+ }
+
+ // Lazily open the paths-index writer; callers must only do this in a read-write session ...
+ protected IndexWriter getPathsWriter() throws IOException {
+ assert !processor.readOnly;
+ if (pathsWriter == null) {
+ // Don't overwrite, but create if missing ...
+ pathsWriter = new IndexWriter(pathsIndexDirectory, workspace.analyzer, MaxFieldLength.UNLIMITED);
+ }
+ return pathsWriter;
+ }
+
+ // Lazily open the content-index writer; callers must only do this in a read-write session ...
+ protected IndexWriter getContentWriter() throws IOException {
+ assert !processor.readOnly;
+ if (contentWriter == null) {
+ // Don't overwrite, but create if missing ...
+ contentWriter = new IndexWriter(contentIndexDirectory, workspace.analyzer, MaxFieldLength.UNLIMITED);
+ }
+ return contentWriter;
+ }
+
+ protected IndexSearcher getPathsSearcher() throws IOException {
+ if (pathsSearcher == null) {
+ pathsSearcher = new IndexSearcher(getPathsReader());
+ }
+ return pathsSearcher;
+ }
+
+ public IndexSearcher getContentSearcher() throws IOException {
+ if (contentSearcher == null) {
+ contentSearcher = new IndexSearcher(getContentReader());
+ }
+ return contentSearcher;
+ }
+
+ // True if this session has opened at least one writer, i.e. it may hold pending index changes ...
+ public boolean hasWriters() {
+ return pathsWriter != null || contentWriter != null;
+ }
+
+ // Merge the segments of both indexes; this is a potentially expensive operation ...
+ public boolean optimize() throws IOException {
+ getContentWriter().optimize();
+ getPathsWriter().optimize();
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Close this session's readers and writers. Closing an {@link IndexWriter} flushes and commits its
+ * buffered changes, so closing the writers is what actually commits this session's changes to the
+ * paths and content indexes. Every close is always attempted: the first {@link IOException} and the
+ * first {@link RuntimeException} encountered are remembered and rethrown only after all resources
+ * have been released, with the I/O error taking precedence (wrapped in a {@link LuceneException}).
+ *
+ * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.WorkspaceSession#commit()
+ */
+ public void commit() {
+ IOException ioError = null;
+ RuntimeException runtimeError = null;
+ if (pathsReader != null) {
+ try {
+ pathsReader.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsReader = null;
+ }
+ }
+ if (contentReader != null) {
+ try {
+ contentReader.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentReader = null;
+ }
+ }
+ if (pathsWriter != null) {
+ // No explicit IndexWriter.commit() is needed; close() commits the pending changes ...
+ try {
+ pathsWriter.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ pathsWriter = null;
+ }
+ }
+ if (contentWriter != null) {
+ try {
+ contentWriter.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentWriter = null;
+ }
+ }
+ if (ioError != null) {
+ String msg = LuceneI18n.errorWhileCommittingIndexChanges.text(workspace.getWorkspaceName(),
+ processor.getSourceName(),
+ ioError.getMessage());
+ throw new LuceneException(msg, ioError);
+ }
+ if (runtimeError != null) throw runtimeError;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Discard this session's changes by rolling back (and closing) the writers and closing the readers.
+ * All resources are always released: the first {@link IOException} and the first
+ * {@link RuntimeException} encountered are remembered and rethrown only at the end, with the I/O
+ * error taking precedence (wrapped in a {@link LuceneException}). Previously a failure while
+ * closing a writer overwrote an earlier recorded error, losing the root cause; errors are now
+ * preserved first-wins, consistent with {@link #commit()}.
+ *
+ * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.WorkspaceSession#rollback()
+ */
+ public void rollback() {
+ IOException ioError = null;
+ RuntimeException runtimeError = null;
+ if (pathsReader != null) {
+ try {
+ pathsReader.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsReader = null;
+ }
+ }
+ if (contentReader != null) {
+ try {
+ contentReader.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentReader = null;
+ }
+ }
+ if (pathsWriter != null) {
+ try {
+ pathsWriter.rollback();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ try {
+ pathsWriter.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ pathsWriter = null;
+ }
+ }
+ }
+ if (contentWriter != null) {
+ try {
+ contentWriter.rollback();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ try {
+ contentWriter.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentWriter = null;
+ }
+ }
+ }
+ if (ioError != null) {
+ String msg = LuceneI18n.errorWhileRollingBackIndexChanges.text(workspace.getWorkspaceName(),
+ processor.getSourceName(),
+ ioError.getMessage());
+ throw new LuceneException(msg, ioError);
+ }
+ if (runtimeError != null) throw runtimeError;
+ }
+
+ /**
+ * Execute the supplied full-text search expression against this workspace's content index.
+ *
+ * @param fullTextSearchExpression the Lucene-parseable full-text search expression
+ * @param results the list into which the {location, score} tuples are added
+ * @param maxRows the maximum number of rows to return (after skipping the offset)
+ * @param offset the number of initial rows to skip
+ * @return the statistics for the query, including planning and execution durations
+ * @throws ParseException if the search expression could not be parsed
+ * @throws IOException if there is a problem reading the indexes
+ */
+ protected Statistics search( String fullTextSearchExpression,
+ List<Object[]> results,
+ int maxRows,
+ int offset ) throws ParseException, IOException {
+ // Parse the full-text search and search against the 'fts' field ...
+ long planningStartNanos = System.nanoTime();
+ QueryParser parser = new QueryParser(Version.LUCENE_29, ContentIndex.FULL_TEXT, workspace.analyzer);
+ Query query = parser.parse(fullTextSearchExpression);
+ long planningNanos = System.nanoTime() - planningStartNanos;
+
+ // Execute the search, fetching enough rows to cover the offset.
+ // Bug fix: the execution duration was previously computed as "now minus the planning DURATION"
+ // (a duration, not a timestamp), which produced a bogus near-absolute-clock value; the
+ // execution is now timed from its own start timestamp.
+ long executionStartNanos = System.nanoTime();
+ TopDocs docs = getContentSearcher().search(query, maxRows + offset);
+
+ // Collect the results ...
+ IndexReader contentReader = getContentReader();
+ IndexReader pathReader = getPathsReader();
+ IndexSearcher pathSearcher = getPathsSearcher();
+ ScoreDoc[] scoreDocs = docs.scoreDocs;
+ int numberOfResults = scoreDocs.length;
+ if (numberOfResults > offset) {
+ // There are enough results to satisfy the offset ...
+ for (int i = offset, num = scoreDocs.length; i != num; ++i) {
+ ScoreDoc result = scoreDocs[i];
+ int docId = result.doc;
+ // Find the ID of the node (this ID might be an artificial UUID, so we have to find the path) ...
+ Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
+ String id = doc.get(ContentIndex.ID);
+ Document pathDoc = getPathDocument(id, pathReader, pathSearcher);
+ Location location = readLocation(pathDoc);
+ if (location == null) {
+ // No path record found ...
+ continue;
+ }
+ // Now add the location ...
+ results.add(new Object[] {location, result.score});
+ }
+ }
+ long executionNanos = System.nanoTime() - executionStartNanos;
+ return new Statistics(planningNanos, 0L, 0L, executionNanos);
+ }
+
+ /**
+ * Reconstruct the {@link Location} represented by a document from the paths index: the stored path
+ * plus any serialized identification properties (with the single-UUID case handled specially).
+ *
+ * @param doc the paths-index document
+ * @return the location for the document
+ */
+ protected Location readLocation( Document doc ) {
+ // The path is always stored on the document ...
+ Path nodePath = processor.pathFactory.create(doc.get(PathIndex.PATH));
+ // Look for the Location's ID properties ...
+ String[] serializedIdProps = doc.getValues(PathIndex.LOCATION_ID_PROPERTIES);
+ switch (serializedIdProps.length) {
+ case 0:
+ return Location.create(nodePath);
+ case 1:
+ Property single = processor.deserializeProperty(serializedIdProps[0]);
+ if (single == null) return Location.create(nodePath);
+ boolean isUuidProp = single.getName().equals(JcrLexicon.UUID) || single.getName().equals(DnaLexicon.UUID);
+ if (single.isSingle() && isUuidProp) {
+ return Location.create(nodePath, (UUID)single.getFirstValue()); // know that deserialize returns UUID value
+ }
+ return Location.create(nodePath, single);
+ default:
+ List<Property> idProperties = new LinkedList<Property>();
+ for (String serialized : serializedIdProps) {
+ Property idProperty = processor.deserializeProperty(serialized);
+ if (idProperty != null) idProperties.add(idProperty);
+ }
+ return idProperties.isEmpty() ? Location.create(nodePath) : Location.create(nodePath, idProperties);
+ }
+ }
+
+ /**
+ * Write a content-index document for the node with the given ID, adding one typed Lucene field per
+ * property value according to this workspace's index rules, plus an aggregated full-text field.
+ * <p>
+ * NOTE(review): this method only adds a new document; any prior document for this ID is presumably
+ * removed by the caller or superseded elsewhere -- confirm.
+ *
+ * @param idString the string form of the node's ID (matches the ID stored in the paths index)
+ * @param properties the properties of the node
+ * @throws IOException if there is a problem writing to the content index
+ */
+ protected void setOrReplaceProperties( String idString,
+ Iterable<Property> properties ) throws IOException {
+ // Create the document for the content (properties) ...
+ Document doc = new Document();
+ doc.add(new Field(ContentIndex.ID, idString, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ String stringValue = null;
+ StringBuilder fullTextSearchValue = null;
+ for (Property property : properties) {
+ Name name = property.getName();
+ // The workspace's index rules determine how (and whether) each property is indexed ...
+ Rule rule = workspace.rules.getRule(name);
+ if (rule.isSkipped()) continue;
+ String nameString = processor.stringFactory.create(name);
+ FieldType type = rule.getType();
+ if (type == FieldType.DATE) {
+ boolean index = rule.getIndexOption() != Field.Index.NO;
+ for (Object value : property) {
+ if (value == null) continue;
+ // Add a separate field for each property value ...
+ DateTime dateValue = processor.dateFactory.create(value);
+ long longValue = dateValue.getMillisecondsInUtc();
+ doc.add(new NumericField(nameString, rule.getStoreOption(), index).setLongValue(longValue));
+ }
+ continue;
+ }
+ if (type == FieldType.INT) {
+ ValueFactory<Long> longFactory = processor.valueFactories.getLongFactory();
+ boolean index = rule.getIndexOption() != Field.Index.NO;
+ for (Object value : property) {
+ if (value == null) continue;
+ // Add a separate field for each property value ...
+ int intValue = longFactory.create(value).intValue();
+ doc.add(new NumericField(nameString, rule.getStoreOption(), index).setIntValue(intValue));
+ }
+ continue;
+ }
+ if (type == FieldType.DOUBLE) {
+ ValueFactory<Double> doubleFactory = processor.valueFactories.getDoubleFactory();
+ boolean index = rule.getIndexOption() != Field.Index.NO;
+ for (Object value : property) {
+ if (value == null) continue;
+ // Add a separate field for each property value ...
+ double dValue = doubleFactory.create(value);
+ doc.add(new NumericField(nameString, rule.getStoreOption(), index).setDoubleValue(dValue));
+ }
+ continue;
+ }
+ if (type == FieldType.FLOAT) {
+ // Float values are converted via the double factory and then narrowed ...
+ ValueFactory<Double> doubleFactory = processor.valueFactories.getDoubleFactory();
+ boolean index = rule.getIndexOption() != Field.Index.NO;
+ for (Object value : property) {
+ if (value == null) continue;
+ // Add a separate field for each property value ...
+ float fValue = doubleFactory.create(value).floatValue();
+ doc.add(new NumericField(nameString, rule.getStoreOption(), index).setFloatValue(fValue));
+ }
+ continue;
+ }
+ if (type == FieldType.BINARY) {
+ // TODO : add to full-text search ...
+ continue;
+ }
+ assert type == FieldType.STRING;
+ for (Object value : property) {
+ if (value == null) continue;
+ stringValue = processor.stringFactory.create(value);
+ // Add a separate field for each property value ...
+ doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
+
+ if (rule.getIndexOption() != Field.Index.NO) {
+ // This field is to be full-text searchable ...
+ if (fullTextSearchValue == null) {
+ fullTextSearchValue = new StringBuilder();
+ } else {
+ fullTextSearchValue.append(' ');
+ }
+ fullTextSearchValue.append(stringValue);
+
+ // Also create a full-text-searchable field ...
+ String fullTextNameString = processor.fullTextFieldName(nameString);
+ doc.add(new Field(fullTextNameString, stringValue, Store.NO, Index.ANALYZED));
+ }
+ }
+ }
+ // Add the full-text-search field ...
+ if (fullTextSearchValue != null && fullTextSearchValue.length() != 0) {
+ doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO, Field.Index.ANALYZED));
+ }
+ getContentWriter().addDocument(doc);
+ }
+
+ protected Document getPathDocument( String id,
+ IndexReader pathReader,
+ IndexSearcher pathSearcher ) throws IOException {
+ // Find the path for this node (is there a better way to do this than one search per ID?) ...
+ TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.ID, id)), 1);
+ if (pathDocs.scoreDocs.length < 1) {
+ // No path record found ...
+ return null;
+ }
+ return pathReader.document(pathDocs.scoreDocs[0].doc);
+ }
+
+ /**
+ * Get the set of IDs for the children of the node at the given path.
+ *
+ * @param parentPath the path to the parent node; may not be null
+ * @return the doc IDs of the child nodes; never null but possibly empty
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected Set<String> getIdsForChildrenOf( Path parentPath ) throws IOException {
+ // Find the path of the parent ...
+ String stringifiedPath = processor.pathAsString(parentPath);
+ // Append a '/' to the parent path, so we'll only get decendants ...
+ stringifiedPath = stringifiedPath + '/';
+
+ // Create a query to find all the nodes below the parent path ...
+ Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
+ // Include only the children ...
+ int childrenDepth = parentPath.size() + 1;
+ Query depthQuery = NumericRangeQuery.newIntRange(PathIndex.DEPTH, childrenDepth, childrenDepth, true, true);
+ // And combine ...
+ BooleanQuery combinedQuery = new BooleanQuery();
+ combinedQuery.add(query, Occur.MUST);
+ combinedQuery.add(depthQuery, Occur.MUST);
+ query = combinedQuery;
+
+ // Now execute and collect the IDs ...
+ IdCollector idCollector = new IdCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, idCollector);
+ return idCollector.getIds();
+ }
+
+ /**
+ * Get the set of IDs for the nodes that are descendants of the node at the given path.
+ *
+ * @param parentPath the path to the parent node; may not be null and <i>may not be the root node</i>
+ * @param includeParent true if the parent node should be included in the results, or false if only the descendants should be
+ * included
+ * @return the IDs of the nodes; never null but possibly empty
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected Set<String> getIdsForDescendantsOf( Path parentPath,
+ boolean includeParent ) throws IOException {
+ assert !parentPath.isRoot();
+
+ // Find the path of the parent ...
+ String stringifiedPath = processor.pathAsString(parentPath);
+ if (!includeParent) {
+ // Append a '/' to the parent path, and we'll only get decendants ...
+ stringifiedPath = stringifiedPath + '/';
+ }
+
+ // Create a prefix query ...
+ Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
+
+ // Now execute and collect the IDs ...
+ IdCollector idCollector = new IdCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, idCollector);
+ return idCollector.getIds();
+ }
+
+ /**
+ * Get the set containing the single ID for the node at the given path.
+ *
+ * @param path the path to the node; may not be null
+ * @return the ID of the supplied node; or null if the node cannot be found
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected String getIdFor( Path path ) throws IOException {
+ // Create a query to find all the nodes below the parent path ...
+ IndexSearcher searcher = getPathsSearcher();
+ String stringifiedPath = processor.pathAsString(path);
+ TermQuery query = new TermQuery(new Term(PathIndex.PATH, stringifiedPath));
+
+ // Now execute and collect the UUIDs ...
+ TopDocs topDocs = searcher.search(query, 1);
+ if (topDocs.totalHits == 0) return null;
+ Document pathDoc = getPathsReader().document(topDocs.scoreDocs[0].doc);
+ String idString = pathDoc.get(PathIndex.ID);
+ assert idString != null;
+ return idString;
+ }
+
+ protected Location getLocationFor( Path path ) throws IOException {
+ // Create a query to find all the nodes below the parent path ...
+ IndexSearcher searcher = getPathsSearcher();
+ String stringifiedPath = processor.pathAsString(path);
+ TermQuery query = new TermQuery(new Term(PathIndex.PATH, stringifiedPath));
+
+ // Now execute and collect the UUIDs ...
+ TopDocs topDocs = searcher.search(query, 1);
+ if (topDocs.totalHits == 0) return null;
+ Document pathDoc = getPathsReader().document(topDocs.scoreDocs[0].doc);
+ return readLocation(pathDoc);
+ }
+
/**
 * {@inheritDoc}
 * <p>
 * Delegates to a {@link DualIndexTupleCollector} bound to this session and the supplied columns.
 *
 * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.WorkspaceSession#createTupleCollector(org.jboss.dna.graph.query.QueryResults.Columns)
 */
public TupleCollector createTupleCollector( Columns columns ) {
    return new DualIndexTupleCollector(this, columns);
}
+
+ public Query findAllNodesWithIds( Set<String> ids ) {
+ if (ids.isEmpty()) {
+ // There are no children, so return a null query ...
+ return new MatchNoneQuery();
+ }
+ if (ids.size() == 1) {
+ String id = ids.iterator().next();
+ if (id == null) return new MatchNoneQuery();
+ return new TermQuery(new Term(ContentIndex.ID, id));
+ }
+ if (ids.size() < 50) {
+ // Create an OR boolean query for all the UUIDs, since this is probably more efficient ...
+ BooleanQuery query = new BooleanQuery();
+ for (String id : ids) {
+ Query uuidQuery = new TermQuery(new Term(ContentIndex.ID, id));
+ query.add(uuidQuery, Occur.SHOULD);
+ }
+ return query;
+ }
+ // Return a query that will always find all of the UUIDs ...
+ return new IdsQuery(ContentIndex.ID, ids);
+ }
+
+ public Query findAllNodesBelow( Path ancestorPath ) throws IOException {
+ if (ancestorPath.isRoot()) {
+ return new MatchAllDocsQuery();
+ }
+ Set<String> ids = getIdsForDescendantsOf(ancestorPath, false);
+ return findAllNodesWithIds(ids);
+ }
+
+ /**
+ * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
+ * supplied path.
+ *
+ * @param parentPath the path of the parent node.
+ * @return the query; never null
+ * @throws IOException if there is an error finding the UUIDs of the child nodes
+ */
+ public Query findChildNodes( Path parentPath ) throws IOException {
+ if (parentPath.isRoot()) {
+ return new MatchAllDocsQuery();
+ }
+ Set<String> childIds = getIdsForChildrenOf(parentPath);
+ return findAllNodesWithIds(childIds);
+ }
+
+ /**
+ * Create a query that can be used to find the one document (or node) that exists at the exact path supplied. This method
+ * first queries the {@link PathIndex path index} to find the ID of the node at the supplied path, and then returns a query
+ * that matches the ID.
+ *
+ * @param path the path of the node
+ * @return the query; never null
+ * @throws IOException if there is an error finding the ID for the supplied path
+ */
+ public Query findNodeAt( Path path ) throws IOException {
+ String id = getIdFor(path);
+ if (id == null) return null;
+ return new TermQuery(new Term(ContentIndex.ID, id));
+ }
+
+ public Query findNodesLike( String fieldName,
+ String likeExpression,
+ boolean caseSensitive ) {
+ ValueFactories factories = processor.valueFactories;
+ return CompareStringQuery.createQueryForNodesWithFieldLike(likeExpression, fieldName, factories, caseSensitive);
+ }
+
/**
 * Create a query that matches nodes whose named property has a value length satisfying the
 * supplied operator and length value.
 *
 * @param propertyLength the length constraint naming the property; may not be null
 * @param operator the comparison operator; may not be null (LIKE is not allowed)
 * @param value the length to compare against; may not be null
 * @return the query, or null if the operator is LIKE (which is not allowed for lengths)
 */
public Query findNodesWith( Length propertyLength,
                            Operator operator,
                            Object value ) {
    assert propertyLength != null;
    assert value != null;
    PropertyValue propertyValue = propertyLength.getPropertyValue();
    String field = processor.stringFactory.create(propertyValue.getPropertyName());
    ValueFactories factories = processor.valueFactories;
    // Coerce the supplied value to an integer length ...
    int length = factories.getLongFactory().create(value).intValue();
    switch (operator) {
        case EQUAL_TO:
            return CompareLengthQuery.createQueryForNodesWithFieldEqualTo(length, field, factories);
        case NOT_EQUAL_TO:
            return CompareLengthQuery.createQueryForNodesWithFieldNotEqualTo(length, field, factories);
        case GREATER_THAN:
            return CompareLengthQuery.createQueryForNodesWithFieldGreaterThan(length, field, factories);
        case GREATER_THAN_OR_EQUAL_TO:
            return CompareLengthQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(length, field, factories);
        case LESS_THAN:
            return CompareLengthQuery.createQueryForNodesWithFieldLessThan(length, field, factories);
        case LESS_THAN_OR_EQUAL_TO:
            return CompareLengthQuery.createQueryForNodesWithFieldLessThanOrEqualTo(length, field, factories);
        case LIKE:
            // This is not allowed ...
            assert false;
            break;
    }
    return null;
}
+
/**
 * Create a query that matches nodes whose named property satisfies the supplied operator and
 * value. The property's index {@link IndexRules.Rule rule} determines the field type, and the
 * supplied value is coerced to that type before the comparison query is built.
 *
 * @param propertyValue the operand naming the property; may not be null
 * @param operator the comparison operator; may not be null
 * @param value the value to compare against; may not be null
 * @param caseSensitive true if string comparisons should be case-sensitive
 * @return the query, or null for operator/type combinations that are not allowed
 *         (e.g. LIKE on numeric or binary fields)
 */
@SuppressWarnings( "unchecked" )
public Query findNodesWith( PropertyValue propertyValue,
                            Operator operator,
                            Object value,
                            boolean caseSensitive ) {
    ValueFactory<String> stringFactory = processor.stringFactory;
    String field = stringFactory.create(propertyValue.getPropertyName());
    Name fieldName = processor.nameFactory.create(propertyValue.getPropertyName());
    ValueFactories factories = processor.valueFactories;
    // A property that is not indexed can never match anything ...
    IndexRules.Rule rule = workspace.rules.getRule(fieldName);
    if (rule == null || rule.isSkipped()) return new MatchNoneQuery();
    FieldType type = rule.getType();
    switch (type) {
        case STRING:
            String stringValue = stringFactory.create(value);
            // Paths are indexed in their stringified form, so convert before comparing ...
            if (value instanceof Path) {
                stringValue = processor.pathAsString((Path)value);
            }
            if (!caseSensitive) stringValue = stringValue.toLowerCase();
            switch (operator) {
                case EQUAL_TO:
                    return CompareStringQuery.createQueryForNodesWithFieldEqualTo(stringValue,
                                                                                  field,
                                                                                  factories,
                                                                                  caseSensitive);
                case NOT_EQUAL_TO:
                    Query query = CompareStringQuery.createQueryForNodesWithFieldEqualTo(stringValue,
                                                                                         field,
                                                                                         factories,
                                                                                         caseSensitive);
                    return new NotQuery(query);
                case GREATER_THAN:
                    return CompareStringQuery.createQueryForNodesWithFieldGreaterThan(stringValue,
                                                                                      field,
                                                                                      factories,
                                                                                      caseSensitive);
                case GREATER_THAN_OR_EQUAL_TO:
                    return CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(stringValue,
                                                                                               field,
                                                                                               factories,
                                                                                               caseSensitive);
                case LESS_THAN:
                    return CompareStringQuery.createQueryForNodesWithFieldLessThan(stringValue,
                                                                                   field,
                                                                                   factories,
                                                                                   caseSensitive);
                case LESS_THAN_OR_EQUAL_TO:
                    return CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(stringValue,
                                                                                            field,
                                                                                            factories,
                                                                                            caseSensitive);
                case LIKE:
                    return findNodesLike(field, stringValue, caseSensitive);
            }
            break;
        case DATE:
            // Dates are indexed as milliseconds-in-UTC longs; range bounds come from the rule ...
            NumericRule<Long> longRule = (NumericRule<Long>)rule;
            long date = factories.getLongFactory().create(value);
            switch (operator) {
                case EQUAL_TO:
                    return NumericRangeQuery.newLongRange(field, date, date, true, true);
                case NOT_EQUAL_TO:
                    Query query = NumericRangeQuery.newLongRange(field, date, date, true, true);
                    return new NotQuery(query);
                case GREATER_THAN:
                    return NumericRangeQuery.newLongRange(field, date, longRule.getMaximum(), false, true);
                case GREATER_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newLongRange(field, date, longRule.getMaximum(), true, true);
                case LESS_THAN:
                    return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), date, true, false);
                case LESS_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), date, true, true);
                case LIKE:
                    // This is not allowed ...
                    assert false;
                    return null;
            }
            break;
        case LONG:
            longRule = (NumericRule<Long>)rule;
            long longValue = factories.getLongFactory().create(value);
            switch (operator) {
                case EQUAL_TO:
                    return NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
                case NOT_EQUAL_TO:
                    Query query = NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
                    return new NotQuery(query);
                case GREATER_THAN:
                    return NumericRangeQuery.newLongRange(field, longValue, longRule.getMaximum(), false, true);
                case GREATER_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newLongRange(field, longValue, longRule.getMaximum(), true, true);
                case LESS_THAN:
                    return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), longValue, true, false);
                case LESS_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newLongRange(field, longRule.getMinimum(), longValue, true, true);
                case LIKE:
                    // This is not allowed ...
                    assert false;
                    return null;
            }
            break;
        case INT:
            NumericRule<Integer> intRule = (NumericRule<Integer>)rule;
            int intValue = factories.getLongFactory().create(value).intValue();
            switch (operator) {
                case EQUAL_TO:
                    return NumericRangeQuery.newIntRange(field, intValue, intValue, true, true);
                case NOT_EQUAL_TO:
                    Query query = NumericRangeQuery.newIntRange(field, intValue, intValue, true, true);
                    return new NotQuery(query);
                case GREATER_THAN:
                    return NumericRangeQuery.newIntRange(field, intValue, intRule.getMaximum(), false, true);
                case GREATER_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newIntRange(field, intValue, intRule.getMaximum(), true, true);
                case LESS_THAN:
                    return NumericRangeQuery.newIntRange(field, intRule.getMinimum(), intValue, true, false);
                case LESS_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newIntRange(field, intRule.getMinimum(), intValue, true, true);
                case LIKE:
                    // This is not allowed ...
                    assert false;
                    return null;
            }
            break;
        case DOUBLE:
            NumericRule<Double> dRule = (NumericRule<Double>)rule;
            double doubleValue = factories.getDoubleFactory().create(value);
            switch (operator) {
                case EQUAL_TO:
                    return NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
                case NOT_EQUAL_TO:
                    Query query = NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
                    return new NotQuery(query);
                case GREATER_THAN:
                    return NumericRangeQuery.newDoubleRange(field, doubleValue, dRule.getMaximum(), false, true);
                case GREATER_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newDoubleRange(field, doubleValue, dRule.getMaximum(), true, true);
                case LESS_THAN:
                    return NumericRangeQuery.newDoubleRange(field, dRule.getMinimum(), doubleValue, true, false);
                case LESS_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newDoubleRange(field, dRule.getMinimum(), doubleValue, true, true);
                case LIKE:
                    // This is not allowed ...
                    assert false;
                    return null;
            }
            break;
        case FLOAT:
            NumericRule<Float> fRule = (NumericRule<Float>)rule;
            float floatValue = factories.getDoubleFactory().create(value).floatValue();
            switch (operator) {
                case EQUAL_TO:
                    return NumericRangeQuery.newFloatRange(field, floatValue, floatValue, true, true);
                case NOT_EQUAL_TO:
                    Query query = NumericRangeQuery.newFloatRange(field, floatValue, floatValue, true, true);
                    return new NotQuery(query);
                case GREATER_THAN:
                    return NumericRangeQuery.newFloatRange(field, floatValue, fRule.getMaximum(), false, true);
                case GREATER_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newFloatRange(field, floatValue, fRule.getMaximum(), true, true);
                case LESS_THAN:
                    return NumericRangeQuery.newFloatRange(field, fRule.getMinimum(), floatValue, true, false);
                case LESS_THAN_OR_EQUAL_TO:
                    return NumericRangeQuery.newFloatRange(field, fRule.getMinimum(), floatValue, true, true);
                case LIKE:
                    // This is not allowed ...
                    assert false;
                    return null;
            }
            break;
        case BOOLEAN:
            // Booleans are indexed via their string form; ordering treats true > false ...
            boolean booleanValue = factories.getBooleanFactory().create(value);
            stringValue = stringFactory.create(value);
            switch (operator) {
                case EQUAL_TO:
                    return new TermQuery(new Term(field, stringValue));
                case NOT_EQUAL_TO:
                    return new TermQuery(new Term(field, stringFactory.create(!booleanValue)));
                case GREATER_THAN:
                    if (!booleanValue) {
                        return new TermQuery(new Term(field, stringFactory.create(true)));
                    }
                    // Can't be greater than 'true', per JCR spec
                    return new MatchNoneQuery();
                case GREATER_THAN_OR_EQUAL_TO:
                    return new TermQuery(new Term(field, stringFactory.create(true)));
                case LESS_THAN:
                    if (booleanValue) {
                        return new TermQuery(new Term(field, stringFactory.create(false)));
                    }
                    // Can't be less than 'false', per JCR spec
                    return new MatchNoneQuery();
                case LESS_THAN_OR_EQUAL_TO:
                    return new TermQuery(new Term(field, stringFactory.create(false)));
                case LIKE:
                    // This is not allowed ...
                    assert false;
                    return null;
            }
            break;
        case BINARY:
            // This is not allowed ...
            assert false;
            return null;
    }
    return null;
}
+
+ public Query findNodesWithNumericRange( PropertyValue propertyValue,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ String field = processor.stringFactory.create(propertyValue.getPropertyName());
+ return findNodesWithNumericRange(field, lowerValue, upperValue, includesLower, includesUpper);
+ }
+
+ public Query findNodesWithNumericRange( NodeDepth depth,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ return findNodesWithNumericRange(PathIndex.DEPTH, lowerValue, upperValue, includesLower, includesUpper);
+ }
+
/**
 * Create a query that matches nodes whose value in the named field falls within the supplied
 * numeric range. The field's index rule determines the numeric type the bounds are coerced to.
 *
 * @param field the name of the Lucene field
 * @param lowerValue the lower bound of the range
 * @param upperValue the upper bound of the range
 * @param includesLower true if the lower bound is inclusive
 * @param includesUpper true if the upper bound is inclusive
 * @return the query; never null (a {@link MatchNoneQuery} is returned for unindexed fields
 *         and for non-numeric field types)
 */
protected Query findNodesWithNumericRange( String field,
                                           Object lowerValue,
                                           Object upperValue,
                                           boolean includesLower,
                                           boolean includesUpper ) {
    Name fieldName = processor.nameFactory.create(field);
    // A field that is not indexed can never match anything ...
    IndexRules.Rule rule = workspace.rules.getRule(fieldName);
    if (rule == null || rule.isSkipped()) return new MatchNoneQuery();
    FieldType type = rule.getType();
    ValueFactories factories = processor.valueFactories;
    switch (type) {
        case DATE:
            // Dates are indexed as milliseconds-in-UTC longs ...
            long lowerDate = factories.getLongFactory().create(lowerValue);
            long upperDate = factories.getLongFactory().create(upperValue);
            return NumericRangeQuery.newLongRange(field, lowerDate, upperDate, includesLower, includesUpper);
        case LONG:
            long lowerLong = factories.getLongFactory().create(lowerValue);
            long upperLong = factories.getLongFactory().create(upperValue);
            return NumericRangeQuery.newLongRange(field, lowerLong, upperLong, includesLower, includesUpper);
        case DOUBLE:
            double lowerDouble = factories.getDoubleFactory().create(lowerValue);
            double upperDouble = factories.getDoubleFactory().create(upperValue);
            return NumericRangeQuery.newDoubleRange(field, lowerDouble, upperDouble, includesLower, includesUpper);
        case FLOAT:
            float lowerFloat = factories.getDoubleFactory().create(lowerValue).floatValue();
            float upperFloat = factories.getDoubleFactory().create(upperValue).floatValue();
            return NumericRangeQuery.newFloatRange(field, lowerFloat, upperFloat, includesLower, includesUpper);
        case INT:
            int lowerInt = factories.getLongFactory().create(lowerValue).intValue();
            int upperInt = factories.getLongFactory().create(upperValue).intValue();
            return NumericRangeQuery.newIntRange(field, lowerInt, upperInt, includesLower, includesUpper);
        case BOOLEAN:
            // Booleans are range-compared as 0 (false) and 1 (true) ...
            lowerInt = factories.getBooleanFactory().create(lowerValue).booleanValue() ? 1 : 0;
            upperInt = factories.getBooleanFactory().create(upperValue).booleanValue() ? 1 : 0;
            return NumericRangeQuery.newIntRange(field, lowerInt, upperInt, includesLower, includesUpper);
        case STRING:
        case BINARY:
            // Numeric ranges are not defined for these types ...
            assert false;
    }
    return new MatchNoneQuery();
}
+
+ public Query findNodesWith( NodePath nodePath,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ if (!caseSensitive) value = processor.stringFactory.create(value).toLowerCase();
+ Path pathValue = operator != Operator.LIKE ? processor.pathFactory.create(value) : null;
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ return findNodeAt(pathValue);
+ case NOT_EQUAL_TO:
+ return new NotQuery(findNodeAt(pathValue));
+ case LIKE:
+ String likeExpression = processor.stringFactory.create(value);
+ query = findNodesLike(PathIndex.PATH, likeExpression, caseSensitive);
+ break;
+ case GREATER_THAN:
+ query = ComparePathQuery.createQueryForNodesWithPathGreaterThan(pathValue,
+ PathIndex.PATH,
+ processor.valueFactories,
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = ComparePathQuery.createQueryForNodesWithPathGreaterThanOrEqualTo(pathValue,
+ PathIndex.PATH,
+ processor.valueFactories,
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = ComparePathQuery.createQueryForNodesWithPathLessThan(pathValue,
+ PathIndex.PATH,
+ processor.valueFactories,
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = ComparePathQuery.createQueryForNodesWithPathLessThanOrEqualTo(pathValue,
+ PathIndex.PATH,
+ processor.valueFactories,
+ caseSensitive);
+ break;
+ }
+ // Now execute and collect the IDs ...
+ IdCollector idCollector = new IdCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, idCollector);
+ return findAllNodesWithIds(idCollector.getIds());
+ }
+
+ public Query findNodesWith( NodeName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ ValueFactories factories = processor.valueFactories;
+ String stringValue = processor.stringFactory.create(value);
+ if (!caseSensitive) stringValue = stringValue.toLowerCase();
+ Path.Segment segment = operator != Operator.LIKE ? processor.pathFactory.createSegment(stringValue) : null;
+ int snsIndex = operator != Operator.LIKE ? segment.getIndex() : 0;
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(new TermQuery(new Term(PathIndex.NODE_NAME, stringValue)), Occur.MUST);
+ booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false), Occur.MUST);
+ return booleanQuery;
+ case NOT_EQUAL_TO:
+ booleanQuery = new BooleanQuery();
+ booleanQuery.add(new TermQuery(new Term(PathIndex.NODE_NAME, stringValue)), Occur.MUST);
+ booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false), Occur.MUST);
+ return new NotQuery(booleanQuery);
+ case GREATER_THAN:
+ query = CompareNameQuery.createQueryForNodesWithNameGreaterThan(segment,
+ PathIndex.NODE_NAME,
+ PathIndex.SNS_INDEX,
+ factories,
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = CompareNameQuery.createQueryForNodesWithNameGreaterThanOrEqualTo(segment,
+ PathIndex.NODE_NAME,
+ PathIndex.SNS_INDEX,
+ factories,
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = CompareNameQuery.createQueryForNodesWithNameLessThan(segment,
+ PathIndex.NODE_NAME,
+ PathIndex.SNS_INDEX,
+ factories,
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = CompareNameQuery.createQueryForNodesWithNameLessThanOrEqualTo(segment,
+ PathIndex.NODE_NAME,
+ PathIndex.SNS_INDEX,
+ factories,
+ caseSensitive);
+ break;
+ case LIKE:
+ // See whether the like expression has brackets ...
+ String likeExpression = stringValue;
+ int openBracketIndex = likeExpression.indexOf('[');
+ if (openBracketIndex != -1) {
+ String localNameExpression = likeExpression.substring(0, openBracketIndex);
+ String snsIndexExpression = likeExpression.substring(openBracketIndex);
+ Query localNameQuery = CompareStringQuery.createQueryForNodesWithFieldLike(localNameExpression,
+ PathIndex.NODE_NAME,
+ factories,
+ caseSensitive);
+ Query snsQuery = createSnsIndexQuery(snsIndexExpression);
+ if (localNameQuery == null) {
+ if (snsQuery == null) {
+ query = new MatchNoneQuery();
+ } else {
+ // There is just an SNS part ...
+ query = snsQuery;
+ }
+ } else {
+ // There is a local name part ...
+ if (snsQuery == null) {
+ query = localNameQuery;
+ } else {
+ // There is both a local name part and a SNS part ...
+ booleanQuery = new BooleanQuery();
+ booleanQuery.add(localNameQuery, Occur.MUST);
+ booleanQuery.add(snsQuery, Occur.MUST);
+ query = booleanQuery;
+ }
+ }
+ } else {
+ // There is no SNS expression ...
+ query = CompareStringQuery.createQueryForNodesWithFieldLike(likeExpression,
+ PathIndex.NODE_NAME,
+ factories,
+ caseSensitive);
+ }
+ assert query != null;
+ break;
+ }
+
+ // Now execute and collect the IDs ...
+ IdCollector idCollector = new IdCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, idCollector);
+ return findAllNodesWithIds(idCollector.getIds());
+ }
+
/**
 * Create a query that matches nodes whose local name satisfies the supplied operator and value.
 * A path-index query over the LOCAL_NAME field is executed, and the matching IDs are converted
 * into a content-index query.
 *
 * @param nodeName the local-name operand; may not be null
 * @param operator the comparison operator; may not be null
 * @param value the local-name value to compare against; may not be null
 * @param caseSensitive true if the comparison should be case-sensitive; the value is passed
 *        through to the comparison-query factories (the value itself is not lowercased here —
 *        presumably the factories handle folding; verify against CompareStringQuery)
 * @return the query; never null
 * @throws IOException if there is an error accessing the indexes
 */
public Query findNodesWith( NodeLocalName nodeName,
                            Operator operator,
                            Object value,
                            boolean caseSensitive ) throws IOException {
    String nameValue = processor.stringFactory.create(value);
    Query query = null;
    switch (operator) {
        case LIKE:
            String likeExpression = processor.stringFactory.create(value);
            query = findNodesLike(PathIndex.LOCAL_NAME, likeExpression, caseSensitive);
            break;
        case EQUAL_TO:
            query = CompareStringQuery.createQueryForNodesWithFieldEqualTo(nameValue,
                                                                           PathIndex.LOCAL_NAME,
                                                                           processor.valueFactories,
                                                                           caseSensitive);
            break;
        case NOT_EQUAL_TO:
            // Build the equality query and negate it ...
            query = CompareStringQuery.createQueryForNodesWithFieldEqualTo(nameValue,
                                                                           PathIndex.LOCAL_NAME,
                                                                           processor.valueFactories,
                                                                           caseSensitive);
            query = new NotQuery(query);
            break;
        case GREATER_THAN:
            query = CompareStringQuery.createQueryForNodesWithFieldGreaterThan(nameValue,
                                                                               PathIndex.LOCAL_NAME,
                                                                               processor.valueFactories,
                                                                               caseSensitive);
            break;
        case GREATER_THAN_OR_EQUAL_TO:
            query = CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(nameValue,
                                                                                        PathIndex.LOCAL_NAME,
                                                                                        processor.valueFactories,
                                                                                        caseSensitive);
            break;
        case LESS_THAN:
            query = CompareStringQuery.createQueryForNodesWithFieldLessThan(nameValue,
                                                                            PathIndex.LOCAL_NAME,
                                                                            processor.valueFactories,
                                                                            caseSensitive);
            break;
        case LESS_THAN_OR_EQUAL_TO:
            query = CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(nameValue,
                                                                                     PathIndex.LOCAL_NAME,
                                                                                     processor.valueFactories,
                                                                                     caseSensitive);
            break;
    }

    // Now execute against the path index, and convert the matching IDs into a content query ...
    IdCollector idCollector = new IdCollector();
    IndexSearcher searcher = getPathsSearcher();
    searcher.search(query, idCollector);
    return findAllNodesWithIds(idCollector.getIds());
}
+
+ public Query findNodesWith( NodeDepth depthConstraint,
+ Operator operator,
+ Object value ) throws IOException {
+ int depth = processor.valueFactories.getLongFactory().create(value).intValue();
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
+ break;
+ case NOT_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
+ query = new NotQuery(query);
+ break;
+ case GREATER_THAN:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, false, true);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, true, true);
+ break;
+ case LESS_THAN:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, false);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, true);
+ break;
+ case LIKE:
+ // This is not allowed ...
+ return null;
+ }
+
+ // Now execute and collect the IDs ...
+ IdCollector idCollector = new IdCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, idCollector);
+ return findAllNodesWithIds(idCollector.getIds());
+ }
+
+ protected Query createLocalNameQuery( String likeExpression,
+ boolean caseSensitive ) {
+ if (likeExpression == null) return null;
+ return CompareStringQuery.createQueryForNodesWithFieldLike(likeExpression,
+ PathIndex.LOCAL_NAME,
+ processor.valueFactories,
+ caseSensitive);
+ }
+
/**
 * Utility method to generate a query against the SNS indexes. This method attempts to generate a query that works most
 * efficiently, depending upon the supplied expression. For example, if the supplied expression is just "[3]", then a range
 * query is used to find all values matching '3'. However, if "[3_]" is used (where '_' matches any single-character, or digit
 * in this case), then a range query is used to find all values between '30' and '39'. Similarly, if "[3%]" is used, then a
 * regular expression query is used.
 *
 * @param likeExpression the expression that uses the JCR 2.0 LIKE representation, and which includes the leading '[' and
 *        trailing ']' characters
 * @return the query, or null if the expression cannot be represented as a query
 */
protected Query createSnsIndexQuery( String likeExpression ) {
    if (likeExpression == null) return null;
    likeExpression = likeExpression.trim();
    if (likeExpression.length() == 0) return null;

    // Remove the leading '[' ...
    assert likeExpression.charAt(0) == '[';
    likeExpression = likeExpression.substring(1);

    // Remove the trailing ']' if it exists ...
    int closeBracketIndex = likeExpression.indexOf(']');
    if (closeBracketIndex != -1) {
        likeExpression = likeExpression.substring(0, closeBracketIndex);
    }
    if (likeExpression.equals("_")) {
        // The SNS expression can only be one digit ...
        return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, MIN_SNS_INDEX, 9, true, true);
    }
    if (likeExpression.equals("%")) {
        // The SNS expression can be any digits ...
        return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, MIN_SNS_INDEX, MAX_SNS_INDEX, true, true);
    }
    if (likeExpression.indexOf('_') != -1) {
        if (likeExpression.indexOf('%') != -1) {
            // Contains both wildcards; fall back to a generic LIKE query ...
            return findNodesLike(PathIndex.SNS_INDEX, likeExpression, true);
        }
        // It presumably contains some numbers and at least one '_' character ...
        int firstWildcardChar = likeExpression.indexOf('_');
        if (firstWildcardChar + 1 < likeExpression.length()) {
            // There's at least some characters after the first '_' ...
            int secondWildcardChar = likeExpression.indexOf('_', firstWildcardChar + 1);
            if (secondWildcardChar != -1) {
                // There are multiple '_' characters; fall back to a generic LIKE query ...
                return findNodesLike(PathIndex.SNS_INDEX, likeExpression, true);
            }
        }
        // There's only one '_', so a numeric range can cover it: substitute '0' for the
        // lowermost value and '9' for the uppermost value (e.g. "3_" becomes [30,39]) ...
        String lowerExpression = likeExpression.replace('_', '0');
        String upperExpression = likeExpression.replace('_', '9');
        try {
            // This SNS is just a number ...
            int lowerSns = Integer.parseInt(lowerExpression);
            int upperSns = Integer.parseInt(upperExpression);
            return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, lowerSns, upperSns, true, true);
        } catch (NumberFormatException e) {
            // It's not a number but it's in the SNS field, so there will be no results ...
            return new MatchNoneQuery();
        }
    }
    if (likeExpression.indexOf('%') != -1) {
        // It presumably contains some numbers and at least one '%' character ...
        return findNodesLike(PathIndex.SNS_INDEX, likeExpression, true);
    }
    // This is not a LIKE expression but an exact value specification and should be a number ...
    try {
        // This SNS is just a number ...
        int sns = Integer.parseInt(likeExpression);
        return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, sns, sns, true, true);
    } catch (NumberFormatException e) {
        // It's not a number but it's in the SNS field, so there will be no results ...
        return new MatchNoneQuery();
    }
}
+
+    /**
+     * A {@link Collector} that gathers only the string IDs of the documents matched by a query, ignoring all score
+     * information. This is typically used against the {@link PathIndex} to accumulate the IDs of a set of nodes that
+     * satisfy some path constraint.
+     */
+    protected static class IdCollector extends Collector {
+        private final Set<String> collectedIds = new HashSet<String>();
+        private String[] docIdToId;
+
+        protected IdCollector() {
+        }
+
+        /**
+         * Return the IDs gathered so far.
+         *
+         * @return the set of IDs; never null
+         */
+        public Set<String> getIds() {
+            return collectedIds;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
+         */
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+            return true;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
+         */
+        @Override
+        public void setScorer( Scorer scorer ) {
+            // Scores are irrelevant to this collector
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#collect(int)
+         */
+        @Override
+        public void collect( int docId ) {
+            assert docId >= 0;
+            final String id = docIdToId[docId];
+            assert id != null;
+            collectedIds.add(id);
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
+         */
+        @Override
+        public void setNextReader( IndexReader reader,
+                                   int docBase ) throws IOException {
+            // Preload the ID field values for every document in this (per-segment) reader;
+            // ContentIndex.ID is the same field name as PathIndex.ID.
+            this.docIdToId = FieldCache.DEFAULT.getStrings(reader, ContentIndex.ID);
+        }
+    }
+
+ /**
+ * This collector is responsible for loading the value for each of the columns into each tuple array.
+ */
+ protected static class DualIndexTupleCollector extends TupleCollector {
+ private final LuceneSearchSession session;
+ private final LuceneSearchProcessor processor;
+ private final LinkedList<Object[]> tuples = new LinkedList<Object[]>();
+ private final Columns columns;
+ private final int numValues;
+ private final boolean recordScore;
+ private final int scoreIndex;
+ private final FieldSelector fieldSelector;
+ private final int locationIndex;
+ private Scorer scorer;
+ private IndexReader currentReader;
+ private int docOffset;
+ private boolean resolvedLocations = false;
+
+ protected DualIndexTupleCollector( LuceneSearchSession session,
+ Columns columns ) {
+ this.session = session;
+ this.processor = session.processor;
+ this.columns = columns;
+ assert this.processor != null;
+ assert this.columns != null;
+ this.numValues = this.columns.getTupleSize();
+ assert this.numValues >= 0;
+ assert this.columns.getSelectorNames().size() == 1;
+ final String selectorName = this.columns.getSelectorNames().get(0);
+ this.locationIndex = this.columns.getLocationIndex(selectorName);
+ this.recordScore = this.columns.hasFullTextSearchScores();
+ this.scoreIndex = this.recordScore ? this.columns.getFullTextSearchScoreIndexFor(selectorName) : -1;
+ final Set<String> columnNames = new HashSet<String>(this.columns.getColumnNames());
+ columnNames.add(ContentIndex.ID); // add the UUID, which we'll put into the Location ...
+ this.fieldSelector = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return columnNames.contains(fieldName) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
+ }
+ };
+ }
+
+ /**
+ * @return tuples
+ */
+ @Override
+ public LinkedList<Object[]> getTuples() {
+ resolveLocations();
+ return tuples;
+ }
+
+ protected void resolveLocations() {
+ if (resolvedLocations) return;
+ try {
+ // The Location field in the tuples all contain the ID of the document, so we need to replace these
+ // with the appropriate Location objects, using the content from the PathIndex ...
+ IndexReader pathReader = session.getPathsReader();
+ IndexSearcher pathSearcher = session.getPathsSearcher();
+ for (Object[] tuple : tuples) {
+ String id = (String)tuple[locationIndex];
+ assert id != null;
+ Location location = getLocationForDocument(id, pathReader, pathSearcher);
+ if (location == null) continue;
+ tuple[locationIndex] = location;
+ }
+ resolvedLocations = true;
+ } catch (IOException e) {
+ throw new LuceneException(e);
+ }
+ }
+
+ protected Location getLocationForDocument( String id,
+ IndexReader pathReader,
+ IndexSearcher pathSearcher ) throws IOException {
+ // Find the path for this node (is there a better way to do this than one search per ID?) ...
+ TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.ID, id)), 1);
+ if (pathDocs.scoreDocs.length < 1) {
+ // No path record found ...
+ return null;
+ }
+ Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
+ return session.readLocation(pathDoc);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
+ */
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+
+ /**
+ * Get the location index.
+ *
+ * @return locationIndex
+ */
+ public int getLocationIndex() {
+ return locationIndex;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public void setNextReader( IndexReader reader,
+ int docBase ) {
+ this.currentReader = reader;
+ this.docOffset = docBase;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
+ */
+ @Override
+ public void setScorer( Scorer scorer ) {
+ this.scorer = scorer;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#collect(int)
+ */
+ @Override
+ public void collect( int doc ) throws IOException {
+ int docId = doc + docOffset;
+ Object[] tuple = new Object[numValues];
+ Document document = currentReader.document(docId, fieldSelector);
+ for (String columnName : columns.getColumnNames()) {
+ int index = columns.getColumnIndexForName(columnName);
+ // We just need to retrieve the first value if there is more than one ...
+ tuple[index] = document.get(columnName);
+ }
+
+ // Set the score column if required ...
+ if (recordScore) {
+ assert scorer != null;
+ tuple[scoreIndex] = scorer.score();
+ }
+
+ // Load the document ID (which is a stringified UUID) into the Location slot,
+ // which will be replaced later with a real Location ...
+ tuple[locationIndex] = document.get(ContentIndex.ID);
+ tuples.add(tuple);
+ }
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchSession.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchWorkspace.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchWorkspace.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchWorkspace.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,120 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import net.jcip.annotations.Immutable;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Version;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.search.SearchEngineWorkspace;
+
+/**
+ * The {@link SearchEngineWorkspace} implementation for the {@link LuceneSearchEngine}.
+ */
+@Immutable
+public class LuceneSearchWorkspace implements SearchEngineWorkspace {
+
+ protected static final String PATHS_INDEX_NAME = "paths";
+ protected static final String CONTENT_INDEX_NAME = "content";
+
+ /**
+ * Given the name of a property field of the form "<namespace>:<local>" (where <namespace> can be zero-length), this
+ * provider also stores the value(s) for free-text searching in a field named ":ft:<namespace>:<local>". Thus, even if
+ * the namespace is zero-length, the free-text search field will be named ":ft::<local>" and will not clash with any other
+ * property name.
+ */
+ protected static final String FULL_TEXT_PREFIX = ":ft:";
+
+ /**
+ * This index stores only these fields, so we can use the most obvious names and not worry about clashes.
+ */
+ static class PathIndex {
+ public static final String PATH = "pth";
+ public static final String NODE_NAME = "nam";
+ public static final String LOCAL_NAME = "loc";
+ public static final String SNS_INDEX = "sns";
+ public static final String LOCATION_ID_PROPERTIES = "idp";
+ public static final String ID = ContentIndex.ID;
+ public static final String DEPTH = "dep";
+ }
+
+ /**
+ * This index stores these two fields <i>plus</i> all properties. Therefore, we have to worry about name clashes, which is why
+ * these field names are prefixed with '::', which is something that does not appear in property names as they are serialized.
+ */
+ static class ContentIndex {
+ public static final String ID = "::id";
+ public static final String FULL_TEXT = "::fts";
+ }
+
+ private final String workspaceName;
+ protected final IndexRules rules;
+ private final LuceneConfiguration configuration;
+ protected final Directory pathDirectory;
+ protected final Directory contentDirectory;
+ protected final Analyzer analyzer;
+
+ /**
+ * Create a search workspace for the named workspace, obtaining the two index directories (paths and content) from the
+ * supplied configuration.
+ *
+ * @param workspaceName the name of the workspace; may not be null
+ * @param configuration the Lucene configuration used to obtain the index directories; may not be null
+ * @param rules the index rules, or null if {@link LuceneSearchEngine#DEFAULT_RULES} should be used
+ * @param analyzer the analyzer, or null if a {@link StandardAnalyzer} should be used
+ * @param overwrite not used by this constructor
+ */
+ protected LuceneSearchWorkspace( String workspaceName,
+ LuceneConfiguration configuration,
+ IndexRules rules,
+ Analyzer analyzer,
+ boolean overwrite ) {
+ assert workspaceName != null;
+ assert configuration != null;
+ this.workspaceName = workspaceName;
+ this.analyzer = analyzer != null ? analyzer : new StandardAnalyzer(Version.LUCENE_30);
+ this.rules = rules != null ? rules : LuceneSearchEngine.DEFAULT_RULES;
+ this.configuration = configuration;
+ this.pathDirectory = this.configuration.getDirectory(workspaceName, PATHS_INDEX_NAME);
+ this.contentDirectory = this.configuration.getDirectory(workspaceName, CONTENT_INDEX_NAME);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineWorkspace#getWorkspaceName()
+ */
+ public String getWorkspaceName() {
+ return workspaceName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineWorkspace#destroy(org.jboss.dna.graph.ExecutionContext)
+ */
+ public void destroy( ExecutionContext context ) {
+ configuration.destroyDirectory(workspaceName, PATHS_INDEX_NAME);
+ configuration.destroyDirectory(workspaceName, CONTENT_INDEX_NAME);
+ }
+
+ /**
+ * @return rules
+ */
+ public IndexRules getRules() {
+ return rules;
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/LuceneSearchWorkspace.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareLengthQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareLengthQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareLengthQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,254 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Length;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Length} constraint against a string field. This query
+ * implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * with string fields that satisfy the constraint.
+ */
+public class CompareLengthQuery extends CompareQuery<Integer> {
+
+    private static final long serialVersionUID = 1L;
+
+    /** Accepts a node when its field length is equal to the constraint value. */
+    protected static final Evaluator<Integer> EQUAL_TO = new Evaluator<Integer>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Integer nodeValue,
+                                            Integer length ) {
+            // Compare primitive values; '==' on two Integer objects compares identity, not value
+            return nodeValue.intValue() == length.intValue();
+        }
+
+        @Override
+        public String toString() {
+            return " = ";
+        }
+    };
+    /** Accepts a node when its field length is not equal to the constraint value. */
+    protected static final Evaluator<Integer> NOT_EQUAL_TO = new Evaluator<Integer>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Integer nodeValue,
+                                            Integer length ) {
+            return nodeValue.intValue() != length.intValue();
+        }
+
+        @Override
+        public String toString() {
+            return " != ";
+        }
+    };
+    /** Accepts a node when its field length is less than the constraint value. */
+    protected static final Evaluator<Integer> IS_LESS_THAN = new Evaluator<Integer>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Integer nodeValue,
+                                            Integer length ) {
+            return nodeValue.intValue() < length.intValue();
+        }
+
+        @Override
+        public String toString() {
+            return " < ";
+        }
+    };
+    /** Accepts a node when its field length is less than or equal to the constraint value. */
+    protected static final Evaluator<Integer> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Integer>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Integer nodeValue,
+                                            Integer length ) {
+            return nodeValue.intValue() <= length.intValue();
+        }
+
+        @Override
+        public String toString() {
+            return " <= ";
+        }
+    };
+    /** Accepts a node when its field length is greater than the constraint value. */
+    protected static final Evaluator<Integer> IS_GREATER_THAN = new Evaluator<Integer>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Integer nodeValue,
+                                            Integer length ) {
+            return nodeValue.intValue() > length.intValue();
+        }
+
+        @Override
+        public String toString() {
+            return " > ";
+        }
+    };
+    /** Accepts a node when its field length is greater than or equal to the constraint value. */
+    protected static final Evaluator<Integer> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Integer>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Integer nodeValue,
+                                            Integer length ) {
+            return nodeValue.intValue() >= length.intValue();
+        }
+
+        @Override
+        public String toString() {
+            return " >= ";
+        }
+    };
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a field length that is equal to the supplied constraint
+     * value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @return the query; never null
+     */
+    public static CompareLengthQuery createQueryForNodesWithFieldEqualTo( Integer constraintValue,
+                                                                          String fieldName,
+                                                                          ValueFactories factories ) {
+        return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), EQUAL_TO);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a field length that is not equal to the supplied
+     * constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @return the query; never null
+     */
+    public static CompareLengthQuery createQueryForNodesWithFieldNotEqualTo( Integer constraintValue,
+                                                                             String fieldName,
+                                                                             ValueFactories factories ) {
+        return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), NOT_EQUAL_TO);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a field length that is greater than the supplied
+     * constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @return the query; never null
+     */
+    public static CompareLengthQuery createQueryForNodesWithFieldGreaterThan( Integer constraintValue,
+                                                                              String fieldName,
+                                                                              ValueFactories factories ) {
+        return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a field length that is greater than or equal to the
+     * supplied constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @return the query; never null
+     */
+    public static CompareLengthQuery createQueryForNodesWithFieldGreaterThanOrEqualTo( Integer constraintValue,
+                                                                                       String fieldName,
+                                                                                       ValueFactories factories ) {
+        return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN_OR_EQUAL_TO);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a field length that is less than the supplied
+     * constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @return the query; never null
+     */
+    public static CompareLengthQuery createQueryForNodesWithFieldLessThan( Integer constraintValue,
+                                                                           String fieldName,
+                                                                           ValueFactories factories ) {
+        return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_LESS_THAN);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a field length that is less than or equal to the
+     * supplied constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @return the query; never null
+     */
+    public static CompareLengthQuery createQueryForNodesWithFieldLessThanOrEqualTo( Integer constraintValue,
+                                                                                    String fieldName,
+                                                                                    ValueFactories factories ) {
+        return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_LESS_THAN_OR_EQUAL_TO);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+     *
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param constraintValue the constraint value; may not be null
+     * @param stringFactory the string factory that can be used during the scoring; may not be null
+     * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
+     *        constraint; may not be null
+     */
+    protected CompareLengthQuery( String fieldName,
+                                  Integer constraintValue,
+                                  ValueFactory<String> stringFactory,
+                                  Evaluator<Integer> evaluator ) {
+        super(fieldName, constraintValue, null, stringFactory, evaluator);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.jboss.dna.search.lucene.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+     */
+    @Override
+    protected Integer readFromDocument( IndexReader reader,
+                                        int docId ) throws IOException {
+        // This implementation reads the length of the field; a missing value has length 0 ...
+        Document doc = reader.document(docId, fieldSelector);
+        String valueString = doc.get(fieldName);
+        String value = stringFactory.create(valueString);
+        return value != null ? value.length() : 0;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#clone()
+     */
+    @Override
+    public Object clone() {
+        return new CompareLengthQuery(fieldName, constraintValue, stringFactory, evaluator);
+    }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareLengthQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareNameQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareNameQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareNameQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareNameQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,258 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.property.ValueComparators;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the name of nodes. This
+ * query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that correspond to nodes with Names that satisfy the constraint.
+ */
+public class CompareNameQuery extends CompareQuery<Path.Segment> {
+
+ private static final long serialVersionUID = 1L;
+ protected static final Evaluator<Path.Segment> IS_LESS_THAN = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) < 0;
+ }
+
+ @Override
+ public String toString() {
+ return " < ";
+ }
+ };
+ protected static final Evaluator<Path.Segment> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) <= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " <= ";
+ }
+ };
+ protected static final Evaluator<Path.Segment> IS_GREATER_THAN = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) > 0;
+ }
+
+ @Override
+ public String toString() {
+ return " > ";
+ }
+ };
+ protected static final Evaluator<Path.Segment> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Path.Segment>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Path.Segment nodeValue,
+ Path.Segment constraintValue ) {
+ return ValueComparators.PATH_SEGMENT_COMPARATOR.compare(nodeValue, constraintValue) >= 0;
+ }
+
+ @Override
+ public String toString() {
+ return " >= ";
+ }
+ };
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
+ * that is greater than the supplied constraint name.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareNameQuery createQueryForNodesWithNameGreaterThan( Path.Segment constraintValue,
+ String localNameField,
+ String snsIndexFieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+ factories.getStringFactory(), factories.getLongFactory(), IS_GREATER_THAN, caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
+ * that is greater than or equal to the supplied constraint name.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareNameQuery createQueryForNodesWithNameGreaterThanOrEqualTo( Path.Segment constraintValue,
+ String localNameField,
+ String snsIndexFieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+ factories.getStringFactory(), factories.getLongFactory(), IS_GREATER_THAN_OR_EQUAL_TO,
+ caseSensitive);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents such that the node represented by the document has a name
+ * that is less than the supplied constraint name.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param localNameField the name of the document field containing the local name value; may not be null
+ * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+ * case-insensitive
+ * @return the query; never null
+ */
+ public static CompareNameQuery createQueryForNodesWithNameLessThan( Path.Segment constraintValue,
+ String localNameField,
+ String snsIndexFieldName,
+ ValueFactories factories,
+ boolean caseSensitive ) {
+ return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+ factories.getStringFactory(), factories.getLongFactory(), IS_LESS_THAN, caseSensitive);
+ }
+
+    /**
+     * Create a {@link Query} implementation that scores documents whose node has a name ordering at or before the
+     * supplied constraint name.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param localNameField the name of the document field containing the local name value; may not be null
+     * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the query; never null
+     */
+    public static CompareNameQuery createQueryForNodesWithNameLessThanOrEqualTo( Path.Segment constraintValue,
+                                                                                 String localNameField,
+                                                                                 String snsIndexFieldName,
+                                                                                 ValueFactories factories,
+                                                                                 boolean caseSensitive ) {
+        // Delegate to the general constructor, selecting the '<=' evaluator.
+        return new CompareNameQuery(localNameField, snsIndexFieldName, constraintValue, factories.getPathFactory(),
+                                    factories.getStringFactory(), factories.getLongFactory(), IS_LESS_THAN_OR_EQUAL_TO,
+                                    caseSensitive);
+    }
+
+    /** Name of the document field holding the same-name-sibling index. */
+    private final String snsIndexFieldName;
+    /** Converts the stored SNS index string into a number. */
+    private final ValueFactory<Long> longFactory;
+    /** Used to rebuild a {@link Path.Segment} from the stored local name and SNS index. */
+    private final PathFactory pathFactory;
+    /** Whether the name comparison preserves case; when false, local names are lowercased before comparison. */
+    private final boolean caseSensitive;
+
+    /**
+     * Create a {@link Query} implementation that scores nodes according to the supplied comparator.
+     *
+     * @param localNameField the name of the document field containing the local name value; may not be null
+     * @param snsIndexFieldName the name of the document field containing the same-name-sibling index; may not be null
+     * @param constraintValue the constraint path; may not be null
+     * @param pathFactory the path factory that can be used during the scoring; may not be null
+     * @param stringFactory the string factory that can be used during the scoring; may not be null
+     * @param longFactory the long factory that can be used during the scoring; may not be null
+     * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
+     *        constraint; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     */
+    protected CompareNameQuery( final String localNameField,
+                                final String snsIndexFieldName,
+                                Path.Segment constraintValue,
+                                PathFactory pathFactory,
+                                ValueFactory<String> stringFactory,
+                                ValueFactory<Long> longFactory,
+                                Evaluator<Path.Segment> evaluator,
+                                boolean caseSensitive ) {
+        // Only the local-name and SNS-index fields are needed to reconstruct the name segment,
+        // so the selector avoids loading any other stored fields.
+        super(localNameField, constraintValue, null, stringFactory, evaluator, new FieldSelector() {
+            private static final long serialVersionUID = 1L;
+
+            public FieldSelectorResult accept( String candidateFieldName ) {
+                final boolean needed = candidateFieldName.equals(localNameField)
+                                       || candidateFieldName.equals(snsIndexFieldName);
+                return needed ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
+            }
+        });
+        assert snsIndexFieldName != null;
+        assert longFactory != null;
+        this.snsIndexFieldName = snsIndexFieldName;
+        this.longFactory = longFactory;
+        this.pathFactory = pathFactory;
+        this.caseSensitive = caseSensitive;
+    }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.lucene.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ protected Path.Segment readFromDocument( IndexReader reader,
+ int docId ) throws IOException {
+ Document doc = reader.document(docId, fieldSelector);
+ String localName = doc.get(fieldName);
+ if (!caseSensitive) localName = localName.toLowerCase();
+ int sns = longFactory.create(doc.get(snsIndexFieldName)).intValue();
+ return pathFactory.createSegment(localName, sns);
+ }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#clone()
+     */
+    @Override
+    public Object clone() {
+        // All state is immutable, so a shallow copy built from the current fields is sufficient.
+        final CompareNameQuery copy = new CompareNameQuery(fieldName, snsIndexFieldName, constraintValue, pathFactory,
+                                                           stringFactory, longFactory, evaluator, caseSensitive);
+        return copy;
+    }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareNameQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ComparePathQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/ComparePathQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ComparePathQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ComparePathQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,223 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.ValueComparators;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the Path of nodes. This
+ * query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that correspond to nodes with Paths that satisfy the constraint.
+ */
+public class ComparePathQuery extends CompareQuery<Path> {
+
+    private static final long serialVersionUID = 1L;
+
+    /** Evaluator accepting node paths that order strictly before the constraint path. */
+    protected static final Evaluator<Path> PATH_IS_LESS_THAN = new Evaluator<Path>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Path nodePath,
+                                            Path constraintPath ) {
+            final int comparison = ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath);
+            return comparison < 0;
+        }
+
+        @Override
+        public String toString() {
+            return " < ";
+        }
+    };
+
+    /** Evaluator accepting node paths that order at or before the constraint path. */
+    protected static final Evaluator<Path> PATH_IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Path>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Path nodePath,
+                                            Path constraintPath ) {
+            final int comparison = ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath);
+            return comparison <= 0;
+        }
+
+        @Override
+        public String toString() {
+            return " <= ";
+        }
+    };
+
+    /** Evaluator accepting node paths that order strictly after the constraint path. */
+    protected static final Evaluator<Path> PATH_IS_GREATER_THAN = new Evaluator<Path>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Path nodePath,
+                                            Path constraintPath ) {
+            final int comparison = ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath);
+            return comparison > 0;
+        }
+
+        @Override
+        public String toString() {
+            return " > ";
+        }
+    };
+
+    /** Evaluator accepting node paths that order at or after the constraint path. */
+    protected static final Evaluator<Path> PATH_IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Path>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( Path nodePath,
+                                            Path constraintPath ) {
+            final int comparison = ValueComparators.PATH_COMPARATOR.compare(nodePath, constraintPath);
+            return comparison >= 0;
+        }
+
+        @Override
+        public String toString() {
+            return " >= ";
+        }
+    };
+
+    /**
+     * Create a {@link Query} implementation that scores documents whose node has a path ordering strictly after the
+     * supplied constraint path.
+     *
+     * @param constraintPath the constraint path; may not be null
+     * @param fieldName the name of the document field containing the path value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the path query; never null
+     */
+    public static ComparePathQuery createQueryForNodesWithPathGreaterThan( Path constraintPath,
+                                                                           String fieldName,
+                                                                           ValueFactories factories,
+                                                                           boolean caseSensitive ) {
+        final ValueFactory<Path> pathFactory = factories.getPathFactory();
+        final ValueFactory<String> stringFactory = factories.getStringFactory();
+        return new ComparePathQuery(fieldName, constraintPath, pathFactory, stringFactory, PATH_IS_GREATER_THAN,
+                                    caseSensitive);
+    }
+
+    /**
+     * Create a {@link Query} implementation that scores documents whose node has a path ordering at or after the
+     * supplied constraint path.
+     *
+     * @param constraintPath the constraint path; may not be null
+     * @param fieldName the name of the document field containing the path value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the path query; never null
+     */
+    public static ComparePathQuery createQueryForNodesWithPathGreaterThanOrEqualTo( Path constraintPath,
+                                                                                    String fieldName,
+                                                                                    ValueFactories factories,
+                                                                                    boolean caseSensitive ) {
+        final ValueFactory<Path> pathFactory = factories.getPathFactory();
+        final ValueFactory<String> stringFactory = factories.getStringFactory();
+        return new ComparePathQuery(fieldName, constraintPath, pathFactory, stringFactory,
+                                    PATH_IS_GREATER_THAN_OR_EQUAL_TO, caseSensitive);
+    }
+
+    /**
+     * Create a {@link Query} implementation that scores documents whose node has a path ordering strictly before the
+     * supplied constraint path.
+     *
+     * @param constraintPath the constraint path; may not be null
+     * @param fieldName the name of the document field containing the path value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the path query; never null
+     */
+    public static ComparePathQuery createQueryForNodesWithPathLessThan( Path constraintPath,
+                                                                        String fieldName,
+                                                                        ValueFactories factories,
+                                                                        boolean caseSensitive ) {
+        final ValueFactory<Path> pathFactory = factories.getPathFactory();
+        final ValueFactory<String> stringFactory = factories.getStringFactory();
+        return new ComparePathQuery(fieldName, constraintPath, pathFactory, stringFactory, PATH_IS_LESS_THAN,
+                                    caseSensitive);
+    }
+
+    /**
+     * Create a {@link Query} implementation that scores documents whose node has a path ordering at or before the
+     * supplied constraint path.
+     *
+     * @param constraintPath the constraint path; may not be null
+     * @param fieldName the name of the document field containing the path value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the path query; never null
+     */
+    public static ComparePathQuery createQueryForNodesWithPathLessThanOrEqualTo( Path constraintPath,
+                                                                                 String fieldName,
+                                                                                 ValueFactories factories,
+                                                                                 boolean caseSensitive ) {
+        final ValueFactory<Path> pathFactory = factories.getPathFactory();
+        final ValueFactory<String> stringFactory = factories.getStringFactory();
+        return new ComparePathQuery(fieldName, constraintPath, pathFactory, stringFactory,
+                                    PATH_IS_LESS_THAN_OR_EQUAL_TO, caseSensitive);
+    }
+
+    /** Whether the path comparison preserves case; when false, stored paths are lowercased before comparison. */
+    private final boolean caseSensitive;
+
+    /**
+     * Create a {@link Query} implementation that scores nodes according to the supplied comparator.
+     *
+     * @param fieldName the name of the document field containing the path value; may not be null
+     * @param constraintPath the constraint path; may not be null
+     * @param pathFactory the value factory that can be used during the scoring; may not be null
+     * @param stringFactory the string factory that can be used during the scoring; may not be null
+     * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies
+     *        the constraint; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     */
+    protected ComparePathQuery( String fieldName,
+                                Path constraintPath,
+                                ValueFactory<Path> pathFactory,
+                                ValueFactory<String> stringFactory,
+                                Evaluator<Path> evaluator,
+                                boolean caseSensitive ) {
+        super(fieldName, constraintPath, pathFactory, stringFactory, evaluator);
+        this.caseSensitive = caseSensitive;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.jboss.dna.search.lucene.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+     */
+    @Override
+    protected Path readFromDocument( IndexReader reader,
+                                     int docId ) throws IOException {
+        final Document document = reader.document(docId, fieldSelector);
+        String pathString = document.get(fieldName);
+        if (!caseSensitive) pathString = pathString.toLowerCase();
+        return valueTypeFactory.create(pathString);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#clone()
+     */
+    @Override
+    public Object clone() {
+        // All state is immutable, so a shallow copy built from the current fields is sufficient.
+        final ComparePathQuery copy = new ComparePathQuery(fieldName, constraintValue, valueTypeFactory, stringFactory,
+                                                           evaluator, caseSensitive);
+        return copy;
+    }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ComparePathQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,308 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import java.io.Serializable;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against the Path of nodes. This
+ * query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that correspond to nodes with Paths that satisfy the constraint.
+ *
+ * @param <ValueType>
+ */
+public abstract class CompareQuery<ValueType> extends Query {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Function that determines whether a value read from a document satisfies the constraint value.
+     *
+     * @param <ValueType> the type of value being compared
+     */
+    protected static interface Evaluator<ValueType> extends Serializable {
+        boolean satisfiesConstraint( ValueType nodeValue,
+                                     ValueType constraintValue );
+    }
+
+    /** The name of the document field containing the value to be compared. */
+    protected final String fieldName;
+    /** Selector that loads only the document field(s) needed to recover the value. */
+    protected final FieldSelector fieldSelector;
+    /** The constraint value that each document's value is compared against; never null. */
+    protected final ValueType constraintValue;
+    /** Determines whether a document's value satisfies the constraint; never null. */
+    protected final Evaluator<ValueType> evaluator;
+    /**
+     * Converts the stored string into the typed value; may be null only when
+     * {@link #readFromDocument(IndexReader, int)} is overridden to not use it.
+     */
+    protected final ValueFactory<ValueType> valueTypeFactory;
+    /** Converts the constraint value to a string in {@link #toString(String)}; may be null. */
+    protected final ValueFactory<String> stringFactory;
+
+    /**
+     * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+     *
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param constraintValue the constraint value; may not be null
+     * @param valueTypeFactory the value factory that can be used during the scoring; may not be null
+     * @param stringFactory the string factory that can be used during the scoring; may not be null
+     * @param evaluator the {@link Evaluator} implementation that returns whether the node value satisfies the
+     *        constraint; may not be null
+     */
+    protected CompareQuery( String fieldName,
+                            ValueType constraintValue,
+                            ValueFactory<ValueType> valueTypeFactory,
+                            ValueFactory<String> stringFactory,
+                            Evaluator<ValueType> evaluator ) {
+        this(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, null);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+     *
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param constraintValue the constraint value; may not be null
+     * @param valueTypeFactory the value factory that can be used during the scoring; may not be null unless
+     *        {@link #readFromDocument(IndexReader, int)} is overloaded to not use it
+     * @param stringFactory the string factory that can be used during the scoring; may not be null
+     * @param evaluator the {@link Evaluator} implementation that returns whether the node value satisfies the
+     *        constraint; may not be null
+     * @param fieldSelector the field selector that should load the fields needed to recover the value; may be null if
+     *        the field selector should be generated automatically
+     */
+    protected CompareQuery( final String fieldName,
+                            ValueType constraintValue,
+                            ValueFactory<ValueType> valueTypeFactory,
+                            ValueFactory<String> stringFactory,
+                            Evaluator<ValueType> evaluator,
+                            FieldSelector fieldSelector ) {
+        this.fieldName = fieldName;
+        this.constraintValue = constraintValue;
+        this.valueTypeFactory = valueTypeFactory;
+        this.stringFactory = stringFactory;
+        this.evaluator = evaluator;
+        assert this.fieldName != null;
+        assert this.constraintValue != null;
+        assert this.evaluator != null;
+        // By default, load just the single field holding the value and stop scanning immediately after it.
+        this.fieldSelector = fieldSelector != null ? fieldSelector : new FieldSelector() {
+            private static final long serialVersionUID = 1L;
+
+            public FieldSelectorResult accept( String fieldName ) {
+                return CompareQuery.this.fieldName.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+            }
+        };
+    }
+
+    /**
+     * Read the typed value for the supplied document from the index.
+     *
+     * @param reader the index reader; may not be null
+     * @param docId the identifier of the document whose value is to be read
+     * @return the value for the document, converted via {@link #valueTypeFactory}
+     * @throws IOException if there is a problem reading from the index
+     */
+    protected ValueType readFromDocument( IndexReader reader,
+                                          int docId ) throws IOException {
+        Document doc = reader.document(docId, fieldSelector);
+        String valueString = doc.get(fieldName);
+        return valueTypeFactory.create(valueString);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+     */
+    @Override
+    public Weight createWeight( Searcher searcher ) {
+        return new CompareWeight(searcher);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#toString(java.lang.String)
+     */
+    @Override
+    public String toString( String field ) {
+        // The ternary must be parenthesized: '+' binds tighter than '!=', so without the parentheses the
+        // condition would be '(fieldName + ... + stringFactory) != null' (always true), silently dropping
+        // the "field <op> " prefix and calling stringFactory.create even when stringFactory is null.
+        String constraintStr = stringFactory != null ? stringFactory.create(constraintValue) : constraintValue.toString();
+        return fieldName + " " + evaluator.toString() + " " + constraintStr;
+    }
+
+    /**
+     * Calculates query weights and builds query scores for our comparison queries.
+     */
+    protected class CompareWeight extends Weight {
+        private static final long serialVersionUID = 1L;
+        private final Searcher searcher;
+
+        protected CompareWeight( Searcher searcher ) {
+            this.searcher = searcher;
+            assert this.searcher != null;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Weight#getQuery()
+         */
+        @Override
+        public Query getQuery() {
+            return CompareQuery.this;
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This implementation always returns a weight factor of 1.0.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Weight#getValue()
+         */
+        @Override
+        public float getValue() {
+            return 1.0f; // weight factor of 1.0
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This implementation always returns a normalization factor of 1.0.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+         */
+        @Override
+        public float sumOfSquaredWeights() {
+            return 1.0f; // normalization factor of 1.0
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This implementation always does nothing, as there is nothing to normalize.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Weight#normalize(float)
+         */
+        @Override
+        public void normalize( float norm ) {
+            // No need to do anything here
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+         */
+        @Override
+        public Scorer scorer( IndexReader reader,
+                              boolean scoreDocsInOrder,
+                              boolean topScorer ) {
+            // Return a custom scorer ...
+            return new CompareScorer(reader);
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+         */
+        @Override
+        public Explanation explain( IndexReader reader,
+                                    int doc ) {
+            return new Explanation(getValue(), getQuery().toString());
+        }
+    }
+
+    /**
+     * A scorer that walks every document in the index and scores (with 1.0) exactly those whose value satisfies the
+     * comparison constraint.
+     */
+    protected class CompareScorer extends Scorer {
+        private int docId = -1;
+        private final int maxDocId;
+        private final IndexReader reader;
+
+        protected CompareScorer( IndexReader reader ) {
+            // We don't care which Similarity we have, because we don't use it. So get the default.
+            super(Similarity.getDefault());
+            this.reader = reader;
+            assert this.reader != null;
+            this.maxDocId = this.reader.maxDoc() - 1;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.DocIdSetIterator#docID()
+         */
+        @Override
+        public int docID() {
+            return docId;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+         */
+        @Override
+        public int nextDoc() throws IOException {
+            // Guard BEFORE incrementing (rather than the original do/while, which checked afterwards):
+            // on an empty index, or when called again after exhaustion, the old form incremented past
+            // maxDoc and called reader.isDeleted/readFromDocument with an out-of-range doc id.
+            while (docId < maxDocId) {
+                ++docId;
+                if (reader.isDeleted(docId)) {
+                    // We should skip this document ...
+                    continue;
+                }
+                ValueType value = readFromDocument(reader, docId);
+                if (evaluator.satisfiesConstraint(value, constraintValue)) return docId;
+            }
+            // Per the DocIdSetIterator contract, docID() must report NO_MORE_DOCS once the iterator is exhausted.
+            docId = Scorer.NO_MORE_DOCS;
+            return Scorer.NO_MORE_DOCS;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+         */
+        @Override
+        public int advance( int target ) throws IOException {
+            if (target == Scorer.NO_MORE_DOCS) return target;
+            while (true) {
+                int doc = nextDoc();
+                if (doc >= target) return doc; // nextDoc() eventually returns NO_MORE_DOCS, so this terminates
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This method always returns a score of 1.0 for the current document, since only those documents that satisfy
+         * the comparison constraint are scored by this scorer.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Scorer#score()
+         */
+        @Override
+        public float score() {
+            return 1.0f;
+        }
+    }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareStringQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareStringQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareStringQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareStringQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,339 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.search.regex.JavaUtilRegexCapabilities;
+import org.apache.lucene.search.regex.RegexQuery;
+import org.jboss.dna.graph.property.ValueComparators;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Comparison;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Comparison} constraint against a string field. This query
+ * implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * with string fields that satisfy the constraint.
+ */
+public class CompareStringQuery extends CompareQuery<String> {
+
+    private static final long serialVersionUID = 1L;
+    protected static final Evaluator<String> EQUAL_TO = new Evaluator<String>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( String nodeValue,
+                                            String constraintValue ) {
+            return constraintValue.equals(nodeValue);
+        }
+
+        @Override
+        public String toString() {
+            return " = ";
+        }
+    };
+    protected static final Evaluator<String> IS_LESS_THAN = new Evaluator<String>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( String nodeValue,
+                                            String constraintValue ) {
+            return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) < 0;
+        }
+
+        @Override
+        public String toString() {
+            return " < ";
+        }
+    };
+    protected static final Evaluator<String> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<String>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( String nodeValue,
+                                            String constraintValue ) {
+            return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) <= 0;
+        }
+
+        @Override
+        public String toString() {
+            return " <= ";
+        }
+    };
+    protected static final Evaluator<String> IS_GREATER_THAN = new Evaluator<String>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( String nodeValue,
+                                            String constraintValue ) {
+            return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) > 0;
+        }
+
+        @Override
+        public String toString() {
+            return " > ";
+        }
+    };
+    protected static final Evaluator<String> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<String>() {
+        private static final long serialVersionUID = 1L;
+
+        public boolean satisfiesConstraint( String nodeValue,
+                                            String constraintValue ) {
+            return ValueComparators.STRING_COMPARATOR.compare(nodeValue, constraintValue) >= 0;
+        }
+
+        @Override
+        public String toString() {
+            return " >= ";
+        }
+    };
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a string field value that is equal to the supplied
+     * constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the query; never null
+     */
+    public static Query createQueryForNodesWithFieldEqualTo( String constraintValue,
+                                                             String fieldName,
+                                                             ValueFactories factories,
+                                                             boolean caseSensitive ) {
+        if (caseSensitive) {
+            // We can just do a normal TermQuery ...
+            return new TermQuery(new Term(fieldName, constraintValue));
+        }
+        return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+                                      EQUAL_TO, caseSensitive);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a string field value that is greater than the supplied
+     * constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the query; never null
+     */
+    public static CompareStringQuery createQueryForNodesWithFieldGreaterThan( String constraintValue,
+                                                                              String fieldName,
+                                                                              ValueFactories factories,
+                                                                              boolean caseSensitive ) {
+        return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+                                      IS_GREATER_THAN, caseSensitive);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a string field value that is greater than or equal to
+     * the supplied constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the query; never null
+     */
+    public static CompareStringQuery createQueryForNodesWithFieldGreaterThanOrEqualTo( String constraintValue,
+                                                                                       String fieldName,
+                                                                                       ValueFactories factories,
+                                                                                       boolean caseSensitive ) {
+        return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+                                      IS_GREATER_THAN_OR_EQUAL_TO, caseSensitive);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a string field value that is less than the supplied
+     * constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the query; never null
+     */
+    public static CompareStringQuery createQueryForNodesWithFieldLessThan( String constraintValue,
+                                                                           String fieldName,
+                                                                           ValueFactories factories,
+                                                                           boolean caseSensitive ) {
+        return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+                                      IS_LESS_THAN, caseSensitive);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a string field value that is less than or equal to the
+     * supplied constraint value.
+     *
+     * @param constraintValue the constraint value; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the query; never null
+     */
+    public static CompareStringQuery createQueryForNodesWithFieldLessThanOrEqualTo( String constraintValue,
+                                                                                    String fieldName,
+                                                                                    ValueFactories factories,
+                                                                                    boolean caseSensitive ) {
+        return new CompareStringQuery(fieldName, constraintValue, factories.getStringFactory(), factories.getStringFactory(),
+                                      IS_LESS_THAN_OR_EQUAL_TO, caseSensitive);
+    }
+
+    /**
+     * Construct a {@link Query} implementation that scores documents with a string field value that is LIKE the supplied
+     * constraint value.
+     *
+     * @param likeExpression the LIKE expression; may not be null
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param factories the value factories that can be used during the scoring; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     * @return the query; never null
+     */
+    public static Query createQueryForNodesWithFieldLike( String likeExpression,
+                                                          String fieldName,
+                                                          ValueFactories factories,
+                                                          boolean caseSensitive ) {
+        assert likeExpression != null;
+        assert likeExpression.length() > 0;
+        if (likeExpression.indexOf('%') == -1 && likeExpression.indexOf('_') == -1) {
+            // This is not a like expression, so just do an equals ...
+            return createQueryForNodesWithFieldEqualTo(likeExpression, fieldName, factories, caseSensitive);
+        }
+        if (caseSensitive) {
+            // We can just do a normal Wildcard or RegEx query ...
+
+            // '%' matches 0 or more characters
+            // '_' matches any single character
+            // '\x' matches 'x'
+            // all other characters match themselves
+
+            // Wildcard queries are a better match, but they can be slow and should not be used
+            // if the first character of the expression is a '%' or '_' ...
+            char firstChar = likeExpression.charAt(0);
+            if (firstChar != '%' && firstChar != '_') {
+                // Create a wildcard query ...
+                String expression = toWildcardExpression(likeExpression);
+                return new WildcardQuery(new Term(fieldName, expression));
+            }
+        }
+        // Create a regex query (which will be done using the correct case) ...
+        String regex = toRegularExpression(likeExpression);
+        RegexQuery query = new RegexQuery(new Term(fieldName, regex));
+        int flags = caseSensitive ? 0 : Pattern.CASE_INSENSITIVE;
+        query.setRegexImplementation(new JavaUtilRegexCapabilities(flags));
+        return query;
+    }
+
+    /**
+     * Convert the JCR like expression to a Lucene wildcard expression. The JCR like expression uses '%' to match 0 or more
+     * characters, '_' to match any single character, '\x' to match the 'x' character, and all other characters to match
+     * themselves.
+     * <p>
+     * NOTE(review): because '%'/'_' are translated before the '\x' escapes are removed, an escaped wildcard (e.g. '\%') ends up
+     * as a Lucene wildcard rather than a literal character. Such expressions may need the regex form instead — TODO confirm
+     * whether escaped wildcards can reach this method.
+     * </p>
+     *
+     * @param likeExpression the like expression; may not be null
+     * @return the expression that can be used with a WildcardQuery; never null
+     */
+    protected static String toWildcardExpression( String likeExpression ) {
+        return likeExpression.replace('%', '*').replace('_', '?').replaceAll("\\\\(.)", "$1");
+    }
+
+    /**
+     * Convert the JCR like expression to a regular expression. The JCR like expression uses '%' to match 0 or more characters,
+     * '_' to match any single character, '\x' to match the 'x' character, and all other characters to match themselves. Note that
+     * if any regex metacharacters appear in the like expression, they will be escaped within the resulting regular expression.
+     *
+     * @param likeExpression the like expression; may not be null
+     * @return the expression that can be used with a regular expression; never null
+     */
+    protected static String toRegularExpression( String likeExpression ) {
+        // Replace all '\x' with 'x' ...
+        String result = likeExpression.replaceAll("\\\\(.)", "$1");
+        // Escape characters used as metacharacters in regular expressions, including
+        // '[', '^', '\', '$', '.', '|', '?', '*', '+', '(', and ')'.
+        // The replacement must be a backslash followed by the captured character, which in Java
+        // replacement-string syntax is "\\$1" (written as the literal "\\\\$1") ...
+        result = result.replaceAll("([\\[^\\\\$.|?*+()])", "\\\\$1");
+        // Replace '%'->'.*' (zero or more characters) and '_'->'.' (exactly one character),
+        // continuing from the escaped result rather than the raw input ...
+        result = result.replace("%", ".*").replace("_", ".");
+        return result;
+    }
+
+    private final boolean caseSensitive;
+
+    /**
+     * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+     *
+     * @param fieldName the name of the document field containing the value; may not be null
+     * @param constraintValue the constraint value; may not be null
+     * @param valueFactory the value factory that can be used during the scoring; may not be null
+     * @param stringFactory the string factory that can be used during the scoring; may not be null
+     * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
+     *        constraint; may not be null
+     * @param caseSensitive true if the comparison should be done in a case-sensitive manner, or false if it is to be
+     *        case-insensitive
+     */
+    protected CompareStringQuery( String fieldName,
+                                  String constraintValue,
+                                  ValueFactory<String> valueFactory,
+                                  ValueFactory<String> stringFactory,
+                                  Evaluator<String> evaluator,
+                                  boolean caseSensitive ) {
+        // Normalize the constraint once here so per-document comparisons only lowercase the document value ...
+        super(fieldName, caseSensitive ? constraintValue : constraintValue.toLowerCase(), valueFactory, stringFactory, evaluator);
+        this.caseSensitive = caseSensitive;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.jboss.dna.search.lucene.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+     */
+    @Override
+    protected String readFromDocument( IndexReader reader,
+                                       int docId ) throws IOException {
+        String result = super.readFromDocument(reader, docId);
+        if (result == null) return null;
+        return caseSensitive ? result : result.toLowerCase();
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#clone()
+     */
+    @Override
+    public Object clone() {
+        return new CompareStringQuery(fieldName, constraintValue, valueTypeFactory, stringFactory, evaluator, caseSensitive);
+    }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/CompareStringQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/IdsQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/IdsQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/IdsQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/IdsQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,261 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import java.util.Set;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+
+/**
+ * A Lucene {@link Query} implementation that is used to score positively those documents that have a ID in the supplied set. This
+ * works for large sets of IDs; in smaller numbers, it may be more efficient to create a boolean query that checks for each of the
+ * IDs.
+ */
+public class IdsQuery extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The operand that is being negated by this query.
+ */
+ protected final Set<String> uuids;
+ protected final FieldSelector fieldSelector;
+ protected final String fieldName;
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param ids the set of ID values; may not be null
+ */
+ public IdsQuery( String fieldName,
+ Set<String> ids ) {
+ this.fieldName = fieldName;
+ this.uuids = ids;
+ assert this.fieldName != null;
+ assert this.uuids != null;
+ this.fieldSelector = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return fieldName.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+ }
+ };
+ }
+
+ protected boolean includeDocument( IndexReader reader,
+ int docId ) throws IOException {
+ Document doc = reader.document(docId, fieldSelector);
+ String valueString = doc.get(fieldName);
+ return uuids.contains(valueString);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new IdSetWeight(searcher);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return fieldName + " IN UUIDs";
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class IdSetWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+ private final Searcher searcher;
+
+ protected IdSetWeight( Searcher searcher ) {
+ this.searcher = searcher;
+ assert this.searcher != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return IdsQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) {
+ // Return a custom scorer ...
+ return new IdScorer(reader);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) {
+ return new Explanation(getValue(), getQuery().toString());
+ }
+ }
+
+ /**
+ * A scorer for the Path query.
+ */
+ protected class IdScorer extends Scorer {
+ private int docId = -1;
+ private final int maxDocId;
+ private final IndexReader reader;
+
+ protected IdScorer( IndexReader reader ) {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ this.reader = reader;
+ assert this.reader != null;
+ this.maxDocId = this.reader.maxDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() throws IOException {
+ do {
+ ++docId;
+ if (reader.isDeleted(docId)) {
+ // We should skip this document ...
+ continue;
+ }
+ if (includeDocument(reader, docId)) return docId;
+ } while (docId < maxDocId);
+ return Scorer.NO_MORE_DOCS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) throws IOException {
+ if (target == Scorer.NO_MORE_DOCS) return target;
+ while (true) {
+ int doc = nextDoc();
+ if (doc >= target) return doc;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/IdsQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/MatchNoneQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/MatchNoneQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/MatchNoneQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/MatchNoneQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,216 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+
+/**
+ * A Lucene {@link Query} implementation that always matches no documents.
+ */
+public class MatchNoneQuery extends Query {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Construct a query that always returns no documents.
+     */
+    public MatchNoneQuery() {
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+     */
+    @Override
+    public Weight createWeight( Searcher searcher ) {
+        return new NoneWeight();
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#clone()
+     */
+    @Override
+    public Object clone() {
+        return new MatchNoneQuery();
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see org.apache.lucene.search.Query#toString(java.lang.String)
+     */
+    @Override
+    public String toString( String field ) {
+        return "NO DOCS";
+    }
+
+    /**
+     * Calculates query weights and builds query scores for this match-nothing query.
+     */
+    protected class NoneWeight extends Weight {
+        private static final long serialVersionUID = 1L;
+
+        protected NoneWeight() {
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Weight#getQuery()
+         */
+        @Override
+        public Query getQuery() {
+            return MatchNoneQuery.this;
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This implementation always returns a weight factor of 1.0.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Weight#getValue()
+         */
+        @Override
+        public float getValue() {
+            return 1.0f; // weight factor of 1.0
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This implementation always returns a normalization factor of 1.0.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+         */
+        @Override
+        public float sumOfSquaredWeights() {
+            return 1.0f; // normalization factor of 1.0
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This implementation always does nothing, as there is nothing to normalize.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Weight#normalize(float)
+         */
+        @Override
+        public void normalize( float norm ) {
+            // No need to do anything here
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+         */
+        @Override
+        public Scorer scorer( IndexReader reader,
+                              boolean scoreDocsInOrder,
+                              boolean topScorer ) {
+            return new NoneScorer();
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+         */
+        @Override
+        public Explanation explain( IndexReader reader,
+                                    int doc ) {
+            return new Explanation(getValue(), "NO VALUES");
+        }
+    }
+
+    /**
+     * A scorer that matches no documents: it is immediately exhausted and never scores any document.
+     */
+    protected static class NoneScorer extends Scorer {
+        private int docId = -1;
+
+        protected NoneScorer() {
+            // We don't care which Similarity we have, because we don't use it. So get the default.
+            super(Similarity.getDefault());
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.DocIdSetIterator#docID()
+         */
+        @Override
+        public int docID() {
+            return docId;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+         */
+        @Override
+        public int nextDoc() {
+            // There is never a next document ...
+            docId = Scorer.NO_MORE_DOCS;
+            return docId;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+         */
+        @Override
+        public int advance( int target ) {
+            return Scorer.NO_MORE_DOCS;
+        }
+
+        /**
+         * {@inheritDoc}
+         * <p>
+         * This method always returns a score of 1.0, although in practice it is never invoked because this scorer never
+         * matches any document.
+         * </p>
+         *
+         * @see org.apache.lucene.search.Scorer#score()
+         */
+        @Override
+        public float score() {
+            return 1.0f;
+        }
+    }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/MatchNoneQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/NotQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/NotQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/NotQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/NotQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,274 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+
+/**
+ * A Lucene {@link Query} implementation that is used to represent a NOT expression of another wrapped Query object. This query
+ * implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that were <i>not</i> scored by the wrapped query. In other words, if the wrapped query ended up scoring any document, that
+ * document is <i>not</i> scored (i.e., skipped) by this query.
+ */
+public class NotQuery extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The operand that is being negated by this query.
+ */
+ protected final Query operand;
+
+ /**
+ * Construct a NOT(x) constraint where the 'x' operand is supplied.
+ *
+ * @param operand the operand being negated
+ */
+ public NotQuery( Query operand ) {
+ this.operand = operand;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new NotWeight(searcher);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new NotQuery(operand);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return "NOT(" + operand.toString(field) + ")";
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class NotWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+ private final Searcher searcher;
+
+ protected NotWeight( Searcher searcher ) {
+ this.searcher = searcher;
+ assert this.searcher != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return NotQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) throws IOException {
+ // Get the operand's score, and set this on the NOT query
+ Scorer operandScorer = operand.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
+ // Return a custom scorer ...
+ return new NotScorer(operandScorer, reader);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) throws IOException {
+ Explanation operandExplanation = operand.weight(searcher).explain(reader, doc);
+ String desc = operandExplanation.getDescription();
+ return new Explanation(getValue(), "NOT(" + desc + ")");
+ }
+ }
+
+ /**
+ * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
+ * for the operand of the NOT.
+ */
+ protected static class NotScorer extends Scorer {
+ private int docId = -1;
+ private int nextScoredDocId = -1;
+ private final Scorer operandScorer;
+ private final IndexReader reader;
+ private final int pastMaxDocId;
+
+ /**
+ * @param operandScorer the scorer that is used to score the documents based upon the operand of the NOT; may not be null
+ * @param reader the reader that has access to all the docs ...
+ */
+ protected NotScorer( Scorer operandScorer,
+ IndexReader reader ) {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ this.operandScorer = operandScorer;
+ this.reader = reader;
+ assert this.operandScorer != null;
+ assert this.reader != null;
+ this.pastMaxDocId = this.reader.maxDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() throws IOException {
+ if (nextScoredDocId == -1) {
+ // Find the first document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ }
+ do {
+ ++docId;
+ if (docId == pastMaxDocId) {
+                    // We're already at the end of the documents in the index, so return no more docs
+ return Scorer.NO_MORE_DOCS;
+ }
+ if (docId == nextScoredDocId) {
+ // Find the next document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ continue;
+ }
+ if (reader.isDeleted(docId)) {
+ // We should skip this document ...
+ continue;
+ }
+ return docId;
+ } while (true);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) throws IOException {
+ if (target == Scorer.NO_MORE_DOCS) return target;
+ while (true) {
+ int doc = nextDoc();
+ if (doc >= target) return doc;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/NotQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ScoreQuery.java (from rev 1417, trunk/dna-search/src/main/java/org/jboss/dna/search/query/ScoreQuery.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ScoreQuery.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ScoreQuery.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,275 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import java.io.IOException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.query.model.FullTextSearchScore;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link FullTextSearchScore} criteria to another
+ * wrapped Query object. This query implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * that were <i>not</i> scored by the wrapped query. In other words, if the wrapped query ended up scoring any document, that
+ * document is <i>not</i> scored (i.e., skipped) by this query.
+ */
+public class ScoreQuery extends Query {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The operand that is being negated by this query.
+ */
+ protected final Query operand;
+
+ /**
+ * Construct a NOT(x) constraint where the 'x' operand is supplied.
+ *
+ * @param operand the operand being negated
+ */
+ public ScoreQuery( Query operand ) {
+ this.operand = operand;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher)
+ */
+ @Override
+ public Weight createWeight( Searcher searcher ) {
+ return new NotWeight(searcher);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new ScoreQuery(operand);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#toString(java.lang.String)
+ */
+ @Override
+ public String toString( String field ) {
+ return "NOT(" + operand.toString(field) + ")";
+ }
+
+ /**
+ * Calculates query weights and builds query scores for our NOT queries.
+ */
+ protected class NotWeight extends Weight {
+ private static final long serialVersionUID = 1L;
+ private final Searcher searcher;
+
+ protected NotWeight( Searcher searcher ) {
+ this.searcher = searcher;
+ assert this.searcher != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#getQuery()
+ */
+ @Override
+ public Query getQuery() {
+ return ScoreQuery.this;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a weight factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#getValue()
+ */
+ @Override
+ public float getValue() {
+ return 1.0f; // weight factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always returns a normalization factor of 1.0.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#sumOfSquaredWeights()
+ */
+ @Override
+ public float sumOfSquaredWeights() {
+ return 1.0f; // normalization factor of 1.0
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation always does nothing, as there is nothing to normalize.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Weight#normalize(float)
+ */
+ @Override
+ public void normalize( float norm ) {
+ // No need to do anything here
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)
+ */
+ @Override
+ public Scorer scorer( IndexReader reader,
+ boolean scoreDocsInOrder,
+ boolean topScorer ) throws IOException {
+ // Get the operand's score, and set this on the NOT query
+ Scorer operandScorer = operand.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
+ // Return a custom scorer ...
+ return new NotScorer(operandScorer, reader);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public Explanation explain( IndexReader reader,
+ int doc ) throws IOException {
+ Explanation operandExplanation = operand.weight(searcher).explain(reader, doc);
+ String desc = operandExplanation.getDescription();
+ return new Explanation(getValue(), "NOT(" + desc + ")");
+ }
+ }
+
+ /**
+ * A scorer for the NOT query that iterates over documents (in increasing docId order), using the given scorer implementation
+ * for the operand of the NOT.
+ */
+ protected static class NotScorer extends Scorer {
+ private int docId = -1;
+ private int nextScoredDocId = -1;
+ private final Scorer operandScorer;
+ private final IndexReader reader;
+ private final int pastMaxDocId;
+
+ /**
+ * @param operandScorer the scorer that is used to score the documents based upon the operand of the NOT; may not be null
+ * @param reader the reader that has access to all the docs ...
+ */
+ protected NotScorer( Scorer operandScorer,
+ IndexReader reader ) {
+ // We don't care which Similarity we have, because we don't use it. So get the default.
+ super(Similarity.getDefault());
+ this.operandScorer = operandScorer;
+ this.reader = reader;
+ assert this.operandScorer != null;
+ assert this.reader != null;
+ this.pastMaxDocId = this.reader.maxDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return docId;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() throws IOException {
+ if (nextScoredDocId == -1) {
+ // Find the first document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ }
+ do {
+ ++docId;
+ if (docId == pastMaxDocId) {
+                    // We're already at the end of the documents in the index, so return no more docs
+ return Scorer.NO_MORE_DOCS;
+ }
+ if (docId == nextScoredDocId) {
+ // Find the next document that is scored by the operand's scorer ...
+ nextScoredDocId = operandScorer.nextDoc();
+ continue;
+ }
+ if (reader.isDeleted(docId)) {
+ // We should skip this document ...
+ continue;
+ }
+ return docId;
+ } while (true);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) throws IOException {
+ if (target == Scorer.NO_MORE_DOCS) return target;
+ while (true) {
+ int doc = nextDoc();
+ if (doc >= target) return doc;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This method always returns a score of 1.0 for the current document, since only those documents that satisfy the NOT are
+ * scored by this scorer.
+ * </p>
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ return 1.0f;
+ }
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/main/java/org/jboss/dna/search/lucene/query/ScoreQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/dna/search/lucene/LuceneI18n.properties (from rev 1417, trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties)
===================================================================
--- trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/dna/search/lucene/LuceneI18n.properties (rev 0)
+++ trunk/extensions/dna-search-lucene/src/main/resources/org/jboss/dna/search/lucene/LuceneI18n.properties 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,30 @@
+#
+# JBoss DNA (http://www.jboss.org/dna)
+# See the COPYRIGHT.txt file distributed with this work for information
+# regarding copyright ownership. Some portions may be licensed
+# to Red Hat, Inc. under one or more contributor license agreements.
+# See the AUTHORS.txt file in the distribution for a full listing of
+# individual contributors.
+#
+# JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+# is licensed to you under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation; either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# JBoss DNA is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free
+# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+#
+
+locationForIndexesIsNotDirectory = Location "{0}" cannot be used for search indexes for workspace "{1}" because it is a directory
+locationForIndexesCannotBeRead = Location "{0}" cannot be used for search indexes for workspace "{1}" because it cannot be read
+locationForIndexesCannotBeWritten = Location "{0}" cannot be used for search indexes for workspace "{1}" because its contents cannot be written or updated
+
+errorWhileCommittingIndexChanges = Error while committing changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
+errorWhileRollingBackIndexChanges = Error while rolling back changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
Added: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngineTest.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngineTest.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngineTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,335 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Mockito.mock;
+import java.util.UUID;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.NameFactory;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.request.CloneBranchRequest;
+import org.jboss.dna.graph.request.CloneWorkspaceRequest;
+import org.jboss.dna.graph.request.CopyBranchRequest;
+import org.jboss.dna.graph.request.CreateNodeRequest;
+import org.jboss.dna.graph.request.CreateWorkspaceRequest;
+import org.jboss.dna.graph.request.DeleteBranchRequest;
+import org.jboss.dna.graph.request.DestroyWorkspaceRequest;
+import org.jboss.dna.graph.request.GetWorkspacesRequest;
+import org.jboss.dna.graph.request.MoveBranchRequest;
+import org.jboss.dna.graph.request.ReadAllChildrenRequest;
+import org.jboss.dna.graph.request.ReadAllPropertiesRequest;
+import org.jboss.dna.graph.request.UpdatePropertiesRequest;
+import org.jboss.dna.graph.request.VerifyWorkspaceRequest;
+import org.jboss.dna.graph.search.SearchEngineWorkspace;
+import org.jboss.dna.graph.search.SearchEngine.Workspaces;
+import org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.AbstractLuceneProcessor;
+import org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.WorkspaceSession;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class AbstractLuceneSearchEngineTest {
+
+ private ExecutionContext context;
+ private AbstractLuceneProcessor<TestWorkspace, TestSession> processor;
+ private Workspaces<TestWorkspace> workspaces;
+
+ @SuppressWarnings( "unchecked" )
+ @Before
+ public void beforeEach() {
+ context = new ExecutionContext();
+ workspaces = mock(Workspaces.class);
+ processor = new TestProcessor("source", context, workspaces, false);
+ }
+
+ protected Property property( String name,
+ Object... values ) {
+ return context.getPropertyFactory().create(name(name), values);
+ }
+
+ protected Name name( String name ) {
+ return context.getValueFactories().getNameFactory().create(name);
+ }
+
+ @Test
+ public void shouldSerializeSingleValuedProperty() {
+ Property p1 = property("p1", "v1");
+ String serialized = processor.serializeProperty(p1);
+ assertThat(serialized, is("p1=v1"));
+ }
+
+ @Test
+ public void shouldSerializeTwoValuedProperty() {
+ Property p1 = property("p1", "v1", "v2");
+ String serialized = processor.serializeProperty(p1);
+ assertThat(serialized, is("p1=v1\nv2"));
+ }
+
+ @Test
+ public void shouldSerializeMultiValuedProperty() {
+ Property p1 = property("p1", "v1", "v2", "v3");
+ String serialized = processor.serializeProperty(p1);
+ assertThat(serialized, is("p1=v1\nv2\nv3"));
+ }
+
+ @Test
+ public void shouldDeserializeSingleValuedProperty() {
+ Property p1 = property("p1", "v1");
+ Property p1a = processor.deserializeProperty(processor.serializeProperty(p1));
+ assertThat(p1a, is(p1));
+ }
+
+ @Test
+ public void shouldDeserializeTwoValuedProperty() {
+ Property p1 = property("p1", "v1", 4L);
+ Property p1a = processor.deserializeProperty(processor.serializeProperty(p1));
+ assertThat(p1a, is(p1));
+ }
+
+ @Test
+ public void shouldDeserializeMultiValuedProperty() {
+ // The values are stored as strings, so names and paths must be stored as string-values in the property
+ Property p1 = property("p1", "v1", 4L, name("dna:something").getString(context.getNamespaceRegistry()), UUID.randomUUID());
+ Property p1a = processor.deserializeProperty(processor.serializeProperty(p1));
+ assertThat(p1a, is(p1));
+ }
+
+ @Test
+ public void shouldSerializeAndDeserializePropertyWithNameValues() {
+ Property p1 = property("p1", name("v1"), name("dna:something"));
+ Property p2 = processor.deserializeProperty(processor.serializeProperty(p1));
+ assertThat(p2.getName(), is(p1.getName()));
+ Object[] values1 = p1.getValuesAsArray();
+ Object[] values2 = p2.getValuesAsArray();
+ assertThat(values1.length, is(values2.length));
+ // The standard way is to access the values with a value factory, so doing this does work ...
+ NameFactory names = context.getValueFactories().getNameFactory();
+ for (int i = 0; i != values1.length; ++i) {
+ assertThat(names.create(values1[i]), is(names.create(values2[i])));
+ }
+ }
+
+ protected static class TestProcessor extends AbstractLuceneProcessor<TestWorkspace, TestSession> {
+
+ protected TestProcessor( String sourceName,
+ ExecutionContext context,
+ Workspaces<TestWorkspace> workspaces,
+ boolean readOnly ) {
+ super(sourceName, context, workspaces, null, null, readOnly);
+
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.AbstractLuceneProcessor#createSessionFor(org.jboss.dna.graph.search.SearchEngineWorkspace)
+ */
+ @Override
+ protected TestSession createSessionFor( TestWorkspace workspace ) {
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.lucene.AbstractLuceneSearchEngine.AbstractLuceneProcessor#fullTextFieldName(java.lang.String)
+ */
+ @Override
+ protected String fullTextFieldName( String propertyName ) {
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.VerifyWorkspaceRequest)
+ */
+ @Override
+ public void process( VerifyWorkspaceRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.GetWorkspacesRequest)
+ */
+ @Override
+ public void process( GetWorkspacesRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CreateWorkspaceRequest)
+ */
+ @Override
+ public void process( CreateWorkspaceRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CloneBranchRequest)
+ */
+ @Override
+ public void process( CloneBranchRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CloneWorkspaceRequest)
+ */
+ @Override
+ public void process( CloneWorkspaceRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.DestroyWorkspaceRequest)
+ */
+ @Override
+ public void process( DestroyWorkspaceRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CopyBranchRequest)
+ */
+ @Override
+ public void process( CopyBranchRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.CreateNodeRequest)
+ */
+ @Override
+ public void process( CreateNodeRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.DeleteBranchRequest)
+ */
+ @Override
+ public void process( DeleteBranchRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.MoveBranchRequest)
+ */
+ @Override
+ public void process( MoveBranchRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.ReadAllChildrenRequest)
+ */
+ @Override
+ public void process( ReadAllChildrenRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.ReadAllPropertiesRequest)
+ */
+ @Override
+ public void process( ReadAllPropertiesRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.request.processor.RequestProcessor#process(org.jboss.dna.graph.request.UpdatePropertiesRequest)
+ */
+ @Override
+ public void process( UpdatePropertiesRequest request ) {
+ super.processUnknownRequest(request);
+ }
+
+ }
+
+ protected static abstract class TestSession implements WorkspaceSession {
+
+ }
+
+ protected static class TestWorkspace implements SearchEngineWorkspace {
+ private final String name;
+ private boolean destroyed = false;
+
+ protected TestWorkspace( String name ) {
+ this.name = name;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineWorkspace#destroy(org.jboss.dna.graph.ExecutionContext)
+ */
+ public void destroy( ExecutionContext context ) {
+ destroyed = true;
+ }
+
+ public boolean isDestroyed() {
+ return destroyed;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngineWorkspace#getWorkspaceName()
+ */
+ public String getWorkspaceName() {
+ return name;
+ }
+ }
+
+}
Property changes on: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/AbstractLuceneSearchEngineTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistryTest.java (from rev 1417, trunk/dna-search/src/test/java/org/jboss/dna/search/EncodingNamespaceRegistryTest.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistryTest.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistryTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,102 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import java.util.Collection;
+import org.jboss.dna.common.text.SecureHashTextEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.util.SecureHash.Algorithm;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.property.NamespaceRegistry;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.NamespaceRegistry.Namespace;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class EncodingNamespaceRegistryTest {
+
+ private ExecutionContext context;
+ private NamespaceRegistry registry;
+ private EncodingNamespaceRegistry encodedRegistry;
+ private TextEncoder encoder;
+ private ExecutionContext encodedContext;
+
+ @Before
+ public void beforeEach() {
+ this.context = new ExecutionContext();
+ this.registry = this.context.getNamespaceRegistry();
+ this.encoder = new SecureHashTextEncoder(Algorithm.SHA_1, 10);
+ this.encodedRegistry = new EncodingNamespaceRegistry(registry, encoder);
+ this.encodedContext = context.with(encodedRegistry);
+ }
+
+ @Test
+ public void shouldHaveEncodedPrefixesForAllRegisteredNamespacesExceptFixedOnes() {
+ Collection<Namespace> namespaces = registry.getNamespaces();
+ assertThat(namespaces.size() > 4, is(true));
+ for (Namespace namespace : namespaces) {
+ String uri = namespace.getNamespaceUri();
+ String actualEncodedPrefix = encodedRegistry.getPrefixForNamespaceUri(uri, false);
+ if (encodedRegistry.getFixedNamespaceUris().contains(uri)) {
+ assertThat(actualEncodedPrefix, is(namespace.getPrefix()));
+ } else {
+ String expectedEncodedPrefix = encoder.encode(uri);
+ assertThat(expectedEncodedPrefix, is(actualEncodedPrefix));
+ }
+ String actualUri = encodedRegistry.getNamespaceForPrefix(actualEncodedPrefix);
+ assertThat(uri, is(actualUri));
+ }
+ }
+
+ @Test
+ public void shouldAllowPathConversionToAndFromString() {
+ String uri1 = "http://acme.com/wabbler";
+ String uri2 = "http://troublemakers.com/contixity";
+ String uri3 = "http://example.com/infinitiy";
+ String ns1 = "wab";
+ String ns2 = "ctx";
+ String ns3 = "inf";
+ registry.register(ns1, uri1);
+ registry.register(ns2, uri2);
+ registry.register(ns3, uri3);
+ String pathStr = "/wab:part1/wab:part2/ctx:part3/inf:part4/dna:part5";
+ Path actualPath = context.getValueFactories().getPathFactory().create(pathStr);
+ String actualPathStr = context.getValueFactories().getStringFactory().create(actualPath);
+ assertThat(pathStr, is(actualPathStr));
+ String encodedPathStr = encodedContext.getValueFactories().getStringFactory().create(actualPath);
+ String encodedPrefix1 = encoder.encode(uri1);
+ String encodedPrefix2 = encoder.encode(uri2);
+ String encodedPrefix3 = encoder.encode(uri3);
+ String expectedPathStr = "/" + encodedPrefix1 + ":part1/" + encodedPrefix1 + ":part2/" + encodedPrefix2 + ":part3/"
+ + encodedPrefix3 + ":part4/dna:part5";
+ assertThat(expectedPathStr, is(encodedPathStr));
+ Path actualPath2 = encodedContext.getValueFactories().getPathFactory().create(encodedPathStr);
+ assertThat(actualPath, is(actualPath2));
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/EncodingNamespaceRegistryTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/IndexingRulesTest.java (from rev 1417, trunk/dna-search/src/test/java/org/jboss/dna/search/IndexingRulesTest.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/IndexingRulesTest.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/IndexingRulesTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,60 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import org.apache.lucene.document.Field;
+import org.jboss.dna.search.lucene.IndexRules.Builder;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class IndexingRulesTest {
+
+ private Builder builder;
+ private IndexRules rules;
+
+ @Before
+ public void beforeEach() {
+ builder = IndexRules.createBuilder();
+ rules = builder.build();
+ }
+
+ @Test
+ public void shouldBuildValidRulesFromBuilderThatIsNotInvoked() {
+ builder = IndexRules.createBuilder();
+ rules = builder.build();
+ }
+
+ @Test
+ public void shouldBuildValidRulesFromBuilderAfterJustSettingDefaultRules() {
+ builder.defaultTo(Field.Store.NO, Field.Index.ANALYZED_NO_NORMS);
+ rules = builder.build();
+ assertThat(rules.getRule(null).getIndexOption(), is(Field.Index.ANALYZED_NO_NORMS));
+ assertThat(rules.getRule(null).getStoreOption(), is(Field.Store.NO));
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/IndexingRulesTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneConfigurationsTest.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneConfigurationsTest.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneConfigurationsTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,174 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsInstanceOf.instanceOf;
+import static org.hamcrest.core.IsNot.not;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsSame.sameInstance;
+import static org.junit.Assert.assertThat;
+import java.io.File;
+import java.util.Map;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.text.UrlEncoder;
+import org.jboss.dna.common.util.FileUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+public class LuceneConfigurationsTest {
+
+ private LuceneConfiguration config;
+ private String workspace;
+ private String index;
+ private Directory directory;
+ private Multimap<String, String> indexNamesByWorkspaceName;
+ private File tempArea;
+
+ @Before
+ public void beforeEach() {
+ workspace = "workspace";
+ index = "index";
+ indexNamesByWorkspaceName = HashMultimap.create();
+ tempArea = new File("target/configTest");
+ if (tempArea.exists()) FileUtil.delete(tempArea); // deletes recursively
+ tempArea.mkdirs();
+ }
+
+ @After
+ public void afterEach() {
+ if (config != null) {
+ try {
+ for (Map.Entry<String, String> entry : indexNamesByWorkspaceName.entries()) {
+ assertThat(config.destroyDirectory(entry.getKey(), entry.getValue()), is(true));
+ }
+ } finally {
+ config = null;
+ directory = null;
+ indexNamesByWorkspaceName.clear();
+ }
+ }
+ if (tempArea != null) {
+ try {
+ FileUtil.delete(tempArea); // deletes recursively
+ } finally {
+ tempArea = null;
+ }
+ }
+ }
+
+ protected void destroyDirectory( LuceneConfiguration config,
+ String workspaceName,
+ String indexName ) {
+ assertThat(config.destroyDirectory(workspaceName, indexName), is(true)); // use the parameters, not the 'workspace'/'index' fields
+ indexNamesByWorkspaceName.remove(workspaceName, indexName);
+ }
+
+ protected Directory getDirectory( LuceneConfiguration config,
+ String workspaceName,
+ String indexName ) {
+ Directory result = config.getDirectory(workspaceName, indexName);
+ assertThat(result, is(notNullValue()));
+ indexNamesByWorkspaceName.put(workspaceName, indexName);
+ return result;
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // In-Memory directories ...
+ // ----------------------------------------------------------------------------------------------------------------
+
+ @Test
+ public void shouldCreateConfigurationFromInMemoryStorage() {
+ config = LuceneConfigurations.inMemory();
+ assertThat(config, is(notNullValue()));
+ directory = getDirectory(config, workspace, index);
+ assertThat(directory, is(instanceOf(RAMDirectory.class)));
+ }
+
+ @Test
+ public void shouldReturnSameDirectoryForSameWorkspaceAndIndexNamesFromInMemoryConfiguration() {
+ config = LuceneConfigurations.inMemory();
+ assertThat(config, is(notNullValue()));
+ directory = getDirectory(config, workspace, index);
+ assertThat(directory, is(instanceOf(RAMDirectory.class)));
+ for (int i = 0; i != 10; ++i) {
+ assertThat(getDirectory(config, workspace, index), is(sameInstance(directory)));
+ }
+ assertThat(indexNamesByWorkspaceName.size(), is(1));
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // FileSystem directories ...
+ // ----------------------------------------------------------------------------------------------------------------
+
+ @Test
+ public void shouldCreateConfigurationFromFileSystemStorage() {
+ config = LuceneConfigurations.using(tempArea);
+ assertThat(config, is(notNullValue()));
+ directory = getDirectory(config, workspace, index);
+ assertThat(directory, is(instanceOf(FSDirectory.class)));
+ FSDirectory fsDirectory = (FSDirectory)directory;
+ assertThat(fsDirectory.getFile().getName(), is(index));
+ assertThat(fsDirectory.getFile().getParentFile().getName(), is(workspace));
+ }
+
+ @Test
+ public void shouldReturnSameDirectoryForSameWorkspaceAndIndexNamesFromFileSystemStorage() {
+ config = LuceneConfigurations.using(tempArea);
+ assertThat(config, is(notNullValue()));
+ directory = getDirectory(config, workspace, index);
+ assertThat(directory, is(instanceOf(FSDirectory.class)));
+ FSDirectory fsDirectory = (FSDirectory)directory;
+ assertThat(fsDirectory.getFile().getName(), is(index));
+ assertThat(fsDirectory.getFile().getParentFile().getName(), is(workspace));
+ for (int i = 0; i != 10; ++i) {
+ assertThat(getDirectory(config, workspace, index), is(sameInstance(directory)));
+ }
+ assertThat(indexNamesByWorkspaceName.size(), is(1));
+ }
+
+ @Test
+ public void shouldEncodeDirectoryNames() {
+ // Set up an encoder and make sure that the names for the index and workspace can't be used as file system names ...
+ TextEncoder encoder = new UrlEncoder();
+ index = "some/special::/\nindex(name)";
+ workspace = "some/special::/\nworkspace(name)/illegalInWindows:\\/?%*|\"'<>.txt";
+ assertThat(index, is(not(encoder.encode(index))));
+ assertThat(workspace, is(not(encoder.encode(workspace))));
+
+ config = LuceneConfigurations.using(tempArea, null, encoder, encoder);
+ assertThat(config, is(notNullValue()));
+ directory = getDirectory(config, workspace, index);
+ assertThat(directory, is(instanceOf(FSDirectory.class)));
+ FSDirectory fsDirectory = (FSDirectory)directory;
+ assertThat(fsDirectory.getFile().getName(), is(encoder.encode(index)));
+ assertThat(fsDirectory.getFile().getParentFile().getName(), is(encoder.encode(workspace)));
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneConfigurationsTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneI18nTest.java (from rev 1417, trunk/dna-search/src/test/java/org/jboss/dna/search/SearchI18nTest.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneI18nTest.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneI18nTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,33 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import org.jboss.dna.common.AbstractI18nTest;
+
+public class LuceneI18nTest extends AbstractI18nTest {
+
+ public LuceneI18nTest() {
+ super(LuceneI18n.class);
+ }
+}
Property changes on: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneI18nTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneSearchEngineTest.java
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneSearchEngineTest.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneSearchEngineTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,464 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene;
+
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsInstanceOf.instanceOf;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Graph;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.connector.RepositoryConnection;
+import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
+import org.jboss.dna.graph.connector.RepositorySourceException;
+import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Constraint;
+import org.jboss.dna.graph.query.model.Limit;
+import org.jboss.dna.graph.query.model.Query;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.model.Selector;
+import org.jboss.dna.graph.query.model.SelectorName;
+import org.jboss.dna.graph.query.model.Source;
+import org.jboss.dna.graph.query.model.TypeSystem;
+import org.jboss.dna.graph.query.parse.SqlQueryParser;
+import org.jboss.dna.graph.query.process.QueryResultColumns;
+import org.jboss.dna.graph.query.validate.ImmutableSchemata;
+import org.jboss.dna.graph.query.validate.Schemata;
+import org.jboss.dna.graph.request.AccessQueryRequest;
+import org.jboss.dna.graph.request.FullTextSearchRequest;
+import org.junit.Before;
+import org.junit.Test;
+import org.xml.sax.SAXException;
+
+public class LuceneSearchEngineTest {
+
+ private LuceneSearchEngine engine;
+ private ExecutionContext context;
+ private TypeSystem typeSystem;
+ private String sourceName;
+ private String workspaceName1;
+ private String workspaceName2;
+ private InMemoryRepositorySource source;
+ private RepositoryConnectionFactory connectionFactory;
+ private Graph content;
+ private Schemata schemata;
+ private SqlQueryParser sql;
+ private Map<String, Object> variables;
+
+ /** Controls whether the results from each test should be printed to System.out */
+ private boolean print = false;
+
+ @Before
+ public void beforeEach() throws Exception {
+ context = new ExecutionContext();
+ typeSystem = context.getValueFactories().getTypeSystem();
+ sourceName = "sourceA";
+ workspaceName1 = "workspace1";
+ workspaceName2 = "workspace2";
+
+ // Set up the source and graph instance ...
+ source = new InMemoryRepositorySource();
+ source.setName(sourceName);
+ content = Graph.create(source, context);
+
+ // Create the workspaces ...
+ content.createWorkspace().named(workspaceName1);
+ content.createWorkspace().named(workspaceName2);
+
+ // Set up the connection factory ...
+ connectionFactory = new RepositoryConnectionFactory() {
+ @SuppressWarnings( "synthetic-access" )
+ public RepositoryConnection createConnection( String sourceName ) throws RepositorySourceException {
+ return source.getConnection();
+ }
+ };
+
+ // Set up the provider and the search engine ...
+ IndexRules.Builder rulesBuilder = IndexRules.createBuilder(LuceneSearchEngine.DEFAULT_RULES);
+ rulesBuilder.defaultTo(Field.Store.YES, Field.Index.NOT_ANALYZED);
+ rulesBuilder.stringField(name("model"), Field.Store.YES, Field.Index.ANALYZED);
+ rulesBuilder.integerField(name("year"), Field.Store.YES, Field.Index.NOT_ANALYZED);
+ rulesBuilder.floatField(name("userRating"), Field.Store.YES, Field.Index.NOT_ANALYZED, 0.0f, 10.0f);
+ rulesBuilder.integerField(name("mpgCity"), Field.Store.YES, Field.Index.NOT_ANALYZED, 0, 50);
+ rulesBuilder.integerField(name("mpgHighway"), Field.Store.YES, Field.Index.NOT_ANALYZED, 0, 50);
+ // rulesBuilder.analyzeAndStoreAndFullText(name("maker"));
+ IndexRules rules = rulesBuilder.build();
+ LuceneConfiguration luceneConfig = LuceneConfigurations.inMemory();
+ // LuceneConfiguration luceneConfig = LuceneConfigurations.using(new File("target/testIndexes"));
+ Analyzer analyzer = null;
+ engine = new LuceneSearchEngine(sourceName, connectionFactory, true, luceneConfig, rules, analyzer);
+ loadContent();
+
+ // Create the schemata for the workspaces ...
+ schemata = ImmutableSchemata.createBuilder(typeSystem)
+ .addTable("__ALLNODES__", "maker", "model", "year", "msrp", "mpgHighway", "mpgCity")
+ .makeSearchable("__ALLNODES__", "maker")
+ .build();
+
+ // And create the SQL parser ...
+ sql = new SqlQueryParser();
+
+ variables = new HashMap<String, Object>();
+ }
+
+ protected Name name( String name ) {
+ return context.getValueFactories().getNameFactory().create(name);
+ }
+
+ protected Path path( String path ) {
+ return context.getValueFactories().getPathFactory().create(path);
+ }
+
+ protected void loadContent() throws IOException, SAXException {
+ // Load the content ...
+ content.useWorkspace(workspaceName1);
+ content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
+ content.useWorkspace(workspaceName2);
+ content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("aircraft.xml")).into("/");
+ }
+
+ protected QueryResults search( String workspaceName,
+ String searchExpression,
+ int maxResults,
+ int offset ) {
+ LuceneSearchProcessor processor = engine.createProcessor(context, null, true);
+ try {
+ FullTextSearchRequest request = new FullTextSearchRequest(searchExpression, workspaceName, maxResults, offset);
+ processor.process(request);
+ if (request.hasError()) {
+ fail(request.getError().getMessage());
+ return null;
+ }
+ assertThat(request.getResultColumns().getColumnCount(), is(0));
+ assertThat(request.getResultColumns().getLocationCount(), is(1));
+ assertThat(request.getResultColumns().hasFullTextSearchScores(), is(true));
+ // Convert the results to a List<Location>
+ List<Object[]> tuples = request.getTuples();
+ List<Location> results = new ArrayList<Location>(tuples.size());
+ for (Object[] tuple : tuples) {
+ results.add((Location)tuple[0]);
+ Float score = (Float)tuple[1];
+ assertThat(score, is(notNullValue()));
+ }
+ return new org.jboss.dna.graph.query.process.QueryResults(request.getResultColumns(), request.getStatistics(),
+ request.getTuples());
+ } finally {
+ processor.close();
+ }
+ }
+
+ protected List<Constraint> getAndedConstraint( Constraint constraint,
+ List<Constraint> andedConstraints ) {
+ if (constraint != null) {
+ if (constraint instanceof And) {
+ And and = (And)constraint;
+ getAndedConstraint(and.getLeft(), andedConstraints);
+ getAndedConstraint(and.getRight(), andedConstraints);
+ } else {
+ andedConstraints.add(constraint);
+ }
+ }
+ return andedConstraints;
+ }
+
+ protected QueryResults query( String workspaceName,
+ String sql ) {
+ QueryCommand command = this.sql.parseQuery(sql, typeSystem);
+ assertThat(command, is(instanceOf(Query.class)));
+ Query query = (Query)command;
+ Source source = query.getSource();
+ assertThat(source, is(instanceOf(Selector.class)));
+ SelectorName tableName = ((Selector)source).getName();
+ Constraint constraint = query.getConstraint();
+ Columns resultColumns = new QueryResultColumns(query.getColumns(), QueryResultColumns.includeFullTextScores(constraint));
+ List<Constraint> andedConstraints = getAndedConstraint(constraint, new ArrayList<Constraint>());
+ Limit limit = query.getLimits();
+ LuceneSearchProcessor processor = engine.createProcessor(context, null, true);
+ try {
+ AccessQueryRequest request = new AccessQueryRequest(workspaceName, tableName, resultColumns, andedConstraints, limit,
+ schemata, variables);
+ processor.process(request);
+ if (request.hasError()) {
+ fail(request.getError().getMessage());
+ }
+ return new org.jboss.dna.graph.query.process.QueryResults(request.resultColumns(), request.getStatistics(),
+ request.getTuples());
+ } finally {
+ processor.close();
+ }
+ }
+
+ @Test
+ public void shouldIndexAllContentInRepositorySource() throws Exception {
+ engine.index(context, 3);
+ }
+
+ @Test
+ public void shouldIndexAllContentInWorkspace() throws Exception {
+ engine.index(context, workspaceName1, 3);
+ engine.index(context, workspaceName2, 5);
+ }
+
+ @Test
+ public void shouldIndexAllContentInWorkspaceBelowPath() throws Exception {
+ engine.index(context, workspaceName1, path("/Cars/Hybrid"), 3);
+ engine.index(context, workspaceName2, path("/Aircraft/Commercial"), 5);
+ }
+
+ @Test
+ public void shouldReIndexAllContentInWorkspaceBelowPath() throws Exception {
+ for (int i = 0; i != 3; i++) { // was 'i != 0', which never entered the loop, so re-indexing was never exercised
+ engine.index(context, workspaceName1, path("/Cars/Hybrid"), 3);
+ engine.index(context, workspaceName2, path("/Aircraft/Commercial"), 5);
+ }
+ }
+
+ @Test
+ public void shouldHaveLoadedTestContentIntoRepositorySource() {
+ content.useWorkspace(workspaceName1);
+ assertThat(content.getNodeAt("/Cars/Hybrid/Toyota Prius").getProperty("msrp").getFirstValue(), is((Object)"$21,500"));
+ }
+
+ @Test
+ public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfOne() {
+ engine.index(context, workspaceName1, path("/"), 1);
+ }
+
+ @Test
+ public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTwo() {
+ engine.index(context, workspaceName1, path("/"), 2);
+ }
+
+ @Test
+ public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfThree() {
+ engine.index(context, workspaceName1, path("/"), 3);
+ }
+
+ @Test
+ public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfFour() {
+ engine.index(context, workspaceName1, path("/"), 4);
+ }
+
+ @Test
+ public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTen() {
+ engine.index(context, workspaceName1, path("/"), 10);
+ }
+
+ @Test
+ public void shouldIndexRepositoryContentStartingAtNonRootNode() {
+ engine.index(context, workspaceName1, path("/Cars"), 10);
+ }
+
+ @Test
+ public void shouldReIndexRepositoryContentStartingAtNonRootNode() {
+ engine.index(context, workspaceName1, path("/Cars"), 10);
+ engine.index(context, workspaceName1, path("/Cars"), 10);
+ engine.index(context, workspaceName1, path("/Cars"), 10);
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // Full-text search
+ // ----------------------------------------------------------------------------------------------------------------
+
+ @Test
+ public void shouldFindNodesByFullTextSearch() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ QueryResults results = search(workspaceName1, "Toyota Prius", 10, 0);
+ assertThat(results, is(notNullValue()));
+ assertRowCount(results, 2);
+ Location first = (Location)(results.getTuples().get(0)[0]);
+ Location second = (Location)(results.getTuples().get(1)[0]);
+ assertThat(first.getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
+ assertThat(second.getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
+ }
+
+ @Test
+ public void shouldFindNodesByFullTextSearchWithOffset() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ QueryResults results = search(workspaceName1, "toyota prius", 1, 0);
+ assertThat(results, is(notNullValue()));
+ assertRowCount(results, 1);
+ Location first = (Location)(results.getTuples().get(0)[0]);
+ assertThat(first.getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
+
+ results = search(workspaceName1, "+Toyota", 1, 1);
+ assertThat(results, is(notNullValue()));
+ assertRowCount(results, 1);
+ first = (Location)(results.getTuples().get(0)[0]);
+ assertThat(first.getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // Query
+ // ----------------------------------------------------------------------------------------------------------------
+
+ @Test
+ public void shouldFindNodesBySimpleQuery() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 18);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithEqualityComparisonCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE maker = 'Toyota'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 2);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithGreaterThanComparisonCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker, mpgHighway, mpgCity FROM __ALLNODES__ WHERE mpgHighway > 20";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 6);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithLowercaseEqualityComparisonCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE LOWER(maker) = 'toyota'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 2);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithUppercaseEqualityComparisonCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE UPPER(maker) = 'TOYOTA'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 2);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithLikeComparisonCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE maker LIKE 'Toyo%'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 2);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithLikeComparisonCriteriaWithLeadingWildcard() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE maker LIKE '%yota'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 2);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithLowercaseLikeComparisonCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE LOWER(maker) LIKE 'toyo%'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 2);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithFullTextSearchCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE CONTAINS(maker,'martin')";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 1);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithDepthCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE DEPTH() > 2";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 12);
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithLocalNameCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE LOCALNAME() LIKE 'Toyota%' OR LOCALNAME() LIKE 'Land %'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 4);
+
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithNameCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE NAME() LIKE 'Toyota%[1]' OR NAME() LIKE 'Land %'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 4);
+
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithNameCriteriaThatMatchesNoNodes() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE NAME() LIKE 'Toyota%[2]'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 0);
+
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithPathCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE PATH() LIKE '/Cars[%]/Hy%/Toyota%' OR PATH() LIKE '/Cars[1]/Utility[1]/%'";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 6);
+
+ }
+
+ @Test
+ public void shouldFindNodesBySimpleQueryWithDescendantCriteria() {
+ engine.index(context, workspaceName1, path("/"), 100);
+ String query = "SELECT model, maker FROM __ALLNODES__ WHERE ISDESCENDANTNODE('/Cars/Hybrid')";
+ QueryResults results = query(workspaceName1, query);
+ assertRowCount(results, 3);
+
+ }
+
+ protected void assertRowCount( QueryResults results,
+ int rowCount ) {
+ assertThat(results.getProblems().isEmpty(), is(true));
+ assertThat(results.getTuples().size(), is(rowCount));
+ if (print) {
+ System.out.println(results);
+ }
+ }
+
+}
Property changes on: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/LuceneSearchEngineTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/query/NotQueryTest.java (from rev 1417, trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java)
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/query/NotQueryTest.java (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/query/NotQueryTest.java 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,148 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.lucene.query;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.stub;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Similarity;
+import org.junit.Test;
+
+public class NotQueryTest {
+
+ @Test
+ public void scorerShouldSkipAdjacentDocsIfScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(0, 1, 2, 3, 4);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 5, 6, 7, 8, 9);
+ }
+
+ @Test
+ public void scorerShouldSkipDocsAtEndIfScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(8, 9);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 2, 3, 4, 5, 6, 7);
+ }
+
+ @Test
+ public void scorerShouldScoreFirstDocsIfNotScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(2, 3, 4);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 5, 6, 7, 8, 9);
+ }
+
+ @Test
+ public void scorerShouldScoreNonAdjacentDocsNotScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(2, 4, 8);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 3, 5, 6, 7, 9);
+ }
+
+ protected void assertScores( Scorer scorer,
+ int... docIds ) throws IOException {
+ for (int docId : docIds) {
+ assertThat(scorer.nextDoc(), is(docId));
+ assertThat(scorer.score(), is(1.0f));
+ }
+ assertThat(scorer.nextDoc(), is(Scorer.NO_MORE_DOCS));
+ }
+
+ protected static class MockScorer extends Scorer {
+ private final Iterator<Integer> docIds;
+
+ protected MockScorer( int... docIds ) {
+ super(Similarity.getDefault());
+ List<Integer> ids = new ArrayList<Integer>();
+ for (int docId : docIds) {
+ ids.add(new Integer(docId));
+ }
+ this.docIds = ids.iterator();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#advance(int)
+ */
+ @Override
+ public int advance( int target ) {
+ int doc;
+ while ((doc = nextDoc()) < target) {
+ }
+ return doc;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#docID()
+ */
+ @Override
+ public int docID() {
+ return nextDoc();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() {
+ if (docIds.hasNext()) return docIds.next();
+ return Scorer.NO_MORE_DOCS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ throw new UnsupportedOperationException("Should not be called");
+ }
+ }
+
+}
Property changes on: trunk/extensions/dna-search-lucene/src/test/java/org/jboss/dna/search/lucene/query/NotQueryTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/test/resources/aircraft.xml (from rev 1417, trunk/dna-search/src/test/resources/aircraft.xml)
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/resources/aircraft.xml (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/resources/aircraft.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ JBoss DNA (http://www.jboss.org/dna)
+ ~
+ ~ See the COPYRIGHT.txt file distributed with this work for information
+ ~ regarding copyright ownership. Some portions may be licensed
+ ~ to Red Hat, Inc. under one or more contributor license agreements.
+ ~ See the AUTHORS.txt file in the distribution for a full listing of
+ ~ individual contributors.
+ ~
+ ~ JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ ~ is licensed to you under the terms of the GNU Lesser General Public License as
+ ~ published by the Free Software Foundation; either version 2.1 of
+ ~ the License, or (at your option) any later version.
+ ~
+ ~ JBoss DNA is distributed in the hope that it will be useful,
+ ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ ~ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+ ~ for more details.
+ ~
+ ~ You should have received a copy of the GNU Lesser General Public License
+ ~ along with this distribution; if not, write to:
+ ~ Free Software Foundation, Inc.
+ ~ 51 Franklin Street, Fifth Floor
+ ~ Boston, MA 02110-1301 USA
+ -->
+<Aircraft xmlns:jcr="http://www.jcp.org/jcr/1.0">
+ <Business>
+ <aircraft jcr:name="Gulfstream V" maker="Gulfstream" model="G-V" introduced="1995" range="5800nm" cruiseSpeed="488kt" crew="2" emptyWeight="46200lb" url="http://en.wikipedia.org/wiki/Gulfstream_V"/>
+ <aircraft jcr:name="Learjet 45" maker="Learjet" model="LJ45" introduced="1995" numberBuilt="264+" crew="2" emptyWeight="13695lb" range="2120nm" cruiseSpeed="457kt" url="http://en.wikipedia.org/wiki/Learjet_45"/>
+ </Business>
+ <Commercial>
+ <aircraft jcr:name="Boeing 777" maker="Boeing" model="777-200LR" introduced="1995" numberBuilt="731+" maxRange="7500nm" emptyWeight="326000lb" cruiseSpeed="560mph" url="http://en.wikipedia.org/wiki/Boeing_777"/>
+ <aircraft jcr:name="Boeing 767" maker="Boeing" model="767-200" introduced="1982" numberBuilt="966+" maxRange="3950nm" emptyWeight="176650lb" cruiseSpeed="530mph" url="http://en.wikipedia.org/wiki/Boeing_767"/>
+ <aircraft jcr:name="Boeing 787" maker="Boeing" model="787-3" introduced="2009" range="3050nm" emptyWeight="223000lb" cruiseSpeed="561mph" url="http://en.wikipedia.org/wiki/Boeing_787"/>
+ <aircraft jcr:name="Boeing 757" maker="Boeing" model="757-200" introduced="1983" numberBuilt="1050" range="3900nm" maxWeight="255000lb" cruiseSpeed="530mph" url="http://en.wikipedia.org/wiki/Boeing_757"/>
+ <aircraft jcr:name="Airbus A380" maker="Airbus" model="A380-800" introduced="2007" numberBuilt="18" range="8200nm" maxWeight="1235000lb" cruiseSpeed="647mph" url="http://en.wikipedia.org/wiki/Airbus_a380"/>
+ <aircraft jcr:name="Airbus A340" maker="Airbus" model="A340-200" introduced="1993" numberBuilt="354" range="8000nm" maxWeight="606300lb" cruiseSpeed="557mph" url="http://en.wikipedia.org/wiki/Airbus_A-340"/>
+ <aircraft jcr:name="Airbus A310" maker="Airbus" model="A310-200" introduced="1983" numberBuilt="255" cruiseSpeed="850km/h" emptyWeight="176312lb" range="3670nm" url="http://en.wikipedia.org/wiki/Airbus_A-310"/>
+ <aircraft jcr:name="Embraer RJ-175" maker="Embraer" model="ERJ170-200" introduced="2004" range="3334km" cruiseSpeed="481kt" emptyWeight="21810kg" url="http://en.wikipedia.org/wiki/EMBRAER_170"/>
+ </Commercial>
+ <Vintage>
+ <aircraft jcr:name="Fokker Trimotor" maker="Fokker" model="F.VII" introduced="1925" cruiseSpeed="170km/h" emptyWeight="3050kg" crew="2" url="http://en.wikipedia.org/wiki/Fokker_trimotor"/>
+ <aircraft jcr:name="P-38 Lightning" maker="Lockheed" model="P-38" designedBy="Kelly Johnson" introduced="1941" numberBuilt="10037" rateOfClimb="4750ft/min" range="1300mi" emptyWeight="12780lb" crew="1" url="http://en.wikipedia.org/wiki/P-38_Lightning"/>
+ <aircraft jcr:name="A6M Zero" maker="Mitsubishi" model="A6M" designedBy="Jiro Horikoshi" introduced="1940" numberBuilt="11000" crew="1" emptyWeight="3704lb" serviceCeiling="33000ft" maxSpeed="331mph" range="1929mi" rateOfClimb="3100ft/min" url="http://en.wikipedia.org/wiki/A6M_Zero"/>
+ <aircraft jcr:name="Bf 109" maker="Messerschmitt" model="Bf 109" introduced="1937" url="http://en.wikipedia.org/wiki/BF_109"/>
+ <aircraft jcr:name="Wright Flyer" maker="Wright Brothers" introduced="1903" range="852ft" maxSpeed="30mph" emptyWeight="605lb" crew="1"/>
+ </Vintage>
+ <Homebuilt>
+ <aircraft jcr:name="Long-EZ" maker="Rutan Aircraft Factory" model="61" emptyWeight="760lb" fuelCapacity="200L" maxSpeed="185kt" since="1976" range="1200nm" url="http://en.wikipedia.org/wiki/Rutan_Long-EZ"/>
+ <aircraft jcr:name="Cirrus VK-30" maker="Cirrus Design" model="VK-30" emptyWeight="2400lb" maxLoad="1200lb" maxSpeed="250mph" rateOfClimb="1500ft/min" range="1300mi" url="http://en.wikipedia.org/wiki/Cirrus_VK-30"/>
+ <aircraft jcr:name="Van's RV-4" maker="Van's Aircraft" model="RV-4" introduced="1980" emptyWeight="905lb" maxLoad="500lb" maxSpeed="200mph" rateOfClimb="2450ft/min" range="725mi" url="http://en.wikipedia.org/wiki/Van%27s_Aircraft_RV-4"/>
+ </Homebuilt>
+</Aircraft>
\ No newline at end of file
Property changes on: trunk/extensions/dna-search-lucene/src/test/resources/aircraft.xml
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/test/resources/cars.xml (from rev 1417, trunk/dna-search/src/test/resources/cars.xml)
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/resources/cars.xml (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/resources/cars.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ JBoss DNA (http://www.jboss.org/dna)
+ ~
+ ~ See the COPYRIGHT.txt file distributed with this work for information
+ ~ regarding copyright ownership. Some portions may be licensed
+ ~ to Red Hat, Inc. under one or more contributor license agreements.
+ ~ See the AUTHORS.txt file in the distribution for a full listing of
+ ~ individual contributors.
+ ~
+ ~ JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ ~ is licensed to you under the terms of the GNU Lesser General Public License as
+ ~ published by the Free Software Foundation; either version 2.1 of
+ ~ the License, or (at your option) any later version.
+ ~
+ ~ JBoss DNA is distributed in the hope that it will be useful,
+ ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ ~ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+ ~ for more details.
+ ~
+ ~ You should have received a copy of the GNU Lesser General Public License
+ ~ along with this distribution; if not, write to:
+ ~ Free Software Foundation, Inc.
+ ~ 51 Franklin Street, Fifth Floor
+ ~ Boston, MA 02110-1301 USA
+ -->
+<Cars xmlns:jcr="http://www.jcp.org/jcr/1.0">
+ <Hybrid>
+ <car jcr:name="Toyota Prius" maker="Toyota" model="Prius" year="2008" msrp="$21,500" userRating="4.2" valueRating="5" mpgCity="48" mpgHighway="45"/>
+ <car jcr:name="Toyota Highlander" maker="Toyota" model="Highlander" year="2008" msrp="$34,200" userRating="4" valueRating="5" mpgCity="27" mpgHighway="25"/>
+ <car jcr:name="Nissan Altima" maker="Nissan" model="Altima" year="2008" msrp="$18,260" mpgCity="23" mpgHighway="32"/>
+ </Hybrid>
+ <Sports>
+ <car jcr:name="Aston Martin DB9" maker="Aston Martin" model="DB9" year="2008" msrp="$171,600" userRating="5" mpgCity="12" mpgHighway="19" lengthInInches="185.5" wheelbaseInInches="108.0" engine="5,935 cc 5.9 liters V 12"/>
+ <car jcr:name="Infiniti G37" maker="Infiniti" model="G37" year="2008" msrp="$34,900" userRating="3.5" valueRating="4" mpgCity="18" mpgHighway="24" />
+ </Sports>
+ <Luxury>
+ <car jcr:name="Cadillac DTS" maker="Cadillac" model="DTS" year="2008" engine="3.6-liter V6" userRating="0"/>
+ <car jcr:name="Bentley Continental" maker="Bentley" model="Continental" year="2008" msrp="$170,990" mpgCity="10" mpgHighway="17" />
+ <car jcr:name="Lexus IS350" maker="Lexus" model="IS350" year="2008" msrp="$36,305" mpgCity="18" mpgHighway="25" userRating="4" valueRating="5" />
+ </Luxury>
+ <Utility>
+ <car jcr:name="Land Rover LR2" maker="Land Rover" model="LR2" year="2008" msrp="$33,985" userRating="4.5" valueRating="5" mpgCity="16" mpgHighway="23" />
+ <car jcr:name="Land Rover LR3" maker="Land Rover" model="LR3" year="2008" msrp="$48,525" userRating="5" valueRating="2" mpgCity="12" mpgHighway="17" />
+ <car jcr:name="Hummer H3" maker="Hummer" model="H3" year="2008" msrp="$30,595" userRating="3.5" valueRating="4" mpgCity="13" mpgHighway="16" />
+ <car jcr:name="Ford F-150" maker="Ford" model="F-150" year="2008" msrp="$23,910" userRating="4" valueRating="1" mpgCity="14" mpgHighway="20" />
+ </Utility>
+</Cars>
\ No newline at end of file
Property changes on: trunk/extensions/dna-search-lucene/src/test/resources/cars.xml
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/extensions/dna-search-lucene/src/test/resources/log4j.properties (from rev 1417, trunk/dna-search/src/test/resources/log4j.properties)
===================================================================
--- trunk/extensions/dna-search-lucene/src/test/resources/log4j.properties (rev 0)
+++ trunk/extensions/dna-search-lucene/src/test/resources/log4j.properties 2009-12-09 19:36:29 UTC (rev 1418)
@@ -0,0 +1,13 @@
+# Direct log messages to stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ABSOLUTE} %5p %m%n
+
+# Root logger option
+log4j.rootLogger=INFO, stdout
+
+# Set up the default logging to be INFO level, then override specific units
+log4j.logger.org.jboss.dna=INFO
+#log4j.logger.org.jboss.dna.search.SimpleIndexingStrategy=TRACE
+
Modified: trunk/pom.xml
===================================================================
--- trunk/pom.xml 2009-12-09 14:20:49 UTC (rev 1417)
+++ trunk/pom.xml 2009-12-09 19:36:29 UTC (rev 1418)
@@ -119,8 +119,8 @@
<modules>
<module>dna-common</module>
<module>dna-graph</module>
+ <module>extensions/dna-search-lucene</module>
<module>dna-repository</module>
- <module>dna-search</module>
<module>dna-cnd</module>
<module>dna-jcr</module>
<module>extensions/dna-classloader-maven</module>
14 years, 5 months
DNA SVN: r1417 - in trunk/web: dna-web-jcr-rest-war and 1 other directory.
by dna-commits@lists.jboss.org
Author: bcarothers
Date: 2009-12-09 09:20:49 -0500 (Wed, 09 Dec 2009)
New Revision: 1417
Modified:
trunk/web/dna-web-jcr-rest-client/pom.xml
trunk/web/dna-web-jcr-rest-war/pom.xml
Log:
Updated POMs for REST projects to see if the nightly integration failures can be fixed.
Modified: trunk/web/dna-web-jcr-rest-client/pom.xml
===================================================================
--- trunk/web/dna-web-jcr-rest-client/pom.xml 2009-12-08 20:52:17 UTC (rev 1416)
+++ trunk/web/dna-web-jcr-rest-client/pom.xml 2009-12-09 14:20:49 UTC (rev 1417)
@@ -46,12 +46,6 @@
<groupId>org.jboss.resteasy</groupId>
<artifactId>resteasy-jaxb-provider</artifactId>
<version>1.2.1.GA</version>
- <exclusions>
- <exclusion>
- <groupId>com.sun.xml.bind</groupId>
- <artifactId>jaxb-impl</artifactId>
- </exclusion>
- </exclusions>
</dependency>
<dependency>
<groupId>org.jboss.resteasy</groupId>
Modified: trunk/web/dna-web-jcr-rest-war/pom.xml
===================================================================
--- trunk/web/dna-web-jcr-rest-war/pom.xml 2009-12-08 20:52:17 UTC (rev 1416)
+++ trunk/web/dna-web-jcr-rest-war/pom.xml 2009-12-09 14:20:49 UTC (rev 1417)
@@ -24,6 +24,12 @@
<version>1.5.8</version>
<scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>com.sun.xml.bind</groupId>
+ <artifactId>jaxb-impl</artifactId>
+ <version>2.1.12</version>
+ <scope>runtime</scope>
+ </dependency>
<!-- TESTING DEPENDENCIES -->
<dependency>
14 years, 5 months
DNA SVN: r1416 - trunk/dna-jcr/src/test/java/org/jboss/dna/jcr.
by dna-commits@lists.jboss.org
Author: elvisisking
Date: 2009-12-08 15:52:17 -0500 (Tue, 08 Dec 2009)
New Revision: 1416
Modified:
trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrObservationManagerTest.java
Log:
DNA-579 JcrObservationManagerTest Should Check To See If Listeners Receive Too Many Events: Changed JcrObservationManagerTest CountDownLatch to use the expected number of EventIterators (which is usually only one) instead of the expected number of events. Then when going through the iterator I determine if the correct number of events have been received. An EventIterator is received each time a "transaction" is committed.
Modified: trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrObservationManagerTest.java
===================================================================
--- trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrObservationManagerTest.java 2009-12-07 21:12:58 UTC (rev 1415)
+++ trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrObservationManagerTest.java 2009-12-08 20:52:17 UTC (rev 1416)
@@ -129,7 +129,18 @@
String[] uuids,
String[] nodeTypeNames,
boolean noLocal ) throws Exception {
- TestListener listener = new TestListener(eventsExpected, eventTypes);
+ return addListener(eventsExpected, 1, eventTypes, absPath, isDeep, uuids, nodeTypeNames, noLocal);
+ }
+
+ TestListener addListener( int eventsExpected,
+ int numIterators,
+ int eventTypes,
+ String absPath,
+ boolean isDeep,
+ String[] uuids,
+ String[] nodeTypeNames,
+ boolean noLocal ) throws Exception {
+ TestListener listener = new TestListener(eventsExpected, numIterators, eventTypes);
this.session.getWorkspace().getObservationManager().addEventListener(listener,
eventTypes,
absPath,
@@ -913,7 +924,7 @@
// register listeners
TestListener addNodeListener = addListener(1, Event.NODE_ADDED, null, false, null, null, false);
- TestListener removeNodeListener = addListener(2, Event.NODE_REMOVED, null, false, null, null, false);
+ TestListener removeNodeListener = addListener(2, 2, Event.NODE_REMOVED, null, false, null, null, false);
// move node
String oldPath = n2.getPath();
@@ -1645,13 +1656,12 @@
private final CountDownLatch latch;
public TestListener( int expectedEvents,
+ int numIterators,
int eventTypes ) {
this.eventTypes = eventTypes;
this.expectedEvents = expectedEvents;
this.events = new ArrayList<Event>();
-
- // if no events are expected set it to 1 and let the timeout stop the test
- this.latch = new CountDownLatch((this.expectedEvents == 0) ? 1 : this.expectedEvents);
+ this.latch = new CountDownLatch(numIterators);
}
public int getActualEventCount() {
@@ -1676,22 +1686,31 @@
* @see javax.jcr.observation.EventListener#onEvent(javax.jcr.observation.EventIterator)
*/
public void onEvent( EventIterator itr ) {
- long position = itr.getPosition();
+ // this is called each time a "transaction" is committed. Most times this means after a session.save. But there are
+ // other times, like a workspace.move and a node.lock
+ try {
+ long position = itr.getPosition();
- // iterator position must be set initially zero
- if (position == 0) {
- while (itr.hasNext()) {
- try {
+ // iterator position must be set initially zero
+ if (position == 0) {
+ while (itr.hasNext()) {
Event event = itr.nextEvent();
+
// check iterator position
if (++position != itr.getPosition()) {
this.errorMessage = "EventIterator position was " + itr.getPosition() + " and should be " + position;
break;
}
+ // add event to collection and increment total
this.events.add(event);
++this.eventsProcessed;
+ // check to make sure we haven't received too many events
+ if (this.eventsProcessed > this.expectedEvents) {
+ break;
+ }
+
// check event type
int eventType = event.getType();
@@ -1699,18 +1718,18 @@
this.errorMessage = "Received a wrong event type of " + eventType;
break;
}
- } finally {
- // This has to be done LAST, otherwise waitForEvents() will return before the above stuff is done
- this.latch.countDown();
}
+ } else {
+ this.errorMessage = "EventIterator position was not initially set to zero";
}
- } else {
- this.errorMessage = "EventIterator position was not initially set to zero";
+ } finally {
+ // This has to be done LAST, otherwise waitForEvents() will return before the above stuff is done
+ this.latch.countDown();
}
}
public void waitForEvents() throws Exception {
- this.latch.await(5, TimeUnit.SECONDS);
+ this.latch.await(2000, TimeUnit.MILLISECONDS);
}
}
14 years, 5 months
DNA SVN: r1415 - trunk/dna-jcr/src/main/java/org/jboss/dna/jcr.
by dna-commits@lists.jboss.org
Author: bcarothers
Date: 2009-12-07 16:12:58 -0500 (Mon, 07 Dec 2009)
New Revision: 1415
Modified:
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java
Log:
DNA-541 Locking Implementation Does Not Support Timeouts
Got caught on the "JDK6 lets you @Override an unimplemented method from an interface but JDK5 does not" issue again. Fix attached.
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java 2009-12-07 20:56:18 UTC (rev 1414)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java 2009-12-07 21:12:58 UTC (rev 1415)
@@ -133,7 +133,6 @@
final JcrEngine engine = this;
Runnable cleanUpTask = new Runnable() {
- @Override
public void run() {
engine.cleanUpLocks();
}
14 years, 6 months
DNA SVN: r1414 - in trunk/dna-jcr/src: main/resources/org/jboss/dna/jcr and 1 other directories.
by dna-commits@lists.jboss.org
Author: bcarothers
Date: 2009-12-07 15:56:18 -0500 (Mon, 07 Dec 2009)
New Revision: 1414
Modified:
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/DnaLexicon.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrI18n.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrRepository.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/WorkspaceLockManager.java
trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/JcrI18n.properties
trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/dna_builtins.cnd
trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrRepositoryTest.java
trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/WorkspaceLockManagerTest.java
Log:
DNA-541 Locking Implementation Does Not Support Timeouts
Committed a patch that implements lock expiration based on the algorithm above modified by Randall's comments. The patch includes two tests to confirm that the functionality works, but one is @Ignored because it depends on garbage collector behavior to test the results and the other is @Ignored because it has to sleep for 30 seconds to test the timeout.
The patch also addresses specific feedback from the JIRA by integrating the ScheduledExecutionService in the JcrEngine into the DnaEngine's lifecycle methods (start, shutdown, awaitTermination) and synchronizing access to JcrRepository.activeSessions
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -1409,7 +1409,7 @@
}
}
- session().workspace().lockManager().unlock(session(), lock);
+ session().workspace().lockManager().unlock(session().getExecutionContext(), lock);
session().removeLockToken(lock.getLockToken());
}
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/DnaLexicon.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/DnaLexicon.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/DnaLexicon.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -34,10 +34,12 @@
public class DnaLexicon extends org.jboss.dna.repository.DnaLexicon {
public static final Name BASE = new BasicName(Namespace.URI, "base");
+ public static final Name EXPIRATION_DATE = new BasicName(Namespace.URI, "expirationDate");
public static final Name IS_HELD_BY_SESSION = new BasicName(Namespace.URI, "isHeldBySession");
public static final Name IS_SESSION_SCOPED = new BasicName(Namespace.URI, "isSessionScoped");
public static final Name LOCK = new BasicName(Namespace.URI, "lock");
public static final Name LOCKED_UUID = new BasicName(Namespace.URI, "lockedUuid");
+ public static final Name LOCKING_SESSION = new BasicName(Namespace.URI, "lockingSession");
public static final Name LOCKS = new BasicName(Namespace.URI, "locks");
public static final Name NAMESPACE = new BasicName(Namespace.URI, "namespace");
public static final Name NODE_TYPES = new BasicName(Namespace.URI, "nodeTypes");
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrEngine.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -23,17 +23,23 @@
*/
package org.jboss.dna.jcr;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.jcr.Repository;
import javax.jcr.RepositoryException;
import net.jcip.annotations.ThreadSafe;
import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.common.util.Logger;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.Graph;
import org.jboss.dna.graph.Location;
@@ -57,9 +63,19 @@
@ThreadSafe
public class JcrEngine extends DnaEngine {
+ final static int LOCK_SWEEP_INTERVAL_IN_MILLIS = 30000;
+ final static int LOCK_EXTENSION_INTERVAL_IN_MILLIS = LOCK_SWEEP_INTERVAL_IN_MILLIS * 2;
+
+ private final Logger log = Logger.getLogger(DnaEngine.class);
+
private final Map<String, JcrRepository> repositories;
private final Lock repositoriesLock;
+ /**
+ * Provides the ability to schedule lock clean-up
+ */
+ private final ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(2);
+
JcrEngine( ExecutionContext context,
DnaConfiguration.ConfigurationDefinition configuration ) {
super(context, configuration);
@@ -68,6 +84,65 @@
}
/**
+ * Clean up session-scoped locks created by session that are no longer active by iterating over the {@link JcrRepository
+ * repositories} and calling their {@link JcrRepository#cleanUpLocks() clean-up method}.
+ * <p>
+ * It should not be possible for a session to be terminated without cleaning up its locks, but this method will help clean up
+ * dangling locks should a session terminate abnormally.
+ * </p>
+ */
+ void cleanUpLocks() {
+ Collection<JcrRepository> repos;
+
+ try {
+ // Make a copy of the repositories to minimize the time that the lock needs to be held
+ repositoriesLock.lock();
+ repos = new ArrayList<JcrRepository>(repositories.values());
+ } finally {
+ repositoriesLock.unlock();
+ }
+
+ for (JcrRepository repository : repos) {
+ try {
+ repository.cleanUpLocks();
+ } catch (Throwable t) {
+ log.error(t, JcrI18n.errorCleaningUpLocks, repository.getRepositorySourceName());
+ }
+ }
+ }
+
+ @Override
+ public void shutdown() {
+ scheduler.shutdown();
+
+ super.shutdown();
+ }
+
+ @Override
+ public boolean awaitTermination( long timeout,
+ TimeUnit unit ) throws InterruptedException {
+ if (!scheduler.awaitTermination(timeout, unit)) return false;
+
+ return super.awaitTermination(timeout, unit);
+ }
+
+ @Override
+ public void start() {
+ super.start();
+
+ final JcrEngine engine = this;
+ Runnable cleanUpTask = new Runnable() {
+
+ @Override
+ public void run() {
+ engine.cleanUpLocks();
+ }
+
+ };
+ scheduler.scheduleAtFixedRate(cleanUpTask, 0, LOCK_SWEEP_INTERVAL_IN_MILLIS, TimeUnit.MILLISECONDS);
+ }
+
+ /**
* Get the {@link Repository} implementation for the named repository.
*
* @param repositoryName the name of the repository, which corresponds to the name of a configured {@link RepositorySource}
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrI18n.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrI18n.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrI18n.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -66,6 +66,9 @@
public static I18n unableToRemapUriUsingPrefixUsedInNamespaceRegistry;
public static I18n errorWhileInitializingTheNamespaceRegistry;
+ public static I18n errorCleaningUpLocks;
+ public static I18n cleaningUpLocks;
+ public static I18n cleanedUpLocks;
public static I18n invalidRelativePath;
public static I18n invalidPathParameter;
public static I18n invalidNamePattern;
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrRepository.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrRepository.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrRepository.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -33,10 +33,12 @@
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.WeakHashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -61,6 +63,8 @@
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.Graph;
import org.jboss.dna.graph.JaasSecurityContext;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
import org.jboss.dna.graph.SecurityContext;
import org.jboss.dna.graph.Subgraph;
import org.jboss.dna.graph.connector.RepositoryConnection;
@@ -75,12 +79,16 @@
import org.jboss.dna.graph.observe.Changes;
import org.jboss.dna.graph.observe.Observable;
import org.jboss.dna.graph.observe.Observer;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.DateTimeFactory;
import org.jboss.dna.graph.property.Name;
import org.jboss.dna.graph.property.NamespaceRegistry;
import org.jboss.dna.graph.property.Path;
import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.property.PathNotFoundException;
import org.jboss.dna.graph.property.Property;
import org.jboss.dna.graph.property.PropertyFactory;
+import org.jboss.dna.graph.property.ValueFactory;
import org.jboss.dna.graph.property.basic.GraphNamespaceRegistry;
import org.jboss.dna.graph.query.parse.QueryParsers;
import org.jboss.dna.graph.query.parse.SqlQueryParser;
@@ -111,6 +119,8 @@
@ThreadSafe
public class JcrRepository implements Repository {
+ private static final Logger log = Logger.getLogger(JcrRepository.class);
+
/**
* A flag that controls whether the repository uses a shared repository (or workspace) for the "/jcr:system" content in all of
* the workspaces. In production, this needs to be "true" for proper JCR functionality, but in some debugging cases it can be
@@ -122,6 +132,13 @@
static final boolean WORKSPACES_SHARE_SYSTEM_BRANCH = true;
/**
+ * The user name for anonymous sessions
+ *
+ * @see Option#ANONYMOUS_USER_ROLES
+ */
+ static final String ANONYMOUS_USER_NAME = "<anonymous>";
+
+ /**
* The available options for the {@code JcrRepository}.
*/
public enum Option {
@@ -270,6 +287,9 @@
private final QueryParsers queryParsers = new QueryParsers(new SqlQueryParser(), new XPathQueryParser(),
new FullTextSearchParser());
+ // package-scoped to facilitate testing
+ final WeakHashMap<JcrSession, Object> activeSessions = new WeakHashMap<JcrSession, Object>();
+
/**
* Creates a JCR repository that uses the supplied {@link RepositoryConnectionFactory repository connection factory} to
* establish {@link Session sessions} to the underlying repository source upon {@link #login() login}.
@@ -471,7 +491,7 @@
anonymousUserContext = new SecurityContext() {
public String getUserName() {
- return null;
+ return ANONYMOUS_USER_NAME;
}
public boolean hasRole( String roleName ) {
@@ -788,6 +808,11 @@
} catch (AccessControlException ace) {
throw new NoSuchWorkspaceException(JcrI18n.workspaceNameIsInvalid.text(sourceName, workspaceName));
}
+
+ synchronized (this.activeSessions) {
+ activeSessions.put(session, null);
+ }
+
return session;
}
@@ -809,6 +834,101 @@
return lockManager;
}
+ /**
+ * Marks the given session as inactive (by removing it from the {@link #activeSessions active sessions map}).
+ *
+ * @param session the session to be marked as inactive
+ */
+ void sessionLoggedOut( JcrSession session ) {
+ synchronized (this.activeSessions) {
+ this.activeSessions.remove(session);
+ }
+ }
+
+ /**
+ * Returns the set of active sessions in this repository
+ *
+ * @return the set of active sessions in this repository
+ */
+ Set<JcrSession> activeSessions() {
+ Set<JcrSession> activeSessions;
+ synchronized (this.activeSessions) {
+ activeSessions = new HashSet<JcrSession>(this.activeSessions.keySet());
+ }
+ // There can and will be elements in this set that are no longer live but haven't yet been gc'ed.
+ // Filter those out
+ for (Iterator<JcrSession> iter = activeSessions.iterator(); iter.hasNext();) {
+ JcrSession session = iter.next();
+ if (session != null && !session.isLive()) {
+ iter.remove();
+ }
+ }
+
+ return activeSessions;
+ }
+
+ /**
+ * Iterates through the list of session-scoped locks in this repository, deleting any session-scoped locks that were created
+ * by a session that is no longer active.
+ */
+ void cleanUpLocks() {
+ if (log.isTraceEnabled()) {
+ log.trace(JcrI18n.cleaningUpLocks.text());
+ }
+
+ Set<JcrSession> activeSessions = activeSessions();
+ Set<String> activeSessionIds = new HashSet<String>(activeSessions.size());
+
+ for (JcrSession activeSession : activeSessions) {
+ activeSessionIds.add(activeSession.sessionId());
+ }
+
+ Graph systemGraph = createSystemGraph(executionContext);
+ PathFactory pathFactory = executionContext.getValueFactories().getPathFactory();
+ ValueFactory<Boolean> booleanFactory = executionContext.getValueFactories().getBooleanFactory();
+ ValueFactory<String> stringFactory = executionContext.getValueFactories().getStringFactory();
+
+ DateTimeFactory dateFactory = executionContext.getValueFactories().getDateFactory();
+ DateTime now = dateFactory.create();
+ DateTime newExpirationDate = now.plusMillis(JcrEngine.LOCK_EXTENSION_INTERVAL_IN_MILLIS);
+
+ Path locksPath = pathFactory.createAbsolutePath(JcrLexicon.SYSTEM, DnaLexicon.LOCKS);
+
+ Subgraph locksGraph = null;
+ try {
+ locksGraph = systemGraph.getSubgraphOfDepth(2).at(locksPath);
+ } catch (PathNotFoundException pnfe) {
+ // It's possible for this to run before the dna:locks child node gets added to the /jcr:system node.
+ return;
+ }
+
+ for (Location lockLocation : locksGraph.getRoot().getChildren()) {
+ Node lockNode = locksGraph.getNode(lockLocation);
+
+ Boolean isSessionScoped = booleanFactory.create(lockNode.getProperty(DnaLexicon.IS_SESSION_SCOPED).getFirstValue());
+
+ if (!isSessionScoped) continue;
+ String lockingSession = stringFactory.create(lockNode.getProperty(DnaLexicon.LOCKING_SESSION).getFirstValue());
+
+ // Extend locks held by active sessions
+ if (activeSessionIds.contains(lockingSession)) {
+ systemGraph.set(DnaLexicon.EXPIRATION_DATE).on(lockLocation).to(newExpirationDate);
+ } else {
+ DateTime expirationDate = dateFactory.create(lockNode.getProperty(DnaLexicon.EXPIRATION_DATE).getFirstValue());
+ // Destroy expired locks (if it was still held by an active session, it would have been extended by now)
+ if (expirationDate.isBefore(now)) {
+ String workspaceName = stringFactory.create(lockNode.getProperty(DnaLexicon.WORKSPACE).getFirstValue());
+ WorkspaceLockManager lockManager = lockManagers.get(workspaceName);
+ lockManager.unlock(executionContext, lockManager.createLock(lockNode));
+ }
+ }
+ }
+
+ if (log.isTraceEnabled()) {
+ log.trace(JcrI18n.cleanedUpLocks.text());
+ }
+ }
+
protected class FederatedRepositoryContext implements RepositoryContext {
private final RepositoryConnectionFactory connectionFactory;
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -785,6 +785,7 @@
isLive = false;
this.workspace().observationManager().removeAllEventListeners();
this.workspace().lockManager().cleanLocks(this);
+ this.repository.sessionLoggedOut(this);
this.executionContext.getSecurityContext().logout();
}
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/WorkspaceLockManager.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/WorkspaceLockManager.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/WorkspaceLockManager.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -16,6 +16,8 @@
import org.jboss.dna.graph.Graph;
import org.jboss.dna.graph.Location;
import org.jboss.dna.graph.connector.LockFailedException;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.DateTimeFactory;
import org.jboss.dna.graph.property.Path;
import org.jboss.dna.graph.property.PathFactory;
import org.jboss.dna.graph.property.PathNotFoundException;
@@ -31,6 +33,7 @@
@ThreadSafe
class WorkspaceLockManager {
+ private final ExecutionContext context;
private final Path locksPath;
private final JcrRepository repository;
private final String workspaceName;
@@ -40,6 +43,7 @@
JcrRepository repository,
String workspaceName,
Path locksPath ) {
+ this.context = context;
this.repository = repository;
this.workspaceName = workspaceName;
this.locksPath = locksPath;
@@ -79,7 +83,7 @@
}
ExecutionContext sessionContext = session.getExecutionContext();
- String lockOwner = sessionContext.getSecurityContext().getUserName();
+ String lockOwner = session.getUserID();
DnaLock lock = createLock(lockOwner, lockUuid, nodeUuid, isDeep, isSessionScoped);
Graph.Batch batch = repository.createSystemGraph(sessionContext).batch();
@@ -89,11 +93,17 @@
Property lockOwnerProp = propFactory.create(JcrLexicon.LOCK_OWNER, lockOwner);
Property lockIsDeepProp = propFactory.create(JcrLexicon.LOCK_IS_DEEP, isDeep);
+ DateTimeFactory dateFactory = sessionContext.getValueFactories().getDateFactory();
+ DateTime expirationDate = dateFactory.create();
+ expirationDate = expirationDate.plusMillis(JcrEngine.LOCK_EXTENSION_INTERVAL_IN_MILLIS);
+
batch.create(pathFactory.create(locksPath, pathFactory.createSegment(lockUuid.toString())),
propFactory.create(JcrLexicon.PRIMARY_TYPE, DnaLexicon.LOCK),
propFactory.create(DnaLexicon.WORKSPACE, workspaceName),
propFactory.create(DnaLexicon.LOCKED_UUID, nodeUuid.toString()),
propFactory.create(DnaLexicon.IS_SESSION_SCOPED, isSessionScoped),
+ propFactory.create(DnaLexicon.LOCKING_SESSION, session.sessionId()),
+ propFactory.create(DnaLexicon.EXPIRATION_DATE, expirationDate),
// This gets set after the lock succeeds and the lock token gets added to the session
propFactory.create(DnaLexicon.IS_HELD_BY_SESSION, false),
lockOwnerProp,
@@ -116,6 +126,10 @@
return lock;
}
+ DnaLock createLock( org.jboss.dna.graph.Node lockNode ) {
+ return new DnaLock(lockNode);
+ }
+
/* Factory method added to facilitate mocked testing */
DnaLock createLock( String lockOwner,
UUID lockUuid,
@@ -171,7 +185,7 @@
workspaceBatch.execute();
} catch (LockFailedException lfe) {
// Attempt to lock node at the repo level failed - cancel lock
- unlock(session, lock);
+ unlock(session.getExecutionContext(), lock);
throw new RepositoryException(lfe);
}
@@ -180,22 +194,21 @@
/**
* Removes the provided lock, effectively unlocking the node to which the lock is associated.
*
- * @param session the session in which the node is being unlocked
+ * @param sessionExecutionContext the execution context of the session in which the node is being unlocked
* @param lock the lock to be removed
*/
- void unlock( JcrSession session,
+ void unlock( ExecutionContext sessionExecutionContext,
DnaLock lock ) {
try {
- ExecutionContext context = session.getExecutionContext();
- PathFactory pathFactory = context.getValueFactories().getPathFactory();
+ PathFactory pathFactory = sessionExecutionContext.getValueFactories().getPathFactory();
// Remove the lock node under the /jcr:system branch ...
- Graph.Batch batch = repository.createSystemGraph(context).batch();
+ Graph.Batch batch = repository.createSystemGraph(sessionExecutionContext).batch();
batch.delete(pathFactory.create(locksPath, pathFactory.createSegment(lock.getUuid().toString())));
batch.execute();
// Unlock the node in the repository graph ...
- unlockNodeInRepository(session, lock);
+ unlockNodeInRepository(sessionExecutionContext, lock);
workspaceLocksByNodeUuid.remove(lock.nodeUuid);
} catch (PathNotFoundException pnfe) {
@@ -220,12 +233,12 @@
* /jcr:system/dna:locks} subgraph.
* </p>
*
- * @param session the session in which the node is being unlocked
+ * @param sessionExecutionContext the execution context of the session in which the node is being unlocked
* @param lock
*/
- void unlockNodeInRepository( JcrSession session,
+ void unlockNodeInRepository( ExecutionContext sessionExecutionContext,
DnaLock lock ) {
- Graph.Batch workspaceBatch = repository.createWorkspaceGraph(this.workspaceName, session.getExecutionContext()).batch();
+ Graph.Batch workspaceBatch = repository.createWorkspaceGraph(this.workspaceName, sessionExecutionContext).batch();
workspaceBatch.remove(JcrLexicon.LOCK_OWNER, JcrLexicon.LOCK_IS_DEEP).on(lock.nodeUuid);
workspaceBatch.unlock(lock.nodeUuid);
@@ -249,9 +262,8 @@
ValueFactory<Boolean> booleanFactory = context.getValueFactories().getBooleanFactory();
PathFactory pathFactory = context.getValueFactories().getPathFactory();
- org.jboss.dna.graph.Node lockNode = repository.createSystemGraph(context)
- .getNodeAt(pathFactory.create(locksPath,
- pathFactory.createSegment(lockToken)));
+ org.jboss.dna.graph.Node lockNode = repository.createSystemGraph(context).getNodeAt(pathFactory.create(locksPath,
+ pathFactory.createSegment(lockToken)));
return booleanFactory.create(lockNode.getProperty(DnaLexicon.IS_HELD_BY_SESSION).getFirstValue());
@@ -275,9 +287,8 @@
PropertyFactory propFactory = context.getPropertyFactory();
PathFactory pathFactory = context.getValueFactories().getPathFactory();
- repository.createSystemGraph(context)
- .set(propFactory.create(DnaLexicon.IS_HELD_BY_SESSION, value))
- .on(pathFactory.create(locksPath, pathFactory.createSegment(lockToken)));
+ repository.createSystemGraph(context).set(propFactory.create(DnaLexicon.IS_HELD_BY_SESSION, value)).on(pathFactory.create(locksPath,
+ pathFactory.createSegment(lockToken)));
}
/**
@@ -340,11 +351,12 @@
* @param session the session on behalf of which the lock operation is being performed
*/
void cleanLocks( JcrSession session ) {
+ ExecutionContext context = session.getExecutionContext();
Collection<String> lockTokens = session.lockTokens();
for (String lockToken : lockTokens) {
DnaLock lock = lockFor(lockToken);
if (lock != null && lock.isSessionScoped()) {
- unlock(session, lock);
+ unlock(context, lock);
}
}
}
@@ -361,6 +373,32 @@
private final boolean deep;
private final boolean sessionScoped;
+ DnaLock( org.jboss.dna.graph.Node lockNode ) {
+ ValueFactory<String> stringFactory = context.getValueFactories().getStringFactory();
+ ValueFactory<UUID> uuidFactory = context.getValueFactories().getUuidFactory();
+ ValueFactory<Boolean> booleanFactory = context.getValueFactories().getBooleanFactory();
+
+ assert lockNode.getLocation().getPath() != null;
+
+ String lockUuidAsString = lockNode.getLocation().getPath().getLastSegment().getName().getLocalName();
+ Property lockOwnerProperty = lockNode.getProperty(JcrLexicon.LOCK_OWNER);
+ Property nodeUuidProperty = lockNode.getProperty(DnaLexicon.LOCKED_UUID);
+ Property lockIsDeepProperty = lockNode.getProperty(JcrLexicon.LOCK_IS_DEEP);
+ Property isSessionScopedProperty = lockNode.getProperty(DnaLexicon.IS_SESSION_SCOPED);
+
+ assert lockUuidAsString != null;
+ assert lockOwnerProperty != null;
+ assert nodeUuidProperty != null;
+ assert lockIsDeepProperty != null;
+ assert isSessionScopedProperty != null;
+
+ this.lockOwner = stringFactory.create(lockOwnerProperty.getFirstValue());
+ this.lockUuid = UUID.fromString(lockUuidAsString);
+ this.nodeUuid = uuidFactory.create(nodeUuidProperty.getFirstValue());
+ this.deep = booleanFactory.create(lockIsDeepProperty.getFirstValue());
+ this.sessionScoped = booleanFactory.create(isSessionScopedProperty.getFirstValue());
+ }
+
DnaLock( String lockOwner,
UUID lockUuid,
UUID nodeUuid,
Modified: trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/JcrI18n.properties
===================================================================
--- trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/JcrI18n.properties 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/JcrI18n.properties 2009-12-07 20:56:18 UTC (rev 1414)
@@ -54,6 +54,9 @@
unableToRemapUriNotRegisteredInNamespaceRegistry = Unable to remap the namespace "{1}" to prefix "{0}" because the URI is not already registered in the workspace's namespace registry
unableToRemapUriUsingPrefixUsedInNamespaceRegistry = Unable to remap the namespace "{1}" to prefix "{0}" because the prefix is already used as the prefix for the namespace "{2}" in the workspace's namespace registry
+errorCleaningUpLocks = Error while cleaning up locks for JCR repository "{0}"
+cleaningUpLocks = Lock clean up process begun
+cleanedUpLocks = Lock clean up process completed
errorWhileInitializingTheNamespaceRegistry = Error while initializing the namespace registry for workspace "{0}"
invalidRelativePath = "{0}" is not a valid relative path
invalidPathParameter = The "{1}" parameter value "{0}" was not a valid path
Modified: trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/dna_builtins.cnd
===================================================================
--- trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/dna_builtins.cnd 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/main/resources/org/jboss/dna/jcr/dna_builtins.cnd 2009-12-07 20:56:18 UTC (rev 1414)
@@ -46,6 +46,8 @@
[dna:lock] > nt:base
- dna:lockedUuid (string) protected ignore
- jcr:lockOwner (string) protected ignore
+- dna:lockingSession (string) protected ignore
+- dna:expirationDate (date) protected ignore
- dna:sessionScope (boolean) protected ignore
- jcr:isDeep (boolean) protected ignore
- dna:isHeldBySession (boolean) protected ignore
Modified: trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrRepositoryTest.java
===================================================================
--- trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrRepositoryTest.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/JcrRepositoryTest.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -26,7 +26,6 @@
import static org.hamcrest.collection.IsArrayContaining.hasItemInArray;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNull.notNullValue;
-import static org.hamcrest.core.IsNull.nullValue;
import static org.junit.Assert.assertThat;
import java.security.AccessControlContext;
import java.security.AccessController;
@@ -51,10 +50,12 @@
import org.jboss.dna.graph.connector.RepositorySourceException;
import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
import org.jboss.dna.graph.observe.MockObservable;
+import org.jboss.dna.jcr.JcrRepository.Option;
import org.jboss.security.config.IDTrustConfiguration;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
+import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
@@ -192,12 +193,12 @@
testDescriptorKeys(repository);
testDescriptorValues(repository);
}
-
+
@Test
public void shouldProvideObserver() {
assertThat(this.repository.getObserver(), is(notNullValue()));
}
-
+
@Test
public void shouldProvideRepositoryObservable() {
assertThat(this.repository.getRepositoryObservable(), is(notNullValue()));
@@ -205,7 +206,8 @@
@Test
public void shouldHaveDefaultOptionsWhenNotOverridden() {
- JcrRepository repository = new JcrRepository(context, connectionFactory, sourceName, new MockObservable(), descriptors, null);
+ JcrRepository repository = new JcrRepository(context, connectionFactory, sourceName, new MockObservable(), descriptors,
+ null);
assertThat(repository.getOptions().get(JcrRepository.Option.PROJECT_NODE_TYPES),
is(JcrRepository.DefaultOption.PROJECT_NODE_TYPES));
}
@@ -258,7 +260,7 @@
session = (JcrSession)repository.login();
assertThat(session, is(notNullValue()));
- assertThat(session.getUserID(), is(nullValue()));
+ assertThat(session.getUserID(), is(JcrRepository.ANONYMOUS_USER_NAME));
}
@@ -483,4 +485,76 @@
assertThat(repository.getDescriptor(Repository.SPEC_NAME_DESC), is(JcrI18n.SPEC_NAME_DESC.text()));
assertThat(repository.getDescriptor(Repository.SPEC_VERSION_DESC), is("1.0"));
}
+
+ @Ignore( "GC behavior is non-deterministic from the application's POV - this test _will_ occasionally fail" )
+ @Test
+ public void shouldAllowManySessionLoginsAndLogouts() throws Exception {
+ // Use a different repository that supports anonymous logins to make this test cleaner
+ Map<Option, String> options = new HashMap<Option, String>();
+ options.put(JcrRepository.Option.ANONYMOUS_USER_ROLES, JcrSession.DNA_ADMIN_PERMISSION);
+ JcrRepository repository = new JcrRepository(context, connectionFactory, sourceName, new MockObservable(), descriptors,
+ options);
+
+ Session session;
+
+ for (int i = 0; i < 10000; i++) {
+ session = repository.login();
+ session.logout();
+ }
+
+ session = repository.login();
+ session = null;
+
+ // Give the gc a chance to run
+ System.gc();
+ Thread.sleep(100);
+
+ assertThat(repository.activeSessions().size(), is(0));
+ }
+
+ @Ignore( "This test normally sleeps for 30 seconds" )
+ @Test
+ public void shouldCleanUpLocksFromDeadSessions() throws Exception {
+ // Use a different repository that supports anonymous logins to make this test cleaner
+ Map<Option, String> options = new HashMap<Option, String>();
+ options.put(JcrRepository.Option.ANONYMOUS_USER_ROLES, JcrSession.DNA_ADMIN_PERMISSION);
+ JcrRepository repository = new JcrRepository(context, connectionFactory, sourceName, new MockObservable(), descriptors,
+ options);
+
+ String lockedNodeName = "lockedNode";
+ JcrSession locker = (JcrSession)repository.login();
+
+ // Create a node to lock
+ javax.jcr.Node lockedNode = locker.getRootNode().addNode(lockedNodeName);
+ lockedNode.addMixin("mix:lockable");
+ locker.save();
+
+ // Create a session-scoped lock (not deep)
+ lockedNode.lock(false, true);
+ assertThat(lockedNode.isLocked(), is(true));
+
+ Session reader = repository.login();
+ javax.jcr.Node readerNode = (javax.jcr.Node)reader.getItem("/" + lockedNodeName);
+ assertThat(readerNode.isLocked(), is(true));
+
+ // No locks should have changed yet.
+ repository.cleanUpLocks();
+ assertThat(lockedNode.isLocked(), is(true));
+ assertThat(readerNode.isLocked(), is(true));
+
+ /*
+ * Simulate the GC cleaning up the session and it being purged from the activeSessions() map.
+ * This can't really be tested in a consistent way due to a lack of specificity around when
+ * the garbage collector runs. The @Ignored test above does cause a GC sweep on my computer and
+ * confirms that the code works in principle. A different chicken dance may be required to
+ * fully test this on a different computer.
+ */
+ repository.activeSessions.remove(locker);
+ Thread.sleep(JcrEngine.LOCK_EXTENSION_INTERVAL_IN_MILLIS + 100);
+
+ // The locker thread should be inactive and the lock cleaned up
+ repository.cleanUpLocks();
+ assertThat(readerNode.isLocked(), is(false));
+ }
+
}
Modified: trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/WorkspaceLockManagerTest.java
===================================================================
--- trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/WorkspaceLockManagerTest.java 2009-12-07 15:19:53 UTC (rev 1413)
+++ trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/WorkspaceLockManagerTest.java 2009-12-07 20:56:18 UTC (rev 1414)
@@ -151,9 +151,7 @@
@Test
public void shouldCreateLockRequestWhenUnlockingNode() {
DnaLock lock = workspaceLockManager.createLock("testOwner", UUID.randomUUID(), validUuid, false, false);
- JcrSession session = mock(JcrSession.class);
- stub(session.getExecutionContext()).toReturn(context);
- workspaceLockManager.unlockNodeInRepository(session, lock);
+ workspaceLockManager.unlockNodeInRepository(context, lock);
assertNextRequestIsUnlock(validLocation);
}
14 years, 6 months
DNA SVN: r1413 - in trunk/dna-integration-tests/src/test/resources/tck: basic-jpa and 1 other directory.
by dna-commits@lists.jboss.org
Author: bcarothers
Date: 2009-12-07 10:19:53 -0500 (Mon, 07 Dec 2009)
New Revision: 1413
Added:
trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/
trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/configRepository.xml
trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/repositoryOverlay.properties
Removed:
trunk/dna-integration-tests/src/test/resources/tck/jpa/
Log:
Renamed the jpa folder to basic-jpa to align with the BasicJpaRepositoryTckTest
Added: trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/configRepository.xml
===================================================================
--- trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/configRepository.xml (rev 0)
+++ trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/configRepository.xml 2009-12-07 15:19:53 UTC (rev 1413)
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ JBoss DNA (http://www.jboss.org/dna)
+ ~
+ ~ See the COPYRIGHT.txt file distributed with this work for information
+ ~ regarding copyright ownership. Some portions may be licensed
+ ~ to Red Hat, Inc. under one or more contributor license agreements.
+ ~ See the AUTHORS.txt file in the distribution for a full listing of
+ ~ individual contributors.
+ ~
+ ~ JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ ~ is licensed to you under the terms of the GNU Lesser General Public License as
+ ~ published by the Free Software Foundation; either version 2.1 of
+ ~ the License, or (at your option) any later version.
+ ~
+ ~ JBoss DNA is distributed in the hope that it will be useful,
+ ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ ~ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+ ~ for more details.
+ ~
+ ~ You should have received a copy of the GNU Lesser General Public License
+ ~ along with this distribution; if not, write to:
+ ~ Free Software Foundation, Inc.
+ ~ 51 Franklin Street, Fifth Floor
+ ~ Boston, MA 02110-1301 USA
+ -->
+<configuration xmlns:dna="http://www.jboss.org/dna/1.0" xmlns:jcr="http://www.jcp.org/jcr/1.0">
+ <!--
+ Define the sources for the content. These sources are directly accessible using the DNA-specific Graph API.
+ In fact, this is how the DNA JCR implementation works. You can think of these as being similar to
+ JDBC DataSource objects, except that they expose graph content via the Graph API instead of records via SQL or JDBC.
+ -->
+ <dna:sources jcr:primaryType="nt:unstructured">
+ <!--
+ The 'JCR' repository is a JBoss Cache source with a single default workspace (though others could be created, too).
+ -->
+ <dna:source jcr:name="Store" dna:classname="org.jboss.dna.connector.store.jpa.JpaSource"
+ dna:model="Basic"
+ dna:dialect="${jpaSource.dialect}"
+ dna:driverClassName="${jpaSource.driverClassName}"
+ dna:username="${jpaSource.username}"
+ dna:password="${jpaSource.password}"
+ dna:url="${jpaSource.url}"
+ dna:maximumConnectionsInPool="${jpaSource.maximumConnectionsInPool}"
+ dna:referentialIntegrityEnforced="${jpaSource.referentialIntegrityEnforced}"
+ dna:largeValueSizeInBytes="${jpaSource.largeValueSizeInBytes}"
+ dna:retryLimit="${jpaSource.retryLimit}"
+ dna:compressData="${jpaSource.compressData}"
+ dna:predefinedWorkspaceNames="default"
+ dna:showSql="${jpaSource.showSql}"
+ dna:autoGenerateSchema="${jpaSource.autoGenerateSchema}"
+ dna:defaultWorkspaceName="default"/>
+ </dna:sources>
+ <!--
+ Define the mime type detectors. This is an optional section. By default, each engine will use the
+ MIME type detector that uses filename extensions. So we wouldn't need to define the same detector again,
+ but this is how you'd define another extension.
+ -->
+ <dna:mimeTypeDetectors>
+ <dna:mimeTypeDetector jcr:name="Detector">
+ <dna:description>Standard extension-based MIME type detector</dna:description>
+ <!--
+ Specify the implementation class (required), as a child element or attribute on parent element.
+ -->
+ <dna:classname>org.jboss.dna.graph.mimetype.ExtensionBasedMimeTypeDetector</dna:classname>
+ <!--
+ Specify the classpath (optional) as an ordered list of 'names', where each name is significant to
+ the classpath factory. For example, a name could be an OSGI identifier or a Maven coordinate,
+ depending upon the classpath factory being used. If there is only one 'name' in the classpath,
+ it may be specified as an attribute on the 'mimeTypeDetector' element. If there is more than one
+ 'name', then they must be specified as child 'classpath' elements. Blank or empty values are ignored.
+ -->
+ <dna:classpath></dna:classpath>
+ </dna:mimeTypeDetector>
+ </dna:mimeTypeDetectors>
+ <!--
+ Define the JCR repositories
+ -->
+ <dna:repositories>
+ <!--
+ Define a JCR repository that accesses the 'JCR' source directly.
+ This of course is optional, since we could access the same content through 'JCR'.
+ -->
+ <dna:repository jcr:name="Test Repository Source">
+ <!-- Specify the source that should be used for the repository -->
+ <dna:source>Store</dna:source>
+ <!-- Define the options for the JCR repository, using camelcase version of JcrRepository.Option names
+-->
+ <dna:options jcr:primaryType="dna:options">
+ <jaasLoginConfigName jcr:primaryType="dna:option" dna:value="dna-jcr"/>
+ <projectNodeTypes jcr:primaryType="dna:option" dna:value="false"/>
+ </dna:options>
+ <!-- Define any namespaces for this repository, other than those already defined by JCR or DNA
+-->
+ <namespaces jcr:primaryType="dna:namespaces">
+ <dnatest jcr:primaryType="dna:namespace" dna:uri="http://jboss.org/dna/test/1.0"/>
+ </namespaces>
+ </dna:repository>
+ </dna:repositories>
+</configuration>
Property changes on: trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/configRepository.xml
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Added: trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/repositoryOverlay.properties
===================================================================
--- trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/repositoryOverlay.properties (rev 0)
+++ trunk/dna-integration-tests/src/test/resources/tck/basic-jpa/repositoryOverlay.properties 2009-12-07 15:19:53 UTC (rev 1413)
@@ -0,0 +1 @@
+# Placeholder for any overlaid properties for this repo configuration
14 years, 6 months
DNA SVN: r1412 - in trunk: dna-integration-tests/src/test/java/org/jboss/dna and 3 other directories.
by dna-commits@lists.jboss.org
Author: bcarothers
Date: 2009-12-07 10:10:54 -0500 (Mon, 07 Dec 2009)
New Revision: 1412
Added:
trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/BasicJpaRepositoryTckTest.java
Removed:
trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/JpaRepositoryTckTest.java
trunk/dna-integration-tests/src/test/java/org/jboss/dna/tests/
Modified:
trunk/dna-integration-tests/pom.xml
trunk/dna-integration-tests/src/test/resources/tck/simple-jpa/configRepository.xml
trunk/extensions/dna-connector-store-jpa/src/main/java/org/jboss/dna/connector/store/jpa/JpaSource.java
Log:
Applied fixes to correct test failures in nightly integration build and clean up the dna-integration-tests package.
Modified: trunk/dna-integration-tests/pom.xml
===================================================================
--- trunk/dna-integration-tests/pom.xml 2009-12-06 01:00:08 UTC (rev 1411)
+++ trunk/dna-integration-tests/pom.xml 2009-12-07 15:10:54 UTC (rev 1412)
@@ -239,7 +239,8 @@
<filtering>true</filtering>
<directory>src/test/resources</directory>
<includes>
- <include>tck/jpa/configRepository.xml</include>
+ <include>tck/basic-jpa/configRepository.xml</include>
+ <include>tck/simple-jpa/configRepository.xml</include>
</includes>
</testResource>
</testResources>
Added: trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/BasicJpaRepositoryTckTest.java
===================================================================
--- trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/BasicJpaRepositoryTckTest.java (rev 0)
+++ trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/BasicJpaRepositoryTckTest.java 2009-12-07 15:10:54 UTC (rev 1412)
@@ -0,0 +1,170 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.test.integration;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+import org.apache.jackrabbit.test.api.AddNodeTest;
+import org.apache.jackrabbit.test.api.CheckPermissionTest;
+import org.apache.jackrabbit.test.api.ImpersonateTest;
+import org.apache.jackrabbit.test.api.NamespaceRegistryTest;
+import org.apache.jackrabbit.test.api.NodeAddMixinTest;
+import org.apache.jackrabbit.test.api.NodeCanAddMixinTest;
+import org.apache.jackrabbit.test.api.NodeItemIsModifiedTest;
+import org.apache.jackrabbit.test.api.NodeItemIsNewTest;
+import org.apache.jackrabbit.test.api.NodeOrderableChildNodesTest;
+import org.apache.jackrabbit.test.api.NodeRemoveMixinTest;
+import org.apache.jackrabbit.test.api.NodeTest;
+import org.apache.jackrabbit.test.api.PropertyItemIsModifiedTest;
+import org.apache.jackrabbit.test.api.PropertyItemIsNewTest;
+import org.apache.jackrabbit.test.api.PropertyTest;
+import org.apache.jackrabbit.test.api.RepositoryLoginTest;
+import org.apache.jackrabbit.test.api.SerializationTest;
+import org.apache.jackrabbit.test.api.SessionTest;
+import org.apache.jackrabbit.test.api.SetPropertyAssumeTypeTest;
+import org.apache.jackrabbit.test.api.SetPropertyBooleanTest;
+import org.apache.jackrabbit.test.api.SetPropertyCalendarTest;
+import org.apache.jackrabbit.test.api.SetPropertyConstraintViolationExceptionTest;
+import org.apache.jackrabbit.test.api.SetPropertyDoubleTest;
+import org.apache.jackrabbit.test.api.SetPropertyInputStreamTest;
+import org.apache.jackrabbit.test.api.SetPropertyLongTest;
+import org.apache.jackrabbit.test.api.SetPropertyNodeTest;
+import org.apache.jackrabbit.test.api.SetPropertyStringTest;
+import org.apache.jackrabbit.test.api.SetPropertyValueTest;
+import org.apache.jackrabbit.test.api.SetValueBinaryTest;
+import org.apache.jackrabbit.test.api.SetValueBooleanTest;
+import org.apache.jackrabbit.test.api.SetValueConstraintViolationExceptionTest;
+import org.apache.jackrabbit.test.api.SetValueDateTest;
+import org.apache.jackrabbit.test.api.SetValueDoubleTest;
+import org.apache.jackrabbit.test.api.SetValueLongTest;
+import org.apache.jackrabbit.test.api.SetValueReferenceTest;
+import org.apache.jackrabbit.test.api.SetValueStringTest;
+import org.apache.jackrabbit.test.api.SetValueValueFormatExceptionTest;
+import org.apache.jackrabbit.test.api.SetValueVersionExceptionTest;
+import org.apache.jackrabbit.test.api.ValueFactoryTest;
+import org.apache.jackrabbit.test.api.WorkspaceCloneReferenceableTest;
+import org.apache.jackrabbit.test.api.WorkspaceCloneSameNameSibsTest;
+import org.apache.jackrabbit.test.api.WorkspaceCloneTest;
+import org.apache.jackrabbit.test.api.WorkspaceCloneVersionableTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesReferenceableTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesSameNameSibsTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesVersionableTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopyReferenceableTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopySameNameSibsTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopyTest;
+import org.apache.jackrabbit.test.api.WorkspaceCopyVersionableTest;
+import org.apache.jackrabbit.test.api.WorkspaceMoveReferenceableTest;
+import org.apache.jackrabbit.test.api.WorkspaceMoveSameNameSibsTest;
+import org.apache.jackrabbit.test.api.WorkspaceMoveTest;
+import org.apache.jackrabbit.test.api.WorkspaceMoveVersionableTest;
+
+public class BasicJpaRepositoryTckTest {
+
+ public static Test suite() {
+ TestSuite suite = AbstractRepositoryTckTest.readOnlyRepositorySuite("basic-jpa");
+ suite.addTest(new LevelTwoFeatureTests());
+ // suite.addTest(org.apache.jackrabbit.test.api.lock.TestAll.suite());
+
+ return suite;
+
+ }
+
+ private static class LevelTwoFeatureTests extends TestSuite {
+ protected LevelTwoFeatureTests() {
+ super("JCR Level 2 API Tests");
+ // We currently don't pass the tests in those suites that are commented out
+ // See https://jira.jboss.org/jira/browse/DNA-285
+
+ // level 2 tests
+ addTestSuite(AddNodeTest.class);
+ addTestSuite(NamespaceRegistryTest.class);
+ // addTestSuite(ReferencesTest.class);
+ addTestSuite(SessionTest.class);
+ // addTestSuite(SessionUUIDTest.class);
+ addTestSuite(NodeTest.class);
+ // addTestSuite(NodeUUIDTest.class);
+ addTestSuite(NodeOrderableChildNodesTest.class);
+ addTestSuite(PropertyTest.class);
+
+ addTestSuite(SetValueBinaryTest.class);
+ addTestSuite(SetValueBooleanTest.class);
+ addTestSuite(SetValueDateTest.class);
+ addTestSuite(SetValueDoubleTest.class);
+ addTestSuite(SetValueLongTest.class);
+ addTestSuite(SetValueReferenceTest.class);
+ addTestSuite(SetValueStringTest.class);
+ addTestSuite(SetValueConstraintViolationExceptionTest.class);
+ addTestSuite(SetValueValueFormatExceptionTest.class);
+ addTestSuite(SetValueVersionExceptionTest.class);
+
+ addTestSuite(SetPropertyBooleanTest.class);
+ addTestSuite(SetPropertyCalendarTest.class);
+ addTestSuite(SetPropertyDoubleTest.class);
+ addTestSuite(SetPropertyInputStreamTest.class);
+ addTestSuite(SetPropertyLongTest.class);
+ addTestSuite(SetPropertyNodeTest.class);
+ addTestSuite(SetPropertyStringTest.class);
+ addTestSuite(SetPropertyValueTest.class);
+ addTestSuite(SetPropertyConstraintViolationExceptionTest.class);
+ addTestSuite(SetPropertyAssumeTypeTest.class);
+
+ addTestSuite(NodeItemIsModifiedTest.class);
+ addTestSuite(NodeItemIsNewTest.class);
+ addTestSuite(PropertyItemIsModifiedTest.class);
+ addTestSuite(PropertyItemIsNewTest.class);
+
+ addTestSuite(NodeAddMixinTest.class);
+ addTestSuite(NodeCanAddMixinTest.class);
+ addTestSuite(NodeRemoveMixinTest.class);
+
+ addTestSuite(WorkspaceCloneReferenceableTest.class);
+ addTestSuite(WorkspaceCloneSameNameSibsTest.class);
+ addTestSuite(WorkspaceCloneTest.class);
+ addTestSuite(WorkspaceCloneVersionableTest.class);
+ addTestSuite(WorkspaceCopyBetweenWorkspacesReferenceableTest.class);
+ addTestSuite(WorkspaceCopyBetweenWorkspacesSameNameSibsTest.class);
+ addTestSuite(WorkspaceCopyBetweenWorkspacesTest.class);
+ addTestSuite(WorkspaceCopyBetweenWorkspacesVersionableTest.class);
+ addTestSuite(WorkspaceCopyReferenceableTest.class);
+ addTestSuite(WorkspaceCopySameNameSibsTest.class);
+ addTestSuite(WorkspaceCopyTest.class);
+ addTestSuite(WorkspaceCopyVersionableTest.class);
+ addTestSuite(WorkspaceMoveReferenceableTest.class);
+ addTestSuite(WorkspaceMoveSameNameSibsTest.class);
+ addTestSuite(WorkspaceMoveTest.class);
+ addTestSuite(WorkspaceMoveVersionableTest.class);
+
+ addTestSuite(RepositoryLoginTest.class);
+ addTestSuite(ImpersonateTest.class);
+ addTestSuite(CheckPermissionTest.class);
+
+ // addTestSuite(DocumentViewImportTest.class);
+ addTestSuite(SerializationTest.class);
+
+ addTestSuite(ValueFactoryTest.class);
+ }
+ }
+
+}
Property changes on: trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/BasicJpaRepositoryTckTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/JpaRepositoryTckTest.java
===================================================================
--- trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/JpaRepositoryTckTest.java 2009-12-06 01:00:08 UTC (rev 1411)
+++ trunk/dna-integration-tests/src/test/java/org/jboss/dna/test/integration/JpaRepositoryTckTest.java 2009-12-07 15:10:54 UTC (rev 1412)
@@ -1,170 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.test.integration;
-
-import junit.framework.Test;
-import junit.framework.TestSuite;
-import org.apache.jackrabbit.test.api.AddNodeTest;
-import org.apache.jackrabbit.test.api.CheckPermissionTest;
-import org.apache.jackrabbit.test.api.ImpersonateTest;
-import org.apache.jackrabbit.test.api.NamespaceRegistryTest;
-import org.apache.jackrabbit.test.api.NodeAddMixinTest;
-import org.apache.jackrabbit.test.api.NodeCanAddMixinTest;
-import org.apache.jackrabbit.test.api.NodeItemIsModifiedTest;
-import org.apache.jackrabbit.test.api.NodeItemIsNewTest;
-import org.apache.jackrabbit.test.api.NodeOrderableChildNodesTest;
-import org.apache.jackrabbit.test.api.NodeRemoveMixinTest;
-import org.apache.jackrabbit.test.api.NodeTest;
-import org.apache.jackrabbit.test.api.PropertyItemIsModifiedTest;
-import org.apache.jackrabbit.test.api.PropertyItemIsNewTest;
-import org.apache.jackrabbit.test.api.PropertyTest;
-import org.apache.jackrabbit.test.api.RepositoryLoginTest;
-import org.apache.jackrabbit.test.api.SerializationTest;
-import org.apache.jackrabbit.test.api.SessionTest;
-import org.apache.jackrabbit.test.api.SetPropertyAssumeTypeTest;
-import org.apache.jackrabbit.test.api.SetPropertyBooleanTest;
-import org.apache.jackrabbit.test.api.SetPropertyCalendarTest;
-import org.apache.jackrabbit.test.api.SetPropertyConstraintViolationExceptionTest;
-import org.apache.jackrabbit.test.api.SetPropertyDoubleTest;
-import org.apache.jackrabbit.test.api.SetPropertyInputStreamTest;
-import org.apache.jackrabbit.test.api.SetPropertyLongTest;
-import org.apache.jackrabbit.test.api.SetPropertyNodeTest;
-import org.apache.jackrabbit.test.api.SetPropertyStringTest;
-import org.apache.jackrabbit.test.api.SetPropertyValueTest;
-import org.apache.jackrabbit.test.api.SetValueBinaryTest;
-import org.apache.jackrabbit.test.api.SetValueBooleanTest;
-import org.apache.jackrabbit.test.api.SetValueConstraintViolationExceptionTest;
-import org.apache.jackrabbit.test.api.SetValueDateTest;
-import org.apache.jackrabbit.test.api.SetValueDoubleTest;
-import org.apache.jackrabbit.test.api.SetValueLongTest;
-import org.apache.jackrabbit.test.api.SetValueReferenceTest;
-import org.apache.jackrabbit.test.api.SetValueStringTest;
-import org.apache.jackrabbit.test.api.SetValueValueFormatExceptionTest;
-import org.apache.jackrabbit.test.api.SetValueVersionExceptionTest;
-import org.apache.jackrabbit.test.api.ValueFactoryTest;
-import org.apache.jackrabbit.test.api.WorkspaceCloneReferenceableTest;
-import org.apache.jackrabbit.test.api.WorkspaceCloneSameNameSibsTest;
-import org.apache.jackrabbit.test.api.WorkspaceCloneTest;
-import org.apache.jackrabbit.test.api.WorkspaceCloneVersionableTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesReferenceableTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesSameNameSibsTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopyBetweenWorkspacesVersionableTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopyReferenceableTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopySameNameSibsTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopyTest;
-import org.apache.jackrabbit.test.api.WorkspaceCopyVersionableTest;
-import org.apache.jackrabbit.test.api.WorkspaceMoveReferenceableTest;
-import org.apache.jackrabbit.test.api.WorkspaceMoveSameNameSibsTest;
-import org.apache.jackrabbit.test.api.WorkspaceMoveTest;
-import org.apache.jackrabbit.test.api.WorkspaceMoveVersionableTest;
-
-public class JpaRepositoryTckTest {
-
- public static Test suite() {
- TestSuite suite = AbstractRepositoryTckTest.readOnlyRepositorySuite("jpa");
- suite.addTest(new LevelTwoFeatureTests());
- // suite.addTest(org.apache.jackrabbit.test.api.lock.TestAll.suite());
-
- return suite;
-
- }
-
- private static class LevelTwoFeatureTests extends TestSuite {
- protected LevelTwoFeatureTests() {
- super("JCR Level 2 API Tests");
- // We currently don't pass the tests in those suites that are commented out
- // See https://jira.jboss.org/jira/browse/DNA-285
-
- // level 2 tests
- addTestSuite(AddNodeTest.class);
- addTestSuite(NamespaceRegistryTest.class);
- // addTestSuite(ReferencesTest.class);
- addTestSuite(SessionTest.class);
- // addTestSuite(SessionUUIDTest.class);
- addTestSuite(NodeTest.class);
- // addTestSuite(NodeUUIDTest.class);
- addTestSuite(NodeOrderableChildNodesTest.class);
- addTestSuite(PropertyTest.class);
-
- addTestSuite(SetValueBinaryTest.class);
- addTestSuite(SetValueBooleanTest.class);
- addTestSuite(SetValueDateTest.class);
- addTestSuite(SetValueDoubleTest.class);
- addTestSuite(SetValueLongTest.class);
- addTestSuite(SetValueReferenceTest.class);
- addTestSuite(SetValueStringTest.class);
- addTestSuite(SetValueConstraintViolationExceptionTest.class);
- addTestSuite(SetValueValueFormatExceptionTest.class);
- addTestSuite(SetValueVersionExceptionTest.class);
-
- addTestSuite(SetPropertyBooleanTest.class);
- addTestSuite(SetPropertyCalendarTest.class);
- addTestSuite(SetPropertyDoubleTest.class);
- addTestSuite(SetPropertyInputStreamTest.class);
- addTestSuite(SetPropertyLongTest.class);
- addTestSuite(SetPropertyNodeTest.class);
- addTestSuite(SetPropertyStringTest.class);
- addTestSuite(SetPropertyValueTest.class);
- addTestSuite(SetPropertyConstraintViolationExceptionTest.class);
- addTestSuite(SetPropertyAssumeTypeTest.class);
-
- addTestSuite(NodeItemIsModifiedTest.class);
- addTestSuite(NodeItemIsNewTest.class);
- addTestSuite(PropertyItemIsModifiedTest.class);
- addTestSuite(PropertyItemIsNewTest.class);
-
- addTestSuite(NodeAddMixinTest.class);
- addTestSuite(NodeCanAddMixinTest.class);
- addTestSuite(NodeRemoveMixinTest.class);
-
- addTestSuite(WorkspaceCloneReferenceableTest.class);
- addTestSuite(WorkspaceCloneSameNameSibsTest.class);
- addTestSuite(WorkspaceCloneTest.class);
- addTestSuite(WorkspaceCloneVersionableTest.class);
- addTestSuite(WorkspaceCopyBetweenWorkspacesReferenceableTest.class);
- addTestSuite(WorkspaceCopyBetweenWorkspacesSameNameSibsTest.class);
- addTestSuite(WorkspaceCopyBetweenWorkspacesTest.class);
- addTestSuite(WorkspaceCopyBetweenWorkspacesVersionableTest.class);
- addTestSuite(WorkspaceCopyReferenceableTest.class);
- addTestSuite(WorkspaceCopySameNameSibsTest.class);
- addTestSuite(WorkspaceCopyTest.class);
- addTestSuite(WorkspaceCopyVersionableTest.class);
- addTestSuite(WorkspaceMoveReferenceableTest.class);
- addTestSuite(WorkspaceMoveSameNameSibsTest.class);
- addTestSuite(WorkspaceMoveTest.class);
- addTestSuite(WorkspaceMoveVersionableTest.class);
-
- addTestSuite(RepositoryLoginTest.class);
- addTestSuite(ImpersonateTest.class);
- addTestSuite(CheckPermissionTest.class);
-
- // addTestSuite(DocumentViewImportTest.class);
- addTestSuite(SerializationTest.class);
-
- addTestSuite(ValueFactoryTest.class);
- }
- }
-
-}
Modified: trunk/dna-integration-tests/src/test/resources/tck/simple-jpa/configRepository.xml
===================================================================
--- trunk/dna-integration-tests/src/test/resources/tck/simple-jpa/configRepository.xml 2009-12-06 01:00:08 UTC (rev 1411)
+++ trunk/dna-integration-tests/src/test/resources/tck/simple-jpa/configRepository.xml 2009-12-07 15:10:54 UTC (rev 1412)
@@ -35,16 +35,21 @@
The 'JCR' repository is a JBoss Cache source with a single default workspace (though others could be created, too).
-->
<dna:source jcr:name="Store" dna:classname="org.jboss.dna.connector.store.jpa.JpaSource"
- dna:dialect="org.hibernate.dialect.HSQLDialect"
dna:model="Simple"
- dna:driverClassName="org.hsqldb.jdbcDriver"
- dna:username="sa"
- dna:password=""
- dna:url="jdbc:hsqldb:mem:."
- dna:predefinedWorkspaceNames="otherWorkspace"
- dna:showSql="false"
- dna:autoGenerateSchema="create"
- dna:maximumConnectionsInPool="5"
+ dna:dialect="${jpaSource.dialect}"
+ dna:driverClassName="${jpaSource.driverClassName}"
+ dna:username="${jpaSource.username}"
+ dna:password="${jpaSource.password}"
+ dna:url="${jpaSource.url}"
+ dna:maximumConnectionsInPool="${jpaSource.maximumConnectionsInPool}"
+ dna:referentialIntegrityEnforced="${jpaSource.referentialIntegrityEnforced}"
+ dna:largeValueSizeInBytes="${jpaSource.largeValueSizeInBytes}"
+ dna:retryLimit="${jpaSource.retryLimit}"
+ dna:compressData="${jpaSource.compressData}"
+ dna:predefinedWorkspaceNames="default, otherWorkspace"
+ dna:showSql="${jpaSource.showSql}"
+ dna:autoGenerateSchema="${jpaSource.autoGenerateSchema}"
+ dna:creatingWorkspacesAllowed="true"
dna:defaultWorkspaceName="default"/>
</dna:sources>
<!--
Modified: trunk/extensions/dna-connector-store-jpa/src/main/java/org/jboss/dna/connector/store/jpa/JpaSource.java
===================================================================
--- trunk/extensions/dna-connector-store-jpa/src/main/java/org/jboss/dna/connector/store/jpa/JpaSource.java 2009-12-06 01:00:08 UTC (rev 1411)
+++ trunk/extensions/dna-connector-store-jpa/src/main/java/org/jboss/dna/connector/store/jpa/JpaSource.java 2009-12-07 15:10:54 UTC (rev 1412)
@@ -450,6 +450,9 @@
* @see #getPredefinedWorkspaceNames()
*/
public synchronized void setPredefinedWorkspaceNames( String[] predefinedWorkspaceNames ) {
+ if (predefinedWorkspaceNames != null && predefinedWorkspaceNames.length == 1) {
+ predefinedWorkspaceNames = predefinedWorkspaceNames[0].split("\\s*,\\s*");
+ }
this.predefinedWorkspaces = predefinedWorkspaceNames != null ? predefinedWorkspaceNames : new String[] {};
}
14 years, 6 months
DNA SVN: r1411 - in trunk/dna-jcr/src: test/java/org/jboss/dna/jcr and 1 other directory.
by dna-commits@lists.jboss.org
Author: bcarothers
Date: 2009-12-05 20:00:08 -0500 (Sat, 05 Dec 2009)
New Revision: 1411
Modified:
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java
trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java
trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/DnaTckTest.java
Log:
DNA-541 Locking Implementation Does Not Support Timeouts
Applied patch that adds a new permission (granted to anyone with the admin role) that allows them to break any locks. This will at least give DNA administrators an ability to manually clean up any dangling locks.
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java 2009-12-06 00:54:08 UTC (rev 1410)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/AbstractJcrNode.java 2009-12-06 01:00:08 UTC (rev 1411)
@@ -24,6 +24,7 @@
package org.jboss.dna.jcr;
import java.io.InputStream;
+import java.security.AccessControlException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
@@ -1399,8 +1400,13 @@
throw new LockException(JcrI18n.notLocked.text(this.location));
}
- if (!cache.session().lockTokens().contains(lock.getLockToken())) {
- throw new LockException(JcrI18n.lockTokenNotHeld.text(this.location));
+ if (!session().lockTokens().contains(lock.getLockToken())) {
+ try {
+ // See if the user has the permission to break someone else's lock
+ session().checkPermission(cache.workspaceName(), null, JcrSession.DNA_UNLOCK_ANY_PERMISSION);
+ } catch (AccessControlException iae) {
+ throw new LockException(JcrI18n.lockTokenNotHeld.text(this.location));
+ }
}
session().workspace().lockManager().unlock(session(), lock);
@@ -1651,8 +1657,8 @@
if (destChildRelPath != null) {
Path destPath = pathFactory.create(destChildRelPath);
if (destPath.isAbsolute() || destPath.size() != 1) {
- throw new ItemNotFoundException(JcrI18n.pathNotFound.text(destPath.getString(cache.context()
- .getNamespaceRegistry()),
+ throw new ItemNotFoundException(
+ JcrI18n.pathNotFound.text(destPath.getString(cache.context().getNamespaceRegistry()),
cache.session().workspace().getName()));
}
Modified: trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java
===================================================================
--- trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java 2009-12-06 00:54:08 UTC (rev 1410)
+++ trunk/dna-jcr/src/main/java/org/jboss/dna/jcr/JcrSession.java 2009-12-06 01:00:08 UTC (rev 1411)
@@ -92,6 +92,7 @@
public static final String DNA_REGISTER_NAMESPACE_PERMISSION = "register_namespace";
public static final String DNA_REGISTER_TYPE_PERMISSION = "register_type";
+ public static final String DNA_UNLOCK_ANY_PERMISSION = "unlock_any";
public static final String JCR_ADD_NODE_PERMISSION = "add_node";
public static final String JCR_SET_PROPERTY_PERMISSION = "set_property";
@@ -389,7 +390,8 @@
if (JCR_READ_PERMISSION.equals(action)) {
hasPermission &= hasRole(DNA_READ_PERMISSION, workspaceName) || hasRole(DNA_WRITE_PERMISSION, workspaceName)
|| hasRole(DNA_ADMIN_PERMISSION, workspaceName);
- } else if (DNA_REGISTER_NAMESPACE_PERMISSION.equals(action) || DNA_REGISTER_TYPE_PERMISSION.equals(action)) {
+ } else if (DNA_REGISTER_NAMESPACE_PERMISSION.equals(action) || DNA_REGISTER_TYPE_PERMISSION.equals(action)
+ || DNA_UNLOCK_ANY_PERMISSION.equals(action)) {
hasPermission &= hasRole(DNA_ADMIN_PERMISSION, workspaceName);
} else {
hasPermission &= hasRole(DNA_ADMIN_PERMISSION, workspaceName) || hasRole(DNA_WRITE_PERMISSION, workspaceName);
Modified: trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/DnaTckTest.java
===================================================================
--- trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/DnaTckTest.java 2009-12-06 00:54:08 UTC (rev 1410)
+++ trunk/dna-jcr/src/test/java/org/jboss/dna/jcr/DnaTckTest.java 2009-12-06 01:00:08 UTC (rev 1411)
@@ -432,4 +432,26 @@
}
}
+
+ public void testAdminUserCanBreakOthersLocks() throws Exception {
+ String lockNodeName = "lockTestNode";
+ session = helper.getReadWriteSession();
+ Node root = session.getRootNode();
+ Node lockNode = root.addNode(lockNodeName);
+ lockNode.addMixin("mix:lockable");
+ session.save();
+
+ lockNode.lock(false, false);
+ assertThat(lockNode.isLocked(), is(true));
+
+ Session superuser = helper.getSuperuserSession();
+ root = superuser.getRootNode();
+ lockNode = root.getNode(lockNodeName);
+
+ assertThat(lockNode.isLocked(), is(true));
+ lockNode.unlock();
+ assertThat(lockNode.isLocked(), is(false));
+ superuser.logout();
+
+ }
}
14 years, 6 months
DNA SVN: r1410 - trunk/extensions/dna-connector-store-jpa/src/test/java/org/jboss/dna/connector/store/jpa.
by dna-commits@lists.jboss.org
Author: bcarothers
Date: 2009-12-05 19:54:08 -0500 (Sat, 05 Dec 2009)
New Revision: 1410
Modified:
trunk/extensions/dna-connector-store-jpa/src/test/java/org/jboss/dna/connector/store/jpa/JpaSourceTest.java
Log:
DNA-577 Change the default model of the JPA connector from 'Basic' to 'Simple'
Fixed broken test.
Modified: trunk/extensions/dna-connector-store-jpa/src/test/java/org/jboss/dna/connector/store/jpa/JpaSourceTest.java
===================================================================
--- trunk/extensions/dna-connector-store-jpa/src/test/java/org/jboss/dna/connector/store/jpa/JpaSourceTest.java 2009-12-06 00:11:37 UTC (rev 1409)
+++ trunk/extensions/dna-connector-store-jpa/src/test/java/org/jboss/dna/connector/store/jpa/JpaSourceTest.java 2009-12-06 00:54:08 UTC (rev 1410)
@@ -68,6 +68,7 @@
@Test
public void shouldHaveNoDefaultModelUponConstruction() {
+ JpaSource source = new JpaSource();
assertThat(source.getModel(), is(nullValue()));
}
14 years, 6 months