DNA SVN: r1330 - trunk/dna-search/src/test/java/org/jboss/dna/search/query.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-18 14:42:04 -0500 (Wed, 18 Nov 2009)
New Revision: 1330
Added:
trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java
Removed:
trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java
Log:
DNA-467 Renamed unit test to reflect the name of the class it's testing.
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java 2009-11-18 19:39:05 UTC (rev 1329)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java 2009-11-18 19:42:04 UTC (rev 1330)
@@ -1,126 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.query;
-
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.stub;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Similarity;
-import org.jboss.dna.search.query.NotQuery;
-import org.junit.Test;
-
-public class LuceneNotQueryTest {
-
- @Test
- public void scorerShouldSkipAdjacentDocsIfScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(0, 1, 2, 3, 4);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 5, 6, 7, 8, 9);
- }
-
- @Test
- public void scorerShouldSkipDocsAtEndIfScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(8, 9);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 0, 1, 2, 3, 4, 5, 6, 7);
- }
-
- @Test
- public void scorerShouldScoreFirstDocsIfNotScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(2, 3, 4);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 0, 1, 5, 6, 7, 8, 9);
- }
-
- @Test
- public void scorerShouldScoreNonAdjacentDocsNotScoredByOperandScorer() throws IOException {
- IndexReader reader = mock(IndexReader.class);
- stub(reader.isDeleted(anyInt())).toReturn(false);
- stub(reader.maxDoc()).toReturn(10);
- Scorer operandScorer = new MockScorer(2, 4, 8);
- Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
- assertScores(notScorer, 0, 1, 3, 5, 6, 7, 9);
- }
-
- protected void assertScores( Scorer scorer,
- int... docIds ) throws IOException {
- for (int docId : docIds) {
- assertThat(scorer.nextDoc(), is(docId));
- assertThat(scorer.score(), is(1.0f));
- }
- assertThat(scorer.nextDoc(), is(Scorer.NO_MORE_DOCS));
- }
-
- protected static class MockScorer extends Scorer {
- private final Iterator<Integer> docIds;
-
- protected MockScorer( int... docIds ) {
- super(Similarity.getDefault());
- List<Integer> ids = new ArrayList<Integer>();
- for (int docId : docIds) {
- ids.add(new Integer(docId));
- }
- this.docIds = ids.iterator();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
- */
- @Override
- public int nextDoc() {
- if (docIds.hasNext()) return docIds.next();
- return Scorer.NO_MORE_DOCS;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Scorer#score()
- */
- @Override
- public float score() {
- throw new UnsupportedOperationException("Should not be called");
- }
- }
-
-}
Copied: trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java (from rev 1329, trunk/dna-search/src/test/java/org/jboss/dna/search/query/LuceneNotQueryTest.java)
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java (rev 0)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java 2009-11-18 19:42:04 UTC (rev 1330)
@@ -0,0 +1,126 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.stub;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Similarity;
+import org.jboss.dna.search.query.NotQuery;
+import org.junit.Test;
+
+public class NotQueryTest {
+
+ @Test
+ public void scorerShouldSkipAdjacentDocsIfScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(0, 1, 2, 3, 4);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 5, 6, 7, 8, 9);
+ }
+
+ @Test
+ public void scorerShouldSkipDocsAtEndIfScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(8, 9);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 2, 3, 4, 5, 6, 7);
+ }
+
+ @Test
+ public void scorerShouldScoreFirstDocsIfNotScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(2, 3, 4);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 5, 6, 7, 8, 9);
+ }
+
+ @Test
+ public void scorerShouldScoreNonAdjacentDocsNotScoredByOperandScorer() throws IOException {
+ IndexReader reader = mock(IndexReader.class);
+ stub(reader.isDeleted(anyInt())).toReturn(false);
+ stub(reader.maxDoc()).toReturn(10);
+ Scorer operandScorer = new MockScorer(2, 4, 8);
+ Scorer notScorer = new NotQuery.NotScorer(operandScorer, reader);
+ assertScores(notScorer, 0, 1, 3, 5, 6, 7, 9);
+ }
+
+ protected void assertScores( Scorer scorer,
+ int... docIds ) throws IOException {
+ for (int docId : docIds) {
+ assertThat(scorer.nextDoc(), is(docId));
+ assertThat(scorer.score(), is(1.0f));
+ }
+ assertThat(scorer.nextDoc(), is(Scorer.NO_MORE_DOCS));
+ }
+
+ protected static class MockScorer extends Scorer {
+ private final Iterator<Integer> docIds;
+
+ protected MockScorer( int... docIds ) {
+ super(Similarity.getDefault());
+ List<Integer> ids = new ArrayList<Integer>();
+ for (int docId : docIds) {
+ ids.add(new Integer(docId));
+ }
+ this.docIds = ids.iterator();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.DocIdSetIterator#nextDoc()
+ */
+ @Override
+ public int nextDoc() {
+ if (docIds.hasNext()) return docIds.next();
+ return Scorer.NO_MORE_DOCS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Scorer#score()
+ */
+ @Override
+ public float score() {
+ throw new UnsupportedOperationException("Should not be called");
+ }
+ }
+
+}
Property changes on: trunk/dna-search/src/test/java/org/jboss/dna/search/query/NotQueryTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
14 years, 5 months
DNA SVN: r1329 - in trunk: dna-graph/src/main/java/org/jboss/dna/graph/search and 7 other directories.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-18 14:39:05 -0500 (Wed, 18 Nov 2009)
New Revision: 1329
Added:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineException.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/
trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java
trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java
Removed:
trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java
trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java
trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngineException.java
trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java
trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java
trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties
trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java
trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties
Log:
DNA-467 refactored the search engine components, moving the general-purpose classes (i.e., those that don't depend on Lucene) into 'dna-graph'.
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/GraphI18n.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -160,6 +160,16 @@
public static I18n expectingLiteralAndUnableToParseAsDouble;
public static I18n expectingLiteralAndUnableToParseAsDate;
+ /* Search */
+ public static I18n errorWhileIndexingContentAtPath;
+ public static I18n errorWhileRemovingContentAtPath;
+ public static I18n errorWhileUpdatingContent;
+ public static I18n errorWhileCommittingIndexChanges;
+ public static I18n errorWhileOptimizingIndexes;
+ public static I18n errorWhilePerformingSearch;
+ public static I18n errorWhilePerformingQuery;
+ public static I18n errorWhileRemovingIndexesForWorkspace;
+
static {
try {
I18n.initialize(GraphI18n.class);
Copied: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java (from rev 1328, trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java)
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,807 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.search;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import net.jcip.annotations.GuardedBy;
+import net.jcip.annotations.ThreadSafe;
+import org.jboss.dna.common.i18n.I18n;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Graph;
+import org.jboss.dna.graph.GraphI18n;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Subgraph;
+import org.jboss.dna.graph.SubgraphNode;
+import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
+import org.jboss.dna.graph.connector.RepositorySource;
+import org.jboss.dna.graph.connector.RepositorySourceException;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.validate.Schemata;
+import org.jboss.dna.graph.request.ChangeRequest;
+import org.jboss.dna.graph.request.InvalidWorkspaceException;
+import org.jboss.dna.graph.search.SearchProvider.Session;
+
+/**
+ * A component that acts as a search engine for the content within a single {@link RepositorySource}. This engine manages a set of
+ * indexes and provides search functionality for each of the workspaces within the source, and provides various methods to
+ * (re)index the content contained with source's workspaces and keep the indexes up-to-date via changes.
+ */
+@ThreadSafe
+public class SearchEngine {
+
+ /**
+ * The default maximum number of changes that can be made to an index before the indexes are automatically
+ * optimized is {@value}
+ */
+ public static final int DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION = 0;
+
+ protected final ExecutionContext context;
+ private final String sourceName;
+ private final RepositoryConnectionFactory connectionFactory;
+ protected final SearchProvider indexLayout;
+ private final int maxChangesBeforeAutomaticOptimization;
+ @GuardedBy( "workspacesLock" )
+ private final Map<String, Workspace> workspacesByName = new HashMap<String, Workspace>();
+ private final ReadWriteLock workspacesLock = new ReentrantReadWriteLock();
+
+ /**
+ * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
+ * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
+ * {@link SearchProvider search provider}.
+ *
+ * @param context the execution context for indexing and optimization operations
+ * @param sourceName the name of the {@link RepositorySource}
+ * @param connectionFactory the connection factory
+ * @param indexLayout the specification of the Lucene index layout
+ * @param maxChangesBeforeAutomaticOptimization the number of changes that can be made to the index before the indexes are
+ * automatically optimized; may be 0 or a negative number if no automatic optimization should be done
+ * @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
+ */
+ public SearchEngine( ExecutionContext context,
+ String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ SearchProvider indexLayout,
+ int maxChangesBeforeAutomaticOptimization ) {
+ CheckArg.isNotNull(context, "context");
+ CheckArg.isNotNull(sourceName, "sourceName");
+ CheckArg.isNotNull(connectionFactory, "connectionFactory");
+ this.sourceName = sourceName;
+ this.connectionFactory = connectionFactory;
+ this.indexLayout = indexLayout;
+ this.context = context;
+ this.maxChangesBeforeAutomaticOptimization = maxChangesBeforeAutomaticOptimization < 0 ? 0 : maxChangesBeforeAutomaticOptimization;
+ }
+
+ /**
+ * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
+ * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
+ * {@link SearchProvider search provider} that defines where each workspace's indexes should be placed.
+ *
+ * @param context the execution context for indexing and optimization operations
+ * @param sourceName the name of the {@link RepositorySource}
+ * @param connectionFactory the connection factory
+ * @param indexLayout the specification of the Lucene index layout
+ * @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
+ */
+ public SearchEngine( ExecutionContext context,
+ String sourceName,
+ RepositoryConnectionFactory connectionFactory,
+ SearchProvider indexLayout ) {
+ this(context, sourceName, connectionFactory, indexLayout, DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION);
+ }
+
+ /**
+ * Get the name of the RepositorySource that this engine is to use.
+ *
+ * @return the source name; never null
+ */
+ public String getSourceName() {
+ return sourceName;
+ }
+
+ /**
+ * Get the context in which all indexing operations execute.
+ *
+ * @return the execution context; never null
+ */
+ public ExecutionContext getContext() {
+ return context;
+ }
+
+ /**
+ * @return maxChangesBeforeAutomaticOptimization
+ */
+ public int getMaxChangesBeforeAutomaticOptimization() {
+ return maxChangesBeforeAutomaticOptimization;
+ }
+
+ /**
+ * Utility to create a Graph for the source.
+ *
+ * @return the graph instance; never null
+ */
+ final Graph graph() {
+ return Graph.create(sourceName, connectionFactory, context);
+ }
+
+ /**
+ * Utility to obtain the root path.
+ *
+ * @return the root path; never null
+ */
+ final Path rootPath() {
+ return context.getValueFactories().getPathFactory().createRootPath();
+ }
+
+ /**
+ * Utility to obtain a readable string representation of the supplied path.
+ *
+ * @param path the path
+ * @return the readable string representation; may be null if path is null
+ */
+ final String readable( Path path ) {
+ return context.getValueFactories().getStringFactory().create(path);
+ }
+
+ /**
+ * Index all of the content at or below the supplied path in the named workspace within the {@link #getSourceName() source}.
+ * If the starting point is the root node, then this method will drop the existing index(es) and rebuild from the content in
+ * the workspace and source.
+ * <p>
+ * This method operates synchronously and returns when the requested indexing is completed.
+ * </p>
+ *
+ * @param workspaceName the name of the workspace
+ * @param startingPoint the location that represents the content to be indexed; must have a path
+ * @param depthPerRead the depth of each subgraph read operation
+ * @throws IllegalArgumentException if the workspace name or location are null
+ * @throws RepositorySourceException if there is a problem accessing the content
+ * @throws SearchEngineException if there is a problem updating the indexes
+ * @throws InvalidWorkspaceException if the workspace does not exist
+ */
+ public void index( String workspaceName,
+ Location startingPoint,
+ int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ CheckArg.isNotNull(startingPoint, "startingPoint");
+ assert startingPoint.hasPath();
+
+ Workspace workspace = getWorkspace(workspaceName);
+ if (startingPoint.getPath().isRoot()) {
+ // More efficient to just start over with a new index ...
+ workspace.execute(true, addContent(startingPoint, depthPerRead));
+ } else {
+ // Have to first remove the content below the starting point, then add it again ...
+ workspace.execute(false, removeContent(startingPoint), addContent(startingPoint, depthPerRead));
+ }
+ }
+
+ /**
+ * Index all of the content at or below the supplied path in the named workspace within the {@link #getSourceName() source}.
+ * If the starting point is the root node, then this method will drop the existing index(es) and rebuild from the content in
+ * the workspace and source.
+ * <p>
+ * This method operates synchronously and returns when the requested indexing is completed.
+ * </p>
+ *
+ * @param workspaceName the name of the workspace
+ * @param startingPoint the path that represents the content to be indexed
+ * @param depthPerRead the depth of each subgraph read operation
+ * @throws IllegalArgumentException if the workspace name or path are null
+ * @throws RepositorySourceException if there is a problem accessing the content
+ * @throws SearchEngineException if there is a problem updating the indexes
+ * @throws InvalidWorkspaceException if the workspace does not exist
+ */
+ public void index( String workspaceName,
+ Path startingPoint,
+ int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ CheckArg.isNotNull(startingPoint, "startingPoint");
+ index(workspaceName, Location.create(startingPoint), depthPerRead);
+ }
+
+ /**
+ * Index all of the content in the named workspace within the {@link #getSourceName() source}. This method operates
+ * synchronously and returns when the requested indexing is completed.
+ *
+ * @param workspaceName the name of the workspace
+ * @param depthPerRead the depth of each subgraph read operation
+ * @throws IllegalArgumentException if the workspace name is null
+ * @throws RepositorySourceException if there is a problem accessing the content
+ * @throws SearchEngineException if there is a problem updating the indexes
+ * @throws InvalidWorkspaceException if the workspace does not exist
+ */
+ public void index( String workspaceName,
+ int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ index(workspaceName, rootPath(), depthPerRead);
+ }
+
+ /**
+ * Index (or re-index) all of the content in all of the workspaces within the source. This method operates synchronously and
+ * returns when the requested indexing is completed.
+ *
+ * @param depthPerRead the depth of each subgraph read operation
+ * @throws RepositorySourceException if there is a problem accessing the content
+ * @throws SearchEngineException if there is a problem updating the indexes
+ */
+ public void index( int depthPerRead ) throws RepositorySourceException, SearchEngineException {
+ Path rootPath = rootPath();
+ for (String workspaceName : graph().getWorkspaces()) {
+ index(workspaceName, rootPath, depthPerRead);
+ }
+ }
+
+ /**
+ * Update the indexes with the supplied set of changes to the content.
+ *
+ * @param changes the set of changes to the content
+ * @throws IllegalArgumentException if the path is null
+ * @throws RepositorySourceException if there is a problem accessing the content
+ * @throws SearchEngineException if there is a problem updating the indexes
+ */
+ public void index( final Iterable<ChangeRequest> changes ) throws SearchEngineException {
+ // First break up all the changes into different collections, one collection per workspace ...
+ Map<String, Collection<ChangeRequest>> changesByWorkspace = new HashMap<String, Collection<ChangeRequest>>();
+ for (ChangeRequest request : changes) {
+ String workspaceName = request.changedWorkspace();
+ Collection<ChangeRequest> changesForWorkspace = changesByWorkspace.get(workspaceName);
+ if (changesForWorkspace == null) {
+ changesForWorkspace = new LinkedList<ChangeRequest>();
+ changesByWorkspace.put(workspaceName, changesForWorkspace);
+ }
+ changesForWorkspace.add(request);
+ }
+ // Now update the indexes for each workspace (serially). This minimizes the time that each workspace
+ // locks its indexes for writing.
+ for (Map.Entry<String, Collection<ChangeRequest>> entry : changesByWorkspace.entrySet()) {
+ String workspaceName = entry.getKey();
+ Collection<ChangeRequest> changesForWorkspace = entry.getValue();
+ getWorkspace(workspaceName).execute(false, updateContent(changesForWorkspace));
+ }
+ }
+
+ /**
+ * Invoke the engine's garbage collection on all indexes used by all workspaces in the source. This method reclaims space and
+ * optimizes the index. This should be done on a periodic basis after changes are made to the engine's indexes.
+ *
+ * @throws SearchEngineException if there is a problem during optimization
+ */
+ public void optimize() throws SearchEngineException {
+ for (String workspaceName : graph().getWorkspaces()) {
+ getWorkspace(workspaceName).execute(false, optimizeContent());
+ }
+ }
+
+ /**
+ * Invoke the engine's garbage collection for the indexes associated with the specified workspace. This method reclaims space
+ * and optimizes the index. This should be done on a periodic basis after changes are made to the engine's indexes.
+ *
+ * @param workspaceName the name of the workspace
+ * @throws IllegalArgumentException if the workspace name is null
+ * @throws SearchEngineException if there is a problem during optimization
+ * @throws InvalidWorkspaceException if the workspace does not exist
+ */
+ public void optimize( String workspaceName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ getWorkspace(workspaceName).execute(false, optimizeContent());
+ }
+
+ /**
+ * Perform a full-text search of the content in the named workspace, given the maximum number of results and the offset
+ * defining the first result the caller is interested in.
+ *
+ * @param context the execution context in which the search is to take place; may not be null
+ * @param workspaceName the name of the workspace
+ * @param fullTextSearch the full-text search to be performed; may not be null
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
+ * @return the locations of the nodes that satisfy the full-text search; never null
+ * @throws IllegalArgumentException if the execution context or workspace name are null
+ * @throws SearchEngineException if there is a problem performing the search
+ * @throws InvalidWorkspaceException if the workspace does not exist
+ */
+ public List<Location> fullTextSearch( ExecutionContext context,
+ String workspaceName,
+ String fullTextSearch,
+ int maxResults,
+ int offset ) {
+ CheckArg.isNotNull(context, "context");
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ Search searchActivity = searchContent(context, fullTextSearch, maxResults, offset);
+ getWorkspace(workspaceName).execute(false, searchActivity);
+ return searchActivity.getResults();
+ }
+
+ /**
+ * Perform a query of the content in the named workspace, given the Abstract Query Model representation of the query.
+ *
+ * @param context the execution context in which the search is to take place; may not be null
+ * @param workspaceName the name of the workspace
+ * @param query the query that is to be executed, in the form of the Abstract Query Model
+ * @param schemata the definition of the tables and views that can be used in the query; may not be null
+ * @return the query results; never null
+ * @throws IllegalArgumentException if the context, query, or schemata references are null
+ */
+ public QueryResults query( ExecutionContext context,
+ String workspaceName,
+ QueryCommand query,
+ Schemata schemata ) {
+ CheckArg.isNotNull(context, "context");
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ CheckArg.isNotNull(query, "query");
+ CheckArg.isNotNull(schemata, "schemata");
+ QueryContext queryContext = new QueryContext(context, schemata);
+ Query queryActivity = queryContent(queryContext, query);
+ getWorkspace(workspaceName).execute(false, queryActivity);
+ return queryActivity.getResults();
+ }
+
+ /**
+ * Remove the supplied index from the search engine. This is typically done when the workspace has been deleted from the
+ * source, or when the workspace's indexes are to be discarded and rebuilt from scratch.
+ *
+ * @param workspaceName the name of the workspace
+ * @throws IllegalArgumentException if the workspace name is null
+ * @throws SearchEngineException if there is a problem removing the workspace
+ */
+ public void removeWorkspace( String workspaceName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ try {
+ workspacesLock.writeLock().lock();
+ // Check whether another thread got in and created the engine while we waited ...
+ Workspace workspace = workspacesByName.remove(workspaceName);
+ if (workspace != null) {
+ indexLayout.destroyIndexes(context, getSourceName(), workspaceName);
+ }
+ } catch (IOException e) {
+ String message = GraphI18n.errorWhileRemovingIndexesForWorkspace.text(sourceName, workspaceName, e.getMessage());
+ throw new SearchEngineException(message, e);
+ } finally {
+ workspacesLock.writeLock().unlock();
+ }
+ }
+
+    /**
+     * Remove from the search engine all workspace-related indexes, thereby cleaning up any resources used by this search engine.
+     *
+     * @throws SearchEngineException if there is a problem removing any of the workspaces' indexes
+     */
+    public void removeWorkspaces() throws SearchEngineException {
+        // Acquire the lock BEFORE the 'try' so the 'finally' can never attempt to unlock a lock that was not acquired.
+        // The write lock is reentrant, so the nested removeWorkspace(...) calls may re-acquire it safely ...
+        workspacesLock.writeLock().lock();
+        try {
+            // Copy the key set first, since removeWorkspace(...) mutates the underlying map ...
+            for (String workspaceName : new HashSet<String>(workspacesByName.keySet())) {
+                removeWorkspace(workspaceName);
+            }
+        } finally {
+            workspacesLock.writeLock().unlock();
+        }
+    }
+
+    /**
+     * Get the search engine workspace object for the workspace with the supplied name, creating and registering one if it does
+     * not yet exist.
+     *
+     * @param workspaceName the name of the workspace
+     * @return the workspace's search engine; never null
+     * @throws InvalidWorkspaceException if the workspace does not exist in the source
+     */
+    protected Workspace getWorkspace( String workspaceName ) {
+        Workspace workspace = null;
+        // Acquire each lock BEFORE its 'try' so the 'finally' can never attempt to unlock a lock that was not acquired ...
+        workspacesLock.readLock().lock();
+        try {
+            workspace = workspacesByName.get(workspaceName);
+        } finally {
+            workspacesLock.readLock().unlock();
+        }
+
+        if (workspace == null) {
+            // Verify the workspace does exist before registering anything for it ...
+            if (!graph().getWorkspaces().contains(workspaceName)) {
+                String msg = GraphI18n.workspaceDoesNotExistInRepository.text(workspaceName, getSourceName());
+                throw new InvalidWorkspaceException(msg);
+            }
+            workspacesLock.writeLock().lock();
+            try {
+                // Check whether another thread got in and created the workspace while we waited ...
+                workspace = workspacesByName.get(workspaceName);
+                if (workspace == null) {
+                    // Create the workspace object and register it ...
+                    workspace = new Workspace(workspaceName);
+                    workspacesByName.put(workspaceName, workspace);
+                }
+            } finally {
+                workspacesLock.writeLock().unlock();
+            }
+        }
+        return workspace;
+    }
+
+    /**
+     * The per-workspace state for this search engine: the workspace's identity plus a running count of modified nodes used to
+     * decide when the indexes should be automatically optimized.
+     */
+    protected class Workspace {
+        // Source name is captured at construction time from the enclosing engine ...
+        private final String sourceName;
+        private final String workspaceName;
+        // Running count of nodes changed since the last optimize; shared across threads, hence atomic ...
+        protected final AtomicInteger modifiedNodesSinceLastOptimize = new AtomicInteger(0);
+
+        protected Workspace( String workspaceName ) {
+            this.workspaceName = workspaceName;
+            this.sourceName = getSourceName();
+        }
+
+        /**
+         * Get the workspace name.
+         *
+         * @return the workspace name; never null
+         */
+        public String getWorkspaceName() {
+            return workspaceName;
+        }
+
+        /**
+         * Execute the supplied activities against the indexes within a single provider session, committing on success and
+         * rolling back if any activity throws.
+         *
+         * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
+         * @param activities the activities to execute
+         * @throws SearchEngineException if there is a problem performing the activities
+         */
+        protected final void execute( boolean overwrite,
+                                      Activity... activities ) throws SearchEngineException {
+            // Determine if the activities are readonly, so the session can be optimized accordingly ...
+            boolean readOnly = true;
+            for (Activity activity : activities) {
+                if (!(activity instanceof ReadOnlyActivity)) {
+                    readOnly = false;
+                    break;
+                }
+            }
+
+            // Create a session ...
+            Session session = indexLayout.createSession(context, sourceName, workspaceName, overwrite, readOnly);
+            assert session != null;
+
+            // Execute the various activities; 'error' records any failure so the finally block knows
+            // whether to commit or roll back ...
+            Throwable error = null;
+            try {
+                int numChanges = 0;
+                for (Activity activity : activities) {
+                    try {
+                        numChanges += activity.execute(session);
+                    } catch (RuntimeException e) {
+                        error = e;
+                        throw e;
+                    }
+                }
+                if (numChanges > 0) {
+                    // Fold this batch into the running total, then see if it crosses the optimization threshold ...
+                    numChanges = this.modifiedNodesSinceLastOptimize.addAndGet(numChanges);
+                    // Determine if there have been enough changes made to run the optimizer ...
+                    int maxChanges = getMaxChangesBeforeAutomaticOptimization();
+                    if (maxChanges > 0 && numChanges >= maxChanges) {
+                        Activity optimizer = optimizeContent();
+                        try {
+                            optimizer.execute(session);
+                        } catch (RuntimeException e) {
+                            error = e;
+                            throw e;
+                        }
+                    }
+                }
+            } finally {
+                try {
+                    // Commit only if every activity succeeded; otherwise roll back ...
+                    if (error == null) {
+                        session.commit();
+                    } else {
+                        session.rollback();
+                    }
+                } catch (RuntimeException e2) {
+                    // We don't want to lose the existing error, if there is one; a commit failure on its own
+                    // is surfaced as a SearchEngineException ...
+                    if (error == null) {
+                        I18n msg = GraphI18n.errorWhileCommittingIndexChanges;
+                        throw new SearchEngineException(msg.text(workspaceName, sourceName, e2.getMessage()), e2);
+                    }
+                }
+            }
+        }
+    }
+
+ /**
+ * Create an activity that will optimize the indexes.
+ *
+ * @return the activity that will perform the work
+ */
+ protected Activity optimizeContent() {
+ return new Activity() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
+ */
+ public int execute( Session session ) {
+ session.optimize();
+ return 0; // no lines changed
+ }
+
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return GraphI18n.errorWhileOptimizingIndexes.text(sourceName, workspaceName, error.getMessage());
+ }
+ };
+ }
+
+    /**
+     * Create an activity that will read from the source the content at the supplied location and add the content to the search
+     * index. The content is walked breadth-first, one subgraph of {@code depthPerRead} levels at a time.
+     *
+     * @param location the location of the content to read; may not be null
+     * @param depthPerRead the depth of each read operation; always positive
+     * @return the activity that will perform the work; never null
+     */
+    protected Activity addContent( final Location location,
+                                   final int depthPerRead ) {
+        return new Activity() {
+            public int execute( Session session ) {
+
+                // Create a queue that we'll use to walk the content, seeded with the starting location ...
+                LinkedList<Location> locationsToRead = new LinkedList<Location>();
+                locationsToRead.add(location);
+                int count = 0;
+
+                // Now read and index the content ...
+                Graph graph = graph();
+                graph.useWorkspace(session.getWorkspaceName());
+                while (!locationsToRead.isEmpty()) {
+                    // Named 'next' so it does not shadow the captured 'location' parameter used by messageFor(...) ...
+                    Location next = locationsToRead.poll();
+                    if (next == null) continue;
+                    Subgraph subgraph = graph.getSubgraphOfDepth(depthPerRead).at(next);
+                    // Index all of the nodes within this subgraph ...
+                    for (SubgraphNode node : subgraph) {
+                        // Index the node ...
+                        session.index(node);
+                        ++count;
+
+                        // Process the children, queueing any that fall outside this subgraph for a later read ...
+                        for (Location child : node.getChildren()) {
+                            if (!subgraph.includes(child)) {
+                                // Record this location as needing to be read ...
+                                locationsToRead.add(child);
+                            }
+                        }
+                    }
+                }
+                return count;
+            }
+
+            public String messageFor( Throwable error,
+                                      String sourceName,
+                                      String workspaceName ) {
+                String path = readable(location.getPath());
+                return GraphI18n.errorWhileIndexingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
+            }
+        };
+    }
+
+ /**
+ * Create an activity that will remove from the indexes all documents that represent content at or below the specified
+ * location.
+ *
+ * @param location the location of the content to removed; may not be null
+ * @return the activity that will perform the work
+ */
+ protected Activity removeContent( final Location location ) {
+ return new Activity() {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
+ */
+ public int execute( Session session ) {
+ // Delete the content at/below the path ...
+ return session.deleteBelow(location.getPath());
+ }
+
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ String path = readable(location.getPath());
+ return GraphI18n.errorWhileRemovingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will update the indexes with changes that were already made to the content.
+ *
+ * @param changes the changes that have been made to the content; may not be null
+ * @return the activity that will perform the work
+ */
+ protected Activity updateContent( final Iterable<ChangeRequest> changes ) {
+ return new Activity() {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
+ */
+ public int execute( Session session ) {
+ return session.apply(changes);
+ }
+
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return GraphI18n.errorWhileUpdatingContent.text(workspaceName, sourceName, error.getMessage());
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will perform a full-text search given the supplied query.
+ *
+ * @param context the context in which the search is to be performed; may not be null
+ * @param fullTextSearch the full-text search to be performed; may not be null
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
+ * @return the activity that will perform the work; never null
+ */
+ protected Search searchContent( final ExecutionContext context,
+ final String fullTextSearch,
+ final int maxResults,
+ final int offset ) {
+ final List<Location> results = new ArrayList<Location>(maxResults);
+ return new Search() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine.Activity#execute(org.jboss.dna.graph.search.SearchProvider.Session)
+ */
+ public int execute( Session session ) {
+ session.search(context, fullTextSearch, maxResults, offset, results);
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
+ * java.lang.String)
+ */
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return GraphI18n.errorWhilePerformingSearch.text(fullTextSearch, workspaceName, sourceName, error.getMessage());
+ }
+
+ public List<Location> getResults() {
+ return results;
+ }
+ };
+ }
+
+ /**
+ * Create an activity that will perform a query against the index.
+ *
+ * @param context the context in which the search is to be performed; may not be null
+ * @param query the query to be performed; may not be null
+ * @return the activity that will perform the query; never null
+ */
+ protected Query queryContent( final QueryContext context,
+ final QueryCommand query ) {
+ return new Query() {
+ private QueryResults results = null;
+
+ public int execute( Session session ) throws SearchException {
+ results = session.query(context, query);
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
+ * java.lang.String)
+ */
+ public String messageFor( Throwable error,
+ String sourceName,
+ String workspaceName ) {
+ return GraphI18n.errorWhilePerformingQuery.text(query, workspaceName, sourceName, error.getMessage());
+ }
+
+ public QueryResults getResults() {
+ return results;
+ }
+ };
+ }
+
+    /**
+     * Interface for activities that will be executed against a workspace. These activities don't have to commit or roll back the
+     * writer, nor do they have to translate the exceptions, since this is done by the
+     * {@link Workspace#execute(boolean, Activity...)} method.
+     */
+    protected interface Activity {
+
+        /**
+         * Perform the activity using the supplied index session.
+         *
+         * @param indexSession the index session that should be used by the activity; never null
+         * @return the number of changes that were made by this activity
+         */
+        int execute( Session indexSession );
+
+        /**
+         * Translate an exception obtained during {@link #execute(Session) execution} into a single message.
+         *
+         * @param t the exception that occurred during execution
+         * @param sourceName the name of the source
+         * @param workspaceName the name of the workspace
+         * @return the error message
+         */
+        String messageFor( Throwable t,
+                           String sourceName,
+                           String workspaceName );
+    }
+
+    /**
+     * A marker interface for activities that do not modify the indexes; {@link Workspace#execute(boolean, Activity...)} uses it
+     * to open a read-only session when every activity is read-only.
+     */
+    protected interface ReadOnlyActivity extends Activity {
+    }
+
+    /**
+     * A read-only activity that performs a full-text search and collects the matching locations.
+     */
+    protected interface Search extends ReadOnlyActivity {
+        /**
+         * Get the results of the search.
+         *
+         * @return the list of {@link Location} objects for each node satisfying the results; never null
+         */
+        List<Location> getResults();
+    }
+
+    /**
+     * A read-only activity that executes a query and captures its results.
+     */
+    protected interface Query extends ReadOnlyActivity {
+        /**
+         * Get the results of the query.
+         *
+         * @return the results of a query; never null once the activity has been executed
+         */
+        QueryResults getResults();
+    }
+
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngine.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineException.java (from rev 1328, trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngineException.java)
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineException.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchEngineException.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,67 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.search;
+
+/**
+ * An exception that represents a problem within a search engine.
+ */
+public class SearchEngineException extends RuntimeException {
+
+ /**
+ */
+ private static final long serialVersionUID = 1L;
+
+ /**
+ *
+ */
+ public SearchEngineException() {
+ }
+
+ /**
+ * @param message
+ */
+ public SearchEngineException( String message ) {
+ super(message);
+
+ }
+
+ /**
+ * @param cause
+ */
+ public SearchEngineException( Throwable cause ) {
+ super(cause);
+
+ }
+
+ /**
+ * @param message
+ * @param cause
+ */
+ public SearchEngineException( String message,
+ Throwable cause ) {
+ super(message, cause);
+
+ }
+
+}
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,97 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.search;
+
+/**
+ * An exception signalling an error during a search.
+ */
+public class SearchException extends RuntimeException {
+
+ /**
+ */
+ private static final long serialVersionUID = 1L;
+
+ private final String expression;
+
+ /**
+ * Create an exception with the search expression.
+ *
+ * @param expression the search expression
+ */
+ public SearchException( String expression ) {
+ super();
+ this.expression = expression;
+ }
+
+ /**
+ * Create an exception with the search expression and a message.
+ *
+ * @param expression the search expression
+ * @param message the exception message
+ */
+ public SearchException( String expression,
+ String message ) {
+ super(message);
+ assert expression != null;
+ this.expression = expression;
+ }
+
+ /**
+ * Construct a system failure exception with another exception that is the cause of the failure.
+ *
+ * @param expression the search expression
+ * @param cause the original cause of the failure
+ */
+ public SearchException( String expression,
+ Throwable cause ) {
+ super(cause);
+ assert expression != null;
+ this.expression = expression;
+ }
+
+ /**
+ * Construct a system failure exception with a single message and another exception that is the cause of the failure.
+ *
+ * @param expression the search expression
+ * @param message the message describing the failure
+ * @param cause the original cause of the failure
+ */
+ public SearchException( String expression,
+ String message,
+ Throwable cause ) {
+ super(message, cause);
+ assert expression != null;
+ this.expression = expression;
+ }
+
+ /**
+ * Get the search expression.
+ *
+ * @return the search expression; never null
+ */
+ public String getSearchExpression() {
+ return expression;
+ }
+
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchException.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,178 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.search;
+
+import java.io.IOException;
+import java.util.List;
+import net.jcip.annotations.ThreadSafe;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
+import org.jboss.dna.graph.connector.RepositorySource;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.validate.Schemata;
+import org.jboss.dna.graph.request.ChangeRequest;
+
+/**
+ * The representation of a single layout of one or more Lucene indexes.
+ */
+@ThreadSafe
+public interface SearchProvider {
+
+ /**
+ * Create a new session to the indexes.
+ *
+ * @param context the execution context for which this session is to be established; may not be null
+ * @param sourceName the name of the source; may not be null
+ * @param workspaceName the name of the workspace; may not be null
+ * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
+ * @param readOnly true if the resulting session can be optimized for use in read-only situations, or false if the session
+ * needs to allow calling the write methods
+ * @return the session to the indexes; never null
+ */
+ Session createSession( ExecutionContext context,
+ String sourceName,
+ String workspaceName,
+ boolean overwrite,
+ boolean readOnly );
+
+ /**
+ * Destroy the indexes for the workspace with the supplied name.
+ *
+ * @param context the execution context in which the destruction should be performed; may not be null
+ * @param sourceName the name of the source; may not be null
+ * @param workspaceName the name of the workspace; may not be null
+ * @return true if the indexes for the workspace were destroyed, or false if there was no such workspace index
+ * @throws IOException if there is a problem destroying the indexes
+ */
+ boolean destroyIndexes( ExecutionContext context,
+ String sourceName,
+ String workspaceName ) throws IOException;
+
+ /**
+ * A stateful session that is used to interact with the search provider to search a particular source and workspace.
+ */
+ public interface Session {
+
+ /**
+ * Get the name of the {@link RepositorySource repository source} for which this session exists. A session instance will
+ * always return the same name.
+ *
+ * @return the source name; never null
+ */
+ String getSourceName();
+
+ /**
+ * Get the name of the workspace for which this session exists. A session instance will always return the same name.
+ *
+ * @return the workspace name; never null
+ */
+ String getWorkspaceName();
+
+ /**
+ * Get the execution context in which this session is operating.
+ *
+ * @return the execution context; never null
+ */
+ ExecutionContext getContext();
+
+ /**
+ * Return whether this session made changes to the indexed state.
+ *
+ * @return true if change were made, or false otherwise
+ */
+ boolean hasChanges();
+
+ /**
+ * Perform a full-text search given the supplied query.
+ *
+ * @param context the context in which the search should be executed; may not be null
+ * @param fullTextString the full-text query; never null or blank
+ * @param maxResults the maximum number of results that are to be returned; always positive
+ * @param offset the number of initial results to skip, or 0 if the first results are to be returned
+ * @param results the list where the results should be accumulated; never null
+ */
+ void search( ExecutionContext context,
+ String fullTextString,
+ int maxResults,
+ int offset,
+ List<Location> results );
+
+ /**
+ * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model,
+ * with the {@link Schemata} that defines the tables and views that are available to the query, and the set of index
+ * readers (and writers) that should be used.
+ *
+ * @param queryContext the context in which the query should be executed; may not be null
+ * @param query the query; never null
+ * @return the results of the query; never null
+ */
+ QueryResults query( QueryContext queryContext,
+ QueryCommand query );
+
+ /**
+ * Index the node. Changes are recorded only when {@link #commit()} is called.
+ *
+ * @param node the node to be indexed; never null
+ */
+ void index( Node node );
+
+ /**
+ * Update the indexes to reflect the supplied changes to the graph content.
+ *
+ * @param changes the set of changes to the content
+ * @return the (approximate) number of nodes that were affected by the changes
+ * @throws SearchEngineException if there is a problem executing the query
+ */
+ int apply( Iterable<ChangeRequest> changes );
+
+ /**
+ * Remove from the index(es) all of the information pertaining to the nodes at or below the supplied path.
+ *
+ * @param path the path identifying the graph content that is to be removed; never null
+ * @return the (approximate) number of nodes that were affected by the changes
+ */
+ int deleteBelow( Path path );
+
+ /**
+ * Optimize the indexes, if required.
+ */
+ void optimize();
+
+ /**
+ * Close this session by committing all of the changes. This session is no longer usable after this method is called.
+ */
+ void commit();
+
+ /**
+ * Close this session by rolling back all of the changes that have been made. This session is no longer usable after this
+ * method is called.
+ */
+ void rollback();
+ }
+
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/search/SearchProvider.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties
===================================================================
--- trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-graph/src/main/resources/org/jboss/dna/graph/GraphI18n.properties 2009-11-18 19:39:05 UTC (rev 1329)
@@ -147,3 +147,13 @@
expectingLiteralAndUnableToParseAsLong = Expecting literal and unable to parse '{0}' at line {1}, column {2} as a long
expectingLiteralAndUnableToParseAsDouble = Expecting literal and unable to parse '{0}' at line {1}, column {2} as a double
expectingLiteralAndUnableToParseAsDate = Expecting literal and unable to parse '{0}' at line {1}, column {2} as a date
+
+# Search
+errorWhileIndexingContentAtPath = Error while indexing the content at "{0}" in the "{1}" workspace of the "{2}" source: {3}
+errorWhileRemovingContentAtPath = Error while removing the content at/below "{0}" in the "{1}" workspace of the "{2}" source: {3}
+errorWhileUpdatingContent = Error while updating content in the "{0}" workspace of the "{1}" source: {2}
+errorWhileCommittingIndexChanges = Error while committing changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
+errorWhileOptimizingIndexes = Error while optimizing the indexes for the "{0}" workspace of the "{1}" source: {2}
+errorWhilePerformingSearch = Error while searching for "{0}" in the "{1}" workspace of the "{2}" source: {3}
+errorWhilePerformingQuery = Error while performing the query "{0}" against the content in the "{1}" workspace of the "{2}" source: {3}
+errorWhileRemovingIndexesForWorkspace = Error while removing the indexes for the "{0}" workspace of the "{1}" source: {2}
Copied: trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java (from rev 1328, trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java)
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java (rev 0)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,254 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.search;
+
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.stub;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import java.util.List;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Graph;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
+import org.jboss.dna.graph.connector.RepositoryConnection;
+import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
+import org.jboss.dna.graph.connector.RepositorySourceException;
+import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.model.Query;
+import org.jboss.dna.graph.query.validate.Schemata;
+import org.jboss.dna.graph.request.InvalidWorkspaceException;
+import org.jboss.dna.graph.search.SearchProvider.Session;
+import org.junit.Before;
+import org.junit.Test;
+
+public class SearchEngineTest {
+
+ private SearchEngine engine;
+ private ExecutionContext context;
+ private String sourceName;
+ private String workspaceName1;
+ private String workspaceName2;
+ private InMemoryRepositorySource source;
+ private RepositoryConnectionFactory connectionFactory;
+ private SearchProvider provider;
+ private SearchProvider.Session sessionWs1;
+ private SearchProvider.Session sessionWs2;
+ private SearchProvider.Session sessionDefault;
+ private Graph content;
+
+ @Before
+ public void beforeEach() throws Exception {
+ context = new ExecutionContext();
+ sourceName = "sourceA";
+ workspaceName1 = "workspace1";
+ workspaceName2 = "workspace2";
+
+ // Set up the source and graph instance ...
+ source = new InMemoryRepositorySource();
+ source.setName(sourceName);
+ content = Graph.create(source, context);
+
+ // Create the workspaces ...
+ content.createWorkspace().named(workspaceName1);
+ content.createWorkspace().named(workspaceName2);
+
+ // Set up the connection factory ...
+ connectionFactory = new RepositoryConnectionFactory() {
+ @SuppressWarnings( "synthetic-access" )
+ public RepositoryConnection createConnection( String sourceName ) throws RepositorySourceException {
+ return source.getConnection();
+ }
+ };
+
+ // Set up the index layout ...
+ provider = mock(SearchProvider.class);
+ sessionWs1 = mockSession(provider, workspaceName1);
+ sessionWs2 = mockSession(provider, workspaceName2);
+ sessionDefault = mockSession(provider, "");
+
+ // Now set up the search engine ...
+ engine = new SearchEngine(context, sourceName, connectionFactory, provider);
+ }
+
+ protected Session mockSession( SearchProvider mockProvider,
+ String workspaceName ) {
+ Session session = mock(Session.class);
+ stub(mockProvider.createSession(context, sourceName, workspaceName, false, false)).toReturn(session);
+ stub(mockProvider.createSession(context, sourceName, workspaceName, false, true)).toReturn(session);
+ stub(mockProvider.createSession(context, sourceName, workspaceName, true, false)).toReturn(session);
+ stub(mockProvider.createSession(context, sourceName, workspaceName, true, true)).toReturn(session);
+ stub(session.getWorkspaceName()).toReturn(workspaceName);
+ stub(session.getSourceName()).toReturn(sourceName);
+ return session;
+ }
+
+ protected Path path( String path ) {
+ return context.getValueFactories().getPathFactory().create(path);
+ }
+
+ protected void loadContent() throws Exception {
+ // Load some content ...
+ content.useWorkspace(workspaceName1);
+ content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
+ content.useWorkspace(workspaceName2);
+ content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("aircraft.xml")).into("/");
+ }
+
+ @Test
+ public void shouldReturnSearchWorkspaceForExistingWorkspaceInSource() {
+ SearchEngine.Workspace workspace = engine.getWorkspace(workspaceName1);
+ assertThat(workspace, is(notNullValue()));
+ assertThat(workspace.modifiedNodesSinceLastOptimize.get(), is(0));
+ assertThat(workspace.getWorkspaceName(), is(workspaceName1));
+ }
+
+ @Test( expected = InvalidWorkspaceException.class )
+ public void shouldFailToReturnSearchWorkspaceForNonExistantWorkspaceInSource() {
+ engine.getWorkspace(workspaceName1 + "foobar");
+ }
+
+ @Test
+ public void shouldDoNothingDuringRemoveWorkspaceIfWorkspaceHasNotBeenLoaded() throws Exception {
+ engine.removeWorkspace(workspaceName1);
+ verifyZeroInteractions(provider);
+ }
+
+ @Test
+ public void shouldForwardRemoveWorkspaceToIndexLayout() throws Exception {
+ engine.getWorkspace(workspaceName1);
+ engine.removeWorkspace(workspaceName1);
+ verify(provider).destroyIndexes(context, sourceName, workspaceName1);
+ verifyNoMoreInteractions(provider);
+ }
+
+ @Test
+ public void shouldForwardRemoveWorkspaceToIndexLayoutForEachWorkspaceThatWasLoaded() throws Exception {
+ engine.getWorkspace(workspaceName1);
+ engine.removeWorkspaces();
+ verify(provider).destroyIndexes(context, sourceName, workspaceName1);
+ verifyZeroInteractions(provider);
+ }
+
+ @Test
+ public void shouldForwardRemoveWorkspaceToIndexLayoutForAllWorkspacesThatWereLoaded() throws Exception {
+ engine.getWorkspace(workspaceName1);
+ engine.getWorkspace(workspaceName2);
+ engine.removeWorkspaces();
+ verify(provider).destroyIndexes(context, sourceName, workspaceName1);
+ verify(provider).destroyIndexes(context, sourceName, workspaceName2);
+ verifyNoMoreInteractions(provider);
+ }
+
+ @Test( expected = IllegalArgumentException.class )
+ public void shouldFailIfNullWorkspaceNamePassedToRemoveWorkspace() throws Exception {
+ engine.removeWorkspace(null);
+ }
+
+ @Test
+ public void shouldForwardOptimizeOfWorkspaceToIndexSession() throws Exception {
+ engine.optimize(workspaceName1);
+ verify(sessionWs1).optimize();
+ verify(sessionWs1).commit();
+ verifyNoMoreInteractions(sessionWs1);
+ }
+
+ @Test
+ public void shouldForwardOptimizeOfAllWorkspacesToEachIndexSession() throws Exception {
+ engine.optimize(); // will find all three workspaces
+ verify(sessionWs1).optimize();
+ verify(sessionWs1).commit();
+ verifyNoMoreInteractions(sessionWs1);
+ verify(sessionWs2).optimize();
+ verify(sessionWs2).commit();
+ verifyNoMoreInteractions(sessionWs2);
+ verify(sessionDefault).optimize();
+ verify(sessionDefault).commit();
+ verifyNoMoreInteractions(sessionDefault);
+ }
+
+ @Test
+ public void shouldForwardIndexOfWorkspaceToIndexSession() throws Exception {
+ loadContent();
+ engine.index(workspaceName1, 3);
+ verify(sessionWs1, times(18)).index((Node)anyObject());
+ verify(sessionWs1).commit();
+ }
+
+ @Test
+ public void shouldForwardIndexOfSubgraphInWorkspaceToIndexSession() throws Exception {
+ loadContent();
+ engine.index(workspaceName1, path("/Cars"), 3);
+ verify(sessionWs1).deleteBelow(path("/Cars"));
+ verify(sessionWs1, times(17)).index((Node)anyObject());
+ verify(sessionWs1).commit();
+ }
+
+ @Test
+ public void shouldForwardIndexEntireWorkspaceToIndexSession() throws Exception {
+ loadContent();
+ engine.index(workspaceName1, path("/"), 3);
+ verify(sessionWs1, times(18)).index((Node)anyObject());
+ verify(sessionWs1).commit();
+ }
+
+ @Test
+ public void shouldForwardIndexOfAllWorkspacesToEachIndexSession() throws Exception {
+ loadContent();
+ engine.index(3); // will find all three workspaces
+ verify(sessionWs1, times(18)).index((Node)anyObject());
+ verify(sessionWs1).commit();
+ verify(sessionWs2, times(24)).index((Node)anyObject());
+ verify(sessionWs2).commit();
+ verify(sessionDefault, times(1)).index((Node)anyObject());
+ verify(sessionDefault).commit();
+ }
+
+ @SuppressWarnings( "unchecked" )
+ @Test
+ public void shouldForwardSearchToIndexSession() throws Exception {
+ String query = "term1 term2";
+ engine.fullTextSearch(context, workspaceName1, query, 3, 0);
+ verify(sessionWs1).search(eq(context), eq(query), eq(3), eq(0), (List<Location>)anyObject());
+ verify(sessionWs1).commit();
+ }
+
+ @Test
+ public void shouldForwardQueryToIndexSession() throws Exception {
+ Query query = mock(Query.class);
+ Schemata schemata = mock(Schemata.class);
+ engine.query(context, workspaceName1, query, schemata);
+ verify(sessionWs1).query(eq(new QueryContext(context, schemata)), eq(query));
+ verify(sessionWs1).commit();
+ }
+}
Property changes on: trunk/dna-graph/src/test/java/org/jboss/dna/graph/search/SearchEngineTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,59 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.store.Directory;
-
-/**
- * Interface used to obtain the Lucene {@link Directory} instance that should be used for a workspace given the name of the
- * workspace. There are several implementations (see {@link DirectoryConfigurations}), but custom implementations can always be
- * used.
- */
-@ThreadSafe
-public interface DirectoryConfiguration {
- /**
- * Get the {@link Directory} that should be used for the workspace with the supplied name.
- *
- * @param workspaceName the workspace name
- * @param indexName the name of the index to be created
- * @return the directory; never null
- * @throws IllegalArgumentException if the workspace name is null
- * @throws SearchEngineException if there is a problem creating the directory
- */
- Directory getDirectory( String workspaceName,
- String indexName ) throws SearchEngineException;
-
- /**
- * Destroy the {@link Directory} that is used for the workspace with the supplied name.
- *
- * @param workspaceName the workspace name
- * @param indexName the name of the index to be created
- * @return true if the directory existed and was destroyed, or false if the directory didn't exist
- * @throws IllegalArgumentException if the workspace name is null
- * @throws SearchEngineException if there is a problem creating the directory
- */
- boolean destroyDirectory( String workspaceName,
- String indexName ) throws SearchEngineException;
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,426 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.ConcurrentHashMap;
-import net.jcip.annotations.Immutable;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.RAMDirectory;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.common.text.NoOpEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.common.util.FileUtil;
-import org.jboss.dna.common.util.HashCode;
-
-/**
- * A family of {@link DirectoryConfiguration} implementations.
- */
-public class DirectoryConfigurations {
-
- /**
- * Return a new {@link DirectoryConfiguration} that creates in-memory directories.
- *
- * @return the new directory configuration; never null
- */
- public static final DirectoryConfiguration inMemory() {
- return new RamDirectoryFactory();
- }
-
- /**
- * Return a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final DirectoryConfiguration using( File parent ) {
- return new FileSystemDirectoryFromNameFactory(parent);
- }
-
- /**
- * Return a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final DirectoryConfiguration using( File parent,
- LockFactory lockFactory ) {
- return new FileSystemDirectoryFromNameFactory(parent, lockFactory);
- }
-
- /**
- * Return a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final DirectoryConfiguration using( File parent,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- return new FileSystemDirectoryFromNameFactory(parent, workspaceNameEncoder, indexNameEncoder);
- }
-
- /**
- * Return a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
- * workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @return the new directory configuration; never null
- * @throws IllegalArgumentException if the parent file is null
- */
- public static final DirectoryConfiguration using( File parent,
- LockFactory lockFactory,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- return new FileSystemDirectoryFromNameFactory(parent, lockFactory, workspaceNameEncoder, indexNameEncoder);
- }
-
- /**
- * A {@link DirectoryConfiguration} implementation that creates {@link Directory} instances of the supplied type for each
- * workspace and pools the results, ensuring that the same {@link Directory} instance is always returned for the same
- * workspace name.
- *
- * @param <DirectoryType> the concrete type of the directory
- */
- @ThreadSafe
- protected static abstract class PoolingDirectoryFactory<DirectoryType extends Directory> implements DirectoryConfiguration {
- private final ConcurrentHashMap<IndexId, DirectoryType> directories = new ConcurrentHashMap<IndexId, DirectoryType>();
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.DirectoryConfiguration#getDirectory(java.lang.String, java.lang.String)
- */
- public Directory getDirectory( String workspaceName,
- String indexName ) throws SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- IndexId id = new IndexId(workspaceName, indexName);
- DirectoryType result = directories.get(id);
- if (result == null) {
- DirectoryType newDirectory = createDirectory(workspaceName, indexName);
- result = directories.putIfAbsent(id, newDirectory);
- if (result == null) result = newDirectory;
- }
- return result;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.DirectoryConfiguration#destroyDirectory(java.lang.String, java.lang.String)
- */
- public boolean destroyDirectory( String workspaceName,
- String indexName ) throws SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- IndexId id = new IndexId(workspaceName, indexName);
- DirectoryType result = directories.remove(id);
- return result != null ? doDestroy(result) : false;
- }
-
- /**
- * Method implemented by subclasses to create a new Directory implementation.
- *
- * @param workspaceName the name of the workspace for which the {@link Directory} is to be created; never null
- * @param indexName the name of the index to be created
- * @return the new directory; may not be null
- * @throws SearchEngineException if there is a problem creating the directory
- */
- protected abstract DirectoryType createDirectory( String workspaceName,
- String indexName ) throws SearchEngineException;
-
- protected abstract boolean doDestroy( DirectoryType directory ) throws SearchEngineException;
- }
-
- /**
- * A {@link DirectoryConfiguration} implementation that creates {@link RAMDirectory} instances for each workspace and index
- * name. Each factory instance maintains a pool of {@link RAMDirectory} instances, ensuring that the same {@link RAMDirectory}
- * is always returned for the same workspace name.
- */
- @ThreadSafe
- public static class RamDirectoryFactory extends PoolingDirectoryFactory<RAMDirectory> {
- protected RamDirectoryFactory() {
- }
-
- @Override
- protected RAMDirectory createDirectory( String workspaceName,
- String indexName ) {
- return new RAMDirectory();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.DirectoryConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
- */
- @Override
- protected boolean doDestroy( RAMDirectory directory ) throws SearchEngineException {
- return directory != null;
- }
- }
-
- /**
- * A {@link DirectoryConfiguration} implementation that creates {@link FSDirectory} instances for each workspace and index
- * name. This factory is created with a parent directory under which all workspace and index directories are created.
- * <p>
- * This uses the supplied encoders to translate the workspace and index names into valid directory names. By default, no
- * encoding is performed, meaning that the workspace and index names are used explicitly as directory names. This default
- * behavior, then, means that not all values of workspace names or index names will work. If you want to be sure that all
- * workspace names work, supply an encoder for the workspace names. (Index names are currently such that they will always be
- * valid directory names, but you can always supply an encoder if you'd like.)
- * </p>
- */
- public static class FileSystemDirectoryFromNameFactory extends PoolingDirectoryFactory<FSDirectory> {
- private final File parentFile;
- private final LockFactory lockFactory;
- private final TextEncoder workspaceNameEncoder;
- private final TextEncoder indexNameEncoder;
-
- /**
- * Create a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent ) {
- this(parent, null, null, null);
- }
-
- /**
- * Create a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent,
- LockFactory lockFactory ) {
- this(parent, lockFactory, null, null);
- }
-
- /**
- * Create a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- this(parent, null, workspaceNameEncoder, indexNameEncoder);
- }
-
- /**
- * Create a new {@link DirectoryConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
- * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
- * allowable workspace names.
- *
- * @param parent the parent folder
- * @param lockFactory the lock factory; may be null
- * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
- * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
- * @throws IllegalArgumentException if the parent file is null
- */
- protected FileSystemDirectoryFromNameFactory( File parent,
- LockFactory lockFactory,
- TextEncoder workspaceNameEncoder,
- TextEncoder indexNameEncoder ) {
- CheckArg.isNotNull(parent, "parent");
- this.parentFile = parent;
- this.lockFactory = lockFactory;
- this.workspaceNameEncoder = workspaceNameEncoder != null ? workspaceNameEncoder : new NoOpEncoder();
- this.indexNameEncoder = indexNameEncoder != null ? indexNameEncoder : new NoOpEncoder();
- }
-
- @Override
- protected FSDirectory createDirectory( String workspaceName,
- String indexName ) {
- File workspaceFile = new File(parentFile, workspaceNameEncoder.encode(workspaceName));
- if (!workspaceFile.exists()) {
- workspaceFile.mkdirs();
- } else {
- if (!workspaceFile.isDirectory()) {
- I18n msg = SearchI18n.locationForIndexesIsNotDirectory;
- throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
- }
- if (!workspaceFile.canRead()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeRead;
- throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
- }
- if (!workspaceFile.canWrite()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeWritten;
- throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
- }
- }
- File directory = workspaceFile;
- if (indexName != null) {
- File indexFile = new File(workspaceFile, indexNameEncoder.encode(indexName));
- if (!indexFile.exists()) {
- indexFile.mkdirs();
- } else {
- if (!indexFile.isDirectory()) {
- I18n msg = SearchI18n.locationForIndexesIsNotDirectory;
- throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
- }
- if (!indexFile.canRead()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeRead;
- throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
- }
- if (!indexFile.canWrite()) {
- I18n msg = SearchI18n.locationForIndexesCannotBeWritten;
- throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
- }
- }
- directory = indexFile;
- }
- try {
- return create(directory, lockFactory);
- } catch (IOException e) {
- throw new SearchEngineException(e);
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.DirectoryConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
- */
- @Override
- protected boolean doDestroy( FSDirectory directory ) throws SearchEngineException {
- File file = directory.getFile();
- if (file.exists()) {
- return FileUtil.delete(file);
- }
- return false;
- }
-
- /**
- * Override this method to define which subclass of {@link FSDirectory} should be created.
- *
- * @param directory the file system directory; never null
- * @param lockFactory the lock factory; may be null
- * @return the {@link FSDirectory} instance
- * @throws IOException if there is a problem creating the FSDirectory instance
- */
- protected FSDirectory create( File directory,
- LockFactory lockFactory ) throws IOException {
- return FSDirectory.open(directory, lockFactory);
- }
- }
-
- @Immutable
- protected static final class IndexId {
- private final String workspaceName;
- private final String indexName;
- private final int hc;
-
- protected IndexId( String workspaceName,
- String indexName ) {
- assert workspaceName != null;
- this.workspaceName = workspaceName;
- this.indexName = indexName;
- this.hc = HashCode.compute(this.workspaceName, this.indexName);
- }
-
- /**
- * @return indexName
- */
- public String getIndexName() {
- return indexName;
- }
-
- /**
- * @return workspaceName
- */
- public String getWorkspaceName() {
- return workspaceName;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- return hc;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals( Object obj ) {
- if (obj == this) return true;
- if (obj instanceof IndexId) {
- IndexId that = (IndexId)obj;
- if (this.hashCode() != that.hashCode()) return false;
- if (!this.workspaceName.equals(that.workspaceName)) return false;
- if (!this.indexName.equals(that.indexName)) return false;
- return true;
- }
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return indexName != null ? workspaceName + "/" + this.indexName : this.workspaceName;
- }
- }
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,1580 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.document.NumericField;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter.MaxFieldLength;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeQuery;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.regex.JavaUtilRegexCapabilities;
-import org.apache.lucene.search.regex.RegexQuery;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.common.text.NoOpEncoder;
-import org.jboss.dna.common.text.TextEncoder;
-import org.jboss.dna.common.util.Logger;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.property.Binary;
-import org.jboss.dna.graph.property.DateTime;
-import org.jboss.dna.graph.property.DateTimeFactory;
-import org.jboss.dna.graph.property.Name;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.property.PathFactory;
-import org.jboss.dna.graph.property.Property;
-import org.jboss.dna.graph.property.PropertyType;
-import org.jboss.dna.graph.property.ValueFactories;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryEngine;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.model.Length;
-import org.jboss.dna.graph.query.model.NodeDepth;
-import org.jboss.dna.graph.query.model.NodeLocalName;
-import org.jboss.dna.graph.query.model.NodeName;
-import org.jboss.dna.graph.query.model.NodePath;
-import org.jboss.dna.graph.query.model.Operator;
-import org.jboss.dna.graph.query.model.PropertyValue;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.model.Visitors;
-import org.jboss.dna.graph.query.optimize.Optimizer;
-import org.jboss.dna.graph.query.optimize.OptimizerRule;
-import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
-import org.jboss.dna.graph.query.plan.CanonicalPlanner;
-import org.jboss.dna.graph.query.plan.PlanHints;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.plan.Planner;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.QueryProcessor;
-import org.jboss.dna.graph.request.ChangeRequest;
-import org.jboss.dna.search.IndexRules.Rule;
-import org.jboss.dna.search.query.CompareLengthQuery;
-import org.jboss.dna.search.query.CompareNameQuery;
-import org.jboss.dna.search.query.ComparePathQuery;
-import org.jboss.dna.search.query.CompareStringQuery;
-import org.jboss.dna.search.query.MatchNoneQuery;
-import org.jboss.dna.search.query.NotQuery;
-import org.jboss.dna.search.query.UuidsQuery;
-
-/**
- * A simple {@link IndexLayout} implementation that relies upon two separate indexes: one for the node content and a second one
- * for paths and UUIDs.
- */
-@ThreadSafe
-public abstract class DualIndexLayout implements IndexLayout {
-
- // Bounds used when building numeric range queries over indexed values.
- protected static final long MIN_DATE = 0;
- protected static final long MAX_DATE = Long.MAX_VALUE;
- protected static final long MIN_LONG = Long.MIN_VALUE;
- protected static final long MAX_LONG = Long.MAX_VALUE;
- // Double.MIN_VALUE is the smallest POSITIVE double, not the most negative value,
- // so the lower bound of a double range must be -Double.MAX_VALUE.
- protected static final double MIN_DOUBLE = -Double.MAX_VALUE;
- protected static final double MAX_DOUBLE = Double.MAX_VALUE;
- protected static final int MIN_DEPTH = 0;
- protected static final int MAX_DEPTH = 100; // NOTE(review): arbitrary cap on path depth -- confirm
-
- protected static final String PATHS_INDEX_NAME = "paths";
- protected static final String CONTENT_INDEX_NAME = "content";
-
- // Shared between PathIndex.UUID and ContentIndex.UUID so the two indexes can be correlated.
- protected static final String UUID_FIELD = "uuid";
- protected static final String FULL_TEXT_SUFFIX = "/fs"; // the slash character is not allowed in a property name unescaped
-
- /** Field names for documents in the "paths" index (one document per node path). */
- static class PathIndex {
- public static final String PATH = "path";
- public static final String LOCAL_NAME = "name";
- public static final String SNS_INDEX = "sns";
- public static final String UUID = UUID_FIELD;
- public static final String DEPTH = "depth";
- }
-
- /** Field names for documents in the "content" index (one document per node's properties). */
- static class ContentIndex {
- public static final String UUID = UUID_FIELD;
- public static final String FULL_TEXT = "fts";
- }
-
- /**
- * The number of results that should be returned when performing queries while deleting entire branches of content. The
- * current value is {@value} .
- */
- protected static final int SIZE_OF_DELETE_BATCHES = 1000;
-
- // SimpleDateFormat is not thread-safe, so each thread lazily gets its own instance.
- private ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
- @Override
- protected DateFormat initialValue() {
- return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
- }
- };
-
- /**
- * Obtain an immutable {@link FieldSelector} instance that accesses the UUID field.
- */
- protected static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- // LOAD_AND_BREAK stops field loading as soon as the UUID field has been read.
- return PathIndex.UUID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
- }
- };
-
- /**
- * Get the date formatter that can be reused safely within the current thread.
- *
- * @return the date formatter; never null
- */
- protected DateFormat dateFormatter() {
- return dateFormatter.get(); // thread-local; the returned instance must not be shared across threads
- }
-
- /**
- * Get the text encoder that should be used to encode namespaces in the search index.
- *
- * @return the namespace text encoder; never null
- */
- protected TextEncoder getNamespaceEncoder() {
- return new NoOpEncoder(); // protected and non-final, so subclasses may substitute a real encoder
- }
-
- /**
- * Create a Lucene {@link Analyzer} analyzer that should be used for indexing and searching.
- *
- * @return the analyzer; never null
- */
- protected Analyzer createAnalyzer() {
- // NOTE(review): Version.LUCENE_CURRENT changes analysis behavior on library upgrades;
- // consider pinning an explicit version so existing indexes stay readable.
- return new StandardAnalyzer(Version.LUCENE_CURRENT);
- }
-
- protected abstract class LuceneSession implements IndexSession {
- protected final ExecutionContext context;
- protected final String sourceName;
- protected final String workspaceName;
- protected final IndexRules rules;
- private final QueryEngine queryEngine;
- private final Analyzer analyzer;
- private final Directory pathsIndexDirectory;
- private final Directory contentIndexDirectory;
- protected final boolean overwrite;
- protected final boolean readOnly;
- protected final ValueFactory<String> stringFactory;
- protected final DateTimeFactory dateFactory;
- protected final PathFactory pathFactory;
- private int changeCount;
- private IndexReader pathsReader;
- private IndexWriter pathsWriter;
- private IndexSearcher pathsSearcher;
- private IndexReader contentReader;
- private IndexWriter contentWriter;
- private IndexSearcher contentSearcher;
-
- /**
- * Create a session over the two Lucene directories for a single workspace.
- *
- * @param context the execution context; may not be null
- * @param sourceName the name of the source; may not be null
- * @param workspaceName the name of the workspace; may not be null
- * @param rules the index rules controlling how properties are indexed; may not be null
- * @param pathsIndexDirectory the directory holding the paths index; may not be null
- * @param contentIndexDirectory the directory holding the content index; may not be null
- * @param overwrite true if index writers should overwrite any existing index
- * @param readOnly true if this session will never obtain index writers
- */
- protected LuceneSession( ExecutionContext context,
- String sourceName,
- String workspaceName,
- IndexRules rules,
- Directory pathsIndexDirectory,
- Directory contentIndexDirectory,
- boolean overwrite,
- boolean readOnly ) {
- this.context = context;
- this.sourceName = sourceName;
- this.workspaceName = workspaceName;
- this.rules = rules;
- this.overwrite = overwrite;
- this.readOnly = readOnly;
- this.pathsIndexDirectory = pathsIndexDirectory;
- this.contentIndexDirectory = contentIndexDirectory;
- this.analyzer = createAnalyzer();
- this.stringFactory = context.getValueFactories().getStringFactory();
- this.dateFactory = context.getValueFactories().getDateFactory();
- this.pathFactory = context.getValueFactories().getPathFactory();
- assert this.context != null;
- assert this.sourceName != null;
- assert this.workspaceName != null;
- assert this.rules != null;
- assert this.analyzer != null;
- assert this.pathsIndexDirectory != null;
- assert this.contentIndexDirectory != null;
- assert this.stringFactory != null;
- assert this.dateFactory != null;
- // do this last ... (createQueryProcessor() reads the fields assigned above)
- this.queryEngine = createQueryProcessor();
- assert this.queryEngine != null;
- }
-
- /**
- * Create the field name that will be used to store the full-text searchable property values.
- *
- * @param propertyName the name of the property; may not be null
- * @return the field name for the full-text searchable property values; never null
- */
- protected String fullTextFieldName( String propertyName ) {
- return propertyName + FULL_TEXT_SUFFIX; // the '/' in the suffix cannot collide with an unescaped property name
- }
-
- protected IndexReader getPathsReader() throws IOException {
- // Lazily opened; cached until commit() or rollback() closes and clears it.
- if (pathsReader == null) {
- pathsReader = IndexReader.open(pathsIndexDirectory, readOnly);
- }
- return pathsReader;
- }
-
- protected IndexReader getContentReader() throws IOException {
- // Lazily opened; cached until commit() or rollback() closes and clears it.
- if (contentReader == null) {
- contentReader = IndexReader.open(contentIndexDirectory, readOnly);
- }
- return contentReader;
- }
-
- protected IndexWriter getPathsWriter() throws IOException {
- assert !readOnly; // writers are never created in a read-only session
- // Lazily opened; cached until commit() or rollback() commits/closes and clears it.
- if (pathsWriter == null) {
- pathsWriter = new IndexWriter(pathsIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
- }
- return pathsWriter;
- }
-
- protected IndexWriter getContentWriter() throws IOException {
- assert !readOnly; // writers are never created in a read-only session
- // Lazily opened; cached until commit() or rollback() commits/closes and clears it.
- if (contentWriter == null) {
- contentWriter = new IndexWriter(contentIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
- }
- return contentWriter;
- }
-
- protected IndexSearcher getPathsSearcher() throws IOException {
- // Wraps the cached paths reader; commit()/rollback() close the reader, not the searcher.
- if (pathsSearcher == null) {
- pathsSearcher = new IndexSearcher(getPathsReader());
- }
- return pathsSearcher;
- }
-
- protected IndexSearcher getContentSearcher() throws IOException {
- // Wraps the cached content reader; commit()/rollback() close the reader, not the searcher.
- if (contentSearcher == null) {
- contentSearcher = new IndexSearcher(getContentReader());
- }
- return contentSearcher;
- }
-
- protected boolean hasWriters() {
- // True when this session has opened at least one index writer (i.e., it may have pending writes).
- return pathsWriter != null || contentWriter != null;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#getContext()
- */
- public final ExecutionContext getContext() {
- return context; // assigned once in the constructor and asserted non-null there
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#getSourceName()
- */
- public final String getSourceName() {
- return sourceName; // assigned once in the constructor and asserted non-null there
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#getWorkspaceName()
- */
- public String getWorkspaceName() {
- return workspaceName; // assigned once in the constructor and asserted non-null there
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#hasChanges()
- */
- public boolean hasChanges() {
- return changeCount > 0; // changeCount tracks modifications registered through this session
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#index(org.jboss.dna.graph.Node)
- */
- public void index( Node node ) throws IOException {
- assert !readOnly;
- Location location = node.getLocation();
- UUID uuid = location.getUuid();
- if (uuid == null) uuid = UUID.randomUUID(); // artificial UUID correlates the path and content documents
- Path path = location.getPath();
- String uuidStr = stringFactory.create(uuid);
- String pathStr = pathAsString(path, stringFactory);
- String nameStr = path.isRoot() ? "" : stringFactory.create(path.getLastSegment().getName());
- int sns = path.isRoot() ? 1 : path.getLastSegment().getIndex();
-
- Logger logger = Logger.getLogger(getClass());
- if (logger.isTraceEnabled()) {
- logger.trace("indexing {0}", pathStr);
- }
-
- // Create a separate document for the path, which makes it easier to handle moves since the path can
- // be changed without changing any other content fields ...
- Document doc = new Document();
- doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field(PathIndex.LOCAL_NAME, nameStr, Field.Store.YES, Field.Index.ANALYZED));
- // Store the same-name-sibling index under its own SNS_INDEX field; it was being written
- // under LOCAL_NAME (a duplicate of the field above), leaving SNS_INDEX unused.
- doc.add(new NumericField(PathIndex.SNS_INDEX, Field.Store.YES, true).setIntValue(sns));
- doc.add(new Field(PathIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new NumericField(PathIndex.DEPTH, Field.Store.YES, true).setIntValue(path.size()));
- getPathsWriter().addDocument(doc);
-
- // Create the document for the content (properties) ...
- doc = new Document();
- doc.add(new Field(ContentIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
- String stringValue = null;
- StringBuilder fullTextSearchValue = null;
- for (Property property : node.getProperties()) {
- Name name = property.getName();
- Rule rule = rules.getRule(name);
- if (rule.isSkipped()) continue;
- String nameString = stringFactory.create(name);
- if (rule.isDate()) {
- for (Object value : property) {
- if (value == null) continue;
- DateTime dateValue = dateFactory.create(value);
- // Add a separate field for each property value ...
- doc.add(new NumericField(nameString, rule.getStoreOption(), true).setLongValue(dateValue.getMillisecondsInUtc()));
- // Dates are not added to the full-text search field (since this wouldn't make sense)
- }
- continue;
- }
- for (Object value : property) {
- if (value == null) continue;
- if (value instanceof Binary) {
- // don't include binary values as individual fields but do include them in the full-text search ...
- // TODO : add to full-text search ...
- continue;
- }
- stringValue = stringFactory.create(value);
- // Add a separate field for each property value ...
- doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
-
- if (rule.isFullText()) {
- // Add this text to the full-text field ...
- if (fullTextSearchValue == null) {
- fullTextSearchValue = new StringBuilder();
- } else {
- fullTextSearchValue.append(' ');
- }
- fullTextSearchValue.append(stringValue);
-
- // Also create a full-text-searchable field ...
- String fullTextNameString = fullTextFieldName(nameString);
- doc.add(new Field(fullTextNameString, stringValue, Store.NO, Index.ANALYZED));
- }
- }
- }
- // Add the full-text-search field ...
- if (fullTextSearchValue != null) {
- doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO, Field.Index.ANALYZED));
- }
- getContentWriter().addDocument(doc);
- // Record the change; nothing else incremented changeCount, so hasChanges() was always false.
- ++changeCount;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#optimize()
- */
- public void optimize() throws IOException {
- // Obtains writers, so this must not be called on a read-only session (the getters assert this).
- getContentWriter().optimize();
- getPathsWriter().optimize();
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#apply(java.lang.Iterable)
- */
- public int apply( Iterable<ChangeRequest> changes ) /*throws IOException*/{
- // TODO: unimplemented placeholder -- the loop below is a no-op (each non-null change is
- // simply skipped) and the method always reports zero applied changes.
- for (ChangeRequest change : changes) {
- if (change != null) continue;
- }
- return 0;
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * Because this strategy uses multiple indexes, and since there's no correlation between the documents in those indexes,
- * we need to perform the delete in multiple steps. First, we need to perform a query to find out which nodes exist below
- * a certain path. Then, we need to delete those nodes from the paths index. Finally, we need to delete the corresponding
- * documents in the content index that represent those same nodes.
- * </p>
- * <p>
- * Since we don't know how many documents there will be, we perform these steps in batches, where each batch limits the
- * number of results to a maximum number. We repeat batches as long as we find more results. This approach has the
- * advantage that we'll never bring in a large number of results, and it allows us to delete the documents from the
- * content node using a query.
- * </p>
- *
- * @see org.jboss.dna.search.IndexSession#deleteBelow(org.jboss.dna.graph.property.Path)
- */
- public int deleteBelow( Path path ) throws IOException {
- assert !readOnly;
- // Perform a query using the reader to find those nodes at/below the path ...
- try {
- IndexReader pathReader = getPathsReader();
- // NOTE(review): this searcher is never closed; the underlying reader is closed by commit()/rollback().
- IndexSearcher pathSearcher = new IndexSearcher(pathReader);
- // NOTE(review): the trailing '/' means only nodes strictly BELOW the path match; the node
- // at 'path' itself is not deleted -- confirm against the "at/below" comment above.
- String pathStr = stringFactory.create(path) + "/";
- PrefixQuery query = new PrefixQuery(new Term(PathIndex.PATH, pathStr));
- int numberDeleted = 0;
- while (true) {
- // Execute the query and get the results ...
- TopDocs results = pathSearcher.search(query, SIZE_OF_DELETE_BATCHES);
- int numResultsInBatch = results.scoreDocs.length;
- // Walk the results, delete the doc, and add to the query that we'll use against the content index ...
- IndexReader contentReader = getContentReader();
- for (ScoreDoc result : results.scoreDocs) {
- int docId = result.doc;
- // Find the UUID of the node ...
- Document doc = pathReader.document(docId, UUID_FIELD_SELECTOR);
- String uuid = doc.get(PathIndex.UUID);
- // Delete the document from the paths index ...
- pathReader.deleteDocument(docId);
- // Delete the corresponding document from the content index ...
- contentReader.deleteDocuments(new Term(ContentIndex.UUID, uuid));
- }
- numberDeleted += numResultsInBatch;
- // A short batch means the previous search exhausted the matches ...
- if (numResultsInBatch < SIZE_OF_DELETE_BATCHES) break;
- }
- return numberDeleted;
- } catch (FileNotFoundException e) {
- // There are no index files yet, so nothing to delete ...
- return 0;
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#search(org.jboss.dna.graph.ExecutionContext, java.lang.String, int, int,
- * java.util.List)
- */
- public void search( ExecutionContext context,
- String fullTextString,
- int maxResults,
- int offset,
- List<Location> results ) throws IOException, ParseException {
- assert fullTextString != null;
- assert fullTextString.length() > 0;
- assert offset >= 0;
- assert maxResults > 0;
- assert results != null;
-
- // Parse the full-text search and search against the 'fts' field ...
- QueryParser parser = new QueryParser(ContentIndex.FULL_TEXT, createAnalyzer());
- Query query = parser.parse(fullTextString);
- // Fetch offset+maxResults hits so the first 'offset' of them can be skipped below ...
- TopDocs docs = getContentSearcher().search(query, maxResults + offset);
-
- // Collect the results ...
- IndexReader contentReader = getContentReader();
- IndexReader pathReader = getPathsReader();
- IndexSearcher pathSearcher = getPathsSearcher();
- ScoreDoc[] scoreDocs = docs.scoreDocs;
- int numberOfResults = scoreDocs.length;
- if (numberOfResults > offset) {
- // There are enough results to satisfy the offset ...
- PathFactory pathFactory = context.getValueFactories().getPathFactory();
- for (int i = offset, num = scoreDocs.length; i != num; ++i) {
- ScoreDoc result = scoreDocs[i];
- int docId = result.doc;
- // Find the UUID of the node (this UUID might be artificial, so we have to find the path) ...
- Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
- String uuid = doc.get(ContentIndex.UUID);
- // Find the path for this node (is there a better way to do this than one search per UUID?) ...
- TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.UUID, uuid)), 1);
- if (pathDocs.scoreDocs.length < 1) {
- // No path record found ...
- continue;
- }
- Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
- String pathString = pathDoc.get(PathIndex.PATH);
- Path path = pathFactory.create(pathString);
- // Now add the location ...
- results.add(Location.create(path, UUID.fromString(uuid)));
- }
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#query(org.jboss.dna.graph.query.QueryContext,
- * org.jboss.dna.graph.query.model.QueryCommand)
- */
- public QueryResults query( QueryContext queryContext,
- QueryCommand query ) {
- // Delegates to the engine built once per session in createQueryProcessor().
- return queryEngine.execute(queryContext, query);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#commit()
- */
- public void commit() throws IOException {
- IOException ioError = null;
- RuntimeException runtimeError = null;
- if (pathsReader != null) {
- try {
- pathsReader.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsReader = null;
- }
- }
- if (contentReader != null) {
- try {
- contentReader.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- contentReader = null;
- }
- }
- if (pathsWriter != null) {
- try {
- pathsWriter.commit();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- try {
- pathsWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsWriter = null;
- }
- }
- }
- if (contentWriter != null) {
- try {
- contentWriter.commit();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- try {
- contentWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- contentWriter = null;
- }
- }
- }
- if (ioError != null) throw ioError;
- if (runtimeError != null) throw runtimeError;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexSession#rollback()
- */
- public void rollback() throws IOException {
- IOException ioError = null;
- RuntimeException runtimeError = null;
- if (pathsReader != null) {
- try {
- pathsReader.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsReader = null;
- }
- }
- if (contentReader != null) {
- try {
- contentReader.close();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- contentReader = null;
- }
- }
- if (pathsWriter != null) {
- try {
- pathsWriter.rollback();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- try {
- pathsWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- pathsWriter = null;
- }
- }
- }
- if (contentWriter != null) {
- try {
- contentWriter.rollback();
- } catch (IOException e) {
- if (ioError == null) ioError = e;
- } catch (RuntimeException e) {
- if (runtimeError == null) runtimeError = e;
- } finally {
- try {
- contentWriter.close();
- } catch (IOException e) {
- ioError = e;
- } catch (RuntimeException e) {
- runtimeError = e;
- } finally {
- contentWriter = null;
- }
- }
- }
- if (ioError != null) throw ioError;
- if (runtimeError != null) throw runtimeError;
- }
-
- /**
- * Build the {@link QueryEngine} used by {@link #query}: a canonical planner, a rule-based
- * optimizer, and a processor whose ACCESS-node handling is delegated to the abstract
- * {@link #createAccessComponent} implemented by subclasses.
- *
- * @return the query engine; never null
- */
- protected QueryEngine createQueryProcessor() {
- // Create the query engine ...
- Planner planner = new CanonicalPlanner();
- Optimizer optimizer = new RuleBasedOptimizer() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
- * org.jboss.dna.graph.query.plan.PlanHints)
- */
- @Override
- protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
- PlanHints hints ) {
- super.populateRuleStack(ruleStack, hints);
- // Add any custom rules here, either at the front of the stack or at the end
- }
- };
- QueryProcessor processor = new QueryProcessor() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
- * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
- * org.jboss.dna.graph.query.QueryResults.Columns,
- * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
- QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
- try {
- return LuceneSession.this.createAccessComponent(originalQuery,
- context,
- accessNode,
- resultColumns,
- analyzer);
- } catch (IOException e) {
- // Record the failure as a problem on the query context rather than propagating it ...
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- context.getProblems().addError(e,
- msg,
- Visitors.readable(originalQuery),
- getWorkspaceName(),
- getSourceName(),
- e.getMessage());
- return null;
- }
- }
- };
-
- return new QueryEngine(planner, optimizer, processor);
- }
-
- /**
- * Create the {@link ProcessingComponent} that processes the ACCESS node of a query plan by
- * executing against this session's indexes. Called by the processor built in
- * {@link #createQueryProcessor()}, which records any {@link IOException} as a problem.
- *
- * @param originalQuery the original query command
- * @param context the context in which the query is executed
- * @param accessNode the ACCESS plan node to be processed
- * @param resultColumns the columns expected in the results
- * @param analyzer the analyzer for SELECT criteria
- * @return the processing component
- * @throws IOException if there is a problem accessing the indexes
- */
- protected abstract ProcessingComponent createAccessComponent( QueryCommand originalQuery,
- QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer )
- throws IOException;
-
- /**
- * Get the set of UUIDs for the children of the node at the given path.
- *
- * @param parentPath the path to the parent node; may not be null
- * @return the UUIDs of the child nodes; never null but possibly empty
- * @throws IOException if there is an error accessing the indexes
- */
- protected Set<UUID> getUuidsForChildrenOf( Path parentPath ) throws IOException {
- // Find the path of the parent ...
- String stringifiedPath = pathAsString(parentPath, stringFactory);
- // Append a '/' to the parent path, so we'll only get descendants ...
- stringifiedPath = stringifiedPath + '/';
-
- // Create a query to find all the nodes below the parent path ...
- Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
- // Include only the children: descendants at depth parent+1 ...
- int childrenDepth = parentPath.size() + 1;
- Query depthQuery = NumericRangeQuery.newIntRange(PathIndex.DEPTH, childrenDepth, childrenDepth, true, true);
- // And combine ...
- BooleanQuery combinedQuery = new BooleanQuery();
- combinedQuery.add(query, Occur.MUST);
- combinedQuery.add(depthQuery, Occur.MUST);
- query = combinedQuery;
-
- // Now execute and collect the UUIDs ...
- UuidCollector uuidCollector = new UuidCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, uuidCollector);
- return uuidCollector.getUuids();
- }
-
- /**
- * Get the set of UUIDs for the nodes that are descendants of the node at the given path.
- *
- * @param parentPath the path to the parent node; may not be null and <i>may not be the root node</i>
- * @param includeParent true if the parent node should be included in the results, or false if only the descendants should
- * be included
- * @return the UUIDs of the nodes; never null but possibly empty
- * @throws IOException if there is an error accessing the indexes
- */
- protected Set<UUID> getUuidsForDescendantsOf( Path parentPath,
- boolean includeParent ) throws IOException {
- assert !parentPath.isRoot();
-
- // Find the path of the parent ...
- String stringifiedPath = pathAsString(parentPath, stringFactory);
- if (!includeParent) {
- // Append a '/' to the parent path, and we'll only get descendants ...
- stringifiedPath = stringifiedPath + '/';
- }
-
- // Create a prefix query ...
- Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
-
- // Now execute and collect the UUIDs ...
- UuidCollector uuidCollector = new UuidCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, uuidCollector);
- return uuidCollector.getUuids();
- }
-
- /**
- * Get the set containing the single UUID for the node at the given path.
- *
- * @param path the path to the node; may not be null
- * @return the UUID of the supplied node; or null if the node cannot be found
- * @throws IOException if there is an error accessing the indexes
- */
- protected UUID getUuidFor( Path path ) throws IOException {
- // Create a query to find the single node at the supplied path (exact term match) ...
- IndexSearcher searcher = getPathsSearcher();
- String stringifiedPath = pathAsString(path, stringFactory);
- TermQuery query = new TermQuery(new Term(PathIndex.PATH, stringifiedPath));
-
- // Now execute and collect the UUIDs ...
- TopDocs topDocs = searcher.search(query, 1);
- if (topDocs.totalHits == 0) return null; // no node at this path
- Document pathDoc = getPathsReader().document(topDocs.scoreDocs[0].doc);
- String uuidString = pathDoc.get(PathIndex.UUID);
- return UUID.fromString(uuidString);
- }
-
- /**
- * Utility method to create a query to find all of the documents representing nodes with the supplied UUIDs.
- *
- * @param uuids the UUIDs of the nodes that are to be found; may not be null
- * @return the query; never null
- */
- protected Query findAllNodesWithUuids( Set<UUID> uuids ) {
- if (uuids.isEmpty()) {
- // There are no UUIDs, so return a query that matches nothing ...
- return new MatchNoneQuery();
- }
- if (uuids.size() == 1) {
- UUID uuid = uuids.iterator().next();
- if (uuid == null) return new MatchNoneQuery();
- return new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
- }
- if (uuids.size() < 50) {
- // Create an OR boolean query for all the UUIDs, since this is probably more efficient ...
- // (also stays well under BooleanQuery's default clause limit)
- BooleanQuery query = new BooleanQuery();
- for (UUID uuid : uuids) {
- Query uuidQuery = new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
- query.add(uuidQuery, Occur.SHOULD);
- }
- return query;
- }
- // Return a query that will always find all of the UUIDs ...
- return new UuidsQuery(ContentIndex.UUID, uuids, getContext().getValueFactories().getUuidFactory());
- }
-
- /**
- * Return a query that finds all of the documents representing nodes below (not including) the
- * supplied ancestor path; for the root path this matches all documents.
- *
- * @param ancestorPath the path of the ancestor node; may not be null
- * @return the query; never null
- * @throws IOException if there is an error finding the UUIDs of the descendant nodes
- */
- protected Query findAllNodesBelow( Path ancestorPath ) throws IOException {
- if (ancestorPath.isRoot()) {
- return new MatchAllDocsQuery();
- }
- Set<UUID> uuids = getUuidsForDescendantsOf(ancestorPath, false);
- return findAllNodesWithUuids(uuids);
- }
-
- /**
- * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
- * supplied path.
- *
- * @param parentPath the path of the parent node.
- * @return the query; never null
- * @throws IOException if there is an error finding the UUIDs of the child nodes
- */
- protected Query findChildNodes( Path parentPath ) throws IOException {
- if (parentPath.isRoot()) {
- // NOTE(review): this matches ALL documents, not just the root's children -- confirm intent.
- return new MatchAllDocsQuery();
- }
- Set<UUID> childUuids = getUuidsForChildrenOf(parentPath);
- return findAllNodesWithUuids(childUuids);
- }
-
- /**
- * Create a query that can be used to find the one document (or node) that exists at the exact path supplied. This method
- * first queries the {@link PathIndex path index} to find the UUID of the node at the supplied path, and then returns a
- * query that matches the UUID.
- *
- * @param path the path of the node
- * @return the query; or null if no node exists at the supplied path
- * @throws IOException if there is an error finding the UUID for the supplied path
- */
- protected Query findNodeAt( Path path ) throws IOException {
- UUID uuid = getUuidFor(path);
- if (uuid == null) return null;
- return new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
- }
-
- /**
- * Create a query that can be used to find documents (or nodes) that have a field value that satisfies the supplied LIKE
- * expression.
- *
- * @param fieldName the name of the document field to search
- * @param likeExpression the JCR like expression
- * @return the query; never null
- */
- protected Query findNodesLike( String fieldName,
- String likeExpression ) {
- assert likeExpression != null;
- assert likeExpression.length() > 0;
-
- // '%' matches 0 or more characters
- // '_' matches any single character
- // '\x' matches 'x'
- // all other characters match themselves
-
- // Wildcard queries are a better match, but they can be slow and should not be used
- // if the first character of the expression is a '%' or '_' ...
- char firstChar = likeExpression.charAt(0);
- if (firstChar != '%' && firstChar != '_') {
- // Create a wildcard query ...
- String expression = toWildcardExpression(likeExpression);
- return new WildcardQuery(new Term(fieldName, expression));
- }
- // Create a regex query,
- String regex = toRegularExpression(likeExpression);
- RegexQuery query = new RegexQuery(new Term(fieldName, regex));
- query.setRegexImplementation(new JavaUtilRegexCapabilities());
- return query;
- }
-
- protected Query findNodesWith( Length propertyLength,
- Operator operator,
- Object value ) {
- assert propertyLength != null;
- assert value != null;
- PropertyValue propertyValue = propertyLength.getPropertyValue();
- String field = stringFactory.create(propertyValue.getPropertyName());
- ValueFactories factories = context.getValueFactories();
- int length = factories.getLongFactory().create(value).intValue();
- switch (operator) {
- case EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldEqualTo(length, field, factories);
- case NOT_EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldNotEqualTo(length, field, factories);
- case GREATER_THAN:
- return CompareLengthQuery.createQueryForNodesWithFieldGreaterThan(length, field, factories);
- case GREATER_THAN_OR_EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(length, field, factories);
- case LESS_THAN:
- return CompareLengthQuery.createQueryForNodesWithFieldLessThan(length, field, factories);
- case LESS_THAN_OR_EQUAL_TO:
- return CompareLengthQuery.createQueryForNodesWithFieldLessThanOrEqualTo(length, field, factories);
- case LIKE:
- // This is not allowed ...
- assert false;
- break;
- }
- return null;
- }
-
- protected Query findNodesWith( PropertyValue propertyValue,
- Operator operator,
- Object value,
- boolean caseSensitive ) {
- String field = stringFactory.create(propertyValue.getPropertyName());
- PropertyType valueType = PropertyType.discoverType(value);
- ValueFactories factories = context.getValueFactories();
- switch (valueType) {
- case NAME:
- case PATH:
- case REFERENCE:
- case URI:
- case UUID:
- case STRING:
- String stringValue = stringFactory.create(value);
- if (valueType == PropertyType.PATH) {
- stringValue = pathAsString(pathFactory.create(value), stringFactory);
- }
- if (!caseSensitive) stringValue = stringValue.toLowerCase();
- switch (operator) {
- case EQUAL_TO:
- return new TermQuery(new Term(field, stringValue));
- case NOT_EQUAL_TO:
- Query query = new TermQuery(new Term(field, stringValue));
- return new NotQuery(query);
- case GREATER_THAN:
- return CompareStringQuery.createQueryForNodesWithFieldGreaterThan(stringValue,
- field,
- factories,
- caseSensitive);
- case GREATER_THAN_OR_EQUAL_TO:
- return CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(stringValue,
- field,
- factories,
- caseSensitive);
- case LESS_THAN:
- return CompareStringQuery.createQueryForNodesWithFieldLessThan(stringValue,
- field,
- factories,
- caseSensitive);
- case LESS_THAN_OR_EQUAL_TO:
- return CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(stringValue,
- field,
- factories,
- caseSensitive);
- case LIKE:
- return findNodesLike(field, stringValue);
- }
- break;
- case DATE:
- long date = factories.getLongFactory().create(value);
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, date, date, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newLongRange(field, date, date, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newLongRange(field, date, MAX_DATE, false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, date, MAX_DATE, true, true);
- case LESS_THAN:
- return NumericRangeQuery.newLongRange(field, MIN_DATE, date, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, MIN_DATE, date, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case LONG:
- long longValue = factories.getLongFactory().create(value);
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newLongRange(field, longValue, MAX_LONG, false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, longValue, MAX_LONG, true, true);
- case LESS_THAN:
- return NumericRangeQuery.newLongRange(field, MIN_LONG, longValue, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newLongRange(field, MIN_LONG, longValue, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case DECIMAL:
- case DOUBLE:
- double doubleValue = factories.getDoubleFactory().create(value);
- switch (operator) {
- case EQUAL_TO:
- return NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
- case NOT_EQUAL_TO:
- Query query = NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
- return new NotQuery(query);
- case GREATER_THAN:
- return NumericRangeQuery.newDoubleRange(field, doubleValue, MAX_DOUBLE, false, true);
- case GREATER_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newDoubleRange(field, doubleValue, MAX_DOUBLE, true, true);
- case LESS_THAN:
- return NumericRangeQuery.newDoubleRange(field, MIN_DOUBLE, doubleValue, true, false);
- case LESS_THAN_OR_EQUAL_TO:
- return NumericRangeQuery.newDoubleRange(field, MIN_DOUBLE, doubleValue, true, true);
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case BOOLEAN:
- boolean booleanValue = factories.getBooleanFactory().create(value);
- stringValue = stringFactory.create(value);
- switch (operator) {
- case EQUAL_TO:
- return new TermQuery(new Term(field, stringValue));
- case NOT_EQUAL_TO:
- return new TermQuery(new Term(field, stringFactory.create(!booleanValue)));
- case GREATER_THAN:
- if (!booleanValue) {
- return new TermQuery(new Term(field, stringFactory.create(true)));
- }
- // Can't be greater than 'true', per JCR spec
- return new MatchNoneQuery();
- case GREATER_THAN_OR_EQUAL_TO:
- return new TermQuery(new Term(field, stringFactory.create(true)));
- case LESS_THAN:
- if (booleanValue) {
- return new TermQuery(new Term(field, stringFactory.create(false)));
- }
- // Can't be less than 'false', per JCR spec
- return new MatchNoneQuery();
- case LESS_THAN_OR_EQUAL_TO:
- return new TermQuery(new Term(field, stringFactory.create(false)));
- case LIKE:
- // This is not allowed ...
- assert false;
- return null;
- }
- break;
- case OBJECT:
- case BINARY:
- // This is not allowed ...
- assert false;
- return null;
- }
- return null;
- }
-
- protected Query findNodesWithNumericRange( PropertyValue propertyValue,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) {
- String field = stringFactory.create(propertyValue.getPropertyName());
- return findNodesWithNumericRange(field, lowerValue, upperValue, includesLower, includesUpper);
- }
-
- protected Query findNodesWithNumericRange( NodeDepth depth,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) {
- return findNodesWithNumericRange(PathIndex.DEPTH, lowerValue, upperValue, includesLower, includesUpper);
- }
-
- protected Query findNodesWithNumericRange( String field,
- Object lowerValue,
- Object upperValue,
- boolean includesLower,
- boolean includesUpper ) {
- PropertyType type = PropertyType.discoverType(lowerValue);
- assert type == PropertyType.discoverType(upperValue);
- ValueFactories factories = context.getValueFactories();
- switch (type) {
- case DATE:
- long lowerDate = factories.getLongFactory().create(lowerValue);
- long upperDate = factories.getLongFactory().create(upperValue);
- return NumericRangeQuery.newLongRange(field, lowerDate, upperDate, includesLower, includesUpper);
- case LONG:
- long lowerLong = factories.getLongFactory().create(lowerValue);
- long upperLong = factories.getLongFactory().create(upperValue);
- return NumericRangeQuery.newLongRange(field, lowerLong, upperLong, includesLower, includesUpper);
- case DECIMAL:
- case DOUBLE:
- double lowerDouble = factories.getDoubleFactory().create(lowerValue);
- double upperDouble = factories.getDoubleFactory().create(upperValue);
- return NumericRangeQuery.newDoubleRange(field, lowerDouble, upperDouble, includesLower, includesUpper);
- default:
- // This is not allowed ...
- assert false;
- return null;
- }
- }
-
- protected Query findNodesWith( NodePath nodePath,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException {
- if (!caseSensitive) value = stringFactory.create(value).toLowerCase();
- Path pathValue = operator != Operator.LIKE ? pathFactory.create(value) : null;
- Query query = null;
- switch (operator) {
- case EQUAL_TO:
- return findNodeAt(pathValue);
- case NOT_EQUAL_TO:
- return new NotQuery(findNodeAt(pathValue));
- case LIKE:
- String likeExpression = stringFactory.create(value);
- return findNodesLike(PathIndex.PATH, likeExpression);
- case GREATER_THAN:
- query = ComparePathQuery.createQueryForNodesWithPathGreaterThan(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = ComparePathQuery.createQueryForNodesWithPathGreaterThanOrEqualTo(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN:
- query = ComparePathQuery.createQueryForNodesWithPathLessThan(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = ComparePathQuery.createQueryForNodesWithPathLessThanOrEqualTo(pathValue,
- PathIndex.PATH,
- context.getValueFactories(),
- caseSensitive);
- break;
- }
- // Now execute and collect the UUIDs ...
- UuidCollector uuidCollector = new UuidCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, uuidCollector);
- return findAllNodesWithUuids(uuidCollector.getUuids());
- }
-
- protected Query findNodesWith( NodeName nodeName,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException {
- String stringValue = stringFactory.create(value);
- if (!caseSensitive) stringValue = stringValue.toLowerCase();
- Path.Segment segment = operator != Operator.LIKE ? pathFactory.createSegment(stringValue) : null;
- int snsIndex = operator != Operator.LIKE ? segment.getIndex() : 0;
- Query query = null;
- switch (operator) {
- case EQUAL_TO:
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(new TermQuery(new Term(PathIndex.LOCAL_NAME, stringValue)), Occur.MUST);
- booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false),
- Occur.MUST);
- return booleanQuery;
- case NOT_EQUAL_TO:
- booleanQuery = new BooleanQuery();
- booleanQuery.add(new TermQuery(new Term(PathIndex.LOCAL_NAME, stringValue)), Occur.MUST);
- booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, false),
- Occur.MUST);
- return new NotQuery(booleanQuery);
- case GREATER_THAN:
- query = CompareNameQuery.createQueryForNodesWithNameGreaterThan(segment,
- PathIndex.LOCAL_NAME,
- PathIndex.SNS_INDEX,
- context.getValueFactories(),
- caseSensitive);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = CompareNameQuery.createQueryForNodesWithNameGreaterThanOrEqualTo(segment,
- PathIndex.LOCAL_NAME,
- PathIndex.SNS_INDEX,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN:
- query = CompareNameQuery.createQueryForNodesWithNameLessThan(segment,
- PathIndex.LOCAL_NAME,
- PathIndex.SNS_INDEX,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = CompareNameQuery.createQueryForNodesWithNameLessThanOrEqualTo(segment,
- PathIndex.LOCAL_NAME,
- PathIndex.SNS_INDEX,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LIKE:
- // See whether the like expression has brackets ...
- String likeExpression = stringValue;
- int openBracketIndex = likeExpression.indexOf('[');
- if (openBracketIndex != -1) {
- String localNameExpression = likeExpression.substring(0, openBracketIndex);
- String snsIndexExpression = likeExpression.substring(openBracketIndex);
- Query localNameQuery = createLocalNameQuery(localNameExpression);
- Query snsQuery = createSnsIndexQuery(snsIndexExpression);
- if (localNameQuery == null) {
- if (snsQuery == null) {
- query = new MatchNoneQuery();
- } else {
- // There is just an SNS part ...
- query = snsQuery;
- }
- } else {
- // There is a local name part ...
- if (snsQuery == null) {
- query = localNameQuery;
- } else {
- // There is both a local name part and a SNS part ...
- booleanQuery = new BooleanQuery();
- booleanQuery.add(localNameQuery, Occur.MUST);
- booleanQuery.add(snsQuery, Occur.MUST);
- query = booleanQuery;
- }
- }
- } else {
- // There is no SNS expression ...
- query = createLocalNameQuery(likeExpression);
- }
- assert query != null;
- break;
- }
-
- // Now execute and collect the UUIDs ...
- UuidCollector uuidCollector = new UuidCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, uuidCollector);
- return findAllNodesWithUuids(uuidCollector.getUuids());
- }
-
- protected Query findNodesWith( NodeLocalName nodeName,
- Operator operator,
- Object value,
- boolean caseSensitive ) throws IOException {
- String nameValue = stringFactory.create(value);
- Query query = null;
- switch (operator) {
- case LIKE:
- String likeExpression = stringFactory.create(value);
- return findNodesLike(PathIndex.LOCAL_NAME, likeExpression); // already is a query with UUIDs
- case EQUAL_TO:
- query = new TermQuery(new Term(PathIndex.LOCAL_NAME, nameValue));
- break;
- case NOT_EQUAL_TO:
- query = new NotQuery(new TermQuery(new Term(PathIndex.LOCAL_NAME, nameValue)));
- break;
- case GREATER_THAN:
- query = CompareStringQuery.createQueryForNodesWithFieldGreaterThan(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN:
- query = CompareStringQuery.createQueryForNodesWithFieldLessThan(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(nameValue,
- PathIndex.LOCAL_NAME,
- context.getValueFactories(),
- caseSensitive);
- break;
- }
-
- // Now execute and collect the UUIDs ...
- UuidCollector uuidCollector = new UuidCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, uuidCollector);
- return findAllNodesWithUuids(uuidCollector.getUuids());
- }
-
- protected Query findNodesWith( NodeDepth depthConstraint,
- Operator operator,
- Object value ) throws IOException {
- int depth = context.getValueFactories().getLongFactory().create(value).intValue();
- Query query = null;
- switch (operator) {
- case EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
- break;
- case NOT_EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
- query = new NotQuery(query);
- break;
- case GREATER_THAN:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, false, true);
- break;
- case GREATER_THAN_OR_EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, true, true);
- break;
- case LESS_THAN:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, false);
- break;
- case LESS_THAN_OR_EQUAL_TO:
- query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, true);
- break;
- case LIKE:
- // This is not allowed ...
- return null;
- }
-
- // Now execute and collect the UUIDs ...
- UuidCollector uuidCollector = new UuidCollector();
- IndexSearcher searcher = getPathsSearcher();
- searcher.search(query, uuidCollector);
- return findAllNodesWithUuids(uuidCollector.getUuids());
- }
-
- protected Query createLocalNameQuery( String likeExpression ) {
- if (likeExpression == null) return null;
- likeExpression = likeExpression.trim();
- if (likeExpression.length() == 0) return null;
- if (likeExpression.indexOf('?') != -1 || likeExpression.indexOf('*') != -1) {
- // The local name is a like ...
- return findNodesLike(PathIndex.LOCAL_NAME, likeExpression);
- }
- // The local name is an exact match ...
- return new TermQuery(new Term(PathIndex.LOCAL_NAME, likeExpression));
- }
-
- protected Query createSnsIndexQuery( String likeExpression ) {
- if (likeExpression == null) return null;
- likeExpression = likeExpression.trim();
- if (likeExpression.length() == 0) return null;
-
- // Remove the leading '[' ...
- assert likeExpression.charAt(0) == '[';
- likeExpression = likeExpression.substring(1);
-
- // Remove the trailing ']' if it exists ...
- int closeBracketIndex = likeExpression.indexOf(']');
- if (closeBracketIndex != -1) {
- likeExpression = likeExpression.substring(0, closeBracketIndex);
- }
- // If SNS expression contains '?' or '*' ...
- if (likeExpression.indexOf('?') != -1 || likeExpression.indexOf('*') != -1) {
- // There is a LIKE expression for the SNS ...
- return findNodesLike(PathIndex.SNS_INDEX, likeExpression);
- }
- // This is not a LIKE expression but an exact value specification and should be a number ...
- try {
- // This SNS is just a number ...
- int sns = Integer.parseInt(likeExpression);
- return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, sns, sns, true, false);
- } catch (NumberFormatException e) {
- // It's not a number but it's in the SNS field, so there will be no results ...
- return new MatchNoneQuery();
- }
- }
-
- }
-
- /**
- * Convert the JCR like expression to a Lucene wildcard expression. The JCR like expression uses '%' to match 0 or more
- * characters, '_' to match any single character, '\x' to match the 'x' character, and all other characters to match
- * themselves.
- *
- * @param likeExpression the like expression; may not be null
- * @return the expression that can be used with a WildcardQuery; never null
- */
- protected static String toWildcardExpression( String likeExpression ) {
- assert likeExpression != null;
- assert likeExpression.length() > 0;
- return likeExpression.replace('%', '*').replace('_', '?').replaceAll("\\\\(.)", "$1");
- }
-
- /**
- * Convert the JCR like expression to a regular expression. The JCR like expression uses '%' to match 0 or more characters,
- * '_' to match any single character, '\x' to match the 'x' character, and all other characters to match themselves. Note that
- * if any regex metacharacters appear in the like expression, they will be escaped within the resulting regular expression.
- *
- * @param likeExpression the like expression; may not be null
- * @return the expression that can be used with a WildcardQuery; never null
- */
- protected static String toRegularExpression( String likeExpression ) {
- assert likeExpression != null;
- assert likeExpression.length() > 0;
- // Replace all '\x' with 'x' ...
- String result = likeExpression.replaceAll("\\\\(.)", "$1");
- // Escape characters used as metacharacters in regular expressions, including
- // '[', '^', '\', '$', '.', '|', '?', '*', '+', '(', and ')'
- result = result.replaceAll("([[^\\\\$.|?*+()])", "\\$1");
- // Replace '%'->'[.]+' and '_'->'[.]
- result = likeExpression.replace("%", "[.]+").replace("_", "[.]");
- return result;
- }
-
- protected static String pathAsString( Path path,
- ValueFactory<String> stringFactory ) {
- assert path != null;
- if (path.isRoot()) return "/";
- String pathStr = stringFactory.create(path);
- if (!pathStr.endsWith("]")) {
- pathStr = pathStr + '[' + Path.DEFAULT_INDEX + ']';
- }
- return pathStr;
- }
-
- /**
- * A {@link Collector} implementation that only captures the UUID of the documents returned by a query. Score information is
- * not recorded. This is often used when querying the {@link PathIndex} to collect the UUIDs of a set of nodes satisfying some
- * path constraint.
- *
- * @see DualIndexLayout.LuceneSession#findChildNodes(Path)
- */
- protected static class UuidCollector extends Collector {
- private final Set<UUID> uuids = new HashSet<UUID>();
- private String[] uuidsByDocId;
- private int baseDocId;
-
- protected UuidCollector() {
- }
-
- /**
- * Get the UUIDs that have been collected.
- *
- * @return the set of UUIDs; never null
- */
- public Set<UUID> getUuids() {
- return uuids;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
- */
- @Override
- public boolean acceptsDocsOutOfOrder() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
- */
- @Override
- public void setScorer( Scorer scorer ) {
- // we don't care about scoring
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#collect(int)
- */
- @Override
- public void collect( int doc ) {
- int index = doc - baseDocId;
- assert index >= 0;
- String uuidString = uuidsByDocId[index];
- assert uuidString != null;
- uuids.add(UUID.fromString(uuidString));
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public void setNextReader( IndexReader reader,
- int docBase ) throws IOException {
- this.uuidsByDocId = FieldCache.DEFAULT.getStrings(reader, UUID_FIELD);
- this.baseDocId = docBase;
- }
- }
-}
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java (from rev 1328, trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,1578 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriter.MaxFieldLength;
+import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.regex.JavaUtilRegexCapabilities;
+import org.apache.lucene.search.regex.RegexQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Version;
+import org.jboss.dna.common.text.NoOpEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.util.Logger;
+import org.jboss.dna.graph.DnaLexicon;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.Node;
+import org.jboss.dna.graph.property.Binary;
+import org.jboss.dna.graph.property.DateTime;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.property.Property;
+import org.jboss.dna.graph.property.PropertyType;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryEngine;
+import org.jboss.dna.graph.query.QueryResults;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.Length;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.optimize.Optimizer;
+import org.jboss.dna.graph.query.optimize.OptimizerRule;
+import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
+import org.jboss.dna.graph.query.plan.CanonicalPlanner;
+import org.jboss.dna.graph.query.plan.PlanHints;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.Planner;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.QueryProcessor;
+import org.jboss.dna.graph.request.ChangeRequest;
+import org.jboss.dna.graph.search.SearchException;
+import org.jboss.dna.graph.search.SearchProvider;
+import org.jboss.dna.search.IndexRules.Rule;
+import org.jboss.dna.search.query.CompareLengthQuery;
+import org.jboss.dna.search.query.CompareNameQuery;
+import org.jboss.dna.search.query.ComparePathQuery;
+import org.jboss.dna.search.query.CompareStringQuery;
+import org.jboss.dna.search.query.MatchNoneQuery;
+import org.jboss.dna.search.query.NotQuery;
+import org.jboss.dna.search.query.UuidsQuery;
+
+/**
+ * A simple {@link SearchProvider} implementation that relies upon two separate indexes: one for the node content and a second one
+ * for paths and UUIDs.
+ */
+@ThreadSafe
+public class DualIndexSearchProvider implements SearchProvider {
+
+ /**
+ * The default set of {@link IndexRules} used by {@link DualIndexSearchProvider} instances when no rules are provided. These
+ * rules default to index and analyze all properties, and to index the {@link DnaLexicon#UUID dna:uuid} and
+ * {@link JcrLexicon#UUID jcr:uuid} properties to be indexed and stored only (not analyzed and not included in full-text
+ * search. The rules also treat {@link JcrLexicon#CREATED jcr:created} and {@link JcrLexicon#LAST_MODIFIED jcr:lastModified}
+ * properties as dates.
+ */
+ public static final IndexRules DEFAULT_RULES;
+
+ static {
+ IndexRules.Builder builder = IndexRules.createBuilder();
+ // Configure the default behavior ...
+ builder.defaultTo(IndexRules.INDEX | IndexRules.ANALYZE);
+ // Configure the UUID properties to be just indexed (not stored, not analyzed, not included in full-text) ...
+ builder.store(JcrLexicon.UUID, DnaLexicon.UUID);
+ // Configure the properties that we'll treat as dates ...
+ builder.treatAsDates(JcrLexicon.CREATED, JcrLexicon.LAST_MODIFIED);
+ DEFAULT_RULES = builder.build();
+ }
+
+ protected static final long MIN_DATE = 0;
+ protected static final long MAX_DATE = Long.MAX_VALUE;
+ protected static final long MIN_LONG = Long.MIN_VALUE;
+ protected static final long MAX_LONG = Long.MAX_VALUE;
+ protected static final double MIN_DOUBLE = Double.MIN_VALUE;
+ protected static final double MAX_DOUBLE = Double.MAX_VALUE;
+ protected static final int MIN_DEPTH = 0;
+ protected static final int MAX_DEPTH = 100;
+
+ protected static final String PATHS_INDEX_NAME = "paths";
+ protected static final String CONTENT_INDEX_NAME = "content";
+
+ protected static final String UUID_FIELD = "uuid";
+ protected static final String FULL_TEXT_SUFFIX = "/fs"; // the slash character is not allowed in a property name unescaped
+
+ static class PathIndex {
+ public static final String PATH = "path";
+ public static final String LOCAL_NAME = "name";
+ public static final String SNS_INDEX = "sns";
+ public static final String UUID = UUID_FIELD;
+ public static final String DEPTH = "depth";
+ }
+
+ static class ContentIndex {
+ public static final String UUID = UUID_FIELD;
+ public static final String FULL_TEXT = "fts";
+ }
+
+ /**
+ * The number of results that should be returned when performing queries while deleting entire branches of content. The
+ * current value is {@value} .
+ */
+ protected static final int SIZE_OF_DELETE_BATCHES = 1000;
+
+ // Per-thread date formatter, because SimpleDateFormat is not thread-safe and must not be shared across threads ...
+ private ThreadLocal<DateFormat> dateFormatter = new ThreadLocal<DateFormat>() {
+ @Override
+ protected DateFormat initialValue() {
+ return new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss");
+ }
+ };
+
+ /**
+ * Obtain an immutable {@link FieldSelector} instance that accesses the UUID field. LOAD_AND_BREAK tells Lucene to
+ * load the UUID field and then stop reading any further fields of the document.
+ */
+ protected static final FieldSelector UUID_FIELD_SELECTOR = new FieldSelector() {
+ private static final long serialVersionUID = 1L;
+
+ public FieldSelectorResult accept( String fieldName ) {
+ return PathIndex.UUID.equals(fieldName) ? FieldSelectorResult.LOAD_AND_BREAK : FieldSelectorResult.NO_LOAD;
+ }
+ };
+
+ // The rules controlling how properties are indexed, and the factory for the per-workspace Lucene directories ...
+ private final IndexRules rules;
+ private final LuceneConfiguration directoryConfiguration;
+
+ /**
+ * Create a provider that uses the supplied directory configuration and index rules.
+ *
+ * @param directoryConfiguration the source of the Lucene directories; may not be null
+ * @param rules the index rules; may not be null
+ */
+ public DualIndexSearchProvider( LuceneConfiguration directoryConfiguration,
+ IndexRules rules ) {
+ assert directoryConfiguration != null;
+ assert rules != null;
+ this.rules = rules;
+ this.directoryConfiguration = directoryConfiguration;
+ }
+
+ /**
+ * Create a provider that uses the supplied directory configuration and the {@link #DEFAULT_RULES default rules}.
+ *
+ * @param directoryConfiguration the source of the Lucene directories; may not be null
+ */
+ public DualIndexSearchProvider( LuceneConfiguration directoryConfiguration ) {
+ this(directoryConfiguration, DEFAULT_RULES);
+ }
+
+ /**
+ * Get the date formatter that can be reused safely within the current thread.
+ *
+ * @return the date formatter; never null
+ */
+ protected DateFormat dateFormatter() {
+ return dateFormatter.get();
+ }
+
+ /**
+ * Get the text encoder that should be used to encode namespaces in the search index.
+ *
+ * @return the namespace text encoder; never null
+ */
+ protected TextEncoder getNamespaceEncoder() {
+ return new NoOpEncoder();
+ }
+
+ /**
+ * Create a Lucene {@link Analyzer} analyzer that should be used for indexing and searching.
+ *
+ * @return the analyzer; never null
+ */
+ protected Analyzer createAnalyzer() {
+ return new StandardAnalyzer(Version.LUCENE_CURRENT);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider#createSession(org.jboss.dna.graph.ExecutionContext, java.lang.String,
+ * java.lang.String, boolean, boolean)
+ */
+ public SearchProvider.Session createSession( ExecutionContext context,
+ String sourceName,
+ String workspaceName,
+ boolean overwrite,
+ boolean readOnly ) {
+ // Obtain the two per-workspace directories backing the paths and content indexes ...
+ Directory pathsDir = directoryConfiguration.getDirectory(workspaceName, PATHS_INDEX_NAME);
+ assert pathsDir != null;
+ Directory contentDir = directoryConfiguration.getDirectory(workspaceName, CONTENT_INDEX_NAME);
+ assert contentDir != null;
+ // Each session gets its own analyzer instance ...
+ Analyzer sessionAnalyzer = createAnalyzer();
+ assert sessionAnalyzer != null;
+ return new DualIndexSession(context, sourceName, workspaceName, rules, pathsDir, contentDir, sessionAnalyzer,
+ overwrite, readOnly);
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Destroys both the paths and content directories for the workspace; always reports success.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider#destroyIndexes(org.jboss.dna.graph.ExecutionContext, java.lang.String,
+ * java.lang.String)
+ */
+ public boolean destroyIndexes( ExecutionContext context,
+ String sourceName,
+ String workspaceName ) {
+ directoryConfiguration.destroyDirectory(workspaceName, PATHS_INDEX_NAME);
+ directoryConfiguration.destroyDirectory(workspaceName, CONTENT_INDEX_NAME);
+ return true;
+ }
+
+ /** Session that maintains a pair of Lucene indexes — one for node paths, one for node content (properties). */
+ protected class DualIndexSession extends LuceneSession {
+ private final Directory pathsIndexDirectory;
+ private final Directory contentIndexDirectory;
+ // Readers/writers/searchers are created lazily and released in commit()/rollback() ...
+ private IndexReader pathsReader;
+ private IndexWriter pathsWriter;
+ private IndexSearcher pathsSearcher;
+ private IndexReader contentReader;
+ private IndexWriter contentWriter;
+ private IndexSearcher contentSearcher;
+
+ /**
+ * Create a session over the two supplied index directories.
+ *
+ * @param context the execution context; passed to the superclass
+ * @param sourceName the repository source name
+ * @param workspaceName the workspace name
+ * @param rules the index rules controlling how properties are indexed
+ * @param pathsIndexDirectory the directory for the paths index; may not be null
+ * @param contentIndexDirectory the directory for the content index; may not be null
+ * @param analyzer the Lucene analyzer for this session
+ * @param overwrite true if the indexes should be overwritten when first written to
+ * @param readOnly true if this session will not modify the indexes
+ */
+ protected DualIndexSession( ExecutionContext context,
+ String sourceName,
+ String workspaceName,
+ IndexRules rules,
+ Directory pathsIndexDirectory,
+ Directory contentIndexDirectory,
+ Analyzer analyzer,
+ boolean overwrite,
+ boolean readOnly ) {
+ super(context, sourceName, workspaceName, rules, analyzer, overwrite, readOnly);
+ this.pathsIndexDirectory = pathsIndexDirectory;
+ this.contentIndexDirectory = contentIndexDirectory;
+ assert this.pathsIndexDirectory != null;
+ assert this.contentIndexDirectory != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Appends {@link DualIndexSearchProvider#FULL_TEXT_SUFFIX} to the property name; the suffix contains a '/', which
+ * cannot appear unescaped in a property name, so the result can never collide with a real property field.
+ * </p>
+ *
+ * @see org.jboss.dna.search.LuceneSession#fullTextFieldName(java.lang.String)
+ */
+ @Override
+ protected String fullTextFieldName( String propertyName ) {
+ return new StringBuilder().append(propertyName).append(FULL_TEXT_SUFFIX).toString();
+ }
+
+ /** Lazily open (and cache) the reader over the paths index. @throws IOException if the index cannot be opened */
+ protected IndexReader getPathsReader() throws IOException {
+ IndexReader reader = pathsReader;
+ if (reader == null) {
+ reader = IndexReader.open(pathsIndexDirectory, readOnly);
+ pathsReader = reader;
+ }
+ return reader;
+ }
+
+ /** Lazily open (and cache) the reader over the content index. @throws IOException if the index cannot be opened */
+ protected IndexReader getContentReader() throws IOException {
+ IndexReader reader = contentReader;
+ if (reader == null) {
+ reader = IndexReader.open(contentIndexDirectory, readOnly);
+ contentReader = reader;
+ }
+ return reader;
+ }
+
+ /** Lazily create (and cache) the writer for the paths index; only valid for writable sessions. @throws IOException if the writer cannot be created */
+ protected IndexWriter getPathsWriter() throws IOException {
+ assert !readOnly;
+ IndexWriter writer = pathsWriter;
+ if (writer == null) {
+ writer = new IndexWriter(pathsIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
+ pathsWriter = writer;
+ }
+ return writer;
+ }
+
+ /** Lazily create (and cache) the writer for the content index; only valid for writable sessions. @throws IOException if the writer cannot be created */
+ protected IndexWriter getContentWriter() throws IOException {
+ assert !readOnly;
+ IndexWriter writer = contentWriter;
+ if (writer == null) {
+ writer = new IndexWriter(contentIndexDirectory, analyzer, overwrite, MaxFieldLength.UNLIMITED);
+ contentWriter = writer;
+ }
+ return writer;
+ }
+
+ /** Lazily create (and cache) a searcher over the paths index. @throws IOException if the underlying reader cannot be opened */
+ protected IndexSearcher getPathsSearcher() throws IOException {
+ IndexSearcher searcher = pathsSearcher;
+ if (searcher == null) {
+ searcher = new IndexSearcher(getPathsReader());
+ pathsSearcher = searcher;
+ }
+ return searcher;
+ }
+
+ @Override
+ public IndexSearcher getContentSearcher() throws IOException {
+ if (contentSearcher == null) {
+ contentSearcher = new IndexSearcher(getContentReader());
+ }
+ return contentSearcher;
+ }
+
+ /** @return true if this session has opened at least one of the index writers */
+ protected boolean hasWriters() {
+ if (pathsWriter != null) return true;
+ return contentWriter != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Writes two documents per node: one into the paths index (path, local name, same-name-sibling index, UUID, depth)
+ * and one into the content index (UUID, one field per property value, plus an aggregate full-text field). Date
+ * properties are written as numeric fields; binary values are currently skipped.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#index(org.jboss.dna.graph.Node)
+ */
+ public void index( Node node ) {
+ assert !readOnly;
+ Location location = node.getLocation();
+ UUID uuid = location.getUuid();
+ if (uuid == null) uuid = UUID.randomUUID();
+ Path path = location.getPath();
+ String uuidStr = stringFactory.create(uuid);
+ String pathStr = pathAsString(path, stringFactory);
+ String nameStr = path.isRoot() ? "" : stringFactory.create(path.getLastSegment().getName());
+ int sns = path.isRoot() ? 1 : path.getLastSegment().getIndex();
+
+ Logger logger = Logger.getLogger(getClass());
+ if (logger.isTraceEnabled()) {
+ logger.trace("indexing {0}", pathStr);
+ }
+
+ try {
+
+ // Create a separate document for the path, which makes it easier to handle moves since the path can
+ // be changed without changing any other content fields ...
+ Document doc = new Document();
+ doc.add(new Field(PathIndex.PATH, pathStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field(PathIndex.LOCAL_NAME, nameStr, Field.Store.YES, Field.Index.ANALYZED));
+ // The SNS index belongs in its own numeric field (was incorrectly written under LOCAL_NAME,
+ // which clobbered the name field and left PathIndex.SNS_INDEX unpopulated) ...
+ doc.add(new NumericField(PathIndex.SNS_INDEX, Field.Store.YES, true).setIntValue(sns));
+ doc.add(new Field(PathIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new NumericField(PathIndex.DEPTH, Field.Store.YES, true).setIntValue(path.size()));
+ getPathsWriter().addDocument(doc);
+
+ // Create the document for the content (properties) ...
+ doc = new Document();
+ doc.add(new Field(ContentIndex.UUID, uuidStr, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ String stringValue = null;
+ StringBuilder fullTextSearchValue = null;
+ for (Property property : node.getProperties()) {
+ Name name = property.getName();
+ Rule rule = rules.getRule(name);
+ if (!rule.isIncluded()) continue;
+ String nameString = stringFactory.create(name);
+ if (rule.isDate()) {
+ for (Object value : property) {
+ if (value == null) continue;
+ DateTime dateValue = dateFactory.create(value);
+ // Add a separate field for each property value ...
+ doc.add(new NumericField(nameString, rule.getStoreOption(), true).setLongValue(dateValue.getMillisecondsInUtc()));
+ // Dates are not added to the full-text search field (since this wouldn't make sense)
+ }
+ continue;
+ }
+ for (Object value : property) {
+ if (value == null) continue;
+ if (value instanceof Binary) {
+ // don't include binary values as individual fields but do include them in the full-text search ...
+ // TODO : add to full-text search ...
+ continue;
+ }
+ stringValue = stringFactory.create(value);
+ // Add a separate field for each property value ...
+ doc.add(new Field(nameString, stringValue, rule.getStoreOption(), rule.getIndexOption()));
+
+ if (rule.isFullText()) {
+ // Add this text to the full-text field ...
+ if (fullTextSearchValue == null) {
+ fullTextSearchValue = new StringBuilder();
+ } else {
+ fullTextSearchValue.append(' ');
+ }
+ fullTextSearchValue.append(stringValue);
+
+ // Also create a full-text-searchable field ...
+ String fullTextNameString = fullTextFieldName(nameString);
+ doc.add(new Field(fullTextNameString, stringValue, Store.NO, Index.ANALYZED));
+ }
+ }
+ }
+ // Add the full-text-search field ...
+ if (fullTextSearchValue != null) {
+ doc.add(new Field(ContentIndex.FULL_TEXT, fullTextSearchValue.toString(), Field.Store.NO,
+ Field.Index.ANALYZED));
+ }
+ getContentWriter().addDocument(doc);
+ } catch (IOException e) {
+ throw new LuceneException(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Optimizes the content index first and then the paths index; any I/O failure is wrapped in a
+ * {@link LuceneException}. Requires a writable session, since the writers are obtained lazily here.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#optimize()
+ */
+ public void optimize() {
+ try {
+ getContentWriter().optimize();
+ getPathsWriter().optimize();
+ } catch (IOException e) {
+ throw new LuceneException(e);
+
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * NOTE(review): this is currently a no-op placeholder — the loop body (`if (change != null) continue;`) does
+ * nothing with the changes and the method always reports 0 changes applied. Confirm whether this is intentionally
+ * unimplemented.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#apply(java.lang.Iterable)
+ */
+ public int apply( Iterable<ChangeRequest> changes ) {
+ for (ChangeRequest change : changes) {
+ if (change != null) continue;
+ }
+ return 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Because this strategy uses multiple indexes, and since there's no correlation between the documents in those indexes,
+ * we need to perform the delete in multiple steps. First, we need to perform a query to find out which nodes exist below
+ * a certain path. Then, we need to delete those nodes from the paths index. Finally, we need to delete the corresponding
+ * documents in the content index that represent those same nodes.
+ * </p>
+ * <p>
+ * Since we don't know how many documents there will be, we perform these steps in batches, where each batch limits the
+ * number of results to a maximum number. We repeat batches as long as we find more results. This approach has the
+ * advantage that we'll never bring in a large number of results, and it allows us to delete the documents from the
+ * content node using a query.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#deleteBelow(org.jboss.dna.graph.property.Path)
+ */
+ public int deleteBelow( Path path ) {
+ assert !readOnly;
+ // Perform a query using the reader to find those nodes at/below the path ...
+ try {
+ // NOTE(review): IndexReader.deleteDocument requires the reader to be opened with readOnly == false —
+ // confirm that sessions performing deletes are always created writable.
+ IndexReader pathReader = getPathsReader();
+ IndexSearcher pathSearcher = new IndexSearcher(pathReader);
+ // Trailing '/' limits the prefix query to descendants (and excludes siblings with this name as a prefix) ...
+ String pathStr = stringFactory.create(path) + "/";
+ PrefixQuery query = new PrefixQuery(new Term(PathIndex.PATH, pathStr));
+ int numberDeleted = 0;
+ while (true) {
+ // Execute the query and get the results ...
+ TopDocs results = pathSearcher.search(query, SIZE_OF_DELETE_BATCHES);
+ int numResultsInBatch = results.scoreDocs.length;
+ // Walk the results, delete the doc, and add to the query that we'll use against the content index ...
+ IndexReader contentReader = getContentReader();
+ for (ScoreDoc result : results.scoreDocs) {
+ int docId = result.doc;
+ // Find the UUID of the node ...
+ Document doc = pathReader.document(docId, UUID_FIELD_SELECTOR);
+ String uuid = doc.get(PathIndex.UUID);
+ // Delete the document from the paths index ...
+ pathReader.deleteDocument(docId);
+ // Delete the corresponding document from the content index ...
+ contentReader.deleteDocuments(new Term(ContentIndex.UUID, uuid));
+ }
+ numberDeleted += numResultsInBatch;
+ // A short batch means we've consumed all matches ...
+ if (numResultsInBatch < SIZE_OF_DELETE_BATCHES) break;
+ }
+ return numberDeleted;
+ } catch (FileNotFoundException e) {
+ // There are no index files yet, so nothing to delete ...
+ return 0;
+ } catch (IOException e) {
+ throw new LuceneException(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Parses the full-text expression against the content index's aggregate full-text field, fetches up to
+ * {@code maxResults + offset} hits, skips the first {@code offset}, and resolves each hit's UUID back to a path
+ * via one lookup per hit in the paths index. Hits whose path record is missing are silently skipped.
+ * </p>
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#search(org.jboss.dna.graph.ExecutionContext, java.lang.String,
+ * int, int, java.util.List)
+ */
+ public void search( ExecutionContext context,
+ String fullTextString,
+ int maxResults,
+ int offset,
+ List<Location> results ) {
+ assert fullTextString != null;
+ assert fullTextString.length() > 0;
+ assert offset >= 0;
+ assert maxResults > 0;
+ assert results != null;
+
+ try {
+ // Parse the full-text search and search against the 'fts' field ...
+ QueryParser parser = new QueryParser(ContentIndex.FULL_TEXT, createAnalyzer());
+ Query query = parser.parse(fullTextString);
+ TopDocs docs = getContentSearcher().search(query, maxResults + offset);
+
+ // Collect the results ...
+ IndexReader contentReader = getContentReader();
+ IndexReader pathReader = getPathsReader();
+ IndexSearcher pathSearcher = getPathsSearcher();
+ ScoreDoc[] scoreDocs = docs.scoreDocs;
+ int numberOfResults = scoreDocs.length;
+ if (numberOfResults > offset) {
+ // There are enough results to satisfy the offset ...
+ PathFactory pathFactory = context.getValueFactories().getPathFactory();
+ for (int i = offset, num = scoreDocs.length; i != num; ++i) {
+ ScoreDoc result = scoreDocs[i];
+ int docId = result.doc;
+ // Find the UUID of the node (this UUID might be artificial, so we have to find the path) ...
+ Document doc = contentReader.document(docId, UUID_FIELD_SELECTOR);
+ String uuid = doc.get(ContentIndex.UUID);
+ // Find the path for this node (is there a better way to do this than one search per UUID?) ...
+ TopDocs pathDocs = pathSearcher.search(new TermQuery(new Term(PathIndex.UUID, uuid)), 1);
+ if (pathDocs.scoreDocs.length < 1) {
+ // No path record found ...
+ continue;
+ }
+ Document pathDoc = pathReader.document(pathDocs.scoreDocs[0].doc);
+ String pathString = pathDoc.get(PathIndex.PATH);
+ Path path = pathFactory.create(pathString);
+ // Now add the location ...
+ results.add(Location.create(path, UUID.fromString(uuid)));
+ }
+ }
+ } catch (ParseException e) {
+ String msg = SearchI18n.errorWhilePerformingSearch.text(workspaceName, sourceName, fullTextString, e.getMessage());
+ throw new SearchException(fullTextString, msg, e);
+ } catch (IOException e) {
+ throw new LuceneException(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#query(org.jboss.dna.graph.query.QueryContext,
+ * org.jboss.dna.graph.query.model.QueryCommand)
+ */
+ public QueryResults query( QueryContext queryContext,
+ QueryCommand query ) {
+ return queryEngine().execute(queryContext, query);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#commit()
+ */
+ public void commit() {
+ IOException ioError = null;
+ RuntimeException runtimeError = null;
+ if (pathsReader != null) {
+ try {
+ pathsReader.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsReader = null;
+ }
+ }
+ if (contentReader != null) {
+ try {
+ contentReader.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentReader = null;
+ }
+ }
+ if (pathsWriter != null) {
+ try {
+ pathsWriter.commit();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ try {
+ pathsWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsWriter = null;
+ }
+ }
+ }
+ if (contentWriter != null) {
+ try {
+ contentWriter.commit();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ try {
+ contentWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ contentWriter = null;
+ }
+ }
+ }
+ if (ioError != null) {
+ String msg = SearchI18n.errorWhileCommittingIndexChanges.text(workspaceName, sourceName, ioError.getMessage());
+ throw new LuceneException(msg, ioError);
+ }
+ if (runtimeError != null) throw runtimeError;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#rollback()
+ */
+ public void rollback() {
+ IOException ioError = null;
+ RuntimeException runtimeError = null;
+ if (pathsReader != null) {
+ try {
+ pathsReader.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsReader = null;
+ }
+ }
+ if (contentReader != null) {
+ try {
+ contentReader.close();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ contentReader = null;
+ }
+ }
+ if (pathsWriter != null) {
+ try {
+ pathsWriter.rollback();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ try {
+ pathsWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ pathsWriter = null;
+ }
+ }
+ }
+ if (contentWriter != null) {
+ try {
+ contentWriter.rollback();
+ } catch (IOException e) {
+ if (ioError == null) ioError = e;
+ } catch (RuntimeException e) {
+ if (runtimeError == null) runtimeError = e;
+ } finally {
+ try {
+ contentWriter.close();
+ } catch (IOException e) {
+ ioError = e;
+ } catch (RuntimeException e) {
+ runtimeError = e;
+ } finally {
+ contentWriter = null;
+ }
+ }
+ }
+ if (ioError != null) {
+ String msg = SearchI18n.errorWhileRollingBackIndexChanges.text(workspaceName, sourceName, ioError.getMessage());
+ throw new LuceneException(msg, ioError);
+ }
+ if (runtimeError != null) throw runtimeError;
+ }
+
+ /**
+ * Create the query engine used by this session: a canonical planner, a rule-based optimizer (with a hook for
+ * custom rules), and a processor whose access components are created by this session.
+ *
+ * @return the query engine; never null
+ */
+ protected QueryEngine createQueryProcessor() {
+ // Create the query engine ...
+ Planner planner = new CanonicalPlanner();
+ Optimizer optimizer = new RuleBasedOptimizer() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
+ * org.jboss.dna.graph.query.plan.PlanHints)
+ */
+ @Override
+ protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
+ PlanHints hints ) {
+ super.populateRuleStack(ruleStack, hints);
+ // Add any custom rules here, either at the front of the stack or at the end
+ }
+ };
+ QueryProcessor processor = new QueryProcessor() {
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
+ * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
+ * org.jboss.dna.graph.query.QueryResults.Columns,
+ * org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
+ */
+ @Override
+ protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+ QueryContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
+ // Delegate to the enclosing session so access queries run against these Lucene indexes ...
+ return DualIndexSession.this.createAccessComponent(originalQuery,
+ context,
+ accessNode,
+ resultColumns,
+ analyzer);
+ }
+ };
+
+ return new QueryEngine(planner, optimizer, processor);
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Each access query is executed by a {@link LuceneQueryComponent} bound to this session.
+ * </p>
+ *
+ * @see org.jboss.dna.search.LuceneSession#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
+ * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
+ * org.jboss.dna.graph.query.QueryResults.Columns, org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
+ */
+ @Override
+ protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+ QueryContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
+ // Create a processing component for this access query ...
+ return new LuceneQueryComponent(this, originalQuery, context, resultColumns, accessNode, analyzer, sourceName,
+ workspaceName);
+ }
+
+ /**
+ * Get the set of UUIDs for the children of the node at the given path.
+ *
+ * @param parentPath the path to the parent node; may not be null
+ * @return the UUIDs of the child nodes; never null but possibly empty
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected Set<UUID> getUuidsForChildrenOf( Path parentPath ) throws IOException {
+ // Find the path of the parent ...
+ String stringifiedPath = pathAsString(parentPath, stringFactory);
+ // Append a '/' to the parent path, so we'll only get descendants ...
+ stringifiedPath = stringifiedPath + '/';
+
+ // Create a query to find all the nodes below the parent path ...
+ Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
+ // Include only the children, i.e. descendants exactly one level deeper than the parent ...
+ int childrenDepth = parentPath.size() + 1;
+ Query depthQuery = NumericRangeQuery.newIntRange(PathIndex.DEPTH, childrenDepth, childrenDepth, true, true);
+ // And combine (both clauses must match) ...
+ BooleanQuery combinedQuery = new BooleanQuery();
+ combinedQuery.add(query, Occur.MUST);
+ combinedQuery.add(depthQuery, Occur.MUST);
+ query = combinedQuery;
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return uuidCollector.getUuids();
+ }
+
+ /**
+ * Get the set of UUIDs for the nodes that are descendants of the node at the given path.
+ *
+ * @param parentPath the path to the parent node; may not be null and <i>may not be the root node</i>
+ * @param includeParent true if the parent node should be included in the results, or false if only the descendants should
+ * be included
+ * @return the UUIDs of the nodes; never null but possibly empty
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected Set<UUID> getUuidsForDescendantsOf( Path parentPath,
+ boolean includeParent ) throws IOException {
+ assert !parentPath.isRoot();
+
+ // Find the path of the parent ...
+ String stringifiedPath = pathAsString(parentPath, stringFactory);
+ if (!includeParent) {
+ // Append a '/' to the parent path, and we'll only get descendants ...
+ stringifiedPath = stringifiedPath + '/';
+ }
+
+ // Create a prefix query ...
+ Query query = new PrefixQuery(new Term(PathIndex.PATH, stringifiedPath));
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return uuidCollector.getUuids();
+ }
+
+ /**
+ * Get the single UUID for the node at the given path, via an exact-match query on the paths index.
+ *
+ * @param path the path to the node; may not be null
+ * @return the UUID of the supplied node; or null if the node cannot be found
+ * @throws IOException if there is an error accessing the indexes
+ */
+ protected UUID getUuidFor( Path path ) throws IOException {
+ // Create a query to find the node with exactly this path ...
+ IndexSearcher searcher = getPathsSearcher();
+ String stringifiedPath = pathAsString(path, stringFactory);
+ TermQuery query = new TermQuery(new Term(PathIndex.PATH, stringifiedPath));
+
+ // Now execute; at most one document can match an exact path ...
+ TopDocs topDocs = searcher.search(query, 1);
+ if (topDocs.totalHits == 0) return null;
+ Document pathDoc = getPathsReader().document(topDocs.scoreDocs[0].doc);
+ String uuidString = pathDoc.get(PathIndex.UUID);
+ return UUID.fromString(uuidString);
+ }
+
+ /**
+ * Utility method to create a query to find all of the documents representing nodes with the supplied UUIDs.
+ * Chooses the cheapest query form for the number of UUIDs: match-none for zero, a term query for one, a boolean
+ * OR for a small set, and a {@link UuidsQuery} for larger sets.
+ *
+ * @param uuids the UUIDs of the nodes that are to be found; may not be null
+ * @return the query; never null
+ */
+ @Override
+ public Query findAllNodesWithUuids( Set<UUID> uuids ) {
+ if (uuids.isEmpty()) {
+ // There are no nodes, so return a query that matches nothing ...
+ return new MatchNoneQuery();
+ }
+ if (uuids.size() == 1) {
+ UUID uuid = uuids.iterator().next();
+ if (uuid == null) return new MatchNoneQuery();
+ return new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
+ }
+ if (uuids.size() < 50) {
+ // Create an OR boolean query for all the UUIDs, since this is probably more efficient ...
+ BooleanQuery query = new BooleanQuery();
+ for (UUID uuid : uuids) {
+ Query uuidQuery = new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
+ query.add(uuidQuery, Occur.SHOULD);
+ }
+ return query;
+ }
+ // Return a query that will always find all of the UUIDs ...
+ return new UuidsQuery(ContentIndex.UUID, uuids, getContext().getValueFactories().getUuidFactory());
+ }
+
+ /**
+ * Create a query matching every node below (but not including) the supplied ancestor. The root matches all
+ * documents; otherwise the descendants' UUIDs are resolved via the paths index.
+ *
+ * @param ancestorPath the ancestor path; may not be null
+ * @return the query; never null
+ * @throws IOException if there is an error finding the UUIDs of the descendant nodes
+ */
+ @Override
+ public Query findAllNodesBelow( Path ancestorPath ) throws IOException {
+ if (ancestorPath.isRoot()) return new MatchAllDocsQuery();
+ return findAllNodesWithUuids(getUuidsForDescendantsOf(ancestorPath, false));
+ }
+
+ /**
+ * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
+ * supplied path. The root matches all documents; otherwise the children's UUIDs are resolved via the paths index.
+ *
+ * @param parentPath the path of the parent node.
+ * @return the query; never null
+ * @throws IOException if there is an error finding the UUIDs of the child nodes
+ */
+ @Override
+ public Query findChildNodes( Path parentPath ) throws IOException {
+ if (parentPath.isRoot()) return new MatchAllDocsQuery();
+ return findAllNodesWithUuids(getUuidsForChildrenOf(parentPath));
+ }
+
+ /**
+ * Create a query that can be used to find the one document (or node) that exists at the exact path supplied. This method
+ * first queries the {@link PathIndex path index} to find the UUID of the node at the supplied path, and then returns a
+ * query that matches the UUID.
+ *
+ * @param path the path of the node
+ * @return the query, or null if no node exists at the path
+ * @throws IOException if there is an error finding the UUID for the supplied path
+ */
+ @Override
+ public Query findNodeAt( Path path ) throws IOException {
+ final UUID uuid = getUuidFor(path);
+ return uuid == null ? null : new TermQuery(new Term(ContentIndex.UUID, uuid.toString()));
+ }
+
+ /**
+ * Create a query that can be used to find documents (or nodes) that have a field value that satisfies the supplied LIKE
+ * expression.
+ *
+ * @param fieldName the name of the document field to search
+ * @param likeExpression the JCR like expression
+ * @return the query; never null
+ */
+ @Override
+ public Query findNodesLike( String fieldName,
+ String likeExpression ) {
+ assert likeExpression != null;
+ assert likeExpression.length() > 0;
+
+ // '%' matches 0 or more characters
+ // '_' matches any single character
+ // '\x' matches 'x'
+ // all other characters match themselves
+
+ // Wildcard queries are a better match, but they can be slow and should not be used
+ // if the first character of the expression is a '%' or '_' ...
+ char firstChar = likeExpression.charAt(0);
+ if (firstChar != '%' && firstChar != '_') {
+ // Create a wildcard query ...
+ String expression = toWildcardExpression(likeExpression);
+ return new WildcardQuery(new Term(fieldName, expression));
+ }
+ // Leading wildcard: fall back to a regex query evaluated with java.util.regex ...
+ String regex = toRegularExpression(likeExpression);
+ RegexQuery query = new RegexQuery(new Term(fieldName, regex));
+ query.setRegexImplementation(new JavaUtilRegexCapabilities());
+ return query;
+ }
+
+ /**
+ * Create a query comparing the length of a property value against the supplied value using the given operator.
+ * LIKE is not a valid operator for lengths and yields null (after an assertion in debug builds).
+ *
+ * @param propertyLength the length operand; may not be null
+ * @param operator the comparison operator
+ * @param value the value to compare the length against; converted to an int; may not be null
+ * @return the query, or null if the operator is not applicable
+ */
+ @Override
+ public Query findNodesWith( Length propertyLength,
+ Operator operator,
+ Object value ) {
+ assert propertyLength != null;
+ assert value != null;
+ PropertyValue propertyValue = propertyLength.getPropertyValue();
+ String field = stringFactory.create(propertyValue.getPropertyName());
+ ValueFactories factories = context.getValueFactories();
+ int length = factories.getLongFactory().create(value).intValue();
+ switch (operator) {
+ case EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldEqualTo(length, field, factories);
+ case NOT_EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldNotEqualTo(length, field, factories);
+ case GREATER_THAN:
+ return CompareLengthQuery.createQueryForNodesWithFieldGreaterThan(length, field, factories);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(length, field, factories);
+ case LESS_THAN:
+ return CompareLengthQuery.createQueryForNodesWithFieldLessThan(length, field, factories);
+ case LESS_THAN_OR_EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldLessThanOrEqualTo(length, field, factories);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ break;
+ }
+ return null;
+ }
+
+ @Override
+ public Query findNodesWith( PropertyValue propertyValue,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) {
+ String field = stringFactory.create(propertyValue.getPropertyName());
+ PropertyType valueType = PropertyType.discoverType(value);
+ ValueFactories factories = context.getValueFactories();
+ switch (valueType) {
+ case NAME:
+ case PATH:
+ case REFERENCE:
+ case URI:
+ case UUID:
+ case STRING:
+ String stringValue = stringFactory.create(value);
+ if (valueType == PropertyType.PATH) {
+ stringValue = pathAsString(pathFactory.create(value), stringFactory);
+ }
+ if (!caseSensitive) stringValue = stringValue.toLowerCase();
+ switch (operator) {
+ case EQUAL_TO:
+ return new TermQuery(new Term(field, stringValue));
+ case NOT_EQUAL_TO:
+ Query query = new TermQuery(new Term(field, stringValue));
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return CompareStringQuery.createQueryForNodesWithFieldGreaterThan(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case LESS_THAN:
+ return CompareStringQuery.createQueryForNodesWithFieldLessThan(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case LESS_THAN_OR_EQUAL_TO:
+ return CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(stringValue,
+ field,
+ factories,
+ caseSensitive);
+ case LIKE:
+ return findNodesLike(field, stringValue);
+ }
+ break;
+ case DATE:
+ long date = factories.getLongFactory().create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, date, date, true, true);
+ case NOT_EQUAL_TO:
+ Query query = NumericRangeQuery.newLongRange(field, date, date, true, true);
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return NumericRangeQuery.newLongRange(field, date, MAX_DATE, false, true);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, date, MAX_DATE, true, true);
+ case LESS_THAN:
+ return NumericRangeQuery.newLongRange(field, MIN_DATE, date, true, false);
+ case LESS_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, MIN_DATE, date, true, true);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case LONG:
+ long longValue = factories.getLongFactory().create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
+ case NOT_EQUAL_TO:
+ Query query = NumericRangeQuery.newLongRange(field, longValue, longValue, true, true);
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return NumericRangeQuery.newLongRange(field, longValue, MAX_LONG, false, true);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, longValue, MAX_LONG, true, true);
+ case LESS_THAN:
+ return NumericRangeQuery.newLongRange(field, MIN_LONG, longValue, true, false);
+ case LESS_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newLongRange(field, MIN_LONG, longValue, true, true);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case DECIMAL:
+ case DOUBLE:
+ double doubleValue = factories.getDoubleFactory().create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
+ case NOT_EQUAL_TO:
+ Query query = NumericRangeQuery.newDoubleRange(field, doubleValue, doubleValue, true, true);
+ return new NotQuery(query);
+ case GREATER_THAN:
+ return NumericRangeQuery.newDoubleRange(field, doubleValue, MAX_DOUBLE, false, true);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newDoubleRange(field, doubleValue, MAX_DOUBLE, true, true);
+ case LESS_THAN:
+ return NumericRangeQuery.newDoubleRange(field, MIN_DOUBLE, doubleValue, true, false);
+ case LESS_THAN_OR_EQUAL_TO:
+ return NumericRangeQuery.newDoubleRange(field, MIN_DOUBLE, doubleValue, true, true);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case BOOLEAN:
+ boolean booleanValue = factories.getBooleanFactory().create(value);
+ stringValue = stringFactory.create(value);
+ switch (operator) {
+ case EQUAL_TO:
+ return new TermQuery(new Term(field, stringValue));
+ case NOT_EQUAL_TO:
+ return new TermQuery(new Term(field, stringFactory.create(!booleanValue)));
+ case GREATER_THAN:
+ if (!booleanValue) {
+ return new TermQuery(new Term(field, stringFactory.create(true)));
+ }
+ // Can't be greater than 'true', per JCR spec
+ return new MatchNoneQuery();
+ case GREATER_THAN_OR_EQUAL_TO:
+ return new TermQuery(new Term(field, stringFactory.create(true)));
+ case LESS_THAN:
+ if (booleanValue) {
+ return new TermQuery(new Term(field, stringFactory.create(false)));
+ }
+ // Can't be less than 'false', per JCR spec
+ return new MatchNoneQuery();
+ case LESS_THAN_OR_EQUAL_TO:
+ return new TermQuery(new Term(field, stringFactory.create(false)));
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ break;
+ case OBJECT:
+ case BINARY:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ return null;
+ }
+
+ /**
+ * Create a query for nodes whose named property value lies in the supplied numeric range.
+ * Resolves the property name to its index field name and delegates to the field-based overload.
+ */
+ @Override
+ public Query findNodesWithNumericRange( PropertyValue propertyValue,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ String field = stringFactory.create(propertyValue.getPropertyName());
+ return findNodesWithNumericRange(field, lowerValue, upperValue, includesLower, includesUpper);
+ }
+
+ /**
+ * Create a query for nodes whose depth lies in the supplied numeric range.
+ * Delegates to the field-based overload using the depth field of the path index.
+ */
+ @Override
+ public Query findNodesWithNumericRange( NodeDepth depth,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ return findNodesWithNumericRange(PathIndex.DEPTH, lowerValue, upperValue, includesLower, includesUpper);
+ }
+
+ /**
+ * Create a range query against the supplied index field. The value type is discovered from the
+ * lower bound; both bounds are expected to be of the same type (asserted below). Only DATE, LONG,
+ * DECIMAL and DOUBLE values are supported; any other type is a programming error and yields null.
+ */
+ protected Query findNodesWithNumericRange( String field,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ PropertyType type = PropertyType.discoverType(lowerValue);
+ assert type == PropertyType.discoverType(upperValue);
+ ValueFactories factories = context.getValueFactories();
+ switch (type) {
+ case DATE:
+ // Dates are indexed as longs (millis) ...
+ long lowerDate = factories.getLongFactory().create(lowerValue);
+ long upperDate = factories.getLongFactory().create(upperValue);
+ return NumericRangeQuery.newLongRange(field, lowerDate, upperDate, includesLower, includesUpper);
+ case LONG:
+ long lowerLong = factories.getLongFactory().create(lowerValue);
+ long upperLong = factories.getLongFactory().create(upperValue);
+ return NumericRangeQuery.newLongRange(field, lowerLong, upperLong, includesLower, includesUpper);
+ case DECIMAL:
+ case DOUBLE:
+ // Decimals are coerced to doubles for range queries ...
+ double lowerDouble = factories.getDoubleFactory().create(lowerValue);
+ double upperDouble = factories.getDoubleFactory().create(upperValue);
+ return NumericRangeQuery.newDoubleRange(field, lowerDouble, upperDouble, includesLower, includesUpper);
+ default:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ }
+
+ /**
+ * Create a query for nodes whose path satisfies the supplied operator and value. EQUAL_TO,
+ * NOT_EQUAL_TO and LIKE are answered directly; the ordering operators build a path-comparison
+ * query that is executed against the paths index, and the matching nodes' UUIDs are turned into
+ * the returned query.
+ */
+ @Override
+ public Query findNodesWith( NodePath nodePath,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ // Normalize case before parsing the path, if requested ...
+ if (!caseSensitive) value = stringFactory.create(value).toLowerCase();
+ // LIKE expressions may not be parseable as a path, so only parse for the other operators ...
+ Path pathValue = operator != Operator.LIKE ? pathFactory.create(value) : null;
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ return findNodeAt(pathValue);
+ case NOT_EQUAL_TO:
+ return new NotQuery(findNodeAt(pathValue));
+ case LIKE:
+ String likeExpression = stringFactory.create(value);
+ return findNodesLike(PathIndex.PATH, likeExpression);
+ case GREATER_THAN:
+ query = ComparePathQuery.createQueryForNodesWithPathGreaterThan(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = ComparePathQuery.createQueryForNodesWithPathGreaterThanOrEqualTo(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = ComparePathQuery.createQueryForNodesWithPathLessThan(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = ComparePathQuery.createQueryForNodesWithPathLessThanOrEqualTo(pathValue,
+ PathIndex.PATH,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ }
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ /**
+ * Create a query for nodes whose name (local name plus same-name-sibling index) satisfies the
+ * supplied operator and value. EQUAL_TO and NOT_EQUAL_TO combine an exact local-name term with an
+ * exact SNS-index range; LIKE splits the expression at '[' into a local-name part and an SNS part;
+ * the ordering operators delegate to CompareNameQuery and are then executed against the paths
+ * index, turning the matching UUIDs into the returned query.
+ */
+ @Override
+ public Query findNodesWith( NodeName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ String stringValue = stringFactory.create(value);
+ if (!caseSensitive) stringValue = stringValue.toLowerCase();
+ // LIKE expressions may not parse as a segment, so only parse for the other operators ...
+ Path.Segment segment = operator != Operator.LIKE ? pathFactory.createSegment(stringValue) : null;
+ int snsIndex = operator != Operator.LIKE ? segment.getIndex() : 0;
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ BooleanQuery booleanQuery = new BooleanQuery();
+ booleanQuery.add(new TermQuery(new Term(PathIndex.LOCAL_NAME, stringValue)), Occur.MUST);
+ // Both bounds must be inclusive: an exclusive upper bound with min == max matches nothing ...
+ booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, true),
+ Occur.MUST);
+ return booleanQuery;
+ case NOT_EQUAL_TO:
+ booleanQuery = new BooleanQuery();
+ booleanQuery.add(new TermQuery(new Term(PathIndex.LOCAL_NAME, stringValue)), Occur.MUST);
+ // Both bounds must be inclusive: an exclusive upper bound with min == max matches nothing ...
+ booleanQuery.add(NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, snsIndex, snsIndex, true, true),
+ Occur.MUST);
+ return new NotQuery(booleanQuery);
+ case GREATER_THAN:
+ query = CompareNameQuery.createQueryForNodesWithNameGreaterThan(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = CompareNameQuery.createQueryForNodesWithNameGreaterThanOrEqualTo(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = CompareNameQuery.createQueryForNodesWithNameLessThan(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = CompareNameQuery.createQueryForNodesWithNameLessThanOrEqualTo(segment,
+ PathIndex.LOCAL_NAME,
+ PathIndex.SNS_INDEX,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LIKE:
+ // See whether the like expression has brackets ...
+ String likeExpression = stringValue;
+ int openBracketIndex = likeExpression.indexOf('[');
+ if (openBracketIndex != -1) {
+ String localNameExpression = likeExpression.substring(0, openBracketIndex);
+ String snsIndexExpression = likeExpression.substring(openBracketIndex);
+ Query localNameQuery = createLocalNameQuery(localNameExpression);
+ Query snsQuery = createSnsIndexQuery(snsIndexExpression);
+ if (localNameQuery == null) {
+ if (snsQuery == null) {
+ query = new MatchNoneQuery();
+ } else {
+ // There is just an SNS part ...
+ query = snsQuery;
+ }
+ } else {
+ // There is a local name part ...
+ if (snsQuery == null) {
+ query = localNameQuery;
+ } else {
+ // There is both a local name part and a SNS part ...
+ booleanQuery = new BooleanQuery();
+ booleanQuery.add(localNameQuery, Occur.MUST);
+ booleanQuery.add(snsQuery, Occur.MUST);
+ query = booleanQuery;
+ }
+ }
+ } else {
+ // There is no SNS expression ...
+ query = createLocalNameQuery(likeExpression);
+ }
+ assert query != null;
+ break;
+ }
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ /**
+ * Create a query for nodes whose local name satisfies the supplied operator and value. LIKE is
+ * answered directly via a wildcard query; the remaining operators build a query that is executed
+ * against the paths index, turning the matching UUIDs into the returned query.
+ */
+ @Override
+ public Query findNodesWith( NodeLocalName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException {
+ String nameValue = stringFactory.create(value);
+ // Normalize case, mirroring the other findNodesWith(...) methods; the comparison factory
+ // methods also receive 'caseSensitive' for the ordering operators ...
+ if (!caseSensitive) nameValue = nameValue.toLowerCase();
+ Query query = null;
+ switch (operator) {
+ case LIKE:
+ return findNodesLike(PathIndex.LOCAL_NAME, nameValue); // already is a query with UUIDs
+ case EQUAL_TO:
+ query = new TermQuery(new Term(PathIndex.LOCAL_NAME, nameValue));
+ break;
+ case NOT_EQUAL_TO:
+ query = new NotQuery(new TermQuery(new Term(PathIndex.LOCAL_NAME, nameValue)));
+ break;
+ case GREATER_THAN:
+ query = CompareStringQuery.createQueryForNodesWithFieldGreaterThan(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = CompareStringQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN:
+ query = CompareStringQuery.createQueryForNodesWithFieldLessThan(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = CompareStringQuery.createQueryForNodesWithFieldLessThanOrEqualTo(nameValue,
+ PathIndex.LOCAL_NAME,
+ context.getValueFactories(),
+ caseSensitive);
+ break;
+ }
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ @Override
+ public Query findNodesWith( NodeDepth depthConstraint,
+ Operator operator,
+ Object value ) throws IOException {
+ int depth = context.getValueFactories().getLongFactory().create(value).intValue();
+ Query query = null;
+ switch (operator) {
+ case EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
+ break;
+ case NOT_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, depth, true, true);
+ query = new NotQuery(query);
+ break;
+ case GREATER_THAN:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, false, true);
+ break;
+ case GREATER_THAN_OR_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, depth, MAX_DEPTH, true, true);
+ break;
+ case LESS_THAN:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, false);
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ query = NumericRangeQuery.newIntRange(PathIndex.DEPTH, MIN_DEPTH, depth, true, true);
+ break;
+ case LIKE:
+ // This is not allowed ...
+ return null;
+ }
+
+ // Now execute and collect the UUIDs ...
+ UuidCollector uuidCollector = new UuidCollector();
+ IndexSearcher searcher = getPathsSearcher();
+ searcher.search(query, uuidCollector);
+ return findAllNodesWithUuids(uuidCollector.getUuids());
+ }
+
+ /**
+ * Create a query for the local-name portion of a LIKE expression. Returns null if the expression
+ * is null or blank; uses a wildcard query when the expression contains '?' or '*', and an exact
+ * term query otherwise.
+ */
+ protected Query createLocalNameQuery( String likeExpression ) {
+ if (likeExpression == null) return null;
+ likeExpression = likeExpression.trim();
+ if (likeExpression.length() == 0) return null;
+ if (likeExpression.indexOf('?') != -1 || likeExpression.indexOf('*') != -1) {
+ // The local name is a like ...
+ return findNodesLike(PathIndex.LOCAL_NAME, likeExpression);
+ }
+ // The local name is an exact match ...
+ return new TermQuery(new Term(PathIndex.LOCAL_NAME, likeExpression));
+ }
+
+ /**
+ * Create a query for the same-name-sibling portion of a LIKE expression, of the form "[...]" with
+ * the leading '[' required and the trailing ']' optional. Returns null if the expression is null
+ * or blank; uses a wildcard query when the content contains '?' or '*', an exact SNS match when it
+ * is a number, and a match-nothing query otherwise.
+ */
+ protected Query createSnsIndexQuery( String likeExpression ) {
+ if (likeExpression == null) return null;
+ likeExpression = likeExpression.trim();
+ if (likeExpression.length() == 0) return null;
+
+ // Remove the leading '[' ...
+ assert likeExpression.charAt(0) == '[';
+ likeExpression = likeExpression.substring(1);
+
+ // Remove the trailing ']' if it exists ...
+ int closeBracketIndex = likeExpression.indexOf(']');
+ if (closeBracketIndex != -1) {
+ likeExpression = likeExpression.substring(0, closeBracketIndex);
+ }
+ // If SNS expression contains '?' or '*' ...
+ if (likeExpression.indexOf('?') != -1 || likeExpression.indexOf('*') != -1) {
+ // There is a LIKE expression for the SNS ...
+ return findNodesLike(PathIndex.SNS_INDEX, likeExpression);
+ }
+ // This is not a LIKE expression but an exact value specification and should be a number ...
+ try {
+ // This SNS is just a number; both bounds must be inclusive, since an exclusive upper
+ // bound with min == max matches nothing ...
+ int sns = Integer.parseInt(likeExpression);
+ return NumericRangeQuery.newIntRange(PathIndex.SNS_INDEX, sns, sns, true, true);
+ } catch (NumberFormatException e) {
+ // It's not a number but it's in the SNS field, so there will be no results ...
+ return new MatchNoneQuery();
+ }
+ }
+
+ }
+
+ /**
+ * A {@link Collector} implementation that only captures the UUID of the documents returned by a query. Score information is
+ * not recorded. This is often used when querying the {@link PathIndex} to collect the UUIDs of a set of nodes satisfying some
+ * path constraint.
+ *
+ * @see DualIndexSearchProvider.DualIndexSession#findChildNodes(Path)
+ */
+ protected static class UuidCollector extends Collector {
+ // The accumulated UUIDs across all index segments ...
+ private final Set<UUID> uuids = new HashSet<UUID>();
+ // Per-segment cache of the UUID field values, keyed by segment-relative document id ...
+ private String[] uuidsByDocId;
+ // The absolute id of the first document in the current segment ...
+ private int baseDocId;
+
+ protected UuidCollector() {
+ }
+
+ /**
+ * Get the UUIDs that have been collected.
+ *
+ * @return the set of UUIDs; never null
+ */
+ public Set<UUID> getUuids() {
+ return uuids;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
+ */
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ // Order doesn't matter since we only accumulate into a set ...
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
+ */
+ @Override
+ public void setScorer( Scorer scorer ) {
+ // we don't care about scoring
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#collect(int)
+ */
+ @Override
+ public void collect( int doc ) {
+ // Translate the absolute doc id to a segment-relative index into the cached UUID strings ...
+ int index = doc - baseDocId;
+ assert index >= 0;
+ String uuidString = uuidsByDocId[index];
+ assert uuidString != null;
+ uuids.add(UUID.fromString(uuidString));
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ public void setNextReader( IndexReader reader,
+ int docBase ) throws IOException {
+ // Load (via the field cache) the UUID field values for this segment ...
+ this.uuidsByDocId = FieldCache.DEFAULT.getStrings(reader, UUID_FIELD);
+ this.baseDocId = docBase;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexSearchProvider.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexLayout.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,66 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import net.jcip.annotations.ThreadSafe;
-import org.jboss.dna.graph.ExecutionContext;
-
-/**
- * The representation of a single layout of one or more Lucene indexes.
- */
-@ThreadSafe
-public interface IndexLayout {
-
- /**
- * Create a new session to the indexes.
- *
- * @param context the execution context for which this session is to be established; may not be null
- * @param sourceName the name of the source; may not be null
- * @param workspaceName the name of the workspace; may not be null
- * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
- * @param readOnly true if the resulting session can be optimized for use in read-only situations, or false if the session
- * needs to allow calling the write methods
- * @return the session to the indexes; never null
- */
- IndexSession createSession( ExecutionContext context,
- String sourceName,
- String workspaceName,
- boolean overwrite,
- boolean readOnly );
-
- /**
- * Destroy the indexes for the workspace with the supplied name.
- *
- * @param context the execution context in which the destruction should be performed; may not be null
- * @param sourceName the name of the source; may not be null
- * @param workspaceName the name of the workspace; may not be null
- * @return true if the indexes for the workspace were destroyed, or false if there was no such workspace index
- * @throws IOException if there is a problem destroying the indexes
- */
- boolean destroyIndexes( ExecutionContext context,
- String sourceName,
- String workspaceName ) throws IOException;
-
-}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexRules.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -56,10 +56,13 @@
*/
@Immutable
public static interface Rule {
+ /**
+ * Return whether this property should be included in the indexes.
+ *
+ * @return true if it is to be included, or false otherwise
+ */
boolean isIncluded();
- boolean isSkipped();
-
boolean isAnalyzed();
boolean isAnalyzedWithoutNorms();
@@ -131,15 +134,6 @@
/**
* {@inheritDoc}
*
- * @see org.jboss.dna.search.IndexRules.Rule#isSkipped()
- */
- public boolean isSkipped() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- *
* @see org.jboss.dna.search.IndexRules.Rule#isStored()
*/
public boolean isStored() {
@@ -243,15 +237,6 @@
/**
* {@inheritDoc}
*
- * @see org.jboss.dna.search.IndexRules.Rule#isSkipped()
- */
- public boolean isSkipped() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- *
* @see org.jboss.dna.search.IndexRules.Rule#isStored()
*/
public boolean isStored() {
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/IndexSession.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,162 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.List;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.search.IndexSearcher;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.connector.RepositorySource;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.ChangeRequest;
-
-/**
- * A stateful session that maintains {@link IndexReader}, {@link IndexWriter} and {@link IndexSearcher} resources to the indexes
- * of a particular source and workspace.
- */
-public interface IndexSession {
-
- /**
- * Get the name of the {@link RepositorySource repository source} for which this session exists. A session instance will
- * always return the same name.
- *
- * @return the source name; never null
- */
- String getSourceName();
-
- /**
- * Get the name of the workspace for which this session exists. A session instance will always return the same name.
- *
- * @return the workspace name; never null
- */
- String getWorkspaceName();
-
- /**
- * Get the execution context in which this session is operating.
- *
- * @return the execution context; never null
- */
- ExecutionContext getContext();
-
- /**
- * Return whether this session made changes to the indexed state.
- *
- * @return true if change were made, or false otherwise
- */
- boolean hasChanges();
-
- /**
- * Perform a full-text search given the supplied query.
- *
- * @param context the context in which the search should be executed; may not be null
- * @param fullTextString the full-text query; never null or blank
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @param results the list where the results should be accumulated; never null
- * @throws IOException if there is a problem indexing or using the indexes
- * @throws ParseException if there is a problem parsing the query
- */
- void search( ExecutionContext context,
- String fullTextString,
- int maxResults,
- int offset,
- List<Location> results ) throws IOException, ParseException;
-
- /**
- * Perform a query of the content. The {@link QueryCommand query} is supplied in the form of the Abstract Query Model, with
- * the {@link Schemata} that defines the tables and views that are available to the query, and the set of index readers (and
- * writers) that should be used.
- *
- * @param queryContext the context in which the query should be executed; may not be null
- * @param query the query; never null
- * @return the results of the query; never null
- * @throws IOException if there is a problem indexing or using the indexes
- * @throws ParseException if there is a problem parsing the query
- */
- QueryResults query( QueryContext queryContext,
- QueryCommand query ) throws IOException, ParseException;
-
- /**
- * Index the node given the index writers. Note that implementors should simply just use the writers to add documents to the
- * index(es), and should never call any of the writer lifecycle methods (e.g., {@link IndexWriter#commit()},
- * {@link IndexWriter#rollback()}, etc.).
- *
- * @param node the node to be indexed; never null
- * @throws IOException if there is a problem indexing or using the writers
- */
- void index( Node node ) throws IOException;
-
- /**
- * Update the indexes to reflect the supplied changes to the graph content. Note that implementors should simply just use the
- * writers to add documents to the index(es), and should never call any of the writer lifecycle methods (e.g.,
- * {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
- *
- * @param changes the set of changes to the content
- * @return the (approximate) number of nodes that were affected by the changes
- * @throws IOException if there is a problem indexing or using the writers
- */
- int apply( Iterable<ChangeRequest> changes ) throws IOException;
-
- /**
- * Remove from the index(es) all of the information pertaining to the nodes at or below the supplied path. Note that
- * implementors should simply just use the writers to add documents to the index(es), and should never call any of the writer
- * lifecycle methods (e.g., {@link IndexWriter#commit()}, {@link IndexWriter#rollback()}, etc.).
- *
- * @param path the path identifying the graph content that is to be removed; never null
- * @return the (approximate) number of nodes that were affected by the changes
- * @throws IOException if there is a problem indexing or using the writers
- */
- int deleteBelow( Path path ) throws IOException;
-
- /**
- * Optimize the indexes, if required.
- *
- * @throws IOException if there is a problem optimizing
- */
- void optimize() throws IOException;
-
- /**
- * Close this session by committing all of the changes. This session is no longer usable after this method is called.
- *
- * @throws IOException if there is a problem committing
- */
- void commit() throws IOException;
-
- /**
- * Close this session by rolling back all of the changes that have been made. This session is no longer usable after this
- * method is called.
- *
- * @throws IOException if there is a problem rolling back
- */
- void rollback() throws IOException;
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,708 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.store.Directory;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.graph.DnaLexicon;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.JcrLexicon;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.property.Binary;
-import org.jboss.dna.graph.property.Name;
-import org.jboss.dna.graph.property.PropertyType;
-import org.jboss.dna.graph.property.ValueFactory;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryResults.Columns;
-import org.jboss.dna.graph.query.model.And;
-import org.jboss.dna.graph.query.model.Between;
-import org.jboss.dna.graph.query.model.BindVariableName;
-import org.jboss.dna.graph.query.model.ChildNode;
-import org.jboss.dna.graph.query.model.Comparison;
-import org.jboss.dna.graph.query.model.Constraint;
-import org.jboss.dna.graph.query.model.DescendantNode;
-import org.jboss.dna.graph.query.model.DynamicOperand;
-import org.jboss.dna.graph.query.model.FullTextSearch;
-import org.jboss.dna.graph.query.model.FullTextSearchScore;
-import org.jboss.dna.graph.query.model.Length;
-import org.jboss.dna.graph.query.model.Literal;
-import org.jboss.dna.graph.query.model.LowerCase;
-import org.jboss.dna.graph.query.model.NodeDepth;
-import org.jboss.dna.graph.query.model.NodeLocalName;
-import org.jboss.dna.graph.query.model.NodeName;
-import org.jboss.dna.graph.query.model.NodePath;
-import org.jboss.dna.graph.query.model.Not;
-import org.jboss.dna.graph.query.model.Operator;
-import org.jboss.dna.graph.query.model.Or;
-import org.jboss.dna.graph.query.model.PropertyExistence;
-import org.jboss.dna.graph.query.model.PropertyValue;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.model.SameNode;
-import org.jboss.dna.graph.query.model.SelectorName;
-import org.jboss.dna.graph.query.model.SetCriteria;
-import org.jboss.dna.graph.query.model.StaticOperand;
-import org.jboss.dna.graph.query.model.UpperCase;
-import org.jboss.dna.graph.query.model.Visitors;
-import org.jboss.dna.graph.query.model.FullTextSearch.NegationTerm;
-import org.jboss.dna.graph.query.plan.PlanNode;
-import org.jboss.dna.graph.query.process.AbstractAccessComponent;
-import org.jboss.dna.graph.query.process.ProcessingComponent;
-import org.jboss.dna.graph.query.process.SelectComponent;
-import org.jboss.dna.graph.query.process.SelectComponent.Analyzer;
-
-/**
- * An {@link IndexLayout} implementation that stores all content within a set of two indexes: one for the node content and a
- * second one for paths and UUIDs.
- */
-@ThreadSafe
-public class KitchenSinkIndexLayout extends DualIndexLayout {
-
- /**
- * The default set of {@link IndexRules} used by {@link KitchenSinkIndexLayout} instances when no rules are provided.
- */
- public static final IndexRules DEFAULT_RULES;
-
- static {
- IndexRules.Builder builder = IndexRules.createBuilder();
- // Configure the default behavior ...
- builder.defaultTo(IndexRules.INDEX | IndexRules.ANALYZE);
- // Configure the UUID properties to be just indexed (not stored, not analyzed, not included in full-text) ...
- builder.index(JcrLexicon.UUID, DnaLexicon.UUID);
- // Configure the properties that we'll treat as dates ...
- builder.treatAsDates(JcrLexicon.CREATED, JcrLexicon.LAST_MODIFIED);
- DEFAULT_RULES = builder.build();
- }
-
- private final IndexRules rules;
- private final DirectoryConfiguration directoryConfiguration;
-
- public KitchenSinkIndexLayout( DirectoryConfiguration directoryConfiguration ) {
- this.rules = DEFAULT_RULES;
- this.directoryConfiguration = directoryConfiguration;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexLayout#createSession(org.jboss.dna.graph.ExecutionContext, java.lang.String,
- * java.lang.String, boolean, boolean)
- */
- public IndexSession createSession( ExecutionContext context,
- String sourceName,
- String workspaceName,
- boolean overwrite,
- boolean readOnly ) {
- Directory pathIndexDirectory = directoryConfiguration.getDirectory(workspaceName, PATHS_INDEX_NAME);
- Directory contentIndexDirectory = directoryConfiguration.getDirectory(workspaceName, CONTENT_INDEX_NAME);
- assert pathIndexDirectory != null;
- assert contentIndexDirectory != null;
- return new Session(context, sourceName, workspaceName, rules, pathIndexDirectory, contentIndexDirectory, overwrite,
- readOnly);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.IndexLayout#destroyIndexes(org.jboss.dna.graph.ExecutionContext, java.lang.String,
- * java.lang.String)
- */
- public boolean destroyIndexes( ExecutionContext context,
- String sourceName,
- String workspaceName ) {
- directoryConfiguration.destroyDirectory(workspaceName, PATHS_INDEX_NAME);
- directoryConfiguration.destroyDirectory(workspaceName, CONTENT_INDEX_NAME);
- return true;
- }
-
- protected class Session extends LuceneSession {
-
- protected Session( ExecutionContext context,
- String sourceName,
- String workspaceName,
- IndexRules rules,
- Directory pathsIndexDirectory,
- Directory contentIndexDirectory,
- boolean overwrite,
- boolean readOnly ) {
- super(context, sourceName, workspaceName, rules, pathsIndexDirectory, contentIndexDirectory, overwrite, readOnly);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.DualIndexLayout.LuceneSession#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
- * org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
- * org.jboss.dna.graph.query.QueryResults.Columns, org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
- */
- @Override
- protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
- QueryContext context,
- PlanNode accessNode,
- Columns resultColumns,
- Analyzer analyzer ) {
- // Create a processing component for this access query ...
- return new LuceneQueryComponent(this, originalQuery, context, resultColumns, accessNode, analyzer, sourceName,
- workspaceName);
- }
-
- }
-
- /**
- * The {@link ProcessingComponent} implementation that executes a single atomic access query against the Lucene indexes.
- */
- protected static class LuceneQueryComponent extends AbstractAccessComponent {
- private final QueryCommand originalQuery;
- private final Session session;
- private final String sourceName;
- private final String workspaceName;
-
- protected LuceneQueryComponent( Session session,
- QueryCommand originalQuery,
- QueryContext context,
- Columns columns,
- PlanNode accessNode,
- Analyzer analyzer,
- String sourceName,
- String workspaceName ) {
- super(context, columns, accessNode);
- this.originalQuery = originalQuery;
- this.session = session;
- this.sourceName = sourceName;
- this.workspaceName = workspaceName;
- }
-
- protected String fieldNameFor( Name name ) {
- return session.stringFactory.create(name);
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.graph.query.process.ProcessingComponent#execute()
- */
- @Override
- public List<Object[]> execute() {
-
-            // Some kinds of constraints are not easily pushed down to Lucene in the form of a Lucene Query, and
- // instead are applied by filtering the results. For example, a FullTextSearchScore applies
- // to the score of the tuple, which cannot be (easily?) applied as a Query.
- //
- // Therefore, each of the AND-ed constraints of the query are evaluated separately. After all,
- // each of the tuples returned by the planned query must satisfy all of the AND-ed constraints.
- // Or, to put it another way, if a tuple does not satisfy one of the AND-ed constraints, the
- // tuple should not be included in the query results.
- //
- // Logically, any AND-ed criteria that cannot be pushed down to Lucene can of course be applied
- // as a filter on the results. Thus, each AND-ed constraint is processed to first determine if
- // it can be represented as a Lucene query; all other AND-ed constraints must be handled as
- // a results filter. Since most queries will likely use one or more simple constraints AND-ed
- // together, this approach will likely work very well.
- //
- // The only hairy case is when any AND-ed constraint is actually an OR-ed combination of multiple
- // constraints of which at least one cannot be pushed down to Lucene. In this case, the entire
- // AND-ed constraint must be treated as a results filter (even if many of those constraints that
- // make up the OR-ed constraint can be pushed down). Hopefully, this will not be a common case
- // in actual queries.
-
- // For each of the AND-ed constraints ...
- Query pushDownQuery = null;
- Constraint postProcessConstraint = null;
- try {
- for (Constraint andedConstraint : this.andedConstraints) {
- // Determine if it can be represented as a Lucene query ...
- Query constraintQuery = createQuery(andedConstraint);
- if (constraintQuery != null) {
- // The AND-ed constraint _can_ be represented as a push-down Lucene query ...
- if (pushDownQuery == null) {
- // This must be the first query ...
- pushDownQuery = constraintQuery;
- } else if (pushDownQuery instanceof BooleanQuery) {
- // We have to add the constraint query to the existing boolean ...
- BooleanQuery booleanQuery = (BooleanQuery)pushDownQuery;
- booleanQuery.add(constraintQuery, Occur.MUST);
- } else {
- // This is the second push-down query, so create a BooleanQuery ...
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(pushDownQuery, Occur.MUST);
- booleanQuery.add(constraintQuery, Occur.MUST);
- pushDownQuery = booleanQuery;
- }
- } else {
- // The AND-ed constraint _cannot_ be represented as a push-down Lucene query ...
- if (postProcessConstraint == null) {
- postProcessConstraint = andedConstraint;
- } else {
- postProcessConstraint = new And(postProcessConstraint, andedConstraint);
- }
- }
- }
- } catch (IOException e) {
-                // There was an error working with the constraints (such as a ValueFormatException) ...
- QueryContext context = getContext();
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- String origQueryString = Visitors.readable(originalQuery, context.getExecutionContext());
- context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
- return emptyTuples();
- } catch (RuntimeException e) {
-                // There was an error working with the constraints (such as a ValueFormatException) ...
- QueryContext context = getContext();
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- String origQueryString = Visitors.readable(originalQuery, context.getExecutionContext());
- context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
- return emptyTuples();
- }
-
- if (pushDownQuery == null) {
- // There are no constraints that can be pushed down, so return _all_ the nodes ...
- pushDownQuery = new MatchAllDocsQuery();
- }
-
- // Get the results from Lucene ...
- List<Object[]> tuples = null;
- final Columns columns = getColumns();
- final QueryContext context = getContext();
- final ExecutionContext execContext = context.getExecutionContext();
- try {
- // Execute the query against the content indexes ...
- IndexSearcher searcher = session.getContentSearcher();
- TupleCollector collector = new TupleCollector(columns, execContext.getValueFactories().getUuidFactory());
- searcher.search(pushDownQuery, collector);
- tuples = collector.getTuples();
- } catch (IOException e) {
- // There was a problem executing the Lucene query ...
- I18n msg = SearchI18n.errorWhilePerformingLuceneQuery;
- String origQueryString = Visitors.readable(originalQuery, execContext);
- context.getProblems().addError(e, msg, pushDownQuery, origQueryString, workspaceName, sourceName, e.getMessage());
- return emptyTuples();
- }
-
- if (postProcessConstraint != null && !tuples.isEmpty()) {
- // Create a delegate processing component that will return the tuples we've already found ...
- final List<Object[]> allTuples = tuples;
- ProcessingComponent tuplesProcessor = new ProcessingComponent(context, columns) {
- @Override
- public List<Object[]> execute() {
- return allTuples;
- }
- };
- // Create a processing component that will apply these constraints to the tuples we already found ...
- return new SelectComponent(tuplesProcessor, postProcessConstraint, context.getVariables()).execute();
- }
- return tuples;
- }
-
- protected Query createQuery( Constraint constraint ) throws IOException {
- if (constraint instanceof And) {
- And and = (And)constraint;
- Query leftQuery = createQuery(and.getLeft());
- Query rightQuery = createQuery(and.getRight());
- if (leftQuery == null || rightQuery == null) return null;
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(createQuery(and.getLeft()), Occur.MUST);
- booleanQuery.add(createQuery(and.getRight()), Occur.MUST);
- return booleanQuery;
- }
- if (constraint instanceof Or) {
- Or or = (Or)constraint;
- Query leftQuery = createQuery(or.getLeft());
- Query rightQuery = createQuery(or.getRight());
- if (leftQuery == null) {
- return rightQuery != null ? rightQuery : null;
- } else if (rightQuery == null) {
- return leftQuery;
- }
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(createQuery(or.getLeft()), Occur.SHOULD);
- booleanQuery.add(createQuery(or.getRight()), Occur.SHOULD);
- return booleanQuery;
- }
- if (constraint instanceof Not) {
- Not not = (Not)constraint;
- Query notted = createQuery(not.getConstraint());
- if (notted == null) return new MatchAllDocsQuery();
- }
- if (constraint instanceof SetCriteria) {
- SetCriteria setCriteria = (SetCriteria)constraint;
- DynamicOperand left = setCriteria.getLeftOperand();
- int numRightOperands = setCriteria.getRightOperands().size();
- assert numRightOperands > 0;
- if (numRightOperands == 1) {
- return createQuery(left, Operator.EQUAL_TO, setCriteria.getRightOperands().iterator().next());
- }
- BooleanQuery setQuery = new BooleanQuery();
- for (StaticOperand right : setCriteria.getRightOperands()) {
- Query rightQuery = createQuery(left, Operator.EQUAL_TO, right);
- if (rightQuery == null) return null;
- setQuery.add(rightQuery, Occur.SHOULD);
- }
- return setQuery;
- }
- if (constraint instanceof PropertyExistence) {
- PropertyExistence existence = (PropertyExistence)constraint;
- return createQuery(existence.getSelectorName(), existence.getPropertyName());
- }
- if (constraint instanceof Between) {
- Between between = (Between)constraint;
- return createQuery(between);
- }
- if (constraint instanceof Comparison) {
- Comparison comparison = (Comparison)constraint;
- return createQuery(comparison.getOperand1(), comparison.getOperator(), comparison.getOperand2());
- }
- if (constraint instanceof FullTextSearch) {
- FullTextSearch search = (FullTextSearch)constraint;
- String fieldName = ContentIndex.FULL_TEXT;
- Name propertyName = search.getPropertyName();
- if (propertyName != null) {
- fieldName = session.fullTextFieldName(fieldNameFor(propertyName));
- }
- return createQuery(fieldName, search.getTerm());
- }
- try {
- if (constraint instanceof SameNode) {
- SameNode sameNode = (SameNode)constraint;
- return session.findNodeAt(sameNode.getPath());
- }
- if (constraint instanceof ChildNode) {
- ChildNode childNode = (ChildNode)constraint;
- return session.findChildNodes(childNode.getParentPath());
- }
- if (constraint instanceof DescendantNode) {
- DescendantNode descendantNode = (DescendantNode)constraint;
- return session.findAllNodesBelow(descendantNode.getAncestorPath());
- }
- } catch (IOException e) {
- I18n msg = SearchI18n.errorWhilePerformingQuery;
- getContext().getProblems().addError(e,
- msg,
- Visitors.readable(originalQuery),
- workspaceName,
- sourceName,
- e.getMessage());
- return null;
- }
- // Should not get here ...
- assert false;
- return null;
- }
-
- protected Query createQuery( DynamicOperand left,
- Operator operator,
- StaticOperand right ) throws IOException {
- return createQuery(left, operator, right, true);
- }
-
- protected Query createQuery( DynamicOperand left,
- Operator operator,
- StaticOperand right,
- boolean caseSensitive ) throws IOException {
- // Handle the static operand ...
- Object value = createOperand(right, caseSensitive);
- assert value != null;
-
- // Address the dynamic operand ...
- if (left instanceof FullTextSearchScore) {
- // This can only be represented as a filter ...
- return null;
- } else if (left instanceof PropertyValue) {
- return session.findNodesWith((PropertyValue)left, operator, value, caseSensitive);
- } else if (left instanceof Length) {
- return session.findNodesWith((Length)left, operator, right);
- } else if (left instanceof LowerCase) {
- LowerCase lowercase = (LowerCase)left;
- return createQuery(lowercase.getOperand(), operator, right, false);
- } else if (left instanceof UpperCase) {
- UpperCase lowercase = (UpperCase)left;
- return createQuery(lowercase.getOperand(), operator, right, false);
- } else if (left instanceof NodeDepth) {
- assert operator != Operator.LIKE;
- // Could be represented as a result filter, but let's do this now ...
- return session.findNodesWith((NodeDepth)left, operator, value);
- } else if (left instanceof NodePath) {
- return session.findNodesWith((NodePath)left, operator, value, caseSensitive);
- } else if (left instanceof NodeName) {
- return session.findNodesWith((NodeName)left, operator, value, caseSensitive);
- } else if (left instanceof NodeLocalName) {
- return session.findNodesWith((NodeLocalName)left, operator, value, caseSensitive);
- } else {
- assert false;
- return null;
- }
- }
-
- protected Object createOperand( StaticOperand operand,
- boolean caseSensitive ) {
- Object value = null;
- if (operand instanceof Literal) {
- Literal literal = (Literal)operand;
- value = literal.getValue();
- if (!caseSensitive) value = lowerCase(value);
- } else if (operand instanceof BindVariableName) {
- BindVariableName variable = (BindVariableName)operand;
- String variableName = variable.getVariableName();
- value = getContext().getVariables().get(variableName);
- if (!caseSensitive) value = lowerCase(value);
- } else {
- assert false;
- }
- return value;
- }
-
- protected Query createQuery( DynamicOperand left,
- StaticOperand lower,
- StaticOperand upper,
- boolean includesLower,
- boolean includesUpper,
- boolean caseSensitive ) throws IOException {
- // Handle the static operands ...
- Object lowerValue = createOperand(lower, caseSensitive);
- Object upperValue = createOperand(upper, caseSensitive);
- assert lowerValue != null;
- assert upperValue != null;
-
- // Only in the case of a PropertyValue and Depth will we need to do something special ...
- if (left instanceof NodeDepth) {
- return session.findNodesWithNumericRange((NodeDepth)left, lowerValue, upperValue, includesLower, includesUpper);
- } else if (left instanceof PropertyValue) {
- PropertyType lowerType = PropertyType.discoverType(lowerValue);
- PropertyType upperType = PropertyType.discoverType(upperValue);
- if (upperType == lowerType) {
- switch (upperType) {
- case DATE:
- case LONG:
- case DOUBLE:
- case DECIMAL:
- return session.findNodesWithNumericRange((PropertyValue)left,
- lowerValue,
- upperValue,
- includesLower,
- includesUpper);
- default:
- // continue on and handle as boolean query ...
- }
- }
- }
-
- // Otherwise, just create a boolean query ...
- BooleanQuery query = new BooleanQuery();
- Operator lowerOp = includesLower ? Operator.GREATER_THAN_OR_EQUAL_TO : Operator.GREATER_THAN;
- Operator upperOp = includesUpper ? Operator.LESS_THAN_OR_EQUAL_TO : Operator.LESS_THAN;
- Query lowerQuery = createQuery(left, lowerOp, lower, caseSensitive);
- Query upperQuery = createQuery(left, upperOp, upper, caseSensitive);
- if (lowerQuery == null || upperQuery == null) return null;
- query.add(lowerQuery, Occur.MUST);
- query.add(upperQuery, Occur.MUST);
- return query;
- }
-
- protected Object lowerCase( Object value ) {
- if (value instanceof String) {
- return ((String)value).toLowerCase();
- }
- assert !(value instanceof Binary);
- ValueFactory<String> stringFactory = getContext().getExecutionContext().getValueFactories().getStringFactory();
- ValueFactory<?> valueFactory = getContext().getExecutionContext().getValueFactories().getValueFactory(value);
- return valueFactory.create(stringFactory.create(value).toLowerCase());
- }
-
- protected Query createQuery( SelectorName selectorName,
- Name propertyName ) {
- Term term = new Term(fieldNameFor(propertyName));
- return new TermQuery(term);
- }
-
- protected Query createQuery( String fieldName,
- FullTextSearch.Term term ) {
- if (term instanceof FullTextSearch.Conjunction) {
- FullTextSearch.Conjunction conjunction = (FullTextSearch.Conjunction)term;
- BooleanQuery query = new BooleanQuery();
- for (FullTextSearch.Term nested : conjunction) {
- if (nested instanceof NegationTerm) {
- query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
- } else {
- query.add(createQuery(fieldName, nested), Occur.MUST);
- }
- }
- return query;
- }
- if (term instanceof FullTextSearch.Disjunction) {
- FullTextSearch.Disjunction disjunction = (FullTextSearch.Disjunction)term;
- BooleanQuery query = new BooleanQuery();
- for (FullTextSearch.Term nested : disjunction) {
- if (nested instanceof NegationTerm) {
- query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
- } else {
- query.add(createQuery(fieldName, nested), Occur.SHOULD);
- }
- }
- return query;
- }
- if (term instanceof FullTextSearch.SimpleTerm) {
- FullTextSearch.SimpleTerm simple = (FullTextSearch.SimpleTerm)term;
- if (simple.isQuotingRequired()) {
- PhraseQuery query = new PhraseQuery();
- query.setSlop(0); // terms must be adjacent
- for (String value : simple.getValues()) {
- query.add(new Term(fieldName, value));
- }
- return query;
- }
- return new TermQuery(new Term(fieldName, simple.getValue()));
- }
- // Should not get here ...
- assert false;
- return null;
- }
- }
-
- /**
- * This collector is responsible for loading the value for each of the columns into each tuple array.
- */
- protected static class TupleCollector extends Collector {
- private final LinkedList<Object[]> tuples = new LinkedList<Object[]>();
- private final Columns columns;
- private final int numValues;
- private final boolean recordScore;
- private final int scoreIndex;
- private final FieldSelector fieldSelector;
- private final int locationIndex;
- private final ValueFactory<UUID> uuidFactory;
- private Scorer scorer;
- private IndexReader currentReader;
- private int docOffset;
-
- protected TupleCollector( Columns columns,
- ValueFactory<UUID> uuidFactory ) {
- this.columns = columns;
- this.uuidFactory = uuidFactory;
- assert this.columns != null;
- assert this.uuidFactory != null;
- this.numValues = this.columns.getTupleSize();
- assert this.numValues >= 0;
- assert this.columns.getSelectorNames().size() == 1;
- final String selectorName = this.columns.getSelectorNames().get(0);
- this.locationIndex = this.columns.getLocationIndex(selectorName);
- this.recordScore = this.columns.hasFullTextSearchScores();
- this.scoreIndex = this.recordScore ? this.columns.getFullTextSearchScoreIndexFor(selectorName) : -1;
- final Set<String> columnNames = new HashSet<String>(this.columns.getColumnNames());
- columnNames.add(ContentIndex.UUID); // add the UUID, which we'll put into the Location ...
- this.fieldSelector = new FieldSelector() {
- private static final long serialVersionUID = 1L;
-
- public FieldSelectorResult accept( String fieldName ) {
- return columnNames.contains(fieldName) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
- }
- };
- }
-
- /**
- * @return tuples
- */
- public LinkedList<Object[]> getTuples() {
- return tuples;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
- */
- @Override
- public boolean acceptsDocsOutOfOrder() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
- */
- @Override
- public void setNextReader( IndexReader reader,
- int docBase ) {
- this.currentReader = reader;
- this.docOffset = docBase;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
- */
- @Override
- public void setScorer( Scorer scorer ) {
- this.scorer = scorer;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.apache.lucene.search.Collector#collect(int)
- */
- @Override
- public void collect( int doc ) throws IOException {
- int docId = doc + docOffset;
- Object[] tuple = new Object[numValues];
- Document document = currentReader.document(docId, fieldSelector);
- for (String columnName : columns.getColumnNames()) {
- int index = columns.getColumnIndexForName(columnName);
- // We just need to retrieve the first value if there is more than one ...
- tuple[index] = document.get(columnName);
- }
-
- // Set the score column if required ...
- if (recordScore) {
- assert scorer != null;
- tuple[scoreIndex] = scorer.score();
- }
-
- // Load the UUID into a Location object ...
- UUID uuid = uuidFactory.create(document.get(ContentIndex.UUID));
- tuple[locationIndex] = Location.create(uuid);
- tuples.add(tuple);
- }
- }
-
-}
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java (from rev 1328, trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfiguration.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,60 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.store.Directory;
+import org.jboss.dna.graph.search.SearchEngineException;
+
+/**
+ * Interface used to obtain the Lucene {@link Directory} instance that should be used for a workspace given the name of the
+ * workspace. There are several implementations (see {@link LuceneConfigurations}), but custom implementations can always be
+ * used.
+ */
+@ThreadSafe
+public interface LuceneConfiguration {
+ /**
+ * Get the {@link Directory} that should be used for the workspace with the supplied name.
+ *
+ * @param workspaceName the workspace name
+ * @param indexName the name of the index to be created
+ * @return the directory; never null
+ * @throws IllegalArgumentException if the workspace name is null
+ * @throws SearchEngineException if there is a problem creating the directory
+ */
+ Directory getDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException;
+
+ /**
+ * Destroy the {@link Directory} that is used for the workspace with the supplied name.
+ *
+ * @param workspaceName the workspace name
+     * @param indexName the name of the index to be destroyed
+ * @return true if the directory existed and was destroyed, or false if the directory didn't exist
+ * @throws IllegalArgumentException if the workspace name is null
+     * @throws SearchEngineException if there is a problem destroying the directory
+ */
+ boolean destroyDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException;
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfiguration.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java (from rev 1328, trunk/dna-search/src/main/java/org/jboss/dna/search/DirectoryConfigurations.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,427 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.ConcurrentHashMap;
+import net.jcip.annotations.Immutable;
+import net.jcip.annotations.ThreadSafe;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.RAMDirectory;
+import org.jboss.dna.common.i18n.I18n;
+import org.jboss.dna.common.text.NoOpEncoder;
+import org.jboss.dna.common.text.TextEncoder;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.common.util.FileUtil;
+import org.jboss.dna.common.util.HashCode;
+import org.jboss.dna.graph.search.SearchEngineException;
+
+/**
+ * A family of {@link LuceneConfiguration} implementations.
+ */
+public class LuceneConfigurations {
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates in-memory directories.
+ *
+ * @return the new directory configuration; never null
+ */
+ public static final LuceneConfiguration inMemory() {
+ return new RamDirectoryFactory();
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent ) {
+ return new FileSystemDirectoryFromNameFactory(parent);
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent,
+ LockFactory lockFactory ) {
+ return new FileSystemDirectoryFromNameFactory(parent, lockFactory);
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ return new FileSystemDirectoryFromNameFactory(parent, workspaceNameEncoder, indexNameEncoder);
+ }
+
+ /**
+ * Return a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the allowable
+ * workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @return the new directory configuration; never null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ public static final LuceneConfiguration using( File parent,
+ LockFactory lockFactory,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ return new FileSystemDirectoryFromNameFactory(parent, lockFactory, workspaceNameEncoder, indexNameEncoder);
+ }
+
+ /**
+ * A {@link LuceneConfiguration} implementation that creates {@link Directory} instances of the supplied type for each
+ * workspace and pools the results, ensuring that the same {@link Directory} instance is always returned for the same
+ * workspace name.
+ *
+ * @param <DirectoryType> the concrete type of the directory
+ */
+ @ThreadSafe
+ protected static abstract class PoolingDirectoryFactory<DirectoryType extends Directory> implements LuceneConfiguration {
+ private final ConcurrentHashMap<IndexId, DirectoryType> directories = new ConcurrentHashMap<IndexId, DirectoryType>();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.LuceneConfiguration#getDirectory(java.lang.String, java.lang.String)
+ */
+ public Directory getDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ IndexId id = new IndexId(workspaceName, indexName);
+ DirectoryType result = directories.get(id);
+ if (result == null) {
+ DirectoryType newDirectory = createDirectory(workspaceName, indexName);
+ result = directories.putIfAbsent(id, newDirectory);
+ if (result == null) result = newDirectory;
+ }
+ return result;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.LuceneConfiguration#destroyDirectory(java.lang.String, java.lang.String)
+ */
+ public boolean destroyDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException {
+ CheckArg.isNotNull(workspaceName, "workspaceName");
+ IndexId id = new IndexId(workspaceName, indexName);
+ DirectoryType result = directories.remove(id);
+ return result != null ? doDestroy(result) : false;
+ }
+
+ /**
+ * Method implemented by subclasses to create a new Directory implementation.
+ *
+ * @param workspaceName the name of the workspace for which the {@link Directory} is to be created; never null
+ * @param indexName the name of the index to be created
+ * @return the new directory; may not be null
+ * @throws SearchEngineException if there is a problem creating the directory
+ */
+ protected abstract DirectoryType createDirectory( String workspaceName,
+ String indexName ) throws SearchEngineException;
+
+ protected abstract boolean doDestroy( DirectoryType directory ) throws SearchEngineException;
+ }
+
+ /**
+ * A {@link LuceneConfiguration} implementation that creates {@link RAMDirectory} instances for each workspace and index
+ * name. Each factory instance maintains a pool of {@link RAMDirectory} instances, ensuring that the same {@link RAMDirectory}
+ * is always returned for the same workspace name.
+ */
+ @ThreadSafe
+ public static class RamDirectoryFactory extends PoolingDirectoryFactory<RAMDirectory> {
+ protected RamDirectoryFactory() {
+ }
+
+ @Override
+ protected RAMDirectory createDirectory( String workspaceName,
+ String indexName ) {
+ return new RAMDirectory();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.LuceneConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
+ */
+ @Override
+ protected boolean doDestroy( RAMDirectory directory ) throws SearchEngineException {
+ return directory != null;
+ }
+ }
+
+ /**
+ * A {@link LuceneConfiguration} implementation that creates {@link FSDirectory} instances for each workspace and index
+ * name. This factory is created with a parent directory under which all workspace and index directories are created.
+ * <p>
+ * This uses the supplied encoders to translate the workspace and index names into valid directory names. By default, no
+ * encoding is performed, meaning that the workspace and index names are used explicitly as directory names. This default
+ * behavior, then, means that not all values of workspace names or index names will work. If you want to be sure that all
+ * workspace names work, supply an encoder for the workspace names. (Index names are currently such that they will always be
+ * valid directory names, but you can always supply an encoder if you'd like.)
+ * </p>
+ */
+ public static class FileSystemDirectoryFromNameFactory extends PoolingDirectoryFactory<FSDirectory> {
+ private final File parentFile;
+ private final LockFactory lockFactory;
+ private final TextEncoder workspaceNameEncoder;
+ private final TextEncoder indexNameEncoder;
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent ) {
+ this(parent, null, null, null);
+ }
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent,
+ LockFactory lockFactory ) {
+ this(parent, lockFactory, null, null);
+ }
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ this(parent, null, workspaceNameEncoder, indexNameEncoder);
+ }
+
+ /**
+ * Create a new {@link LuceneConfiguration} that creates {@link FSDirectory} instances mapped to folders under a parent
+ * folder, where the workspace name is used to create the workspace folder. Note that this has ramifications on the
+ * allowable workspace names.
+ *
+ * @param parent the parent folder
+ * @param lockFactory the lock factory; may be null
+ * @param workspaceNameEncoder the encoder that should be used for encoding the workspace name into a directory name
+ * @param indexNameEncoder the encoder that should be used for encoding the index name into a directory name
+ * @throws IllegalArgumentException if the parent file is null
+ */
+ protected FileSystemDirectoryFromNameFactory( File parent,
+ LockFactory lockFactory,
+ TextEncoder workspaceNameEncoder,
+ TextEncoder indexNameEncoder ) {
+ CheckArg.isNotNull(parent, "parent");
+ this.parentFile = parent;
+ this.lockFactory = lockFactory;
+ this.workspaceNameEncoder = workspaceNameEncoder != null ? workspaceNameEncoder : new NoOpEncoder();
+ this.indexNameEncoder = indexNameEncoder != null ? indexNameEncoder : new NoOpEncoder();
+ }
+
+ @Override
+ protected FSDirectory createDirectory( String workspaceName,
+ String indexName ) {
+ File workspaceFile = new File(parentFile, workspaceNameEncoder.encode(workspaceName));
+ if (!workspaceFile.exists()) {
+ workspaceFile.mkdirs();
+ } else {
+ if (!workspaceFile.isDirectory()) {
+ I18n msg = SearchI18n.locationForIndexesIsNotDirectory;
+ throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
+ }
+ if (!workspaceFile.canRead()) {
+ I18n msg = SearchI18n.locationForIndexesCannotBeRead;
+ throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
+ }
+ if (!workspaceFile.canWrite()) {
+ I18n msg = SearchI18n.locationForIndexesCannotBeWritten;
+ throw new SearchEngineException(msg.text(workspaceFile.getAbsolutePath(), workspaceName));
+ }
+ }
+ File directory = workspaceFile;
+ if (indexName != null) {
+ File indexFile = new File(workspaceFile, indexNameEncoder.encode(indexName));
+ if (!indexFile.exists()) {
+ indexFile.mkdirs();
+ } else {
+ if (!indexFile.isDirectory()) {
+ I18n msg = SearchI18n.locationForIndexesIsNotDirectory;
+ throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
+ }
+ if (!indexFile.canRead()) {
+ I18n msg = SearchI18n.locationForIndexesCannotBeRead;
+ throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
+ }
+ if (!indexFile.canWrite()) {
+ I18n msg = SearchI18n.locationForIndexesCannotBeWritten;
+ throw new SearchEngineException(msg.text(indexFile.getAbsolutePath(), workspaceName));
+ }
+ }
+ directory = indexFile;
+ }
+ try {
+ return create(directory, lockFactory);
+ } catch (IOException e) {
+ throw new SearchEngineException(e);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.LuceneConfigurations.PoolingDirectoryFactory#doDestroy(org.apache.lucene.store.Directory)
+ */
+ @Override
+ protected boolean doDestroy( FSDirectory directory ) throws SearchEngineException {
+ File file = directory.getFile();
+ if (file.exists()) {
+ return FileUtil.delete(file);
+ }
+ return false;
+ }
+
+ /**
+ * Override this method to define which subclass of {@link FSDirectory} should be created.
+ *
+ * @param directory the file system directory; never null
+ * @param lockFactory the lock factory; may be null
+ * @return the {@link FSDirectory} instance
+ * @throws IOException if there is a problem creating the FSDirectory instance
+ */
+ protected FSDirectory create( File directory,
+ LockFactory lockFactory ) throws IOException {
+ return FSDirectory.open(directory, lockFactory);
+ }
+ }
+
+ @Immutable
+ protected static final class IndexId {
+ private final String workspaceName;
+ private final String indexName;
+ private final int hc;
+
+ protected IndexId( String workspaceName,
+ String indexName ) {
+ assert workspaceName != null;
+ this.workspaceName = workspaceName;
+ this.indexName = indexName;
+ this.hc = HashCode.compute(this.workspaceName, this.indexName);
+ }
+
+ /**
+ * @return indexName
+ */
+ public String getIndexName() {
+ return indexName;
+ }
+
+ /**
+ * @return workspaceName
+ */
+ public String getWorkspaceName() {
+ return workspaceName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ return hc;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals( Object obj ) {
+ if (obj == this) return true;
+ if (obj instanceof IndexId) {
+ IndexId that = (IndexId)obj;
+ if (this.hashCode() != that.hashCode()) return false;
+ if (!this.workspaceName.equals(that.workspaceName)) return false;
+ if (!this.indexName.equals(that.indexName)) return false;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return indexName != null ? workspaceName + "/" + this.indexName : this.workspaceName;
+ }
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneConfigurations.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Copied: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java (from rev 1328, trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngineException.java)
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,74 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+
+package org.jboss.dna.search;
+
/**
 * A {@link RuntimeException runtime exception} representing a problem operating against Lucene.
 */
public class LuceneException extends RuntimeException {

    // Fixed serialization identifier; must not change, since serialized forms may already exist.
    private static final long serialVersionUID = 8281373010920861138L;

    /**
     * Construct a Lucene exception with no message and no cause.
     */
    public LuceneException() {
    }

    /**
     * Construct a Lucene exception with a single message.
     *
     * @param message the message describing the failure
     */
    public LuceneException( String message ) {
        super(message);
    }

    /**
     * Construct a Lucene exception with another exception that is the cause of the failure.
     *
     * @param cause the original cause of the failure
     */
    public LuceneException( Throwable cause ) {
        super(cause);
    }

    /**
     * Construct a Lucene exception with a single message and another exception that is the cause of the failure.
     *
     * @param message the message describing the failure
     * @param cause the original cause of the failure
     */
    public LuceneException( String message,
                            Throwable cause ) {
        super(message, cause);
    }
}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneException.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,609 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.jboss.dna.common.i18n.I18n;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.property.Binary;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.PropertyType;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
+import org.jboss.dna.graph.query.model.BindVariableName;
+import org.jboss.dna.graph.query.model.ChildNode;
+import org.jboss.dna.graph.query.model.Comparison;
+import org.jboss.dna.graph.query.model.Constraint;
+import org.jboss.dna.graph.query.model.DescendantNode;
+import org.jboss.dna.graph.query.model.DynamicOperand;
+import org.jboss.dna.graph.query.model.FullTextSearch;
+import org.jboss.dna.graph.query.model.FullTextSearchScore;
+import org.jboss.dna.graph.query.model.Length;
+import org.jboss.dna.graph.query.model.Literal;
+import org.jboss.dna.graph.query.model.LowerCase;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.Not;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.Or;
+import org.jboss.dna.graph.query.model.PropertyExistence;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.model.SameNode;
+import org.jboss.dna.graph.query.model.SelectorName;
+import org.jboss.dna.graph.query.model.SetCriteria;
+import org.jboss.dna.graph.query.model.StaticOperand;
+import org.jboss.dna.graph.query.model.UpperCase;
+import org.jboss.dna.graph.query.model.Visitors;
+import org.jboss.dna.graph.query.model.FullTextSearch.NegationTerm;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.process.AbstractAccessComponent;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.SelectComponent;
+import org.jboss.dna.graph.query.process.SelectComponent.Analyzer;
+import org.jboss.dna.search.DualIndexSearchProvider.ContentIndex;
+
+/**
+ * The {@link ProcessingComponent} implementation that executes a single atomic access query against the Lucene indexes.
+ */
+public class LuceneQueryComponent extends AbstractAccessComponent {
+ private final QueryCommand originalQuery;
+ private final LuceneSession session;
+ private final String sourceName;
+ private final String workspaceName;
+
+ protected LuceneQueryComponent( LuceneSession session,
+ QueryCommand originalQuery,
+ QueryContext context,
+ Columns columns,
+ PlanNode accessNode,
+ Analyzer analyzer,
+ String sourceName,
+ String workspaceName ) {
+ super(context, columns, accessNode);
+ this.originalQuery = originalQuery;
+ this.session = session;
+ this.sourceName = sourceName;
+ this.workspaceName = workspaceName;
+ }
+
+ protected String fieldNameFor( Name name ) {
+ return session.stringFactory.create(name);
+ }
+
    /**
     * {@inheritDoc}
     *
     * @see org.jboss.dna.graph.query.process.ProcessingComponent#execute()
     */
    @Override
    public List<Object[]> execute() {

        // Some kinds of constraints are not easily pushed down to Lucene as part of a Lucene Query, and
        // instead are applied by filtering the results. For example, a FullTextSearchScore applies
        // to the score of the tuple, which cannot be (easily?) applied as a Query.
        //
        // Therefore, each of the AND-ed constraints of the query are evaluated separately. After all,
        // each of the tuples returned by the planned query must satisfy all of the AND-ed constraints.
        // Or, to put it another way, if a tuple does not satisfy one of the AND-ed constraints, the
        // tuple should not be included in the query results.
        //
        // Logically, any AND-ed criteria that cannot be pushed down to Lucene can of course be applied
        // as a filter on the results. Thus, each AND-ed constraint is processed to first determine if
        // it can be represented as a Lucene query; all other AND-ed constraints must be handled as
        // a results filter. Since most queries will likely use one or more simple constraints AND-ed
        // together, this approach will likely work very well.
        //
        // The only hairy case is when any AND-ed constraint is actually an OR-ed combination of multiple
        // constraints of which at least one cannot be pushed down to Lucene. In this case, the entire
        // AND-ed constraint must be treated as a results filter (even if many of those constraints that
        // make up the OR-ed constraint can be pushed down). Hopefully, this will not be a common case
        // in actual queries.

        // For each of the AND-ed constraints, accumulate either a push-down Lucene query or a
        // post-processing constraint ...
        Query pushDownQuery = null;
        Constraint postProcessConstraint = null;
        try {
            for (Constraint andedConstraint : this.andedConstraints) {
                // Determine if it can be represented as a Lucene query ...
                Query constraintQuery = createQuery(andedConstraint);
                if (constraintQuery != null) {
                    // The AND-ed constraint _can_ be represented as a push-down Lucene query ...
                    if (pushDownQuery == null) {
                        // This must be the first query ...
                        pushDownQuery = constraintQuery;
                    } else if (pushDownQuery instanceof BooleanQuery) {
                        // We have to add the constraint query to the existing boolean ...
                        BooleanQuery booleanQuery = (BooleanQuery)pushDownQuery;
                        booleanQuery.add(constraintQuery, Occur.MUST);
                    } else {
                        // This is the second push-down query, so create a BooleanQuery ...
                        BooleanQuery booleanQuery = new BooleanQuery();
                        booleanQuery.add(pushDownQuery, Occur.MUST);
                        booleanQuery.add(constraintQuery, Occur.MUST);
                        pushDownQuery = booleanQuery;
                    }
                } else {
                    // The AND-ed constraint _cannot_ be represented as a push-down Lucene query,
                    // so AND it into the constraint that will filter the tuples after the search ...
                    if (postProcessConstraint == null) {
                        postProcessConstraint = andedConstraint;
                    } else {
                        postProcessConstraint = new And(postProcessConstraint, andedConstraint);
                    }
                }
            }
        } catch (IOException e) {
            // There was a error working with the constraints (such as a ValueFormatException) ...
            QueryContext context = getContext();
            I18n msg = SearchI18n.errorWhilePerformingQuery;
            String origQueryString = Visitors.readable(originalQuery, context.getExecutionContext());
            context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
            return emptyTuples();
        } catch (RuntimeException e) {
            // Same handling as the IOException case above (this code predates Java 7 multi-catch):
            // there was a error working with the constraints (such as a ValueFormatException) ...
            QueryContext context = getContext();
            I18n msg = SearchI18n.errorWhilePerformingQuery;
            String origQueryString = Visitors.readable(originalQuery, context.getExecutionContext());
            context.getProblems().addError(e, msg, origQueryString, workspaceName, sourceName, e.getMessage());
            return emptyTuples();
        }

        if (pushDownQuery == null) {
            // There are no constraints that can be pushed down, so return _all_ the nodes ...
            pushDownQuery = new MatchAllDocsQuery();
        }

        // Get the results from Lucene ...
        List<Object[]> tuples = null;
        final Columns columns = getColumns();
        final QueryContext context = getContext();
        final ExecutionContext execContext = context.getExecutionContext();
        try {
            // Execute the query against the content indexes; the collector assembles one tuple
            // per matching document (see TupleCollector) ...
            IndexSearcher searcher = session.getContentSearcher();
            TupleCollector collector = new TupleCollector(columns, execContext.getValueFactories().getUuidFactory());
            searcher.search(pushDownQuery, collector);
            tuples = collector.getTuples();
        } catch (IOException e) {
            // There was a problem executing the Lucene query ...
            I18n msg = SearchI18n.errorWhilePerformingLuceneQuery;
            String origQueryString = Visitors.readable(originalQuery, execContext);
            context.getProblems().addError(e, msg, pushDownQuery, origQueryString, workspaceName, sourceName, e.getMessage());
            return emptyTuples();
        }

        if (postProcessConstraint != null && !tuples.isEmpty()) {
            // Create a delegate processing component that will return the tuples we've already found ...
            final List<Object[]> allTuples = tuples;
            ProcessingComponent tuplesProcessor = new ProcessingComponent(context, columns) {
                @Override
                public List<Object[]> execute() {
                    return allTuples;
                }
            };
            // Create a processing component that will apply these constraints to the tuples we already found ...
            return new SelectComponent(tuplesProcessor, postProcessConstraint, context.getVariables()).execute();
        }
        return tuples;
    }
+
+    /**
+     * Convert the supplied constraint into an equivalent Lucene {@link Query} that can be pushed down to the index,
+     * or null if the constraint (or a required part of it) cannot be represented as a Lucene query and must instead
+     * be applied as a post-processing filter.
+     *
+     * @param constraint the constraint to convert; may not be null
+     * @return the Lucene query, or null if the constraint cannot be pushed down
+     * @throws IOException if there is a problem accessing the indexes while building the query
+     */
+    protected Query createQuery( Constraint constraint ) throws IOException {
+        if (constraint instanceof And) {
+            And and = (And)constraint;
+            Query leftQuery = createQuery(and.getLeft());
+            Query rightQuery = createQuery(and.getRight());
+            // An AND can only be pushed down if both sides can be pushed down ...
+            if (leftQuery == null || rightQuery == null) return null;
+            BooleanQuery booleanQuery = new BooleanQuery();
+            // Reuse the queries computed above instead of recomputing them (was: createQuery(and.getLeft()) again) ...
+            booleanQuery.add(leftQuery, Occur.MUST);
+            booleanQuery.add(rightQuery, Occur.MUST);
+            return booleanQuery;
+        }
+        if (constraint instanceof Or) {
+            Or or = (Or)constraint;
+            Query leftQuery = createQuery(or.getLeft());
+            Query rightQuery = createQuery(or.getRight());
+            // An OR degrades gracefully: if one side cannot be pushed down, use the other side alone ...
+            if (leftQuery == null) return rightQuery;
+            if (rightQuery == null) return leftQuery;
+            BooleanQuery booleanQuery = new BooleanQuery();
+            booleanQuery.add(leftQuery, Occur.SHOULD);
+            booleanQuery.add(rightQuery, Occur.SHOULD);
+            return booleanQuery;
+        }
+        if (constraint instanceof Not) {
+            Not not = (Not)constraint;
+            Query notted = createQuery(not.getConstraint());
+            // If the negated constraint can't be pushed down, NOT(everything-we-can't-see) is a superset match ...
+            if (notted == null) return new MatchAllDocsQuery();
+            // FIX: previously this branch fell through (no return), hitting 'assert false' below.
+            // A Lucene BooleanQuery with only MUST_NOT clauses matches nothing, so pair the
+            // negation with a MUST MatchAllDocsQuery ...
+            BooleanQuery booleanQuery = new BooleanQuery();
+            booleanQuery.add(new MatchAllDocsQuery(), Occur.MUST);
+            booleanQuery.add(notted, Occur.MUST_NOT);
+            return booleanQuery;
+        }
+        if (constraint instanceof SetCriteria) {
+            SetCriteria setCriteria = (SetCriteria)constraint;
+            DynamicOperand left = setCriteria.getLeftOperand();
+            int numRightOperands = setCriteria.getRightOperands().size();
+            assert numRightOperands > 0;
+            if (numRightOperands == 1) {
+                // 'x IN (a)' is just 'x = a' ...
+                return createQuery(left, Operator.EQUAL_TO, setCriteria.getRightOperands().iterator().next());
+            }
+            // 'x IN (a,b,...)' is a disjunction of equality tests ...
+            BooleanQuery setQuery = new BooleanQuery();
+            for (StaticOperand right : setCriteria.getRightOperands()) {
+                Query rightQuery = createQuery(left, Operator.EQUAL_TO, right);
+                if (rightQuery == null) return null;
+                setQuery.add(rightQuery, Occur.SHOULD);
+            }
+            return setQuery;
+        }
+        if (constraint instanceof PropertyExistence) {
+            PropertyExistence existence = (PropertyExistence)constraint;
+            return createQuery(existence.getSelectorName(), existence.getPropertyName());
+        }
+        if (constraint instanceof Between) {
+            Between between = (Between)constraint;
+            return createQuery(between);
+        }
+        if (constraint instanceof Comparison) {
+            Comparison comparison = (Comparison)constraint;
+            return createQuery(comparison.getOperand1(), comparison.getOperator(), comparison.getOperand2());
+        }
+        if (constraint instanceof FullTextSearch) {
+            FullTextSearch search = (FullTextSearch)constraint;
+            String fieldName = ContentIndex.FULL_TEXT;
+            Name propertyName = search.getPropertyName();
+            if (propertyName != null) {
+                // Search within the property-specific full-text field rather than the aggregate field ...
+                fieldName = session.fullTextFieldName(fieldNameFor(propertyName));
+            }
+            return createQuery(fieldName, search.getTerm());
+        }
+        // The path-based constraints consult the indexes and so may fail with an IOException ...
+        try {
+            if (constraint instanceof SameNode) {
+                SameNode sameNode = (SameNode)constraint;
+                return session.findNodeAt(sameNode.getPath());
+            }
+            if (constraint instanceof ChildNode) {
+                ChildNode childNode = (ChildNode)constraint;
+                return session.findChildNodes(childNode.getParentPath());
+            }
+            if (constraint instanceof DescendantNode) {
+                DescendantNode descendantNode = (DescendantNode)constraint;
+                return session.findAllNodesBelow(descendantNode.getAncestorPath());
+            }
+        } catch (IOException e) {
+            // Record the problem and signal "cannot push down" ...
+            I18n msg = SearchI18n.errorWhilePerformingQuery;
+            getContext().getProblems().addError(e,
+                                                msg,
+                                                Visitors.readable(originalQuery),
+                                                workspaceName,
+                                                sourceName,
+                                                e.getMessage());
+            return null;
+        }
+        // Should not get here: every Constraint subtype should be handled above ...
+        assert false;
+        return null;
+    }
+
+    /**
+     * Create a Lucene query for a comparison between a dynamic operand and a static operand,
+     * using case-sensitive matching. Delegates to the four-argument form with {@code caseSensitive = true}.
+     *
+     * @param left the dynamic operand
+     * @param operator the comparison operator
+     * @param right the static operand
+     * @return the query, or null if the comparison cannot be pushed down to Lucene
+     * @throws IOException if there is a problem accessing the indexes
+     */
+    protected Query createQuery( DynamicOperand left,
+                                 Operator operator,
+                                 StaticOperand right ) throws IOException {
+        return createQuery(left, operator, right, true);
+    }
+
+    /**
+     * Create a Lucene query for a comparison between a dynamic operand and a static operand,
+     * with explicit control over case sensitivity.
+     *
+     * @param left the dynamic operand
+     * @param operator the comparison operator
+     * @param right the static operand
+     * @param caseSensitive true if the comparison should be case-sensitive
+     * @return the query, or null if the comparison cannot be pushed down to Lucene
+     * @throws IOException if there is a problem accessing the indexes
+     */
+    protected Query createQuery( DynamicOperand left,
+                                 Operator operator,
+                                 StaticOperand right,
+                                 boolean caseSensitive ) throws IOException {
+        // Resolve the static operand to a literal value first ...
+        Object value = createOperand(right, caseSensitive);
+        assert value != null;
+
+        // Then dispatch on the kind of dynamic operand ...
+        if (left instanceof FullTextSearchScore) {
+            // Scores can only be applied as a result filter, not expressed as a query ...
+            return null;
+        }
+        if (left instanceof PropertyValue) {
+            return session.findNodesWith((PropertyValue)left, operator, value, caseSensitive);
+        }
+        if (left instanceof Length) {
+            return session.findNodesWith((Length)left, operator, right);
+        }
+        if (left instanceof LowerCase) {
+            // Re-dispatch on the wrapped operand with case-insensitive matching;
+            // createOperand lower-cases the value when caseSensitive is false ...
+            LowerCase lowerCase = (LowerCase)left;
+            return createQuery(lowerCase.getOperand(), operator, right, false);
+        }
+        if (left instanceof UpperCase) {
+            // Same treatment as LowerCase: both sides are compared in lower case ...
+            UpperCase upperCase = (UpperCase)left;
+            return createQuery(upperCase.getOperand(), operator, right, false);
+        }
+        if (left instanceof NodeDepth) {
+            assert operator != Operator.LIKE;
+            // Could be represented as a result filter, but let's do this now ...
+            return session.findNodesWith((NodeDepth)left, operator, value);
+        }
+        if (left instanceof NodePath) {
+            return session.findNodesWith((NodePath)left, operator, value, caseSensitive);
+        }
+        if (left instanceof NodeName) {
+            return session.findNodesWith((NodeName)left, operator, value, caseSensitive);
+        }
+        if (left instanceof NodeLocalName) {
+            return session.findNodesWith((NodeLocalName)left, operator, value, caseSensitive);
+        }
+        // Unknown operand type ...
+        assert false;
+        return null;
+    }
+
+    /**
+     * Resolve a static operand to its literal value, lower-casing it when a case-insensitive
+     * comparison is being built.
+     *
+     * @param operand the static operand; expected to be a Literal or a BindVariableName
+     * @param caseSensitive false if the value should be lower-cased for case-insensitive matching
+     * @return the resolved value
+     *         NOTE(review): a BindVariableName with no bound value yields null here, which will
+     *         trip the caller's 'assert value != null' — confirm variables are always validated upstream
+     */
+    protected Object createOperand( StaticOperand operand,
+                                    boolean caseSensitive ) {
+        Object value = null;
+        if (operand instanceof Literal) {
+            Literal literal = (Literal)operand;
+            value = literal.getValue();
+            if (!caseSensitive) value = lowerCase(value);
+        } else if (operand instanceof BindVariableName) {
+            // Look up the variable's value in the query context ...
+            BindVariableName variable = (BindVariableName)operand;
+            String variableName = variable.getVariableName();
+            value = getContext().getVariables().get(variableName);
+            if (!caseSensitive) value = lowerCase(value);
+        } else {
+            // No other StaticOperand subtypes are expected ...
+            assert false;
+        }
+        return value;
+    }
+
+    /**
+     * Create a Lucene query for a range (BETWEEN-style) constraint on a dynamic operand.
+     * Numeric/date ranges on node depth or property values are delegated to the session's
+     * range-query support; all other cases are expressed as the conjunction of a lower-bound
+     * and an upper-bound comparison.
+     *
+     * @param left the dynamic operand being constrained
+     * @param lower the static operand for the lower bound
+     * @param upper the static operand for the upper bound
+     * @param includesLower true if the lower bound is inclusive
+     * @param includesUpper true if the upper bound is inclusive
+     * @param caseSensitive true if string comparisons should be case-sensitive
+     * @return the query, or null if either bound cannot be pushed down to Lucene
+     * @throws IOException if there is a problem accessing the indexes
+     */
+    protected Query createQuery( DynamicOperand left,
+                                 StaticOperand lower,
+                                 StaticOperand upper,
+                                 boolean includesLower,
+                                 boolean includesUpper,
+                                 boolean caseSensitive ) throws IOException {
+        // Handle the static operands ...
+        Object lowerValue = createOperand(lower, caseSensitive);
+        Object upperValue = createOperand(upper, caseSensitive);
+        assert lowerValue != null;
+        assert upperValue != null;
+
+        // Only in the case of a PropertyValue and Depth will we need to do something special ...
+        if (left instanceof NodeDepth) {
+            return session.findNodesWithNumericRange((NodeDepth)left, lowerValue, upperValue, includesLower, includesUpper);
+        } else if (left instanceof PropertyValue) {
+            PropertyType lowerType = PropertyType.discoverType(lowerValue);
+            PropertyType upperType = PropertyType.discoverType(upperValue);
+            // Only use a native range query when both bounds have the same orderable type ...
+            if (upperType == lowerType) {
+                switch (upperType) {
+                    case DATE:
+                    case LONG:
+                    case DOUBLE:
+                    case DECIMAL:
+                        return session.findNodesWithNumericRange((PropertyValue)left,
+                                                                 lowerValue,
+                                                                 upperValue,
+                                                                 includesLower,
+                                                                 includesUpper);
+                    default:
+                        // continue on and handle as boolean query ...
+                }
+            }
+        }
+
+        // Otherwise, just create a boolean query of two comparisons: 'left >= lower AND left <= upper'
+        // (strict inequalities when the bound is exclusive) ...
+        BooleanQuery query = new BooleanQuery();
+        Operator lowerOp = includesLower ? Operator.GREATER_THAN_OR_EQUAL_TO : Operator.GREATER_THAN;
+        Operator upperOp = includesUpper ? Operator.LESS_THAN_OR_EQUAL_TO : Operator.LESS_THAN;
+        Query lowerQuery = createQuery(left, lowerOp, lower, caseSensitive);
+        Query upperQuery = createQuery(left, upperOp, upper, caseSensitive);
+        if (lowerQuery == null || upperQuery == null) return null;
+        query.add(lowerQuery, Occur.MUST);
+        query.add(upperQuery, Occur.MUST);
+        return query;
+    }
+
+    /**
+     * Return a lower-cased copy of the supplied value. Strings are lower-cased directly;
+     * any other (non-binary) value is converted to a string, lower-cased, and converted
+     * back to its original type.
+     *
+     * @param value the value to lower-case
+     * @return the lower-cased value, in the same type as the input
+     */
+    protected Object lowerCase( Object value ) {
+        if (value instanceof String) {
+            return ((String)value).toLowerCase();
+        }
+        // Binary values have no meaningful case ...
+        assert !(value instanceof Binary);
+        ValueFactory<String> strings = getContext().getExecutionContext().getValueFactories().getStringFactory();
+        String lowered = strings.create(value).toLowerCase();
+        // Convert back to the value's original type ...
+        return getContext().getExecutionContext().getValueFactories().getValueFactory(value).create(lowered);
+    }
+
+    /**
+     * Create a query for a property-existence constraint on the named property.
+     *
+     * @param selectorName the selector; currently unused since this component handles a single selector
+     * @param propertyName the name of the property that must exist
+     * @return the query
+     *
+     * NOTE(review): this builds a Term with only a field name (empty text), so the TermQuery
+     * matches only documents whose field contains an empty-string token — presumably the intent
+     * is "documents where the field is present"; verify against how properties are indexed.
+     */
+    protected Query createQuery( SelectorName selectorName,
+                                 Name propertyName ) {
+        Term term = new Term(fieldNameFor(propertyName));
+        return new TermQuery(term);
+    }
+
+    /**
+     * Create a Lucene query for a parsed full-text-search term against the given document field.
+     * Conjunctions become MUST clauses, disjunctions become SHOULD clauses, negated terms become
+     * MUST_NOT clauses, and simple terms become either a PhraseQuery (when quoted) or a TermQuery.
+     *
+     * @param fieldName the name of the document field to search
+     * @param term the parsed full-text search term
+     * @return the query, or null for an unrecognized term type
+     *
+     * NOTE(review): a Lucene BooleanQuery containing only MUST_NOT clauses matches nothing, so a
+     * conjunction/disjunction consisting solely of negated terms will return no results — confirm
+     * this case is rejected or handled upstream.
+     */
+    protected Query createQuery( String fieldName,
+                                 FullTextSearch.Term term ) {
+        if (term instanceof FullTextSearch.Conjunction) {
+            FullTextSearch.Conjunction conjunction = (FullTextSearch.Conjunction)term;
+            BooleanQuery query = new BooleanQuery();
+            for (FullTextSearch.Term nested : conjunction) {
+                if (nested instanceof NegationTerm) {
+                    // Negations contribute MUST_NOT clauses built from the negated term ...
+                    query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
+                } else {
+                    query.add(createQuery(fieldName, nested), Occur.MUST);
+                }
+            }
+            return query;
+        }
+        if (term instanceof FullTextSearch.Disjunction) {
+            FullTextSearch.Disjunction disjunction = (FullTextSearch.Disjunction)term;
+            BooleanQuery query = new BooleanQuery();
+            for (FullTextSearch.Term nested : disjunction) {
+                if (nested instanceof NegationTerm) {
+                    query.add(createQuery(fieldName, ((NegationTerm)nested).getNegatedTerm()), Occur.MUST_NOT);
+                } else {
+                    query.add(createQuery(fieldName, nested), Occur.SHOULD);
+                }
+            }
+            return query;
+        }
+        if (term instanceof FullTextSearch.SimpleTerm) {
+            FullTextSearch.SimpleTerm simple = (FullTextSearch.SimpleTerm)term;
+            if (simple.isQuotingRequired()) {
+                // Quoted multi-word terms must match as an exact phrase ...
+                PhraseQuery query = new PhraseQuery();
+                query.setSlop(0); // terms must be adjacent
+                for (String value : simple.getValues()) {
+                    query.add(new Term(fieldName, value));
+                }
+                return query;
+            }
+            return new TermQuery(new Term(fieldName, simple.getValue()));
+        }
+        // Should not get here ...
+        assert false;
+        return null;
+    }
+
+ /**
+ * This collector is responsible for loading the value for each of the columns into each tuple array.
+ */
+    protected static class TupleCollector extends Collector {
+        // Accumulated result tuples, in the order documents were collected ...
+        private final LinkedList<Object[]> tuples = new LinkedList<Object[]>();
+        // The columns expected in each tuple ...
+        private final Columns columns;
+        // Size of each tuple array (from Columns.getTupleSize()) ...
+        private final int numValues;
+        // Whether a full-text-search score column must be populated ...
+        private final boolean recordScore;
+        // Tuple index of the score column, or -1 when scores are not recorded ...
+        private final int scoreIndex;
+        // Limits document loading to the columns actually needed (plus the UUID) ...
+        private final FieldSelector fieldSelector;
+        // Tuple index of the Location column ...
+        private final int locationIndex;
+        private final ValueFactory<UUID> uuidFactory;
+        // Set by Lucene via setScorer() before collect() is called ...
+        private Scorer scorer;
+        // Set by Lucene via setNextReader() for each index segment ...
+        private IndexReader currentReader;
+        // Document-id base of the current segment; added to per-segment doc ids ...
+        private int docOffset;
+
+        /**
+         * Create a collector that materializes one tuple per matching document.
+         *
+         * @param columns the columns for the tuples; may not be null and must have exactly one selector
+         * @param uuidFactory the factory for converting the stored UUID field into a UUID; may not be null
+         */
+        protected TupleCollector( Columns columns,
+                                  ValueFactory<UUID> uuidFactory ) {
+            this.columns = columns;
+            this.uuidFactory = uuidFactory;
+            assert this.columns != null;
+            assert this.uuidFactory != null;
+            this.numValues = this.columns.getTupleSize();
+            assert this.numValues >= 0;
+            assert this.columns.getSelectorNames().size() == 1;
+            final String selectorName = this.columns.getSelectorNames().get(0);
+            this.locationIndex = this.columns.getLocationIndex(selectorName);
+            this.recordScore = this.columns.hasFullTextSearchScores();
+            this.scoreIndex = this.recordScore ? this.columns.getFullTextSearchScoreIndexFor(selectorName) : -1;
+            final Set<String> columnNames = new HashSet<String>(this.columns.getColumnNames());
+            columnNames.add(ContentIndex.UUID); // add the UUID, which we'll put into the Location ...
+            this.fieldSelector = new FieldSelector() {
+                private static final long serialVersionUID = 1L;
+
+                public FieldSelectorResult accept( String fieldName ) {
+                    return columnNames.contains(fieldName) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
+                }
+            };
+        }
+
+        /**
+         * @return tuples
+         */
+        public LinkedList<Object[]> getTuples() {
+            return tuples;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#acceptsDocsOutOfOrder()
+         */
+        @Override
+        public boolean acceptsDocsOutOfOrder() {
+            // Tuple order does not matter, so let Lucene collect in any order ...
+            return true;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#setNextReader(org.apache.lucene.index.IndexReader, int)
+         */
+        @Override
+        public void setNextReader( IndexReader reader,
+                                   int docBase ) {
+            this.currentReader = reader;
+            this.docOffset = docBase;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#setScorer(org.apache.lucene.search.Scorer)
+         */
+        @Override
+        public void setScorer( Scorer scorer ) {
+            this.scorer = scorer;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * @see org.apache.lucene.search.Collector#collect(int)
+         */
+        @Override
+        public void collect( int doc ) throws IOException {
+            // Translate the segment-relative doc id into an absolute id ...
+            int docId = doc + docOffset;
+            Object[] tuple = new Object[numValues];
+            Document document = currentReader.document(docId, fieldSelector);
+            for (String columnName : columns.getColumnNames()) {
+                int index = columns.getColumnIndexForName(columnName);
+                // We just need to retrieve the first value if there is more than one ...
+                // (document.get returns null when the document has no such field)
+                tuple[index] = document.get(columnName);
+            }
+
+            // Set the score column if required ...
+            if (recordScore) {
+                assert scorer != null;
+                tuple[scoreIndex] = scorer.score();
+            }
+
+            // Load the UUID into a Location object ...
+            UUID uuid = uuidFactory.create(document.get(ContentIndex.UUID));
+            tuple[locationIndex] = Location.create(uuid);
+            tuples.add(tuple);
+        }
+    }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneQueryComponent.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -0,0 +1,368 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.Set;
+import java.util.UUID;
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.jboss.dna.common.i18n.I18n;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.property.DateTimeFactory;
+import org.jboss.dna.graph.property.Path;
+import org.jboss.dna.graph.property.PathFactory;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.QueryEngine;
+import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.Length;
+import org.jboss.dna.graph.query.model.NodeDepth;
+import org.jboss.dna.graph.query.model.NodeLocalName;
+import org.jboss.dna.graph.query.model.NodeName;
+import org.jboss.dna.graph.query.model.NodePath;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.model.QueryCommand;
+import org.jboss.dna.graph.query.model.Visitors;
+import org.jboss.dna.graph.query.optimize.Optimizer;
+import org.jboss.dna.graph.query.optimize.OptimizerRule;
+import org.jboss.dna.graph.query.optimize.RuleBasedOptimizer;
+import org.jboss.dna.graph.query.plan.CanonicalPlanner;
+import org.jboss.dna.graph.query.plan.PlanHints;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.Planner;
+import org.jboss.dna.graph.query.process.ProcessingComponent;
+import org.jboss.dna.graph.query.process.QueryProcessor;
+import org.jboss.dna.graph.search.SearchProvider;
+import org.jboss.dna.search.DualIndexSearchProvider.PathIndex;
+
+/**
+ *
+ */
+@NotThreadSafe
+public abstract class LuceneSession implements SearchProvider.Session {
+ protected final ExecutionContext context;
+ protected final String sourceName;
+ protected final String workspaceName;
+ protected final IndexRules rules;
+ protected final Analyzer analyzer;
+ protected final boolean overwrite;
+ protected final boolean readOnly;
+ protected final ValueFactory<String> stringFactory;
+ protected final DateTimeFactory dateFactory;
+ protected final PathFactory pathFactory;
+ private int changeCount;
+ private QueryEngine queryEngine;
+
+    /**
+     * Create a session for working with the Lucene indexes of a single workspace.
+     *
+     * @param context the execution context; may not be null
+     * @param sourceName the name of the source being indexed/searched; may not be null
+     * @param workspaceName the name of the workspace; may not be null
+     * @param rules the indexing rules; may not be null
+     * @param analyzer the Lucene analyzer; may not be null
+     * @param overwrite true if existing index content should be overwritten
+     * @param readOnly true if this session will not modify the indexes
+     */
+    protected LuceneSession( ExecutionContext context,
+                             String sourceName,
+                             String workspaceName,
+                             IndexRules rules,
+                             Analyzer analyzer,
+                             boolean overwrite,
+                             boolean readOnly ) {
+        this.context = context;
+        this.sourceName = sourceName;
+        this.workspaceName = workspaceName;
+        this.rules = rules;
+        this.overwrite = overwrite;
+        this.readOnly = readOnly;
+        this.analyzer = analyzer;
+        this.stringFactory = context.getValueFactories().getStringFactory();
+        this.dateFactory = context.getValueFactories().getDateFactory();
+        this.pathFactory = context.getValueFactories().getPathFactory();
+        assert this.context != null;
+        assert this.sourceName != null;
+        assert this.workspaceName != null;
+        assert this.rules != null;
+        assert this.analyzer != null;
+        assert this.stringFactory != null;
+        assert this.dateFactory != null;
+        // FIX: pathFactory was the only initialized factory without a matching assertion ...
+        assert this.pathFactory != null;
+    }
+
+ /**
+ * Create the field name that will be used to store the full-text searchable property values.
+ *
+ * @param propertyName the name of the property; may not be null
+ * @return the field name for the full-text searchable property values; never null
+ */
+ protected abstract String fullTextFieldName( String propertyName );
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#getContext()
+ */
+ public final ExecutionContext getContext() {
+ return context;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#getSourceName()
+ */
+ public final String getSourceName() {
+ return sourceName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#getWorkspaceName()
+ */
+ public String getWorkspaceName() {
+ return workspaceName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.search.SearchProvider.Session#hasChanges()
+ */
+ public boolean hasChanges() {
+ return changeCount > 0;
+ }
+
+ /**
+ * Get the Lucene index searcher that should be used to execute queries.
+ *
+ * @return the searcher; never null
+ * @throws IOException if there is an error obtaining the index searcher
+ */
+ public abstract IndexSearcher getContentSearcher() throws IOException;
+
+ /**
+ * Get the query engine for this session.
+ *
+ * @return the query engine; never null
+ */
+    /**
+     * Get the query engine for this session, creating it on first use.
+     * Lazy initialization without synchronization is acceptable here because the class is
+     * declared {@code @NotThreadSafe}.
+     *
+     * @return the query engine; never null
+     */
+    protected QueryEngine queryEngine() {
+        if (queryEngine == null) {
+            // Create the query engine ...
+            Planner planner = new CanonicalPlanner();
+            Optimizer optimizer = new RuleBasedOptimizer() {
+                /**
+                 * {@inheritDoc}
+                 *
+                 * @see org.jboss.dna.graph.query.optimize.RuleBasedOptimizer#populateRuleStack(java.util.LinkedList,
+                 *      org.jboss.dna.graph.query.plan.PlanHints)
+                 */
+                @Override
+                protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
+                                                  PlanHints hints ) {
+                    super.populateRuleStack(ruleStack, hints);
+                    // Add any custom rules here, either at the front of the stack or at the end
+                }
+            };
+            QueryProcessor processor = new QueryProcessor() {
+                /**
+                 * {@inheritDoc}
+                 *
+                 * @see org.jboss.dna.graph.query.process.QueryProcessor#createAccessComponent(org.jboss.dna.graph.query.model.QueryCommand,
+                 *      org.jboss.dna.graph.query.QueryContext, org.jboss.dna.graph.query.plan.PlanNode,
+                 *      org.jboss.dna.graph.query.QueryResults.Columns,
+                 *      org.jboss.dna.graph.query.process.SelectComponent.Analyzer)
+                 */
+                @Override
+                protected ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+                                                                     QueryContext context,
+                                                                     PlanNode accessNode,
+                                                                     Columns resultColumns,
+                                                                     org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer ) {
+                    // Delegate to the subclass; on I/O failure record the problem and return null ...
+                    try {
+                        return LuceneSession.this.createAccessComponent(originalQuery,
+                                                                        context,
+                                                                        accessNode,
+                                                                        resultColumns,
+                                                                        analyzer);
+                    } catch (IOException e) {
+                        I18n msg = SearchI18n.errorWhilePerformingQuery;
+                        context.getProblems().addError(e,
+                                                       msg,
+                                                       Visitors.readable(originalQuery),
+                                                       getWorkspaceName(),
+                                                       getSourceName(),
+                                                       e.getMessage());
+                        return null;
+                    }
+                }
+            };
+
+            queryEngine = new QueryEngine(planner, optimizer, processor);
+        }
+        return queryEngine;
+    }
+
+ protected abstract ProcessingComponent createAccessComponent( QueryCommand originalQuery,
+ QueryContext context,
+ PlanNode accessNode,
+ Columns resultColumns,
+ org.jboss.dna.graph.query.process.SelectComponent.Analyzer analyzer )
+ throws IOException;
+
+ /**
+ * Utility method to create a query to find all of the documents representing nodes with the supplied UUIDs.
+ *
+ * @param uuids the UUIDs of the nodes that are to be found; may not be null
+ * @return the query; never null
+ * @throws IOException if there is a problem creating this query
+ */
+ public abstract Query findAllNodesWithUuids( Set<UUID> uuids ) throws IOException;
+
+ public abstract Query findAllNodesBelow( Path ancestorPath ) throws IOException;
+
+ /**
+ * Return a query that can be used to find all of the documents that represent nodes that are children of the node at the
+ * supplied path.
+ *
+ * @param parentPath the path of the parent node.
+ * @return the query; never null
+ * @throws IOException if there is an error creating the query
+ */
+ public abstract Query findChildNodes( Path parentPath ) throws IOException;
+
+ /**
+ * Create a query that can be used to find the one document (or node) that exists at the exact path supplied. This method
+ * first queries the {@link PathIndex path index} to find the UUID of the node at the supplied path, and then returns a query
+ * that matches the UUID.
+ *
+ * @param path the path of the node
+ * @return the query; never null
+ * @throws IOException if there is an error creating the query
+ */
+ public abstract Query findNodeAt( Path path ) throws IOException;
+
+ /**
+ * Create a query that can be used to find documents (or nodes) that have a field value that satisfies the supplied LIKE
+ * expression.
+ *
+ * @param fieldName the name of the document field to search
+ * @param likeExpression the JCR like expression
+ * @return the query; never null
+ * @throws IOException if there is an error creating the query
+ */
+ public abstract Query findNodesLike( String fieldName,
+ String likeExpression ) throws IOException;
+
+ public abstract Query findNodesWith( Length propertyLength,
+ Operator operator,
+ Object value ) throws IOException;
+
+ public abstract Query findNodesWith( PropertyValue propertyValue,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ public abstract Query findNodesWithNumericRange( PropertyValue propertyValue,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) throws IOException;
+
+ public abstract Query findNodesWithNumericRange( NodeDepth depth,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) throws IOException;
+
+ // public abstract Query findNodesWithNumericRange( String field,
+ // Object lowerValue,
+ // Object upperValue,
+ // boolean includesLower,
+ // boolean includesUpper ) throws IOException;
+
+ public abstract Query findNodesWith( NodePath nodePath,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ public abstract Query findNodesWith( NodeName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ public abstract Query findNodesWith( NodeLocalName nodeName,
+ Operator operator,
+ Object value,
+ boolean caseSensitive ) throws IOException;
+
+ public abstract Query findNodesWith( NodeDepth depthConstraint,
+ Operator operator,
+ Object value ) throws IOException;
+
+ // public abstract Query createLocalNameQuery( String likeExpression ) throws IOException;
+
+ // public abstract Query createSnsIndexQuery( String likeExpression ) throws IOException;
+
+ /**
+ * Convert the JCR like expression to a Lucene wildcard expression. The JCR like expression uses '%' to match 0 or more
+ * characters, '_' to match any single character, '\x' to match the 'x' character, and all other characters to match
+ * themselves.
+ *
+ * @param likeExpression the like expression; may not be null
+ * @return the expression that can be used with a WildcardQuery; never null
+ */
+ public String toWildcardExpression( String likeExpression ) {
+ assert likeExpression != null;
+ assert likeExpression.length() > 0;
+ return likeExpression.replace('%', '*').replace('_', '?').replaceAll("\\\\(.)", "$1");
+ }
+
+ /**
+ * Convert the JCR like expression to a regular expression. The JCR like expression uses '%' to match 0 or more characters,
+ * '_' to match any single character, '\x' to match the 'x' character, and all other characters to match themselves. Note that
+ * if any regex metacharacters appear in the like expression, they will be escaped within the resulting regular expression.
+ *
+ * @param likeExpression the like expression; may not be null
+ * @return the expression that can be used with a WildcardQuery; never null
+ */
+ public String toRegularExpression( String likeExpression ) {
+ assert likeExpression != null;
+ assert likeExpression.length() > 0;
+ // Replace all '\x' with 'x' ...
+ String result = likeExpression.replaceAll("\\\\(.)", "$1");
+ // Escape characters used as metacharacters in regular expressions, including
+ // '[', '^', '\', '$', '.', '|', '?', '*', '+', '(', and ')'
+ result = result.replaceAll("([[^\\\\$.|?*+()])", "\\$1");
+ // Replace '%'->'[.]+' and '_'->'[.]
+ result = likeExpression.replace("%", "[.]+").replace("_", "[.]");
+ return result;
+ }
+
+    /**
+     * Render a path in the canonical string form used by the path index: "/" for the root,
+     * and an explicit same-name-sibling index (e.g. "[1]") on the last segment otherwise.
+     *
+     * @param path the path to render; may not be null
+     * @param stringFactory the factory used to produce the string form
+     * @return the canonical string form of the path
+     */
+    public String pathAsString( Path path,
+                                ValueFactory<String> stringFactory ) {
+        assert path != null;
+        if (path.isRoot()) return "/";
+        String result = stringFactory.create(path);
+        // Append the default SNS index when the last segment has none ...
+        return result.endsWith("]") ? result : result + '[' + Path.DEFAULT_INDEX + ']';
+    }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/LuceneSession.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngine.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,810 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import net.jcip.annotations.GuardedBy;
-import net.jcip.annotations.ThreadSafe;
-import org.apache.lucene.queryParser.ParseException;
-import org.jboss.dna.common.i18n.I18n;
-import org.jboss.dna.common.util.CheckArg;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Graph;
-import org.jboss.dna.graph.GraphI18n;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Subgraph;
-import org.jboss.dna.graph.SubgraphNode;
-import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
-import org.jboss.dna.graph.connector.RepositorySource;
-import org.jboss.dna.graph.connector.RepositorySourceException;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.QueryResults;
-import org.jboss.dna.graph.query.model.QueryCommand;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.ChangeRequest;
-import org.jboss.dna.graph.request.InvalidWorkspaceException;
-
-/**
- * A component that acts as a search engine for the content within a single {@link RepositorySource}. This engine manages a set of
- * indexes and provides search functionality for each of the workspaces within the source, and provides various methods to
- * (re)index the content contained with source's workspaces and keep the indexes up-to-date via changes.
- */
-@ThreadSafe
-public class SearchEngine {
-
- /**
- * The default maximum number of changes that can be made to an index before the indexes are automatically optimized is * * *
- * * {@value}
- */
- public static final int DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION = 0;
-
- protected final ExecutionContext context;
- private final String sourceName;
- private final RepositoryConnectionFactory connectionFactory;
- protected final IndexLayout indexLayout;
- private final int maxChangesBeforeAutomaticOptimization;
- @GuardedBy( "workspacesLock" )
- private final Map<String, Workspace> workspacesByName = new HashMap<String, Workspace>();
- private final ReadWriteLock workspacesLock = new ReentrantReadWriteLock();
-
- /**
- * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
- * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
- * {@link DirectoryConfiguration directory factory} that defines where each workspace's indexes should be placed.
- *
- * @param context the execution context for indexing and optimization operations
- * @param sourceName the name of the {@link RepositorySource}
- * @param connectionFactory the connection factory
- * @param indexLayout the specification of the Lucene index layout
- * @param maxChangesBeforeAutomaticOptimization the number of changes that can be made to the index before the indexes are
- * automatically optimized; may be 0 or a negative number if no automatic optimization should be done
- * @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
- */
- public SearchEngine( ExecutionContext context,
- String sourceName,
- RepositoryConnectionFactory connectionFactory,
- IndexLayout indexLayout,
- int maxChangesBeforeAutomaticOptimization ) {
- CheckArg.isNotNull(context, "context");
- CheckArg.isNotNull(sourceName, "sourceName");
- CheckArg.isNotNull(connectionFactory, "connectionFactory");
- this.sourceName = sourceName;
- this.connectionFactory = connectionFactory;
- this.indexLayout = indexLayout;
- this.context = context;
- this.maxChangesBeforeAutomaticOptimization = maxChangesBeforeAutomaticOptimization < 0 ? 0 : maxChangesBeforeAutomaticOptimization;
- }
-
- /**
- * Create a search engine instance given the supplied {@link ExecutionContext execution context}, name of the
- * {@link RepositorySource}, the {@link RepositoryConnectionFactory factory for RepositorySource connections}, and the
- * {@link DirectoryConfiguration directory factory} that defines where each workspace's indexes should be placed.
- *
- * @param context the execution context for indexing and optimization operations
- * @param sourceName the name of the {@link RepositorySource}
- * @param connectionFactory the connection factory
- * @param indexLayout the specification of the Lucene index layout
- * @throws IllegalArgumentException if any of the parameters (other than indexing strategy) are null
- */
- public SearchEngine( ExecutionContext context,
- String sourceName,
- RepositoryConnectionFactory connectionFactory,
- IndexLayout indexLayout ) {
- this(context, sourceName, connectionFactory, indexLayout, DEFAULT_MAX_CHANGES_BEFORE_AUTOMATIC_OPTIMIZATION);
- }
-
- /**
- * Get the name of the RepositorySource that this engine is to use.
- *
- * @return the source name; never null
- */
- public String getSourceName() {
- return sourceName;
- }
-
- /**
- * Get the context in which all indexing operations execute.
- *
- * @return the execution context; never null
- */
- public ExecutionContext getContext() {
- return context;
- }
-
- /**
- * @return maxChangesBeforeAutomaticOptimization
- */
- public int getMaxChangesBeforeAutomaticOptimization() {
- return maxChangesBeforeAutomaticOptimization;
- }
-
- /**
- * Utility to create a Graph for the source.
- *
- * @return the graph instance; never null
- */
- final Graph graph() {
- return Graph.create(sourceName, connectionFactory, context);
- }
-
- /**
- * Utility to obtain the root path.
- *
- * @return the root path; never null
- */
- final Path rootPath() {
- return context.getValueFactories().getPathFactory().createRootPath();
- }
-
- final String readable( Path path ) {
- return context.getValueFactories().getStringFactory().create(path);
- }
-
- /**
- * Index all of the content at or below the supplied path in the named workspace within the {@link #getSourceName() source}.
- * If the starting point is the root node, then this method will drop the existing index(es) and rebuild from the content in
- * the workspace and source.
- * <p>
- * This method operates synchronously and returns when the requested indexing is completed.
- * </p>
- *
- * @param workspaceName the name of the workspace
- * @param startingPoint the location that represents the content to be indexed; must have a path
- * @param depthPerRead the depth of each subgraph read operation
- * @throws IllegalArgumentException if the workspace name or location are null
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- public void index( String workspaceName,
- Location startingPoint,
- int depthPerRead ) throws RepositorySourceException, SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- CheckArg.isNotNull(startingPoint, "startingPoint");
- assert startingPoint.hasPath();
-
- Workspace workspace = getWorkspace(workspaceName);
- if (startingPoint.getPath().isRoot()) {
- // More efficient to just start over with a new index ...
- workspace.execute(true, addContent(startingPoint, depthPerRead));
- } else {
- // Have to first remove the content below the starting point, then add it again ...
- workspace.execute(false, removeContent(startingPoint), addContent(startingPoint, depthPerRead));
- }
- }
-
- /**
- * Index all of the content at or below the supplied path in the named workspace within the {@link #getSourceName() source}.
- * If the starting point is the root node, then this method will drop the existing index(es) and rebuild from the content in
- * the workspace and source.
- * <p>
- * This method operates synchronously and returns when the requested indexing is completed.
- * </p>
- *
- * @param workspaceName the name of the workspace
- * @param startingPoint the path that represents the content to be indexed
- * @param depthPerRead the depth of each subgraph read operation
- * @throws IllegalArgumentException if the workspace name or path are null
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- public void index( String workspaceName,
- Path startingPoint,
- int depthPerRead ) throws RepositorySourceException, SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- CheckArg.isNotNull(startingPoint, "startingPoint");
- index(workspaceName, Location.create(startingPoint), depthPerRead);
- }
-
- /**
- * Index all of the content in the named workspace within the {@link #getSourceName() source}. This method operates
- * synchronously and returns when the requested indexing is completed.
- *
- * @param workspaceName the name of the workspace
- * @param depthPerRead the depth of each subgraph read operation
- * @throws IllegalArgumentException if the workspace name is null
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- public void index( String workspaceName,
- int depthPerRead ) throws RepositorySourceException, SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- index(workspaceName, rootPath(), depthPerRead);
- }
-
- /**
- * Index (or re-index) all of the content in all of the workspaces within the source. This method operates synchronously and
- * returns when the requested indexing is completed.
- *
- * @param depthPerRead the depth of each subgraph read operation
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- */
- public void index( int depthPerRead ) throws RepositorySourceException, SearchEngineException {
- Path rootPath = rootPath();
- for (String workspaceName : graph().getWorkspaces()) {
- index(workspaceName, rootPath, depthPerRead);
- }
- }
-
- /**
- * Update the indexes with the supplied set of changes to the content.
- *
- * @param changes the set of changes to the content
- * @throws IllegalArgumentException if the path is null
- * @throws RepositorySourceException if there is a problem accessing the content
- * @throws SearchEngineException if there is a problem updating the indexes
- */
- public void index( final Iterable<ChangeRequest> changes ) throws SearchEngineException {
- // First break up all the changes into different collections, one collection per workspace ...
- Map<String, Collection<ChangeRequest>> changesByWorkspace = new HashMap<String, Collection<ChangeRequest>>();
- for (ChangeRequest request : changes) {
- String workspaceName = request.changedWorkspace();
- Collection<ChangeRequest> changesForWorkspace = changesByWorkspace.get(workspaceName);
- if (changesForWorkspace == null) {
- changesForWorkspace = new LinkedList<ChangeRequest>();
- changesByWorkspace.put(workspaceName, changesForWorkspace);
- }
- changesForWorkspace.add(request);
- }
- // Now update the indexes for each workspace (serially). This minimizes the time that each workspace
- // locks its indexes for writing.
- for (Map.Entry<String, Collection<ChangeRequest>> entry : changesByWorkspace.entrySet()) {
- String workspaceName = entry.getKey();
- Collection<ChangeRequest> changesForWorkspace = entry.getValue();
- getWorkspace(workspaceName).execute(false, updateContent(changesForWorkspace));
- }
- }
-
- /**
- * Invoke the engine's garbage collection on all indexes used by all workspaces in the source. This method reclaims space and
- * optimizes the index. This should be done on a periodic basis after changes are made to the engine's indexes.
- *
- * @throws SearchEngineException if there is a problem during optimization
- */
- public void optimize() throws SearchEngineException {
- for (String workspaceName : graph().getWorkspaces()) {
- getWorkspace(workspaceName).execute(false, optimizeContent());
- }
- }
-
- /**
- * Invoke the engine's garbage collection for the indexes associated with the specified workspace. This method reclaims space
- * and optimizes the index. This should be done on a periodic basis after changes are made to the engine's indexes.
- *
- * @param workspaceName the name of the workspace
- * @throws IllegalArgumentException if the workspace name is null
- * @throws SearchEngineException if there is a problem during optimization
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- public void optimize( String workspaceName ) throws SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- getWorkspace(workspaceName).execute(false, optimizeContent());
- }
-
- /**
- * Perform a full-text search of the content in the named workspace, given the maximum number of results and the offset
- * defining the first result the caller is interested in.
- *
- * @param context the execution context in which the search is to take place; may not be null
- * @param workspaceName the name of the workspace
- * @param fullTextSearch the full-text search to be performed; may not be null
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @return the activity that will perform the work
- * @throws IllegalArgumentException if the execution context or workspace name are null
- * @throws SearchEngineException if there is a problem during optimization
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- public List<Location> fullTextSearch( ExecutionContext context,
- String workspaceName,
- String fullTextSearch,
- int maxResults,
- int offset ) {
- CheckArg.isNotNull(context, "context");
- CheckArg.isNotNull(workspaceName, "workspaceName");
- Search searchActivity = searchContent(context, fullTextSearch, maxResults, offset);
- getWorkspace(workspaceName).execute(false, searchActivity);
- return searchActivity.getResults();
- }
-
- /**
- * Perform a query of the content in the named workspace, given the Abstract Query Model representation of the query.
- *
- * @param context the execution context in which the search is to take place; may not be null
- * @param workspaceName the name of the workspace
- * @param query the query that is to be executed, in the form of the Abstract Query Model
- * @param schemata the definition of the tables and views that can be used in the query; may not be null
- * @return the query results; never null
- * @throws IllegalArgumentException if the context, query, or schemata references are null
- */
- public QueryResults query( ExecutionContext context,
- String workspaceName,
- QueryCommand query,
- Schemata schemata ) {
- CheckArg.isNotNull(context, "context");
- CheckArg.isNotNull(workspaceName, "workspaceName");
- CheckArg.isNotNull(query, "query");
- CheckArg.isNotNull(schemata, "schemata");
- QueryContext queryContext = new QueryContext(context, schemata);
- Query queryActivity = queryContent(queryContext, query);
- getWorkspace(workspaceName).execute(false, queryActivity);
- return queryActivity.getResults();
- }
-
- /**
- * Remove the supplied index from the search engine. This is typically done when the workspace has been deleted from the
- * source, or when
- *
- * @param workspaceName the name of the workspace
- * @throws IllegalArgumentException if the workspace name is null
- * @throws SearchEngineException if there is a problem removing the workspace
- */
- public void removeWorkspace( String workspaceName ) throws SearchEngineException {
- CheckArg.isNotNull(workspaceName, "workspaceName");
- try {
- workspacesLock.writeLock().lock();
- // Check whether another thread got in and created the engine while we waited ...
- Workspace workspace = workspacesByName.remove(workspaceName);
- if (workspace != null) {
- indexLayout.destroyIndexes(context, getSourceName(), workspaceName);
- }
- } catch (IOException e) {
- String message = SearchI18n.errorWhileRemovingIndexesForWorkspace.text(sourceName, workspaceName, e.getMessage());
- throw new SearchEngineException(message, e);
- } finally {
- workspacesLock.writeLock().unlock();
- }
- }
-
- /**
- * Remove from the search engine all workspace-related indexes, thereby cleaning up any resources used by this search engine.
- *
- * @throws SearchEngineException if there is a problem removing any of the workspace
- */
- public void removeWorkspaces() throws SearchEngineException {
- try {
- workspacesLock.writeLock().lock();
- for (String workspaceName : new HashSet<String>(workspacesByName.keySet())) {
- removeWorkspace(workspaceName);
- }
- } finally {
- workspacesLock.writeLock().unlock();
- }
- }
-
- /**
- * Get the search engine for the workspace with the supplied name.
- *
- * @param workspaceName the name of the workspace
- * @return the workspace's search engine
- * @throws InvalidWorkspaceException if the workspace does not exist
- */
- protected Workspace getWorkspace( String workspaceName ) {
- Workspace workspace = null;
- try {
- workspacesLock.readLock().lock();
- workspace = workspacesByName.get(workspaceName);
- } finally {
- workspacesLock.readLock().unlock();
- }
-
- if (workspace == null) {
- // Verify the workspace does exist ...
- if (!graph().getWorkspaces().contains(workspaceName)) {
- String msg = GraphI18n.workspaceDoesNotExistInRepository.text(workspaceName, getSourceName());
- throw new InvalidWorkspaceException(msg);
- }
- try {
- workspacesLock.writeLock().lock();
- // Check whether another thread got in and created the engine while we waited ...
- workspace = workspacesByName.get(workspaceName);
- if (workspace == null) {
- // Create the engine and register it ...
- workspace = new Workspace(workspaceName);
- workspacesByName.put(workspaceName, workspace);
- }
- } finally {
- workspacesLock.writeLock().unlock();
- }
- }
- return workspace;
- }
-
- protected class Workspace {
- private final String sourceName;
- private final String workspaceName;
- protected final AtomicInteger modifiedNodesSinceLastOptimize = new AtomicInteger(0);
-
- protected Workspace( String workspaceName ) {
- this.workspaceName = workspaceName;
- this.sourceName = getSourceName();
- }
-
- /**
- * Get the workspace name.
- *
- * @return the workspace name; never null
- */
- public String getWorkspaceName() {
- return workspaceName;
- }
-
- /**
- * Execute the supplied activities against the indexes.
- *
- * @param overwrite true if the existing indexes should be overwritten, or false if they should be used
- * @param activities the activities to execute
- * @throws SearchEngineException if there is a problem performing the activities
- */
- protected final void execute( boolean overwrite,
- Activity... activities ) throws SearchEngineException {
- // Determine if the activities are readonly ...
- boolean readOnly = true;
- for (Activity activity : activities) {
- if (!(activity instanceof ReadOnlyActivity)) {
- readOnly = false;
- break;
- }
- }
-
- // Create a session ...
- IndexSession session = indexLayout.createSession(context, sourceName, workspaceName, overwrite, readOnly);
- assert session != null;
-
- // Execute the various activities ...
- Throwable error = null;
- try {
- int numChanges = 0;
- for (Activity activity : activities) {
- try {
- numChanges += activity.execute(session);
- } catch (IOException e) {
- error = e;
- throw new SearchEngineException(activity.messageFor(e, sourceName, workspaceName), e);
- } catch (ParseException e) {
- error = e;
- throw new SearchEngineException(activity.messageFor(e, sourceName, workspaceName), e);
- } catch (RuntimeException e) {
- error = e;
- throw e;
- }
- }
- if (numChanges > 0) {
- numChanges = this.modifiedNodesSinceLastOptimize.addAndGet(numChanges);
- // Determine if there have been enough changes made to run the optimizer ...
- int maxChanges = getMaxChangesBeforeAutomaticOptimization();
- if (maxChanges > 0 && numChanges >= maxChanges) {
- Activity optimizer = optimizeContent();
- try {
- optimizer.execute(session);
- } catch (ParseException e) {
- error = e;
- throw new SearchEngineException(optimizer.messageFor(e, sourceName, workspaceName), e);
- } catch (IOException e) {
- error = e;
- throw new SearchEngineException(optimizer.messageFor(e, sourceName, workspaceName), e);
- } catch (RuntimeException e) {
- error = e;
- throw e;
- }
- }
- }
- } finally {
- try {
- if (error == null) {
- session.commit();
- } else {
- session.rollback();
- }
- } catch (IOException e2) {
- // We don't want to lose the existing error, if there is one ...
- if (error == null) {
- I18n msg = SearchI18n.errorWhileCommittingIndexChanges;
- throw new SearchEngineException(msg.text(workspaceName, sourceName, e2.getMessage()), e2);
- }
- }
- }
- }
- }
-
- /**
- * Create an activity that will optimize the indexes.
- *
- * @return the activity that will perform the work
- */
- protected Activity optimizeContent() {
- return new Activity() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.SearchEngine.Activity#execute(org.jboss.dna.search.IndexSession)
- */
- public int execute( IndexSession indexSession ) throws IOException {
- indexSession.optimize();
- return 0; // no lines changed
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return SearchI18n.errorWhileOptimizingIndexes.text(sourceName, workspaceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will read from the source the content at the supplied location and add the content to the search
- * index.
- *
- * @param location the location of the content to read; may not be null
- * @param depthPerRead the depth of each read operation; always positive
- * @return the activity that will perform the work
- */
- protected Activity addContent( final Location location,
- final int depthPerRead ) {
- return new Activity() {
- public int execute( IndexSession indexSession ) throws IOException {
-
- // Create a queue that we'll use to walk the content ...
- LinkedList<Location> locationsToRead = new LinkedList<Location>();
- locationsToRead.add(location);
- int count = 0;
-
- // Now read and index the content ...
- Graph graph = graph();
- graph.useWorkspace(indexSession.getWorkspaceName());
- while (!locationsToRead.isEmpty()) {
- Location location = locationsToRead.poll();
- if (location == null) continue;
- Subgraph subgraph = graph.getSubgraphOfDepth(depthPerRead).at(location);
- // Index all of the nodes within this subgraph ...
- for (SubgraphNode node : subgraph) {
- // Index the node ...
- indexSession.index(node);
- ++count;
-
- // Process the children ...
- for (Location child : node.getChildren()) {
- if (!subgraph.includes(child)) {
- // Record this location as needing to be read ...
- locationsToRead.add(child);
- }
- }
- }
- }
- return count;
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- String path = readable(location.getPath());
- return SearchI18n.errorWhileIndexingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will remove from the indexes all documents that represent content at or below the specified
- * location.
- *
- * @param location the location of the content to removed; may not be null
- * @return the activity that will perform the work
- */
- protected Activity removeContent( final Location location ) {
- return new Activity() {
-
- public int execute( IndexSession indexSession ) throws IOException {
- // Delete the content at/below the path ...
- return indexSession.deleteBelow(location.getPath());
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- String path = readable(location.getPath());
- return SearchI18n.errorWhileRemovingContentAtPath.text(path, workspaceName, sourceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will update the indexes with changes that were already made to the content.
- *
- * @param changes the changes that have been made to the content; may not be null
- * @return the activity that will perform the work
- */
- protected Activity updateContent( final Iterable<ChangeRequest> changes ) {
- return new Activity() {
-
- public int execute( IndexSession indexSession ) throws IOException {
- return indexSession.apply(changes);
- }
-
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return SearchI18n.errorWhileUpdatingContent.text(workspaceName, sourceName, error.getMessage());
- }
- };
- }
-
- /**
- * Create an activity that will perform a full-text search given the supplied query.
- *
- * @param context the context in which the search is to be performed; may not be null
- * @param fullTextSearch the full-text search to be performed; may not be null
- * @param maxResults the maximum number of results that are to be returned; always positive
- * @param offset the number of initial results to skip, or 0 if the first results are to be returned
- * @return the activity that will perform the work; never null
- */
- protected Search searchContent( final ExecutionContext context,
- final String fullTextSearch,
- final int maxResults,
- final int offset ) {
- final List<Location> results = new ArrayList<Location>(maxResults);
- return new Search() {
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.SearchEngine.Activity#execute(org.jboss.dna.search.IndexSession)
- */
- public int execute( IndexSession session ) throws IOException, ParseException {
- session.search(context, fullTextSearch, maxResults, offset, results);
- return 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
- * java.lang.String)
- */
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return SearchI18n.errorWhilePerformingSearch.text(fullTextSearch, workspaceName, sourceName, error.getMessage());
- }
-
- public List<Location> getResults() {
- return results;
- }
- };
- }
-
- /**
- * Create an activity that will perform a query against the index.
- *
- * @param context the context in which the search is to be performed; may not be null
- * @param query the query to be performed; may not be null
- * @return the activity that will perform the query; never null
- */
- protected Query queryContent( final QueryContext context,
- final QueryCommand query ) {
- return new Query() {
- private QueryResults results = null;
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.SearchEngine.Activity#execute(org.jboss.dna.search.IndexSession)
- */
- public int execute( IndexSession session ) throws IOException, ParseException {
- results = session.query(context, query);
- return 0;
- }
-
- /**
- * {@inheritDoc}
- *
- * @see org.jboss.dna.search.SearchEngine.Activity#messageFor(java.lang.Throwable, java.lang.String,
- * java.lang.String)
- */
- public String messageFor( Throwable error,
- String sourceName,
- String workspaceName ) {
- return SearchI18n.errorWhilePerformingQuery.text(query, workspaceName, sourceName, error.getMessage());
- }
-
- public QueryResults getResults() {
- return results;
- }
- };
- }
-
- /**
- * Interface for activities that will be executed against a workspace. These activities don't have to commit or roll back the
- * writer, nor do they have to translate the exceptions, since this is done by the
- * {@link Workspace#execute(boolean, Activity...)} method.
- */
- protected interface Activity {
-
- /**
- * Perform the activity by using the index writer.
- *
- * @param indexSession the index session that should be used by the activity; never null
- * @return the number of changes that were made by this activity
- * @throws IOException if there is an error using the writer
- * @throws ParseException if there is an error due to parsing
- */
- int execute( IndexSession indexSession ) throws IOException, ParseException;
-
- /**
- * Translate an exception obtained during {@link #execute(IndexSession) execution} into a single message.
- *
- * @param t the exception
- * @param sourceName the name of the source
- * @param workspaceName the name of the workspace
- * @return the error message
- */
- String messageFor( Throwable t,
- String sourceName,
- String workspaceName );
- }
-
- /**
- * A read-only activity.
- */
- protected interface ReadOnlyActivity extends Activity {
- }
-
- /**
- * A search activity.
- */
- protected interface Search extends ReadOnlyActivity {
- /**
- * Get the results of the search.
- *
- * @return the list of {@link Location} objects for each node satisfying the results; never null
- */
- List<Location> getResults();
- }
-
- /**
- * A query activity.
- */
- protected interface Query extends ReadOnlyActivity {
- /**
- * Get the results of the query.
- *
- * @return the results of a query; never null
- */
- QueryResults getResults();
- }
-
-}
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngineException.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngineException.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchEngineException.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,67 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-/**
- * An exception that represents a problem within a search engine.
- */
-public class SearchEngineException extends RuntimeException {
-
- /**
- */
- private static final long serialVersionUID = 1L;
-
- /**
- *
- */
- public SearchEngineException() {
- }
-
- /**
- * @param message
- */
- public SearchEngineException( String message ) {
- super(message);
-
- }
-
- /**
- * @param cause
- */
- public SearchEngineException( Throwable cause ) {
- super(cause);
-
- }
-
- /**
- * @param message
- * @param cause
- */
- public SearchEngineException( String message,
- Throwable cause ) {
- super(message, cause);
-
- }
-
-}
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/SearchI18n.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -28,25 +28,15 @@
import org.jboss.dna.common.CommonI18n;
import org.jboss.dna.common.i18n.I18n;
-/**
- *
- */
public class SearchI18n {
public static I18n locationForIndexesIsNotDirectory;
public static I18n locationForIndexesCannotBeRead;
public static I18n locationForIndexesCannotBeWritten;
- public static I18n errorWhileIndexingContentAtPath;
- public static I18n errorWhileRemovingContentAtPath;
- public static I18n errorWhileUpdatingContent;
public static I18n errorWhileCommittingIndexChanges;
public static I18n errorWhileRollingBackIndexChanges;
- public static I18n errorCreatingIndexWriter;
- public static I18n errorWhileOptimizingIndexes;
public static I18n errorWhilePerformingSearch;
public static I18n errorWhilePerformingQuery;
- public static I18n errorWhileInitializingSearchEngine;
- public static I18n errorWhileRemovingIndexesForWorkspace;
public static I18n errorWhilePerformingLuceneQuery;
static {
Deleted: trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/filters/ResultFilter.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,36 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search.filters;
-
-import org.jboss.dna.graph.query.QueryResults.Columns;
-
-/**
- *
- */
-public interface ResultFilter {
-
- boolean allows( Object[] tuple,
- Columns columns );
-
-}
Modified: trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties
===================================================================
--- trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/main/resources/org/jboss/dna/search/SearchI18n.properties 2009-11-18 19:39:05 UTC (rev 1329)
@@ -26,15 +26,8 @@
locationForIndexesCannotBeRead = Location "{0}" cannot be used for search indexes for workspace "{1}" because it cannot be read
locationForIndexesCannotBeWritten = Location "{0}" cannot be used for search indexes for workspace "{1}" because its contents cannot be written or updated
-errorWhileIndexingContentAtPath = Error while indexing the content at "{0}" in the "{1}" workspace of the "{2}" source: {3}
-errorWhileRemovingContentAtPath = Error while removing the content at/below "{0}" in the "{1}" workspace of the "{2}" source: {3}
-errorWhileUpdatingContent = Error while updating content in the "{0}" workspace of the "{1}" source: {2}
errorWhileCommittingIndexChanges = Error while committing changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
errorWhileRollingBackIndexChanges = Error while rolling back changes to the indexes for the "{0}" workspace of the "{1}" source: {2}
-errorCreatingIndexWriter = Error attempting to create an index writer for the "{0}" index for the "{1}" workspace of the "{2}" source: {3}
-errorWhileOptimizingIndexes = Error while optimizing the indexes for the "{0}" workspace of the "{1}" source: {2}
-errorWhilePerformingSearch = Error while searching for "{0}" in the "{1}" workspace of the "{2}" source: {3}
+errorWhilePerformingSearch = Error while searching the "{0}" workspace in the "{1}" source for "{2}": {3}
errorWhilePerformingQuery = Error while performing the query "{0}" against the content in the "{1}" workspace of the "{2}" source: {3}
-errorWhileInitializingSearchEngine = Error while initializing the search engine for the "{0}" workspace of the "{1}" source: {2}
-errorWhileRemovingIndexesForWorkspace = Error while removing the indexes for the "{0}" workspace of the "{1}" source: {2}
errorWhilePerformingLuceneQuery = Error while performing the Lucene query "{0}" as part of the "{1}" query against the "{2}" workspace of the "{3}" source: {4}
\ No newline at end of file
Deleted: trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java
===================================================================
--- trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-18 19:37:57 UTC (rev 1328)
+++ trunk/dna-search/src/test/java/org/jboss/dna/search/SearchEngineTest.java 2009-11-18 19:39:05 UTC (rev 1329)
@@ -1,353 +0,0 @@
-/*
- * JBoss DNA (http://www.jboss.org/dna)
- * See the COPYRIGHT.txt file distributed with this work for information
- * regarding copyright ownership. Some portions may be licensed
- * to Red Hat, Inc. under one or more contributor license agreements.
- * See the AUTHORS.txt file in the distribution for a full listing of
- * individual contributors.
- *
- * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
- * is licensed to you under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * JBoss DNA is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this software; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
- */
-package org.jboss.dna.search;
-
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNull.notNullValue;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.stub;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.verifyZeroInteractions;
-import java.util.List;
-import org.jboss.dna.graph.ExecutionContext;
-import org.jboss.dna.graph.Graph;
-import org.jboss.dna.graph.Location;
-import org.jboss.dna.graph.Node;
-import org.jboss.dna.graph.connector.RepositoryConnection;
-import org.jboss.dna.graph.connector.RepositoryConnectionFactory;
-import org.jboss.dna.graph.connector.RepositorySourceException;
-import org.jboss.dna.graph.connector.inmemory.InMemoryRepositorySource;
-import org.jboss.dna.graph.property.Path;
-import org.jboss.dna.graph.query.QueryContext;
-import org.jboss.dna.graph.query.model.Query;
-import org.jboss.dna.graph.query.validate.Schemata;
-import org.jboss.dna.graph.request.InvalidWorkspaceException;
-import org.jboss.dna.search.IndexLayout;
-import org.jboss.dna.search.IndexSession;
-import org.jboss.dna.search.SearchEngine;
-import org.junit.Before;
-import org.junit.Test;
-
-public class SearchEngineTest {
-
- private SearchEngine engine;
- private ExecutionContext context;
- private String sourceName;
- private String workspaceName1;
- private String workspaceName2;
- private InMemoryRepositorySource source;
- private RepositoryConnectionFactory connectionFactory;
- private IndexLayout layout;
- private IndexSession sessionWs1;
- private IndexSession sessionWs2;
- private IndexSession sessionDefault;
- private Graph content;
-
- @Before
- public void beforeEach() throws Exception {
- context = new ExecutionContext();
- sourceName = "sourceA";
- workspaceName1 = "workspace1";
- workspaceName2 = "workspace2";
-
- // Set up the source and graph instance ...
- source = new InMemoryRepositorySource();
- source.setName(sourceName);
- content = Graph.create(source, context);
-
- // Create the workspaces ...
- content.createWorkspace().named(workspaceName1);
- content.createWorkspace().named(workspaceName2);
-
- // Set up the connection factory ...
- connectionFactory = new RepositoryConnectionFactory() {
- @SuppressWarnings( "synthetic-access" )
- public RepositoryConnection createConnection( String sourceName ) throws RepositorySourceException {
- return source.getConnection();
- }
- };
-
- // Set up the index layout ...
- layout = mock(IndexLayout.class);
- sessionWs1 = mockSession(layout, workspaceName1);
- sessionWs2 = mockSession(layout, workspaceName2);
- sessionDefault = mockSession(layout, "");
-
- // Now set up the search engine ...
- engine = new SearchEngine(context, sourceName, connectionFactory, layout);
- }
-
- protected IndexSession mockSession( IndexLayout mockLayout,
- String workspaceName ) {
- IndexSession session = mock(IndexSession.class);
- stub(layout.createSession(context, sourceName, workspaceName, false, false)).toReturn(session);
- stub(layout.createSession(context, sourceName, workspaceName, false, true)).toReturn(session);
- stub(layout.createSession(context, sourceName, workspaceName, true, false)).toReturn(session);
- stub(layout.createSession(context, sourceName, workspaceName, true, true)).toReturn(session);
- stub(session.getWorkspaceName()).toReturn(workspaceName);
- stub(session.getSourceName()).toReturn(sourceName);
- return session;
- }
-
- protected Path path( String path ) {
- return context.getValueFactories().getPathFactory().create(path);
- }
-
- protected void loadContent() throws Exception {
- // Load some content ...
- content.useWorkspace(workspaceName1);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("cars.xml")).into("/");
- content.useWorkspace(workspaceName2);
- content.importXmlFrom(getClass().getClassLoader().getResourceAsStream("aircraft.xml")).into("/");
- }
-
- @Test
- public void shouldReturnSearchWorkspaceForExistingWorkspaceInSource() {
- SearchEngine.Workspace workspace = engine.getWorkspace(workspaceName1);
- assertThat(workspace, is(notNullValue()));
- assertThat(workspace.modifiedNodesSinceLastOptimize.get(), is(0));
- assertThat(workspace.getWorkspaceName(), is(workspaceName1));
- }
-
- @Test( expected = InvalidWorkspaceException.class )
- public void shouldFailToReturnSearchWorkspaceForNonExistantWorkspaceInSource() {
- engine.getWorkspace(workspaceName1 + "foobar");
- }
-
- @Test
- public void shouldDoNothingDuringRemoveWorkspaceIfWorkspaceHasNotBeenLoaded() throws Exception {
- engine.removeWorkspace(workspaceName1);
- verifyZeroInteractions(layout);
- }
-
- @Test
- public void shouldForwardRemoveWorkspaceToIndexLayout() throws Exception {
- engine.getWorkspace(workspaceName1);
- engine.removeWorkspace(workspaceName1);
- verify(layout).destroyIndexes(context, sourceName, workspaceName1);
- verifyNoMoreInteractions(layout);
- }
-
- @Test
- public void shouldForwardRemoveWorkspaceToIndexLayoutForEachWorkspaceThatWasLoaded() throws Exception {
- engine.getWorkspace(workspaceName1);
- engine.removeWorkspaces();
- verify(layout).destroyIndexes(context, sourceName, workspaceName1);
- verifyZeroInteractions(layout);
- }
-
- @Test
- public void shouldForwardRemoveWorkspaceToIndexLayoutForAllWorkspacesThatWereLoaded() throws Exception {
- engine.getWorkspace(workspaceName1);
- engine.getWorkspace(workspaceName2);
- engine.removeWorkspaces();
- verify(layout).destroyIndexes(context, sourceName, workspaceName1);
- verify(layout).destroyIndexes(context, sourceName, workspaceName2);
- verifyNoMoreInteractions(layout);
- }
-
- @Test( expected = IllegalArgumentException.class )
- public void shouldFailIfNullWorkspaceNamePassedToRemoveWorkspace() throws Exception {
- engine.removeWorkspace(null);
- }
-
- @Test
- public void shouldForwardOptimizeOfWorkspaceToIndexSession() throws Exception {
- engine.optimize(workspaceName1);
- verify(sessionWs1).optimize();
- verify(sessionWs1).commit();
- verifyNoMoreInteractions(sessionWs1);
- }
-
- @Test
- public void shouldForwardOptimizeOfAllWorkspacesToEachIndexSession() throws Exception {
- engine.optimize(); // will find all three workspaces
- verify(sessionWs1).optimize();
- verify(sessionWs1).commit();
- verifyNoMoreInteractions(sessionWs1);
- verify(sessionWs2).optimize();
- verify(sessionWs2).commit();
- verifyNoMoreInteractions(sessionWs2);
- verify(sessionDefault).optimize();
- verify(sessionDefault).commit();
- verifyNoMoreInteractions(sessionDefault);
- }
-
- @Test
- public void shouldForwardIndexOfWorkspaceToIndexSession() throws Exception {
- loadContent();
- engine.index(workspaceName1, 3);
- verify(sessionWs1, times(18)).index((Node)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardIndexOfSubgraphInWorkspaceToIndexSession() throws Exception {
- loadContent();
- engine.index(workspaceName1, path("/Cars"), 3);
- verify(sessionWs1).deleteBelow(path("/Cars"));
- verify(sessionWs1, times(17)).index((Node)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardIndexEntireWorkspaceToIndexSession() throws Exception {
- loadContent();
- engine.index(workspaceName1, path("/"), 3);
- verify(sessionWs1, times(18)).index((Node)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardIndexOfAllWorkspacesToEachIndexSession() throws Exception {
- loadContent();
- engine.index(3); // will find all three workspaces
- verify(sessionWs1, times(18)).index((Node)anyObject());
- verify(sessionWs1).commit();
- verify(sessionWs2, times(24)).index((Node)anyObject());
- verify(sessionWs2).commit();
- verify(sessionDefault, times(1)).index((Node)anyObject());
- verify(sessionDefault).commit();
- }
-
- @SuppressWarnings( "unchecked" )
- @Test
- public void shouldForwardSearchToIndexSession() throws Exception {
- String query = "term1 term2";
- engine.fullTextSearch(context, workspaceName1, query, 3, 0);
- verify(sessionWs1).search(eq(context), eq(query), eq(3), eq(0), (List<Location>)anyObject());
- verify(sessionWs1).commit();
- }
-
- @Test
- public void shouldForwardQueryToIndexSession() throws Exception {
- Query query = mock(Query.class);
- Schemata schemata = mock(Schemata.class);
- engine.query(context, workspaceName1, query, schemata);
- verify(sessionWs1).query(eq(new QueryContext(context, schemata)), eq(query));
- verify(sessionWs1).commit();
- }
-
- // These tests expect there to be some real IndexLayout ...
-
- // @Test
- // public void shouldIndexAllContentInRepositorySource() throws Exception {
- // loadContent();
- // engine.index(3);
- // }
- //
- // @Test
- // public void shouldIndexAllContentInWorkspace() throws Exception {
- // loadContent();
- // engine.index(workspaceName1, 3);
- // engine.index(workspaceName2, 5);
- // }
- //
- // @Test
- // public void shouldIndexAllContentInWorkspaceBelowPath() throws Exception {
- // loadContent();
- // engine.index(workspaceName1, path("/Cars/Hybrid"), 3);
- // engine.index(workspaceName2, path("/Aircraft/Commercial"), 5);
- // }
- //
- // @Test
- // public void shouldReIndexAllContentInWorkspaceBelowPath() throws Exception {
- // loadContent();
- // for (int i = 0; i != 0; i++) {
- // engine.index(workspaceName1, path("/Cars/Hybrid"), 3);
- // engine.index(workspaceName2, path("/Aircraft/Commercial"), 5);
- // }
- // }
- //
- // @Test
- // public void shouldHaveLoadedTestContentIntoRepositorySource() {
- // assertThat(content.getNodeAt("/Cars/Hybrid/Toyota Prius").getProperty("msrp").getFirstValue(), is((Object)"$21,500"));
- // }
- //
- // @Test
- // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfOne() {
- // engine.index(workspaceName1, path("/"), 1);
- // }
- //
- // @Test
- // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTwo() {
- // engine.index(workspaceName1, path("/"), 2);
- // }
- //
- // @Test
- // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfThree() {
- // engine.index(workspaceName1, path("/"), 3);
- // }
- //
- // @Test
- // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfFour() {
- // engine.index(workspaceName1, path("/"), 4);
- // }
- //
- // @Test
- // public void shouldIndexRepositoryContentStartingAtRootAndUsingDepthOfTen() {
- // engine.index(workspaceName1, path("/"), 10);
- // }
- //
- // @Test
- // public void shouldIndexRepositoryContentStartingAtNonRootNode() {
- // engine.index(workspaceName1, path("/Cars"), 10);
- // }
- //
- // @Test
- // public void shouldReIndexRepositoryContentStartingAtNonRootNode() {
- // for (int i = 0; i != 3; ++i) {
- // engine.index(workspaceName1, path("/Cars"), 10);
- // }
- // }
- //
- // @Test
- // public void shouldFindNodesByFullTextSearch() {
- // engine.index(workspaceName1, path("/"), 100);
- // List<Location> results = engine.fullTextSearch(context, workspaceName1, "Toyota Prius", 10, 0);
- // assertThat(results, is(notNullValue()));
- // assertThat(results.size(), is(2));
- // assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
- // assertThat(results.get(1).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
- // }
- //
- // @Test
- // public void shouldFindNodesByFullTextSearchWithOffset() {
- // engine.index(workspaceName1, path("/"), 100);
- // List<Location> results = engine.fullTextSearch(context, workspaceName1, "toyota prius", 1, 0);
- // assertThat(results, is(notNullValue()));
- // assertThat(results.size(), is(1));
- // assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Prius")));
- //
- // results = engine.fullTextSearch(context, workspaceName1, "+Toyota", 1, 1);
- // assertThat(results, is(notNullValue()));
- // assertThat(results.size(), is(1));
- // assertThat(results.get(0).getPath(), is(path("/Cars/Hybrid/Toyota Highlander")));
- // }
-}
14 years, 5 months
DNA SVN: r1328 - in trunk/dna-search/src/main/java/org/jboss/dna/search: query and 1 other directory.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-18 14:37:57 -0500 (Wed, 18 Nov 2009)
New Revision: 1328
Added:
trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java
Modified:
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
Log:
DNA-467 Added query to constrain the length of a field.
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java 2009-11-17 23:30:25 UTC (rev 1327)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java 2009-11-18 19:37:57 UTC (rev 1328)
@@ -87,6 +87,7 @@
import org.jboss.dna.graph.query.QueryEngine;
import org.jboss.dna.graph.query.QueryResults;
import org.jboss.dna.graph.query.QueryResults.Columns;
+import org.jboss.dna.graph.query.model.Length;
import org.jboss.dna.graph.query.model.NodeDepth;
import org.jboss.dna.graph.query.model.NodeLocalName;
import org.jboss.dna.graph.query.model.NodeName;
@@ -106,6 +107,7 @@
import org.jboss.dna.graph.query.process.QueryProcessor;
import org.jboss.dna.graph.request.ChangeRequest;
import org.jboss.dna.search.IndexRules.Rule;
+import org.jboss.dna.search.query.CompareLengthQuery;
import org.jboss.dna.search.query.CompareNameQuery;
import org.jboss.dna.search.query.ComparePathQuery;
import org.jboss.dna.search.query.CompareStringQuery;
@@ -958,6 +960,36 @@
return query;
}
+ protected Query findNodesWith( Length propertyLength,
+ Operator operator,
+ Object value ) {
+ assert propertyLength != null;
+ assert value != null;
+ PropertyValue propertyValue = propertyLength.getPropertyValue();
+ String field = stringFactory.create(propertyValue.getPropertyName());
+ ValueFactories factories = context.getValueFactories();
+ int length = factories.getLongFactory().create(value).intValue();
+ switch (operator) {
+ case EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldEqualTo(length, field, factories);
+ case NOT_EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldNotEqualTo(length, field, factories);
+ case GREATER_THAN:
+ return CompareLengthQuery.createQueryForNodesWithFieldGreaterThan(length, field, factories);
+ case GREATER_THAN_OR_EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldGreaterThanOrEqualTo(length, field, factories);
+ case LESS_THAN:
+ return CompareLengthQuery.createQueryForNodesWithFieldLessThan(length, field, factories);
+ case LESS_THAN_OR_EQUAL_TO:
+ return CompareLengthQuery.createQueryForNodesWithFieldLessThanOrEqualTo(length, field, factories);
+ case LIKE:
+ // This is not allowed ...
+ assert false;
+ break;
+ }
+ return null;
+ }
+
protected Query findNodesWith( PropertyValue propertyValue,
Operator operator,
Object value,
@@ -1111,7 +1143,6 @@
return null;
}
return null;
-
}
protected Query findNodesWithNumericRange( PropertyValue propertyValue,
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-17 23:30:25 UTC (rev 1327)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-18 19:37:57 UTC (rev 1328)
@@ -449,10 +449,7 @@
} else if (left instanceof PropertyValue) {
return session.findNodesWith((PropertyValue)left, operator, value, caseSensitive);
} else if (left instanceof Length) {
- Length length = (Length)left;
- PropertyValue nested = length.getPropertyValue();
-
- return null;
+ return session.findNodesWith((Length)left, operator, right);
} else if (left instanceof LowerCase) {
LowerCase lowercase = (LowerCase)left;
return createQuery(lowercase.getOperand(), operator, right, false);
Added: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java (rev 0)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java 2009-11-18 19:37:57 UTC (rev 1328)
@@ -0,0 +1,254 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.search.query;
+
+import java.io.IOException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Weight;
+import org.jboss.dna.graph.property.ValueFactories;
+import org.jboss.dna.graph.property.ValueFactory;
+import org.jboss.dna.graph.query.model.Length;
+
+/**
+ * A Lucene {@link Query} implementation that is used to apply a {@link Length} constraint against a string field. This query
+ * implementation works by using the {@link Query#weight(Searcher) weight} and
+ * {@link Weight#scorer(IndexReader, boolean, boolean) scorer} of the wrapped query to score (and return) only those documents
+ * with string fields that satisfy the constraint.
+ */
+public class CompareLengthQuery extends CompareQuery<Integer> {
+
+ private static final long serialVersionUID = 1L;
+ protected static final Evaluator<Integer> EQUAL_TO = new Evaluator<Integer>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Integer nodeValue,
+ Integer length ) {
+ return nodeValue.intValue() == length.intValue();
+ }
+
+ @Override
+ public String toString() {
+ return " = ";
+ }
+ };
+ protected static final Evaluator<Integer> NOT_EQUAL_TO = new Evaluator<Integer>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Integer nodeValue,
+ Integer length ) {
+ return nodeValue.intValue() != length.intValue();
+ }
+
+ @Override
+ public String toString() {
+ return " != ";
+ }
+ };
+ protected static final Evaluator<Integer> IS_LESS_THAN = new Evaluator<Integer>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Integer nodeValue,
+ Integer length ) {
+ return nodeValue.intValue() < length.intValue();
+ }
+
+ @Override
+ public String toString() {
+ return " < ";
+ }
+ };
+ protected static final Evaluator<Integer> IS_LESS_THAN_OR_EQUAL_TO = new Evaluator<Integer>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Integer nodeValue,
+ Integer length ) {
+ return nodeValue.intValue() <= length.intValue();
+ }
+
+ @Override
+ public String toString() {
+ return " <= ";
+ }
+ };
+ protected static final Evaluator<Integer> IS_GREATER_THAN = new Evaluator<Integer>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Integer nodeValue,
+ Integer length ) {
+ return nodeValue.intValue() > length.intValue();
+ }
+
+ @Override
+ public String toString() {
+ return " > ";
+ }
+ };
+ protected static final Evaluator<Integer> IS_GREATER_THAN_OR_EQUAL_TO = new Evaluator<Integer>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean satisfiesConstraint( Integer nodeValue,
+ Integer length ) {
+ return nodeValue.intValue() >= length.intValue();
+ }
+
+ @Override
+ public String toString() {
+ return " >= ";
+ }
+ };
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a field length that is equal to the supplied constraint
+ * value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @return the query; never null
+ */
+ public static CompareLengthQuery createQueryForNodesWithFieldEqualTo( Integer constraintValue,
+ String fieldName,
+ ValueFactories factories ) {
+ return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), EQUAL_TO);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a field length that is not equal to the supplied
+ * constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @return the query; never null
+ */
+ public static CompareLengthQuery createQueryForNodesWithFieldNotEqualTo( Integer constraintValue,
+ String fieldName,
+ ValueFactories factories ) {
+ return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), NOT_EQUAL_TO);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a field length that is greater than the supplied
+ * constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @return the query; never null
+ */
+ public static CompareLengthQuery createQueryForNodesWithFieldGreaterThan( Integer constraintValue,
+ String fieldName,
+ ValueFactories factories ) {
+ return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a field length that is greater than or equal to the
+ * supplied constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @return the query; never null
+ */
+ public static CompareLengthQuery createQueryForNodesWithFieldGreaterThanOrEqualTo( Integer constraintValue,
+ String fieldName,
+ ValueFactories factories ) {
+ return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_GREATER_THAN_OR_EQUAL_TO);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a field length that is less than the supplied
+ * constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @return the query; never null
+ */
+ public static CompareLengthQuery createQueryForNodesWithFieldLessThan( Integer constraintValue,
+ String fieldName,
+ ValueFactories factories ) {
+ return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_LESS_THAN);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores documents with a field length that is less than or equal to the
+ * supplied constraint value.
+ *
+ * @param constraintValue the constraint value; may not be null
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param factories the value factories that can be used during the scoring; may not be null
+ * @return the query; never null
+ */
+ public static CompareLengthQuery createQueryForNodesWithFieldLessThanOrEqualTo( Integer constraintValue,
+ String fieldName,
+ ValueFactories factories ) {
+ return new CompareLengthQuery(fieldName, constraintValue, factories.getStringFactory(), IS_LESS_THAN_OR_EQUAL_TO);
+ }
+
+ /**
+ * Construct a {@link Query} implementation that scores nodes according to the supplied comparator.
+ *
+ * @param fieldName the name of the document field containing the value; may not be null
+ * @param constraintValue the constraint value; may not be null
+ * @param stringFactory the string factory that can be used during the scoring; may not be null
+ * @param evaluator the {@link CompareQuery.Evaluator} implementation that returns whether the node path satisfies the
+ * constraint; may not be null
+ */
+ protected CompareLengthQuery( String fieldName,
+ Integer constraintValue,
+ ValueFactory<String> stringFactory,
+ Evaluator<Integer> evaluator ) {
+ super(fieldName, constraintValue, null, stringFactory, evaluator);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.search.query.CompareQuery#readFromDocument(org.apache.lucene.index.IndexReader, int)
+ */
+ @Override
+ protected Integer readFromDocument( IndexReader reader,
+ int docId ) throws IOException {
+ // This implementation reads the length of the field ...
+ Document doc = reader.document(docId, fieldSelector);
+ String valueString = doc.get(fieldName);
+ String value = stringFactory.create(valueString);
+ return value != null ? value.length() : 0;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.apache.lucene.search.Query#clone()
+ */
+ @Override
+ public Object clone() {
+ return new CompareLengthQuery(fieldName, constraintValue, stringFactory, evaluator);
+ }
+}
Property changes on: trunk/dna-search/src/main/java/org/jboss/dna/search/query/CompareLengthQuery.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
14 years, 5 months
DNA SVN: r1327 - trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-17 18:30:25 -0500 (Tue, 17 Nov 2009)
New Revision: 1327
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java
Log:
DNA-467 Factored out a parse method to allow subclasses to parse tokenized query commands.
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java 2009-11-17 23:21:38 UTC (rev 1326)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java 2009-11-17 23:30:25 UTC (rev 1327)
@@ -482,6 +482,11 @@
Tokenizer tokenizer = new SqlTokenizer(false);
TokenStream tokens = new TokenStream(query, tokenizer, false);
tokens.start();
+ return parseQueryCommand(tokens, context);
+ }
+
+ protected QueryCommand parseQueryCommand( TokenStream tokens,
+ ExecutionContext context ) {
QueryCommand command = null;
if (tokens.matches("SELECT")) {
command = parseQuery(tokens, context);
14 years, 5 months
DNA SVN: r1326 - trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-17 18:21:38 -0500 (Tue, 17 Nov 2009)
New Revision: 1326
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java
Log:
DNA-467 Documented the SQL and full-text search grammars in the JavaDoc for each parser.
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java 2009-11-17 21:09:49 UTC (rev 1325)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/FullTextSearchParser.java 2009-11-17 23:21:38 UTC (rev 1326)
@@ -38,6 +38,24 @@
/**
* A {@link QueryParser} implementation that parses a full-text search expression. This grammar is based on the full-text search
* grammar as defined by the JCR 2.0 specification.
+ * <p>
+ * </p>
+ * <h3>Grammar</h3>
+ * <p>
+ * The grammar for the full-text expression is taken from the JCR 2.0 specification, and is as follows:
+ * </p>
+ *
+ * <pre>
+ * FulltextSearch ::= Disjunct {Space 'OR' Space Disjunct}
+ * Disjunct ::= Term {Space Term}
+ * Term ::= ['-'] SimpleTerm
+ * SimpleTerm ::= Word | '"' Word {Space Word} '"'
+ * Word ::= NonSpaceChar {NonSpaceChar}
+ * Space ::= SpaceChar {SpaceChar}
+ * NonSpaceChar ::= Char - SpaceChar /* Any Char except SpaceChar */
+ * SpaceChar ::= ' '
+ * Char ::= /* Any character */
+ * </pre>
*/
public class FullTextSearchParser {
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java 2009-11-17 21:09:49 UTC (rev 1325)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java 2009-11-17 23:21:38 UTC (rev 1326)
@@ -97,13 +97,342 @@
/**
* A {@link QueryParser} implementation that parses a subset of SQL select and set queries.
* <p>
- * This grammar is based on the SQL grammar as defined by the JCR 2.0 specification, with some useful additions:
+ * This grammar is equivalent to the SQL grammar as defined by the JCR 2.0 specification, with some useful additions:
* <ul>
- * <li>(UNION|INTERSECT|EXCEPT) [ALL]</li>
- * <li>SELECT DISTINCT</li>
- * <li>LIMIT clauses</li>
+ * <li>"<code>... (UNION|INTERSECT|EXCEPT) [ALL] ...</code>" to combine and merge results from multiple queries</li>
+ * <li>"<code>SELECT DISTINCT ...</code>" to remove duplicates</li>
+ * <li>"<code>LIMIT count [OFFSET number]</code>" clauses to control the number of results returned as well as the number of rows
+ * that should be skipped</li>
+ * <li>Support for additional join types, including "<code>FULL OUTER JOIN</code>" and "<code>CROSS JOIN</code>"</li>
+ * <li>Additional dynamic operands "<code>DEPTH([&lt;selectorName>])</code>" and "<code>PATH([&lt;selectorName>])</code>" that
+ * enable placing constraints on the node depth and path, respectively, and which can be used in a manner similar to "
+ * <code>NAME([&lt;selectorName>])</code>" and "<code>LOCALNAME([&lt;selectorName>])</code>". Note that in each of these cases, the
+ * selector name is optional if there is only one selector in the query.</li>
+ * <li>Support for the IN clause and NOT IN clause to more easily supply a list of valid discrete static operands: "
+ * <code><dynamicOperand> [NOT] IN (<staticOperand> {, <staticOperand>})</code>"</li>
+ * <li>Support for the BETWEEN clause: "<code><dynamicOperand> [NOT] BETWEEN <lowerBoundStaticOperand> [EXCLUSIVE] AND
+ * <upperBoundStaticOperand> [EXCLUSIVE]</code>"</li>
* </ul>
* </p>
+ * <h3>SQL grammar</h3>
+ * <p>
+ * This section defines the complete grammar for the SQL dialect supported by this parser.
+ * </p>
+ * <h4>Queries</h4>
+ *
+ * <pre>
+ * QueryCommand ::= Query | SetQuery
+ *
+ * SetQuery ::= Query ('UNION'|'INTERSECT'|'EXCEPT') [ALL] Query
+ * { ('UNION'|'INTERSECT'|'EXCEPT') [ALL] Query }
+ *
+ * Query ::= 'SELECT' ['DISTINCT'] columns
+ * 'FROM' Source
+ * ['WHERE' Constraint]
+ * ['ORDER BY' orderings]
+ * [Limit]
+ * </pre>
+ *
+ * <h4>Sources</h4>
+ *
+ * <pre>
+ * Source ::= Selector | Join
+ *
+ * Selector ::= nodeTypeName ['AS' selectorName]
+ *
+ * nodeTypeName ::= Name
+ * </pre>
+ *
+ * <h4>Joins</h4>
+ *
+ * <pre>
+ * Join ::= left [JoinType] 'JOIN' right 'ON' JoinCondition
+ * // If JoinType is omitted INNER is assumed.
+ *
+ * left ::= Source
+ * right ::= Source
+ *
+ * JoinType ::= Inner | LeftOuter | RightOuter | FullOuter | Cross
+ *
+ * Inner ::= 'INNER' ['JOIN']
+ *
+ * LeftOuter ::= 'LEFT JOIN' | 'OUTER JOIN' | 'LEFT OUTER JOIN'
+ *
+ * RightOuter ::= 'RIGHT OUTER' ['JOIN']
+ *
+ * FullOuter ::= 'FULL OUTER' ['JOIN']
+ *
+ * Cross ::= 'CROSS' ['JOIN']
+ *
+ * JoinCondition ::= EquiJoinCondition | SameNodeJoinCondition | ChildNodeJoinCondition | DescendantNodeJoinCondition
+ * </pre>
+ *
+ * <h5>Equi-join conditions</h5>
+ *
+ * <pre>
+ * EquiJoinCondition ::= selector1Name'.'property1Name '=' selector2Name'.'property2Name
+ *
+ * selector1Name ::= selectorName
+ * selector2Name ::= selectorName
+ * property1Name ::= propertyName
+ * property2Name ::= propertyName
+ * </pre>
+ *
+ * <h5>Same-node join condition</h5>
+ *
+ * <pre>
+ * SameNodeJoinCondition ::= 'ISSAMENODE(' selector1Name ',' selector2Name [',' selector2Path] ')'
+ *
+ * selector2Path ::= Path
+ * </pre>
+ *
+ * <h5>Child-node join condition</h5>
+ *
+ * <pre>
+ * ChildNodeJoinCondition ::= 'ISCHILDNODE(' childSelectorName ',' parentSelectorName ')'
+ *
+ * childSelectorName ::= selectorName
+ * parentSelectorName ::= selectorName
+ * </pre>
+ *
+ * <h5>Descendant-node join condition</h5>
+ *
+ * <pre>
+ * DescendantNodeJoinCondition ::= 'ISDESCENDANTNODE(' descendantSelectorName ',' ancestorSelectorName ')'
+ * descendantSelectorName ::= selectorName
+ * ancestorSelectorName ::= selectorName
+ * </pre>
+ *
+ * <h4>Constraints</h4>
+ *
+ * <pre>
+ * Constraint ::= ConstraintItem | '(' ConstraintItem ')'
+ *
+ * ConstraintItem ::= And | Or | Not | Comparison | Between | PropertyExistence | SetConstraint | FullTextSearch |
+ * SameNode | ChildNode | DescendantNode
+ * </pre>
+ *
+ * <h5>And constraint</h5>
+ *
+ * <pre>
+ * And ::= constraint1 'AND' constraint2
+ *
+ * constraint1 ::= Constraint
+ * constraint2 ::= Constraint
+ * </pre>
+ *
+ * <h5>Or constraint</h5>
+ *
+ * <pre>
+ * Or ::= constraint1 'OR' constraint2
+ * </pre>
+ *
+ * <h5>Not constraint</h5>
+ *
+ * <pre>
+ * Not ::= 'NOT' Constraint
+ * </pre>
+ *
+ * <h5>Comparison constraint</h5>
+ *
+ * <pre>
+ * Comparison ::= DynamicOperand Operator StaticOperand
+ *
+ * Operator ::= '=' | '!=' | '<' | '<=' | '>' | '>=' | 'LIKE'
+ * </pre>
+ *
+ * <h5>Between constraint</h5>
+ *
+ * <pre>
+ * Between ::= DynamicOperand ['NOT'] 'BETWEEN' lowerBound ['EXCLUSIVE'] 'AND' upperBound ['EXCLUSIVE']
+ *
+ * lowerBound ::= StaticOperand
+ * upperBound ::= StaticOperand
+ * </pre>
+ *
+ * <h5>Property existence constraint</h5>
+ *
+ * <pre>
+ * PropertyExistence ::= selectorName'.'propertyName 'IS' ['NOT'] 'NULL' |
+ * propertyName 'IS' ['NOT'] 'NULL' /* If only one selector exists in this query */
+ *
+ * </pre>
+ *
+ * <h5>Set constraint</h5>
+ *
+ * <pre>
+ * SetConstraint ::= selectorName'.'propertyName ['NOT'] 'IN' |
+ * propertyName ['NOT'] 'IN' /* If only one selector exists in this query */
+ * '(' firstStaticOperand {',' additionalStaticOperand } ')'
+ * firstStaticOperand ::= StaticOperand
+ * additionalStaticOperand ::= StaticOperand
+ * </pre>
+ *
+ * <h5>Full-text search constraint</h5>
+ *
+ * <pre>
+ * FullTextSearch ::= 'CONTAINS(' ([selectorName'.']propertyName | selectorName'.*')
+ * ',' ''' fullTextSearchExpression''' ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * preceding the propertyName is optional */
+ * fullTextSearchExpression ::= /* a full-text search expression, see {@link FullTextSearchParser} */
+ * </pre>
+ *
+ * <h5>Same-node constraint</h5>
+ *
+ * <pre>
+ * SameNode ::= 'ISSAMENODE(' [selectorName ','] Path ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * preceding the propertyName is optional */
+ * </pre>
+ *
+ * <h5>Child-node constraint</h5>
+ *
+ * <pre>
+ * ChildNode ::= 'ISCHILDNODE(' [selectorName ','] Path ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * preceding the propertyName is optional */
+ * </pre>
+ *
+ * <h5>Descendant-node constraint</h5>
+ *
+ * <pre>
+ * DescendantNode ::= 'ISDESCENDANTNODE(' [selectorName ','] Path ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * preceding the propertyName is optional */
+ * </pre>
+ *
+ * <h5>Paths and names</h5>
+ *
+ * <pre>
+ *
+ * Name ::= '[' quotedName ']' | '[' simpleName ']' | simpleName
+ *
+ * quotedName ::= /* A JCR Name (see the JCR specification) */
+ * simpleName ::= /* A JCR Name that contains only SQL-legal characters (namely letters, digits, and underscore) */
+ *
+ * Path ::= '[' quotedPath ']' | '[' simplePath ']' | simplePath
+ *
+ * quotedPath ::= /* A JCR Path that contains non-SQL-legal characters */
+ * simplePath ::= /* A JCR Path (rather Name) that contains only SQL-legal characters (namely letters, digits, and underscore) */
+ * </pre>
+ *
+ * <h4>Static operands</h4>
+ *
+ * <pre>
+ * StaticOperand ::= Literal | BindVariableValue
+ * </pre>
+ *
+ * <h5>Literal</h5>
+ *
+ * <pre>
+ * Literal ::= CastLiteral | UncastLiteral
+ *
+ * CastLiteral ::= 'CAST(' UncastLiteral ' AS ' PropertyType ')'
+ *
+ * PropertyType ::= 'STRING' | 'BINARY' | 'DATE' | 'LONG' | 'DOUBLE' | 'DECIMAL' | 'BOOLEAN' | 'NAME' | 'PATH' |
+ * 'REFERENCE' | 'WEAKREFERENCE' | 'URI'
+ * /* 'WEAKREFERENCE' is not currently supported in JCR 1.0 */
+ *
+ * UncastLiteral ::= UnquotedLiteral | ''' UnquotedLiteral ''' | '"' UnquotedLiteral '"'
+ *
+ * UnquotedLiteral ::= /* String form of a JCR Value, as defined in the JCR specification */
+ * </pre>
+ *
+ * <h5>Bind variables</h5>
+ *
+ * <pre>
+ * BindVariableValue ::= '$'bindVariableName
+ *
+ * bindVariableName ::= /* A string that conforms to the JCR Name syntax, though the prefix does not need to be
+ * a registered namespace prefix. */
+ * </pre>
+ *
+ * <h4>Dynamic operands</h4>
+ *
+ * <pre>
+ * DynamicOperand ::= PropertyValue | Length | NodeName | NodeLocalName | NodePath | NodeDepth |
+ * FullTextSearchScore | LowerCase | UpperCase
+ * </pre>
+ * <h5>Property value</h5>
+ * <pre>
+ * PropertyValue ::= [selectorName'.'] propertyName
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * preceding the propertyName is optional */
+ * </pre>
+ * <h5>Property length</h5>
+ * <pre>
+ * Length ::= 'LENGTH(' PropertyValue ')'
+ * </pre>
+ * <h5>Node name</h5>
+ * <pre>
+ * NodeName ::= 'NAME(' [selectorName] ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * is optional */
+ * </pre>
+ * <h5>Node local name</h5>
+ * <pre>
+ * NodeLocalName ::= 'LOCALNAME(' [selectorName] ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * is optional */
+ * </pre>
+ * <h5>Node path</h5>
+ * <pre>
+ * NodePath ::= 'PATH(' [selectorName] ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * is optional */
+ * </pre>
+ * <h5>Node depth</h5>
+ * <pre>
+ * NodeDepth ::= 'DEPTH(' [selectorName] ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * is optional */
+ * </pre>
+ * <h5>Full-text search score</h5>
+ * <pre>
+ * FullTextSearchScore ::= 'SCORE(' [selectorName] ')'
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * is optional */
+ * </pre>
+ * <h5>Lowercase</h5>
+ * <pre>
+ * LowerCase ::= 'LOWER(' DynamicOperand ')'
+ * </pre>
+ * <h5>Uppercase</h5>
+ * <pre>
+ * UpperCase ::= 'UPPER(' DynamicOperand ')'
+ * </pre>
+ *
+ * <h4>Ordering</h4>
+ *
+ * <pre>
+ * orderings ::= Ordering {',' Ordering}
+ *
+ * Ordering ::= DynamicOperand [Order]
+ *
+ * Order ::= 'ASC' | 'DESC'
+ * </pre>
+ *
+ * <h4>Columns</h4>
+ *
+ * <pre>
+ * columns ::= (Column {',' Column}) | '*'
+ *
+ * Column ::= ([selectorName'.']propertyName ['AS' columnName]) | (selectorName'.*')
+ * /* If only one selector exists in this query, explicit specification of the selectorName
+ * preceding the propertyName is optional */
+ * selectorName ::= Name
+ * propertyName ::= Name
+ * columnName ::= Name
+ * </pre>
+ *
+ * <h4>Limit</h4>
+ *
+ * <pre>
+ * Limit ::= 'LIMIT' count [ 'OFFSET' offset ]
+ * count ::= /* Positive integer value */
+ * offset ::= /* Non-negative integer value */
+ * </pre>
*/
public class SqlQueryParser implements QueryParser {
@@ -255,11 +584,11 @@
} else if (tokens.canConsume("OUTER", "JOIN") || tokens.canConsume("LEFT", "JOIN")
|| tokens.canConsume("LEFT", "OUTER", "JOIN")) {
joinType = JoinType.LEFT_OUTER;
- } else if (tokens.canConsume("RIGHT", "OUTER", "JOIN")) {
+ } else if (tokens.canConsume("RIGHT", "OUTER", "JOIN") || tokens.canConsume("RIGHT", "OUTER")) {
joinType = JoinType.RIGHT_OUTER;
- } else if (tokens.canConsume("FULL", "OUTER", "JOIN")) {
+ } else if (tokens.canConsume("FULL", "OUTER", "JOIN") || tokens.canConsume("FULL", "OUTER")) {
joinType = JoinType.FULL_OUTER;
- } else if (tokens.canConsume("CROSS", "JOIN")) {
+ } else if (tokens.canConsume("CROSS", "JOIN") || tokens.canConsume("CROSS")) {
joinType = JoinType.CROSS;
}
if (joinType == null) break;
14 years, 5 months
DNA SVN: r1325 - in trunk/extensions/dna-connector-filesystem/src/main: resources/org/jboss/dna/connector/filesystem and 1 other directory.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-17 16:09:49 -0500 (Tue, 17 Nov 2009)
New Revision: 1325
Added:
trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/CustomPropertiesFactory.java
Modified:
trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemConnection.java
trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemI18n.java
trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemRequestProcessor.java
trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemSource.java
trunk/extensions/dna-connector-filesystem/src/main/resources/org/jboss/dna/connector/filesystem/FileSystemI18n.properties
Log:
DNA-553 Enhanced the file system connector so that it is much easier to define how non-standard properties can be made to appear on nt:folder, nt:file, and nt:resource nodes. By default, the file system connector behaves as it has in the past: it only uses the standard properties defined by JCR for files, folders and content nodes.
There is a new CustomPropertiesFactory interface that defines several methods for getting and setting custom properties. Simply implement this interface, and call FileSystemSource.setCustomPropertiesFactory(...) with an instance of your implementation. (Or, optionally subclass FileSystemSource, set the factory in the subclass' constructor, and inherit all other functionality).
The connector uses this factory whenever it reads 'nt:folder', 'nt:file', or 'nt:resource' nodes, and includes in these nodes the properties returned by the factory. The factory methods take several arguments, including the ExecutionContext (which can be used to obtain the PropertyFactory, value factories used to create property values, and even the MIME type detector), the java.io.File object, the Location (which includes the path), and other information. Hopefully this is sufficient to construct the necessary custom properties.
Added: trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/CustomPropertiesFactory.java
===================================================================
--- trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/CustomPropertiesFactory.java (rev 0)
+++ trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/CustomPropertiesFactory.java 2009-11-17 21:09:49 UTC (rev 1325)
@@ -0,0 +1,151 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.connector.filesystem;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.Map;
+import net.jcip.annotations.Immutable;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.Location;
+import org.jboss.dna.graph.connector.RepositorySourceException;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.Property;
+
+/**
+ * A simple interface that allows an implementer to define additional properties for "nt:folder", "nt:file", and "nt:resource"
+ * nodes created by the file system connector.
+ * <p>
+ * To use, supply the implementation to a {@link FileSystemSource} object (or register the factory in a subclass of
+ * FileSystemSource). Implementations should be immutable because they are shared between all the connections.
+ * </p>
+ */
+@Immutable
+public interface CustomPropertiesFactory extends Serializable {
+
+ /**
+ * Construct the custom properties that should be created for the supplied directory that is to be treated as an "nt:folder".
+ * The resulting properties should not include the standard {@link JcrLexicon#PRIMARY_TYPE} or {@link JcrLexicon#CREATED}
+ * properties, which are set automatically and will override any returned Property with the same name.
+ *
+ * @param context the execution context; never null
+ * @param location the Location of the node, which always contains a {@link Location#getPath() path}; never null
+ * @param directory the file system object; never null and {@link File#isDirectory()} will always return true
+ * @return the custom properties; never null but possibly empty
+ */
+ Collection<Property> getDirectoryProperties( ExecutionContext context,
+ Location location,
+ File directory );
+
+ /**
+ * Construct the custom properties that should be created for the supplied file that is to be treated as an "nt:resource",
+ * which is the node that contains the content-oriented properties and that is a child of a "nt:file" node. The resulting
+ * properties should not include the standard {@link JcrLexicon#PRIMARY_TYPE}, {@link JcrLexicon#LAST_MODIFIED}, or
+ * {@link JcrLexicon#DATA} properties, which are set automatically and will override any returned Property with the same name.
+ *
+ * @param context the execution context; never null
+ * @param location the Location of the node, which always contains a {@link Location#getPath() path}; never null
+ * @param file the file system object; never null and {@link File#isFile()} will always return true
+ * @param mimeType the mime type for the file, as determined by the {@link ExecutionContext#getMimeTypeDetector() MIME type
+ * detector}, or null if the MIME type could not be determined
+ * @return the custom properties; never null but possibly empty
+ */
+ Collection<Property> getResourceProperties( ExecutionContext context,
+ Location location,
+ File file,
+ String mimeType );
+
+ /**
+ * Construct the custom properties that should be created for the supplied file that is to be treated as an "nt:file". The
+ * resulting properties should not include the standard {@link JcrLexicon#PRIMARY_TYPE} or {@link JcrLexicon#CREATED}
+ * properties, which are set automatically and will override any returned Property with the same name.
+ * <p>
+ * Although the connector does not automatically determine the MIME type for the "nt:file" nodes, an implementation can
+ * determine the MIME type by using the context's {@link ExecutionContext#getMimeTypeDetector() MIME type detector}. Note,
+ * however, that this may be an expensive operation, so it should be used only when needed.
+ * </p>
+ *
+ * @param context the execution context; never null
+ * @param location the Location of the node, which always contains a {@link Location#getPath() path}; never null
+ * @param file the file system object; never null and {@link File#isFile()} will always return true
+ * @return the custom properties; never null but possibly empty
+ */
+ Collection<Property> getFileProperties( ExecutionContext context,
+ Location location,
+ File file );
+
+ /**
+ * Record the supplied properties as being set on the designated "nt:folder" node.
+ *
+ * @param context the execution context; never null
+ * @param sourceName the name of the repository source; never null
+ * @param location the Location of the node, which always contains a {@link Location#getPath() path}; never null
+ * @param file the file system object; never null, and both {@link File#exists()} and {@link File#isDirectory()} will always
+ * return true
+ * @param properties the properties that are to be set
+ * @throws RepositorySourceException if any properties are invalid or cannot be set on these nodes
+ */
+ void recordDirectoryProperties( ExecutionContext context,
+ String sourceName,
+ Location location,
+ File file,
+ Map<Name, Property> properties ) throws RepositorySourceException;
+
+ /**
+ * Record the supplied properties as being set on the designated "nt:file" node.
+ *
+ * @param context the execution context; never null
+ * @param sourceName the name of the repository source; never null
+ * @param location the Location of the node, which always contains a {@link Location#getPath() path}; never null
+ * @param file the file system object; never null, and both {@link File#exists()} and {@link File#isFile()} will always return
+ * true
+ * @param properties the properties that are to be set
+ * @throws RepositorySourceException if any properties are invalid or cannot be set on these nodes
+ */
+ void recordFileProperties( ExecutionContext context,
+ String sourceName,
+ Location location,
+ File file,
+ Map<Name, Property> properties ) throws RepositorySourceException;
+
+ /**
+ * Record the supplied properties as being set on the designated "nt:resource" node.
+ *
+ * @param context the execution context; never null
+ * @param sourceName the name of the repository source; never null
+ * @param location the Location of the node, which always contains a {@link Location#getPath() path}; never null
+ * @param file the file system object; never null, and both {@link File#exists()} and {@link File#isFile()} will always return
+ * true
+ * @param properties the properties that are to be set
+ * @throws RepositorySourceException if any properties are invalid or cannot be set on these nodes
+ */
+ void recordResourceProperties( ExecutionContext context,
+ String sourceName,
+ Location location,
+ File file,
+ Map<Name, Property> properties ) throws RepositorySourceException;
+
+}
Property changes on: trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/CustomPropertiesFactory.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemConnection.java
===================================================================
--- trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemConnection.java 2009-11-17 18:47:00 UTC (rev 1324)
+++ trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemConnection.java 2009-11-17 21:09:49 UTC (rev 1325)
@@ -52,6 +52,7 @@
private final int maxPathLength;
private final String workspaceRootPath;
private final boolean updatesAllowed;
+ private final CustomPropertiesFactory customPropertiesFactory;
FileSystemConnection( String sourceName,
String defaultWorkspaceName,
@@ -62,11 +63,13 @@
String workspaceRootPath,
int maxPathLength,
FilenameFilter filenameFilter,
- boolean updatesAllowed ) {
+ boolean updatesAllowed,
+ CustomPropertiesFactory customPropertiesFactory ) {
assert sourceName != null;
assert sourceName.trim().length() != 0;
assert availableWorkspaces != null;
assert rootNodeUuid != null;
+ assert customPropertiesFactory != null;
this.sourceName = sourceName;
this.defaultWorkspaceName = defaultWorkspaceName;
this.availableWorkspaces = availableWorkspaces;
@@ -77,6 +80,7 @@
this.maxPathLength = maxPathLength;
this.filenameFilter = filenameFilter;
this.updatesAllowed = updatesAllowed;
+ this.customPropertiesFactory = customPropertiesFactory;
}
/**
@@ -126,7 +130,8 @@
Request request ) throws RepositorySourceException {
RequestProcessor proc = new FileSystemRequestProcessor(sourceName, defaultWorkspaceName, availableWorkspaces,
creatingWorkspacesAllowed, rootNodeUuid, workspaceRootPath,
- maxPathLength, context, filenameFilter, updatesAllowed);
+ maxPathLength, context, filenameFilter, updatesAllowed,
+ customPropertiesFactory);
try {
proc.process(request);
} finally {
Modified: trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemI18n.java
===================================================================
--- trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemI18n.java 2009-11-17 18:47:00 UTC (rev 1324)
+++ trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemI18n.java 2009-11-17 21:09:49 UTC (rev 1325)
@@ -51,6 +51,7 @@
public static I18n sourceIsReadOnly;
public static I18n pathIsReadOnly;
public static I18n unableToCreateWorkspaces;
+ public static I18n errorSerializingCustomPropertiesFactory;
// Writable messages
public static I18n parentIsReadOnly;
Modified: trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemRequestProcessor.java
===================================================================
--- trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemRequestProcessor.java 2009-11-17 18:47:00 UTC (rev 1324)
+++ trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemRequestProcessor.java 2009-11-17 21:09:49 UTC (rev 1325)
@@ -30,8 +30,6 @@
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@@ -42,7 +40,6 @@
import java.util.UUID;
import org.jboss.dna.common.i18n.I18n;
import org.jboss.dna.common.util.FileUtil;
-import org.jboss.dna.graph.DnaIntLexicon;
import org.jboss.dna.graph.DnaLexicon;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.JcrLexicon;
@@ -91,31 +88,6 @@
private static final String DEFAULT_MIME_TYPE = "application/octet";
- /**
- * Only certain properties are tolerated when writing content (dna:resource or jcr:resource) nodes. These properties are
- * implicitly stored (primary type, data) or silently ignored (encoded, mimetype, last modified). The silently ignored
- * properties must be accepted to stay compatible with the JCR specification.
- */
- private static final Set<Name> ALLOWABLE_PROPERTIES_FOR_CONTENT = Collections.unmodifiableSet(new HashSet<Name>(
- Arrays.asList(new Name[] {
- JcrLexicon.PRIMARY_TYPE,
- JcrLexicon.DATA,
- JcrLexicon.ENCODED,
- JcrLexicon.MIMETYPE,
- JcrLexicon.LAST_MODIFIED,
- JcrLexicon.UUID,
- DnaIntLexicon.NODE_DEFINITON})));
- /**
- * Only certain properties are tolerated when writing files (nt:file) or folders (nt:folder) nodes. These properties are
- * implicitly stored in the file or folder (primary type, created).
- */
- private static final Set<Name> ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER = Collections.unmodifiableSet(new HashSet<Name>(
- Arrays.asList(new Name[] {
- JcrLexicon.PRIMARY_TYPE,
- JcrLexicon.CREATED,
- JcrLexicon.UUID,
- DnaIntLexicon.NODE_DEFINITON})));
-
private final String defaultNamespaceUri;
private final Map<String, File> availableWorkspaces;
private final boolean creatingWorkspacesAllowed;
@@ -126,6 +98,7 @@
private final boolean updatesAllowed;
private final MimeTypeDetector mimeTypeDetector;
private final UUID rootNodeUuid;
+ private final CustomPropertiesFactory customPropertiesFactory;
/**
* @param sourceName
@@ -141,6 +114,8 @@
* @param filenameFilter the filename filter to use to restrict the allowable nodes, or null if all files/directories are to
* be exposed by this connector
* @param updatesAllowed true if this connector supports updating the file system, or false if the connector is readonly
+ * @param customPropertiesFactory the factory that should be used to create custom properties for "nt:folder", "nt:file", and
+ * "nt:resource" nodes
*/
protected FileSystemRequestProcessor( String sourceName,
String defaultWorkspaceName,
@@ -151,11 +126,13 @@
int maxPathLength,
ExecutionContext context,
FilenameFilter filenameFilter,
- boolean updatesAllowed ) {
+ boolean updatesAllowed,
+ CustomPropertiesFactory customPropertiesFactory ) {
super(sourceName, context, null);
assert defaultWorkspaceName != null;
assert availableWorkspaces != null;
assert rootNodeUuid != null;
+ assert customPropertiesFactory != null;
this.availableWorkspaces = availableWorkspaces;
this.creatingWorkspacesAllowed = creatingWorkspacesAllowed;
this.defaultNamespaceUri = getExecutionContext().getNamespaceRegistry().getDefaultNamespaceUri();
@@ -165,6 +142,7 @@
this.defaultWorkspaceName = defaultWorkspaceName;
this.updatesAllowed = updatesAllowed;
this.mimeTypeDetector = context.getMimeTypeDetector();
+ this.customPropertiesFactory = customPropertiesFactory;
if (workspaceRootPath != null) {
this.workspaceRootPath = new File(workspaceRootPath);
@@ -279,24 +257,17 @@
return;
}
// Generate the properties for this File object ...
- DateTimeFactory dateFactory = getExecutionContext().getValueFactories().getDateFactory();
+ final ExecutionContext context = getExecutionContext();
+ final DateTimeFactory dateFactory = context.getValueFactories().getDateFactory();
// Note that we don't have 'created' timestamps, just last modified, so we'll have to use them
if (file.isDirectory()) {
// Add properties for the directory ...
+ request.addProperties(customPropertiesFactory.getDirectoryProperties(context, location, file));
request.addProperty(factory.create(JcrLexicon.PRIMARY_TYPE, JcrNtLexicon.FOLDER));
request.addProperty(factory.create(JcrLexicon.CREATED, dateFactory.create(file.lastModified())));
-
} else {
// It is a file, but ...
if (path.getLastSegment().getName().equals(JcrLexicon.CONTENT)) {
- // The request is to get properties of the "jcr:content" child node ...
- // ... use the dna:resource node type. This is the same as nt:resource, but is not referenceable
- // since we cannot assume that we control all access to this file and can track its movements
- request.addProperty(factory.create(JcrLexicon.PRIMARY_TYPE, DnaLexicon.RESOURCE));
- request.addProperty(factory.create(JcrLexicon.LAST_MODIFIED, dateFactory.create(file.lastModified())));
- // Don't really know the encoding, either ...
- // request.addProperty(factory.create(JcrLexicon.ENCODED, stringFactory.create("UTF-8")));
-
// Discover the mime type ...
String mimeType = null;
InputStream contents = null;
@@ -319,11 +290,25 @@
}
}
+ // First add any custom properties ...
+ request.addProperties(customPropertiesFactory.getResourceProperties(context, location, file, mimeType));
+
+ // The request is to get properties of the "jcr:content" child node ...
+ // ... use the dna:resource node type. This is the same as nt:resource, but is not referenceable
+ // since we cannot assume that we control all access to this file and can track its movements
+ request.addProperty(factory.create(JcrLexicon.PRIMARY_TYPE, DnaLexicon.RESOURCE));
+ request.addProperty(factory.create(JcrLexicon.LAST_MODIFIED, dateFactory.create(file.lastModified())));
+ // Don't really know the encoding, either ...
+ // request.addProperty(factory.create(JcrLexicon.ENCODED, stringFactory.create("UTF-8")));
+
// Now put the file's content into the "jcr:data" property ...
- BinaryFactory binaryFactory = getExecutionContext().getValueFactories().getBinaryFactory();
+ BinaryFactory binaryFactory = context.getValueFactories().getBinaryFactory();
request.addProperty(factory.create(JcrLexicon.DATA, binaryFactory.create(file)));
} else {
+ // First add any custom properties ...
+ request.addProperties(customPropertiesFactory.getFileProperties(context, location, file));
+
// The request is to get properties for the node representing the file
request.addProperty(factory.create(JcrLexicon.PRIMARY_TYPE, JcrNtLexicon.FILE));
request.addProperty(factory.create(JcrLexicon.CREATED, dateFactory.create(file.lastModified())));
@@ -367,8 +352,9 @@
Property primaryTypeProp = properties.get(JcrLexicon.PRIMARY_TYPE);
Name primaryType = primaryTypeProp == null ? null : nameFactory().create(primaryTypeProp.getFirstValue());
+ Path newPath = pathFactory().create(parentPath, request.named());
+ Location actualLocation = Location.create(newPath);
if (JcrNtLexicon.FILE.equals(primaryType)) {
- ensureValidProperties(request.properties(), ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER);
// The FILE node is represented by the existence of the file
if (!parent.canWrite()) {
@@ -410,8 +396,12 @@
ioe.getMessage()), ioe));
return;
}
+ customPropertiesFactory.recordFileProperties(getExecutionContext(),
+ getSourceName(),
+ actualLocation,
+ newFile,
+ properties);
} else if (JcrNtLexicon.RESOURCE.equals(primaryType) || DnaLexicon.RESOURCE.equals(primaryType)) {
- ensureValidProperties(request.properties(), ALLOWABLE_PROPERTIES_FOR_CONTENT);
if (!JcrLexicon.CONTENT.equals(request.named())) {
I18n msg = FileSystemI18n.invalidNameForResource;
String nodeName = request.named().getString(registry);
@@ -526,9 +516,13 @@
}
}
}
+ customPropertiesFactory.recordResourceProperties(getExecutionContext(),
+ getSourceName(),
+ actualLocation,
+ newFile,
+ properties);
} else if (JcrNtLexicon.FOLDER.equals(primaryType) || primaryType == null) {
- ensureValidProperties(request.properties(), ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER);
ensureValidPathLength(newFile);
if (!newFile.mkdir()) {
@@ -541,6 +535,11 @@
primaryType == null ? "null" : primaryType.getString(registry))));
return;
}
+ customPropertiesFactory.recordDirectoryProperties(getExecutionContext(),
+ getSourceName(),
+ actualLocation,
+ newFile,
+ properties);
} else {
// Set error and return
I18n msg = FileSystemI18n.unsupportedPrimaryType;
@@ -551,8 +550,7 @@
return;
}
- Path newPath = pathFactory().create(parentPath, request.named());
- request.setActualLocationOfNode(Location.create(newPath));
+ request.setActualLocationOfNode(actualLocation);
}
/**
@@ -564,8 +562,9 @@
public void process( UpdatePropertiesRequest request ) {
if (!updatesAllowed(request)) return;
+ Path path = request.on().getPath();
File workspace = getWorkspaceDirectory(request.inWorkspace());
- File target = getExistingFileFor(workspace, request.on().getPath(), request.on(), request);
+ File target = getExistingFileFor(workspace, path, request.on(), request);
if (!target.exists()) {
// getExistingFile fills in the PathNotFoundException for non-existent files
@@ -573,13 +572,31 @@
return;
}
+ Location location = request.on();
if (target.isFile()) {
- ensureValidProperties(request.properties().values(), ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER);
+ if (path.endsWith(JcrLexicon.CONTENT)) {
+ customPropertiesFactory.recordResourceProperties(getExecutionContext(),
+ getSourceName(),
+ location,
+ target,
+ request.properties());
+ } else {
+ customPropertiesFactory.recordFileProperties(getExecutionContext(),
+ getSourceName(),
+ location,
+ target,
+ request.properties());
+ }
} else {
- ensureValidProperties(request.properties().values(), ALLOWABLE_PROPERTIES_FOR_CONTENT);
+ assert target.isDirectory();
+ customPropertiesFactory.recordDirectoryProperties(getExecutionContext(),
+ getSourceName(),
+ location,
+ target,
+ request.properties());
}
- request.setActualLocationOfNode(request.on());
+ request.setActualLocationOfNode(location);
}
/**
@@ -996,32 +1013,6 @@
return getExecutionContext().getValueFactories().getUuidFactory();
}
- /**
- * Checks that the collection of {@code properties} only contains properties with allowable names.
- *
- * @param properties
- * @param validPropertyNames
- * @throws RepositorySourceException if {@code properties} contains a
- * @see #ALLOWABLE_PROPERTIES_FOR_CONTENT
- * @see #ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER
- */
- protected void ensureValidProperties( Collection<Property> properties,
- Set<Name> validPropertyNames ) {
- List<String> invalidNames = new LinkedList<String>();
- NamespaceRegistry registry = getExecutionContext().getNamespaceRegistry();
-
- for (Property property : properties) {
- if (!validPropertyNames.contains(property.getName())) {
- invalidNames.add(property.getName().getString(registry));
- }
- }
-
- if (!invalidNames.isEmpty()) {
- throw new RepositorySourceException(this.getSourceName(),
- FileSystemI18n.invalidPropertyNames.text(invalidNames.toString()));
- }
- }
-
protected void ensureValidPathLength( File root ) {
ensureValidPathLength(root, 0);
}
Modified: trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemSource.java
===================================================================
--- trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemSource.java 2009-11-17 18:47:00 UTC (rev 1324)
+++ trunk/extensions/dna-connector-filesystem/src/main/java/org/jboss/dna/connector/filesystem/FileSystemSource.java 2009-11-17 21:09:49 UTC (rev 1325)
@@ -24,16 +24,28 @@
package org.jboss.dna.connector.filesystem;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Hashtable;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;
+import javax.naming.BinaryRefAddr;
import javax.naming.Context;
import javax.naming.RefAddr;
import javax.naming.Reference;
@@ -45,12 +57,19 @@
import org.jboss.dna.common.util.CheckArg;
import org.jboss.dna.common.util.Logger;
import org.jboss.dna.common.util.StringUtil;
+import org.jboss.dna.graph.DnaIntLexicon;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.JcrLexicon;
+import org.jboss.dna.graph.Location;
import org.jboss.dna.graph.cache.CachePolicy;
import org.jboss.dna.graph.connector.RepositoryConnection;
import org.jboss.dna.graph.connector.RepositoryContext;
import org.jboss.dna.graph.connector.RepositorySource;
import org.jboss.dna.graph.connector.RepositorySourceCapabilities;
import org.jboss.dna.graph.connector.RepositorySourceException;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.NamespaceRegistry;
+import org.jboss.dna.graph.property.Property;
/**
* The {@link RepositorySource} for the connector that exposes an area of the local file system as content in a repository. This
@@ -61,6 +80,12 @@
public class FileSystemSource implements RepositorySource, ObjectFactory {
/**
+ * An immutable {@link CustomPropertiesFactory} implementation that is used by default when none is provided. Note that this
+ * implementation does restrict the properties that can be placed on file, folder and resource nodes.
+ */
+ protected static CustomPropertiesFactory DEFAULT_PROPERTIES_FACTORY = new StandardPropertiesFactory();
+
+ /**
* The first serialized version of this source. Version {@value} .
*/
private static final long serialVersionUID = 1L;
@@ -79,7 +104,7 @@
protected static final String ALLOW_CREATING_WORKSPACES = "allowCreatingWorkspaces";
protected static final String MAX_PATH_LENGTH = "maxPathLength";
protected static final String EXCLUSION_PATTERN = "exclusionPattern";
- protected static final String ALLOW_UPDATES = "allowUpdates";
+ protected static final String CUSTOM_PROPERTY_FACTORY = "customPropertyFactory";
/**
* This source supports events.
@@ -125,6 +150,7 @@
SUPPORTS_REFERENCES);
private transient CachePolicy cachePolicy;
private transient Map<String, File> availableWorkspaces;
+ private volatile CustomPropertiesFactory customPropertiesFactory;
/**
*
@@ -404,6 +430,24 @@
}
/**
+ * Get the factory that is used to create custom properties on "nt:folder", "nt:file", and "nt:resource" nodes.
+ *
+ * @return the factory, or null if no custom properties are to be created
+ */
+ public synchronized CustomPropertiesFactory getCustomPropertiesFactory() {
+ return customPropertiesFactory;
+ }
+
+ /**
+ * Set the factory that is used to create custom properties on "nt:folder", "nt:file", and "nt:resource" nodes.
+ *
+ * @param customPropertiesFactory the factory reference, or null if no custom properties will be created
+ */
+ public synchronized void setCustomPropertiesFactory( CustomPropertiesFactory customPropertiesFactory ) {
+ this.customPropertiesFactory = customPropertiesFactory;
+ }
+
+ /**
* {@inheritDoc}
*
* @see org.jboss.dna.graph.connector.RepositorySource#initialize(org.jboss.dna.graph.connector.RepositoryContext)
@@ -435,6 +479,18 @@
if (workspaceNames != null && workspaceNames.length != 0) {
ref.add(new StringRefAddr(PREDEFINED_WORKSPACE_NAMES, StringUtil.combineLines(workspaceNames)));
}
+ if (getCustomPropertiesFactory() != null) {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ CustomPropertiesFactory factory = getCustomPropertiesFactory();
+ try {
+ ObjectOutputStream oos = new ObjectOutputStream(baos);
+ oos.writeObject(factory);
+ ref.add(new BinaryRefAddr(CUSTOM_PROPERTY_FACTORY, baos.toByteArray()));
+ } catch (IOException e) {
+ I18n msg = FileSystemI18n.errorSerializingCustomPropertiesFactory;
+ throw new RepositorySourceException(getName(), msg.text(factory.getClass().getName(), getName()), e);
+ }
+ }
return ref;
}
@@ -446,7 +502,7 @@
Context nameCtx,
Hashtable<?, ?> environment ) throws Exception {
if (obj instanceof Reference) {
- Map<String, String> values = new HashMap<String, String>();
+ Map<String, Object> values = new HashMap<String, Object>();
Reference ref = (Reference)obj;
Enumeration<?> en = ref.getAll();
while (en.hasMoreElements()) {
@@ -455,17 +511,28 @@
String key = subref.getType();
Object value = subref.getContent();
if (value != null) values.put(key, value.toString());
+ } else if (subref instanceof BinaryRefAddr) {
+ String key = subref.getType();
+ Object value = subref.getContent();
+ if (value instanceof byte[]) {
+ // Deserialize ...
+ ByteArrayInputStream bais = new ByteArrayInputStream((byte[])value);
+ ObjectInputStream ois = new ObjectInputStream(bais);
+ value = ois.readObject();
+ values.put(key, value);
+ }
}
}
- String sourceName = values.get(SOURCE_NAME);
- String cacheTtlInMillis = values.get(CACHE_TIME_TO_LIVE_IN_MILLISECONDS);
- String retryLimit = values.get(RETRY_LIMIT);
- String defaultWorkspace = values.get(DEFAULT_WORKSPACE);
- String createWorkspaces = values.get(ALLOW_CREATING_WORKSPACES);
- String exclusionPattern = values.get(DEFAULT_EXCLUSION_PATTERN);
- String maxPathLength = values.get(DEFAULT_MAX_PATH_LENGTH);
+ String sourceName = (String)values.get(SOURCE_NAME);
+ String cacheTtlInMillis = (String)values.get(CACHE_TIME_TO_LIVE_IN_MILLISECONDS);
+ String retryLimit = (String)values.get(RETRY_LIMIT);
+ String defaultWorkspace = (String)values.get(DEFAULT_WORKSPACE);
+ String createWorkspaces = (String)values.get(ALLOW_CREATING_WORKSPACES);
+ String exclusionPattern = (String)values.get(EXCLUSION_PATTERN);
+ String maxPathLength = (String)values.get(DEFAULT_MAX_PATH_LENGTH);
+ Object customPropertiesFactory = values.get(CUSTOM_PROPERTY_FACTORY);
- String combinedWorkspaceNames = values.get(PREDEFINED_WORKSPACE_NAMES);
+ String combinedWorkspaceNames = (String)values.get(PREDEFINED_WORKSPACE_NAMES);
String[] workspaceNames = null;
if (combinedWorkspaceNames != null) {
List<String> paths = StringUtil.splitLines(combinedWorkspaceNames);
@@ -482,6 +549,7 @@
if (workspaceNames != null && workspaceNames.length != 0) source.setPredefinedWorkspaceNames(workspaceNames);
if (exclusionPattern != null) source.setExclusionPattern(exclusionPattern);
if (maxPathLength != null) source.setMaxPathLength(Integer.valueOf(maxPathLength));
+ if (customPropertiesFactory != null) source.setCustomPropertiesFactory((CustomPropertiesFactory)customPropertiesFactory);
return source;
}
return null;
@@ -562,9 +630,10 @@
};
}
+ CustomPropertiesFactory propFactory = customPropertiesFactory != null ? customPropertiesFactory : DEFAULT_PROPERTIES_FACTORY;
return new FileSystemConnection(name, defaultWorkspaceName, availableWorkspaces, isCreatingWorkspacesAllowed(),
cachePolicy, rootNodeUuid, workspaceRootPath, maxPathLength, filenameFilter,
- getSupportsUpdates());
+ getSupportsUpdates(), propFactory);
}
/**
@@ -576,6 +645,127 @@
this.availableWorkspaces = null;
}
+ protected static class StandardPropertiesFactory implements CustomPropertiesFactory {
+ private static final long serialVersionUID = 1L;
+ private final Collection<Property> empty = Collections.emptyList();
+
+ /**
+ * Only certain properties are tolerated when writing content (dna:resource or jcr:resource) nodes. These properties are
+ * implicitly stored (primary type, data) or silently ignored (encoded, mimetype, last modified). The silently ignored
+ * properties must be accepted to stay compatible with the JCR specification.
+ */
+ private final Set<Name> ALLOWABLE_PROPERTIES_FOR_CONTENT = Collections.unmodifiableSet(new HashSet<Name>(
+ Arrays.asList(new Name[] {
+ JcrLexicon.PRIMARY_TYPE,
+ JcrLexicon.DATA,
+ JcrLexicon.ENCODED,
+ JcrLexicon.MIMETYPE,
+ JcrLexicon.LAST_MODIFIED,
+ JcrLexicon.UUID,
+ DnaIntLexicon.NODE_DEFINITON})));
+ /**
+ * Only certain properties are tolerated when writing files (nt:file) or folders (nt:folder) nodes. These properties are
+ * implicitly stored in the file or folder (primary type, created).
+ */
+ private final Set<Name> ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER = Collections.unmodifiableSet(new HashSet<Name>(
+ Arrays.asList(new Name[] {
+ JcrLexicon.PRIMARY_TYPE,
+ JcrLexicon.CREATED,
+ JcrLexicon.UUID,
+ DnaIntLexicon.NODE_DEFINITON})));
+
+ public Collection<Property> getDirectoryProperties( ExecutionContext context,
+ Location location,
+ File directory ) {
+ return empty;
+ }
+
+ public Collection<Property> getFileProperties( ExecutionContext context,
+ Location location,
+ File file ) {
+ return empty;
+ }
+
+ public Collection<Property> getResourceProperties( ExecutionContext context,
+ Location location,
+ File file,
+ String mimeType ) {
+ return empty;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.connector.filesystem.CustomPropertiesFactory#recordDirectoryProperties(org.jboss.dna.graph.ExecutionContext,
+ * java.lang.String, org.jboss.dna.graph.Location, java.io.File, java.util.Map)
+ */
+ public void recordDirectoryProperties( ExecutionContext context,
+ String sourceName,
+ Location location,
+ File file,
+ Map<Name, Property> properties ) throws RepositorySourceException {
+ ensureValidProperties(context, sourceName, properties.values(), ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.connector.filesystem.CustomPropertiesFactory#recordFileProperties(org.jboss.dna.graph.ExecutionContext,
+ * java.lang.String, org.jboss.dna.graph.Location, java.io.File, java.util.Map)
+ */
+ public void recordFileProperties( ExecutionContext context,
+ String sourceName,
+ Location location,
+ File file,
+ Map<Name, Property> properties ) throws RepositorySourceException {
+ ensureValidProperties(context, sourceName, properties.values(), ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.connector.filesystem.CustomPropertiesFactory#recordResourceProperties(org.jboss.dna.graph.ExecutionContext,
+ * java.lang.String, org.jboss.dna.graph.Location, java.io.File, java.util.Map)
+ */
+ public void recordResourceProperties( ExecutionContext context,
+ String sourceName,
+ Location location,
+ File file,
+ Map<Name, Property> properties ) throws RepositorySourceException {
+ ensureValidProperties(context, sourceName, properties.values(), ALLOWABLE_PROPERTIES_FOR_CONTENT);
+ }
+
+ /**
+ * Checks that the collection of {@code properties} only contains properties with allowable names.
+ *
+ * @param context
+ * @param sourceName
+ * @param properties
+ * @param validPropertyNames
+ * @throws RepositorySourceException if {@code properties} contains a property that is not an allowable property
+ * @see #ALLOWABLE_PROPERTIES_FOR_CONTENT
+ * @see #ALLOWABLE_PROPERTIES_FOR_FILE_OR_FOLDER
+ */
+ protected void ensureValidProperties( ExecutionContext context,
+ String sourceName,
+ Collection<Property> properties,
+ Set<Name> validPropertyNames ) {
+ List<String> invalidNames = new LinkedList<String>();
+ NamespaceRegistry registry = context.getNamespaceRegistry();
+
+ for (Property property : properties) {
+ if (!validPropertyNames.contains(property.getName())) {
+ invalidNames.add(property.getName().getString(registry));
+ }
+ }
+
+ if (!invalidNames.isEmpty()) {
+ throw new RepositorySourceException(sourceName, FileSystemI18n.invalidPropertyNames.text(invalidNames.toString()));
+ }
+ }
+
+ }
+
@Immutable
/*package*/class FileSystemCachePolicy implements CachePolicy {
private static final long serialVersionUID = 1L;
Modified: trunk/extensions/dna-connector-filesystem/src/main/resources/org/jboss/dna/connector/filesystem/FileSystemI18n.properties
===================================================================
--- trunk/extensions/dna-connector-filesystem/src/main/resources/org/jboss/dna/connector/filesystem/FileSystemI18n.properties 2009-11-17 18:47:00 UTC (rev 1324)
+++ trunk/extensions/dna-connector-filesystem/src/main/resources/org/jboss/dna/connector/filesystem/FileSystemI18n.properties 2009-11-17 21:09:49 UTC (rev 1325)
@@ -39,6 +39,7 @@
onlyTheDefaultNamespaceIsAllowed = {0} requires node names use the default namespace: {1}
sourceIsReadOnly = The source "{0}" does not allow updates
pathIsReadOnly = The path "{0}" in workspace "{1}" in {2} cannot be written to. See java.io.File\#canWrite().
+errorSerializingCustomPropertiesFactory = Error serializing a {0} instance owned by the {1} FileSystemSource
# Writable tests
parentIsReadOnly = The parent node at path "{0}" in workspace "{1}" in {2} cannot be written to. See java.io.File\#canWrite().
14 years, 5 months
DNA SVN: r1324 - in trunk/dna-graph/src: main/java/org/jboss/dna/graph/query/optimize and 3 other directories.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-17 13:47:00 -0500 (Tue, 17 Nov 2009)
New Revision: 1324
Added:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RemoveEmptyAccessNodes.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteria.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteriaTest.java
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Operator.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizer.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanNode.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java
Log:
DNA-552 The abstract query model now has a new Constraint subclass called Between that has a DynamicOperand, a StaticOperand lower boundary, a StaticOperand upper boundary, and a flag for each boundary that specifies whether the boundary is included or excluded in the range. This very capably handles all kinds of continuous ranges.
The dna-search project was enhanced to support this new Constraint type for numeric values (long, double, decimal, and date).
The SQL language parser now handles the '<dynamicOperand> BETWEEN <staticOperandLowerBound> [EXCLUSIVE] AND <staticOperandUpperBound> [EXCLUSIVE]'.
Additionally, a query optimizer rule was created to look for pairs of Comparison constraints that can be merged or rewritten:
- two range constraints are replaced with a single Between constraint (e.g., 'table.column >= 4 AND table.column <= 10' replaced with 'table.column BETWEEN 4 AND 10', with the exclusive flags set properly)
- two range constraints that specify a range of a single value are replaced with a single equality constraint (e.g., 'table.column >=4 AND table.column <= 4' replaced with 'table.column = 4')
- unnecessary Comparison constraints are removed (e.g., given 'table.column < 4 AND table.column < 10' the latter constraint is removed)
- conflicting constraints that will never be satisfied (e.g., 'table.column < 4 AND table.column > 10'), marking the ACCESS node as having no results (this is handled in the query processor)
The QueryProcessor was also changed to look for an ACCESS node with the (new) ACCESS_NO_RESULTS property, and to create a NoResultsComponent rather than a real access component. This will work in that an access query will never be made when we know the range constraints are invalid, but it does not look for other always-false constraints nor does it optimize the plan. (A RemoveEmptyAccessNodes optimizer plan was created, though it basically is a no-op at the moment.)
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Operator.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Operator.java 2009-11-17 18:46:21 UTC (rev 1323)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Operator.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -87,6 +87,27 @@
}
/**
+ * Determine whether this operator is one that is used to define a range of values: {@link #LESS_THAN <},
+ * {@link #GREATER_THAN >}, {@link #LESS_THAN_OR_EQUAL_TO <=}, or {@link #GREATER_THAN_OR_EQUAL_TO >=}.
+ *
+ * @return true if this operator is a range operator, or false otherwise
+ */
+ public boolean isRangeOperator() {
+ switch (this) {
+ case GREATER_THAN:
+ case GREATER_THAN_OR_EQUAL_TO:
+ case LESS_THAN:
+ case LESS_THAN_OR_EQUAL_TO:
+ return true;
+ case EQUAL_TO:
+ case LIKE:
+ case NOT_EQUAL_TO:
+ default:
+ return false;
+ }
+ }
+
+ /**
* {@inheritDoc}
*
* @see java.lang.Enum#toString()
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RemoveEmptyAccessNodes.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RemoveEmptyAccessNodes.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RemoveEmptyAccessNodes.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -0,0 +1,65 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.optimize;
+
+import java.util.LinkedList;
+import net.jcip.annotations.Immutable;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.PlanNode.Property;
+import org.jboss.dna.graph.query.plan.PlanNode.Type;
+
+/**
+ * An {@link OptimizerRule optimizer rule} that removes any ACCESS nodes that are known to never return any tuples because of
+ * conflicting constraints.
+ */
+@Immutable
+public class RemoveEmptyAccessNodes implements OptimizerRule {
+
+ public static final RemoveEmptyAccessNodes INSTANCE = new RemoveEmptyAccessNodes();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.optimize.OptimizerRule#execute(org.jboss.dna.graph.query.QueryContext,
+ * org.jboss.dna.graph.query.plan.PlanNode, java.util.LinkedList)
+ */
+ public PlanNode execute( QueryContext context,
+ PlanNode plan,
+ LinkedList<OptimizerRule> ruleStack ) {
+ // Find all access nodes ...
+ for (PlanNode access : plan.findAllAtOrBelow(Type.ACCESS)) {
+ if (access.getProperty(Property.ACCESS_NO_RESULTS, Boolean.class)) {
+ // This node has conflicting constraints and will never return any results ...
+
+ // TODO: implement this rule.
+
+ // At least the QueryProcessor looks for this property and always creates a NoResultsComponent,
+ // saving some work. But implementing this rule will make queries more efficient.
+ }
+ }
+
+ return plan;
+ }
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RemoveEmptyAccessNodes.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteria.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteria.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteria.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -0,0 +1,335 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.optimize;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import net.jcip.annotations.Immutable;
+import org.jboss.dna.graph.property.ValueComparators;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
+import org.jboss.dna.graph.query.model.BindVariableName;
+import org.jboss.dna.graph.query.model.Comparison;
+import org.jboss.dna.graph.query.model.Constraint;
+import org.jboss.dna.graph.query.model.DynamicOperand;
+import org.jboss.dna.graph.query.model.Literal;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.SelectorName;
+import org.jboss.dna.graph.query.model.StaticOperand;
+import org.jboss.dna.graph.query.model.Visitor;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.PlanNode.Property;
+import org.jboss.dna.graph.query.plan.PlanNode.Type;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Multimaps;
+
+/**
+ * An {@link OptimizerRule optimizer rule} that rewrites two {@link And AND-ed} {@link Constraint}s that constraint a dynamic
+ * operand to a range of values as a single {@link Between} constraint. This rule also collapses and removes any constraints that
+ * are unnecessary because other constraints are more restrictive or because they cancel out other constraints.
+ */
+@Immutable
+public class RewriteAsRangeCriteria implements OptimizerRule {
+
+ protected static final Constraint CONFLICTING_CONSTRAINT = new Constraint() {
+ public void accept( Visitor visitor ) {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ public static final RewriteAsRangeCriteria INSTANCE = new RewriteAsRangeCriteria();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.optimize.OptimizerRule#execute(org.jboss.dna.graph.query.QueryContext,
+ * org.jboss.dna.graph.query.plan.PlanNode, java.util.LinkedList)
+ */
+ public PlanNode execute( QueryContext context,
+ PlanNode plan,
+ LinkedList<OptimizerRule> ruleStack ) {
+ // Find all the access nodes ...
+ boolean rewritten = false;
+ boolean foundNoResults = false;
+ for (PlanNode access : plan.findAllAtOrBelow(Type.ACCESS)) {
+ // Look for select nodes below an ACCESS node that have a single Comparison constraint,
+ // and accumulate them keyed by the dynamic operand ...
+ Multimap<DynamicOperand, PlanNode> selectNodeByOperand = Multimaps.newArrayListMultimap();
+ for (PlanNode select : access.findAllAtOrBelow(Type.SELECT)) {
+ Constraint constraint = select.getProperty(Property.SELECT_CRITERIA, Constraint.class);
+ // Look for Comparison constraints that use a range operator
+ if (constraint instanceof Comparison) {
+ Comparison comparison = (Comparison)constraint;
+ if (comparison.getOperator().isRangeOperator()) {
+ selectNodeByOperand.put(comparison.getOperand1(), select);
+ }
+ }
+ }
+
+ if (!selectNodeByOperand.isEmpty()) {
+
+ // Go through the constraints we've found ...
+ for (DynamicOperand operand : selectNodeByOperand.keySet()) {
+ Collection<PlanNode> nodes = selectNodeByOperand.get(operand);
+ if (nodes.size() <= 1) continue;
+
+ // Extract the constraints from the nodes ...
+ List<Comparison> rangeConstraints = new ArrayList<Comparison>(nodes.size());
+ List<PlanNode> selectNodes = new ArrayList<PlanNode>(nodes.size());
+ Set<SelectorName> selectors = null;
+ for (PlanNode select : nodes) {
+ selectNodes.add(select);
+ Comparison constraint = select.getProperty(Property.SELECT_CRITERIA, Comparison.class);
+ rangeConstraints.add(constraint);
+ // Record the selector names (should all be the same) ...
+ if (selectors == null) selectors = select.getSelectors();
+ else assert selectors.equals(select.getSelectors());
+ }
+
+ // Attempt to merge the constraints ...
+ Constraint merged = rewrite(context, rangeConstraints);
+ if (merged == CONFLICTING_CONSTRAINT) {
+ // The ANDed constraints cancel each other out, so this whole access node will return no results ...
+ access.setProperty(Property.ACCESS_NO_RESULTS, Boolean.TRUE);
+ foundNoResults = true;
+ break; // don't do anything else under this access node
+ }
+ if (merged != null) {
+ // Add a SELECT node for the new merged constraint ...
+ PlanNode newSelect = new PlanNode(Type.SELECT);
+ newSelect.getSelectors().addAll(selectors);
+ newSelect.setProperty(Property.SELECT_CRITERIA, merged);
+
+ // And insert the SELECT node into the tree (just below the ACCESS, we'll rerun pushdown selects) ...
+ assert access.getChildCount() == 1;
+ access.getFirstChild().insertAsParent(newSelect);
+ rewritten = true;
+ }
+
+ // Remove any of the SELECT nodes that were not needed (this can happen if the constraints are not needed) ...
+ Iterator<PlanNode> nodeIter = selectNodes.iterator();
+ Iterator<Comparison> constraintIter = rangeConstraints.iterator();
+ while (nodeIter.hasNext()) {
+ assert constraintIter.hasNext();
+ PlanNode node = nodeIter.next();
+ Comparison comparison = constraintIter.next();
+ if (comparison == null) {
+ // This comparison was rewritten, so remove the PlanNode ...
+ node.extractFromParent();
+ nodeIter.remove();
+ }
+ }
+ assert !constraintIter.hasNext();
+ }
+ }
+ }
+
+ if (rewritten) {
+ // We mucked with the SELECT nodes, adding SELECT node for each rewritten constraint.
+ // Rerun the rule that pushes SELECT nodes ...
+ ruleStack.addFirst(PushSelectCriteria.INSTANCE);
+ }
+ if (foundNoResults) {
+ ruleStack.addFirst(RemoveEmptyAccessNodes.INSTANCE);
+ }
+
+ return plan;
+ }
+
+ /**
+ * Rewrite the supplied comparisons, returning the new constraint and nulling in the supplied list those comparisons that were
+ * rewritten (and leaving those that were not rewritten)
+ *
+ * @param context the query context
+ * @param comparisons the list of comparisons that sould be rewritten if possible; never null
+ * @return the rewritten constraint, or null if no comparisons were rewritten
+ */
+ @SuppressWarnings( "fallthrough" )
+ protected Constraint rewrite( QueryContext context,
+ List<Comparison> comparisons ) {
+ // Look for the lower bound (greater-than) and upper bound (less-than) ...
+ Comparison lessThan = null;
+ Comparison greaterThan = null;
+ List<Comparison> notNeeded = new LinkedList<Comparison>();
+ boolean inclusive = false;
+ for (Comparison comparison : comparisons) {
+ switch (comparison.getOperator()) {
+ case GREATER_THAN_OR_EQUAL_TO:
+ inclusive = true;
+ case GREATER_THAN:
+ if (greaterThan != null) {
+ // Find the smallest value ...
+ Comparison newGreaterThan = getComparison(context, greaterThan, comparison, true);
+ notNeeded.add(newGreaterThan == greaterThan ? comparison : greaterThan);
+ greaterThan = newGreaterThan;
+ } else {
+ greaterThan = comparison;
+ }
+ break;
+ case LESS_THAN_OR_EQUAL_TO:
+ inclusive = true;
+ case LESS_THAN:
+ if (lessThan != null) {
+ // Find the largest value ...
+ Comparison newLessThan = getComparison(context, lessThan, comparison, false);
+ notNeeded.add(newLessThan == lessThan ? comparison : lessThan);
+ greaterThan = newLessThan;
+ } else {
+ lessThan = comparison;
+ }
+ break;
+ default:
+ assert false;
+ return null;
+ }
+ }
+ if (lessThan == null || greaterThan == null) return null;
+
+ // Create the new Comparison ...
+ Constraint result = null;
+
+ // Compute the difference between the lessThan value and greaterThan value ...
+ int diff = compareStaticOperands(context, greaterThan, lessThan);
+ if (diff == 0) {
+ // The static operands are equivalent ...
+ if (inclusive) {
+ // At least one of the sides was inclusive, meaning the constraints were something
+ // like 'x >= 2 AND x < 2', so we can replace these with an equality constraint ...
+ result = new Comparison(lessThan.getOperand1(), Operator.EQUAL_TO, lessThan.getOperand2());
+ notNeeded.add(lessThan);
+ notNeeded.add(greaterThan);
+ } else {
+ // Neither is inclusive, so really the constraints are not needed anymore.
+ // And, because the constraints conflict, the whole access will return no nodes.
+ // So return the placeholder ...
+ return CONFLICTING_CONSTRAINT;
+ }
+ } else if (diff < 0) {
+ // The range is valid as is ...
+ boolean lowerInclusive = greaterThan.getOperator() == Operator.GREATER_THAN_OR_EQUAL_TO;
+ boolean upperInclusive = lessThan.getOperator() == Operator.LESS_THAN_OR_EQUAL_TO;
+ result = new Between(lessThan.getOperand1(), greaterThan.getOperand2(), lessThan.getOperand2(), lowerInclusive,
+ upperInclusive);
+ notNeeded.add(lessThan);
+ notNeeded.add(greaterThan);
+ } else {
+ // The range is actually something like 'x < 2 AND x > 4', which can never happen ...
+ return CONFLICTING_CONSTRAINT;
+ }
+
+ // Now null out those comparison objects that are not needed ...
+ nullReference(comparisons, notNeeded);
+ return result;
+ }
+
+ /**
+ * Find all occurrences of the comparison object in the supplied list and null the list's reference to it.
+ *
+ * @param comparisons the collection in which null references are to be placed
+ * @param comparisonToNull the comparison that is to be found and nulled in the collection
+ */
+ protected void nullReference( List<Comparison> comparisons,
+ Comparison comparisonToNull ) {
+ if (comparisonToNull != null) {
+ for (int i = 0; i != comparisons.size(); ++i) {
+ if (comparisons.get(i) == comparisonToNull) comparisons.set(i, null);
+ }
+ }
+ }
+
+ /**
+ * Find all references in the supplied list that match those supplied and set them to null.
+ *
+ * @param comparisons the collection in which null references are to be placed
+ * @param comparisonsToNull the comparisons that are to be found and nulled in the collection
+ */
+ protected void nullReference( List<Comparison> comparisons,
+ Iterable<Comparison> comparisonsToNull ) {
+ for (Comparison comparisonToNull : comparisonsToNull) {
+ nullReference(comparisons, comparisonToNull);
+ }
+ }
+
+ /**
+ * Compare the values used in the two comparisons
+ *
+ * @param context the query context; may not be null
+ * @param comparison1 the first comparison object; may not be null
+ * @param comparison2 the second comparison object; may not be null
+ * @return 0 if the values are the same, less than 0 if the first comparison's value is less than the second's, or greater
+ * than 0 if the first comparison's value is greater than the second's
+ */
+ protected int compareStaticOperands( QueryContext context,
+ Comparison comparison1,
+ Comparison comparison2 ) {
+ Object value1 = getValue(context, comparison1.getOperand2());
+ Object value2 = getValue(context, comparison2.getOperand2());
+ return ValueComparators.OBJECT_COMPARATOR.compare(value1, value2);
+ }
+
+ /**
+ * Get the comparison with the smallest (or largest) value.
+ *
+ * @param context the query context; may not be null
+ * @param comparison1 the first comparison object; may not be null
+ * @param comparison2 the second comparison object; may not be null
+ * @param smallest true if the comparison with the smallest value should be returned, or false otherwise
+ * @return the comparison with the smallest (or largest) value
+ */
+ protected Comparison getComparison( QueryContext context,
+ Comparison comparison1,
+ Comparison comparison2,
+ boolean smallest ) {
+ int diff = compareStaticOperands(context, comparison1, comparison2);
+ if (diff == 0) {
+ // They are the same ...
+ return comparison1;
+ }
+ if (!smallest) diff = -1 * diff;
+ return diff < 1 ? comparison1 : comparison2;
+ }
+
+ /**
+ * Get the value associated with the static operand of the comparison. If the operand is a {@link BindVariableName variable
+ * name}, the variable value is returned.
+ *
+ * @param context the query context; may not be null
+ * @param operand the static operand; may not be null
+ * @return the value of the static operand
+ */
+ protected Object getValue( QueryContext context,
+ StaticOperand operand ) {
+ if (operand instanceof Literal) {
+ Literal literal = (Literal)operand;
+ return literal.getValue();
+ }
+ BindVariableName variable = (BindVariableName)operand;
+ return context.getVariables().get(variable.getVariableName());
+ }
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteria.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizer.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizer.java 2009-11-17 18:46:21 UTC (rev 1323)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/optimize/RuleBasedOptimizer.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -69,6 +69,7 @@
*/
protected void populateRuleStack( LinkedList<OptimizerRule> ruleStack,
PlanHints hints ) {
+ ruleStack.addFirst(RewriteAsRangeCriteria.INSTANCE);
if (hints.hasJoin) {
ruleStack.addFirst(ChooseJoinAlgorithm.USE_ONLY_NESTED_JOIN_ALGORITHM);
ruleStack.addFirst(RewriteIdentityJoins.INSTANCE);
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanNode.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanNode.java 2009-11-17 18:46:21 UTC (rev 1323)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanNode.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -39,7 +39,6 @@
import org.jboss.dna.common.util.ObjectUtil;
import org.jboss.dna.graph.ExecutionContext;
import org.jboss.dna.graph.query.model.Column;
-import org.jboss.dna.graph.query.model.Command;
import org.jboss.dna.graph.query.model.Constraint;
import org.jboss.dna.graph.query.model.JoinCondition;
import org.jboss.dna.graph.query.model.JoinType;
@@ -189,9 +188,11 @@
/** For LIMIT nodes, the offset value. Value is an {@link Integer} object. */
LIMIT_OFFSET,
- /** For ACCESS nodes, the {@link Command} that is to be executed by the source */
- // ACCESS_COMMAND,
- BOGUS; // remove this
+ /**
+ * For ACCESS nodes, this signifies that the node will never return results. Value is a {@link Boolean} object, though the
+ * mere presence of this property signifies that it is no longer needed.
+ */
+ ACCESS_NO_RESULTS
}
private Type type;
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java 2009-11-17 18:46:21 UTC (rev 1323)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/process/QueryProcessor.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -161,11 +161,16 @@
ProcessingComponent component = null;
switch (node.getType()) {
case ACCESS:
- // Create the component to handle the ACCESS node ...
- assert node.getChildCount() == 1;
- component = createAccessComponent(originalQuery, context, node, columns, analyzer);
- // // Don't do anything special with an access node at the moment ...
- // component = createComponent(context, node.getFirstChild(), columns, analyzer);
+ // If the ACCESS node will not have results ...
+ if (node.getProperty(Property.ACCESS_NO_RESULTS, Boolean.class)) {
+ component = new NoResultsComponent(context, columns);
+ } else {
+ // Create the component to handle the ACCESS node ...
+ assert node.getChildCount() == 1;
+ component = createAccessComponent(originalQuery, context, node, columns, analyzer);
+ // // Don't do anything special with an access node at the moment ...
+ // component = createComponent(context, node.getFirstChild(), columns, analyzer);
+ }
break;
case DUP_REMOVE:
// Create the component under the DUP_REMOVE ...
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java 2009-11-17 18:46:21 UTC (rev 1323)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/PushSelectCriteriaTest.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -206,7 +206,7 @@
// Execute the rule ...
PlanNode result = rule.execute(context, project, new LinkedList<OptimizerRule>());
- System.out.println(result);
+ // System.out.println(result);
assertThat(result, is(sameInstance(project)));
assertChildren(project, select3);
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java 2009-11-17 18:46:21 UTC (rev 1323)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/ReplaceViewsTest.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -131,8 +131,8 @@
// Execute the rule ...
PlanNode result = rule.execute(context, project, new LinkedList<OptimizerRule>());
- System.out.println(project);
- System.out.println(result);
+ // System.out.println(project);
+ // System.out.println(result);
assertThat(result.isSameAs(project), is(true));
assertChildren(project, select1);
assertChildren(select1, select2);
Added: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteriaTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteriaTest.java (rev 0)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteriaTest.java 2009-11-17 18:47:00 UTC (rev 1324)
@@ -0,0 +1,417 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.optimize;
+
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsNull.nullValue;
+import static org.hamcrest.core.IsSame.sameInstance;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Mockito.mock;
+import java.util.LinkedList;
+import org.jboss.dna.graph.ExecutionContext;
+import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.query.AbstractQueryTest;
+import org.jboss.dna.graph.query.QueryContext;
+import org.jboss.dna.graph.query.model.Between;
+import org.jboss.dna.graph.query.model.Comparison;
+import org.jboss.dna.graph.query.model.Literal;
+import org.jboss.dna.graph.query.model.Operator;
+import org.jboss.dna.graph.query.model.PropertyValue;
+import org.jboss.dna.graph.query.plan.PlanNode;
+import org.jboss.dna.graph.query.plan.PlanNode.Property;
+import org.jboss.dna.graph.query.plan.PlanNode.Type;
+import org.jboss.dna.graph.query.validate.Schemata;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class RewriteAsRangeCriteriaTest extends AbstractQueryTest {
+
+ private RewriteAsRangeCriteria rule;
+ private LinkedList<OptimizerRule> rules;
+ private QueryContext context;
+ private boolean print = true;
+
+ @Before
+ public void beforeEach() {
+ rule = RewriteAsRangeCriteria.INSTANCE;
+ rules = new LinkedList<OptimizerRule>();
+ rules.add(rule);
+ context = new QueryContext(new ExecutionContext(), mock(Schemata.class));
+ }
+
+ protected void print( PlanNode node ) {
+ if (print) System.out.println(node);
+ }
+
+ /**
+ * Before:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 < 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 > 1>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ *
+ * And after:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c1 BETWEEN 1 EXCLUSIVE AND 3 EXCLUSIVE>
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ */
+ @Test
+ public void shouldReplaceComparisonsSpecifyingExclusiveRangeWithBetweenConstraint() {
+ // Each of the PROJECT, SELECT, and SELECT nodes must have the names of the selectors that they apply to ...
+ PlanNode access = new PlanNode(Type.ACCESS, selector("t1"));
+ PlanNode project = new PlanNode(Type.PROJECT, access, selector("t1"));
+ PlanNode select1 = new PlanNode(Type.SELECT, project, selector("t1"));
+ PlanNode select2 = new PlanNode(Type.SELECT, select1, selector("t1"));
+ PlanNode select3 = new PlanNode(Type.SELECT, select2, selector("t1"));
+ PlanNode source = new PlanNode(Type.SOURCE, select3, selector("t1"));
+ source.setProperty(Property.SOURCE_NAME, selector("t1"));
+ select1.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c2")),
+ Operator.EQUAL_TO, new Literal(100L)));
+ select2.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.LESS_THAN, new Literal(3L)));
+ select3.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.GREATER_THAN, new Literal(1L)));
+
+ // Execute the rule ...
+ print(access);
+ PlanNode result = executeRules(access);
+ print(result);
+
+ // Compare results ...
+ assertThat(result, is(sameInstance(access)));
+ assertChildren(access, project);
+ PlanNode newSelect = project.getFirstChild();
+ assertThat(newSelect.getType(), is(Type.SELECT));
+ assertThat(newSelect.getSelectors(), is(access.getSelectors()));
+ assertThat(newSelect.getParent(), is(sameInstance(project)));
+ Between between = newSelect.getProperty(Property.SELECT_CRITERIA, Between.class);
+ assertThat(between.getOperand(), is(select2.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand1()));
+ assertThat(between.getLowerBound(), is(select3.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand2()));
+ assertThat(between.getUpperBound(), is(select2.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand2()));
+ assertThat(between.isLowerBoundIncluded(), is(false));
+ assertThat(between.isUpperBoundIncluded(), is(false));
+ assertChildren(newSelect, select1);
+ assertChildren(select1, source);
+ }
+
+ /**
+ * Before:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 <= 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 >= 1>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ *
+ * And after:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c1 BETWEEN 1 AND 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ */
+ @Test
+ public void shouldReplaceComparisonsSpecifyingInclusiveRangeWithBetweenConstraint() {
+ // Each of the PROJECT, SELECT, and SELECT nodes must have the names of the selectors that they apply to ...
+ PlanNode access = new PlanNode(Type.ACCESS, selector("t1"));
+ PlanNode project = new PlanNode(Type.PROJECT, access, selector("t1"));
+ PlanNode select1 = new PlanNode(Type.SELECT, project, selector("t1"));
+ PlanNode select2 = new PlanNode(Type.SELECT, select1, selector("t1"));
+ PlanNode select3 = new PlanNode(Type.SELECT, select2, selector("t1"));
+ PlanNode source = new PlanNode(Type.SOURCE, select3, selector("t1"));
+ source.setProperty(Property.SOURCE_NAME, selector("t1"));
+ select1.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c2")),
+ Operator.EQUAL_TO, new Literal(100L)));
+ select2.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.LESS_THAN_OR_EQUAL_TO, new Literal(3L)));
+ select3.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.GREATER_THAN_OR_EQUAL_TO, new Literal(1L)));
+
+ // Execute the rule ...
+ print(access);
+ PlanNode result = executeRules(access);
+ print(result);
+
+ // Compare results ...
+ assertThat(result, is(sameInstance(access)));
+ assertChildren(access, project);
+ PlanNode newSelect = project.getFirstChild();
+ assertThat(newSelect.getType(), is(Type.SELECT));
+ assertThat(newSelect.getSelectors(), is(access.getSelectors()));
+ assertThat(newSelect.getParent(), is(sameInstance(project)));
+ Between between = newSelect.getProperty(Property.SELECT_CRITERIA, Between.class);
+ assertThat(between.getOperand(), is(select2.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand1()));
+ assertThat(between.getLowerBound(), is(select3.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand2()));
+ assertThat(between.getUpperBound(), is(select2.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand2()));
+ assertThat(between.isLowerBoundIncluded(), is(true));
+ assertThat(between.isUpperBoundIncluded(), is(true));
+ assertChildren(newSelect, select1);
+ assertChildren(select1, source);
+ }
+
+ /**
+ * Before:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 > 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 < 1>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ *
+ * And after:
+ *
+ * <pre>
+ * Access [t1] <ACCESS_NO_RESULTS=true>
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 > 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 < 1>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ */
+ @Test
+ public void shouldReplaceComparisonsSpecifyingExclusiveRangeWithNotBetweenConstraint() {
+ // Each of the PROJECT, SELECT, and SELECT nodes must have the names of the selectors that they apply to ...
+ PlanNode access = new PlanNode(Type.ACCESS, selector("t1"));
+ PlanNode project = new PlanNode(Type.PROJECT, access, selector("t1"));
+ PlanNode select1 = new PlanNode(Type.SELECT, project, selector("t1"));
+ PlanNode select2 = new PlanNode(Type.SELECT, select1, selector("t1"));
+ PlanNode select3 = new PlanNode(Type.SELECT, select2, selector("t1"));
+ PlanNode source = new PlanNode(Type.SOURCE, select3, selector("t1"));
+ source.setProperty(Property.SOURCE_NAME, selector("t1"));
+ select1.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c2")),
+ Operator.EQUAL_TO, new Literal(100L)));
+ select2.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.GREATER_THAN, new Literal(3L)));
+ select3.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.LESS_THAN, new Literal(1L)));
+
+ // Execute the rule ...
+ print(access);
+ PlanNode result = executeRules(access);
+ print(result);
+
+ // Compare results ...
+ assertThat(result, is(sameInstance(access)));
+ assertChildren(access, project);
+ assertThat(access.getProperty(Property.ACCESS_NO_RESULTS, Boolean.class), is(true));
+ }
+
+ /**
+ * Before:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 >= 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 <= 1>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ *
+ * And after:
+ *
+ * <pre>
+ * Access [t1] <ACCESS_NO_RESULTS=true>
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 >= 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 <= 1>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ */
+ @Test
+ public void shouldReplaceComparisonsSpecifyingInclusiveRangeWithNotBetweenConstraint() {
+ // Each of the PROJECT, SELECT, and SELECT nodes must have the names of the selectors that they apply to ...
+ PlanNode access = new PlanNode(Type.ACCESS, selector("t1"));
+ PlanNode project = new PlanNode(Type.PROJECT, access, selector("t1"));
+ PlanNode select1 = new PlanNode(Type.SELECT, project, selector("t1"));
+ PlanNode select2 = new PlanNode(Type.SELECT, select1, selector("t1"));
+ PlanNode select3 = new PlanNode(Type.SELECT, select2, selector("t1"));
+ PlanNode source = new PlanNode(Type.SOURCE, select3, selector("t1"));
+ source.setProperty(Property.SOURCE_NAME, selector("t1"));
+ select1.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c2")),
+ Operator.EQUAL_TO, new Literal(100L)));
+ select2.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.GREATER_THAN_OR_EQUAL_TO, new Literal(3L)));
+ select3.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.LESS_THAN_OR_EQUAL_TO, new Literal(1L)));
+
+ // Execute the rule ...
+ print(access);
+ PlanNode result = executeRules(access);
+ print(result);
+
+ // Compare results ...
+ assertThat(result, is(sameInstance(access)));
+ assertChildren(access, project);
+ assertThat(access.getProperty(Property.ACCESS_NO_RESULTS, Boolean.class), is(true));
+ }
+
+ /**
+ * Before:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 <= 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 >= 3>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ *
+ * And after:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c1 = 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ */
+ @Test
+ public void shouldReplaceComparisonsSpecifyingInclusiveRangeWithOverlappingBoundaryEqualityComparison() {
+ // Each of the PROJECT, SELECT, and SELECT nodes must have the names of the selectors that they apply to ...
+ PlanNode access = new PlanNode(Type.ACCESS, selector("t1"));
+ PlanNode project = new PlanNode(Type.PROJECT, access, selector("t1"));
+ PlanNode select1 = new PlanNode(Type.SELECT, project, selector("t1"));
+ PlanNode select2 = new PlanNode(Type.SELECT, select1, selector("t1"));
+ PlanNode select3 = new PlanNode(Type.SELECT, select2, selector("t1"));
+ PlanNode source = new PlanNode(Type.SOURCE, select3, selector("t1"));
+ source.setProperty(Property.SOURCE_NAME, selector("t1"));
+ select1.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c2")),
+ Operator.EQUAL_TO, new Literal(100L)));
+ select2.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.LESS_THAN_OR_EQUAL_TO, new Literal(3L)));
+ select3.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.GREATER_THAN_OR_EQUAL_TO, new Literal(3L)));
+
+ // Execute the rule ...
+ print(access);
+ PlanNode result = executeRules(access);
+ print(result);
+
+ // Compare results ...
+ assertThat(result, is(sameInstance(access)));
+ assertThat(access.getProperty(Property.ACCESS_NO_RESULTS, Boolean.class), is(nullValue()));
+ assertChildren(access, project);
+ PlanNode newSelect = project.getFirstChild();
+ assertThat(newSelect.getType(), is(Type.SELECT));
+ assertThat(newSelect.getSelectors(), is(access.getSelectors()));
+ assertThat(newSelect.getParent(), is(sameInstance(project)));
+ Comparison equality = newSelect.getProperty(Property.SELECT_CRITERIA, Comparison.class);
+ assertThat(equality.getOperand1(), is(select2.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand1()));
+ assertThat(equality.getOperator(), is(Operator.EQUAL_TO));
+ assertThat(equality.getOperand2(), is(select2.getProperty(Property.SELECT_CRITERIA, Comparison.class).getOperand2()));
+ assertChildren(newSelect, select1);
+ assertChildren(select1, source);
+ }
+
+ /**
+ * Before:
+ *
+ * <pre>
+ * Access [t1]
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 < 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 > 3>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ *
+ * And after:
+ *
+ * <pre>
+ * Access [t1] <ACCESS_NO_RESULTS=true>
+ * Project [t1]
+ * Select [t1] <SELECT_CRITERIA=t1.c2 = 100>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 < 3>
+ * Select [t1] <SELECT_CRITERIA=t1.c1 > 3>
+ * Source [t1] <SOURCE_NAME=t1>
+ * </pre>
+ */
+ @Test
+ public void shouldMarkAsHavingNoResultsWhenComparisonsSpecifyRangeWithNonOverlappingBoundary() {
+ // Each of the PROJECT, SELECT, and SOURCE nodes must have the names of the selectors that they apply to ...
+ PlanNode access = new PlanNode(Type.ACCESS, selector("t1"));
+ PlanNode project = new PlanNode(Type.PROJECT, access, selector("t1"));
+ PlanNode select1 = new PlanNode(Type.SELECT, project, selector("t1"));
+ PlanNode select2 = new PlanNode(Type.SELECT, select1, selector("t1"));
+ PlanNode select3 = new PlanNode(Type.SELECT, select2, selector("t1"));
+ PlanNode source = new PlanNode(Type.SOURCE, select3, selector("t1"));
+ source.setProperty(Property.SOURCE_NAME, selector("t1"));
+ select1.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c2")),
+ Operator.EQUAL_TO, new Literal(100L)));
+ select2.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.LESS_THAN, new Literal(3L)));
+ select3.setProperty(Property.SELECT_CRITERIA, new Comparison(new PropertyValue(selector("t1"), name("c1")),
+ Operator.GREATER_THAN, new Literal(3L)));
+
+ // Execute the rule ...
+ print(access);
+ PlanNode result = executeRules(access);
+ print(result);
+
+ // Compare results ...
+ assertThat(result, is(sameInstance(access)));
+ assertChildren(access, project);
+ assertThat(access.getProperty(Property.ACCESS_NO_RESULTS, Boolean.class), is(true));
+ }
+
+ protected PlanNode executeRules( PlanNode node ) {
+ while (!rules.isEmpty()) {
+ OptimizerRule rule = rules.poll();
+ assert rule != null;
+ node = rule.execute(context, node, rules);
+ }
+ return node;
+ }
+
+ protected Name name( String name ) {
+ return context.getExecutionContext().getValueFactories().getNameFactory().create(name);
+ }
+}
Property changes on: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/optimize/RewriteAsRangeCriteriaTest.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
14 years, 5 months
DNA SVN: r1323 - in trunk: dna-graph/src/main/java/org/jboss/dna/graph/query/model and 5 other directories.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-17 13:46:21 -0500 (Tue, 17 Nov 2009)
New Revision: 1323
Added:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Between.java
Modified:
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryBuilder.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitor.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java
trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanUtil.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/QueryBuilderTest.java
trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java
trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
Log:
DNA-552 Added to the abstract query model a new Constraint subclass 'Between' that represents a constraint on a DynamicOperand such that the values are within a certain range. Also added the corresponding support to the SQL parser for 'BETWEEN x AND y' and 'NOT BETWEEN x AND y', which is the syntax commonly used in other SQL grammars. (Note that this is an extension beyond JCRSQL2.) Also added support for 'BETWEEN x EXCLUSIVE AND y' and 'BETWEEN x AND y EXCLUSIVE' to be able to specify that the ranges do or do not include the boundary value. This feature allows the Lucene search/query implementation to apply a more efficient query to the indexes than does an AND of two Comparison constraints.
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryBuilder.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryBuilder.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/QueryBuilder.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -44,6 +44,7 @@
import org.jboss.dna.graph.property.ValueFormatException;
import org.jboss.dna.graph.query.model.AllNodes;
import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
import org.jboss.dna.graph.query.model.BindVariableName;
import org.jboss.dna.graph.query.model.ChildNode;
import org.jboss.dna.graph.query.model.ChildNodeJoinCondition;
@@ -1290,36 +1291,27 @@
}
}
- public class CastAs {
- private final RightHandSide rhs;
- private final Object value;
+ public abstract class CastAs<ReturnType> {
+ protected final Object value;
- protected CastAs( RightHandSide rhs,
- Object value ) {
- this.rhs = rhs;
+ protected CastAs( Object value ) {
this.value = value;
}
- private ValueFactories factories() {
- return QueryBuilder.this.context.getValueFactories();
- }
-
/**
* Define the right-hand side literal value cast as the specified type.
*
* @param type the property type; may not be null
* @return the constraint builder; never null
*/
- public ConstraintBuilder as( PropertyType type ) {
- return rhs.comparisonBuilder.is(rhs.operator, factories().getValueFactory(type).create(value));
- }
+ public abstract ReturnType as( PropertyType type );
/**
* Define the right-hand side literal value cast as a {@link PropertyType#STRING}.
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asString() {
+ public ReturnType asString() {
return as(PropertyType.STRING);
}
@@ -1328,7 +1320,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asBoolean() {
+ public ReturnType asBoolean() {
return as(PropertyType.BOOLEAN);
}
@@ -1337,7 +1329,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asLong() {
+ public ReturnType asLong() {
return as(PropertyType.LONG);
}
@@ -1346,7 +1338,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asDouble() {
+ public ReturnType asDouble() {
return as(PropertyType.DOUBLE);
}
@@ -1355,7 +1347,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asDecimal() {
+ public ReturnType asDecimal() {
return as(PropertyType.DECIMAL);
}
@@ -1364,7 +1356,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asDate() {
+ public ReturnType asDate() {
return as(PropertyType.DATE);
}
@@ -1373,7 +1365,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asName() {
+ public ReturnType asName() {
return as(PropertyType.NAME);
}
@@ -1382,7 +1374,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asPath() {
+ public ReturnType asPath() {
return as(PropertyType.PATH);
}
@@ -1391,7 +1383,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asBinary() {
+ public ReturnType asBinary() {
return as(PropertyType.BINARY);
}
@@ -1400,7 +1392,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asReference() {
+ public ReturnType asReference() {
return as(PropertyType.REFERENCE);
}
@@ -1409,7 +1401,7 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asUri() {
+ public ReturnType asUri() {
return as(PropertyType.URI);
}
@@ -1418,11 +1410,88 @@
*
* @return the constraint builder; never null
*/
- public ConstraintBuilder asUuid() {
+ public ReturnType asUuid() {
return as(PropertyType.UUID);
}
}
+ public class CastAsRightHandSide extends CastAs<ConstraintBuilder> {
+ private final RightHandSide rhs;
+
+ protected CastAsRightHandSide( RightHandSide rhs,
+ Object value ) {
+ super(value);
+ this.rhs = rhs;
+ }
+
+ private ValueFactories factories() {
+ return QueryBuilder.this.context.getValueFactories();
+ }
+
+ /**
+ * Define the right-hand side literal value cast as the specified type.
+ *
+ * @param type the property type; may not be null
+ * @return the constraint builder; never null
+ */
+ @Override
+ public ConstraintBuilder as( PropertyType type ) {
+ return rhs.comparisonBuilder.is(rhs.operator, factories().getValueFactory(type).create(value));
+ }
+ }
+
+ public class CastAsUpperBoundary extends CastAs<ConstraintBuilder> {
+ private final UpperBoundary upperBoundary;
+
+ protected CastAsUpperBoundary( UpperBoundary upperBoundary,
+ Object value ) {
+ super(value);
+ this.upperBoundary = upperBoundary;
+ }
+
+ private ValueFactories factories() {
+ return QueryBuilder.this.context.getValueFactories();
+ }
+
+ /**
+ * Define the upper boundary literal value cast as the specified type.
+ *
+ * @param type the property type; may not be null
+ * @return the constraint builder; never null
+ */
+ @Override
+ public ConstraintBuilder as( PropertyType type ) {
+ return upperBoundary.comparisonBuilder.isBetween(upperBoundary.lowerBound, factories().getValueFactory(type)
+ .create(value));
+ }
+ }
+
+ public class CastAsLowerBoundary extends CastAs<AndBuilder<UpperBoundary>> {
+ private final ComparisonBuilder builder;
+
+ protected CastAsLowerBoundary( ComparisonBuilder builder,
+ Object value ) {
+ super(value);
+ this.builder = builder;
+ }
+
+ private ValueFactories factories() {
+ return QueryBuilder.this.context.getValueFactories();
+ }
+
+ /**
+ * Define the lower boundary literal value cast as the specified type.
+ *
+ * @param type the property type; may not be null
+ * @return the builder to complete the constraint; never null
+ */
+ @Override
+ public AndBuilder<UpperBoundary> as( PropertyType type ) {
+ Object literal = factories().getValueFactory(type).create(value);
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(builder, new Literal(literal)));
+ }
+ }
+
public class RightHandSide {
protected final Operator operator;
protected final ComparisonBuilder comparisonBuilder;
@@ -1579,8 +1648,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( int literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( int literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1589,8 +1658,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( String literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( String literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1599,8 +1668,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( boolean literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( boolean literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1609,8 +1678,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( long literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( long literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1619,8 +1688,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( double literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( double literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1629,8 +1698,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( BigDecimal literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( BigDecimal literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1639,8 +1708,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( DateTime literal ) {
- return new CastAs(this, literal.toUtcTimeZone());
+ public CastAs<ConstraintBuilder> cast( DateTime literal ) {
+ return new CastAsRightHandSide(this, literal.toUtcTimeZone());
}
/**
@@ -1649,8 +1718,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( Name literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( Name literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1659,8 +1728,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( Path literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( Path literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1669,8 +1738,8 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( UUID literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( UUID literal ) {
+ return new CastAsRightHandSide(this, literal);
}
/**
@@ -1679,17 +1748,537 @@
* @param literal the literal value that is to be cast
* @return the constraint builder; never null
*/
- public CastAs cast( URI literal ) {
- return new CastAs(this, literal);
+ public CastAs<ConstraintBuilder> cast( URI literal ) {
+ return new CastAsRightHandSide(this, literal);
}
}
+ public class UpperBoundary {
+ protected final StaticOperand lowerBound;
+ protected final ComparisonBuilder comparisonBuilder;
+
+ protected UpperBoundary( ComparisonBuilder comparisonBuilder,
+ StaticOperand lowerBound ) {
+ this.lowerBound = lowerBound;
+ this.comparisonBuilder = comparisonBuilder;
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( String literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( int literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( long literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( float literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( double literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( DateTime literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( Path literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( Name literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( URI literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( UUID literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( Binary literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( BigDecimal literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder literal( boolean literal ) {
+ return comparisonBuilder.isBetween(lowerBound, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param variableName the name of the variable
+ * @return the constraint builder; never null
+ */
+ public ConstraintBuilder variable( String variableName ) {
+ return comparisonBuilder.constraintBuilder.setConstraint(new Between(comparisonBuilder.left, lowerBound,
+ new BindVariableName(variableName)));
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( int literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( String literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( boolean literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( long literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( double literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( BigDecimal literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( DateTime literal ) {
+ return new CastAsUpperBoundary(this, literal.toUtcTimeZone());
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( Name literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( Path literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( UUID literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+
+ /**
+ * Define the upper boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<ConstraintBuilder> cast( URI literal ) {
+ return new CastAsUpperBoundary(this, literal);
+ }
+ }
+
+ public class LowerBoundary {
+ protected final ComparisonBuilder comparisonBuilder;
+
+ protected LowerBoundary( ComparisonBuilder comparisonBuilder ) {
+ this.comparisonBuilder = comparisonBuilder;
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( String literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( int literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( long literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( float literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( double literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( DateTime literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( Path literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( Name literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( URI literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( UUID literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( Binary literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( BigDecimal literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value;
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> literal( boolean literal ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new Literal(literal)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param variableName the name of the variable
+ * @return the constraint builder; never null
+ */
+ public AndBuilder<UpperBoundary> variable( String variableName ) {
+ return new AndBuilder<UpperBoundary>(new UpperBoundary(comparisonBuilder, new BindVariableName(variableName)));
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( int literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( String literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( boolean literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( long literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( double literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( BigDecimal literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( DateTime literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( Name literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( Path literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( UUID literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+
+ /**
+ * Define the lower boundary value of a range.
+ *
+ * @param literal the literal value that is to be cast
+ * @return the constraint builder; never null
+ */
+ public CastAs<AndBuilder<UpperBoundary>> cast( URI literal ) {
+ return new CastAsLowerBoundary(comparisonBuilder, literal);
+ }
+ }
+
/**
* An interface used to set the right-hand side of a constraint.
*/
public class ComparisonBuilder {
- private final DynamicOperand left;
- private final ConstraintBuilder constraintBuilder;
+ protected final DynamicOperand left;
+ protected final ConstraintBuilder constraintBuilder;
protected ComparisonBuilder( ConstraintBuilder constraintBuilder,
DynamicOperand left ) {
@@ -1815,10 +2404,28 @@
public ConstraintBuilder is( Operator operator,
Object literal ) {
assert operator != null;
- return this.constraintBuilder.setConstraint(new Comparison(left, operator, new Literal(literal)));
+ Literal value = literal instanceof Literal ? (Literal)literal : new Literal(literal);
+ return this.constraintBuilder.setConstraint(new Comparison(left, operator, value));
}
/**
+ * Define the right-hand-side of the constraint using the supplied operator.
+ *
+ * @param lowerBoundLiteral the literal value that represents the lower bound of the range (inclusive)
+ * @param upperBoundLiteral the literal value that represents the upper bound of the range (inclusive)
+ * @return the builder used to create the constraint clause, ready to be used to create other constraints clauses or
+ * complete already-started clauses; never null
+ */
+ public ConstraintBuilder isBetween( Object lowerBoundLiteral,
+ Object upperBoundLiteral ) {
+ assert lowerBoundLiteral != null;
+ assert upperBoundLiteral != null;
+ Literal lower = lowerBoundLiteral instanceof Literal ? (Literal)lowerBoundLiteral : new Literal(lowerBoundLiteral);
+ Literal upper = upperBoundLiteral instanceof Literal ? (Literal)upperBoundLiteral : new Literal(upperBoundLiteral);
+ return this.constraintBuilder.setConstraint(new Between(left, lower, upper));
+ }
+
+ /**
* Define the right-hand-side of the constraint to be equivalent to the value of the supplied variable.
*
* @param variableName the name of the variable
@@ -1971,5 +2578,33 @@
public ConstraintBuilder isNotEqualTo( Object literal ) {
return is(Operator.NOT_EQUAL_TO, literal);
}
+
+ /**
+ * Define the constraint as a range between a lower boundary and an upper boundary.
+ *
+     * @return the interface used to specify the lower boundary, the upper boundary, and which will return the
+ * builder interface; never null
+ */
+ public LowerBoundary isBetween() {
+ return new LowerBoundary(this);
+ }
}
+
+ public class AndBuilder<T> {
+ private final T object;
+
+ protected AndBuilder( T object ) {
+ assert object != null;
+ this.object = object;
+ }
+
+ /**
+ * Return the component
+ *
+ * @return the component; never null
+ */
+ public T and() {
+ return this.object;
+ }
+ }
}
Added: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Between.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Between.java (rev 0)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Between.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -0,0 +1,174 @@
+/*
+ * JBoss DNA (http://www.jboss.org/dna)
+ * See the COPYRIGHT.txt file distributed with this work for information
+ * regarding copyright ownership. Some portions may be licensed
+ * to Red Hat, Inc. under one or more contributor license agreements.
+ * See the AUTHORS.txt file in the distribution for a full listing of
+ * individual contributors.
+ *
+ * JBoss DNA is free software. Unless otherwise indicated, all code in JBoss DNA
+ * is licensed to you under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * JBoss DNA is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.jboss.dna.graph.query.model;
+
+import net.jcip.annotations.Immutable;
+import org.jboss.dna.common.util.CheckArg;
+import org.jboss.dna.common.util.HashCode;
+
+/**
+ * A constraint that evaluates to true when the value defined by the dynamic operand evaluates to be within the specified range.
+ */
+@Immutable
+public class Between extends Constraint {
+
+ private final DynamicOperand operand;
+ private final StaticOperand lowerBound;
+ private final StaticOperand upperBound;
+ private final boolean includeLowerBound;
+ private final boolean includeUpperBound;
+ private final int hc;
+
+ /**
+ * Create a constraint that the values of the supplied dynamic operand are between the lower and upper bounds (inclusive).
+ *
+ * @param operand the dynamic operand describing the values that are to be constrained
+ * @param lowerBound the lower bound of the range
+ * @param upperBound the upper bound of the range
+ * @throws IllegalArgumentException if any of the arguments are null
+ */
+ public Between( DynamicOperand operand,
+ StaticOperand lowerBound,
+ StaticOperand upperBound ) {
+ this(operand, lowerBound, upperBound, true, true);
+ }
+
+ /**
+ * Create a constraint that the values of the supplied dynamic operand are between the lower and upper bounds, specifying
+ * whether the boundary values are to be included in the range.
+ *
+ * @param operand the dynamic operand describing the values that are to be constrained
+ * @param lowerBound the lower bound of the range
+ * @param upperBound the upper bound of the range
+ * @param includeLowerBound true if the lower boundary value is to be included in the range, or false if it is excluded
+ * @param includeUpperBound true if the upper boundary value is to be included in the range, or false if it is excluded
+ * @throws IllegalArgumentException if any of the arguments are null
+ */
+ public Between( DynamicOperand operand,
+ StaticOperand lowerBound,
+ StaticOperand upperBound,
+ boolean includeLowerBound,
+ boolean includeUpperBound ) {
+ CheckArg.isNotNull(operand, "operand");
+ CheckArg.isNotNull(lowerBound, "lowerBound");
+ CheckArg.isNotNull(upperBound, "upperBound");
+ this.operand = operand;
+ this.lowerBound = lowerBound;
+ this.upperBound = upperBound;
+ this.includeLowerBound = includeLowerBound;
+ this.includeUpperBound = includeUpperBound;
+ this.hc = HashCode.compute(this.operand, this.lowerBound, this.upperBound);
+ }
+
+ /**
+ * Get the dynamic operand specification.
+ *
+ * @return the dynamic operand; never null
+ */
+ public final DynamicOperand getOperand() {
+ return operand;
+ }
+
+ /**
+ * @return lowerBound
+ */
+ public StaticOperand getLowerBound() {
+ return lowerBound;
+ }
+
+ /**
+ * @return upperBound
+ */
+ public StaticOperand getUpperBound() {
+ return upperBound;
+ }
+
+ /**
+ * Return whether the lower bound is to be included in the results.
+ *
+ * @return true if the {@link #getLowerBound() lower bound} is to be included, or false otherwise
+ */
+ public boolean isLowerBoundIncluded() {
+ return includeLowerBound;
+ }
+
+ /**
+ * Return whether the upper bound is to be included in the results.
+ *
+ * @return true if the {@link #getUpperBound() upper bound} is to be included, or false otherwise
+ */
+ public boolean isUpperBoundIncluded() {
+ return includeUpperBound;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return Visitors.readable(this);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ return hc;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals( Object obj ) {
+ if (obj == this) return true;
+ if (obj instanceof Between) {
+ Between that = (Between)obj;
+ if (this.hc != that.hc) return false;
+ if (!this.operand.equals(that.operand)) return false;
+ if (!this.lowerBound.equals(that.lowerBound)) return false;
+ if (!this.upperBound.equals(that.upperBound)) return false;
+ if (this.includeLowerBound != that.includeLowerBound) return false;
+ if (this.includeUpperBound != that.includeUpperBound) return false;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @see org.jboss.dna.graph.query.model.Visitable#accept(org.jboss.dna.graph.query.model.Visitor)
+ */
+ public void accept( Visitor visitor ) {
+ visitor.visit(this);
+ }
+}
Property changes on: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Between.java
___________________________________________________________________
Name: svn:keywords
+ Id Revision
Name: svn:eol-style
+ LF
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitor.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitor.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitor.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -32,6 +32,8 @@
void visit( And obj );
+ void visit( Between obj );
+
void visit( BindVariableName obj );
void visit( ChildNode obj );
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/model/Visitors.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -239,6 +239,14 @@
/**
* {@inheritDoc}
*
+ * @see org.jboss.dna.graph.query.model.Visitor#visit(org.jboss.dna.graph.query.model.Between)
+ */
+ public void visit( Between obj ) {
+ }
+
+ /**
+ * {@inheritDoc}
+ *
* @see org.jboss.dna.graph.query.model.Visitor#visit(org.jboss.dna.graph.query.model.BindVariableName)
*/
public void visit( BindVariableName obj ) {
@@ -573,6 +581,19 @@
/**
* {@inheritDoc}
*
+ * @see org.jboss.dna.graph.query.model.Visitor#visit(org.jboss.dna.graph.query.model.Between)
+ */
+ public void visit( Between between ) {
+ strategy.visit(between);
+ enqueue(between.getOperand());
+ enqueue(between.getLowerBound());
+ enqueue(between.getUpperBound());
+ visitNext();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
* @see org.jboss.dna.graph.query.model.Visitor#visit(org.jboss.dna.graph.query.model.BindVariableName)
*/
public void visit( BindVariableName variableName ) {
@@ -1004,6 +1025,21 @@
/**
* {@inheritDoc}
*
+ * @see org.jboss.dna.graph.query.model.Visitor#visit(org.jboss.dna.graph.query.model.Between)
+ */
+ public void visit( Between between ) {
+ between.getOperand().accept(this);
+ append(" BETWEEN ");
+ between.getLowerBound().accept(this);
+ if (!between.isLowerBoundIncluded()) append(" EXCLUSIVE");
+ append(" AND ");
+ between.getUpperBound().accept(this);
+ if (!between.isUpperBoundIncluded()) append(" EXCLUSIVE");
+ }
+
+ /**
+ * {@inheritDoc}
+ *
* @see org.jboss.dna.graph.query.model.Visitor#visit(org.jboss.dna.graph.query.model.BindVariableName)
*/
public void visit( BindVariableName variable ) {
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/parse/SqlQueryParser.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -48,6 +48,7 @@
import org.jboss.dna.graph.property.ValueFactory;
import org.jboss.dna.graph.property.ValueFormatException;
import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
import org.jboss.dna.graph.query.model.BindVariableName;
import org.jboss.dna.graph.query.model.ChildNode;
import org.jboss.dna.graph.query.model.ChildNodeJoinCondition;
@@ -416,13 +417,21 @@
String msg = GraphI18n.expectingConstraintCondition.text(name, pos2.getLine(), pos2.getColumn());
throw new ParsingException(pos, msg);
}
- if (tokens.matches("IN", "(")) {
+ if (tokens.matches("IN", "(") || tokens.matches("NOT", "IN", "(")) {
+ boolean not = tokens.canConsume("NOT");
Collection<StaticOperand> staticOperands = parseInClause(tokens, context);
constraint = new SetCriteria(left, staticOperands);
- } else if (tokens.matches("NOT", "IN", "(")) {
- tokens.consume("NOT");
- Collection<StaticOperand> staticOperands = parseInClause(tokens, context);
- constraint = new Not(new SetCriteria(left, staticOperands));
+ if (not) constraint = new Not(constraint);
+ } else if (tokens.matches("BETWEEN") || tokens.matches("NOT", "BETWEEN")) {
+ boolean not = tokens.canConsume("NOT");
+ tokens.consume("BETWEEN");
+ StaticOperand lowerBound = parseStaticOperand(tokens, context);
+ boolean lowerInclusive = !tokens.canConsume("EXCLUSIVE");
+ tokens.consume("AND");
+ StaticOperand upperBound = parseStaticOperand(tokens, context);
+ boolean upperInclusive = !tokens.canConsume("EXCLUSIVE");
+ constraint = new Between(left, lowerBound, upperBound, lowerInclusive, upperInclusive);
+ if (not) constraint = new Not(constraint);
} else {
Operator operator = parseComparisonOperator(tokens);
StaticOperand right = parseStaticOperand(tokens, context);
Modified: trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanUtil.java
===================================================================
--- trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanUtil.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-graph/src/main/java/org/jboss/dna/graph/query/plan/PlanUtil.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -36,6 +36,7 @@
import org.jboss.dna.graph.property.ValueFactory;
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
import org.jboss.dna.graph.query.model.ChildNode;
import org.jboss.dna.graph.query.model.ChildNodeJoinCondition;
import org.jboss.dna.graph.query.model.Column;
@@ -433,6 +434,15 @@
if (replacement == null) return search;
return new FullTextSearch(replacement, search.getPropertyName(), search.getFullTextSearchExpression());
}
+ if (constraint instanceof Between) {
+ Between between = (Between)constraint;
+ DynamicOperand lhs = between.getOperand();
StaticOperand lower = between.getLowerBound(); // Currently only a literal; therefore, no reference to selector
StaticOperand upper = between.getUpperBound(); // Currently only a literal; therefore, no reference to selector
+ DynamicOperand newLhs = replaceReferencesToRemovedSource(context, lhs, rewrittenSelectors);
+ if (lhs == newLhs) return between;
+ return new Between(newLhs, lower, upper, between.isLowerBoundIncluded(), between.isUpperBoundIncluded());
+ }
if (constraint instanceof Comparison) {
Comparison comparison = (Comparison)constraint;
DynamicOperand lhs = comparison.getOperand1();
@@ -653,6 +663,15 @@
return new FullTextSearch(sourceColumn.getSelectorName(), sourceColumn.getPropertyName(),
search.getFullTextSearchExpression());
}
+ if (constraint instanceof Between) {
+ Between between = (Between)constraint;
+ DynamicOperand lhs = between.getOperand();
StaticOperand lower = between.getLowerBound(); // Currently only a literal; therefore, no reference to selector
StaticOperand upper = between.getUpperBound(); // Currently only a literal; therefore, no reference to selector
+ DynamicOperand newLhs = replaceViewReferences(context, lhs, mapping, node);
+ if (lhs == newLhs) return between;
+ return new Between(newLhs, lower, upper, between.isLowerBoundIncluded(), between.isUpperBoundIncluded());
+ }
if (constraint instanceof Comparison) {
Comparison comparison = (Comparison)constraint;
DynamicOperand lhs = comparison.getOperand1();
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/QueryBuilderTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/QueryBuilderTest.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/QueryBuilderTest.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -280,6 +280,50 @@
}
@Test
+ public void shouldBuildQueryWithBetweenRange() {
+ query = builder.selectStar()
+ .from("table AS nodes")
+ .where()
+ .propertyValue("nodes", "col1")
+ .isBetween()
+ .literal("lower")
+ .and()
+ .literal(true)
+ .end()
+ .query();
+ assertThatSql(query, is("SELECT * FROM table AS nodes WHERE nodes.col1 BETWEEN 'lower' AND true"));
+
+ query = builder.selectStar()
+ .from("table AS nodes")
+ .where()
+ .propertyValue("nodes", "col1")
+ .isBetween()
+ .literal("lower")
+ .and()
+ .literal("upper")
+ .end()
+ .query();
+ assertThatSql(query, is("SELECT * FROM table AS nodes WHERE nodes.col1 BETWEEN 'lower' AND 'upper'"));
+ }
+
+ @Test
+ public void shouldBuildQueryWithBetweenRangeWithCast() {
+ query = builder.selectStar()
+ .from("table AS nodes")
+ .where()
+ .propertyValue("nodes", "col1")
+ .isBetween()
+ .cast("true")
+ .asBoolean()
+ .and()
+ .cast("false")
+ .asBoolean()
+ .end()
+ .query();
+ assertThatSql(query, is("SELECT * FROM table AS nodes WHERE nodes.col1 BETWEEN true AND false"));
+ }
+
+ @Test
public void shouldBuildQueryWithOneHasPropertyConstraint() {
query = builder.selectStar().from("table AS nodes").where().hasProperty("nodes", "col1").end().query();
assertThatSql(query, is("SELECT * FROM table AS nodes WHERE nodes.col1 IS NOT NULL"));
Modified: trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java
===================================================================
--- trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-graph/src/test/java/org/jboss/dna/graph/query/parse/SqlQueryParserTest.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -39,6 +39,7 @@
import org.jboss.dna.graph.property.Name;
import org.jboss.dna.graph.property.Path;
import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
import org.jboss.dna.graph.query.model.BindVariableName;
import org.jboss.dna.graph.query.model.ChildNode;
import org.jboss.dna.graph.query.model.Constraint;
@@ -268,6 +269,108 @@
}
// ----------------------------------------------------------------------------------------------------------------
+ // parseConstraint - between
+ // ----------------------------------------------------------------------------------------------------------------
+
+ @Test
+ public void shouldParseConstraintFromStringWithValidBetweenExpressionUsing() {
+ NamedSelector selector = new NamedSelector(selectorName("tableA"));
+ Constraint constraint = parser.parseConstraint(tokens("tableA.id BETWEEN 'lower' AND 'upper'"), context, selector);
+ assertThat(constraint, is(instanceOf(Between.class)));
+ Between between = (Between)constraint;
+ assertThat(between.isLowerBoundIncluded(), is(true));
+ assertThat(between.isUpperBoundIncluded(), is(true));
+ assertThat(between.getOperand(), is(instanceOf(PropertyValue.class)));
+ PropertyValue operand = (PropertyValue)between.getOperand();
+ assertThat(operand.getSelectorName(), is(selector.getName()));
+ assertThat(operand.getPropertyName(), is(name("id")));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat((Literal)between.getLowerBound(), is(literal("lower")));
+ assertThat((Literal)between.getUpperBound(), is(literal("upper")));
+ }
+
+ @Test
+ public void shouldParseConstraintFromStringWithValidBetweenExpressionUsingExclusiveAndExclusive() {
+ NamedSelector selector = new NamedSelector(selectorName("tableA"));
+ Constraint constraint = parser.parseConstraint(tokens("tableA.id BETWEEN 'lower' EXCLUSIVE AND 'upper' EXCLUSIVE"),
+ context,
+ selector);
+ assertThat(constraint, is(instanceOf(Between.class)));
+ Between between = (Between)constraint;
+ assertThat(between.isLowerBoundIncluded(), is(false));
+ assertThat(between.isUpperBoundIncluded(), is(false));
+ assertThat(between.getOperand(), is(instanceOf(PropertyValue.class)));
+ PropertyValue operand = (PropertyValue)between.getOperand();
+ assertThat(operand.getSelectorName(), is(selector.getName()));
+ assertThat(operand.getPropertyName(), is(name("id")));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat((Literal)between.getLowerBound(), is(literal("lower")));
+ assertThat((Literal)between.getUpperBound(), is(literal("upper")));
+ }
+
+ @Test
+ public void shouldParseConstraintFromStringWithValidBetweenExpressionUsingInclusiveAndExclusive() {
+ NamedSelector selector = new NamedSelector(selectorName("tableA"));
+ Constraint constraint = parser.parseConstraint(tokens("tableA.id BETWEEN 'lower' AND 'upper' EXCLUSIVE"),
+ context,
+ selector);
+ assertThat(constraint, is(instanceOf(Between.class)));
+ Between between = (Between)constraint;
+ assertThat(between.isLowerBoundIncluded(), is(true));
+ assertThat(between.isUpperBoundIncluded(), is(false));
+ assertThat(between.getOperand(), is(instanceOf(PropertyValue.class)));
+ PropertyValue operand = (PropertyValue)between.getOperand();
+ assertThat(operand.getSelectorName(), is(selector.getName()));
+ assertThat(operand.getPropertyName(), is(name("id")));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat((Literal)between.getLowerBound(), is(literal("lower")));
+ assertThat((Literal)between.getUpperBound(), is(literal("upper")));
+ }
+
+ @Test
+ public void shouldParseConstraintFromStringWithValidBetweenExpressionUsingExclusiveAndInclusive() {
+ NamedSelector selector = new NamedSelector(selectorName("tableA"));
+ Constraint constraint = parser.parseConstraint(tokens("tableA.id BETWEEN 'lower' EXCLUSIVE AND 'upper'"),
+ context,
+ selector);
+ assertThat(constraint, is(instanceOf(Between.class)));
+ Between between = (Between)constraint;
+ assertThat(between.isLowerBoundIncluded(), is(false));
+ assertThat(between.isUpperBoundIncluded(), is(true));
+ assertThat(between.getOperand(), is(instanceOf(PropertyValue.class)));
+ PropertyValue operand = (PropertyValue)between.getOperand();
+ assertThat(operand.getSelectorName(), is(selector.getName()));
+ assertThat(operand.getPropertyName(), is(name("id")));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat((Literal)between.getLowerBound(), is(literal("lower")));
+ assertThat((Literal)between.getUpperBound(), is(literal("upper")));
+ }
+
+ @Test
+ public void shouldParseConstraintFromStringWithValidNotBetweenExpression() {
+ NamedSelector selector = new NamedSelector(selectorName("tableA"));
+ Constraint constraint = parser.parseConstraint(tokens("tableA.id NOT BETWEEN 'lower' AND 'upper'"), context, selector);
+ assertThat(constraint, is(instanceOf(Not.class)));
+ constraint = ((Not)constraint).getConstraint();
+ assertThat(constraint, is(instanceOf(Between.class)));
+ Between between = (Between)constraint;
+ assertThat(between.isLowerBoundIncluded(), is(true));
+ assertThat(between.isUpperBoundIncluded(), is(true));
+ assertThat(between.getOperand(), is(instanceOf(PropertyValue.class)));
+ PropertyValue operand = (PropertyValue)between.getOperand();
+ assertThat(operand.getSelectorName(), is(selector.getName()));
+ assertThat(operand.getPropertyName(), is(name("id")));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat(between.getLowerBound(), is(instanceOf(Literal.class)));
+ assertThat((Literal)between.getLowerBound(), is(literal("lower")));
+ assertThat((Literal)between.getUpperBound(), is(literal("upper")));
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
// parseConstraint - parentheses
// ----------------------------------------------------------------------------------------------------------------
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/DualIndexLayout.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -1114,6 +1114,52 @@
}
+ protected Query findNodesWithNumericRange( PropertyValue propertyValue,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ String field = stringFactory.create(propertyValue.getPropertyName());
+ return findNodesWithNumericRange(field, lowerValue, upperValue, includesLower, includesUpper);
+ }
+
+ protected Query findNodesWithNumericRange( NodeDepth depth,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ return findNodesWithNumericRange(PathIndex.DEPTH, lowerValue, upperValue, includesLower, includesUpper);
+ }
+
+ protected Query findNodesWithNumericRange( String field,
+ Object lowerValue,
+ Object upperValue,
+ boolean includesLower,
+ boolean includesUpper ) {
+ PropertyType type = PropertyType.discoverType(lowerValue);
+ assert type == PropertyType.discoverType(upperValue);
+ ValueFactories factories = context.getValueFactories();
+ switch (type) {
+ case DATE:
+ long lowerDate = factories.getLongFactory().create(lowerValue);
+ long upperDate = factories.getLongFactory().create(upperValue);
+ return NumericRangeQuery.newLongRange(field, lowerDate, upperDate, includesLower, includesUpper);
+ case LONG:
+ long lowerLong = factories.getLongFactory().create(lowerValue);
+ long upperLong = factories.getLongFactory().create(upperValue);
+ return NumericRangeQuery.newLongRange(field, lowerLong, upperLong, includesLower, includesUpper);
+ case DECIMAL:
+ case DOUBLE:
+ double lowerDouble = factories.getDoubleFactory().create(lowerValue);
+ double upperDouble = factories.getDoubleFactory().create(upperValue);
+ return NumericRangeQuery.newDoubleRange(field, lowerDouble, upperDouble, includesLower, includesUpper);
+ default:
+ // This is not allowed ...
+ assert false;
+ return null;
+ }
+ }
+
protected Query findNodesWith( NodePath nodePath,
Operator operator,
Object value,
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-16 23:57:21 UTC (rev 1322)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-17 18:46:21 UTC (rev 1323)
@@ -52,10 +52,12 @@
import org.jboss.dna.graph.Location;
import org.jboss.dna.graph.property.Binary;
import org.jboss.dna.graph.property.Name;
+import org.jboss.dna.graph.property.PropertyType;
import org.jboss.dna.graph.property.ValueFactory;
import org.jboss.dna.graph.query.QueryContext;
import org.jboss.dna.graph.query.QueryResults.Columns;
import org.jboss.dna.graph.query.model.And;
+import org.jboss.dna.graph.query.model.Between;
import org.jboss.dna.graph.query.model.BindVariableName;
import org.jboss.dna.graph.query.model.ChildNode;
import org.jboss.dna.graph.query.model.Comparison;
@@ -381,6 +383,10 @@
PropertyExistence existence = (PropertyExistence)constraint;
return createQuery(existence.getSelectorName(), existence.getPropertyName());
}
+ if (constraint instanceof Between) {
+ Between between = (Between)constraint;
+ return createQuery(between);
+ }
if (constraint instanceof Comparison) {
Comparison comparison = (Comparison)constraint;
return createQuery(comparison.getOperand1(), comparison.getOperator(), comparison.getOperand2());
@@ -433,20 +439,8 @@
StaticOperand right,
boolean caseSensitive ) throws IOException {
// Handle the static operand ...
- Object value = null;
- if (right instanceof Literal) {
- Literal literal = (Literal)right;
- value = literal.getValue();
- if (!caseSensitive) value = lowerCase(value);
- } else if (right instanceof BindVariableName) {
- BindVariableName variable = (BindVariableName)right;
- String variableName = variable.getVariableName();
- value = getContext().getVariables().get(variableName);
- if (!caseSensitive) value = lowerCase(value);
- } else {
- assert false;
- return null;
- }
+ Object value = createOperand(right, caseSensitive);
+ assert value != null;
// Address the dynamic operand ...
if (left instanceof FullTextSearchScore) {
@@ -481,6 +475,71 @@
}
}
+ protected Object createOperand( StaticOperand operand,
+ boolean caseSensitive ) {
+ Object value = null;
+ if (operand instanceof Literal) {
+ Literal literal = (Literal)operand;
+ value = literal.getValue();
+ if (!caseSensitive) value = lowerCase(value);
+ } else if (operand instanceof BindVariableName) {
+ BindVariableName variable = (BindVariableName)operand;
+ String variableName = variable.getVariableName();
+ value = getContext().getVariables().get(variableName);
+ if (!caseSensitive) value = lowerCase(value);
+ } else {
+ assert false;
+ }
+ return value;
+ }
+
+ protected Query createQuery( DynamicOperand left,
+ StaticOperand lower,
+ StaticOperand upper,
+ boolean includesLower,
+ boolean includesUpper,
+ boolean caseSensitive ) throws IOException {
+ // Handle the static operands ...
+ Object lowerValue = createOperand(lower, caseSensitive);
+ Object upperValue = createOperand(upper, caseSensitive);
+ assert lowerValue != null;
+ assert upperValue != null;
+
+ // Only in the case of a PropertyValue and Depth will we need to do something special ...
+ if (left instanceof NodeDepth) {
+ return session.findNodesWithNumericRange((NodeDepth)left, lowerValue, upperValue, includesLower, includesUpper);
+ } else if (left instanceof PropertyValue) {
+ PropertyType lowerType = PropertyType.discoverType(lowerValue);
+ PropertyType upperType = PropertyType.discoverType(upperValue);
+ if (upperType == lowerType) {
+ switch (upperType) {
+ case DATE:
+ case LONG:
+ case DOUBLE:
+ case DECIMAL:
+ return session.findNodesWithNumericRange((PropertyValue)left,
+ lowerValue,
+ upperValue,
+ includesLower,
+ includesUpper);
+ default:
+ // continue on and handle as boolean query ...
+ }
+ }
+ }
+
+ // Otherwise, just create a boolean query ...
+ BooleanQuery query = new BooleanQuery();
+ Operator lowerOp = includesLower ? Operator.GREATER_THAN_OR_EQUAL_TO : Operator.GREATER_THAN;
+ Operator upperOp = includesUpper ? Operator.LESS_THAN_OR_EQUAL_TO : Operator.LESS_THAN;
+ Query lowerQuery = createQuery(left, lowerOp, lower, caseSensitive);
+ Query upperQuery = createQuery(left, upperOp, upper, caseSensitive);
+ if (lowerQuery == null || upperQuery == null) return null;
+ query.add(lowerQuery, Occur.MUST);
+ query.add(upperQuery, Occur.MUST);
+ return query;
+ }
+
protected Object lowerCase( Object value ) {
if (value instanceof String) {
return ((String)value).toLowerCase();
14 years, 5 months
DNA SVN: r1322 - trunk/dna-search/src/main/java/org/jboss/dna/search.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-16 18:57:21 -0500 (Mon, 16 Nov 2009)
New Revision: 1322
Modified:
trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
Log:
DNA-467 Removed fields that are no longer used
Modified: trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java
===================================================================
--- trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-16 23:57:04 UTC (rev 1321)
+++ trunk/dna-search/src/main/java/org/jboss/dna/search/KitchenSinkIndexLayout.java 2009-11-16 23:57:21 UTC (rev 1322)
@@ -179,7 +179,7 @@
QueryContext context,
PlanNode accessNode,
Columns resultColumns,
- Analyzer analyzer ) throws IOException {
+ Analyzer analyzer ) {
// Create a processing component for this access query ...
return new LuceneQueryComponent(this, originalQuery, context, resultColumns, accessNode, analyzer, sourceName,
workspaceName);
@@ -193,8 +193,6 @@
protected static class LuceneQueryComponent extends AbstractAccessComponent {
private final QueryCommand originalQuery;
private final Session session;
- private final IndexReader pathIndexReader;
- private final Analyzer analyzer;
private final String sourceName;
private final String workspaceName;
@@ -205,12 +203,10 @@
PlanNode accessNode,
Analyzer analyzer,
String sourceName,
- String workspaceName ) throws IOException {
+ String workspaceName ) {
super(context, columns, accessNode);
this.originalQuery = originalQuery;
- this.analyzer = analyzer;
this.session = session;
- this.pathIndexReader = session.getPathsReader();
this.sourceName = sourceName;
this.workspaceName = workspaceName;
}
14 years, 5 months
DNA SVN: r1321 - trunk/dna-search.
by dna-commits@lists.jboss.org
Author: rhauch
Date: 2009-11-16 18:57:04 -0500 (Mon, 16 Nov 2009)
New Revision: 1321
Modified:
trunk/dna-search/pom.xml
Log:
DNA-467 Removed dependency that was accidentally added/committed
Modified: trunk/dna-search/pom.xml
===================================================================
--- trunk/dna-search/pom.xml 2009-11-16 23:25:36 UTC (rev 1320)
+++ trunk/dna-search/pom.xml 2009-11-16 23:57:04 UTC (rev 1321)
@@ -91,20 +91,6 @@
<artifactId>log4j</artifactId>
<scope>test</scope>
</dependency>
-
-
-
-
-
- <dependency>
- <groupId>org.apache.jackrabbit</groupId>
- <artifactId>jackrabbit-core</artifactId>
- <version>2.0-beta1</version>
- </dependency>
-
-
-
-
<!--
Java Concurrency in Practice annotations
-->
14 years, 5 months