Author: skabashnyuk
Date: 2009-10-09 10:02:16 -0400 (Fri, 09 Oct 2009)
New Revision: 263
Added:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestApiQueryAll.java
Removed:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestAll.java
Modified:
jcr/trunk/component/core/pom.xml
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheck.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexingConfigurationImpl.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RowIteratorImpl.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/MatchResult.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/Pattern.java
jcr/trunk/component/core/src/main/resources/binding.xml
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/IndexingRuleTest.java
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/SynonymProviderTest.java
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/IndexingAggregateTest.java
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/TestAll.java
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/TestExcerpt.java
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/export/ExportWorkspaceSystemViewTest.java
Log:
EXOJCR-176 : fix TestExcerpt, Indexing rule. order by score. Code clean up
Modified: jcr/trunk/component/core/pom.xml
===================================================================
--- jcr/trunk/component/core/pom.xml 2009-10-08 09:29:58 UTC (rev 262)
+++ jcr/trunk/component/core/pom.xml 2009-10-09 14:02:16 UTC (rev 263)
@@ -419,8 +419,6 @@
<include>**/impl/**/Test*.java</include>
</includes>
<excludes>
- <exclude>**/TestExcerpt.java</exclude>
-
<exclude>**/TestImport.java</exclude>
<exclude>**/TestRollbackBigFiles.java</exclude>
<exclude>**/TestErrorMultithreading.java</exclude>
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -18,664 +18,633 @@
*/
package org.exoplatform.services.jcr.config;
-import org.apache.lucene.search.Query;
-import org.exoplatform.container.configuration.ConfigurationManager;
-import org.exoplatform.services.jcr.datamodel.IllegalNameException;
-import org.exoplatform.services.jcr.impl.Constants;
-import org.exoplatform.services.jcr.impl.core.query.QueryHandler;
-import org.exoplatform.services.jcr.impl.core.query.QueryHandlerContext;
-import org.exoplatform.services.jcr.impl.core.query.QueryImpl;
-import org.exoplatform.services.jcr.impl.core.query.lucene.DefaultHTMLExcerpt;
-import org.exoplatform.services.jcr.impl.core.query.lucene.ExcerptProvider;
-import org.exoplatform.services.jcr.impl.core.query.lucene.IndexingConfiguration;
-import
org.exoplatform.services.jcr.impl.core.query.lucene.IndexingConfigurationEntityResolver;
-import org.exoplatform.services.jcr.impl.core.query.lucene.IndexingConfigurationImpl;
-import org.exoplatform.services.jcr.impl.core.query.lucene.JcrStandartAnalyzer;
-import org.exoplatform.services.jcr.impl.core.query.lucene.NamespaceMappings;
-import org.exoplatform.services.jcr.impl.core.query.lucene.SearchIndex;
-import org.exoplatform.services.jcr.impl.core.query.lucene.SpellChecker;
-import org.exoplatform.services.jcr.impl.core.query.lucene.SynonymProvider;
-import org.exoplatform.services.jcr.util.StringNumberParser;
-import org.exoplatform.services.log.ExoLogger;
-import org.exoplatform.services.log.Log;
-import org.w3c.dom.Element;
-import org.xml.sax.SAXException;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-
-import javax.jcr.RepositoryException;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
/**
* Created by The eXo Platform SAS.
*
* @author <a href="mailto:geaz@users.sourceforge.net">Gennady Azarenkov
</a>
* @version $Id: QueryHandlerEntry.java 14931 2008-05-29 15:02:08Z ksm $
*/
+@Deprecated
+public class QueryHandlerEntryWrapper
+{
+ //
+ // /**
+ // * The default value for property {@link #extractorBackLog}.
+ // */
+ // public static final int DEFAULT_EXTRACTOR_BACKLOG = 100;
+ //
+ // /**
+ // * The default value for property {@link #extractorPoolSize}.
+ // */
+ // public static final int DEFAULT_EXTRACTOR_POOLSIZE = 0;
+ //
+ // /**
+ // * The default timeout in milliseconds which is granted to the text
+ // * extraction process until fulltext indexing is deferred to a background
+ // * thread.
+ // */
+ // public static final int DEFAULT_EXTRACTOR_TIMEOUT = 100;
+ //
+ // /**
+ // * the default value for property {@link #maxFieldLength}.
+ // */
+ // public static final int DEFAULT_MAX_FIELD_LENGTH = 10000;
+ //
+ // /**
+ // * The default value for property {@link #maxMergeDocs}.
+ // */
+ // public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;
+ //
+ // /**
+ // * the default value for property {@link #mergeFactor}.
+ // */
+ // public static final int DEFAULT_MERGE_FACTOR = 10;
+ //
+ // /**
+ // * The default value for property {@link #minMergeDocs}.
+ // */
+ // public static final int DEFAULT_MIN_MERGE_DOCS = 100;
+ //
+ // /**
+ // * Name of the file to persist search internal namespace mappings.
+ // */
+ // public static final String NS_MAPPING_FILE = "ns_mappings.properties";
// TODO
+ //
+ // /**
+ // * The excerpt provider class. Implements {@link ExcerptProvider}.
+ // */
+ // private static final String DEDAULT_EXCERPTPROVIDER_CLASS =
DefaultHTMLExcerpt.class
+ // .getName();
+ //
+ // private static final String DEDAULT_INDEXINGCONFIGURATIONCLASS =
IndexingConfigurationImpl.class
+ // .getName();
+ //
+ // private static final boolean DEFAULT_AUTOREPAIR = true;
+ //
+ // private static final int DEFAULT_BUFFER_SIZE = 10;
+ //
+ // private static final int DEFAULT_CACHE_SIZE = 1000;
+ //
+ // private final static boolean DEFAULT_CONSISTENCYCHECKENABLED = false;
+ //
+ // private final static boolean DEFAULT_DOCUMENTORDER = true;
+ //
+ // private final static boolean DEFAULT_FORCECONSISTENCYCHECK = false;
+ //
+ // /**
+ // * Name of the default query implementation class.
+ // */
+ // private static final String DEFAULT_QUERY_HANDLER_CLASS = SearchIndex.class
+ // .getName();
+ //
+ // /**
+ // * Name of the default query implementation class.
+ // */
+ // private static final String DEFAULT_QUERY_IMPL_CLASS = QueryImpl.class
+ // .getName();
+ //
+ // /**
+ // * The number of documents that are pre fetched when a query is executed.
+ // * <p/>
+ // * Default value is: {@link Integer#MAX_VALUE}.
+ // */
+ // private final static int DEFAULT_RESULTFETCHSIZE = Integer.MAX_VALUE;
+ //
+ // private final static boolean DEFAULT_SUPPORTHIGHLIGHTING = false;
+ //
+ // private final static boolean DEFAULT_USECOMPOUNDFILE = false;
+ //
+ // private final static int DEFAULT_VOLATILEIDLETIME = 3;
+ //
+ // // since
https://jira.jboss.org/jira/browse/EXOJCR-17
+ //
+ // public static final boolean DEFAULT_UPGRADE_INDEX = false;
+ //
+ // private QueryHandlerEntry queryHandlerEntry;
+ //
+ // public QueryHandlerEntry getQueryHandlerEntry() {
+ // return queryHandlerEntry;
+ // }
+ //
+ // private static void initDefaults(QueryHandlerEntry entry) {
+ // entry.putBooleanParameter(PARAM_AUTO_REPAIR, DEFAULT_AUTOREPAIR);
+ // entry.putIntegerParameter(PARAM_BUFFER_SIZE, DEFAULT_BUFFER_SIZE);
+ // entry.putIntegerParameter(PARAM_CACHE_SIZE, DEFAULT_CACHE_SIZE);
+ // entry.putBooleanParameter(PARAM_DOCUMENT_ORDER, DEFAULT_DOCUMENTORDER);
+ // entry.putParameterValue(PARAM_EXCERPTPROVIDER_CLASS,
+ // DEDAULT_EXCERPTPROVIDER_CLASS);
+ //// Null value is forbidden according to the binding.xml, it prevents marshalling
+ //// entry.putParameterValue(PARAM_EXCLUDED_NODE_IDENTIFERS, null);
+ // entry.putIntegerParameter(PARAM_EXTRACTOR_BACKLOG,
+ // DEFAULT_EXTRACTOR_BACKLOG);
+ // entry.putIntegerParameter(PARAM_EXTRACTOR_POOLSIZE,
+ // DEFAULT_EXTRACTOR_POOLSIZE);
+ // entry.putIntegerParameter(PARAM_EXTRACTOR_TIMEOUT,
+ // DEFAULT_EXTRACTOR_TIMEOUT);
+ // }
+ //
+ // public String getType() {
+ // return queryHandlerEntry.getType();
+ // }
+ //
+ // public static QueryHandlerEntry queryHandlerEntryFactory() {
+ // QueryHandlerEntry entry = new QueryHandlerEntry();
+ // initDefaults(entry);
+ // return entry;
+ // }
+ //
+ // /** The logger instance for this class */
+ // private static final Log log = ExoLogger.getLogger(QueryHandlerEntry.class);
+ //
+ // // public QueryHandlerEntry queryHandler;
+ //
+ // public Integer volatileIdleTime;
+ //
+ // /**
+ // * The analyzer we use for indexing.
+ // */
+ // private JcrStandartAnalyzer analyzer;
+ //
+ // private String queryHandlerClass = DEFAULT_QUERY_HANDLER_CLASS;
+ //
+ // public QueryHandlerEntryWrapper(QueryHandlerEntry queryHandlerEntry) {
+ // this.queryHandlerEntry = queryHandlerEntry;
+ // this.analyzer = new JcrStandartAnalyzer();
+ // initDefaults(queryHandlerEntry);
+ // }
+ //
+ // public QueryHandlerEntryWrapper(String type, List params,
+ // QueryHandlerEntry queryHandlerEntry) {
+ // this.queryHandlerEntry = queryHandlerEntry;
+ // queryHandlerEntry.setType(type);
+ // queryHandlerEntry.setParameters(params);
+ // this.analyzer = new JcrStandartAnalyzer();
+ // initDefaults(queryHandlerEntry);
+ // }
+ //
+ // /**
+ // * Creates an excerpt provider for the given <code>query</code>.
+ // *
+ // * @param query
+ // * the query.
+ // * @return an excerpt provider for the given <code>query</code>.
+ // * @throws IOException
+ // * if the provider cannot be created.
+ // */
+ // public ExcerptProvider createExcerptProvider(Query query)
+ // throws IOException {
+ // ExcerptProvider ep;
+ // try {
+ // Class excerptProviderClass = Class.forName(
+ // getExcerptProviderClass(), true, this.getClass()
+ // .getClassLoader());
+ // ep = (ExcerptProvider) excerptProviderClass.newInstance();
+ // } catch (Exception e) {
+ // IOException ex = new IOException();
+ // ex.initCause(e);
+ // throw ex;
+ // }
+ //
+ // return ep;
+ // }
+ //
+ // /**
+ // * @param namespaceMappings
+ // * The namespace mappings
+ // * @return the fulltext indexing configuration or <code>null</code>
if there
+ // * is no configuration.
+ // */
+ // public IndexingConfiguration createIndexingConfiguration(
+ // NamespaceMappings namespaceMappings, QueryHandlerContext context,
+ // ConfigurationManager cfm) throws IOException,
+ // RepositoryConfigurationException {
+ // Element docElement = getIndexingConfigurationDOM(cfm);
+ // if (docElement == null) {
+ // return null;
+ // }
+ // IndexingConfiguration idxCfg = null;
+ // try {
+ // Class indexingConfigurationClass = Class.forName(
+ // getIndexingConfigurationClass(), true, this.getClass()
+ // .getClassLoader());
+ // idxCfg = (IndexingConfiguration) indexingConfigurationClass
+ // .newInstance();
+ // idxCfg.init(docElement, context, namespaceMappings);
+ // } catch (InstantiationException e) {
+ // log.warn("Exception initializing indexing configuration from: "
+ // + getIndexingConfigurationPath(), e);
+ // } catch (IllegalAccessException e) {
+ // log.warn("Exception initializing indexing configuration from: "
+ // + getIndexingConfigurationPath(), e);
+ // } catch (RepositoryException e) {
+ // log.warn("Exception initializing indexing configuration from: "
+ // + getIndexingConfigurationPath(), e);
+ // } catch (IllegalNameException e) {
+ // log.warn("Exception initializing indexing configuration from: "
+ // + getIndexingConfigurationPath(), e);
+ // } catch (Exception e) {
+ // log.warn("Exception initializing indexing configuration from: "
+ // + getIndexingConfigurationPath(), e);
+ // }
+ // return idxCfg;
+ // }
+ //
+ // /**
+ // * Creates a spell checker for this query handler.
+ // *
+ // * @return the spell checker or <code>null</code> if none is
configured or
+ // * an error occurs.
+ // */
+ // public SpellChecker createSpellChecker(QueryHandler handler) {
+ // SpellChecker spCheck = null;
+ // if (getSpellCheckerClass() != null) {
+ // try {
+ // Class spellCheckerClass = Class.forName(getSpellCheckerClass(),
+ // true, this.getClass().getClassLoader());
+ // spCheck = (SpellChecker) spellCheckerClass.newInstance();
+ // spCheck.init(handler);
+ // } catch (Exception e) {
+ // log.warn("Exception initializing spell checker: "
+ // + getSpellCheckerClass(), e);
+ // }
+ // }
+ // return spCheck;
+ // }
+ //
+ // /**
+ // * @param cfm
+ // * @return the configured synonym provider or <code>null</code> if
none is
+ // * configured or an error occurs.
+ // */
+ // public SynonymProvider createSynonymProvider(ConfigurationManager cfm) {
+ // SynonymProvider sp = null;
+ // if (getSynonymProviderClass() != null) {
+ // try {
+ // Class synonymProviderClass = Class.forName(
+ // getSynonymProviderClass(), true, this.getClass()
+ // .getClassLoader());
+ // sp = (SynonymProvider) synonymProviderClass.newInstance();
+ //
+ // sp.initialize(createSynonymProviderConfigResource(cfm));
+ // } catch (Exception e) {
+ // log.warn("Exception initializing synonym provider: "
+ // + getSynonymProviderClass(), e);
+ // sp = null;
+ // }
+ // }
+ // return sp;
+ // }
+ //
+ // public JcrStandartAnalyzer getAnalyzer() {
+ // return analyzer;
+ // }
+ //
+ // private String getParameterString(String name) {
+ // return queryHandlerEntry.getParameterValue(name, null);
+ // }
+ //
+ // private Integer getParameterIntegerInitialized(String name) {
+ // String value = queryHandlerEntry.getParameterValue(name, null);
+ // return StringNumberParser.parseInt(value);
+ // }
+ //
+ // private Boolean getParameterBooleanInitialized(String name) {
+ // String value = queryHandlerEntry.getParameterValue(name, "false");
+ // return Boolean.parseBoolean(value);
+ // }
+ //
+ // /**
+ // * If set <code>true</code> errors detected by the consistency check
are
+ // * repaired. If <code>false</code> the errors are only reported in
the log.
+ // * <p/>
+ // * Default value is: <code>true</code>.
+ // *
+ // * @throws RepositoryConfigurationException
+ // */
+ // public boolean getAutoRepair() throws RepositoryConfigurationException {
+ // return getParameterBooleanInitialized(PARAM_AUTO_REPAIR);
+ // }
+ //
+ // /**
+ // * Number of documents that are buffered before they are added to the index.
+ // *
+ // * @throws RepositoryConfigurationException
+ // */
+ // public int getBufferSize() {
+ // return getParameterIntegerInitialized(PARAM_BUFFER_SIZE);
+ // }
+ //
+ // public int getCacheSize() {
+ // return getParameterIntegerInitialized(PARAM_CACHE_SIZE);
+ // }
+ //
+ // /**
+ // * Flag indicating whether document order is enable as the default ordering.
+ // */
+ // public boolean getDocumentOrder() {
+ // return getParameterBooleanInitialized(PARAM_DOCUMENT_ORDER);
+ // }
+ //
+ // /**
+ // * @return the class name of the excerpt provider implementation.
+ // */
+ // public String getExcerptProviderClass() {
+ // return getParameterString(PARAM_EXCERPTPROVIDER_CLASS);
+ // }
+ //
+ // public String getExcludedNodeIdentifers() {
+ // return getParameterString(PARAM_EXCLUDED_NODE_IDENTIFERS);
+ // }
+ //
+ // /**
+ // * @return the size of the extractor queue back log.
+ // */
+ // public int getExtractorBackLogSize() {
+ // return getParameterIntegerInitialized(PARAM_EXTRACTOR_BACKLOG);
+ // }
+ //
+ // /**
+ // * @return the size of the thread pool which is used to run the text
+ // * extractors when binary content is indexed.
+ // */
+ // public int getExtractorPoolSize() {
+ // return getParameterIntegerInitialized(PARAM_EXTRACTOR_POOLSIZE);
+ // }
+ //
+ // /**
+ // * @return the extractor timeout in milliseconds.
+ // */
+ // public long getExtractorTimeout() {
+ // return getParameterIntegerInitialized(PARAM_EXTRACTOR_TIMEOUT);
+ // }
+ //
+ // /**
+ // * Returns the location of the search index. Returns
<code>null</code> if
+ // * not set.
+ // *
+ // * @return the location of the search index.
+ // * @throws RepositoryConfigurationException
+ // */
+ // public String getIndexDir() throws RepositoryConfigurationException {
+ //
+ // String indexDir;
+ // try {
+ // indexDir = queryHandlerEntry.getParameterValue(PARAM_INDEX_DIR);
+ // } catch (RepositoryConfigurationException e) {
+ // indexDir = queryHandlerEntry.getParameterValue(OLD_PARAM_INDEX_DIR);
+ // }
+ //
+ // indexDir = indexDir.replace("${java.io.tmpdir}", System
+ // .getProperty("java.io.tmpdir"));
+ //
+ // return indexDir;
+ // }
+ //
+ // /**
+ // * @return the class name of the indexing configuration implementation.
+ // */
+ // public String getIndexingConfigurationClass() {
+ // return queryHandlerEntry.getParameterValue(
+ // PARAM_INDEXING_CONFIGURATION_CLASS,
+ // DEDAULT_INDEXINGCONFIGURATIONCLASS);
+ // }
+ //
+ // /**
+ // * @return the path to the indexing configuration file.
+ // */
+ // public String getIndexingConfigurationPath() {
+ // return queryHandlerEntry.getParameterValue(
+ // PARAM_INDEXING_CONFIGURATION_PATH, null);
+ // }
+ //
+ // public int getMaxFieldLength() {
+ // return queryHandlerEntry.getParameterInteger(PARAM_MAX_FIELD_LENGTH,
+ // DEFAULT_MAX_FIELD_LENGTH);
+ // }
+ //
+ // /**
+ // * Returns the current value for maxMergeDocs.
+ // *
+ // * @return the current value for maxMergeDocs.
+ // */
+ // public int getMaxMergeDocs() {
+ // return queryHandlerEntry.getParameterInteger(PARAM_MAX_MERGE_DOCS,
+ // DEFAULT_MAX_MERGE_DOCS);
+ // }
+ //
+ // /**
+ // * Returns the current value for the merge factor.
+ // *
+ // * @return the current value for the merge factor.
+ // */
+ // public int getMergeFactor() {
+ // return queryHandlerEntry.getParameterInteger(PARAM_MERGE_FACTOR,
+ // DEFAULT_MERGE_FACTOR);
+ // }
+ //
+ // /**
+ // * Returns the current value for minMergeDocs.
+ // *
+ // * @return the current value for minMergeDocs.
+ // */
+ // public int getMinMergeDocs() {
+ // return queryHandlerEntry.getParameterInteger(PARAM_MIN_MERGE_DOCS,
+ // DEFAULT_MIN_MERGE_DOCS);
+ // }
+ //
+ // public String getQueryClass() {
+ // return queryHandlerEntry.getParameterValue(PARAM_QUERY_CLASS,
+ // DEFAULT_QUERY_IMPL_CLASS);
+ // }
+ //
+ // /**
+ // * @return the number of results the query handler will fetch initially when
+ // * a query is executed.
+ // */
+ // public int getResultFetchSize() {
+ // return queryHandlerEntry.getParameterInteger(PARAM_RESULT_FETCH_SIZE,
+ // DEFAULT_RESULTFETCHSIZE);
+ // }
+ //
+ // public String getRootNodeIdentifer() {
+ // return queryHandlerEntry.getParameterValue(PARAM_ROOT_NODE_ID,
+ // Constants.ROOT_UUID);
+ // }
+ //
+ // /**
+ // * Get spell checker class.
+ // *
+ // * @return the class name of the spell checker implementation or
+ // * <code>null</code> if none is set.
+ // */
+ // public String getSpellCheckerClass() {
+ // return queryHandlerEntry.getParameterValue(PARAM_SPELLCHECKER_CLASS,
+ // null);
+ // }
+ //
+ // /**
+ // * Get support highlighting.
+ // *
+ // * @return <code>true</code> if highlighting support is enabled.
+ // */
+ // public boolean getSupportHighlighting() {
+ // return queryHandlerEntry.getParameterBoolean(
+ // PARAM_SUPPORT_HIGHLIGHTING, DEFAULT_SUPPORTHIGHLIGHTING);
+ // }
+ //
+ // /**
+ // * Get synonym provider class.
+ // *
+ // * @return the class name of the synonym provider implementation or
+ // * <code>null</code> if none is set.
+ // */
+ // public String getSynonymProviderClass() {
+ // return queryHandlerEntry.getParameterValue(PARAM_SYNONYMPROVIDER_CLASS,
+ // null);
+ // }
+ //
+ // /**
+ // * Get synonym provider configuration path.
+ // *
+ // * @return the configuration path for the synonym provider. If none is set
+ // * this method returns <code>null</code>.
+ // */
+ // public String getSynonymProviderConfigPath() {
+ // return queryHandlerEntry.getParameterValue(
+ // PARAM_SYNONYMPROVIDER_CONFIG_PATH, null);
+ // }
+ //
+ // /**
+ // * Returns the current value for useCompoundFile.
+ // *
+ // * @return the current value for useCompoundFile.
+ // */
+ // public boolean getUseCompoundFile() {
+ // return queryHandlerEntry.getParameterBoolean(PARAM_USE_COMPOUNDFILE,
+ // DEFAULT_USECOMPOUNDFILE);
+ // }
+ //
+ // /**
+ // * Returns the current value for volatileIdleTime.
+ // *
+ // * @return the current value for volatileIdleTime.
+ // */
+ // public int getVolatileIdleTime() {
+ // if (volatileIdleTime == null)
+ // volatileIdleTime = queryHandlerEntry.getParameterInteger(
+ // PARAM_VOLATILE_IDLE_TIME, DEFAULT_VOLATILEIDLETIME);
+ //
+ // return volatileIdleTime;
+ // }
+ //
+ // /**
+ // * If set <code>true</code> the index is checked for consistency
depending
+ // * on the {@link #forceConsistencyCheck} parameter. If set to
+ // * <code>false</code>, no consistency check is performed, even if
the redo
+ // * log had been applied on startup.
+ // * <p/>
+ // * Default value is: <code>false</code>.
+ // *
+ // * @return boolean
+ // */
+ // public boolean isConsistencyCheckEnabled() {
+ // return queryHandlerEntry.getParameterBoolean(
+ // PARAM_CONSISTENCY_CHECK_ENABLED,
+ // DEFAULT_CONSISTENCYCHECKENABLED);
+ // }
+ //
+ // public boolean isForceConsistencyCheck() {
+ // return queryHandlerEntry.getParameterBoolean(
+ // PARAM_FORCE_CONSISTENCYCHECK, DEFAULT_FORCECONSISTENCYCHECK);
+ // }
+ //
+ // /**
+ // *
+ // * @return true if index upgrade allowed.
+ // */
+ // public boolean isUpgradeIndex() {
+ // Boolean updateIndex = queryHandlerEntry.getParameterBoolean(
+ // PARAM_UPGRADE_INDEX, null);
+ // if (updateIndex == null || !updateIndex) {
+ // updateIndex = Boolean.valueOf(System
+ // .getProperty(PARAM_UPGRADE_INDEX));
+ // }
+ // return updateIndex;
+ // }
+ //
+ // /**
+ // * Creates a file system resource to the synonym provider configuration.
+ // *
+ // * @param cfm
+ // * @return a file system resource or <code>null</code> if no path
was
+ // * configured.
+ // * @throws Exception
+ // */
+ // protected InputStream createSynonymProviderConfigResource(
+ // ConfigurationManager cfm) throws Exception {
+ // if (getSynonymProviderConfigPath() != null) {
+ // return cfm.getInputStream(getSynonymProviderConfigPath());
+ // }
+ // return null;
+ // }
+ //
+ // /**
+ // * Returns the document element of the indexing configuration or
+ // * <code>null</code> if there is no indexing configuration.
+ // *
+ // * @return the indexing configuration or <code>null</code> if there
is none.
+ // * @throws IOException
+ // * @throws RepositoryConfigurationException
+ // */
+ // protected Element getIndexingConfigurationDOM(ConfigurationManager cfm)
+ // throws IOException, RepositoryConfigurationException {
+ // String indexingConfigPath = getIndexingConfigurationPath();
+ // Element indexingConfiguration = null;
+ // if (indexingConfigPath != null) {
+ //
+ // InputStream is;
+ // try {
+ // is = cfm.getInputStream(indexingConfigPath);
+ // } catch (Exception e1) {
+ // throw new IOException(e1.getLocalizedMessage());
+ // }
+ //
+ // if (is == null)
+ // throw new IOException("Resource does not exist: "
+ // + indexingConfigPath);
+ //
+ // DocumentBuilderFactory factory = DocumentBuilderFactory
+ // .newInstance();
+ // try {
+ // DocumentBuilder builder = factory.newDocumentBuilder();
+ // builder
+ // .setEntityResolver(new IndexingConfigurationEntityResolver());
+ // indexingConfiguration = builder.parse(is).getDocumentElement();
+ // } catch (ParserConfigurationException e) {
+ // throw new RepositoryConfigurationException(e
+ // .getLocalizedMessage(), e);
+ // } catch (SAXException e) {
+ // throw new RepositoryConfigurationException(e
+ // .getLocalizedMessage(), e);
+ // }
+ // }
+ //
+ // return indexingConfiguration;
+ // }
+ //
+ // /**
+ // * Return ErrorLog file size in Kb String representation.
+ // *
+ // * @return int size in Kb
+ // */
+ // public int getErrorLogSize() {
+ // String size = queryHandlerEntry.getParameterValue(PARAM_ERRORLOG_SIZE,
+ // null);
+ // if ((size == null) || (size.equals(""))) {
+ // return SearchIndex.DEFAULT_ERRORLOG_FILE_SIZE;
+ // } else {
+ // return new Integer(size);
+ // }
+ // }
-public class QueryHandlerEntryWrapper implements QueryHandlerParams {
-
- /**
- * The default value for property {@link #extractorBackLog}.
- */
- public static final int DEFAULT_EXTRACTOR_BACKLOG = 100;
-
- /**
- * The default value for property {@link #extractorPoolSize}.
- */
- public static final int DEFAULT_EXTRACTOR_POOLSIZE = 0;
-
- /**
- * The default timeout in milliseconds which is granted to the text
- * extraction process until fulltext indexing is deferred to a background
- * thread.
- */
- public static final int DEFAULT_EXTRACTOR_TIMEOUT = 100;
-
- /**
- * the default value for property {@link #maxFieldLength}.
- */
- public static final int DEFAULT_MAX_FIELD_LENGTH = 10000;
-
- /**
- * The default value for property {@link #maxMergeDocs}.
- */
- public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;
-
- /**
- * the default value for property {@link #mergeFactor}.
- */
- public static final int DEFAULT_MERGE_FACTOR = 10;
-
- /**
- * The default value for property {@link #minMergeDocs}.
- */
- public static final int DEFAULT_MIN_MERGE_DOCS = 100;
-
- /**
- * Name of the file to persist search internal namespace mappings.
- */
- public static final String NS_MAPPING_FILE = "ns_mappings.properties"; //
TODO
-
- /**
- * The excerpt provider class. Implements {@link ExcerptProvider}.
- */
- private static final String DEDAULT_EXCERPTPROVIDER_CLASS = DefaultHTMLExcerpt.class
- .getName();
-
- private static final String DEDAULT_INDEXINGCONFIGURATIONCLASS =
IndexingConfigurationImpl.class
- .getName();
-
- private static final boolean DEFAULT_AUTOREPAIR = true;
-
- private static final int DEFAULT_BUFFER_SIZE = 10;
-
- private static final int DEFAULT_CACHE_SIZE = 1000;
-
- private final static boolean DEFAULT_CONSISTENCYCHECKENABLED = false;
-
- private final static boolean DEFAULT_DOCUMENTORDER = true;
-
- private final static boolean DEFAULT_FORCECONSISTENCYCHECK = false;
-
- /**
- * Name of the default query implementation class.
- */
- private static final String DEFAULT_QUERY_HANDLER_CLASS = SearchIndex.class
- .getName();
-
- /**
- * Name of the default query implementation class.
- */
- private static final String DEFAULT_QUERY_IMPL_CLASS = QueryImpl.class
- .getName();
-
- /**
- * The number of documents that are pre fetched when a query is executed.
- * <p/>
- * Default value is: {@link Integer#MAX_VALUE}.
- */
- private final static int DEFAULT_RESULTFETCHSIZE = Integer.MAX_VALUE;
-
- private final static boolean DEFAULT_SUPPORTHIGHLIGHTING = false;
-
- private final static boolean DEFAULT_USECOMPOUNDFILE = false;
-
- private final static int DEFAULT_VOLATILEIDLETIME = 3;
-
- // since
https://jira.jboss.org/jira/browse/EXOJCR-17
-
- public static final boolean DEFAULT_UPGRADE_INDEX = false;
-
- private QueryHandlerEntry queryHandlerEntry;
-
- public QueryHandlerEntry getQueryHandlerEntry() {
- return queryHandlerEntry;
- }
-
- private static void initDefaults(QueryHandlerEntry entry) {
- entry.putBooleanParameter(PARAM_AUTO_REPAIR, DEFAULT_AUTOREPAIR);
- entry.putIntegerParameter(PARAM_BUFFER_SIZE, DEFAULT_BUFFER_SIZE);
- entry.putIntegerParameter(PARAM_CACHE_SIZE, DEFAULT_CACHE_SIZE);
- entry.putBooleanParameter(PARAM_DOCUMENT_ORDER, DEFAULT_DOCUMENTORDER);
- entry.putParameterValue(PARAM_EXCERPTPROVIDER_CLASS,
- DEDAULT_EXCERPTPROVIDER_CLASS);
-// Null value is forbidden according to the binding.xml, it prevents marshalling
-// entry.putParameterValue(PARAM_EXCLUDED_NODE_IDENTIFERS, null);
- entry.putIntegerParameter(PARAM_EXTRACTOR_BACKLOG,
- DEFAULT_EXTRACTOR_BACKLOG);
- entry.putIntegerParameter(PARAM_EXTRACTOR_POOLSIZE,
- DEFAULT_EXTRACTOR_POOLSIZE);
- entry.putIntegerParameter(PARAM_EXTRACTOR_TIMEOUT,
- DEFAULT_EXTRACTOR_TIMEOUT);
- }
-
- public String getType() {
- return queryHandlerEntry.getType();
- }
-
- public static QueryHandlerEntry queryHandlerEntryFactory() {
- QueryHandlerEntry entry = new QueryHandlerEntry();
- initDefaults(entry);
- return entry;
- }
-
- /** The logger instance for this class */
- private static final Log log = ExoLogger.getLogger(QueryHandlerEntry.class);
-
- // public QueryHandlerEntry queryHandler;
-
- public Integer volatileIdleTime;
-
- /**
- * The analyzer we use for indexing.
- */
- private JcrStandartAnalyzer analyzer;
-
- private String queryHandlerClass = DEFAULT_QUERY_HANDLER_CLASS;
-
- public QueryHandlerEntryWrapper(QueryHandlerEntry queryHandlerEntry) {
- this.queryHandlerEntry = queryHandlerEntry;
- this.analyzer = new JcrStandartAnalyzer();
- initDefaults(queryHandlerEntry);
- }
-
- public QueryHandlerEntryWrapper(String type, List params,
- QueryHandlerEntry queryHandlerEntry) {
- this.queryHandlerEntry = queryHandlerEntry;
- queryHandlerEntry.setType(type);
- queryHandlerEntry.setParameters(params);
- this.analyzer = new JcrStandartAnalyzer();
- initDefaults(queryHandlerEntry);
- }
-
- /**
- * Creates an excerpt provider for the given <code>query</code>.
- *
- * @param query
- * the query.
- * @return an excerpt provider for the given <code>query</code>.
- * @throws IOException
- * if the provider cannot be created.
- */
- public ExcerptProvider createExcerptProvider(Query query)
- throws IOException {
- ExcerptProvider ep;
- try {
- Class excerptProviderClass = Class.forName(
- getExcerptProviderClass(), true, this.getClass()
- .getClassLoader());
- ep = (ExcerptProvider) excerptProviderClass.newInstance();
- } catch (Exception e) {
- IOException ex = new IOException();
- ex.initCause(e);
- throw ex;
- }
-
- return ep;
- }
-
- /**
- * @param namespaceMappings
- * The namespace mappings
- * @return the fulltext indexing configuration or <code>null</code> if
there
- * is no configuration.
- */
- public IndexingConfiguration createIndexingConfiguration(
- NamespaceMappings namespaceMappings, QueryHandlerContext context,
- ConfigurationManager cfm) throws IOException,
- RepositoryConfigurationException {
- Element docElement = getIndexingConfigurationDOM(cfm);
- if (docElement == null) {
- return null;
- }
- IndexingConfiguration idxCfg = null;
- try {
- Class indexingConfigurationClass = Class.forName(
- getIndexingConfigurationClass(), true, this.getClass()
- .getClassLoader());
- idxCfg = (IndexingConfiguration) indexingConfigurationClass
- .newInstance();
- idxCfg.init(docElement, context, namespaceMappings);
- } catch (InstantiationException e) {
- log.warn("Exception initializing indexing configuration from: "
- + getIndexingConfigurationPath(), e);
- } catch (IllegalAccessException e) {
- log.warn("Exception initializing indexing configuration from: "
- + getIndexingConfigurationPath(), e);
- } catch (RepositoryException e) {
- log.warn("Exception initializing indexing configuration from: "
- + getIndexingConfigurationPath(), e);
- } catch (IllegalNameException e) {
- log.warn("Exception initializing indexing configuration from: "
- + getIndexingConfigurationPath(), e);
- } catch (Exception e) {
- log.warn("Exception initializing indexing configuration from: "
- + getIndexingConfigurationPath(), e);
- }
- return idxCfg;
- }
-
- /**
- * Creates a spell checker for this query handler.
- *
- * @return the spell checker or <code>null</code> if none is configured
or
- * an error occurs.
- */
- public SpellChecker createSpellChecker(QueryHandler handler) {
- SpellChecker spCheck = null;
- if (getSpellCheckerClass() != null) {
- try {
- Class spellCheckerClass = Class.forName(getSpellCheckerClass(),
- true, this.getClass().getClassLoader());
- spCheck = (SpellChecker) spellCheckerClass.newInstance();
- spCheck.init(handler);
- } catch (Exception e) {
- log.warn("Exception initializing spell checker: "
- + getSpellCheckerClass(), e);
- }
- }
- return spCheck;
- }
-
- /**
- * @param cfm
- * @return the configured synonym provider or <code>null</code> if none
is
- * configured or an error occurs.
- */
- public SynonymProvider createSynonymProvider(ConfigurationManager cfm) {
- SynonymProvider sp = null;
- if (getSynonymProviderClass() != null) {
- try {
- Class synonymProviderClass = Class.forName(
- getSynonymProviderClass(), true, this.getClass()
- .getClassLoader());
- sp = (SynonymProvider) synonymProviderClass.newInstance();
-
- sp.initialize(createSynonymProviderConfigResource(cfm));
- } catch (Exception e) {
- log.warn("Exception initializing synonym provider: "
- + getSynonymProviderClass(), e);
- sp = null;
- }
- }
- return sp;
- }
-
- public JcrStandartAnalyzer getAnalyzer() {
- return analyzer;
- }
-
- private String getParameterString(String name) {
- return queryHandlerEntry.getParameterValue(name, null);
- }
-
- private Integer getParameterIntegerInitialized(String name) {
- String value = queryHandlerEntry.getParameterValue(name, null);
- return StringNumberParser.parseInt(value);
- }
-
- private Boolean getParameterBooleanInitialized(String name) {
- String value = queryHandlerEntry.getParameterValue(name, "false");
- return Boolean.parseBoolean(value);
- }
-
- /**
- * If set <code>true</code> errors detected by the consistency check are
- * repaired. If <code>false</code> the errors are only reported in the
log.
- * <p/>
- * Default value is: <code>true</code>.
- *
- * @throws RepositoryConfigurationException
- */
- public boolean getAutoRepair() throws RepositoryConfigurationException {
- return getParameterBooleanInitialized(PARAM_AUTO_REPAIR);
- }
-
- /**
- * Number of documents that are buffered before they are added to the index.
- *
- * @throws RepositoryConfigurationException
- */
- public int getBufferSize() {
- return getParameterIntegerInitialized(PARAM_BUFFER_SIZE);
- }
-
- public int getCacheSize() {
- return getParameterIntegerInitialized(PARAM_CACHE_SIZE);
- }
-
- /**
- * Flag indicating whether document order is enable as the default ordering.
- */
- public boolean getDocumentOrder() {
- return getParameterBooleanInitialized(PARAM_DOCUMENT_ORDER);
- }
-
- /**
- * @return the class name of the excerpt provider implementation.
- */
- public String getExcerptProviderClass() {
- return getParameterString(PARAM_EXCERPTPROVIDER_CLASS);
- }
-
- public String getExcludedNodeIdentifers() {
- return getParameterString(PARAM_EXCLUDED_NODE_IDENTIFERS);
- }
-
- /**
- * @return the size of the extractor queue back log.
- */
- public int getExtractorBackLogSize() {
- return getParameterIntegerInitialized(PARAM_EXTRACTOR_BACKLOG);
- }
-
- /**
- * @return the size of the thread pool which is used to run the text
- * extractors when binary content is indexed.
- */
- public int getExtractorPoolSize() {
- return getParameterIntegerInitialized(PARAM_EXTRACTOR_POOLSIZE);
- }
-
- /**
- * @return the extractor timeout in milliseconds.
- */
- public long getExtractorTimeout() {
- return getParameterIntegerInitialized(PARAM_EXTRACTOR_TIMEOUT);
- }
-
- /**
- * Returns the location of the search index. Returns <code>null</code>
if
- * not set.
- *
- * @return the location of the search index.
- * @throws RepositoryConfigurationException
- */
- public String getIndexDir() throws RepositoryConfigurationException {
-
- String indexDir;
- try {
- indexDir = queryHandlerEntry.getParameterValue(PARAM_INDEX_DIR);
- } catch (RepositoryConfigurationException e) {
- indexDir = queryHandlerEntry.getParameterValue(OLD_PARAM_INDEX_DIR);
- }
-
- indexDir = indexDir.replace("${java.io.tmpdir}", System
- .getProperty("java.io.tmpdir"));
-
- return indexDir;
- }
-
- /**
- * @return the class name of the indexing configuration implementation.
- */
- public String getIndexingConfigurationClass() {
- return queryHandlerEntry.getParameterValue(
- PARAM_INDEXING_CONFIGURATION_CLASS,
- DEDAULT_INDEXINGCONFIGURATIONCLASS);
- }
-
- /**
- * @return the path to the indexing configuration file.
- */
- public String getIndexingConfigurationPath() {
- return queryHandlerEntry.getParameterValue(
- PARAM_INDEXING_CONFIGURATION_PATH, null);
- }
-
- public int getMaxFieldLength() {
- return queryHandlerEntry.getParameterInteger(PARAM_MAX_FIELD_LENGTH,
- DEFAULT_MAX_FIELD_LENGTH);
- }
-
- /**
- * Returns the current value for maxMergeDocs.
- *
- * @return the current value for maxMergeDocs.
- */
- public int getMaxMergeDocs() {
- return queryHandlerEntry.getParameterInteger(PARAM_MAX_MERGE_DOCS,
- DEFAULT_MAX_MERGE_DOCS);
- }
-
- /**
- * Returns the current value for the merge factor.
- *
- * @return the current value for the merge factor.
- */
- public int getMergeFactor() {
- return queryHandlerEntry.getParameterInteger(PARAM_MERGE_FACTOR,
- DEFAULT_MERGE_FACTOR);
- }
-
- /**
- * Returns the current value for minMergeDocs.
- *
- * @return the current value for minMergeDocs.
- */
- public int getMinMergeDocs() {
- return queryHandlerEntry.getParameterInteger(PARAM_MIN_MERGE_DOCS,
- DEFAULT_MIN_MERGE_DOCS);
- }
-
- public String getQueryClass() {
- return queryHandlerEntry.getParameterValue(PARAM_QUERY_CLASS,
- DEFAULT_QUERY_IMPL_CLASS);
- }
-
- /**
- * @return the number of results the query handler will fetch initially when
- * a query is executed.
- */
- public int getResultFetchSize() {
- return queryHandlerEntry.getParameterInteger(PARAM_RESULT_FETCH_SIZE,
- DEFAULT_RESULTFETCHSIZE);
- }
-
- public String getRootNodeIdentifer() {
- return queryHandlerEntry.getParameterValue(PARAM_ROOT_NODE_ID,
- Constants.ROOT_UUID);
- }
-
- /**
- * Get spell checker class.
- *
- * @return the class name of the spell checker implementation or
- * <code>null</code> if none is set.
- */
- public String getSpellCheckerClass() {
- return queryHandlerEntry.getParameterValue(PARAM_SPELLCHECKER_CLASS,
- null);
- }
-
- /**
- * Get support highlighting.
- *
- * @return <code>true</code> if highlighting support is enabled.
- */
- public boolean getSupportHighlighting() {
- return queryHandlerEntry.getParameterBoolean(
- PARAM_SUPPORT_HIGHLIGHTING, DEFAULT_SUPPORTHIGHLIGHTING);
- }
-
- /**
- * Get synonym provider class.
- *
- * @return the class name of the synonym provider implementation or
- * <code>null</code> if none is set.
- */
- public String getSynonymProviderClass() {
- return queryHandlerEntry.getParameterValue(PARAM_SYNONYMPROVIDER_CLASS,
- null);
- }
-
- /**
- * Get synonym provider configuration path.
- *
- * @return the configuration path for the synonym provider. If none is set
- * this method returns <code>null</code>.
- */
- public String getSynonymProviderConfigPath() {
- return queryHandlerEntry.getParameterValue(
- PARAM_SYNONYMPROVIDER_CONFIG_PATH, null);
- }
-
- /**
- * Returns the current value for useCompoundFile.
- *
- * @return the current value for useCompoundFile.
- */
- public boolean getUseCompoundFile() {
- return queryHandlerEntry.getParameterBoolean(PARAM_USE_COMPOUNDFILE,
- DEFAULT_USECOMPOUNDFILE);
- }
-
- /**
- * Returns the current value for volatileIdleTime.
- *
- * @return the current value for volatileIdleTime.
- */
- public int getVolatileIdleTime() {
- if (volatileIdleTime == null)
- volatileIdleTime = queryHandlerEntry.getParameterInteger(
- PARAM_VOLATILE_IDLE_TIME, DEFAULT_VOLATILEIDLETIME);
-
- return volatileIdleTime;
- }
-
- /**
- * If set <code>true</code> the index is checked for consistency
depending
- * on the {@link #forceConsistencyCheck} parameter. If set to
- * <code>false</code>, no consistency check is performed, even if the
redo
- * log had been applied on startup.
- * <p/>
- * Default value is: <code>false</code>.
- *
- * @return boolean
- */
- public boolean isConsistencyCheckEnabled() {
- return queryHandlerEntry.getParameterBoolean(
- PARAM_CONSISTENCY_CHECK_ENABLED,
- DEFAULT_CONSISTENCYCHECKENABLED);
- }
-
- public boolean isForceConsistencyCheck() {
- return queryHandlerEntry.getParameterBoolean(
- PARAM_FORCE_CONSISTENCYCHECK, DEFAULT_FORCECONSISTENCYCHECK);
- }
-
- /**
- *
- * @return true if index upgrade allowed.
- */
- public boolean isUpgradeIndex() {
- Boolean updateIndex = queryHandlerEntry.getParameterBoolean(
- PARAM_UPGRADE_INDEX, null);
- if (updateIndex == null || !updateIndex) {
- updateIndex = Boolean.valueOf(System
- .getProperty(PARAM_UPGRADE_INDEX));
- }
- return updateIndex;
- }
-
- /**
- * Creates a file system resource to the synonym provider configuration.
- *
- * @param cfm
- * @return a file system resource or <code>null</code> if no path was
- * configured.
- * @throws Exception
- */
- protected InputStream createSynonymProviderConfigResource(
- ConfigurationManager cfm) throws Exception {
- if (getSynonymProviderConfigPath() != null) {
- return cfm.getInputStream(getSynonymProviderConfigPath());
- }
- return null;
- }
-
- /**
- * Returns the document element of the indexing configuration or
- * <code>null</code> if there is no indexing configuration.
- *
- * @return the indexing configuration or <code>null</code> if there is
none.
- * @throws IOException
- * @throws RepositoryConfigurationException
- */
- protected Element getIndexingConfigurationDOM(ConfigurationManager cfm)
- throws IOException, RepositoryConfigurationException {
- String indexingConfigPath = getIndexingConfigurationPath();
- Element indexingConfiguration = null;
- if (indexingConfigPath != null) {
-
- InputStream is;
- try {
- is = cfm.getInputStream(indexingConfigPath);
- } catch (Exception e1) {
- throw new IOException(e1.getLocalizedMessage());
- }
-
- if (is == null)
- throw new IOException("Resource does not exist: "
- + indexingConfigPath);
-
- DocumentBuilderFactory factory = DocumentBuilderFactory
- .newInstance();
- try {
- DocumentBuilder builder = factory.newDocumentBuilder();
- builder
- .setEntityResolver(new IndexingConfigurationEntityResolver());
- indexingConfiguration = builder.parse(is).getDocumentElement();
- } catch (ParserConfigurationException e) {
- throw new RepositoryConfigurationException(e
- .getLocalizedMessage(), e);
- } catch (SAXException e) {
- throw new RepositoryConfigurationException(e
- .getLocalizedMessage(), e);
- }
- }
-
- return indexingConfiguration;
- }
-
- /**
- * Return ErrorLog file size in Kb String representation.
- *
- * @return int size in Kb
- */
- public int getErrorLogSize() {
- String size = queryHandlerEntry.getParameterValue(PARAM_ERRORLOG_SIZE,
- null);
- if ((size == null) || (size.equals(""))) {
- return SearchIndex.DEFAULT_ERRORLOG_FILE_SIZE;
- } else {
- return new Integer(size);
- }
- }
-
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -24,7 +24,7 @@
import org.exoplatform.container.configuration.ConfigurationManager;
import org.exoplatform.services.document.DocumentReaderService;
import org.exoplatform.services.jcr.config.QueryHandlerEntry;
-import org.exoplatform.services.jcr.config.QueryHandlerEntryWrapper;
+import org.exoplatform.services.jcr.config.QueryHandlerParams;
import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
import org.exoplatform.services.jcr.core.nodetype.NodeTypeDataManager;
import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
@@ -80,669 +80,781 @@
/**
* Acts as a global entry point to execute queries and index nodes.
*/
-public class SearchManager implements Startable,
- MandatoryItemsPersistenceListener {
+public class SearchManager implements Startable, MandatoryItemsPersistenceListener
+{
- /**
- * Logger instance for this class
- */
- private static final Log log = ExoLogger.getLogger(SearchManager.class);
+ /**
+ * Logger instance for this class
+ */
+ private static final Log log = ExoLogger.getLogger(SearchManager.class);
- protected final QueryHandlerEntryWrapper config;
+ protected final QueryHandlerEntry config;
- /**
- * Text extractor for extracting text content of binary properties.
- */
- protected final DocumentReaderService extractor;
+ /**
+ * Text extractor for extracting text content of binary properties.
+ */
+ protected final DocumentReaderService extractor;
- /**
- * QueryHandler where query execution is delegated to
- */
- protected QueryHandler handler;
+ /**
+ * QueryHandler where query execution is delegated to
+ */
+ protected QueryHandler handler;
- /**
- * The shared item state manager instance for the workspace.
- */
- protected final ItemDataConsumer itemMgr;
+ /**
+ * The shared item state manager instance for the workspace.
+ */
+ protected final ItemDataConsumer itemMgr;
- /**
- * The namespace registry of the repository.
- */
- protected final NamespaceRegistryImpl nsReg;
+ /**
+ * The namespace registry of the repository.
+ */
+ protected final NamespaceRegistryImpl nsReg;
- /**
- * The node type registry.
- */
- protected final NodeTypeDataManager nodeTypeDataManager;
+ /**
+ * The node type registry.
+ */
+ protected final NodeTypeDataManager nodeTypeDataManager;
- /**
- * QueryHandler of the parent search manager or <code>null</code> if
there
- * is none.
- */
- protected final SearchManager parentSearchManager;
+ /**
+ * QueryHandler of the parent search manager or <code>null</code> if
there
+ * is none.
+ */
+ protected final SearchManager parentSearchManager;
- // protected QPath indexingRoot;
- //
- // protected List<QPath> excludedPaths = new ArrayList<QPath>();
+ // protected QPath indexingRoot;
+ //
+ // protected List<QPath> excludedPaths = new ArrayList<QPath>();
- protected IndexingTree indexingTree;
+ protected IndexingTree indexingTree;
- private final ConfigurationManager cfm;
+ private final ConfigurationManager cfm;
- protected LuceneVirtualTableResolver virtualTableResolver;
+ protected LuceneVirtualTableResolver virtualTableResolver;
- /**
- * Creates a new <code>SearchManager</code>.
- *
- * @param config
- * the search configuration.
- * @param nsReg
- * the namespace registry.
- * @param ntReg
- * the node type registry.
- * @param itemMgr
- * the shared item state manager.
- * @param rootNodeId
- * the id of the root node.
- * @param parentMgr
- * the parent search manager or <code>null</code> if there is
no
- * parent search manager.
- * @param excludedNodeId
- * id of the node that should be excluded from indexing. Any
- * descendant of that node will also be excluded from indexing.
- * @throws RepositoryException
- * if the search manager cannot be initialized
- * @throws RepositoryConfigurationException
- */
- public SearchManager(QueryHandlerEntry config, NamespaceRegistryImpl nsReg,
- NodeTypeDataManager ntReg, WorkspacePersistentDataManager itemMgr,
- SystemSearchManagerHolder parentSearchManager,
- DocumentReaderService extractor, ConfigurationManager cfm,
- final RepositoryIndexSearcherHolder indexSearcherHolder)
- throws RepositoryException, RepositoryConfigurationException {
+ /**
+ * Creates a new <code>SearchManager</code>.
+ *
+ * @param config
+ * the search configuration.
+ * @param nsReg
+ * the namespace registry.
+ * @param ntReg
+ * the node type registry.
+ * @param itemMgr
+ * the shared item state manager.
+ * @param rootNodeId
+ * the id of the root node.
+ * @param parentMgr
+ * the parent search manager or <code>null</code> if there is
no
+ * parent search manager.
+ * @param excludedNodeId
+ * id of the node that should be excluded from indexing. Any
+ * descendant of that node will also be excluded from indexing.
+ * @throws RepositoryException
+ * if the search manager cannot be initialized
+ * @throws RepositoryConfigurationException
+ */
+ public SearchManager(QueryHandlerEntry config, NamespaceRegistryImpl nsReg,
NodeTypeDataManager ntReg,
+ WorkspacePersistentDataManager itemMgr, SystemSearchManagerHolder
parentSearchManager,
+ DocumentReaderService extractor, ConfigurationManager cfm, final
RepositoryIndexSearcherHolder indexSearcherHolder)
+ throws RepositoryException, RepositoryConfigurationException
+ {
- this.extractor = extractor;
- indexSearcherHolder.addIndexSearcher(this);
- this.config = new QueryHandlerEntryWrapper(config);
- this.nodeTypeDataManager = ntReg;
- this.nsReg = nsReg;
- this.itemMgr = itemMgr;
- this.cfm = cfm;
- this.virtualTableResolver = new LuceneVirtualTableResolver(
- nodeTypeDataManager, nsReg);
- this.parentSearchManager = parentSearchManager != null ? parentSearchManager
- .get()
- : null;
- itemMgr.addItemPersistenceListener(this);
- }
+ this.extractor = extractor;
+ indexSearcherHolder.addIndexSearcher(this);
+ this.config = config;
+ this.nodeTypeDataManager = ntReg;
+ this.nsReg = nsReg;
+ this.itemMgr = itemMgr;
+ this.cfm = cfm;
+ this.virtualTableResolver = new LuceneVirtualTableResolver(nodeTypeDataManager,
nsReg);
+ this.parentSearchManager = parentSearchManager != null ? parentSearchManager.get()
: null;
+ itemMgr.addItemPersistenceListener(this);
+ }
- /**
- * Creates a query object from a node that can be executed on the workspace.
- *
- * @param session
- * the session of the user executing the query.
- * @param itemMgr
- * the item manager of the user executing the query. Needed to
- * return <code>Node</code> instances in the result set.
- * @param node
- * a node of type nt:query.
- * @return a <code>Query</code> instance to execute.
- * @throws InvalidQueryException
- * if <code>absPath</code> is not a valid persisted query
(that
- * is, a node of type nt:query)
- * @throws RepositoryException
- * if any other error occurs.
- */
- public Query createQuery(SessionImpl session,
- SessionDataManager sessionDataManager, Node node)
- throws InvalidQueryException, RepositoryException {
- AbstractQueryImpl query = createQueryInstance();
- query.init(session, sessionDataManager, handler, node);
- return query;
- }
+ /**
+ * Creates a query object from a node that can be executed on the workspace.
+ *
+ * @param session
+ * the session of the user executing the query.
+ * @param itemMgr
+ * the item manager of the user executing the query. Needed to
+ * return <code>Node</code> instances in the result set.
+ * @param node
+ * a node of type nt:query.
+ * @return a <code>Query</code> instance to execute.
+ * @throws InvalidQueryException
+ * if <code>absPath</code> is not a valid persisted query
(that
+ * is, a node of type nt:query)
+ * @throws RepositoryException
+ * if any other error occurs.
+ */
+ public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager,
Node node)
+ throws InvalidQueryException, RepositoryException
+ {
+ AbstractQueryImpl query = createQueryInstance();
+ query.init(session, sessionDataManager, handler, node);
+ return query;
+ }
- /**
- * Creates a query object that can be executed on the workspace.
- *
- * @param session
- * the session of the user executing the query.
- * @param itemMgr
- * the item manager of the user executing the query. Needed to
- * return <code>Node</code> instances in the result set.
- * @param statement
- * the actual query statement.
- * @param language
- * the syntax of the query statement.
- * @return a <code>Query</code> instance to execute.
- * @throws InvalidQueryException
- * if the query is malformed or the <code>language</code> is
- * unknown.
- * @throws RepositoryException
- * if any other error occurs.
- */
- public Query createQuery(SessionImpl session,
- SessionDataManager sessionDataManager, String statement,
- String language) throws InvalidQueryException, RepositoryException {
- AbstractQueryImpl query = createQueryInstance();
- query.init(session, sessionDataManager, handler, statement, language);
- return query;
- }
+ /**
+ * Creates a query object that can be executed on the workspace.
+ *
+ * @param session
+ * the session of the user executing the query.
+ * @param itemMgr
+ * the item manager of the user executing the query. Needed to
+ * return <code>Node</code> instances in the result set.
+ * @param statement
+ * the actual query statement.
+ * @param language
+ * the syntax of the query statement.
+ * @return a <code>Query</code> instance to execute.
+ * @throws InvalidQueryException
+ * if the query is malformed or the <code>language</code> is
+ * unknown.
+ * @throws RepositoryException
+ * if any other error occurs.
+ */
+ public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager,
String statement,
+ String language) throws InvalidQueryException, RepositoryException
+ {
+ AbstractQueryImpl query = createQueryInstance();
+ query.init(session, sessionDataManager, handler, statement, language);
+ return query;
+ }
- /**
- * just for test use only
- */
- public QueryHandler getHandler() {
+ /**
+ * just for test use only
+ */
+ public QueryHandler getHandler()
+ {
- return handler;
- }
+ return handler;
+ }
- public void onSaveItems(ItemStateChangesLog changesLog) {
- if (handler == null)
- return;
+ public void onSaveItems(ItemStateChangesLog changesLog)
+ {
+ if (handler == null)
+ return;
- long time = System.currentTimeMillis();
+ long time = System.currentTimeMillis();
- // nodes that need to be removed from the index.
- final Set<String> removedNodes = new HashSet<String>();
- // nodes that need to be added to the index.
- final Set<String> addedNodes = new HashSet<String>();
+ // nodes that need to be removed from the index.
+ final Set<String> removedNodes = new HashSet<String>();
+ // nodes that need to be added to the index.
+ final Set<String> addedNodes = new HashSet<String>();
- final Map<String, List<ItemState>> updatedNodes = new HashMap<String,
List<ItemState>>();
+ final Map<String, List<ItemState>> updatedNodes = new
HashMap<String, List<ItemState>>();
- for (Iterator<ItemState> iter = changesLog.getAllStates().iterator(); iter
- .hasNext();) {
- ItemState itemState = iter.next();
+ for (Iterator<ItemState> iter = changesLog.getAllStates().iterator();
iter.hasNext();)
+ {
+ ItemState itemState = iter.next();
- if (!indexingTree.isExcluded(itemState)) {
- String uuid = itemState.isNode() ? itemState.getData()
- .getIdentifier() : itemState.getData()
- .getParentIdentifier();
+ if (!indexingTree.isExcluded(itemState))
+ {
+ String uuid =
+ itemState.isNode() ? itemState.getData().getIdentifier() :
itemState.getData().getParentIdentifier();
- if (itemState.isAdded()) {
- if (itemState.isNode()) {
- addedNodes.add(uuid);
- } else {
- if (!addedNodes.contains(uuid)) {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- }
- } else if (itemState.isRenamed()) {
- if (itemState.isNode()) {
- addedNodes.add(uuid);
- } else {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- } else if (itemState.isUpdated()) {
- createNewOrAdd(uuid, itemState, updatedNodes);
- } else if (itemState.isMixinChanged()) {
- createNewOrAdd(uuid, itemState, updatedNodes);
- } else if (itemState.isDeleted()) {
- if (itemState.isNode()) {
- if (addedNodes.contains(uuid)) {
- addedNodes.remove(uuid);
- removedNodes.remove(uuid);
- } else {
- removedNodes.add(uuid);
- }
- // remove all changes after node remove
- updatedNodes.remove(uuid);
- } else {
- if (!removedNodes.contains(uuid)
- && !addedNodes.contains(uuid)) {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- }
- }
- }
- }
- // TODO make quick changes
- for (String uuid : updatedNodes.keySet()) {
- removedNodes.add(uuid);
- addedNodes.add(uuid);
- }
+ if (itemState.isAdded())
+ {
+ if (itemState.isNode())
+ {
+ addedNodes.add(uuid);
+ }
+ else
+ {
+ if (!addedNodes.contains(uuid))
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
+ }
+ else if (itemState.isRenamed())
+ {
+ if (itemState.isNode())
+ {
+ addedNodes.add(uuid);
+ }
+ else
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
+ else if (itemState.isUpdated())
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ else if (itemState.isMixinChanged())
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ else if (itemState.isDeleted())
+ {
+ if (itemState.isNode())
+ {
+ if (addedNodes.contains(uuid))
+ {
+ addedNodes.remove(uuid);
+ removedNodes.remove(uuid);
+ }
+ else
+ {
+ removedNodes.add(uuid);
+ }
+ // remove all changes after node remove
+ updatedNodes.remove(uuid);
+ }
+ else
+ {
+ if (!removedNodes.contains(uuid) &&
!addedNodes.contains(uuid))
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
+ }
+ }
+ }
+ // TODO make quick changes
+ for (String uuid : updatedNodes.keySet())
+ {
+ removedNodes.add(uuid);
+ addedNodes.add(uuid);
+ }
- Iterator<NodeData> addedStates = new Iterator<NodeData>() {
- private final Iterator<String> iter = addedNodes.iterator();
+ Iterator<NodeData> addedStates = new Iterator<NodeData>()
+ {
+ private final Iterator<String> iter = addedNodes.iterator();
- public boolean hasNext() {
- return iter.hasNext();
- }
+ public boolean hasNext()
+ {
+ return iter.hasNext();
+ }
- public NodeData next() {
+ public NodeData next()
+ {
- // cycle till find a next or meet the end of set
- do {
- String id = iter.next();
- try {
- ItemData item = itemMgr.getItemData(id);
- if (item != null) {
- if (item.isNode())
- return (NodeData) item; // return node
- else
- log.warn("Node not found, but property " + id
- + ", " + item.getQPath().getAsString()
- + " found. ");
- } else
- log.warn("Unable to index node with id " + id
- + ", node does not exist.");
+ // cycle till find a next or meet the end of set
+ do
+ {
+ String id = iter.next();
+ try
+ {
+ ItemData item = itemMgr.getItemData(id);
+ if (item != null)
+ {
+ if (item.isNode())
+ return (NodeData)item; // return node
+ else
+ log.warn("Node not found, but property " + id + ",
" + item.getQPath().getAsString()
+ + " found. ");
+ }
+ else
+ log.warn("Unable to index node with id " + id + ",
node does not exist.");
- } catch (RepositoryException e) {
- log.error("Can't read next node data " + id, e);
- }
- } while (iter.hasNext()); // get next if error or node not found
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Can't read next node data " + id, e);
+ }
+ }
+ while (iter.hasNext()); // get next if error or node not found
- return null; // we met the end of iterator set
- }
+ return null; // we met the end of iterator set
+ }
- public void remove() {
- throw new UnsupportedOperationException();
- }
- };
+ public void remove()
+ {
+ throw new UnsupportedOperationException();
+ }
+ };
- Iterator<String> removedIds = new Iterator<String>() {
- private final Iterator<String> iter = removedNodes.iterator();
+ Iterator<String> removedIds = new Iterator<String>()
+ {
+ private final Iterator<String> iter = removedNodes.iterator();
- public boolean hasNext() {
- return iter.hasNext();
- }
+ public boolean hasNext()
+ {
+ return iter.hasNext();
+ }
- public String next() {
- return nextNodeId();
- }
+ public String next()
+ {
+ return nextNodeId();
+ }
- public String nextNodeId() throws NoSuchElementException {
- return iter.next();
- }
+ public String nextNodeId() throws NoSuchElementException
+ {
+ return iter.next();
+ }
- public void remove() {
- throw new UnsupportedOperationException();
+ public void remove()
+ {
+ throw new UnsupportedOperationException();
- }
- };
+ }
+ };
- if (removedNodes.size() > 0 || addedNodes.size() > 0) {
- try {
- handler.updateNodes(removedIds, addedStates);
- } catch (RepositoryException e) {
- log.error("Error indexing changes " + e, e);
- } catch (IOException e) {
- log.error("Error indexing changes " + e, e);
- try {
- handler.logErrorChanges(removedNodes, addedNodes);
- } catch (IOException ioe) {
- log.warn(
- "Exception occure when errorLog writed. Error log is not complete. "
- + ioe, ioe);
- }
- }
- }
+ if (removedNodes.size() > 0 || addedNodes.size() > 0)
+ {
+ try
+ {
+ handler.updateNodes(removedIds, addedStates);
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ }
+ catch (IOException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ try
+ {
+ handler.logErrorChanges(removedNodes, addedNodes);
+ }
+ catch (IOException ioe)
+ {
+ log.warn("Exception occure when errorLog writed. Error log is not
complete. " + ioe, ioe);
+ }
+ }
+ }
- if (log.isDebugEnabled()) {
- log.debug("onEvent: indexing finished in "
- + String.valueOf(System.currentTimeMillis() - time)
- + " ms.");
- }
- }
+ if (log.isDebugEnabled())
+ {
+ log.debug("onEvent: indexing finished in " +
String.valueOf(System.currentTimeMillis() - time) + " ms.");
+ }
+ }
- public void createNewOrAdd(String key, ItemState state,
- Map<String, List<ItemState>> updatedNodes) {
- List<ItemState> list = updatedNodes.get(key);
- if (list == null) {
- list = new ArrayList<ItemState>();
- updatedNodes.put(key, list);
- }
- list.add(state);
+ public void createNewOrAdd(String key, ItemState state, Map<String,
List<ItemState>> updatedNodes)
+ {
+ List<ItemState> list = updatedNodes.get(key);
+ if (list == null)
+ {
+ list = new ArrayList<ItemState>();
+ updatedNodes.put(key, list);
+ }
+ list.add(state);
- }
+ }
- public void start() {
+ public void start()
+ {
- if (log.isDebugEnabled())
- log.debug("start");
+ if (log.isDebugEnabled())
+ log.debug("start");
+ try
+ {
+ if (indexingTree == null)
+ {
+ List<QPath> excludedPath = new ArrayList<QPath>();
+ // Calculating excluded node identifiers
+ excludedPath.add(Constants.JCR_SYSTEM_PATH);
- if (indexingTree == null) {
- List<QPath> excludedPath = new ArrayList<QPath>();
- // Calculating excluded node identifiers
- excludedPath.add(Constants.JCR_SYSTEM_PATH);
+ //if (config.getExcludedNodeIdentifers() != null)
+ String excludedNodeIdentifer =
+
config.getParameterValue(QueryHandlerParams.PARAM_EXCLUDED_NODE_IDENTIFERS, null);
+ if (excludedNodeIdentifer != null)
+ {
+ StringTokenizer stringTokenizer = new
StringTokenizer(excludedNodeIdentifer);
+ while (stringTokenizer.hasMoreTokens())
+ {
- if (config.getExcludedNodeIdentifers() != null) {
- StringTokenizer stringTokenizer = new StringTokenizer(config
- .getExcludedNodeIdentifers());
- while (stringTokenizer.hasMoreTokens()) {
+ try
+ {
+ ItemData excludeData =
itemMgr.getItemData(stringTokenizer.nextToken());
+ if (excludeData != null)
+ excludedPath.add(excludeData.getQPath());
+ }
+ catch (RepositoryException e)
+ {
+ log.warn(e.getLocalizedMessage());
+ }
+ }
+ }
- try {
- ItemData excludeData = itemMgr
- .getItemData(stringTokenizer.nextToken());
- if (excludeData != null)
- excludedPath.add(excludeData.getQPath());
- } catch (RepositoryException e) {
- log.warn(e.getLocalizedMessage());
- }
- }
- }
+ NodeData indexingRootData = null;
+ String rootNodeIdentifer =
config.getParameterValue(QueryHandlerParams.PARAM_ROOT_NODE_ID, null);
+ if (rootNodeIdentifer != null)
+ {
+ try
+ {
+ ItemData indexingRootDataItem =
itemMgr.getItemData(rootNodeIdentifer);
+ if (indexingRootDataItem != null &&
indexingRootDataItem.isNode())
+ indexingRootData = (NodeData)indexingRootDataItem;
+ }
+ catch (RepositoryException e)
+ {
+ log.warn(e.getLocalizedMessage() + " Indexing root set to " +
Constants.ROOT_PATH.getAsString());
- NodeData indexingRootData = null;
- if (config.getRootNodeIdentifer() != null) {
- try {
- ItemData indexingRootDataItem = itemMgr.getItemData(config
- .getRootNodeIdentifer());
- if (indexingRootDataItem != null
- && indexingRootDataItem.isNode())
- indexingRootData = (NodeData) indexingRootDataItem;
- } catch (RepositoryException e) {
- log.warn(e.getLocalizedMessage() + " Indexing root set to "
- + Constants.ROOT_PATH.getAsString());
+ }
- }
+ }
+ else
+ {
+ try
+ {
+ indexingRootData = (NodeData)itemMgr.getItemData(Constants.ROOT_UUID);
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Fail to load root node data");
+ }
+ }
- } else {
- try {
- indexingRootData = (NodeData) itemMgr
- .getItemData(Constants.ROOT_UUID);
- } catch (RepositoryException e) {
- log.error("Fail to load root node data");
- }
- }
+ indexingTree = new IndexingTree(indexingRootData, excludedPath);
+ }
- indexingTree = new IndexingTree(indexingRootData, excludedPath);
- }
- try {
- initializeQueryHandler();
- } catch (RepositoryException e) {
- log.error(e.getLocalizedMessage());
- handler = null;
- throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
- } catch (RepositoryConfigurationException e) {
- log.error(e.getLocalizedMessage());
- handler = null;
- throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
- }
- }
+ initializeQueryHandler();
+ }
+ catch (RepositoryException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
+ }
+ catch (RepositoryConfigurationException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
+ }
+ }
- public void stop() {
- handler.close();
- log.info("Search manager stopped");
- }
+ public void stop()
+ {
+ handler.close();
+ log.info("Search manager stopped");
+ }
- // /**
- // * Checks if the given event should be excluded based on the
- // * {@link #excludePath} setting.
- // *
- // * @param event
- // * observation event
- // * @return <code>true</code> if the event should be excluded,
- // * <code>false</code> otherwise
- // */
- // protected boolean isExcluded(ItemState event) {
- //
- // for (QPath excludedPath : excludedPaths) {
- // if (event.getData().getQPath().isDescendantOf(excludedPath)
- // || event.getData().getQPath().equals(excludedPath))
- // return true;
- // }
- //
- // return !event.getData().getQPath().isDescendantOf(indexingRoot)
- // && !event.getData().getQPath().equals(indexingRoot);
- // }
+ // /**
+ // * Checks if the given event should be excluded based on the
+ // * {@link #excludePath} setting.
+ // *
+ // * @param event
+ // * observation event
+ // * @return <code>true</code> if the event should be excluded,
+ // * <code>false</code> otherwise
+ // */
+ // protected boolean isExcluded(ItemState event) {
+ //
+ // for (QPath excludedPath : excludedPaths) {
+ // if (event.getData().getQPath().isDescendantOf(excludedPath)
+ // || event.getData().getQPath().equals(excludedPath))
+ // return true;
+ // }
+ //
+ // return !event.getData().getQPath().isDescendantOf(indexingRoot)
+ // && !event.getData().getQPath().equals(indexingRoot);
+ // }
- protected QueryHandlerContext createQueryHandlerContext(
- QueryHandler parentHandler) throws RepositoryConfigurationException {
+ protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler)
+ throws RepositoryConfigurationException
+ {
- QueryHandlerContext context = new QueryHandlerContext(itemMgr,
- indexingTree, nodeTypeDataManager, nsReg, parentHandler, config
- .getIndexDir(), extractor, true, virtualTableResolver);
- return context;
- }
+ QueryHandlerContext context =
+ new QueryHandlerContext(itemMgr, indexingTree, nodeTypeDataManager, nsReg,
parentHandler, getIndexDir(),
+ extractor, true, virtualTableResolver);
+ return context;
+ }
- /**
- * Initializes the query handler.
- *
- * @throws RepositoryException
- * if the query handler cannot be initialized.
- * @throws RepositoryConfigurationException
- * @throws ClassNotFoundException
- */
- protected void initializeQueryHandler() throws RepositoryException,
- RepositoryConfigurationException {
- // initialize query handler
- String className = config.getType();
- if (className == null)
- throw new RepositoryConfigurationException(
- "Content hanler configuration fail");
+ protected String getIndexDir() throws RepositoryConfigurationException
+ {
+ String dir = config.getParameterValue(QueryHandlerParams.PARAM_INDEX_DIR, null);
+ if (dir == null)
+ {
+ log.warn(QueryHandlerParams.PARAM_INDEX_DIR + " parameter not found. Using
outdated parameter name "
+ + QueryHandlerParams.OLD_PARAM_INDEX_DIR);
+ dir = config.getParameterValue(QueryHandlerParams.OLD_PARAM_INDEX_DIR);
+ }
+ return dir;
+ }
- try {
- Class qHandlerClass = Class.forName(className, true, this
- .getClass().getClassLoader());
- Constructor constuctor = qHandlerClass.getConstructor(
- QueryHandlerEntry.class, ConfigurationManager.class);
- handler = (QueryHandler) constuctor.newInstance(config
- .getQueryHandlerEntry(), cfm);
- QueryHandler parentHandler = (this.parentSearchManager != null) ?
parentSearchManager
- .getHandler()
- : null;
- QueryHandlerContext context = createQueryHandlerContext(parentHandler);
- handler.init(context);
+ /**
+ * Initializes the query handler.
+ *
+ * @throws RepositoryException
+ * if the query handler cannot be initialized.
+ * @throws RepositoryConfigurationException
+    *           if the configured query handler class cannot be loaded
+ */
+ protected void initializeQueryHandler() throws RepositoryException,
RepositoryConfigurationException
+ {
+ // initialize query handler
+ String className = config.getType();
+ if (className == null)
+ throw new RepositoryConfigurationException("Content hanler
configuration fail");
- } catch (SecurityException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (IllegalArgumentException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (ClassNotFoundException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (NoSuchMethodException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (InstantiationException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (IllegalAccessException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (InvocationTargetException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (IOException e) {
- throw new RepositoryException(e.getMessage(), e);
- }
- }
+ try
+ {
+ Class qHandlerClass = Class.forName(className, true,
this.getClass().getClassLoader());
+ Constructor constuctor = qHandlerClass.getConstructor(QueryHandlerEntry.class,
ConfigurationManager.class);
+ handler = (QueryHandler)constuctor.newInstance(config, cfm);
+ QueryHandler parentHandler = (this.parentSearchManager != null) ?
parentSearchManager.getHandler() : null;
+ QueryHandlerContext context = createQueryHandlerContext(parentHandler);
+ handler.init(context);
- /**
- * Creates a new instance of an {@link AbstractQueryImpl} which is not
- * initialized.
- *
- * @return an new query instance.
- * @throws RepositoryException
- * if an error occurs while creating a new query instance.
- */
- protected AbstractQueryImpl createQueryInstance()
- throws RepositoryException {
- try {
- String queryImplClassName = handler.getQueryClass();
- Object obj = Class.forName(queryImplClassName).newInstance();
- if (obj instanceof AbstractQueryImpl) {
- return (AbstractQueryImpl) obj;
- } else {
- throw new IllegalArgumentException(queryImplClassName
- + " is not of type "
- + AbstractQueryImpl.class.getName());
- }
- } catch (Throwable t) {
- throw new RepositoryException("Unable to create query: "
- + t.toString(), t);
- }
- }
+ }
+ catch (SecurityException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalArgumentException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (ClassNotFoundException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (NoSuchMethodException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InstantiationException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalAccessException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InvocationTargetException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IOException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ }
- /**
- * {@inheritDoc}
- */
- public Set<String> getFieldNames() throws IndexException {
- final Set<String> fildsSet = new HashSet<String>();
- if (handler instanceof SearchIndex) {
- IndexReader reader = null;
- try {
- reader = ((SearchIndex) handler).getIndexReader();
- final Collection fields = reader
- .getFieldNames(IndexReader.FieldOption.ALL);
- for (final Object field : fields) {
- fildsSet.add((String) field);
- }
- } catch (IOException e) {
- throw new IndexException(e.getLocalizedMessage(), e);
- } finally {
- try {
- if (reader != null)
- reader.close();
- } catch (IOException e) {
- throw new IndexException(e.getLocalizedMessage(), e);
- }
- }
+ /**
+ * Creates a new instance of an {@link AbstractQueryImpl} which is not
+ * initialized.
+ *
+ * @return an new query instance.
+ * @throws RepositoryException
+ * if an error occurs while creating a new query instance.
+ */
+ protected AbstractQueryImpl createQueryInstance() throws RepositoryException
+ {
+ try
+ {
+ String queryImplClassName = handler.getQueryClass();
+ Object obj = Class.forName(queryImplClassName).newInstance();
+ if (obj instanceof AbstractQueryImpl)
+ {
+ return (AbstractQueryImpl)obj;
+ }
+ else
+ {
+ throw new IllegalArgumentException(queryImplClassName + " is not of type
"
+ + AbstractQueryImpl.class.getName());
+ }
+ }
+ catch (Throwable t)
+ {
+ throw new RepositoryException("Unable to create query: " +
t.toString(), t);
+ }
+ }
- }
- return fildsSet;
- }
+ /**
+ * {@inheritDoc}
+ */
+ public Set<String> getFieldNames() throws IndexException
+ {
+ final Set<String> fildsSet = new HashSet<String>();
+ if (handler instanceof SearchIndex)
+ {
+ IndexReader reader = null;
+ try
+ {
+ reader = ((SearchIndex)handler).getIndexReader();
+ final Collection fields = reader.getFieldNames(IndexReader.FieldOption.ALL);
+ for (final Object field : fields)
+ {
+ fildsSet.add((String)field);
+ }
+ }
+ catch (IOException e)
+ {
+ throw new IndexException(e.getLocalizedMessage(), e);
+ }
+ finally
+ {
+ try
+ {
+ if (reader != null)
+ reader.close();
+ }
+ catch (IOException e)
+ {
+ throw new IndexException(e.getLocalizedMessage(), e);
+ }
+ }
- public Set<String> getNodesByNodeType(final InternalQName nodeType)
- throws RepositoryException {
+ }
+ return fildsSet;
+ }
- return getNodes(virtualTableResolver.resolve(nodeType, true));
- }
+ public Set<String> getNodesByNodeType(final InternalQName nodeType) throws
RepositoryException
+ {
- /**
- * Return set of uuid of nodes. Contains in names prefixes maped to the
- * given uri
- *
- * @param prefix
- * @return
- * @throws RepositoryException
- */
- public Set<String> getNodesByUri(final String uri)
- throws RepositoryException {
- Set<String> result;
- final int defaultClauseCount = BooleanQuery.getMaxClauseCount();
- try {
+ return getNodes(virtualTableResolver.resolve(nodeType, true));
+ }
- // final LocationFactory locationFactory = new
- // LocationFactory(this);
- final ValueFactoryImpl valueFactory = new ValueFactoryImpl(
- new LocationFactory(nsReg));
- BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
- BooleanQuery query = new BooleanQuery();
+ /**
+    * Returns the set of UUIDs of nodes whose names contain prefixes mapped
+    * to the given namespace URI.
+ *
+    * @param uri the namespace URI to search for
+    * @return set of UUIDs of the matching nodes
+ * @throws RepositoryException
+ */
+ public Set<String> getNodesByUri(final String uri) throws RepositoryException
+ {
+ Set<String> result;
+ final int defaultClauseCount = BooleanQuery.getMaxClauseCount();
+ try
+ {
- final String prefix = nsReg.getNamespacePrefixByURI(uri);
- query.add(new WildcardQuery(new Term(FieldNames.LABEL, prefix
- + ":*")), Occur.SHOULD);
- // name of the property
- query.add(new WildcardQuery(new Term(FieldNames.PROPERTIES_SET,
- prefix + ":*")), Occur.SHOULD);
-
-
+ // final LocationFactory locationFactory = new
+ // LocationFactory(this);
+ final ValueFactoryImpl valueFactory = new ValueFactoryImpl(new
LocationFactory(nsReg));
+ BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
+ BooleanQuery query = new BooleanQuery();
+ final String prefix = nsReg.getNamespacePrefixByURI(uri);
+ query.add(new WildcardQuery(new Term(FieldNames.LABEL, prefix +
":*")), Occur.SHOULD);
+ // name of the property
+ query.add(new WildcardQuery(new Term(FieldNames.PROPERTIES_SET, prefix +
":*")), Occur.SHOULD);
- result = getNodes(query);
+ result = getNodes(query);
- // value of the property
+ // value of the property
- try {
- final Set<String> props = getFieldNames();
+ try
+ {
+ final Set<String> props = getFieldNames();
- query = new BooleanQuery();
- for (final String fieldName : props) {
- if (!FieldNames.PROPERTIES_SET.equals(fieldName)) {
- query.add(new WildcardQuery(new Term(fieldName, "*"
- + prefix + ":*")), Occur.SHOULD);
- }
- }
- } catch (final IndexException e) {
- throw new RepositoryException(e.getLocalizedMessage(), e);
- }
+ query = new BooleanQuery();
+ for (final String fieldName : props)
+ {
+ if (!FieldNames.PROPERTIES_SET.equals(fieldName))
+ {
+ query.add(new WildcardQuery(new Term(fieldName, "*" + prefix
+ ":*")), Occur.SHOULD);
+ }
+ }
+ }
+ catch (final IndexException e)
+ {
+ throw new RepositoryException(e.getLocalizedMessage(), e);
+ }
- final Set<String> propSet = getNodes(query);
- // Manually check property values;
- for (final String uuid : propSet) {
- if (isPrefixMatch(valueFactory, uuid, prefix)) {
- result.add(uuid);
- }
- }
- } finally {
- BooleanQuery.setMaxClauseCount(defaultClauseCount);
- }
+ final Set<String> propSet = getNodes(query);
+ // Manually check property values;
+ for (final String uuid : propSet)
+ {
+ if (isPrefixMatch(valueFactory, uuid, prefix))
+ {
+ result.add(uuid);
+ }
+ }
+ }
+ finally
+ {
+ BooleanQuery.setMaxClauseCount(defaultClauseCount);
+ }
- return result;
- }
+ return result;
+ }
- private boolean isPrefixMatch(final InternalQName value, final String prefix)
- throws RepositoryException {
- return value.getNamespace().equals(
- nsReg.getNamespaceURIByPrefix(prefix));
- }
+ private boolean isPrefixMatch(final InternalQName value, final String prefix) throws
RepositoryException
+ {
+ return value.getNamespace().equals(nsReg.getNamespaceURIByPrefix(prefix));
+ }
- private boolean isPrefixMatch(final QPath value, final String prefix)
- throws RepositoryException {
- for (int i = 0; i < value.getEntries().length; i++) {
- if (isPrefixMatch(value.getEntries()[i], prefix)) {
- return true;
- }
- }
- return false;
- }
+ private boolean isPrefixMatch(final QPath value, final String prefix) throws
RepositoryException
+ {
+ for (int i = 0; i < value.getEntries().length; i++)
+ {
+ if (isPrefixMatch(value.getEntries()[i], prefix))
+ {
+ return true;
+ }
+ }
+ return false;
+ }
- /**
- * @param valueFactory
- * @param dm
- * @param uuid
- * @param prefix
- * @throws RepositoryException
- */
- private boolean isPrefixMatch(final ValueFactoryImpl valueFactory,
- final String uuid, final String prefix) throws RepositoryException {
+ /**
+    * @param valueFactory factory used to materialize property values
+    * @param uuid identifier of the node to inspect
+    * @param prefix namespace prefix to match against PATH and NAME values
+    * @return true if any property value of the node uses the given prefix
+    * @throws RepositoryException if the node or its properties cannot be read
+ */
+ private boolean isPrefixMatch(final ValueFactoryImpl valueFactory, final String uuid,
final String prefix)
+ throws RepositoryException
+ {
- final ItemData node = itemMgr.getItemData(uuid);
- if (node != null && node.isNode()) {
- final List<PropertyData> props = itemMgr
- .getChildPropertiesData((NodeData) node);
- for (final PropertyData propertyData : props) {
- if (propertyData.getType() == PropertyType.PATH
- || propertyData.getType() == PropertyType.NAME) {
- for (final ValueData vdata : propertyData.getValues()) {
- final Value val = valueFactory.loadValue(
- ((AbstractValueData) vdata)
- .createTransientCopy(), propertyData
- .getType());
- if (propertyData.getType() == PropertyType.PATH) {
- if (isPrefixMatch(((PathValue) val).getQPath(),
- prefix)) {
- return true;
- }
- } else if (propertyData.getType() == PropertyType.NAME) {
- if (isPrefixMatch(((NameValue) val).getQName(),
- prefix)) {
- return true;
- }
- }
- }
- }
- }
- }
- return false;
- }
+ final ItemData node = itemMgr.getItemData(uuid);
+ if (node != null && node.isNode())
+ {
+ final List<PropertyData> props =
itemMgr.getChildPropertiesData((NodeData)node);
+ for (final PropertyData propertyData : props)
+ {
+ if (propertyData.getType() == PropertyType.PATH || propertyData.getType() ==
PropertyType.NAME)
+ {
+ for (final ValueData vdata : propertyData.getValues())
+ {
+ final Value val =
+
valueFactory.loadValue(((AbstractValueData)vdata).createTransientCopy(),
propertyData.getType());
+ if (propertyData.getType() == PropertyType.PATH)
+ {
+ if (isPrefixMatch(((PathValue)val).getQPath(), prefix))
+ {
+ return true;
+ }
+ }
+ else if (propertyData.getType() == PropertyType.NAME)
+ {
+ if (isPrefixMatch(((NameValue)val).getQName(), prefix))
+ {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ return false;
+ }
- /**
- * @param query
- * @return
- * @throws RepositoryException
- */
- private Set<String> getNodes(final org.apache.lucene.search.Query query)
- throws RepositoryException {
- Set<String> result = new HashSet<String>();
- try {
- QueryHits hits = handler.executeQuery(query);
+ /**
+    * @param query the Lucene query to execute
+    * @return set of UUIDs of the nodes matching the query
+ * @throws RepositoryException
+ */
+ private Set<String> getNodes(final org.apache.lucene.search.Query query) throws
RepositoryException
+ {
+ Set<String> result = new HashSet<String>();
+ try
+ {
+ QueryHits hits = handler.executeQuery(query);
- ScoreNode sn;
+ ScoreNode sn;
- while ((sn = hits.nextScoreNode()) != null) {
- // Node node = session.getNodeById(sn.getNodeId());
- result.add(sn.getNodeId());
- }
- } catch (IOException e) {
- throw new RepositoryException(e.getLocalizedMessage(), e);
- }
- return result;
- }
+ while ((sn = hits.nextScoreNode()) != null)
+ {
+ // Node node = session.getNodeById(sn.getNodeId());
+ result.add(sn.getNodeId());
+ }
+ }
+ catch (IOException e)
+ {
+ throw new RepositoryException(e.getLocalizedMessage(), e);
+ }
+ return result;
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -42,91 +42,96 @@
* @version $Id: SystemSearchManager.java 13891 2008-05-05 16:02:30Z pnedonosko
* $
*/
-public class SystemSearchManager extends SearchManager {
+public class SystemSearchManager extends SearchManager
+{
- /**
- * Class logger.
- */
- private final Log log = ExoLogger.getLogger("jcr.SystemSearchManager");
+ /**
+ * Class logger.
+ */
+ private final Log log = ExoLogger.getLogger("jcr.SystemSearchManager");
- /**
- * Is started flag.
- */
- private boolean isStarted = false;
+ /**
+ * Is started flag.
+ */
+ private boolean isStarted = false;
- /**
- * ChangesLog Buffer (used for saves before start).
- */
- private List<ItemStateChangesLog> changesLogBuffer = new
ArrayList<ItemStateChangesLog>();
+ /**
+ * ChangesLog Buffer (used for saves before start).
+ */
+ private List<ItemStateChangesLog> changesLogBuffer = new
ArrayList<ItemStateChangesLog>();
- public static final String INDEX_DIR_SUFFIX = "system";
+ public static final String INDEX_DIR_SUFFIX = "system";
- public SystemSearchManager(QueryHandlerEntry config,
- NamespaceRegistryImpl nsReg, NodeTypeDataManager ntReg,
- WorkspacePersistentDataManager itemMgr,
- DocumentReaderService service, ConfigurationManager cfm,
- RepositoryIndexSearcherHolder indexSearcherHolder)
- throws RepositoryException, RepositoryConfigurationException {
- super(config, nsReg, ntReg, itemMgr, null, service, cfm,
- indexSearcherHolder);
- }
+ public SystemSearchManager(QueryHandlerEntry config, NamespaceRegistryImpl nsReg,
NodeTypeDataManager ntReg,
+ WorkspacePersistentDataManager itemMgr, DocumentReaderService service,
ConfigurationManager cfm,
+ RepositoryIndexSearcherHolder indexSearcherHolder) throws RepositoryException,
RepositoryConfigurationException
+ {
+ super(config, nsReg, ntReg, itemMgr, null, service, cfm, indexSearcherHolder);
+ }
- @Override
- public void onSaveItems(ItemStateChangesLog changesLog) {
- if (!isStarted) {
- changesLogBuffer.add(changesLog);
- } else {
- super.onSaveItems(changesLog);
- }
- }
+ @Override
+ public void onSaveItems(ItemStateChangesLog changesLog)
+ {
+ if (!isStarted)
+ {
+ changesLogBuffer.add(changesLog);
+ }
+ else
+ {
+ super.onSaveItems(changesLog);
+ }
+ }
- @Override
- public void start() {
+ @Override
+ public void start()
+ {
- isStarted = true;
- try {
- if (indexingTree == null) {
- List<QPath> excludedPaths = new ArrayList<QPath>();
+ isStarted = true;
+ try
+ {
+ if (indexingTree == null)
+ {
+ List<QPath> excludedPaths = new ArrayList<QPath>();
- NodeData indexingRootNodeData = (NodeData) itemMgr
- .getItemData(Constants.SYSTEM_UUID);
+ NodeData indexingRootNodeData =
(NodeData)itemMgr.getItemData(Constants.SYSTEM_UUID);
- indexingTree = new IndexingTree(indexingRootNodeData,
- excludedPaths);
- }
- initializeQueryHandler();
+ indexingTree = new IndexingTree(indexingRootNodeData, excludedPaths);
+ }
+ initializeQueryHandler();
- }
+ }
- catch (RepositoryException e) {
- log.error(e.getLocalizedMessage());
- handler = null;
- changesLogBuffer.clear();
- changesLogBuffer = null;
- throw new RuntimeException(e);
- } catch (RepositoryConfigurationException e) {
- log.error(e.getLocalizedMessage());
- handler = null;
- changesLogBuffer.clear();
- changesLogBuffer = null;
- throw new RuntimeException(e);
- }
- for (ItemStateChangesLog bufferedChangesLog : changesLogBuffer) {
- super.onSaveItems(bufferedChangesLog);
- }
- changesLogBuffer.clear();
- changesLogBuffer = null;
- }
+ catch (RepositoryException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ throw new RuntimeException(e);
+ }
+ catch (RepositoryConfigurationException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ throw new RuntimeException(e);
+ }
+ for (ItemStateChangesLog bufferedChangesLog : changesLogBuffer)
+ {
+ super.onSaveItems(bufferedChangesLog);
+ }
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ }
- @Override
- protected QueryHandlerContext createQueryHandlerContext(
- QueryHandler parentHandler) throws RepositoryConfigurationException {
- QueryHandlerContext context = new QueryHandlerContext(itemMgr,
- indexingTree, nodeTypeDataManager, nsReg, parentHandler, config
- .getIndexDir()
- + "_" + INDEX_DIR_SUFFIX, extractor, changesLogBuffer
- .size() > 0
- && !isStarted, virtualTableResolver);
- return context;
- }
+ @Override
+ protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler)
+ throws RepositoryConfigurationException
+ {
+ QueryHandlerContext context =
+ new QueryHandlerContext(itemMgr, indexingTree, nodeTypeDataManager, nsReg,
parentHandler, getIndexDir() + "_"
+ + INDEX_DIR_SUFFIX, extractor, changesLogBuffer.size() > 0 &&
!isStarted, virtualTableResolver);
+ return context;
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,16 +16,6 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
@@ -37,272 +27,328 @@
import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.index.TermVectorOffsetInfo;
import org.apache.lucene.search.Query;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
/**
* <code>AbstractExcerpt</code> implements base functionality for an excerpt
* provider.
*/
-public abstract class AbstractExcerpt implements HighlightingExcerptProvider {
+public abstract class AbstractExcerpt implements HighlightingExcerptProvider
+{
- /**
- * Logger instance for this class.
- */
- private static final Logger log = LoggerFactory.getLogger(AbstractExcerpt.class);
+ /**
+ * Logger instance for this class.
+ */
+ private static final Logger log = LoggerFactory.getLogger(AbstractExcerpt.class);
- /**
- * The search index.
- */
- protected SearchIndex index;
+ /**
+ * The search index.
+ */
+ protected SearchIndex index;
- /**
- * The current query.
- */
- protected Query query;
+ /**
+ * The current query.
+ */
+ protected Query query;
- /**
- * Indicates whether the query is already rewritten.
- */
- private boolean rewritten = false;
+ /**
+ * Indicates whether the query is already rewritten.
+ */
+ private boolean rewritten = false;
- /**
- * {@inheritDoc}
- */
- public void init(Query query, SearchIndex index) throws IOException {
- this.index = index;
- this.query = query;
- }
+ /**
+ * {@inheritDoc}
+ */
+ public void init(Query query, SearchIndex index) throws IOException
+ {
+ this.index = index;
+ this.query = query;
+ }
- /**
- * {@inheritDoc}
- */
- public String getExcerpt(String id, int maxFragments, int maxFragmentSize)
- throws IOException {
- IndexReader reader = index.getIndexReader();
- try {
- checkRewritten(reader);
- Term idTerm = new Term(FieldNames.UUID, id);
- TermDocs tDocs = reader.termDocs(idTerm);
- int docNumber;
- Document doc;
- try {
- if (tDocs.next()) {
- docNumber = tDocs.doc();
- doc = reader.document(docNumber);
- } else {
- // node not found in index
- return null;
- }
- } finally {
- tDocs.close();
+ /**
+ * {@inheritDoc}
+ */
+ public String getExcerpt(String id, int maxFragments, int maxFragmentSize) throws
IOException
+ {
+ IndexReader reader = index.getIndexReader();
+ try
+ {
+ checkRewritten(reader);
+ Term idTerm = new Term(FieldNames.UUID, id);
+ TermDocs tDocs = reader.termDocs(idTerm);
+ int docNumber;
+ Document doc;
+ try
+ {
+ if (tDocs.next())
+ {
+ docNumber = tDocs.doc();
+ doc = reader.document(docNumber);
}
- Fieldable[] fields = doc.getFieldables(FieldNames.FULLTEXT);
- if (fields == null) {
- log.debug("Fulltext field not stored, using {}",
- SimpleExcerptProvider.class.getName());
- SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
- exProvider.init(query, index);
- return exProvider.getExcerpt(id, maxFragments, maxFragmentSize);
+ else
+ {
+ // node not found in index
+ return null;
}
- StringBuffer text = new StringBuffer();
- String separator = "";
- for (int i = 0; i < fields.length; i++) {
- if (fields[i].stringValue().length() == 0) {
- continue;
- }
- text.append(separator);
- text.append(fields[i].stringValue());
- // this is a hack! in general multiple fields with the same
- // name are handled properly, that is, offset and position is
- // calculated correctly. there is one case however where
- // the offset gets wrong:
- // if a term text ends with characters that are considered noise
- // then the offset of the next field will be off by the number
- // of noise characters.
- // therefore we delete noise characters at the end of the text.
- // this process is required for all but the last field
- if (i < fields.length - 1) {
- for (int j = text.length() - 1; j >= 0; j--) {
- if (Character.isLetterOrDigit(text.charAt(j))) {
- break;
- } else {
- text.deleteCharAt(j);
- }
- }
- }
- separator = " ";
+ }
+ finally
+ {
+ tDocs.close();
+ }
+ Fieldable[] fields = doc.getFieldables(FieldNames.FULLTEXT);
+
+ if (fields == null)
+ {
+ log.debug("Fulltext field not stored, using {}",
SimpleExcerptProvider.class.getName());
+ SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
+ exProvider.init(query, index);
+ return exProvider.getExcerpt(id, maxFragments, maxFragmentSize);
+ }
+ StringBuffer text = new StringBuffer();
+ String separator = "";
+ for (int i = 0; i < fields.length; i++)
+ {
+ if (fields[i].stringValue().length() == 0)
+ {
+ continue;
}
- TermFreqVector tfv = reader.getTermFreqVector(
- docNumber, FieldNames.FULLTEXT);
- if (tfv instanceof TermPositionVector) {
- return createExcerpt((TermPositionVector) tfv, text.toString(),
- maxFragments, maxFragmentSize);
- } else {
- log.debug("No TermPositionVector on Fulltext field.");
- return null;
+ text.append(separator);
+ text.append(fields[i].stringValue());
+ // this is a hack! in general multiple fields with the same
+ // name are handled properly, that is, offset and position is
+ // calculated correctly. there is one case however where
+ // the offset gets wrong:
+ // if a term text ends with characters that are considered noise
+ // then the offset of the next field will be off by the number
+ // of noise characters.
+ // therefore we delete noise characters at the end of the text.
+ // this process is required for all but the last field
+ if (i < fields.length - 1)
+ {
+ for (int j = text.length() - 1; j >= 0; j--)
+ {
+ if (Character.isLetterOrDigit(text.charAt(j)))
+ {
+ break;
+ }
+ else
+ {
+ text.deleteCharAt(j);
+ }
+ }
}
- } finally {
- Util.closeOrRelease(reader);
- }
- }
+ separator = " ";
+ }
+ TermFreqVector tfv = reader.getTermFreqVector(docNumber, FieldNames.FULLTEXT);
+ if (tfv instanceof TermPositionVector)
+ {
+ return createExcerpt((TermPositionVector)tfv, text.toString(), maxFragments,
maxFragmentSize);
+ }
+ else
+ {
+ log.debug("No TermPositionVector on Fulltext field.");
+ return null;
+ }
+ }
+ finally
+ {
+ Util.closeOrRelease(reader);
+ }
+ }
- /**
- * {@inheritDoc}
- */
- public String highlight(String text) throws IOException {
- checkRewritten(null);
- return createExcerpt(createTermPositionVector(text),
- text, 1, (text.length() + 1) * 2);
- }
+ /**
+ * {@inheritDoc}
+ */
+ public String highlight(String text) throws IOException
+ {
+ checkRewritten(null);
+ return createExcerpt(createTermPositionVector(text), text, 1, (text.length() + 1) *
2);
+ }
- /**
- * Creates an excerpt for the given <code>text</code> using token offset
- * information provided by <code>tpv</code>.
- *
- * @param tpv the term position vector for the fulltext field.
- * @param text the original text.
- * @param maxFragments the maximum number of fragments to create.
- * @param maxFragmentSize the maximum number of characters in a fragment.
- * @return the xml excerpt.
- * @throws IOException if an error occurs while creating the excerpt.
- */
- protected abstract String createExcerpt(TermPositionVector tpv,
- String text,
- int maxFragments,
- int maxFragmentSize)
- throws IOException;
+ /**
+ * Creates an excerpt for the given <code>text</code> using token offset
+ * information provided by <code>tpv</code>.
+ *
+ * @param tpv the term position vector for the fulltext field.
+ * @param text the original text.
+ * @param maxFragments the maximum number of fragments to create.
+ * @param maxFragmentSize the maximum number of characters in a fragment.
+ * @return the xml excerpt.
+ * @throws IOException if an error occurs while creating the excerpt.
+ */
+ protected abstract String createExcerpt(TermPositionVector tpv, String text, int
maxFragments, int maxFragmentSize)
+ throws IOException;
- /**
- * @return the extracted terms from the query.
- */
- protected final Set getQueryTerms() {
- Set extractedTerms = new HashSet();
- Set relevantTerms = new HashSet();
- query.extractTerms(extractedTerms);
- // only keep terms for fulltext fields
- Iterator it = extractedTerms.iterator();
- while (it.hasNext()) {
- Term t = (Term) it.next();
- if (t.field().equals(FieldNames.FULLTEXT)) {
- relevantTerms.add(t);
- } else {
- int idx = t.field().indexOf(FieldNames.FULLTEXT_PREFIX);
- if (idx != -1) {
- relevantTerms.add(new Term(FieldNames.FULLTEXT, t.text()));
- }
+ /**
+ * @return the extracted terms from the query.
+ */
+ protected final Set getQueryTerms()
+ {
+ Set extractedTerms = new HashSet();
+ Set relevantTerms = new HashSet();
+ query.extractTerms(extractedTerms);
+ // only keep terms for fulltext fields
+ Iterator it = extractedTerms.iterator();
+ while (it.hasNext())
+ {
+ Term t = (Term)it.next();
+ if (t.field().equals(FieldNames.FULLTEXT))
+ {
+ relevantTerms.add(t);
+ }
+ else
+ {
+ int idx = t.field().indexOf(FieldNames.FULLTEXT_PREFIX);
+ if (idx != -1)
+ {
+ relevantTerms.add(new Term(FieldNames.FULLTEXT, t.text()));
}
- }
- return relevantTerms;
- }
+ }
+ }
+ return relevantTerms;
+ }
- /**
- * Makes sure the {@link #query} is rewritten. If the query is already
- * rewritten, this method returns immediately.
- *
- * @param reader an optional index reader, if none is passed this method
- * will retrieve one from the {@link #index} and close it
- * again after the rewrite operation.
- * @throws IOException if an error occurs while the query is rewritten.
- */
- private void checkRewritten(IndexReader reader) throws IOException {
- if (!rewritten) {
- IndexReader r = reader;
- if (r == null) {
- r = index.getIndexReader();
+ /**
+ * Makes sure the {@link #query} is rewritten. If the query is already
+ * rewritten, this method returns immediately.
+ *
+ * @param reader an optional index reader, if none is passed this method
+ * will retrieve one from the {@link #index} and close it
+ * again after the rewrite operation.
+ * @throws IOException if an error occurs while the query is rewritten.
+ */
+ private void checkRewritten(IndexReader reader) throws IOException
+ {
+ if (!rewritten)
+ {
+ IndexReader r = reader;
+ if (r == null)
+ {
+ r = index.getIndexReader();
+ }
+ try
+ {
+ query = query.rewrite(r);
+ }
+ finally
+ {
+ // only close reader if this method opened one
+ if (reader == null)
+ {
+ Util.closeOrRelease(r);
}
- try {
- query = query.rewrite(r);
- } finally {
- // only close reader if this method opened one
- if (reader == null) {
- Util.closeOrRelease(r);
- }
- }
- rewritten = true;
- }
- }
+ }
+ rewritten = true;
+ }
+ }
- /**
- * @param text the text.
- * @return a <code>TermPositionVector</code> for the given text.
- */
- private TermPositionVector createTermPositionVector(String text) {
- // term -> TermVectorOffsetInfo[]
- final SortedMap termMap = new TreeMap();
- Reader r = new StringReader(text);
- TokenStream ts = index.getTextAnalyzer().tokenStream("", r);
- Token t = new Token();
- try {
- while ((t = ts.next(t)) != null) {
- String termText = t.term();
- TermVectorOffsetInfo[] info =
- (TermVectorOffsetInfo[]) termMap.get(termText);
- if (info == null) {
- info = new TermVectorOffsetInfo[1];
- } else {
- TermVectorOffsetInfo[] tmp = info;
- info = new TermVectorOffsetInfo[tmp.length + 1];
- System.arraycopy(tmp, 0, info, 0, tmp.length);
- }
- info[info.length - 1] = new TermVectorOffsetInfo(
- t.startOffset(), t.endOffset());
- termMap.put(termText, info);
+ /**
+ * @param text the text.
+ * @return a <code>TermPositionVector</code> for the given text.
+ */
+ private TermPositionVector createTermPositionVector(String text)
+ {
+ // term -> TermVectorOffsetInfo[]
+ final SortedMap termMap = new TreeMap();
+ Reader r = new StringReader(text);
+ TokenStream ts = index.getTextAnalyzer().tokenStream("", r);
+ Token t = new Token();
+ try
+ {
+ while ((t = ts.next(t)) != null)
+ {
+ String termText = t.term();
+ TermVectorOffsetInfo[] info = (TermVectorOffsetInfo[])termMap.get(termText);
+ if (info == null)
+ {
+ info = new TermVectorOffsetInfo[1];
}
- } catch (IOException e) {
- // should never happen, we are reading from a string
- }
+ else
+ {
+ TermVectorOffsetInfo[] tmp = info;
+ info = new TermVectorOffsetInfo[tmp.length + 1];
+ System.arraycopy(tmp, 0, info, 0, tmp.length);
+ }
+ info[info.length - 1] = new TermVectorOffsetInfo(t.startOffset(),
t.endOffset());
+ termMap.put(termText, info);
+ }
+ }
+ catch (IOException e)
+ {
+ // should never happen, we are reading from a string
+ }
- return new TermPositionVector() {
+ return new TermPositionVector()
+ {
- private String[] terms =
- (String[]) termMap.keySet().toArray(new String[termMap.size()]);
+ private String[] terms = (String[])termMap.keySet().toArray(new
String[termMap.size()]);
- public int[] getTermPositions(int index) {
- return null;
- }
+ public int[] getTermPositions(int index)
+ {
+ return null;
+ }
- public TermVectorOffsetInfo[] getOffsets(int index) {
- TermVectorOffsetInfo[] info = TermVectorOffsetInfo.EMPTY_OFFSET_INFO;
- if (index >= 0 && index < terms.length) {
- info = (TermVectorOffsetInfo[]) termMap.get(terms[index]);
- }
- return info;
+ public TermVectorOffsetInfo[] getOffsets(int index)
+ {
+ TermVectorOffsetInfo[] info = TermVectorOffsetInfo.EMPTY_OFFSET_INFO;
+ if (index >= 0 && index < terms.length)
+ {
+ info = (TermVectorOffsetInfo[])termMap.get(terms[index]);
}
+ return info;
+ }
- public String getField() {
- return "";
- }
+ public String getField()
+ {
+ return "";
+ }
- public int size() {
- return terms.length;
- }
+ public int size()
+ {
+ return terms.length;
+ }
- public String[] getTerms() {
- return terms;
- }
+ public String[] getTerms()
+ {
+ return terms;
+ }
- public int[] getTermFrequencies() {
- int[] freqs = new int[terms.length];
- for (int i = 0; i < terms.length; i++) {
- freqs[i] = ((TermVectorOffsetInfo[]) termMap.get(terms[i])).length;
- }
- return freqs;
+ public int[] getTermFrequencies()
+ {
+ int[] freqs = new int[terms.length];
+ for (int i = 0; i < terms.length; i++)
+ {
+ freqs[i] = ((TermVectorOffsetInfo[])termMap.get(terms[i])).length;
}
+ return freqs;
+ }
- public int indexOf(String term) {
- int res = Arrays.binarySearch(terms, term);
- return res >= 0 ? res : -1;
- }
+ public int indexOf(String term)
+ {
+ int res = Arrays.binarySearch(terms, term);
+ return res >= 0 ? res : -1;
+ }
- public int[] indexesOf(String[] terms, int start, int len) {
- int[] res = new int[len];
- for (int i = 0; i < len; i++) {
- res[i] = indexOf(terms[i]);
- }
- return res;
+ public int[] indexesOf(String[] terms, int start, int len)
+ {
+ int[] res = new int[len];
+ for (int i = 0; i < len; i++)
+ {
+ res[i] = indexOf(terms[i]);
}
- };
- }
+ return res;
+ }
+ };
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheck.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheck.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheck.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,6 +16,12 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
+import org.apache.lucene.document.Document;
+import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
+import org.exoplatform.services.jcr.datamodel.NodeData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
@@ -25,13 +31,6 @@
import javax.jcr.RepositoryException;
-import org.apache.lucene.document.Document;
-import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
-import org.exoplatform.services.jcr.datamodel.NodeData;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
* Implements a consistency check on the search index. Currently the following
* checks are implemented:
@@ -44,328 +43,381 @@
* in the index for such a node are removed, and the node is re-indexed.</li>
* </ul>
*/
-class ConsistencyCheck {
+class ConsistencyCheck
+{
- /**
- * Logger instance for this class
- */
- private static final Logger log = LoggerFactory.getLogger(ConsistencyCheck.class);
+ /**
+ * Logger instance for this class
+ */
+ private static final Logger log = LoggerFactory.getLogger(ConsistencyCheck.class);
- /**
- * The ItemStateManager of the workspace.
- */
- private final ItemDataConsumer stateMgr;
+ /**
+ * The ItemStateManager of the workspace.
+ */
+ private final ItemDataConsumer stateMgr;
- /**
- * The index to check.
- */
- private final MultiIndex index;
+ /**
+ * The index to check.
+ */
+ private final MultiIndex index;
- /**
- * All the document UUIDs within the index.
- */
- private Set documentUUIDs;
+ /**
+ * All the document UUIDs within the index.
+ */
+ private Set documentUUIDs;
- /**
- * List of all errors.
- */
- private final List errors = new ArrayList();
+ /**
+ * List of all errors.
+ */
+ private final List<ConsistencyCheckError> errors = new
ArrayList<ConsistencyCheckError>();
- /**
- * Private constructor.
- */
- private ConsistencyCheck(MultiIndex index, ItemDataConsumer mgr) {
- this.index = index;
- this.stateMgr = mgr;
- }
+ /**
+ * Private constructor.
+ */
+ private ConsistencyCheck(MultiIndex index, ItemDataConsumer mgr)
+ {
+ this.index = index;
+ this.stateMgr = mgr;
+ }
- /**
- * Runs the consistency check on <code>index</code>.
- *
- * @param index the index to check.
- * @param mgr the ItemStateManager from where to load content.
- * @return the consistency check with the results.
- * @throws IOException if an error occurs while checking.
- * @throws RepositoryException
- */
- static ConsistencyCheck run(MultiIndex index, ItemDataConsumer mgr) throws
IOException, RepositoryException {
- ConsistencyCheck check = new ConsistencyCheck(index, mgr);
- check.run();
- return check;
- }
+ /**
+ * Runs the consistency check on <code>index</code>.
+ *
+ * @param index the index to check.
+ * @param mgr the ItemStateManager from where to load content.
+ * @return the consistency check with the results.
+ * @throws IOException if an error occurs while checking.
+ * @throws RepositoryException
+ */
+ static ConsistencyCheck run(MultiIndex index, ItemDataConsumer mgr) throws
IOException, RepositoryException
+ {
+ ConsistencyCheck check = new ConsistencyCheck(index, mgr);
+ check.run();
+ return check;
+ }
- /**
- * Repairs detected errors during the consistency check.
- * @param ignoreFailure if <code>true</code> repair failures are
ignored,
- * the repair continues without throwing an exception. If
- * <code>false</code> the repair procedure is aborted on the first
- * repair failure.
- * @throws IOException if a repair failure occurs.
- */
- void repair(boolean ignoreFailure) throws IOException {
- if (errors.size() == 0) {
- log.info("No errors found.");
- return;
- }
- int notRepairable = 0;
- for (Iterator it = errors.iterator(); it.hasNext();) {
- ConsistencyCheckError error = (ConsistencyCheckError) it.next();
- try {
- if (error.repairable()) {
- error.repair();
- } else {
- log.warn("Not repairable: " + error);
- notRepairable++;
- }
- } catch (Exception e) {
- if (ignoreFailure) {
- log.warn("Exception while reparing: " + e);
- } else {
- if (!(e instanceof IOException)) {
- e = new IOException(e.getMessage());
- }
- throw (IOException) e;
- }
+ /**
+ * Repairs detected errors during the consistency check.
+ * @param ignoreFailure if <code>true</code> repair failures are ignored,
+ * the repair continues without throwing an exception. If
+ * <code>false</code> the repair procedure is aborted on the first
+ * repair failure.
+ * @throws IOException if a repair failure occurs.
+ */
+ void repair(boolean ignoreFailure) throws IOException
+ {
+ if (errors.size() == 0)
+ {
+ log.info("No errors found.");
+ return;
+ }
+ int notRepairable = 0;
+ for (Iterator<ConsistencyCheckError> it = errors.iterator(); it.hasNext();)
+ {
+ ConsistencyCheckError error = it.next();
+ try
+ {
+ if (error.repairable())
+ {
+ error.repair();
}
- }
- log.info("Repaired " + (errors.size() - notRepairable) + "
errors.");
- if (notRepairable > 0) {
- log.warn("" + notRepairable + " error(s) not
repairable.");
- }
- }
+ else
+ {
+ log.warn("Not repairable: " + error);
+ notRepairable++;
+ }
+ }
+ catch (Exception e)
+ {
+ if (ignoreFailure)
+ {
+ log.warn("Exception while reparing: " + e);
+ }
+ else
+ {
+ if (!(e instanceof IOException))
+ {
+ e = new IOException(e.getMessage());
+ }
+ throw (IOException)e;
+ }
+ }
+ }
+ log.info("Repaired " + (errors.size() - notRepairable) + "
errors.");
+ if (notRepairable > 0)
+ {
+ log.warn("" + notRepairable + " error(s) not repairable.");
+ }
+ }
- /**
- * Returns the errors detected by the consistency check.
- * @return the errors detected by the consistency check.
- */
- List getErrors() {
- return new ArrayList(errors);
- }
+ /**
+ * Returns the errors detected by the consistency check.
+ * @return the errors detected by the consistency check.
+ */
+ List<ConsistencyCheckError> getErrors()
+ {
+ return new ArrayList<ConsistencyCheckError>(errors);
+ }
- /**
- * Runs the consistency check.
- * @throws IOException if an error occurs while running the check.
- * @throws RepositoryException
- */
- private void run() throws IOException, RepositoryException {
- // UUIDs of multiple nodes in the index
- Set multipleEntries = new HashSet();
- // collect all documents UUIDs
- documentUUIDs = new HashSet();
- CachingMultiIndexReader reader = index.getIndexReader();
- try {
- for (int i = 0; i < reader.maxDoc(); i++) {
- if (i > 10 && i % (reader.maxDoc() / 5) == 0) {
- long progress = Math.round((100.0 * (float) i) / ((float)
reader.maxDoc() * 2f));
- log.info("progress: " + progress + "%");
- }
- if (reader.isDeleted(i)) {
- continue;
- }
- Document d = reader.document(i, FieldSelectors.UUID);
- String uuid = d.get(FieldNames.UUID);
- if (stateMgr.getItemData(uuid) != null){
- if (!documentUUIDs.add(uuid)) {
- multipleEntries.add(uuid);
- }
- } else {
- errors.add(new NodeDeleted(uuid));
- }
+ /**
+ * Runs the consistency check.
+ * @throws IOException if an error occurs while running the check.
+ * @throws RepositoryException
+ */
+ private void run() throws IOException, RepositoryException
+ {
+ // UUIDs of multiple nodes in the index
+ Set multipleEntries = new HashSet();
+ // collect all documents UUIDs
+ documentUUIDs = new HashSet();
+ CachingMultiIndexReader reader = index.getIndexReader();
+ try
+ {
+ for (int i = 0; i < reader.maxDoc(); i++)
+ {
+ if (i > 10 && i % (reader.maxDoc() / 5) == 0)
+ {
+ long progress = Math.round((100.0 * (float)i) / ((float)reader.maxDoc() *
2f));
+ log.info("progress: " + progress + "%");
}
- } finally {
- reader.release();
- }
+ if (reader.isDeleted(i))
+ {
+ continue;
+ }
+ Document d = reader.document(i, FieldSelectors.UUID);
+ String uuid = d.get(FieldNames.UUID);
+ if (stateMgr.getItemData(uuid) != null)
+ {
+ if (!documentUUIDs.add(uuid))
+ {
+ multipleEntries.add(uuid);
+ }
+ }
+ else
+ {
+ errors.add(new NodeDeleted(uuid));
+ }
+ }
+ }
+ finally
+ {
+ reader.release();
+ }
- // create multiple entries errors
- for (Iterator it = multipleEntries.iterator(); it.hasNext();) {
- errors.add(new MultipleEntries((String)it.next()));
- }
+ // create multiple entries errors
+ for (Iterator it = multipleEntries.iterator(); it.hasNext();)
+ {
+ errors.add(new MultipleEntries((String)it.next()));
+ }
- reader = index.getIndexReader();
- try {
- // run through documents again and check parent
- for (int i = 0; i < reader.maxDoc(); i++) {
- if (i > 10 && i % (reader.maxDoc() / 5) == 0) {
- long progress = Math.round((100.0 * (float) i) / ((float)
reader.maxDoc() * 2f));
- log.info("progress: " + (progress + 50) + "%");
- }
- if (reader.isDeleted(i)) {
- continue;
- }
- Document d = reader.document(i, FieldSelectors.UUID_AND_PARENT);
- String uuid = d.get(FieldNames.UUID);
- String parentUUIDString = d.get(FieldNames.PARENT);
+ reader = index.getIndexReader();
+ try
+ {
+ // run through documents again and check parent
+ for (int i = 0; i < reader.maxDoc(); i++)
+ {
+ if (i > 10 && i % (reader.maxDoc() / 5) == 0)
+ {
+ long progress = Math.round((100.0 * (float)i) / ((float)reader.maxDoc() *
2f));
+ log.info("progress: " + (progress + 50) + "%");
+ }
+ if (reader.isDeleted(i))
+ {
+ continue;
+ }
+ Document d = reader.document(i, FieldSelectors.UUID_AND_PARENT);
+ String uuid = d.get(FieldNames.UUID);
+ String parentUUIDString = d.get(FieldNames.PARENT);
- if (parentUUIDString == null || documentUUIDs.contains(parentUUIDString))
{
- continue;
- }
-
-
- // parent is missing
- //NodeId parentId = new NodeId(parentUUID);
- if (stateMgr.getItemData(parentUUIDString) != null)
- {
- errors.add(new MissingAncestor(uuid, parentUUIDString));
- }
- else
- {
- errors.add(new UnknownParent(uuid, parentUUIDString));
- }
+ if (parentUUIDString == null || documentUUIDs.contains(parentUUIDString))
+ {
+ continue;
}
- } finally {
- reader.release();
- }
- }
- /**
- * Returns the path for <code>node</code>. If an error occurs this
method
- * returns the uuid of the node.
- *
- * @param node the node to retrieve the path from
- * @return the path of the node or its uuid.
- */
- private String getPath(NodeData node) {
- // remember as fallback
- return node.getQPath().getAsString();
- }
+ // parent is missing
+ //NodeId parentId = new NodeId(parentUUID);
+ if (stateMgr.getItemData(parentUUIDString) != null)
+ {
+ errors.add(new MissingAncestor(uuid, parentUUIDString));
+ }
+ else
+ {
+ errors.add(new UnknownParent(uuid, parentUUIDString));
+ }
+ }
+ }
+ finally
+ {
+ reader.release();
+ }
+ }
- //-------------------< ConsistencyCheckError classes >----------------------
+ /**
+ * Returns the path for <code>node</code>. If an error occurs this method
+ * returns the uuid of the node.
+ *
+ * @param node the node to retrieve the path from
+ * @return the path of the node or its uuid.
+ */
+ private String getPath(NodeData node)
+ {
+ // remember as fallback
+ return node.getQPath().getAsString();
+ }
- /**
- * One or more ancestors of an indexed node are not available in the index.
- */
- private class MissingAncestor extends ConsistencyCheckError {
+ //-------------------< ConsistencyCheckError classes >----------------------
- private final String parentUUID;
+ /**
+ * One or more ancestors of an indexed node are not available in the index.
+ */
+ private class MissingAncestor extends ConsistencyCheckError
+ {
- private MissingAncestor(String uuid, String parentUUID) {
- super("Parent of " + uuid + " missing in index. Parent: "
+ parentUUID, uuid);
- this.parentUUID = parentUUID;
- }
+ private final String parentUUID;
- /**
- * Returns <code>true</code>.
- * @return <code>true</code>.
- */
- public boolean repairable() {
- return true;
- }
+ private MissingAncestor(String uuid, String parentUUID)
+ {
+ super("Parent of " + uuid + " missing in index. Parent: " +
parentUUID, uuid);
+ this.parentUUID = parentUUID;
+ }
- /**
- * Repairs the missing node by indexing the missing ancestors.
- * @throws IOException if an error occurs while repairing.
- */
- public void repair() throws IOException {
- String parentId = parentUUID;
- while (parentId != null && !documentUUIDs.contains(parentId))
- {
- try
- {
- NodeData n = (NodeData)stateMgr.getItemData(parentId);
- log.info("Reparing missing node " + getPath(n));
- Document d = index.createDocument(n);
- index.addDocument(d);
- documentUUIDs.add(n.getIdentifier());
- parentId = n.getParentIdentifier();
- }
- catch (RepositoryException e)
- {
- throw new IOException(e.toString());
- }
- }
- }
- }
+ /**
+ * Returns <code>true</code>.
+ * @return <code>true</code>.
+ */
+ public boolean repairable()
+ {
+ return true;
+ }
- /**
- * The parent of a node is not available through the ItemStateManager.
- */
- private class UnknownParent extends ConsistencyCheckError {
+ /**
+ * Repairs the missing node by indexing the missing ancestors.
+ * @throws IOException if an error occurs while repairing.
+ */
+ public void repair() throws IOException
+ {
+ String parentId = parentUUID;
+ while (parentId != null && !documentUUIDs.contains(parentId))
+ {
+ try
+ {
+ NodeData n = (NodeData)stateMgr.getItemData(parentId);
+ log.info("Reparing missing node " + getPath(n));
+ Document d = index.createDocument(n);
+ index.addDocument(d);
+ documentUUIDs.add(n.getIdentifier());
+ parentId = n.getParentIdentifier();
+ }
+ catch (RepositoryException e)
+ {
+ throw new IOException(e.toString());
+ }
+ }
+ }
+ }
- private UnknownParent(String uuid, String parentUUID) {
- super("Node " + uuid + " has unknown parent: " +
parentUUID, uuid);
- }
+ /**
+ * The parent of a node is not available through the ItemStateManager.
+ */
+ private class UnknownParent extends ConsistencyCheckError
+ {
- /**
- * Not reparable (yet).
- * @return <code>false</code>.
- */
- public boolean repairable() {
- return false;
- }
+ private UnknownParent(String uuid, String parentUUID)
+ {
+ super("Node " + uuid + " has unknown parent: " + parentUUID,
uuid);
+ }
- /**
- * No operation.
- */
- public void repair() throws IOException {
- log.warn("Unknown parent for " + uuid + " cannot be
repaired");
- }
- }
+ /**
+ * Not repairable (yet).
+ * @return <code>false</code>.
+ */
+ public boolean repairable()
+ {
+ return false;
+ }
- /**
- * A node is present multiple times in the index.
- */
- private class MultipleEntries extends ConsistencyCheckError {
+ /**
+ * No operation.
+ */
+ public void repair() throws IOException
+ {
+ log.warn("Unknown parent for " + uuid + " cannot be
repaired");
+ }
+ }
- MultipleEntries(String uuid) {
- super("Multiple entries found for node " + uuid, uuid);
- }
+ /**
+ * A node is present multiple times in the index.
+ */
+ private class MultipleEntries extends ConsistencyCheckError
+ {
- /**
- * Returns <code>true</code>.
- * @return <code>true</code>.
- */
- public boolean repairable() {
- return true;
- }
+ MultipleEntries(String uuid)
+ {
+ super("Multiple entries found for node " + uuid, uuid);
+ }
- /**
- * Removes the nodes with the identical uuids from the index and
- * re-index the node.
- * @throws IOException if an error occurs while repairing.
- */
- public void repair() throws IOException {
- // first remove all occurrences
- index.removeAllDocuments(uuid);
- // then re-index the node
- try
- {
- NodeData node = (NodeData)stateMgr.getItemData(uuid);
- log.info("Re-indexing duplicate node occurrences in index: " +
getPath(node));
- Document d = index.createDocument(node);
- index.addDocument(d);
- documentUUIDs.add(node.getIdentifier());
- }
- catch (RepositoryException e)
- {
- throw new IOException(e.toString());
- }
- }
- }
+ /**
+ * Returns <code>true</code>.
+ * @return <code>true</code>.
+ */
+ public boolean repairable()
+ {
+ return true;
+ }
- /**
- * Indicates that a node has been deleted but is still in the index.
- */
- private class NodeDeleted extends ConsistencyCheckError {
+ /**
+ * Removes the nodes with the identical uuids from the index and
+ * re-index the node.
+ * @throws IOException if an error occurs while repairing.
+ */
+ public void repair() throws IOException
+ {
+ // first remove all occurrences
+ index.removeAllDocuments(uuid);
+ // then re-index the node
+ try
+ {
+ NodeData node = (NodeData)stateMgr.getItemData(uuid);
+ log.info("Re-indexing duplicate node occurrences in index: " +
getPath(node));
+ Document d = index.createDocument(node);
+ index.addDocument(d);
+ documentUUIDs.add(node.getIdentifier());
+ }
+ catch (RepositoryException e)
+ {
+ throw new IOException(e.toString());
+ }
+ }
+ }
- NodeDeleted(String uuid) {
- super("Node " + uuid + " does not longer exist.", uuid);
- }
+ /**
+ * Indicates that a node has been deleted but is still in the index.
+ */
+ private class NodeDeleted extends ConsistencyCheckError
+ {
- /**
- * Returns <code>true</code>.
- * @return <code>true</code>.
- */
- public boolean repairable() {
- return true;
- }
+ NodeDeleted(String uuid)
+ {
+ super("Node " + uuid + " does not longer exist.", uuid);
+ }
- /**
- * Deletes the nodes from the index.
- * @throws IOException if an error occurs while repairing.
- */
- public void repair() throws IOException {
- log.info("Removing deleted node from index: " + uuid);
- index.removeDocument(uuid);
- }
- }
+ /**
+ * Returns <code>true</code>.
+ * @return <code>true</code>.
+ */
+ public boolean repairable()
+ {
+ return true;
+ }
+
+ /**
+ * Deletes the nodes from the index.
+ * @throws IOException if an error occurs while repairing.
+ */
+ public void repair() throws IOException
+ {
+ log.info("Removing deleted node from index: " + uuid);
+ index.removeDocument(uuid);
+ }
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexingConfigurationImpl.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexingConfigurationImpl.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexingConfigurationImpl.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -18,7 +18,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.exoplatform.services.jcr.core.NamespaceAccessor;
-import org.exoplatform.services.jcr.core.nodetype.NodeTypeData;
import org.exoplatform.services.jcr.core.nodetype.NodeTypeDataManager;
import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
import org.exoplatform.services.jcr.datamodel.IllegalNameException;
@@ -51,6 +50,7 @@
import java.util.List;
import java.util.Map;
import java.util.Properties;
+import java.util.Set;
import javax.jcr.PropertyType;
import javax.jcr.RepositoryException;
@@ -114,7 +114,7 @@
// nsResolver);
NodeTypeDataManager ntReg = context.getNodeTypeDataManager();
- List<NodeTypeData> ntNames = ntReg.getAllNodeTypes();
+ //List<NodeTypeData> ntNames = ntReg.getAllNodeTypes();
List<AggregateRuleImpl> idxAggregates = new
ArrayList<AggregateRuleImpl>();
NodeList indexingConfigs = config.getChildNodes();
for (int i = 0; i < indexingConfigs.getLength(); i++)
@@ -125,21 +125,21 @@
IndexingRule element = new IndexingRule(configNode);
// register under node type and all its sub types
log.debug("Found rule '{}' for NodeType '{}'",
element, element.getNodeTypeName());
- for (NodeTypeData nodeTypeData : ntNames)
+ Set<InternalQName> subs =
ntReg.getSubtypes(element.getNodeTypeName());
+ subs.add(element.getNodeTypeName());
+ for (InternalQName subTypeName : subs)
{
-
- if (ntReg.isNodeType(element.getNodeTypeName(), nodeTypeData.getName()))
+ List<IndexingRule> perNtConfig = configElements.get(subTypeName);
+ if (perNtConfig == null)
{
- List<IndexingRule> perNtConfig =
configElements.get(nodeTypeData);
- if (perNtConfig == null)
- {
- perNtConfig = new ArrayList<IndexingRule>();
- configElements.put(nodeTypeData.getName(), perNtConfig);
- }
- log.debug("Registering it for name '{}'",
nodeTypeData);
- perNtConfig.add(new IndexingRule(element, nodeTypeData.getName()));
+ perNtConfig = new ArrayList<IndexingRule>();
+ configElements.put(subTypeName, perNtConfig);
}
+ log.debug("Registering it for name '{}'", subTypeName);
+ perNtConfig.add(new IndexingRule(element, subTypeName));
+
}
+
}
else if (configNode.getNodeName().equals("aggregate"))
{
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -75,1129 +75,1236 @@
* index. todo introduce a node type hierarchy for efficient translation of
* NodeTypeQueryNode
*/
-public class LuceneQueryBuilder implements QueryNodeVisitor {
- /**
- * Namespace URI for xpath functions
- */
- private static final String NS_FN_PREFIX = "fn";
+public class LuceneQueryBuilder implements QueryNodeVisitor
+{
+ /**
+ * Namespace URI for xpath functions
+ */
+ private static final String NS_FN_PREFIX = "fn";
- public static final String NS_FN_URI =
"http://www.w3.org/2005/xpath-functions";
+ public static final String NS_FN_URI =
"http://www.w3.org/2005/xpath-functions";
- /**
- * Deprecated namespace URI for xpath functions
- */
- private static final String NS_FN_OLD_PREFIX = "fn_old";
+ /**
+ * Deprecated namespace URI for xpath functions
+ */
+ private static final String NS_FN_OLD_PREFIX = "fn_old";
- public static final String NS_FN_OLD_URI =
"http://www.w3.org/2004/10/xpath-functions";
+ public static final String NS_FN_OLD_URI =
"http://www.w3.org/2004/10/xpath-functions";
- /**
- * Namespace URI for XML schema
- */
- private static final String NS_XS_PREFIX = "xs";
+ /**
+ * Namespace URI for XML schema
+ */
+ private static final String NS_XS_PREFIX = "xs";
- public static final String NS_XS_URI = "http://www.w3.org/2001/XMLSchema";
+ public static final String NS_XS_URI = "http://www.w3.org/2001/XMLSchema";
- /**
- * Logger for this class
- */
- private static final Logger log = LoggerFactory
- .getLogger(LuceneQueryBuilder.class);
+ /**
+ * Logger for this class
+ */
+ private static final Logger log = LoggerFactory.getLogger(LuceneQueryBuilder.class);
- /**
- * Root node of the abstract query tree
- */
- private final QueryRootNode root;
+ /**
+ * Root node of the abstract query tree
+ */
+ private final QueryRootNode root;
- /**
- * Session of the user executing this query
- */
- private final SessionImpl session;
+ /**
+ * Session of the user executing this query
+ */
+ private final SessionImpl session;
- /**
- * The shared item state manager of the workspace.
- */
- private final ItemDataConsumer sharedItemMgr;
+ /**
+ * The shared item state manager of the workspace.
+ */
+ private final ItemDataConsumer sharedItemMgr;
- // /**
- // * A hierarchy manager based on {@link #sharedItemMgr} to resolve paths.
- // */
- // private final HierarchyManager hmgr;
+ // /**
+ // * A hierarchy manager based on {@link #sharedItemMgr} to resolve paths.
+ // */
+ // private final HierarchyManager hmgr;
- /**
- * Namespace mappings to internal prefixes
- */
- private final NamespaceMappings nsMappings;
+ /**
+ * Namespace mappings to internal prefixes
+ */
+ private final NamespaceMappings nsMappings;
- /**
- * Name and Path resolver
- */
- private final LocationFactory resolver;
+ /**
+ * Name and Path resolver
+ */
+ private final LocationFactory resolver;
- /**
- * The analyzer instance to use for contains function query parsing
- */
- private final Analyzer analyzer;
+ /**
+ * The analyzer instance to use for contains function query parsing
+ */
+ private final Analyzer analyzer;
- /**
- * The property type registry.
- */
- private final PropertyTypeRegistry propRegistry;
+ /**
+ * The property type registry.
+ */
+ private final PropertyTypeRegistry propRegistry;
- /**
- * The synonym provider or <code>null</code> if none is configured.
- */
- private final SynonymProvider synonymProvider;
+ /**
+ * The synonym provider or <code>null</code> if none is configured.
+ */
+ private final SynonymProvider synonymProvider;
- /**
- * Wether the index format is new or old.
- */
- private final IndexFormatVersion indexFormatVersion;
+ /**
+ * Whether the index format is new or old.
+ */
+ private final IndexFormatVersion indexFormatVersion;
- /**
- * Exceptions thrown during tree translation
- */
- private final List exceptions = new ArrayList();
+ /**
+ * Exceptions thrown during tree translation
+ */
+ private final List exceptions = new ArrayList();
- private final NodeTypeDataManager nodeTypeDataManager;
+ private final NodeTypeDataManager nodeTypeDataManager;
- private final VirtualTableResolver<Query> virtualTableResolver;
+ private final VirtualTableResolver<Query> virtualTableResolver;
- /**
- * Creates a new <code>LuceneQueryBuilder</code> instance.
- *
- * @param root
- * the root node of the abstract query tree.
- * @param session
- * of the user executing this query.
- * @param sharedItemMgr
- * the shared item state manager of the workspace.
- * @param hmgr
- * a hierarchy manager based on sharedItemMgr.
- * @param nsMappings
- * namespace resolver for internal prefixes.
- * @param analyzer
- * for parsing the query statement of the contains function.
- * @param propReg
- * the property type registry.
- * @param synonymProvider
- * the synonym provider or <code>null</code> if node is
- * configured.
- * @param indexFormatVersion
- * the index format version for the lucene query.
- * @param virtualTableResolver
- * @throws RepositoryException
- */
- private LuceneQueryBuilder(
- QueryRootNode root,
- SessionImpl session,
- ItemDataConsumer sharedItemMgr,
- // HierarchyManager hmgr,
- NamespaceMappings nsMappings, Analyzer analyzer,
- PropertyTypeRegistry propReg, SynonymProvider synonymProvider,
- IndexFormatVersion indexFormatVersion,
- VirtualTableResolver<Query> virtualTableResolver)
- throws RepositoryException {
- this.root = root;
- this.session = session;
- this.sharedItemMgr = sharedItemMgr;
- // this.hmgr = hmgr;
- this.nsMappings = nsMappings;
- this.analyzer = analyzer;
- this.propRegistry = propReg;
- this.synonymProvider = synonymProvider;
- this.indexFormatVersion = indexFormatVersion;
- this.virtualTableResolver = virtualTableResolver;
- this.nodeTypeDataManager = session.getWorkspace().getNodeTypesHolder();
- this.resolver = new LocationFactory(nsMappings);
- }
+ /**
+ * Creates a new <code>LuceneQueryBuilder</code> instance.
+ *
+ * @param root
+ * the root node of the abstract query tree.
+ * @param session
+ * of the user executing this query.
+ * @param sharedItemMgr
+ * the shared item state manager of the workspace.
+ * @param hmgr
+ * a hierarchy manager based on sharedItemMgr.
+ * @param nsMappings
+ * namespace resolver for internal prefixes.
+ * @param analyzer
+ * for parsing the query statement of the contains function.
+ * @param propReg
+ * the property type registry.
+ * @param synonymProvider
+ * the synonym provider or <code>null</code> if none is
+ * configured.
+ * @param indexFormatVersion
+ * the index format version for the lucene query.
+ * @param virtualTableResolver
+ * @throws RepositoryException
+ */
+ private LuceneQueryBuilder(QueryRootNode root, SessionImpl session, ItemDataConsumer
sharedItemMgr,
+ // HierarchyManager hmgr,
+ NamespaceMappings nsMappings, Analyzer analyzer, PropertyTypeRegistry propReg,
SynonymProvider synonymProvider,
+ IndexFormatVersion indexFormatVersion, VirtualTableResolver<Query>
virtualTableResolver)
+ throws RepositoryException
+ {
+ this.root = root;
+ this.session = session;
+ this.sharedItemMgr = sharedItemMgr;
+ // this.hmgr = hmgr;
+ this.nsMappings = nsMappings;
+ this.analyzer = analyzer;
+ this.propRegistry = propReg;
+ this.synonymProvider = synonymProvider;
+ this.indexFormatVersion = indexFormatVersion;
+ this.virtualTableResolver = virtualTableResolver;
+ this.nodeTypeDataManager = session.getWorkspace().getNodeTypesHolder();
+ this.resolver = new LocationFactory(nsMappings);
+ }
- /**
- * Creates a lucene {@link org.apache.lucene.search.Query} tree from an
- * abstract query tree.
- *
- * @param root
- * the root node of the abstract query tree.
- * @param session
- * of the user executing the query.
- * @param sharedItemMgr
- * the shared item state manager of the workspace.
- * @param nsMappings
- * namespace resolver for internal prefixes.
- * @param analyzer
- * for parsing the query statement of the contains function.
- * @param propReg
- * the property type registry to lookup type information.
- * @param synonymProvider
- * the synonym provider or <code>null</code> if node is
- * configured.
- * @param indexFormatVersion
- * the index format version to be used
- * @return the lucene query tree.
- * @throws RepositoryException
- * if an error occurs during the translation.
- */
- public static Query createQuery(QueryRootNode root, SessionImpl session,
- ItemDataConsumer sharedItemMgr, NamespaceMappings nsMappings,
- Analyzer analyzer, PropertyTypeRegistry propReg,
- SynonymProvider synonymProvider,
- IndexFormatVersion indexFormatVersion,
- VirtualTableResolver<Query> virtualTableResolver)
- throws RepositoryException {
+ /**
+ * Creates a lucene {@link org.apache.lucene.search.Query} tree from an
+ * abstract query tree.
+ *
+ * @param root
+ * the root node of the abstract query tree.
+ * @param session
+ * of the user executing the query.
+ * @param sharedItemMgr
+ * the shared item state manager of the workspace.
+ * @param nsMappings
+ * namespace resolver for internal prefixes.
+ * @param analyzer
+ * for parsing the query statement of the contains function.
+ * @param propReg
+ * the property type registry to lookup type information.
+ * @param synonymProvider
+ * the synonym provider or <code>null</code> if none is
+ * configured.
+ * @param indexFormatVersion
+ * the index format version to be used
+ * @return the lucene query tree.
+ * @throws RepositoryException
+ * if an error occurs during the translation.
+ */
+ public static Query createQuery(QueryRootNode root, SessionImpl session,
ItemDataConsumer sharedItemMgr,
+ NamespaceMappings nsMappings, Analyzer analyzer, PropertyTypeRegistry propReg,
SynonymProvider synonymProvider,
+ IndexFormatVersion indexFormatVersion, VirtualTableResolver<Query>
virtualTableResolver)
+ throws RepositoryException
+ {
- LuceneQueryBuilder builder = new LuceneQueryBuilder(root, session,
- sharedItemMgr, nsMappings, analyzer, propReg, synonymProvider,
- indexFormatVersion, virtualTableResolver);
+ LuceneQueryBuilder builder =
+ new LuceneQueryBuilder(root, session, sharedItemMgr, nsMappings, analyzer,
propReg, synonymProvider,
+ indexFormatVersion, virtualTableResolver);
- Query q = builder.createLuceneQuery();
- if (builder.exceptions.size() > 0) {
- StringBuffer msg = new StringBuffer();
- for (Iterator it = builder.exceptions.iterator(); it.hasNext();) {
- msg.append(it.next().toString()).append('\n');
- }
- throw new RepositoryException("Exception building query: "
- + msg.toString());
- }
- return q;
- }
+ Query q = builder.createLuceneQuery();
+ if (builder.exceptions.size() > 0)
+ {
+ StringBuffer msg = new StringBuffer();
+ for (Iterator it = builder.exceptions.iterator(); it.hasNext();)
+ {
+ msg.append(it.next().toString()).append('\n');
+ }
+ throw new RepositoryException("Exception building query: " +
msg.toString());
+ }
+ return q;
+ }
- /**
- * Starts the tree traversal and returns the lucene
- * {@link org.apache.lucene.search.Query}.
- *
- * @return the lucene <code>Query</code>.
- * @throws RepositoryException
- */
- private Query createLuceneQuery() throws RepositoryException {
- return (Query) root.accept(this, null);
- }
+ /**
+ * Starts the tree traversal and returns the lucene
+ * {@link org.apache.lucene.search.Query}.
+ *
+ * @return the lucene <code>Query</code>.
+ * @throws RepositoryException
+ */
+ private Query createLuceneQuery() throws RepositoryException
+ {
+ return (Query)root.accept(this, null);
+ }
- // ---------------------< QueryNodeVisitor interface
- // >-----------------------
+ // ---------------------< QueryNodeVisitor interface
+ // >-----------------------
- public Object visit(QueryRootNode node, Object data)
- throws RepositoryException {
- BooleanQuery root = new BooleanQuery();
+ public Object visit(QueryRootNode node, Object data) throws RepositoryException
+ {
+ BooleanQuery root = new BooleanQuery();
- Query wrapped = root;
- if (node.getLocationNode() != null) {
- wrapped = (Query) node.getLocationNode().accept(this, root);
- }
+ Query wrapped = root;
+ if (node.getLocationNode() != null)
+ {
+ wrapped = (Query)node.getLocationNode().accept(this, root);
+ }
- return wrapped;
- }
+ return wrapped;
+ }
- public Object visit(OrQueryNode node, Object data)
- throws RepositoryException {
- BooleanQuery orQuery = new BooleanQuery();
- Object[] result = node.acceptOperands(this, null);
- for (int i = 0; i < result.length; i++) {
- Query operand = (Query) result[i];
- orQuery.add(operand, Occur.SHOULD);
- }
- return orQuery;
- }
+ public Object visit(OrQueryNode node, Object data) throws RepositoryException
+ {
+ BooleanQuery orQuery = new BooleanQuery();
+ Object[] result = node.acceptOperands(this, null);
+ for (int i = 0; i < result.length; i++)
+ {
+ Query operand = (Query)result[i];
+ orQuery.add(operand, Occur.SHOULD);
+ }
+ return orQuery;
+ }
- public Object visit(AndQueryNode node, Object data)
- throws RepositoryException {
- Object[] result = node.acceptOperands(this, null);
- if (result.length == 0) {
- return null;
- }
- BooleanQuery andQuery = new BooleanQuery();
- for (int i = 0; i < result.length; i++) {
- Query operand = (Query) result[i];
- andQuery.add(operand, Occur.MUST);
- }
- return andQuery;
- }
+ public Object visit(AndQueryNode node, Object data) throws RepositoryException
+ {
+ Object[] result = node.acceptOperands(this, null);
+ if (result.length == 0)
+ {
+ return null;
+ }
+ BooleanQuery andQuery = new BooleanQuery();
+ for (int i = 0; i < result.length; i++)
+ {
+ Query operand = (Query)result[i];
+ andQuery.add(operand, Occur.MUST);
+ }
+ return andQuery;
+ }
- public Object visit(NotQueryNode node, Object data)
- throws RepositoryException {
- Object[] result = node.acceptOperands(this, null);
- if (result.length == 0) {
- return data;
- }
- // join the results
- BooleanQuery b = new BooleanQuery();
- for (int i = 0; i < result.length; i++) {
- b.add((Query) result[i], Occur.SHOULD);
- }
- // negate
- return new NotQuery(b);
- }
+ public Object visit(NotQueryNode node, Object data) throws RepositoryException
+ {
+ Object[] result = node.acceptOperands(this, null);
+ if (result.length == 0)
+ {
+ return data;
+ }
+ // join the results
+ BooleanQuery b = new BooleanQuery();
+ for (int i = 0; i < result.length; i++)
+ {
+ b.add((Query)result[i], Occur.SHOULD);
+ }
+ // negate
+ return new NotQuery(b);
+ }
- public Object visit(ExactQueryNode node, Object data) {
- String field = "";
- String value = "";
- try {
- field = resolver.createJCRName(node.getPropertyName())
- .getAsString();
- value = resolver.createJCRName(node.getValue()).getAsString();
- } catch (RepositoryException e) {
- // will never happen, prefixes are created when unknown
- }
- return new JcrTermQuery(new Term(FieldNames.PROPERTIES,
- FieldNames.createNamedValue(field, value)));
- }
+ public Object visit(ExactQueryNode node, Object data)
+ {
+ String field = "";
+ String value = "";
+ try
+ {
+ field = resolver.createJCRName(node.getPropertyName()).getAsString();
+ value = resolver.createJCRName(node.getValue()).getAsString();
+ }
+ catch (RepositoryException e)
+ {
+ // will never happen, prefixes are created when unknown
+ }
+ return new JcrTermQuery(new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, value)));
+ }
- public Object visit(NodeTypeQueryNode node, Object data) {
+ public Object visit(NodeTypeQueryNode node, Object data)
+ {
- try {
- return virtualTableResolver.resolve(node.getValue(), true);
- } catch (InvalidQueryException e1) {
- exceptions.add(e1);
- } catch (RepositoryException e1) {
- exceptions.add(e1);
- }
- return new BooleanQuery();
- // // (result)
- // List terms = new ArrayList();
- // try {
- // String mixinTypesField = resolver.createJCRName(
- // Constants.JCR_MIXINTYPES).getAsString();
- // String primaryTypeField = resolver.createJCRName(
- // Constants.JCR_PRIMARYTYPE).getAsString();
- //
- // NodeTypeData base = nodeTypeDataManager.findNodeType(node
- // .getValue());
- //
- // if (base.isMixin()) {
- // // search for nodes where jcr:mixinTypes is set to this mixin
- // Term t = new Term(FieldNames.PROPERTIES, FieldNames
- // .createNamedValue(mixinTypesField, resolver
- // .createJCRName(node.getValue()).getAsString()));
- // terms.add(t);
- // } else {
- // // search for nodes where jcr:primaryType is set to this type
- // Term t = new Term(FieldNames.PROPERTIES, FieldNames
- // .createNamedValue(primaryTypeField, resolver
- // .createJCRName(node.getValue()).getAsString()));
- // terms.add(t);
- // }
- //
- // // now search for all node types that are derived from base
- // Collection<NodeTypeData> allTypes = nodeTypeDataManager
- // .getAllNodeTypes();
- // for (NodeTypeData nodeTypeData : allTypes) {
- // InternalQName[] superTypes = nodeTypeData
- // .getDeclaredSupertypeNames();
- // if (Arrays.asList(superTypes).contains(base.getName())) {
- // String ntName = nsMappings.translateName(nodeTypeData
- // .getName());
- // Term t;
- // if (nodeTypeData.isMixin()) {
- // // search on jcr:mixinTypes
- // t = new Term(FieldNames.PROPERTIES, FieldNames
- // .createNamedValue(mixinTypesField, ntName));
- // } else {
- // // search on jcr:primaryType
- // t = new Term(FieldNames.PROPERTIES, FieldNames
- // .createNamedValue(primaryTypeField, ntName));
- // }
- // terms.add(t);
- // }
- // }
- // } catch (IllegalNameException e) {
- // exceptions.add(e);
- // } catch (RepositoryException e) {
- // exceptions.add(e);
- // }
- // if (terms.size() == 0) {
- // // exception occured
- // return new BooleanQuery();
- // } else if (terms.size() == 1) {
- // return new JackrabbitTermQuery((Term) terms.get(0));
- // } else {
- // BooleanQuery b = new BooleanQuery();
- // for (Iterator it = terms.iterator(); it.hasNext();) {
- // b.add(new JackrabbitTermQuery((Term) it.next()), Occur.SHOULD);
- // }
- // return b;
- // }
- }
+ try
+ {
+ return virtualTableResolver.resolve(node.getValue(), true);
+ }
+ catch (InvalidQueryException e1)
+ {
+ exceptions.add(e1);
+ }
+ catch (RepositoryException e1)
+ {
+ exceptions.add(e1);
+ }
+ return new BooleanQuery();
+ // // (result)
+ // List terms = new ArrayList();
+ // try {
+ // String mixinTypesField = resolver.createJCRName(
+ // Constants.JCR_MIXINTYPES).getAsString();
+ // String primaryTypeField = resolver.createJCRName(
+ // Constants.JCR_PRIMARYTYPE).getAsString();
+ //
+ // NodeTypeData base = nodeTypeDataManager.findNodeType(node
+ // .getValue());
+ //
+ // if (base.isMixin()) {
+ // // search for nodes where jcr:mixinTypes is set to this mixin
+ // Term t = new Term(FieldNames.PROPERTIES, FieldNames
+ // .createNamedValue(mixinTypesField, resolver
+ // .createJCRName(node.getValue()).getAsString()));
+ // terms.add(t);
+ // } else {
+ // // search for nodes where jcr:primaryType is set to this type
+ // Term t = new Term(FieldNames.PROPERTIES, FieldNames
+ // .createNamedValue(primaryTypeField, resolver
+ // .createJCRName(node.getValue()).getAsString()));
+ // terms.add(t);
+ // }
+ //
+ // // now search for all node types that are derived from base
+ // Collection<NodeTypeData> allTypes = nodeTypeDataManager
+ // .getAllNodeTypes();
+ // for (NodeTypeData nodeTypeData : allTypes) {
+ // InternalQName[] superTypes = nodeTypeData
+ // .getDeclaredSupertypeNames();
+ // if (Arrays.asList(superTypes).contains(base.getName())) {
+ // String ntName = nsMappings.translateName(nodeTypeData
+ // .getName());
+ // Term t;
+ // if (nodeTypeData.isMixin()) {
+ // // search on jcr:mixinTypes
+ // t = new Term(FieldNames.PROPERTIES, FieldNames
+ // .createNamedValue(mixinTypesField, ntName));
+ // } else {
+ // // search on jcr:primaryType
+ // t = new Term(FieldNames.PROPERTIES, FieldNames
+ // .createNamedValue(primaryTypeField, ntName));
+ // }
+ // terms.add(t);
+ // }
+ // }
+ // } catch (IllegalNameException e) {
+ // exceptions.add(e);
+ // } catch (RepositoryException e) {
+ // exceptions.add(e);
+ // }
+ // if (terms.size() == 0) {
+ // // exception occurred
+ // return new BooleanQuery();
+ // } else if (terms.size() == 1) {
+ // return new JackrabbitTermQuery((Term) terms.get(0));
+ // } else {
+ // BooleanQuery b = new BooleanQuery();
+ // for (Iterator it = terms.iterator(); it.hasNext();) {
+ // b.add(new JackrabbitTermQuery((Term) it.next()), Occur.SHOULD);
+ // }
+ // return b;
+ // }
+ }
- public Object visit(TextsearchQueryNode node, Object data) {
- try {
- QPath relPath = node.getRelativePath();
- String fieldname;
- if (relPath == null || !node.getReferencesProperty()) {
- // fulltext on node
- fieldname = FieldNames.FULLTEXT;
- } else {
- // final path element is a property name
+ public Object visit(TextsearchQueryNode node, Object data)
+ {
+ try
+ {
+ QPath relPath = node.getRelativePath();
+ String fieldname;
+ if (relPath == null || !node.getReferencesProperty())
+ {
+ // fulltext on node
+ fieldname = FieldNames.FULLTEXT;
+ }
+ else
+ {
+ // final path element is a property name
- fieldname = resolver.createJCRName(relPath.getName())
- .getAsString();
- int idx = fieldname.indexOf(':');
- fieldname = fieldname.substring(0, idx + 1)
- + FieldNames.FULLTEXT_PREFIX
- + fieldname.substring(idx + 1);
+ fieldname = resolver.createJCRName(relPath.getName()).getAsString();
+ int idx = fieldname.indexOf(':');
+ fieldname = fieldname.substring(0, idx + 1) + FieldNames.FULLTEXT_PREFIX +
fieldname.substring(idx + 1);
- }
- QueryParser parser = new JcrQueryParser(fieldname, analyzer,
- synonymProvider);
- Query context = parser.parse(node.getQuery());
- if (relPath != null
- && (!node.getReferencesProperty() || relPath.getEntries().length > 1))
{
- // text search on some child axis
- QPathEntry[] elements = relPath.getEntries();
- for (int i = elements.length - 1; i >= 0; i--) {
- QPathEntry name = null;
- if (!elements[i].equals(RelationQueryNode.STAR_NAME_TEST)) {
- name = elements[i];
- }
- // join text search with name test
- // if path references property that's elements.length - 2
- // if path references node that's elements.length - 1
- if (name != null
- && ((node.getReferencesProperty() && i == elements.length - 2) ||
(!node
- .getReferencesProperty() && i == elements.length - 1))) {
- Query q = new NameQuery(name, indexFormatVersion,
- nsMappings);
- BooleanQuery and = new BooleanQuery();
- and.add(q, Occur.MUST);
- and.add(context, Occur.MUST);
- context = and;
- } else if ((node.getReferencesProperty() && i < elements.length - 2)
- || (!node.getReferencesProperty() && i < elements.length - 1)) {
- // otherwise do a parent axis step
- context = new ParentAxisQuery(context, name,
- indexFormatVersion, nsMappings);
- }
- }
- // finally select parent
- context = new ParentAxisQuery(context, null,
- indexFormatVersion, nsMappings);
- }
- return context;
- } catch (NamespaceException e) {
- exceptions.add(e);
- } catch (ParseException e) {
- exceptions.add(e);
- } catch (RepositoryException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- return null;
- }
+ }
+ QueryParser parser = new JcrQueryParser(fieldname, analyzer, synonymProvider);
+ Query context = parser.parse(node.getQuery());
+ if (relPath != null && (!node.getReferencesProperty() ||
relPath.getEntries().length > 1))
+ {
+ // text search on some child axis
+ QPathEntry[] elements = relPath.getEntries();
+ for (int i = elements.length - 1; i >= 0; i--)
+ {
+ QPathEntry name = null;
+ if (!elements[i].equals(RelationQueryNode.STAR_NAME_TEST))
+ {
+ name = elements[i];
+ }
+ // join text search with name test
+ // if path references property that's elements.length - 2
+ // if path references node that's elements.length - 1
+ if (name != null
+ && ((node.getReferencesProperty() && i ==
elements.length - 2) || (!node.getReferencesProperty() && i == elements.length -
1)))
+ {
+ Query q = new NameQuery(name, indexFormatVersion, nsMappings);
+ BooleanQuery and = new BooleanQuery();
+ and.add(q, Occur.MUST);
+ and.add(context, Occur.MUST);
+ context = and;
+ }
+ else if ((node.getReferencesProperty() && i < elements.length -
2)
+ || (!node.getReferencesProperty() && i < elements.length -
1))
+ {
+ // otherwise do a parent axis step
+ context = new ParentAxisQuery(context, name, indexFormatVersion,
nsMappings);
+ }
+ }
+ // finally select parent
+ context = new ParentAxisQuery(context, null, indexFormatVersion,
nsMappings);
+ }
+ return context;
+ }
+ catch (NamespaceException e)
+ {
+ exceptions.add(e);
+ }
+ catch (ParseException e)
+ {
+ exceptions.add(e);
+ }
+ catch (RepositoryException e)
+ {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ return null;
+ }
- public Object visit(PathQueryNode node, Object data)
- throws RepositoryException {
- Query context = null;
- LocationStepQueryNode[] steps = node.getPathSteps();
- if (steps.length > 0) {
- if (node.isAbsolute() && !steps[0].getIncludeDescendants()) {
- // eat up first step
- InternalQName nameTest = steps[0].getNameTest();
- if (nameTest == null) {
- // this is equivalent to the root node
- context = new JcrTermQuery(new Term(FieldNames.UUID,
- Constants.ROOT_UUID));
- } else if (nameTest.getName().length() == 0) {
- // root node
- context = new JcrTermQuery(new Term(FieldNames.UUID,
- Constants.ROOT_UUID));
- } else {
- // then this is a node != the root node
- // will never match anything!
- BooleanQuery and = new BooleanQuery();
- and.add(new JcrTermQuery(new Term(FieldNames.UUID,
- Constants.ROOT_UUID)), Occur.MUST);
- and.add(new NameQuery(nameTest, indexFormatVersion,
- nsMappings), Occur.MUST);
- context = and;
- }
- LocationStepQueryNode[] tmp = new LocationStepQueryNode[steps.length - 1];
- System.arraycopy(steps, 1, tmp, 0, steps.length - 1);
- steps = tmp;
- } else {
- // path is 1) relative or 2) descendant-or-self
- // use root node as context
- context = new JcrTermQuery(new Term(FieldNames.UUID,
- Constants.ROOT_UUID));
- }
- } else {
- exceptions.add(new InvalidQueryException(
- "Number of location steps must be > 0"));
- }
- // loop over steps
- for (int i = 0; i < steps.length; i++) {
- context = (Query) steps[i].accept(this, context);
- }
- if (data instanceof BooleanQuery) {
- BooleanQuery constraint = (BooleanQuery) data;
- if (constraint.getClauses().length > 0) {
- constraint.add(context, Occur.MUST);
- context = constraint;
- }
- }
- return context;
- }
+ public Object visit(PathQueryNode node, Object data) throws RepositoryException
+ {
+ Query context = null;
+ LocationStepQueryNode[] steps = node.getPathSteps();
+ if (steps.length > 0)
+ {
+ if (node.isAbsolute() && !steps[0].getIncludeDescendants())
+ {
+ // eat up first step
+ InternalQName nameTest = steps[0].getNameTest();
+ if (nameTest == null)
+ {
+ // this is equivalent to the root node
+ context = new JcrTermQuery(new Term(FieldNames.UUID,
Constants.ROOT_UUID));
+ }
+ else if (nameTest.getName().length() == 0)
+ {
+ // root node
+ context = new JcrTermQuery(new Term(FieldNames.UUID,
Constants.ROOT_UUID));
+ }
+ else
+ {
+ // then this is a node != the root node
+ // will never match anything!
+ BooleanQuery and = new BooleanQuery();
+ and.add(new JcrTermQuery(new Term(FieldNames.UUID, Constants.ROOT_UUID)),
Occur.MUST);
+ and.add(new NameQuery(nameTest, indexFormatVersion, nsMappings),
Occur.MUST);
+ context = and;
+ }
+ LocationStepQueryNode[] tmp = new LocationStepQueryNode[steps.length - 1];
+ System.arraycopy(steps, 1, tmp, 0, steps.length - 1);
+ steps = tmp;
+ }
+ else
+ {
+ // path is 1) relative or 2) descendant-or-self
+ // use root node as context
+ context = new JcrTermQuery(new Term(FieldNames.UUID, Constants.ROOT_UUID));
+ }
+ }
+ else
+ {
+ exceptions.add(new InvalidQueryException("Number of location steps must be
> 0"));
+ }
+ // loop over steps
+ for (int i = 0; i < steps.length; i++)
+ {
+ context = (Query)steps[i].accept(this, context);
+ }
+ if (data instanceof BooleanQuery)
+ {
+ BooleanQuery constraint = (BooleanQuery)data;
+ if (constraint.getClauses().length > 0)
+ {
+ constraint.add(context, Occur.MUST);
+ context = constraint;
+ }
+ }
+ return context;
+ }
- public Object visit(LocationStepQueryNode node, Object data)
- throws RepositoryException {
- Query context = (Query) data;
- BooleanQuery andQuery = new BooleanQuery();
+ public Object visit(LocationStepQueryNode node, Object data) throws
RepositoryException
+ {
+ Query context = (Query)data;
+ BooleanQuery andQuery = new BooleanQuery();
- if (context == null) {
- exceptions.add(new IllegalArgumentException("Unsupported query"));
- }
+ if (context == null)
+ {
+ exceptions.add(new IllegalArgumentException("Unsupported query"));
+ }
- // predicate on step?
- Object[] predicates = node.acceptOperands(this, data);
- for (int i = 0; i < predicates.length; i++) {
- andQuery.add((Query) predicates[i], Occur.MUST);
- }
+ // predicate on step?
+ Object[] predicates = node.acceptOperands(this, data);
+ for (int i = 0; i < predicates.length; i++)
+ {
+ andQuery.add((Query)predicates[i], Occur.MUST);
+ }
- // check for position predicate
- QueryNode[] pred = node.getPredicates();
- for (int i = 0; i < pred.length; i++) {
- if (pred[i].getType() == QueryNode.TYPE_RELATION) {
- RelationQueryNode pos = (RelationQueryNode) pred[i];
- if (pos.getValueType() == QueryConstants.TYPE_POSITION) {
- node.setIndex(pos.getPositionValue());
- }
- }
- }
+ // check for position predicate
+ QueryNode[] pred = node.getPredicates();
+ for (int i = 0; i < pred.length; i++)
+ {
+ if (pred[i].getType() == QueryNode.TYPE_RELATION)
+ {
+ RelationQueryNode pos = (RelationQueryNode)pred[i];
+ if (pos.getValueType() == QueryConstants.TYPE_POSITION)
+ {
+ node.setIndex(pos.getPositionValue());
+ }
+ }
+ }
- NameQuery nameTest = null;
- if (node.getNameTest() != null) {
- nameTest = new NameQuery(node.getNameTest(), indexFormatVersion,
- nsMappings);
- }
+ NameQuery nameTest = null;
+ if (node.getNameTest() != null)
+ {
+ nameTest = new NameQuery(node.getNameTest(), indexFormatVersion, nsMappings);
+ }
- if (node.getIncludeDescendants()) {
- if (nameTest != null) {
- andQuery.add(new DescendantSelfAxisQuery(context, nameTest,
- false), Occur.MUST);
- } else {
- // descendant-or-self with nametest=*
- if (predicates.length > 0) {
- // if we have a predicate attached, the condition acts as
- // the sub query.
+ if (node.getIncludeDescendants())
+ {
+ if (nameTest != null)
+ {
+ andQuery.add(new DescendantSelfAxisQuery(context, nameTest, false),
Occur.MUST);
+ }
+ else
+ {
+ // descendant-or-self with nametest=*
+ if (predicates.length > 0)
+ {
+ // if we have a predicate attached, the condition acts as
+ // the sub query.
- // only use descendant axis if path is not //*
- // otherwise the query for the predicate can be used itself
- PathQueryNode pathNode = (PathQueryNode) node.getParent();
- if (pathNode.getPathSteps()[0] != node) {
- Query subQuery = new DescendantSelfAxisQuery(context,
- andQuery, false);
- andQuery = new BooleanQuery();
- andQuery.add(subQuery, Occur.MUST);
- }
- } else {
- // todo this will traverse the whole index, optimize!
- // only use descendant axis if path is not //*
- PathQueryNode pathNode = (PathQueryNode) node.getParent();
- if (pathNode.getPathSteps()[0] != node) {
- if (node.getIndex() == LocationStepQueryNode.NONE) {
- context = new DescendantSelfAxisQuery(context,
- false);
- andQuery.add(context, Occur.MUST);
- } else {
- context = new DescendantSelfAxisQuery(context, true);
- andQuery
- .add(new ChildAxisQuery(sharedItemMgr,
- context, null, node.getIndex(),
- indexFormatVersion, nsMappings),
- Occur.MUST);
- }
- } else {
- andQuery.add(new MatchAllDocsQuery(), Occur.MUST);
- }
- }
- }
- } else {
- // name test
- if (nameTest != null) {
- andQuery.add(new ChildAxisQuery(sharedItemMgr, context,
- nameTest.getName(), node.getIndex(),
- indexFormatVersion, nsMappings), Occur.MUST);
- } else {
- // select child nodes
- andQuery.add(new ChildAxisQuery(sharedItemMgr, context, null,
- node.getIndex(), indexFormatVersion, nsMappings),
- Occur.MUST);
- }
- }
+ // only use descendant axis if path is not //*
+ // otherwise the query for the predicate can be used itself
+ PathQueryNode pathNode = (PathQueryNode)node.getParent();
+ if (pathNode.getPathSteps()[0] != node)
+ {
+ Query subQuery = new DescendantSelfAxisQuery(context, andQuery,
false);
+ andQuery = new BooleanQuery();
+ andQuery.add(subQuery, Occur.MUST);
+ }
+ }
+ else
+ {
+ // todo this will traverse the whole index, optimize!
+ // only use descendant axis if path is not //*
+ PathQueryNode pathNode = (PathQueryNode)node.getParent();
+ if (pathNode.getPathSteps()[0] != node)
+ {
+ if (node.getIndex() == LocationStepQueryNode.NONE)
+ {
+ context = new DescendantSelfAxisQuery(context, false);
+ andQuery.add(context, Occur.MUST);
+ }
+ else
+ {
+ context = new DescendantSelfAxisQuery(context, true);
+ andQuery.add(new ChildAxisQuery(sharedItemMgr, context, null,
node.getIndex(), indexFormatVersion,
+ nsMappings), Occur.MUST);
+ }
+ }
+ else
+ {
+ andQuery.add(new MatchAllDocsQuery(), Occur.MUST);
+ }
+ }
+ }
+ }
+ else
+ {
+ // name test
+ if (nameTest != null)
+ {
+ andQuery.add(new ChildAxisQuery(sharedItemMgr, context, nameTest.getName(),
node.getIndex(),
+ indexFormatVersion, nsMappings), Occur.MUST);
+ }
+ else
+ {
+ // select child nodes
+ andQuery.add(new ChildAxisQuery(sharedItemMgr, context, null,
node.getIndex(), indexFormatVersion,
+ nsMappings), Occur.MUST);
+ }
+ }
- return andQuery;
- }
+ return andQuery;
+ }
- public Object visit(DerefQueryNode node, Object data)
- throws RepositoryException {
- Query context = (Query) data;
- if (context == null) {
- exceptions.add(new IllegalArgumentException("Unsupported query"));
- }
+ public Object visit(DerefQueryNode node, Object data) throws RepositoryException
+ {
+ Query context = (Query)data;
+ if (context == null)
+ {
+ exceptions.add(new IllegalArgumentException("Unsupported query"));
+ }
- try {
- String refProperty = resolver.createJCRName(node.getRefProperty())
- .getAsString();
+ try
+ {
+ String refProperty =
resolver.createJCRName(node.getRefProperty()).getAsString();
- if (node.getIncludeDescendants()) {
- Query refPropQuery = Util.createMatchAllQuery(refProperty,
- indexFormatVersion);
- context = new DescendantSelfAxisQuery(context, refPropQuery,
- false);
- }
+ if (node.getIncludeDescendants())
+ {
+ Query refPropQuery = Util.createMatchAllQuery(refProperty,
indexFormatVersion);
+ context = new DescendantSelfAxisQuery(context, refPropQuery, false);
+ }
- context = new DerefQuery(context, refProperty, node.getNameTest(),
- indexFormatVersion, nsMappings);
+ context = new DerefQuery(context, refProperty, node.getNameTest(),
indexFormatVersion, nsMappings);
- // attach predicates
- Object[] predicates = node.acceptOperands(this, data);
- if (predicates.length > 0) {
- BooleanQuery andQuery = new BooleanQuery();
- for (int i = 0; i < predicates.length; i++) {
- andQuery.add((Query) predicates[i], Occur.MUST);
- }
- andQuery.add(context, Occur.MUST);
- context = andQuery;
- }
+ // attach predicates
+ Object[] predicates = node.acceptOperands(this, data);
+ if (predicates.length > 0)
+ {
+ BooleanQuery andQuery = new BooleanQuery();
+ for (int i = 0; i < predicates.length; i++)
+ {
+ andQuery.add((Query)predicates[i], Occur.MUST);
+ }
+ andQuery.add(context, Occur.MUST);
+ context = andQuery;
+ }
- } catch (NamespaceException e) {
- // should never happen
- exceptions.add(e);
- }
+ }
+ catch (NamespaceException e)
+ {
+ // should never happen
+ exceptions.add(e);
+ }
- return context;
- }
+ return context;
+ }
- public Object visit(RelationQueryNode node, Object data)
- throws RepositoryException {
- Query query;
- String[] stringValues = new String[1];
- switch (node.getValueType()) {
- case 0:
- // not set: either IS NULL or IS NOT NULL
- break;
- case QueryConstants.TYPE_DATE:
- stringValues[0] = DateField.dateToString(node.getDateValue());
- break;
- case QueryConstants.TYPE_DOUBLE:
- stringValues[0] = DoubleField.doubleToString(node.getDoubleValue());
- break;
- case QueryConstants.TYPE_LONG:
- stringValues[0] = LongField.longToString(node.getLongValue());
- break;
- case QueryConstants.TYPE_STRING:
- if (node.getOperation() == QueryConstants.OPERATION_EQ_GENERAL
- || node.getOperation() == QueryConstants.OPERATION_EQ_VALUE
- || node.getOperation() == QueryConstants.OPERATION_NE_GENERAL
- || node.getOperation() == QueryConstants.OPERATION_NE_VALUE) {
- // only use coercing on non-range operations
- InternalQName propertyName = node.getRelativePath().getName();
- stringValues = getStringValues(propertyName, node
- .getStringValue());
- } else {
- stringValues[0] = node.getStringValue();
- }
- break;
- case QueryConstants.TYPE_POSITION:
- // ignore position. is handled in the location step
- return null;
- default:
- throw new IllegalArgumentException("Unknown relation type: "
- + node.getValueType());
- }
+ public Object visit(RelationQueryNode node, Object data) throws RepositoryException
+ {
+ Query query;
+ String[] stringValues = new String[1];
+ switch (node.getValueType())
+ {
+ case 0 :
+ // not set: either IS NULL or IS NOT NULL
+ break;
+ case QueryConstants.TYPE_DATE :
+ stringValues[0] = DateField.dateToString(node.getDateValue());
+ break;
+ case QueryConstants.TYPE_DOUBLE :
+ stringValues[0] = DoubleField.doubleToString(node.getDoubleValue());
+ break;
+ case QueryConstants.TYPE_LONG :
+ stringValues[0] = LongField.longToString(node.getLongValue());
+ break;
+ case QueryConstants.TYPE_STRING :
+ if (node.getOperation() == QueryConstants.OPERATION_EQ_GENERAL
+ || node.getOperation() == QueryConstants.OPERATION_EQ_VALUE
+ || node.getOperation() == QueryConstants.OPERATION_NE_GENERAL
+ || node.getOperation() == QueryConstants.OPERATION_NE_VALUE)
+ {
+ // only use coercing on non-range operations
+ InternalQName propertyName = node.getRelativePath().getName();
+ stringValues = getStringValues(propertyName, node.getStringValue());
+ }
+ else
+ {
+ stringValues[0] = node.getStringValue();
+ }
+ break;
+ case QueryConstants.TYPE_POSITION :
+ // ignore position. is handled in the location step
+ return null;
+ default :
+ throw new IllegalArgumentException("Unknown relation type: " +
node.getValueType());
+ }
- if (node.getRelativePath() == null
- && node.getOperation() != QueryConstants.OPERATION_SIMILAR
- && node.getOperation() != QueryConstants.OPERATION_SPELLCHECK) {
- exceptions.add(new InvalidQueryException(
- "@* not supported in predicate"));
- return data;
- }
+ if (node.getRelativePath() == null && node.getOperation() !=
QueryConstants.OPERATION_SIMILAR
+ && node.getOperation() != QueryConstants.OPERATION_SPELLCHECK)
+ {
+ exceptions.add(new InvalidQueryException("@* not supported in
predicate"));
+ return data;
+ }
- // get property transformation
- final int[] transform = new int[] { TransformConstants.TRANSFORM_NONE };
- node.acceptOperands(new DefaultQueryNodeVisitor() {
- public Object visit(PropertyFunctionQueryNode node, Object data) {
- if (node.getFunctionName().equals(
- PropertyFunctionQueryNode.LOWER_CASE)) {
- transform[0] = TransformConstants.TRANSFORM_LOWER_CASE;
- } else if (node.getFunctionName().equals(
- PropertyFunctionQueryNode.UPPER_CASE)) {
- transform[0] = TransformConstants.TRANSFORM_UPPER_CASE;
- }
- return data;
- }
- }, null);
+ // get property transformation
+ final int[] transform = new int[]{TransformConstants.TRANSFORM_NONE};
+ node.acceptOperands(new DefaultQueryNodeVisitor()
+ {
+ public Object visit(PropertyFunctionQueryNode node, Object data)
+ {
+ if (node.getFunctionName().equals(PropertyFunctionQueryNode.LOWER_CASE))
+ {
+ transform[0] = TransformConstants.TRANSFORM_LOWER_CASE;
+ }
+ else if
(node.getFunctionName().equals(PropertyFunctionQueryNode.UPPER_CASE))
+ {
+ transform[0] = TransformConstants.TRANSFORM_UPPER_CASE;
+ }
+ return data;
+ }
+ }, null);
- QPath relPath = node.getRelativePath();
- if (node.getOperation() == QueryConstants.OPERATION_SIMILAR) {
- // this is a bit ugly:
- // add the name of a dummy property because relPath actually
- // references a property. whereas the relPath of the similar
- // operation references a node
- relPath = QPath.makeChildPath(relPath, Constants.JCR_PRIMARYTYPE);
- }
- String field = "";
- try {
- field = resolver.createJCRName(relPath.getName()).getAsString();
- } catch (NamespaceException e) {
- // should never happen
- exceptions.add(e);
- }
+ QPath relPath = node.getRelativePath();
+ if (node.getOperation() == QueryConstants.OPERATION_SIMILAR)
+ {
+ // this is a bit ugly:
+ // add the name of a dummy property because relPath actually
+ // references a property. whereas the relPath of the similar
+ // operation references a node
+ relPath = QPath.makeChildPath(relPath, Constants.JCR_PRIMARYTYPE);
+ }
+ String field = "";
+ try
+ {
+ field = resolver.createJCRName(relPath.getName()).getAsString();
+ }
+ catch (NamespaceException e)
+ {
+ // should never happen
+ exceptions.add(e);
+ }
- // support for fn:name()
- InternalQName propName = relPath.getName();
- if (propName.getNamespace().equals(NS_FN_URI)
- && propName.getName().equals("name()")) {
- if (node.getValueType() != QueryConstants.TYPE_STRING) {
- exceptions.add(new InvalidQueryException("Name function can "
- + "only be used in conjunction with a string literal"));
- return data;
- }
- if (node.getOperation() != QueryConstants.OPERATION_EQ_VALUE
- && node.getOperation() != QueryConstants.OPERATION_EQ_GENERAL) {
- exceptions
- .add(new InvalidQueryException(
- "Name function can "
- + "only be used in conjunction with an equals operator"));
- return data;
- }
- // check if string literal is a valid XML Name
- if (XMLChar.isValidName(node.getStringValue())) {
- // parse string literal as JCR Name
- try {
- InternalQName n = session
- .getLocationFactory()
- .parseJCRName(ISO9075.decode(node.getStringValue()))
- .getInternalName();
- query = new NameQuery(n, indexFormatVersion, nsMappings);
- } catch (RepositoryException e) {
- exceptions.add(e);
- return data;
- }
- } else {
- // will never match -> create dummy query
- query = new BooleanQuery();
- }
- } else {
- switch (node.getOperation()) {
- case QueryConstants.OPERATION_EQ_VALUE: // =
- case QueryConstants.OPERATION_EQ_GENERAL:
- BooleanQuery or = new BooleanQuery();
- for (int i = 0; i < stringValues.length; i++) {
- Term t = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, stringValues[i]));
- Query q;
- if (transform[0] == TransformConstants.TRANSFORM_UPPER_CASE) {
- q = new CaseTermQuery.Upper(t);
- } else if (transform[0] == TransformConstants.TRANSFORM_LOWER_CASE) {
- q = new CaseTermQuery.Lower(t);
- } else {
- q = new JcrTermQuery(t);
- }
- or.add(q, Occur.SHOULD);
- }
- query = or;
- if (node.getOperation() == QueryConstants.OPERATION_EQ_VALUE) {
- query = createSingleValueConstraint(or, field);
- }
- break;
- case QueryConstants.OPERATION_GE_VALUE: // >=
- case QueryConstants.OPERATION_GE_GENERAL:
- or = new BooleanQuery();
- for (int i = 0; i < stringValues.length; i++) {
- Term lower = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, stringValues[i]));
- Term upper = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, "\uFFFF"));
- or.add(new RangeQuery(lower, upper, true, transform[0]),
- Occur.SHOULD);
- }
- query = or;
- if (node.getOperation() == QueryConstants.OPERATION_GE_VALUE) {
- query = createSingleValueConstraint(or, field);
- }
- break;
- case QueryConstants.OPERATION_GT_VALUE: // >
- case QueryConstants.OPERATION_GT_GENERAL:
- or = new BooleanQuery();
- for (int i = 0; i < stringValues.length; i++) {
- Term lower = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, stringValues[i]));
- Term upper = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, "\uFFFF"));
- or.add(new RangeQuery(lower, upper, false, transform[0]),
- Occur.SHOULD);
- }
- query = or;
- if (node.getOperation() == QueryConstants.OPERATION_GT_VALUE) {
- query = createSingleValueConstraint(or, field);
- }
- break;
- case QueryConstants.OPERATION_LE_VALUE: // <=
- case QueryConstants.OPERATION_LE_GENERAL: // <=
- or = new BooleanQuery();
- for (int i = 0; i < stringValues.length; i++) {
- Term lower = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, ""));
- Term upper = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, stringValues[i]));
- or.add(new RangeQuery(lower, upper, true, transform[0]),
- Occur.SHOULD);
- }
- query = or;
- if (node.getOperation() == QueryConstants.OPERATION_LE_VALUE) {
- query = createSingleValueConstraint(query, field);
- }
- break;
- case QueryConstants.OPERATION_LIKE: // LIKE
- // the like operation always has one string value.
- // no coercing, see above
- if (stringValues[0].equals("%")) {
- query = Util.createMatchAllQuery(field, indexFormatVersion);
- } else {
- query = new WildcardQuery(FieldNames.PROPERTIES, field,
- stringValues[0], transform[0]);
- }
- break;
- case QueryConstants.OPERATION_LT_VALUE: // <
- case QueryConstants.OPERATION_LT_GENERAL:
- or = new BooleanQuery();
- for (int i = 0; i < stringValues.length; i++) {
- Term lower = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, ""));
- Term upper = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, stringValues[i]));
- or.add(new RangeQuery(lower, upper, false, transform[0]),
- Occur.SHOULD);
- }
- query = or;
- if (node.getOperation() == QueryConstants.OPERATION_LT_VALUE) {
- query = createSingleValueConstraint(or, field);
- }
- break;
- case QueryConstants.OPERATION_NE_VALUE: // !=
- // match nodes with property 'field' that includes svp and mvp
- BooleanQuery notQuery = new BooleanQuery();
- notQuery.add(Util
- .createMatchAllQuery(field, indexFormatVersion),
- Occur.SHOULD);
- // exclude all nodes where 'field' has the term in question
- for (int i = 0; i < stringValues.length; i++) {
- Term t = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, stringValues[i]));
- Query q;
- if (transform[0] == TransformConstants.TRANSFORM_UPPER_CASE) {
- q = new CaseTermQuery.Upper(t);
- } else if (transform[0] == TransformConstants.TRANSFORM_LOWER_CASE) {
- q = new CaseTermQuery.Lower(t);
- } else {
- q = new JcrTermQuery(t);
- }
- notQuery.add(q, Occur.MUST_NOT);
- }
- // and exclude all nodes where 'field' is multi valued
- notQuery.add(new JcrTermQuery(new Term(FieldNames.MVP,
- field)), Occur.MUST_NOT);
- query = notQuery;
- break;
- case QueryConstants.OPERATION_NE_GENERAL: // !=
- // that's:
- // all nodes with property 'field'
- // minus the nodes that have a single property 'field' that is
- // not equal to term in question
- // minus the nodes that have a multi-valued property 'field' and
- // all values are equal to term in question
- notQuery = new BooleanQuery();
- notQuery.add(Util
- .createMatchAllQuery(field, indexFormatVersion),
- Occur.SHOULD);
- for (int i = 0; i < stringValues.length; i++) {
- // exclude the nodes that have the term and are single
- // valued
- Term t = new Term(FieldNames.PROPERTIES, FieldNames
- .createNamedValue(field, stringValues[i]));
- Query svp = new NotQuery(new JcrTermQuery(new Term(
- FieldNames.MVP, field)));
- BooleanQuery and = new BooleanQuery();
- Query q;
- if (transform[0] == TransformConstants.TRANSFORM_UPPER_CASE) {
- q = new CaseTermQuery.Upper(t);
- } else if (transform[0] == TransformConstants.TRANSFORM_LOWER_CASE) {
- q = new CaseTermQuery.Lower(t);
- } else {
- q = new JcrTermQuery(t);
- }
- and.add(q, Occur.MUST);
- and.add(svp, Occur.MUST);
- notQuery.add(and, Occur.MUST_NOT);
- }
- // todo above also excludes multi-valued properties that contain
- // multiple instances of only stringValues. e.g. text={foo, foo}
- query = notQuery;
- break;
- case QueryConstants.OPERATION_NULL:
- query = new NotQuery(Util.createMatchAllQuery(field,
- indexFormatVersion));
- break;
- case QueryConstants.OPERATION_SIMILAR:
- String uuid = "x";
- try {
- // throw new UnsupportedOperationException();
- QPath path = resolver.parseJCRPath(node.getStringValue())
- .getInternalPath();
- NodeData parent = (NodeData) sharedItemMgr
- .getItemData(Constants.ROOT_UUID);
+ // support for fn:name()
+ InternalQName propName = relPath.getName();
+ if (propName.getNamespace().equals(NS_FN_URI) &&
propName.getName().equals("name()"))
+ {
+ if (node.getValueType() != QueryConstants.TYPE_STRING)
+ {
+ exceptions.add(new InvalidQueryException("Name function can "
+ + "only be used in conjunction with a string literal"));
+ return data;
+ }
+ if (node.getOperation() != QueryConstants.OPERATION_EQ_VALUE
+ && node.getOperation() != QueryConstants.OPERATION_EQ_GENERAL)
+ {
+ exceptions.add(new InvalidQueryException("Name function can "
+ + "only be used in conjunction with an equals operator"));
+ return data;
+ }
+ // check if string literal is a valid XML Name
+ if (XMLChar.isValidName(node.getStringValue()))
+ {
+ // parse string literal as JCR Name
+ try
+ {
+ InternalQName n =
+
session.getLocationFactory().parseJCRName(ISO9075.decode(node.getStringValue())).getInternalName();
+ query = new NameQuery(n, indexFormatVersion, nsMappings);
+ }
+ catch (RepositoryException e)
+ {
+ exceptions.add(e);
+ return data;
+ }
+ }
+ else
+ {
+ // will never match -> create dummy query
+ query = new BooleanQuery();
+ }
+ }
+ else
+ {
+ switch (node.getOperation())
+ {
+ case QueryConstants.OPERATION_EQ_VALUE : // =
+ case QueryConstants.OPERATION_EQ_GENERAL :
+ BooleanQuery or = new BooleanQuery();
+ for (int i = 0; i < stringValues.length; i++)
+ {
+ Term t = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, stringValues[i]));
+ Query q;
+ if (transform[0] == TransformConstants.TRANSFORM_UPPER_CASE)
+ {
+ q = new CaseTermQuery.Upper(t);
+ }
+ else if (transform[0] == TransformConstants.TRANSFORM_LOWER_CASE)
+ {
+ q = new CaseTermQuery.Lower(t);
+ }
+ else
+ {
+ q = new JcrTermQuery(t);
+ }
+ or.add(q, Occur.SHOULD);
+ }
+ query = or;
+ if (node.getOperation() == QueryConstants.OPERATION_EQ_VALUE)
+ {
+ query = createSingleValueConstraint(or, field);
+ }
+ break;
+ case QueryConstants.OPERATION_GE_VALUE : // >=
+ case QueryConstants.OPERATION_GE_GENERAL :
+ or = new BooleanQuery();
+ for (int i = 0; i < stringValues.length; i++)
+ {
+ Term lower = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, stringValues[i]));
+ Term upper = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, "\uFFFF"));
+ or.add(new RangeQuery(lower, upper, true, transform[0]),
Occur.SHOULD);
+ }
+ query = or;
+ if (node.getOperation() == QueryConstants.OPERATION_GE_VALUE)
+ {
+ query = createSingleValueConstraint(or, field);
+ }
+ break;
+ case QueryConstants.OPERATION_GT_VALUE : // >
+ case QueryConstants.OPERATION_GT_GENERAL :
+ or = new BooleanQuery();
+ for (int i = 0; i < stringValues.length; i++)
+ {
+ Term lower = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, stringValues[i]));
+ Term upper = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, "\uFFFF"));
+ or.add(new RangeQuery(lower, upper, false, transform[0]),
Occur.SHOULD);
+ }
+ query = or;
+ if (node.getOperation() == QueryConstants.OPERATION_GT_VALUE)
+ {
+ query = createSingleValueConstraint(or, field);
+ }
+ break;
+ case QueryConstants.OPERATION_LE_VALUE : // <=
+ case QueryConstants.OPERATION_LE_GENERAL : // <=
+ or = new BooleanQuery();
+ for (int i = 0; i < stringValues.length; i++)
+ {
+ Term lower = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, ""));
+ Term upper = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, stringValues[i]));
+ or.add(new RangeQuery(lower, upper, true, transform[0]),
Occur.SHOULD);
+ }
+ query = or;
+ if (node.getOperation() == QueryConstants.OPERATION_LE_VALUE)
+ {
+ query = createSingleValueConstraint(query, field);
+ }
+ break;
+ case QueryConstants.OPERATION_LIKE : // LIKE
+ // the like operation always has one string value.
+ // no coercing, see above
+ if (stringValues[0].equals("%"))
+ {
+ query = Util.createMatchAllQuery(field, indexFormatVersion);
+ }
+ else
+ {
+ query = new WildcardQuery(FieldNames.PROPERTIES, field,
stringValues[0], transform[0]);
+ }
+ break;
+ case QueryConstants.OPERATION_LT_VALUE : // <
+ case QueryConstants.OPERATION_LT_GENERAL :
+ or = new BooleanQuery();
+ for (int i = 0; i < stringValues.length; i++)
+ {
+ Term lower = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, ""));
+ Term upper = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, stringValues[i]));
+ or.add(new RangeQuery(lower, upper, false, transform[0]),
Occur.SHOULD);
+ }
+ query = or;
+ if (node.getOperation() == QueryConstants.OPERATION_LT_VALUE)
+ {
+ query = createSingleValueConstraint(or, field);
+ }
+ break;
+ case QueryConstants.OPERATION_NE_VALUE : // !=
+ // match nodes with property 'field' that includes svp and mvp
+ BooleanQuery notQuery = new BooleanQuery();
+ notQuery.add(Util.createMatchAllQuery(field, indexFormatVersion),
Occur.SHOULD);
+ // exclude all nodes where 'field' has the term in question
+ for (int i = 0; i < stringValues.length; i++)
+ {
+ Term t = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, stringValues[i]));
+ Query q;
+ if (transform[0] == TransformConstants.TRANSFORM_UPPER_CASE)
+ {
+ q = new CaseTermQuery.Upper(t);
+ }
+ else if (transform[0] == TransformConstants.TRANSFORM_LOWER_CASE)
+ {
+ q = new CaseTermQuery.Lower(t);
+ }
+ else
+ {
+ q = new JcrTermQuery(t);
+ }
+ notQuery.add(q, Occur.MUST_NOT);
+ }
+ // and exclude all nodes where 'field' is multi valued
+ notQuery.add(new JcrTermQuery(new Term(FieldNames.MVP, field)),
Occur.MUST_NOT);
+ query = notQuery;
+ break;
+ case QueryConstants.OPERATION_NE_GENERAL : // !=
+ // that's:
+ // all nodes with property 'field'
+ // minus the nodes that have a single property 'field' that is
+ // not equal to term in question
+ // minus the nodes that have a multi-valued property 'field' and
+ // all values are equal to term in question
+ notQuery = new BooleanQuery();
+ notQuery.add(Util.createMatchAllQuery(field, indexFormatVersion),
Occur.SHOULD);
+ for (int i = 0; i < stringValues.length; i++)
+ {
+ // exclude the nodes that have the term and are single
+ // valued
+ Term t = new Term(FieldNames.PROPERTIES,
FieldNames.createNamedValue(field, stringValues[i]));
+ Query svp = new NotQuery(new JcrTermQuery(new Term(FieldNames.MVP,
field)));
+ BooleanQuery and = new BooleanQuery();
+ Query q;
+ if (transform[0] == TransformConstants.TRANSFORM_UPPER_CASE)
+ {
+ q = new CaseTermQuery.Upper(t);
+ }
+ else if (transform[0] == TransformConstants.TRANSFORM_LOWER_CASE)
+ {
+ q = new CaseTermQuery.Lower(t);
+ }
+ else
+ {
+ q = new JcrTermQuery(t);
+ }
+ and.add(q, Occur.MUST);
+ and.add(svp, Occur.MUST);
+ notQuery.add(and, Occur.MUST_NOT);
+ }
+ // todo above also excludes multi-valued properties that contain
+ // multiple instances of only stringValues. e.g. text={foo, foo}
+ query = notQuery;
+ break;
+ case QueryConstants.OPERATION_NULL :
+ query = new NotQuery(Util.createMatchAllQuery(field,
indexFormatVersion));
+ break;
+ case QueryConstants.OPERATION_SIMILAR :
+ String uuid = "x";
+ try
+ {
+ // throw new UnsupportedOperationException();
+ QPath path =
resolver.parseJCRPath(node.getStringValue()).getInternalPath();
+ NodeData parent =
(NodeData)sharedItemMgr.getItemData(Constants.ROOT_UUID);
- if (path.equals(Constants.ROOT_PATH)) {
- uuid = Constants.ROOT_UUID;
- } else {
- QPathEntry[] relPathEntries = path.getRelPath(path
- .getDepth());
- ItemData item = parent;
- for (int i = 0; i < relPathEntries.length; i++) {
- item = sharedItemMgr.getItemData(parent,
- relPathEntries[i]);
+ if (path.equals(Constants.ROOT_PATH))
+ {
+ uuid = Constants.ROOT_UUID;
+ }
+ else
+ {
+ QPathEntry[] relPathEntries = path.getRelPath(path.getDepth());
+ ItemData item = parent;
+ for (int i = 0; i < relPathEntries.length; i++)
+ {
+ item = sharedItemMgr.getItemData(parent, relPathEntries[i]);
- if (item == null)
- break;
+ if (item == null)
+ break;
- if (item.isNode())
- parent = (NodeData) item;
- else if (i < relPathEntries.length - 1)
- throw new IllegalPathException(
- "Path can not contains a property as the intermediate element");
- }
- uuid = item.getIdentifier();
- }
+ if (item.isNode())
+ parent = (NodeData)item;
+ else if (i < relPathEntries.length - 1)
+ throw new IllegalPathException(
+ "Path can not contains a property as the intermediate
element");
+ }
+ uuid = item.getIdentifier();
+ }
- } catch (RepositoryException e) {
- exceptions.add(e);
- }
- query = new SimilarityQuery(uuid, analyzer);
- break;
- case QueryConstants.OPERATION_NOT_NULL:
- query = Util.createMatchAllQuery(field, indexFormatVersion);
- break;
- case QueryConstants.OPERATION_SPELLCHECK:
- query = Util.createMatchAllQuery(field, indexFormatVersion);
- break;
- default:
- throw new IllegalArgumentException(
- "Unknown relation operation: " + node.getOperation());
- }
- }
+ }
+ catch (RepositoryException e)
+ {
+ exceptions.add(e);
+ }
+ query = new SimilarityQuery(uuid, analyzer);
+ break;
+ case QueryConstants.OPERATION_NOT_NULL :
+ query = Util.createMatchAllQuery(field, indexFormatVersion);
+ break;
+ case QueryConstants.OPERATION_SPELLCHECK :
+ query = Util.createMatchAllQuery(field, indexFormatVersion);
+ break;
+ default :
+ throw new IllegalArgumentException("Unknown relation operation:
" + node.getOperation());
+ }
+ }
- if (relPath.getEntries().length > 1) {
- // child axis in relation
- QPathEntry[] elements = relPath.getEntries();
- // elements.length - 1 = property name
- // elements.length - 2 = last child axis name test
- for (int i = elements.length - 2; i >= 0; i--) {
- QPathEntry name = null;
- if (!elements[i].equals(RelationQueryNode.STAR_NAME_TEST)) {
- name = elements[i];
- }
- if (i == elements.length - 2) {
- // join name test with property query if there is one
- if (name != null) {
- Query nameTest = new NameQuery(name,
- indexFormatVersion, nsMappings);
- BooleanQuery and = new BooleanQuery();
- and.add(query, Occur.MUST);
- and.add(nameTest, Occur.MUST);
- query = and;
- } else {
- // otherwise the query can be used as is
- }
- } else {
- query = new ParentAxisQuery(query, name,
- indexFormatVersion, nsMappings);
- }
- }
- // finally select the parent of the selected nodes
- query = new ParentAxisQuery(query, null, indexFormatVersion,
- nsMappings);
- }
+ if (relPath.getEntries().length > 1)
+ {
+ // child axis in relation
+ QPathEntry[] elements = relPath.getEntries();
+ // elements.length - 1 = property name
+ // elements.length - 2 = last child axis name test
+ for (int i = elements.length - 2; i >= 0; i--)
+ {
+ QPathEntry name = null;
+ if (!elements[i].equals(RelationQueryNode.STAR_NAME_TEST))
+ {
+ name = elements[i];
+ }
+ if (i == elements.length - 2)
+ {
+ // join name test with property query if there is one
+ if (name != null)
+ {
+ Query nameTest = new NameQuery(name, indexFormatVersion, nsMappings);
+ BooleanQuery and = new BooleanQuery();
+ and.add(query, Occur.MUST);
+ and.add(nameTest, Occur.MUST);
+ query = and;
+ }
+ else
+ {
+ // otherwise the query can be used as is
+ }
+ }
+ else
+ {
+ query = new ParentAxisQuery(query, name, indexFormatVersion, nsMappings);
+ }
+ }
+ // finally select the parent of the selected nodes
+ query = new ParentAxisQuery(query, null, indexFormatVersion, nsMappings);
+ }
- return query;
- }
+ return query;
+ }
- public Object visit(OrderQueryNode node, Object data) {
- return data;
- }
+ public Object visit(OrderQueryNode node, Object data)
+ {
+ return data;
+ }
- public Object visit(PropertyFunctionQueryNode node, Object data) {
- return data;
- }
+ public Object visit(PropertyFunctionQueryNode node, Object data)
+ {
+ return data;
+ }
- // ---------------------------< internal
- // >-----------------------------------
+ // ---------------------------< internal
+ // >-----------------------------------
- /**
- * Wraps a constraint query around <code>q</code> that limits the nodes
to
- * those where <code>propName</code> is the name of a single value
property
- * on the node instance.
- *
- * @param q
- * the query to wrap.
- * @param propName
- * the name of a property that only has one value.
- * @return the wrapped query <code>q</code>.
- */
- private Query createSingleValueConstraint(Query q, String propName) {
- // get nodes with multi-values in propName
- Query mvp = new JcrTermQuery(new Term(FieldNames.MVP, propName));
- // now negate, that gives the nodes that have propName as single
- // values but also all others
- Query svp = new NotQuery(mvp);
- // now join the two, which will result in those nodes where propName
- // only contains a single value. This works because q already restricts
- // the result to those nodes that have a property propName
- BooleanQuery and = new BooleanQuery();
- and.add(q, Occur.MUST);
- and.add(svp, Occur.MUST);
- return and;
- }
+ /**
+ * Wraps a constraint query around <code>q</code> that limits the nodes
to
+ * those where <code>propName</code> is the name of a single value
property
+ * on the node instance.
+ *
+ * @param q
+ * the query to wrap.
+ * @param propName
+ * the name of a property that only has one value.
+ * @return the wrapped query <code>q</code>.
+ */
+ private Query createSingleValueConstraint(Query q, String propName)
+ {
+ // get nodes with multi-values in propName
+ Query mvp = new JcrTermQuery(new Term(FieldNames.MVP, propName));
+ // now negate, that gives the nodes that have propName as single
+ // values but also all others
+ Query svp = new NotQuery(mvp);
+ // now join the two, which will result in those nodes where propName
+ // only contains a single value. This works because q already restricts
+ // the result to those nodes that have a property propName
+ BooleanQuery and = new BooleanQuery();
+ and.add(q, Occur.MUST);
+ and.add(svp, Occur.MUST);
+ return and;
+ }
- /**
- * Returns an array of String values to be used as a term to lookup the
- * search index for a String <code>literal</code> of a certain property
- * name. This method will lookup the <code>propertyName</code> in the
node
- * type registry trying to find out the {@link javax.jcr.PropertyType}s. If
- * no property type is found looking up node type information, this method
- * will guess the property type.
- *
- * @param propertyName
- * the name of the property in the relation.
- * @param literal
- * the String literal in the relation.
- * @return the String values to use as term for the query.
- */
- private String[] getStringValues(InternalQName propertyName, String literal) {
- PropertyTypeRegistry.TypeMapping[] types = propRegistry
- .getPropertyTypes(propertyName);
- List<String> values = new ArrayList<String>();
- for (int i = 0; i < types.length; i++) {
- switch (types[i].type) {
- case PropertyType.NAME:
- // try to translate name
- try {
- InternalQName n = session.getLocationFactory()
- .parseJCRName(literal).getInternalName();
- values.add(nsMappings.translateName(n));
- log.debug("Coerced " + literal + " into NAME.");
- } catch (RepositoryException e) {
- log.warn("Unable to coerce '" + literal + "' into a NAME:
"
- + e.toString());
- } catch (IllegalNameException e) {
- log.warn("Unable to coerce '" + literal + "' into a NAME:
"
- + e.toString());
- }
- break;
- case PropertyType.PATH:
- // try to translate path
- try {
- QPath p = session.getLocationFactory()
- .parseJCRPath(literal).getInternalPath();
- values.add(resolver.createJCRPath(p).getAsString(true));
- log.debug("Coerced " + literal + " into PATH.");
- } catch (RepositoryException e) {
- log.warn("Unable to coerce '" + literal + "' into a PATH:
"
- + e.toString());
- }
- break;
- case PropertyType.DATE:
- // try to parse date
- Calendar c = ISO8601.parse(literal);
- if (c != null) {
- values.add(DateField.timeToString(c.getTimeInMillis()));
- log.debug("Coerced " + literal + " into DATE.");
- } else {
- log.warn("Unable to coerce '" + literal + "' into a
DATE.");
- }
- break;
- case PropertyType.DOUBLE:
- // try to parse double
- try {
- double d = Double.parseDouble(literal);
- values.add(DoubleField.doubleToString(d));
- log.debug("Coerced " + literal + " into DOUBLE.");
- } catch (NumberFormatException e) {
- log.warn("Unable to coerce '" + literal
- + "' into a DOUBLE: " + e.toString());
- }
- break;
- case PropertyType.LONG:
- // try to parse long
- try {
- long l = Long.parseLong(literal);
- values.add(LongField.longToString(l));
- log.debug("Coerced " + literal + " into LONG.");
- } catch (NumberFormatException e) {
- log.warn("Unable to coerce '" + literal + "' into a LONG:
"
- + e.toString());
- }
- break;
- case PropertyType.STRING:
- values.add(literal);
- log.debug("Using literal " + literal + " as is.");
- break;
- }
- }
- if (values.size() == 0) {
- // use literal as is then try to guess other types
- values.add(literal);
+ /**
+ * Returns an array of String values to be used as a term to lookup the
+ * search index for a String <code>literal</code> of a certain property
+ * name. This method will lookup the <code>propertyName</code> in the
node
+ * type registry trying to find out the {@link javax.jcr.PropertyType}s. If
+ * no property type is found looking up node type information, this method
+ * will guess the property type.
+ *
+ * @param propertyName
+ * the name of the property in the relation.
+ * @param literal
+ * the String literal in the relation.
+ * @return the String values to use as term for the query.
+ */
+ private String[] getStringValues(InternalQName propertyName, String literal)
+ {
+ PropertyTypeRegistry.TypeMapping[] types =
propRegistry.getPropertyTypes(propertyName);
+ List<String> values = new ArrayList<String>();
+ for (int i = 0; i < types.length; i++)
+ {
+ switch (types[i].type)
+ {
+ case PropertyType.NAME :
+ // try to translate name
+ try
+ {
+ InternalQName n =
session.getLocationFactory().parseJCRName(literal).getInternalName();
+ values.add(nsMappings.translateName(n));
+ log.debug("Coerced " + literal + " into NAME.");
+ }
+ catch (RepositoryException e)
+ {
+ log.warn("Unable to coerce '" + literal + "'
into a NAME: " + e.toString());
+ }
+ catch (IllegalNameException e)
+ {
+ log.warn("Unable to coerce '" + literal + "'
into a NAME: " + e.toString());
+ }
+ break;
+ case PropertyType.PATH :
+ // try to translate path
+ try
+ {
+ QPath p =
session.getLocationFactory().parseJCRPath(literal).getInternalPath();
+ values.add(resolver.createJCRPath(p).getAsString(true));
+ log.debug("Coerced " + literal + " into PATH.");
+ }
+ catch (RepositoryException e)
+ {
+ log.warn("Unable to coerce '" + literal + "'
into a PATH: " + e.toString());
+ }
+ break;
+ case PropertyType.DATE :
+ // try to parse date
+ Calendar c = ISO8601.parse(literal);
+ if (c != null)
+ {
+ values.add(DateField.timeToString(c.getTimeInMillis()));
+ log.debug("Coerced " + literal + " into DATE.");
+ }
+ else
+ {
+ log.warn("Unable to coerce '" + literal + "'
into a DATE.");
+ }
+ break;
+ case PropertyType.DOUBLE :
+ // try to parse double
+ try
+ {
+ double d = Double.parseDouble(literal);
+ values.add(DoubleField.doubleToString(d));
+ log.debug("Coerced " + literal + " into DOUBLE.");
+ }
+ catch (NumberFormatException e)
+ {
+ log.warn("Unable to coerce '" + literal + "'
into a DOUBLE: " + e.toString());
+ }
+ break;
+ case PropertyType.LONG :
+ // try to parse long
+ try
+ {
+ long l = Long.parseLong(literal);
+ values.add(LongField.longToString(l));
+ log.debug("Coerced " + literal + " into LONG.");
+ }
+ catch (NumberFormatException e)
+ {
+ log.warn("Unable to coerce '" + literal + "'
into a LONG: " + e.toString());
+ }
+ break;
+ case PropertyType.STRING :
+ values.add(literal);
+ log.debug("Using literal " + literal + " as is.");
+ break;
+ }
+ }
+ if (values.size() == 0)
+ {
+ // use literal as is then try to guess other types
+ values.add(literal);
- // try to guess property type
- if (literal.indexOf('/') > -1) {
- // might be a path
- try {
- QPath p = session.getLocationFactory()
- .parseJCRPath(literal).getInternalPath();
- values.add(resolver.createJCRPath(p).getAsString(true));
- log.debug("Coerced " + literal + " into PATH.");
- } catch (Exception e) {
- // not a path
- }
- }
- if (XMLChar.isValidName(literal)) {
- // might be a name
- try {
- InternalQName n = session.getLocationFactory()
- .parseJCRName(literal).getInternalName();
- values.add(nsMappings.translateName(n));
- log.debug("Coerced " + literal + " into NAME.");
- } catch (Exception e) {
- // not a name
- }
- }
- if (literal.indexOf(':') > -1) {
- // is it a date?
- Calendar c = ISO8601.parse(literal);
- if (c != null) {
- values.add(DateField.timeToString(c.getTimeInMillis()));
- log.debug("Coerced " + literal + " into DATE.");
- }
- } else {
- // long or double are possible at this point
- try {
- values.add(LongField.longToString(Long.parseLong(literal)));
- log.debug("Coerced " + literal + " into LONG.");
- } catch (NumberFormatException e) {
- // not a long
- // try double
- try {
- values.add(DoubleField.doubleToString(Double
- .parseDouble(literal)));
- log.debug("Coerced " + literal + " into DOUBLE.");
- } catch (NumberFormatException e1) {
- // not a double
- }
- }
- }
- }
- // if still no values use literal as is
- if (values.size() == 0) {
- values.add(literal);
- log.debug("Using literal " + literal + " as is.");
- }
- return (String[]) values.toArray(new String[values.size()]);
- }
+ // try to guess property type
+ if (literal.indexOf('/') > -1)
+ {
+ // might be a path
+ try
+ {
+ QPath p =
session.getLocationFactory().parseJCRPath(literal).getInternalPath();
+ values.add(resolver.createJCRPath(p).getAsString(true));
+ log.debug("Coerced " + literal + " into PATH.");
+ }
+ catch (Exception e)
+ {
+ // not a path
+ }
+ }
+ if (XMLChar.isValidName(literal))
+ {
+ // might be a name
+ try
+ {
+ InternalQName n =
session.getLocationFactory().parseJCRName(literal).getInternalName();
+ values.add(nsMappings.translateName(n));
+ log.debug("Coerced " + literal + " into NAME.");
+ }
+ catch (Exception e)
+ {
+ // not a name
+ }
+ }
+ if (literal.indexOf(':') > -1)
+ {
+ // is it a date?
+ Calendar c = ISO8601.parse(literal);
+ if (c != null)
+ {
+ values.add(DateField.timeToString(c.getTimeInMillis()));
+ log.debug("Coerced " + literal + " into DATE.");
+ }
+ }
+ else
+ {
+ // long or double are possible at this point
+ try
+ {
+ values.add(LongField.longToString(Long.parseLong(literal)));
+ log.debug("Coerced " + literal + " into LONG.");
+ }
+ catch (NumberFormatException e)
+ {
+ // not a long
+ // try double
+ try
+ {
+ values.add(DoubleField.doubleToString(Double.parseDouble(literal)));
+ log.debug("Coerced " + literal + " into DOUBLE.");
+ }
+ catch (NumberFormatException e1)
+ {
+ // not a double
+ }
+ }
+ }
+ }
+ // if still no values use literal as is
+ if (values.size() == 0)
+ {
+ values.add(literal);
+ log.debug("Using literal " + literal + " as is.");
+ }
+ return (String[])values.toArray(new String[values.size()]);
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,20 +16,6 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-
-import javax.jcr.NamespaceException;
-import javax.jcr.PropertyType;
-import javax.jcr.RepositoryException;
-
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
@@ -48,919 +34,975 @@
import org.exoplatform.services.jcr.impl.core.LocationFactory;
import org.exoplatform.services.jcr.impl.core.value.ValueFactoryImpl;
import org.exoplatform.services.jcr.impl.dataflow.AbstractValueData;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+
+import javax.jcr.NamespaceException;
+import javax.jcr.PropertyType;
+import javax.jcr.RepositoryException;
+
/**
* Creates a lucene <code>Document</code> object from a {@link
javax.jcr.Node}.
*/
-public class NodeIndexer {
+public class NodeIndexer
+{
- /**
- * The logger instance for this class.
- */
- private static final Logger log = LoggerFactory.getLogger(NodeIndexer.class);
+ /**
+ * The logger instance for this class.
+ */
+ private static final Logger log = LoggerFactory.getLogger(NodeIndexer.class);
- /**
- * The default boost for a lucene field: 1.0f.
- */
- protected static final float DEFAULT_BOOST = 1.0f;
+ /**
+ * The default boost for a lucene field: 1.0f.
+ */
+ protected static final float DEFAULT_BOOST = 1.0f;
- /**
- * The <code>NodeState</code> of the node to index
- */
- protected final NodeData node;
+ /**
+ * The <code>NodeState</code> of the node to index
+ */
+ protected final NodeData node;
- /**
- * The persistent item state provider
- */
- protected final ItemDataConsumer stateProvider;
+ /**
+ * The persistent item state provider
+ */
+ protected final ItemDataConsumer stateProvider;
- /**
- * Namespace mappings to use for indexing. This is the internal
- * namespace mapping.
- */
- protected final NamespaceMappings mappings;
+ /**
+ * Namespace mappings to use for indexing. This is the internal
+ * namespace mapping.
+ */
+ protected final NamespaceMappings mappings;
- /**
- * Name and Path resolver.
- */
- protected final LocationFactory resolver;
+ /**
+ * Name and Path resolver.
+ */
+ protected final LocationFactory resolver;
- /**
- * Content extractor.
- */
- protected final DocumentReaderService extractor;
+ /**
+ * Content extractor.
+ */
+ protected final DocumentReaderService extractor;
- /**
- * The indexing configuration or <code>null</code> if none is available.
- */
- protected IndexingConfiguration indexingConfig;
+ /**
+ * The indexing configuration or <code>null</code> if none is available.
+ */
+ protected IndexingConfiguration indexingConfig;
- /**
- * If set to <code>true</code> the fulltext field is stored and and a
term
- * vector is created with offset information.
- */
- protected boolean supportHighlighting = false;
+ /**
+ * If set to <code>true</code> the fulltext field is stored and and a
term
+ * vector is created with offset information.
+ */
+ protected boolean supportHighlighting = false;
- /**
- * Indicates index format for this node indexer.
- */
- protected IndexFormatVersion indexFormatVersion = IndexFormatVersion.V1;
+ /**
+ * Indicates index format for this node indexer.
+ */
+ protected IndexFormatVersion indexFormatVersion = IndexFormatVersion.V1;
- /**
- * List of {@link FieldNames#FULLTEXT} fields which should not be used in
- * an excerpt.
- */
- protected List doNotUseInExcerpt = new ArrayList();
-
- private ValueFactoryImpl vFactory;
+ /**
+ * List of {@link FieldNames#FULLTEXT} fields which should not be used in
+ * an excerpt.
+ */
+ protected List doNotUseInExcerpt = new ArrayList();
- /**
- * Creates a new node indexer.
- *
- * @param node the node state to index.
- * @param stateProvider the persistent item state manager to retrieve properties.
- * @param mappings internal namespace mappings.
- * @param extractor content extractor
- */
- public NodeIndexer(NodeData node, ItemDataConsumer stateProvider, NamespaceMappings
mappings,
- DocumentReaderService extractor)
- {
- this.node = node;
- this.stateProvider = stateProvider;
- this.mappings = mappings;
- this.resolver = new LocationFactory(mappings);
- this.extractor = extractor;
- this.vFactory = new ValueFactoryImpl(this.resolver);
- }
+ private ValueFactoryImpl vFactory;
- /**
- * Returns the <code>NodeId</code> of the indexed node.
- * @return the <code>NodeId</code> of the indexed node.
- */
- public String getNodeId()
- {
- return node.getIdentifier();
- }
+ /**
+ * Creates a new node indexer.
+ *
+ * @param node the node state to index.
+ * @param stateProvider the persistent item state manager to retrieve properties.
+ * @param mappings internal namespace mappings.
+ * @param extractor content extractor
+ */
+ public NodeIndexer(NodeData node, ItemDataConsumer stateProvider, NamespaceMappings
mappings,
+ DocumentReaderService extractor)
+ {
+ this.node = node;
+ this.stateProvider = stateProvider;
+ this.mappings = mappings;
+ this.resolver = new LocationFactory(mappings);
+ this.extractor = extractor;
+ this.vFactory = new ValueFactoryImpl(this.resolver);
+ }
- /**
- * If set to <code>true</code> additional information is stored in the
index
- * to support highlighting using the rep:excerpt pseudo property.
- *
- * @param b <code>true</code> to enable highlighting support.
- */
- public void setSupportHighlighting(boolean b) {
- supportHighlighting = b;
- }
+ /**
+ * Returns the <code>NodeId</code> of the indexed node.
+ * @return the <code>NodeId</code> of the indexed node.
+ */
+ public String getNodeId()
+ {
+ return node.getIdentifier();
+ }
- /**
- * Sets the index format version
- *
- * @param indexFormatVersion the index format version
- */
- public void setIndexFormatVersion(IndexFormatVersion indexFormatVersion) {
- this.indexFormatVersion = indexFormatVersion;
- }
+ /**
+ * If set to <code>true</code> additional information is stored in the
index
+ * to support highlighting using the rep:excerpt pseudo property.
+ *
+ * @param b <code>true</code> to enable highlighting support.
+ */
+ public void setSupportHighlighting(boolean b)
+ {
+ supportHighlighting = b;
+ }
- /**
- * Sets the indexing configuration for this node indexer.
- *
- * @param config the indexing configuration.
- */
- public void setIndexingConfiguration(IndexingConfiguration config) {
- this.indexingConfig = config;
- }
+ /**
+ * Sets the index format version
+ *
+ * @param indexFormatVersion the index format version
+ */
+ public void setIndexFormatVersion(IndexFormatVersion indexFormatVersion)
+ {
+ this.indexFormatVersion = indexFormatVersion;
+ }
- /**
- * Creates a lucene Document.
- *
- * @return the lucene Document with the index layout.
- * @throws RepositoryException if an error occurs while reading property
- * values from the
<code>ItemStateProvider</code>.
- */
- protected Document createDoc() throws RepositoryException {
- doNotUseInExcerpt.clear();
- Document doc = new Document();
+ /**
+ * Sets the indexing configuration for this node indexer.
+ *
+ * @param config the indexing configuration.
+ */
+ public void setIndexingConfiguration(IndexingConfiguration config)
+ {
+ this.indexingConfig = config;
+ }
- doc.setBoost(getNodeBoost());
+ /**
+ * Creates a lucene Document.
+ *
+ * @return the lucene Document with the index layout.
+ * @throws RepositoryException if an error occurs while reading property
+ * values from the
<code>ItemStateProvider</code>.
+ */
+ protected Document createDoc() throws RepositoryException
+ {
+ doNotUseInExcerpt.clear();
+ Document doc = new Document();
- // special fields
- // UUID
- doc.add(new Field(
- FieldNames.UUID, node.getIdentifier(),
- Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
- try {
-
- if (node.getParentIdentifier() == null ) {
- // root node
- doc.add(new Field(FieldNames.PARENT, "", Field.Store.YES,
- Field.Index.NOT_ANALYZED_NO_NORMS));
- addNodeName(doc, "", "");
- } else {
- addParentChildRelation(doc, node.getParentIdentifier());
-// } else {
- // shareable node
-// for (Iterator it = node.getSharedSet().iterator(); it.hasNext(); ) {
-// addParentChildRelation(doc, (NodeId) it.next());
-// }
- // mark shareable nodes
-// doc.add(new Field(FieldNames.SHAREABLE_NODE, "",
-// Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
- }
- } catch (NamespaceException e) {
- // will never happen, because this.mappings will dynamically add
- // unknown uri<->prefix mappings
- }
+ doc.setBoost(getNodeBoost());
- for (PropertyData prop : stateProvider.listChildPropertiesData(node))
- {
-
- // add each property to the _PROPERTIES_SET for searching
- // beginning with V2
- if (indexFormatVersion.getVersion()
- >= IndexFormatVersion.V2.getVersion()) {
- addPropertyName(doc, prop.getQPath().getName());
- }
+ // special fields
+ // UUID
+ doc.add(new Field(FieldNames.UUID, node.getIdentifier(), Field.Store.YES,
Field.Index.NOT_ANALYZED_NO_NORMS));
+ try
+ {
- addValues(doc, prop);
+ if (node.getParentIdentifier() == null)
+ {
+ // root node
+ doc.add(new Field(FieldNames.PARENT, "", Field.Store.YES,
Field.Index.NOT_ANALYZED_NO_NORMS));
+ addNodeName(doc, "", "");
+ }
+ else
+ {
+ addParentChildRelation(doc, node.getParentIdentifier());
+ // } else {
+ // shareable node
+ // for (Iterator it = node.getSharedSet().iterator();
it.hasNext(); ) {
+ // addParentChildRelation(doc, (NodeId) it.next());
+ // }
+ // mark shareable nodes
+ // doc.add(new Field(FieldNames.SHAREABLE_NODE, "",
+ // Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
+ }
+ }
+ catch (NamespaceException e)
+ {
+ // will never happen, because this.mappings will dynamically add
+ // unknown uri<->prefix mappings
+ }
- }
+ for (PropertyData prop : stateProvider.listChildPropertiesData(node))
+ {
- // now add fields that are not used in excerpt (must go at the end)
- for (Iterator it = doNotUseInExcerpt.iterator(); it.hasNext(); ) {
- doc.add((Fieldable) it.next());
- }
- return doc;
- }
-
- /**
- * Wraps the exception <code>e</code> into a
<code>RepositoryException</code>
- * and throws the created exception.
- *
- * @param e the base exception.
- */
- private void throwRepositoryException(Exception e)
- throws RepositoryException {
- String msg = "Error while indexing node: " + node.getIdentifier() +
" of "
- + "type: " + node.getPrimaryTypeName().getAsString();
- throw new RepositoryException(msg, e);
- }
+ // add each property to the _PROPERTIES_SET for searching
+ // beginning with V2
+ if (indexFormatVersion.getVersion() >= IndexFormatVersion.V2.getVersion())
+ {
+ addPropertyName(doc, prop.getQPath().getName());
+ }
- /**
- * Adds a {@link FieldNames#MVP} field to <code>doc</code> with the
resolved
- * <code>name</code> using the internal search index namespace mapping.
- *
- * @param doc the lucene document.
- * @param name the name of the multi-value property.
- * @throws RepositoryException
- */
- private void addMVPName(Document doc, InternalQName name) throws RepositoryException
{
- try {
- String propName = resolver.createJCRName(name).getAsString();
- doc.add(new Field(FieldNames.MVP, propName, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
- } catch (NamespaceException e) {
- // will never happen, prefixes are created dynamically
- }
- }
+ addValues(doc, prop);
- /**
- * Adds a value to the lucene Document.
- *
- * @param doc the document.
- * @param value the internal value.
- * @param name the name of the property.
- */
- private void addValues(final Document doc, final PropertyData prop) throws
RepositoryException
- {
- int propType = prop.getType();
- String fieldName = resolver.createJCRName(prop.getQPath().getName()).getAsString();
- if (propType == PropertyType.BINARY)
- {
+ }
- List<ValueData> data = null;
- if (node.getQPath().getName().equals(Constants.JCR_CONTENT))
- {
+ // now add fields that are not used in excerpt (must go at the end)
+ for (Iterator it = doNotUseInExcerpt.iterator(); it.hasNext();)
+ {
+ doc.add((Fieldable)it.next());
+ }
+ return doc;
+ }
- // seems nt:file found, try for nt:resource props
- PropertyData pmime =
- (PropertyData)stateProvider.getItemData(node, new
QPathEntry(Constants.JCR_MIMETYPE, 0));
- if (pmime != null)
- {
- // index if have jcr:mimeType sibling for this binary property only
- try
- {
- DocumentReader dreader =
- extractor.getDocumentReader(new
String(pmime.getValues().get(0).getAsByteArray()));
+ /**
+ * Wraps the exception <code>e</code> into a
<code>RepositoryException</code>
+ * and throws the created exception.
+ *
+ * @param e the base exception.
+ */
+ private void throwRepositoryException(Exception e) throws RepositoryException
+ {
+ String msg =
+ "Error while indexing node: " + node.getIdentifier() + " of
" + "type: "
+ + node.getPrimaryTypeName().getAsString();
+ throw new RepositoryException(msg, e);
+ }
- // ok, have a reader
- // if the prop obtainer from cache it will contains a values,
- // otherwise read prop with values from DM
- data =
- prop.getValues().size() > 0 ? prop.getValues() :
((PropertyData)stateProvider.getItemData(node,
- new QPathEntry(Constants.JCR_DATA, 0))).getValues();
- if (data == null)
- log.warn("null value found at property " +
prop.getQPath().getAsString());
+ /**
+ * Adds a {@link FieldNames#MVP} field to <code>doc</code> with the
resolved
+ * <code>name</code> using the internal search index namespace mapping.
+ *
+ * @param doc the lucene document.
+ * @param name the name of the multi-value property.
+ * @throws RepositoryException
+ */
+ private void addMVPName(Document doc, InternalQName name) throws RepositoryException
+ {
+ try
+ {
+ String propName = resolver.createJCRName(name).getAsString();
+ doc.add(new Field(FieldNames.MVP, propName, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS,
+ Field.TermVector.NO));
+ }
+ catch (NamespaceException e)
+ {
+ // will never happen, prefixes are created dynamically
+ }
+ }
- // check the jcr:encoding property
- PropertyData encProp =
- (PropertyData)stateProvider.getItemData(node, new
QPathEntry(Constants.JCR_ENCODING, 0));
+ /**
+ * Adds a value to the lucene Document.
+ *
+ * @param doc the document.
+ * @param value the internal value.
+ * @param name the name of the property.
+ */
+ private void addValues(final Document doc, final PropertyData prop) throws
RepositoryException
+ {
+ int propType = prop.getType();
+ String fieldName =
resolver.createJCRName(prop.getQPath().getName()).getAsString();
+ if (propType == PropertyType.BINARY)
+ {
- if (encProp != null)
- {
- // encoding parameter used
- String encoding = new
String(encProp.getValues().get(0).getAsByteArray());
- for (ValueData pvd : data)
- {
- InputStream is = null;
- try
- {
-
- is = pvd.getAsStream();
- Reader reader = new StringReader(dreader.getContentAsText(is,
encoding));
- doc.add(createFulltextField(reader));
-
- }
- finally
- {
- try
- {
- is.close();
- }
- catch (Throwable e)
- {
- }
- }
- }
- }
- else
- {
- // no encoding parameter
- for (ValueData pvd : data)
- {
- InputStream is = null;
- try
- {
- doc.add(createFulltextField(dreader.getContentAsText(is =
pvd.getAsStream())));
- }
- finally
- {
- try
- {
- is.close();
- }
- catch (Throwable e)
- {
- }
- }
- }
- }
+ List<ValueData> data = null;
+ if (node.getQPath().getName().equals(Constants.JCR_CONTENT))
+ {
- if (data.size() > 1)
- {
- // real multi-valued
- addMVPName(doc, prop.getQPath().getName());
- }
+ // seems nt:file found, try for nt:resource props
+ PropertyData pmime =
+ (PropertyData)stateProvider.getItemData(node, new
QPathEntry(Constants.JCR_MIMETYPE, 0));
+ if (pmime != null)
+ {
+ // index if have jcr:mimeType sibling for this binary property only
+ try
+ {
+ DocumentReader dreader =
+ extractor.getDocumentReader(new
String(pmime.getValues().get(0).getAsByteArray()));
- }
- catch (HandlerNotFoundException e)
- {
- // no handler - no index
- if (log.isDebugEnabled())
- log.warn("This content is not readable " + e);
- }
- catch (IOException e)
- {
- // no data - no index
- if (log.isDebugEnabled())
- log.warn("Binary value indexer IO error " + e, e);
- }
- catch (Exception e)
- {
- log.error("Binary value indexer error " + e, e);
- }
- }
- }
+ // ok, have a reader
+ // if the prop obtainer from cache it will contains a values,
+ // otherwise read prop with values from DM
+ data =
+ prop.getValues().size() > 0 ? prop.getValues() :
((PropertyData)stateProvider.getItemData(node,
+ new QPathEntry(Constants.JCR_DATA, 0))).getValues();
+ if (data == null)
+ log.warn("null value found at property " +
prop.getQPath().getAsString());
- }
- else
- {
- try
- {
- // if the prop obtainer from cache it will contains a values, otherwise
- // read prop with values from DM
- // WARN. DON'T USE access item BY PATH - it's may be a node in case of
- // residual definitions in NT
- List<ValueData> data =
- prop.getValues().size() > 0 ? prop.getValues() :
((PropertyData)stateProvider.getItemData(prop
- .getIdentifier())).getValues();
+ // check the jcr:encoding property
+ PropertyData encProp =
+ (PropertyData)stateProvider.getItemData(node, new
QPathEntry(Constants.JCR_ENCODING, 0));
- if (data == null)
- log.warn("null value found at property " +
prop.getQPath().getAsString());
+ if (encProp != null)
+ {
+ // encoding parameter used
+ String encoding = new
String(encProp.getValues().get(0).getAsByteArray());
+ for (ValueData pvd : data)
+ {
+ InputStream is = null;
+ try
+ {
- ExtendedValue val = null;
- InternalQName name = prop.getQPath().getName();
+ is = pvd.getAsStream();
+ Reader reader = new StringReader(dreader.getContentAsText(is,
encoding));
+ doc.add(createFulltextField(reader));
- for (ValueData value : data)
- {
- val =
(ExtendedValue)vFactory.loadValue(((AbstractValueData)value).createTransientCopy(),
propType);
+ }
+ finally
+ {
+ try
+ {
+ is.close();
+ }
+ catch (Throwable e)
+ {
+ }
+ }
+ }
+ }
+ else
+ {
+ // no encoding parameter
+ for (ValueData pvd : data)
+ {
+ InputStream is = null;
+ try
+ {
+ doc.add(createFulltextField(dreader.getContentAsText(is =
pvd.getAsStream())));
+ }
+ finally
+ {
+ try
+ {
+ is.close();
+ }
+ catch (Throwable e)
+ {
+ }
+ }
+ }
+ }
- switch (propType)
- {
- case PropertyType.BOOLEAN :
- if (isIndexed(name))
- {
- addBooleanValue(doc, fieldName,
Boolean.valueOf(val.getBoolean()));
- }
- break;
- case PropertyType.DATE :
- if (isIndexed(name))
- {
- addCalendarValue(doc, fieldName, val.getDate());
- }
- break;
- case PropertyType.DOUBLE :
- if (isIndexed(name))
- {
- addDoubleValue(doc, fieldName, new Double(val.getDouble()));
- }
- break;
- case PropertyType.LONG :
- if (isIndexed(name))
- {
- addLongValue(doc, fieldName, new Long(val.getLong()));
- }
- break;
- case PropertyType.REFERENCE :
- if (isIndexed(name))
- {
- addReferenceValue(doc, fieldName, val.getString());
- }
- break;
- case PropertyType.PATH :
- if (isIndexed(name))
- {
- addPathValue(doc, fieldName, val.getString());
- }
- break;
- case PropertyType.STRING :
- if (isIndexed(name))
- {
- // never fulltext index jcr:uuid String
- if (name.equals(Constants.JCR_UUID))
- {
- addStringValue(doc, fieldName, val.getString(), false, false,
DEFAULT_BOOST);
- }
- else
- {
- addStringValue(doc, fieldName, val.getString(), true,
isIncludedInNodeIndex(name),
- getPropertyBoost(name),useInExcerpt(name));
- }
- }
- break;
- case PropertyType.NAME :
- // jcr:primaryType and jcr:mixinTypes are required for correct
- // node type resolution in queries
- if (isIndexed(name) || name.equals(Constants.JCR_PRIMARYTYPE)
- || name.equals(Constants.JCR_MIXINTYPES))
- {
- addNameValue(doc, fieldName, val.getString());
- }
- break;
- case ExtendedPropertyType.PERMISSION :
- break;
- default :
- throw new IllegalArgumentException("illegal internal value type
" + propType);
- }
- // add length
- // add not planed
- if (indexFormatVersion.getVersion() >=
IndexFormatVersion.V3.getVersion())
- {
- addLength(doc, fieldName, value, propType);
- }
- }
- if (data.size() > 1)
- // real multi-valued
- addMVPName(doc, prop.getQPath().getName());
- }
- catch (RepositoryException e)
- {
- e.printStackTrace();
- throw new RepositoryException("Index of property value error. " +
prop.getQPath().getAsString() + ". " + e,
- e);
- }
- }
- }
+ if (data.size() > 1)
+ {
+ // real multi-valued
+ addMVPName(doc, prop.getQPath().getName());
+ }
- /**
- * Adds the property name to the lucene _:PROPERTIES_SET field.
- *
- * @param doc the document.
- * @param name the name of the property.
- * @throws RepositoryException
- */
- private void addPropertyName(Document doc, InternalQName name) throws
RepositoryException {
- String fieldName = name.getName();
- try {
- fieldName = resolver.createJCRName(name).getAsString();
- } catch (NamespaceException e) {
- // will never happen
- }
- doc.add(new Field(FieldNames.PROPERTIES_SET, fieldName, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
- }
+ }
+ catch (HandlerNotFoundException e)
+ {
+ // no handler - no index
+ if (log.isDebugEnabled())
+ log.warn("This content is not readable " + e);
+ }
+ catch (IOException e)
+ {
+ // no data - no index
+ if (log.isDebugEnabled())
+ log.warn("Binary value indexer IO error " + e, e);
+ }
+ catch (Exception e)
+ {
+ log.error("Binary value indexer error " + e, e);
+ }
+ }
+ }
+ }
+ else
+ {
+ try
+ {
+ // if the prop obtainer from cache it will contains a values, otherwise
+ // read prop with values from DM
+ // WARN. DON'T USE access item BY PATH - it's may be a node in case
of
+ // residual definitions in NT
+ List<ValueData> data =
+ prop.getValues().size() > 0 ? prop.getValues() :
((PropertyData)stateProvider.getItemData(prop
+ .getIdentifier())).getValues();
+ if (data == null)
+ log.warn("null value found at property " +
prop.getQPath().getAsString());
+ ExtendedValue val = null;
+ InternalQName name = prop.getQPath().getName();
- /**
- * Adds the string representation of the boolean value to the document as
- * the named field.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- */
- protected void addBooleanValue(Document doc, String fieldName, Object internalValue)
{
- doc.add(createFieldWithoutNorms(fieldName, internalValue.toString(),
- PropertyType.BOOLEAN));
- }
+ for (ValueData value : data)
+ {
+ val =
(ExtendedValue)vFactory.loadValue(((AbstractValueData)value).createTransientCopy(),
propType);
- /**
- * Creates a field of name <code>fieldName</code> with the value of
<code>
- * internalValue</code>. The created field is indexed without norms.
- *
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- * @param propertyType the property type.
- */
- protected Field createFieldWithoutNorms(String fieldName,
- String internalValue,
- int propertyType) {
- if (indexFormatVersion.getVersion()
- >= IndexFormatVersion.V3.getVersion()) {
- Field field = new Field(FieldNames.PROPERTIES,
- new SingletonTokenStream(
- FieldNames.createNamedValue(fieldName, internalValue),
- propertyType)
- );
- field.setOmitNorms(true);
- return field;
- } else {
- return new Field(FieldNames.PROPERTIES,
- FieldNames.createNamedValue(fieldName, internalValue),
- Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS,
- Field.TermVector.NO);
- }
- }
+ switch (propType)
+ {
+ case PropertyType.BOOLEAN :
+ if (isIndexed(name))
+ {
+ addBooleanValue(doc, fieldName,
Boolean.valueOf(val.getBoolean()));
+ }
+ break;
+ case PropertyType.DATE :
+ if (isIndexed(name))
+ {
+ addCalendarValue(doc, fieldName, val.getDate());
+ }
+ break;
+ case PropertyType.DOUBLE :
+ if (isIndexed(name))
+ {
+ addDoubleValue(doc, fieldName, new Double(val.getDouble()));
+ }
+ break;
+ case PropertyType.LONG :
+ if (isIndexed(name))
+ {
+ addLongValue(doc, fieldName, new Long(val.getLong()));
+ }
+ break;
+ case PropertyType.REFERENCE :
+ if (isIndexed(name))
+ {
+ addReferenceValue(doc, fieldName, val.getString());
+ }
+ break;
+ case PropertyType.PATH :
+ if (isIndexed(name))
+ {
+ addPathValue(doc, fieldName, val.getString());
+ }
+ break;
+ case PropertyType.STRING :
+ if (isIndexed(name))
+ {
+ // never fulltext index jcr:uuid String
+ if (name.equals(Constants.JCR_UUID))
+ {
+ addStringValue(doc, fieldName, val.getString(), false, false,
DEFAULT_BOOST);
+ }
+ else
+ {
+ addStringValue(doc, fieldName, val.getString(), true,
isIncludedInNodeIndex(name),
+ getPropertyBoost(name), useInExcerpt(name));
+ }
+ }
+ break;
+ case PropertyType.NAME :
+ // jcr:primaryType and jcr:mixinTypes are required for correct
+ // node type resolution in queries
+ if (isIndexed(name) || name.equals(Constants.JCR_PRIMARYTYPE)
+ || name.equals(Constants.JCR_MIXINTYPES))
+ {
+ addNameValue(doc, fieldName, val.getString());
+ }
+ break;
+ case ExtendedPropertyType.PERMISSION :
+ break;
+ default :
+ throw new IllegalArgumentException("illegal internal value type
" + propType);
+ }
+ // add length
+ // add not planed
+ if (indexFormatVersion.getVersion() >=
IndexFormatVersion.V3.getVersion())
+ {
+ addLength(doc, fieldName, value, propType);
+ }
+ }
+ if (data.size() > 1)
+ // real multi-valued
+ addMVPName(doc, prop.getQPath().getName());
+ }
+ catch (RepositoryException e)
+ {
+ e.printStackTrace();
+ throw new RepositoryException("Index of property value error. " +
prop.getQPath().getAsString() + ". " + e,
+ e);
+ }
+ }
+ }
- /**
- * Adds the calendar value to the document as the named field. The calendar
- * value is converted to an indexable string value using the
- * {@link DateField} class.
- *
- * @param doc
- * The document to which to add the field
- * @param fieldName
- * The name of the field to add
- * @param internalValue
- * The value for the field to add to the document.
- */
- protected void addCalendarValue(Document doc, String fieldName, Object internalValue)
{
- Calendar value = (Calendar) internalValue;
- long millis = value.getTimeInMillis();
- try {
- doc.add(createFieldWithoutNorms(fieldName, DateField.timeToString(millis),
- PropertyType.DATE));
- } catch (IllegalArgumentException e) {
- log.warn("'{}' is outside of supported date value range.",
- new Date(value.getTimeInMillis()));
- }
- }
+ /**
+ * Adds the property name to the lucene _:PROPERTIES_SET field.
+ *
+ * @param doc the document.
+ * @param name the name of the property.
+ * @throws RepositoryException
+ */
+ private void addPropertyName(Document doc, InternalQName name) throws
RepositoryException
+ {
+ String fieldName = name.getName();
+ try
+ {
+ fieldName = resolver.createJCRName(name).getAsString();
+ }
+ catch (NamespaceException e)
+ {
+ // will never happen
+ }
+ doc.add(new Field(FieldNames.PROPERTIES_SET, fieldName, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
+ }
- /**
- * Adds the double value to the document as the named field. The double
- * value is converted to an indexable string value using the
- * {@link DoubleField} class.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- */
- protected void addDoubleValue(Document doc, String fieldName, Object internalValue)
{
- double doubleVal = ((Double) internalValue).doubleValue();
- doc.add(createFieldWithoutNorms(fieldName,
DoubleField.doubleToString(doubleVal),
- PropertyType.DOUBLE));
- }
+ /**
+ * Adds the string representation of the boolean value to the document as
+ * the named field.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ */
+ protected void addBooleanValue(Document doc, String fieldName, Object internalValue)
+ {
+ doc.add(createFieldWithoutNorms(fieldName, internalValue.toString(),
PropertyType.BOOLEAN));
+ }
- /**
- * Adds the long value to the document as the named field. The long
- * value is converted to an indexable string value using the {@link LongField}
- * class.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- */
- protected void addLongValue(Document doc, String fieldName, Object internalValue) {
- long longVal = ((Long) internalValue).longValue();
- doc.add(createFieldWithoutNorms(fieldName, LongField.longToString(longVal),
- PropertyType.LONG));
- }
+ /**
+ * Creates a field of name <code>fieldName</code> with the value of
<code>
+ * internalValue</code>. The created field is indexed without norms.
+ *
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ * @param propertyType the property type.
+ */
+ protected Field createFieldWithoutNorms(String fieldName, String internalValue, int
propertyType)
+ {
+ if (indexFormatVersion.getVersion() >= IndexFormatVersion.V3.getVersion())
+ {
+ Field field =
+ new Field(FieldNames.PROPERTIES, new
SingletonTokenStream(FieldNames.createNamedValue(fieldName,
+ internalValue), propertyType));
+ field.setOmitNorms(true);
+ return field;
+ }
+ else
+ {
+ return new Field(FieldNames.PROPERTIES, FieldNames.createNamedValue(fieldName,
internalValue), Field.Store.NO,
+ Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO);
+ }
+ }
- /**
- * Adds the reference value to the document as the named field. The value's
- * string representation is added as the reference data. Additionally the
- * reference data is stored in the index.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- */
- protected void addReferenceValue(Document doc, String fieldName, Object
internalValue) {
- String uuid = internalValue.toString();
- doc.add(createFieldWithoutNorms(fieldName, uuid,
- PropertyType.REFERENCE));
- doc.add(new Field(FieldNames.PROPERTIES,
- FieldNames.createNamedValue(fieldName, uuid),
- Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
- }
+ /**
+ * Adds the calendar value to the document as the named field. The calendar
+ * value is converted to an indexable string value using the
+ * {@link DateField} class.
+ *
+ * @param doc
+ * The document to which to add the field
+ * @param fieldName
+ * The name of the field to add
+ * @param internalValue
+ * The value for the field to add to the document.
+ */
+ protected void addCalendarValue(Document doc, String fieldName, Object internalValue)
+ {
+ Calendar value = (Calendar)internalValue;
+ long millis = value.getTimeInMillis();
+ try
+ {
+ doc.add(createFieldWithoutNorms(fieldName, DateField.timeToString(millis),
PropertyType.DATE));
+ }
+ catch (IllegalArgumentException e)
+ {
+ log.warn("'{}' is outside of supported date value range.", new
Date(value.getTimeInMillis()));
+ }
+ }
- /**
- * Adds the path value to the document as the named field. The path
- * value is converted to an indexable string value using the name space
- * mappings with which this class has been created.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- */
- protected void addPathValue(Document doc, String fieldName, Object pathString) {
+ /**
+ * Adds the double value to the document as the named field. The double
+ * value is converted to an indexable string value using the
+ * {@link DoubleField} class.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ */
+ protected void addDoubleValue(Document doc, String fieldName, Object internalValue)
+ {
+ double doubleVal = ((Double)internalValue).doubleValue();
+ doc.add(createFieldWithoutNorms(fieldName, DoubleField.doubleToString(doubleVal),
PropertyType.DOUBLE));
+ }
- doc.add(createFieldWithoutNorms(fieldName, pathString.toString(),
- PropertyType.PATH));
- }
+ /**
+ * Adds the long value to the document as the named field. The long
+ * value is converted to an indexable string value using the {@link LongField}
+ * class.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ */
+ protected void addLongValue(Document doc, String fieldName, Object internalValue)
+ {
+ long longVal = ((Long)internalValue).longValue();
+ doc.add(createFieldWithoutNorms(fieldName, LongField.longToString(longVal),
PropertyType.LONG));
+ }
- /**
- * Adds the string value to the document both as the named field and for
- * full text indexing.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- * @deprecated Use {@link #addStringValue(Document, String, Object, boolean)
- * addStringValue(Document, String, Object, boolean)} instead.
- */
- protected void addStringValue(Document doc, String fieldName, Object internalValue)
{
- addStringValue(doc, fieldName, internalValue, true, true, DEFAULT_BOOST);
- }
+ /**
+ * Adds the reference value to the document as the named field. The value's
+ * string representation is added as the reference data. Additionally the
+ * reference data is stored in the index.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ */
+ protected void addReferenceValue(Document doc, String fieldName, Object
internalValue)
+ {
+ String uuid = internalValue.toString();
+ doc.add(createFieldWithoutNorms(fieldName, uuid, PropertyType.REFERENCE));
+ doc.add(new Field(FieldNames.PROPERTIES, FieldNames.createNamedValue(fieldName,
uuid), Field.Store.YES,
+ Field.Index.NO, Field.TermVector.NO));
+ }
- /**
- * Adds the string value to the document both as the named field and
- * optionally for full text indexing if <code>tokenized</code> is
- * <code>true</code>.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- * @param tokenized If <code>true</code> the string is also
tokenized
- * and fulltext indexed.
- */
- protected void addStringValue(Document doc, String fieldName,
- Object internalValue, boolean tokenized) {
- addStringValue(doc, fieldName, internalValue, tokenized, true, DEFAULT_BOOST);
- }
+ /**
+ * Adds the path value to the document as the named field. The path
+ * value is converted to an indexable string value using the name space
+ * mappings with which this class has been created.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ */
+ protected void addPathValue(Document doc, String fieldName, Object pathString)
+ {
- /**
- * Adds the string value to the document both as the named field and
- * optionally for full text indexing if <code>tokenized</code> is
- * <code>true</code>.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the
- * document.
- * @param tokenized If <code>true</code> the string is also
- * tokenized and fulltext indexed.
- * @param includeInNodeIndex If <code>true</code> the string is also
- * tokenized and added to the node scope fulltext
- * index.
- * @param boost the boost value for this string field.
- * @deprecated use {@link #addStringValue(Document, String, Object, boolean, boolean,
float, boolean)} instead.
- */
- protected void addStringValue(Document doc, String fieldName,
- Object internalValue, boolean tokenized,
- boolean includeInNodeIndex, float boost) {
- addStringValue(doc, fieldName, internalValue, tokenized, includeInNodeIndex,
boost, true);
- }
+ doc.add(createFieldWithoutNorms(fieldName, pathString.toString(),
PropertyType.PATH));
+ }
- /**
- * Adds the string value to the document both as the named field and
- * optionally for full text indexing if <code>tokenized</code> is
- * <code>true</code>.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the
- * document.
- * @param tokenized If <code>true</code> the string is also
- * tokenized and fulltext indexed.
- * @param includeInNodeIndex If <code>true</code> the string is also
- * tokenized and added to the node scope fulltext
- * index.
- * @param boost the boost value for this string field.
- * @param useInExcerpt If <code>true</code> the string may show up
in
- * an excerpt.
- */
- protected void addStringValue(Document doc, String fieldName,
- Object internalValue, boolean tokenized,
- boolean includeInNodeIndex, float boost,
- boolean useInExcerpt) {
+ /**
+ * Adds the string value to the document both as the named field and for
+ * full text indexing.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ * @deprecated Use {@link #addStringValue(Document, String, Object, boolean)
+ * addStringValue(Document, String, Object, boolean)} instead.
+ */
+ protected void addStringValue(Document doc, String fieldName, Object internalValue)
+ {
+ addStringValue(doc, fieldName, internalValue, true, true, DEFAULT_BOOST);
+ }
- // simple String
- String stringValue = (String) internalValue;
- doc.add(createFieldWithoutNorms(fieldName, stringValue,
- PropertyType.STRING));
- if (tokenized) {
- if (stringValue.length() == 0) {
- return;
- }
- // create fulltext index on property
- int idx = fieldName.indexOf(':');
- fieldName = fieldName.substring(0, idx + 1)
- + FieldNames.FULLTEXT_PREFIX + fieldName.substring(idx + 1);
- Field f = new Field(fieldName, stringValue,
- Field.Store.NO,
- Field.Index.ANALYZED,
- Field.TermVector.NO);
- f.setBoost(boost);
- doc.add(f);
+ /**
+ * Adds the string value to the document both as the named field and
+ * optionally for full text indexing if <code>tokenized</code> is
+ * <code>true</code>.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ * @param tokenized If <code>true</code> the string is also tokenized
+ * and fulltext indexed.
+ */
+ protected void addStringValue(Document doc, String fieldName, Object internalValue,
boolean tokenized)
+ {
+ addStringValue(doc, fieldName, internalValue, tokenized, true, DEFAULT_BOOST);
+ }
- if (includeInNodeIndex) {
- // also create fulltext index of this value
- boolean store = supportHighlighting && useInExcerpt;
- f = createFulltextField(stringValue, store, supportHighlighting);
- if (useInExcerpt) {
- doc.add(f);
- } else {
- doNotUseInExcerpt.add(f);
- }
- }
- }
- }
+ /**
+ * Adds the string value to the document both as the named field and
+ * optionally for full text indexing if <code>tokenized</code> is
+ * <code>true</code>.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the
+ * document.
+ * @param tokenized If <code>true</code> the string is also
+ * tokenized and fulltext indexed.
+ * @param includeInNodeIndex If <code>true</code> the string is also
+ * tokenized and added to the node scope fulltext
+ * index.
+ * @param boost the boost value for this string field.
+ * @deprecated use {@link #addStringValue(Document, String, Object, boolean, boolean,
float, boolean)} instead.
+ */
+ protected void addStringValue(Document doc, String fieldName, Object internalValue,
boolean tokenized,
+ boolean includeInNodeIndex, float boost)
+ {
+ addStringValue(doc, fieldName, internalValue, tokenized, includeInNodeIndex, boost,
true);
+ }
- /**
- * Adds the name value to the document as the named field. The name
- * value is converted to an indexable string treating the internal value
- * as a qualified name and mapping the name space using the name space
- * mappings with which this class has been created.
- *
- * @param doc The document to which to add the field
- * @param fieldName The name of the field to add
- * @param internalValue The value for the field to add to the document.
- */
- protected void addNameValue(Document doc, String fieldName, Object internalValue) {
- doc.add(createFieldWithoutNorms(fieldName, internalValue.toString(),
- PropertyType.NAME));
- }
+ /**
+ * Adds the string value to the document both as the named field and
+ * optionally for full text indexing if <code>tokenized</code> is
+ * <code>true</code>.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the
+ * document.
+ * @param tokenized If <code>true</code> the string is also
+ * tokenized and fulltext indexed.
+ * @param includeInNodeIndex If <code>true</code> the string is also
+ * tokenized and added to the node scope fulltext
+ * index.
+ * @param boost the boost value for this string field.
+ * @param useInExcerpt If <code>true</code> the string may show up
in
+ * an excerpt.
+ */
+ protected void addStringValue(Document doc, String fieldName, Object internalValue,
boolean tokenized,
+ boolean includeInNodeIndex, float boost, boolean useInExcerpt)
+ {
- /**
- * Creates a fulltext field for the string <code>value</code>.
- *
- * @param value the string value.
- * @return a lucene field.
- * @deprecated use {@link #createFulltextField(String, boolean, boolean)} instead.
- */
- protected Field createFulltextField(String value) {
- return createFulltextField(value, supportHighlighting, supportHighlighting);
- }
+ // simple String
+ String stringValue = (String)internalValue;
+ doc.add(createFieldWithoutNorms(fieldName, stringValue, PropertyType.STRING));
+ if (tokenized)
+ {
+ if (stringValue.length() == 0)
+ {
+ return;
+ }
+ // create fulltext index on property
+ int idx = fieldName.indexOf(':');
+ fieldName = fieldName.substring(0, idx + 1) + FieldNames.FULLTEXT_PREFIX +
fieldName.substring(idx + 1);
+ Field f = new Field(fieldName, stringValue, Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.NO);
+ f.setBoost(boost);
+ doc.add(f);
- /**
- * Creates a fulltext field for the string <code>value</code>.
- *
- * @param value the string value.
- * @param store if the value of the field should be stored.
- * @param withOffsets if a term vector with offsets should be stored.
- * @return a lucene field.
- */
- protected Field createFulltextField(String value,
- boolean store,
- boolean withOffsets) {
- Field.TermVector tv;
- if (withOffsets) {
- tv = Field.TermVector.WITH_OFFSETS;
- } else {
- tv = Field.TermVector.NO;
- }
- if (store) {
- // store field compressed if greater than 16k
- Field.Store stored;
- if (value.length() > 0x4000) {
- stored = Field.Store.COMPRESS;
- } else {
- stored = Field.Store.YES;
+ if (includeInNodeIndex)
+ {
+ // also create fulltext index of this value
+ boolean store = supportHighlighting && useInExcerpt;
+ f = createFulltextField(stringValue, store, supportHighlighting);
+ if (useInExcerpt)
+ {
+ doc.add(f);
}
- return new Field(FieldNames.FULLTEXT, value, stored,
- Field.Index.ANALYZED, tv);
- } else {
- return new Field(FieldNames.FULLTEXT, value,
- Field.Store.NO, Field.Index.ANALYZED, tv);
- }
- }
+ else
+ {
+ doNotUseInExcerpt.add(f);
+ }
+ }
+ }
+ }
- /**
- * Creates a fulltext field for the reader <code>value</code>.
- *
- * @param value the reader value.
- * @return a lucene field.
- */
- protected Fieldable createFulltextField(Reader value) {
- if (supportHighlighting) {
- return new LazyTextExtractorField(FieldNames.FULLTEXT, value, true, true);
- } else {
- return new LazyTextExtractorField(FieldNames.FULLTEXT, value, false, false);
- }
- }
+ /**
+ * Adds the name value to the document as the named field. The name
+ * value is converted to an indexable string treating the internal value
+ * as a qualified name and mapping the name space using the name space
+ * mappings with which this class has been created.
+ *
+ * @param doc The document to which to add the field
+ * @param fieldName The name of the field to add
+ * @param internalValue The value for the field to add to the document.
+ */
+ protected void addNameValue(Document doc, String fieldName, Object internalValue)
+ {
+ doc.add(createFieldWithoutNorms(fieldName, internalValue.toString(),
PropertyType.NAME));
+ }
- /**
- * Returns <code>true</code> if the property with the given name should
be
- * indexed.
- *
- * @param propertyName name of a property.
- * @return <code>true</code> if the property should be fulltext indexed;
- * <code>false</code> otherwise.
- */
- protected boolean isIndexed(InternalQName propertyName) {
- if (indexingConfig == null) {
- return true;
- } else {
- return indexingConfig.isIndexed(node, propertyName);
- }
- }
+ /**
+ * Creates a fulltext field for the string <code>value</code>.
+ *
+ * @param value the string value.
+ * @return a lucene field.
+ * @deprecated use {@link #createFulltextField(String, boolean, boolean)} instead.
+ */
+ protected Field createFulltextField(String value)
+ {
+ return createFulltextField(value, supportHighlighting, supportHighlighting);
+ }
- /**
- * Returns <code>true</code> if the property with the given name should
also
- * be added to the node scope index.
- *
- * @param propertyName the name of a property.
- * @return <code>true</code> if it should be added to the node scope
index;
- * <code>false</code> otherwise.
- */
- protected boolean isIncludedInNodeIndex(InternalQName propertyName) {
- if (indexingConfig == null) {
- return true;
- } else {
- return indexingConfig.isIncludedInNodeScopeIndex(node, propertyName);
- }
- }
+ /**
+ * Creates a fulltext field for the string <code>value</code>.
+ *
+ * @param value the string value.
+ * @param store if the value of the field should be stored.
+ * @param withOffsets if a term vector with offsets should be stored.
+ * @return a lucene field.
+ */
+ protected Field createFulltextField(String value, boolean store, boolean withOffsets)
+ {
+ Field.TermVector tv;
+ if (withOffsets)
+ {
+ tv = Field.TermVector.WITH_OFFSETS;
+ }
+ else
+ {
+ tv = Field.TermVector.NO;
+ }
+ if (store)
+ {
+ // store field compressed if greater than 16k
+ Field.Store stored;
+ if (value.length() > 0x4000)
+ {
+ stored = Field.Store.COMPRESS;
+ }
+ else
+ {
+ stored = Field.Store.YES;
+ }
+ return new Field(FieldNames.FULLTEXT, value, stored, Field.Index.ANALYZED, tv);
+ }
+ else
+ {
+ return new Field(FieldNames.FULLTEXT, value, Field.Store.NO,
Field.Index.ANALYZED, tv);
+ }
+ }
- /**
- * Returns <code>true</code> if the content of the property with the
given
- * name should the used to create an excerpt.
- *
- * @param propertyName the name of a property.
- * @return <code>true</code> if it should be used to create an excerpt;
- * <code>false</code> otherwise.
- */
- protected boolean useInExcerpt(InternalQName propertyName) {
- if (indexingConfig == null) {
- return true;
- } else {
- return indexingConfig.useInExcerpt(node, propertyName);
- }
- }
+ /**
+ * Creates a fulltext field for the reader <code>value</code>.
+ *
+ * @param value the reader value.
+ * @return a lucene field.
+ */
+ protected Fieldable createFulltextField(Reader value)
+ {
+ if (supportHighlighting)
+ {
+ return new LazyTextExtractorField(FieldNames.FULLTEXT, value, true, true);
+ }
+ else
+ {
+ return new LazyTextExtractorField(FieldNames.FULLTEXT, value, false, false);
+ }
+ }
- /**
- * Returns the boost value for the given property name.
- *
- * @param propertyName the name of a property.
- * @return the boost value for the given property name.
- */
- protected float getPropertyBoost(InternalQName propertyName) {
- if (indexingConfig == null) {
- return DEFAULT_BOOST;
- } else {
- return indexingConfig.getPropertyBoost(node, propertyName);
- }
- }
+ /**
+ * Returns <code>true</code> if the property with the given name should
be
+ * indexed.
+ *
+ * @param propertyName name of a property.
+ * @return <code>true</code> if the property should be fulltext indexed;
+ * <code>false</code> otherwise.
+ */
+ protected boolean isIndexed(InternalQName propertyName)
+ {
+ if (indexingConfig == null)
+ {
+ return true;
+ }
+ else
+ {
+ return indexingConfig.isIndexed(node, propertyName);
+ }
+ }
- /**
- * @return the boost value for this {@link #node} state.
- */
- protected float getNodeBoost() {
- if (indexingConfig == null) {
- return DEFAULT_BOOST;
- } else {
- return indexingConfig.getNodeBoost(node);
- }
- }
+ /**
+ * Returns <code>true</code> if the property with the given name should
also
+ * be added to the node scope index.
+ *
+ * @param propertyName the name of a property.
+ * @return <code>true</code> if it should be added to the node scope
index;
+ * <code>false</code> otherwise.
+ */
+ protected boolean isIncludedInNodeIndex(InternalQName propertyName)
+ {
+ if (indexingConfig == null)
+ {
+ return true;
+ }
+ else
+ {
+ return indexingConfig.isIncludedInNodeScopeIndex(node, propertyName);
+ }
+ }
- /**
- * Adds a {@link FieldNames#PROPERTY_LENGTHS} field to
<code>document</code>
- * with a named length value.
- *
- * @param doc the lucene document.
- * @param propertyName the property name.
- * @param value the internal value.
- * @param propType
- */
- protected void addLength(Document doc,
- String propertyName,
- ValueData value, int propType) {
- long length = Util.getLength(value,propType);
- if (length != -1) {
- doc.add(new Field(FieldNames.PROPERTY_LENGTHS,
- FieldNames.createNamedLength(propertyName, length),
- Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
- }
- }
+ /**
+ * Returns <code>true</code> if the content of the property with the
given
-     * name should be used to create an excerpt.
+ *
+ * @param propertyName the name of a property.
+ * @return <code>true</code> if it should be used to create an excerpt;
+ * <code>false</code> otherwise.
+ */
+ protected boolean useInExcerpt(InternalQName propertyName)
+ {
+ if (indexingConfig == null)
+ {
+ return true;
+ }
+ else
+ {
+ return indexingConfig.useInExcerpt(node, propertyName);
+ }
+ }
- /**
- * Depending on the index format version adds one or two fields to the
- * document for the node name.
- *
- * @param doc the lucene document.
- * @param namespaceURI the namespace URI of the node name.
- * @param localName the local name of the node.
- * @throws RepositoryException
- */
- protected void addNodeName(Document doc,
- String namespaceURI,
- String localName) throws RepositoryException {
- String name = mappings.getNamespacePrefixByURI(namespaceURI) + ":" +
localName;
- doc.add(new Field(FieldNames.LABEL, name, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
- // as of version 3, also index combination of namespace URI and local name
- if (indexFormatVersion.getVersion() >= IndexFormatVersion.V3.getVersion()) {
- doc.add(new Field(FieldNames.NAMESPACE_URI, namespaceURI, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(FieldNames.LOCAL_NAME, localName, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
- }
- }
+ /**
+ * Returns the boost value for the given property name.
+ *
+ * @param propertyName the name of a property.
+ * @return the boost value for the given property name.
+ */
+ protected float getPropertyBoost(InternalQName propertyName)
+ {
+ if (indexingConfig == null)
+ {
+ return DEFAULT_BOOST;
+ }
+ else
+ {
+ return indexingConfig.getPropertyBoost(node, propertyName);
+ }
+ }
- /**
- * Adds a parent child relation to the given <code>doc</code>.
- *
- * @param doc the document.
- * @param parentId the id of the parent node.
- * @throws ItemStateException if the parent node cannot be read.
- * @throws RepositoryException if the parent node does not have a child node
- * entry for the current node.
- */
- protected void addParentChildRelation(Document doc,
- String parentId )
- throws RepositoryException {
- doc.add(new Field(
- FieldNames.PARENT, parentId,
- Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS,
Field.TermVector.NO));
-// NodeState parent = (NodeState) stateProvider.getItemState(parentId);
-// ChildNodeEntry child = parent.getChildNodeEntry(node.getNodeId());
-// if (child == null) {
-// // this can only happen when jackrabbit
-// // is running in a cluster.
-// throw new RepositoryException(
-// "Missing child node entry for node with id: "
-// + node.getNodeId());
-// }
- InternalQName name = node.getQPath().getName();
- addNodeName(doc, name.getNamespace(), name.getName());
- }
+ /**
+ * @return the boost value for this {@link #node} state.
+ */
+ protected float getNodeBoost()
+ {
+ if (indexingConfig == null)
+ {
+ return DEFAULT_BOOST;
+ }
+ else
+ {
+ return indexingConfig.getNodeBoost(node);
+ }
+ }
+
+ /**
+ * Adds a {@link FieldNames#PROPERTY_LENGTHS} field to
<code>document</code>
+ * with a named length value.
+ *
+ * @param doc the lucene document.
+ * @param propertyName the property name.
+ * @param value the internal value.
+ * @param propType
+ */
+ protected void addLength(Document doc, String propertyName, ValueData value, int
propType)
+ {
+ long length = Util.getLength(value, propType);
+ if (length != -1)
+ {
+ doc.add(new Field(FieldNames.PROPERTY_LENGTHS,
FieldNames.createNamedLength(propertyName, length),
+ Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+ }
+ }
+
+ /**
+ * Depending on the index format version adds one or two fields to the
+ * document for the node name.
+ *
+ * @param doc the lucene document.
+ * @param namespaceURI the namespace URI of the node name.
+ * @param localName the local name of the node.
+ * @throws RepositoryException
+ */
+ protected void addNodeName(Document doc, String namespaceURI, String localName) throws
RepositoryException
+ {
+ String name = mappings.getNamespacePrefixByURI(namespaceURI) + ":" +
localName;
+ doc.add(new Field(FieldNames.LABEL, name, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
+ // as of version 3, also index combination of namespace URI and local name
+ if (indexFormatVersion.getVersion() >= IndexFormatVersion.V3.getVersion())
+ {
+ doc.add(new Field(FieldNames.NAMESPACE_URI, namespaceURI, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new Field(FieldNames.LOCAL_NAME, localName, Field.Store.NO,
Field.Index.NOT_ANALYZED_NO_NORMS));
+ }
+ }
+
+ /**
+ * Adds a parent child relation to the given <code>doc</code>.
+ *
+ * @param doc the document.
+ * @param parentId the id of the parent node.
+ * @throws ItemStateException if the parent node cannot be read.
+ * @throws RepositoryException if the parent node does not have a child node
+ * entry for the current node.
+ */
+ protected void addParentChildRelation(Document doc, String parentId) throws
RepositoryException
+ {
+ doc.add(new Field(FieldNames.PARENT, parentId, Field.Store.YES,
Field.Index.NOT_ANALYZED_NO_NORMS,
+ Field.TermVector.NO));
+ // NodeState parent = (NodeState) stateProvider.getItemState(parentId);
+ // ChildNodeEntry child = parent.getChildNodeEntry(node.getNodeId());
+ // if (child == null) {
+ // // this can only happen when jackrabbit
+ // // is running in a cluster.
+ // throw new RepositoryException(
+ // "Missing child node entry for node with id: "
+ // + node.getNodeId());
+ // }
+ InternalQName name = node.getQPath().getName();
+ addNodeName(doc, name.getNamespace(), name.getName());
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RowIteratorImpl.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RowIteratorImpl.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RowIteratorImpl.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -52,732 +52,827 @@
* Implements the {@link javax.jcr.query.RowIterator} interface returned by a
* {@link javax.jcr.query.QueryResult}.
*/
-class RowIteratorImpl implements RowIterator {
+class RowIteratorImpl implements RowIterator
+{
- /**
- * The logger instance for this class.
- */
- private static final Logger log = LoggerFactory
- .getLogger(RowIteratorImpl.class);
+ /**
+ * The logger instance for this class.
+ */
+ private static final Logger log = LoggerFactory.getLogger(RowIteratorImpl.class);
- /**
- * The QValue factory.
- */
- // private static final QValueFactory QVALUE_FACTORY =
- // QValueFactoryImpl.getInstance();
+ /**
+ * The QValue factory.
+ */
+ // private static final QValueFactory QVALUE_FACTORY =
+ // QValueFactoryImpl.getInstance();
- /**
- * The name of the excerpt function without prefix but with left
- * parenthesis.
- */
- private static final String EXCERPT_FUNC_LPAR = "excerpt(";
+ /**
+ * The name of the excerpt function without prefix but with left
+ * parenthesis.
+ */
+ private static final String EXCERPT_FUNC_LPAR = "excerpt(";
- /**
- * The name of the spell check function without prefix but with left
- * parenthesis.
- */
- private static final String SPELLCHECK_FUNC_LPAR = "spellcheck(";
+ /**
+ * The name of the spell check function without prefix but with left
+ * parenthesis.
+ */
+ private static final String SPELLCHECK_FUNC_LPAR = "spellcheck(";
- /**
- * The start Name for the rep:excerpt function: rep:excerpt(
- */
- private static final InternalQName REP_EXCERPT_LPAR = new InternalQName(
- Constants.NS_REP_URI, EXCERPT_FUNC_LPAR);
+ /**
+ * The start Name for the rep:excerpt function: rep:excerpt(
+ */
+ private static final InternalQName REP_EXCERPT_LPAR = new
InternalQName(Constants.NS_REP_URI, EXCERPT_FUNC_LPAR);
- /**
- * Iterator over nodes, that constitute the result set.
- */
- private final ScoreNodeIterator scoreNodes;
+ /**
+ * Iterator over nodes, that constitute the result set.
+ */
+ private final ScoreNodeIterator scoreNodes;
- /**
- * Array of select property names
- */
- private final InternalQName[] properties;
+ /**
+ * Array of select property names
+ */
+ private final InternalQName[] properties;
- /**
- * Set of select property <code>Name</code>s.
- */
- private Set propertySet;
+ /**
+ * Set of select property <code>Name</code>s.
+ */
+ private Set propertySet;
- /**
- * List of valid selector {@link Name}s.
- */
- private final List selectorNames = new ArrayList();
+ /**
+ * List of valid selector {@link Name}s.
+ */
+ private final List selectorNames = new ArrayList();
- /**
- * The item manager of the session that executes the query.
- */
- private final SessionDataManager itemMgr;
+ /**
+ * The item manager of the session that executes the query.
+ */
+ private final SessionDataManager itemMgr;
- /**
- * The <code>NamePathResolver</code> of the user
<code>Session</code>.
- */
- private final LocationFactory resolver;
+ /**
+ * The <code>NamePathResolver</code> of the user
<code>Session</code>.
+ */
+ private final LocationFactory resolver;
- /**
- * The excerpt provider or <code>null</code> if none is available.
- */
- private final ExcerptProvider excerptProvider;
+ /**
+ * The excerpt provider or <code>null</code> if none is available.
+ */
+ private final ExcerptProvider excerptProvider;
- /**
- * The spell suggestion or <code>null</code> if none is available.
- */
- private final SpellSuggestion spellSuggestion;
+ /**
+ * The spell suggestion or <code>null</code> if none is available.
+ */
+ private final SpellSuggestion spellSuggestion;
- /**
- * A value factory for the session that executes the query.
- */
- private final ValueFactory valueFactory;
+ /**
+ * A value factory for the session that executes the query.
+ */
+ private final ValueFactory valueFactory;
- /**
- * Creates a new <code>RowIteratorImpl</code> that iterates over the
result
- * nodes.
- *
- * @param scoreNodes
- * a <code>ScoreNodeIterator</code> that contains the nodes
of
- * the query result.
- * @param properties
- * <code>Name</code> of the select properties.
- * @param selectorNames
- * the selector names.
- * @param itemMgr
- * the item manager of the session that executes the query.
- * @param hmgr
- * the hierarchy manager of the workspace.
- * @param resolver
- * <code>NamespaceResolver</code> of the user
- * <code>Session</code>.
- * @param exProvider
- * the excerpt provider associated with the query result that
- * created this row iterator.
- * @param spellSuggestion
- * the spell suggestion associated with the query result or
- * <code>null</code> if none is available.
- */
+ /**
+ * Creates a new <code>RowIteratorImpl</code> that iterates over the
result
+ * nodes.
+ *
+ * @param scoreNodes
+ * a <code>ScoreNodeIterator</code> that contains the nodes of
+ * the query result.
+ * @param properties
+ * <code>Name</code> of the select properties.
+ * @param selectorNames
+ * the selector names.
+ * @param itemMgr
+ * the item manager of the session that executes the query.
+ * @param hmgr
+ * the hierarchy manager of the workspace.
+ * @param resolver
+ * <code>NamespaceResolver</code> of the user
+ * <code>Session</code>.
+ * @param exProvider
+ * the excerpt provider associated with the query result that
+ * created this row iterator.
+ * @param spellSuggestion
+ * the spell suggestion associated with the query result or
+ * <code>null</code> if none is available.
+ */
- RowIteratorImpl(ScoreNodeIterator scoreNodes, InternalQName[] properties,
- InternalQName[] selectorNames, SessionDataManager itemMgr,
+ RowIteratorImpl(ScoreNodeIterator scoreNodes, InternalQName[] properties,
InternalQName[] selectorNames,
+ SessionDataManager itemMgr,
- LocationFactory resolver, ExcerptProvider exProvider,
- SpellSuggestion spellSuggestion) {
- this.scoreNodes = scoreNodes;
- this.properties = properties;
- this.selectorNames.addAll(Arrays.asList(selectorNames));
- this.itemMgr = itemMgr;
- this.resolver = resolver;
- this.excerptProvider = exProvider;
- this.spellSuggestion = spellSuggestion;
- this.valueFactory = new ValueFactoryImpl(resolver);
- }
+ LocationFactory resolver, ExcerptProvider exProvider, SpellSuggestion
spellSuggestion)
+ {
+ this.scoreNodes = scoreNodes;
+ this.properties = properties;
+ this.selectorNames.addAll(Arrays.asList(selectorNames));
+ this.itemMgr = itemMgr;
+ this.resolver = resolver;
+ this.excerptProvider = exProvider;
+ this.spellSuggestion = spellSuggestion;
+ this.valueFactory = new ValueFactoryImpl(resolver);
+ }
- /**
- * Returns the next <code>Row</code> in the iteration.
- *
- * @return the next <code>Row</code> in the iteration.
- * @throws NoSuchElementException
- * if iteration has no more <code>Row</code>s.
- */
- public Row nextRow() throws NoSuchElementException {
- return new RowImpl(scoreNodes.nextScoreNodes());
- }
+ /**
+ * Returns the next <code>Row</code> in the iteration.
+ *
+ * @return the next <code>Row</code> in the iteration.
+ * @throws NoSuchElementException
+ * if iteration has no more <code>Row</code>s.
+ */
+ public Row nextRow() throws NoSuchElementException
+ {
+ return new RowImpl(scoreNodes.nextScoreNodes());
+ }
- /**
- * Skip a number of <code>Row</code>s in this iterator.
- *
- * @param skipNum
- * the non-negative number of <code>Row</code>s to skip
- * @throws NoSuchElementException
- * if skipped past the last <code>Row</code> in this
iterator.
- */
- public void skip(long skipNum) throws NoSuchElementException {
- scoreNodes.skip(skipNum);
- }
+ /**
+ * Skip a number of <code>Row</code>s in this iterator.
+ *
+ * @param skipNum
+ * the non-negative number of <code>Row</code>s to skip
+ * @throws NoSuchElementException
+ * if skipped past the last <code>Row</code> in this
iterator.
+ */
+ public void skip(long skipNum) throws NoSuchElementException
+ {
+ scoreNodes.skip(skipNum);
+ }
- /**
- * Returns the number of <code>Row</code>s in this iterator.
- *
- * @return the number of <code>Row</code>s in this iterator.
- */
- public long getSize() {
- return scoreNodes.getSize();
- }
+ /**
+ * Returns the number of <code>Row</code>s in this iterator.
+ *
+ * @return the number of <code>Row</code>s in this iterator.
+ */
+ public long getSize()
+ {
+ return scoreNodes.getSize();
+ }
- /**
- * Returns the current position within this iterator. The number returned is
- * the 0-based index of the next <code>Row</code> in the iterator, i.e.
the
- * one that will be returned on the subsequent <code>next</code> call.
- * <p/>
- * Note that this method does not check if there is a next element, i.e. an
- * empty iterator will always return 0.
- *
- * @return the current position withing this iterator.
- */
- public long getPosition() {
- return scoreNodes.getPosition();
- }
+ /**
+ * Returns the current position within this iterator. The number returned is
+ * the 0-based index of the next <code>Row</code> in the iterator, i.e.
the
+ * one that will be returned on the subsequent <code>next</code> call.
+ * <p/>
+ * Note that this method does not check if there is a next element, i.e. an
+ * empty iterator will always return 0.
+ *
+    * @return the current position within this iterator.
+ */
+ public long getPosition()
+ {
+ return scoreNodes.getPosition();
+ }
- /**
- * @throws UnsupportedOperationException
- * always.
- */
- public void remove() {
- throw new UnsupportedOperationException("remove");
- }
+ /**
+ * @throws UnsupportedOperationException
+ * always.
+ */
+ public void remove()
+ {
+ throw new UnsupportedOperationException("remove");
+ }
- /**
- * Returns <code>true</code> if the iteration has more
<code>Row</code>s.
- * (In other words, returns <code>true</code> if
<code>next</code> would
- * return an <code>Row</code> rather than throwing an exception.)
- *
- * @return <code>true</code> if the iterator has more elements.
- */
- public boolean hasNext() {
- return scoreNodes.hasNext();
- }
+ /**
+ * Returns <code>true</code> if the iteration has more
<code>Row</code>s.
+ * (In other words, returns <code>true</code> if
<code>next</code> would
+    * return a <code>Row</code> rather than throwing an exception.)
+ *
+ * @return <code>true</code> if the iterator has more elements.
+ */
+ public boolean hasNext()
+ {
+ return scoreNodes.hasNext();
+ }
- /**
- * Returns the next <code>Row</code> in the iteration.
- *
- * @return the next <code>Row</code> in the iteration.
- * @throws NoSuchElementException
- * if iteration has no more <code>Row</code>s.
- */
- public Object next() throws NoSuchElementException {
- return nextRow();
- }
+ /**
+ * Returns the next <code>Row</code> in the iteration.
+ *
+ * @return the next <code>Row</code> in the iteration.
+ * @throws NoSuchElementException
+ * if iteration has no more <code>Row</code>s.
+ */
+ public Object next() throws NoSuchElementException
+ {
+ return nextRow();
+ }
- // ---------------------< class RowImpl
- // >------------------------------------
+ // ---------------------< class RowImpl
+ // >------------------------------------
- /**
- * Implements the {@link javax.jcr.query.Row} interface, which represents a
- * row in the query result.
- */
- class RowImpl implements Row {
+ /**
+ * Implements the {@link javax.jcr.query.Row} interface, which represents a
+ * row in the query result.
+ */
+ class RowImpl implements Row
+ {
- /**
- * The score for this result row
- */
- private final float score;
+ /**
+ * The score for this result row
+ */
+ private final float score;
- /**
- * The underlying <code>Node</code> of this result row.
- */
- private NodeImpl node;
+ /**
+ * The underlying <code>Node</code> of this result row.
+ */
+ private NodeImpl node;
- /**
- * The score nodes associated with this row.
- */
- private final ScoreNode[] sn;
+ /**
+ * The score nodes associated with this row.
+ */
+ private final ScoreNode[] sn;
- /**
- * Cached value array for returned by {@link #getValues()}.
- */
- private Value[] values;
+ /**
+      * Cached value array returned by {@link #getValues()}.
+ */
+ private Value[] values;
- /**
- * Creates a new <code>RowImpl</code> instance based on
- * <code>node</code>.
- *
- * @param sn
- * the score nodes associated with this row.
- */
- RowImpl(ScoreNode[] sn) {
- this.sn = sn;
- this.score = sn[0].getScore();
- }
+ /**
+ * Creates a new <code>RowImpl</code> instance based on
+ * <code>node</code>.
+ *
+ * @param sn
+ * the score nodes associated with this row.
+ */
+ RowImpl(ScoreNode[] sn)
+ {
+ this.sn = sn;
+ this.score = sn[0].getScore();
+ }
- /**
- * Returns an array of all the values in the same order as the property
- * names (column names) returned by
- * {@link javax.jcr.query.QueryResult#getColumnNames()}.
- *
- * @return a <code>Value</code> array.
- * @throws RepositoryException
- * if an error occurs while retrieving the values from the
- * <code>Node</code>.
- */
- public Value[] getValues() throws RepositoryException {
- if (values == null) {
- Value[] tmp = new Value[properties.length];
- for (int i = 0; i < properties.length; i++) {
- String propertyName = resolver.createJCRName(properties[i])
- .getAsString();
- node = (NodeImpl) getNode();
- if (node.hasProperty(propertyName)) {
- PropertyImpl prop = (PropertyImpl) node
- .getProperty(propertyName);
- if (!prop.getDefinition().isMultiple()) {
- if (prop.getDefinition().getRequiredType() == PropertyType.UNDEFINED) {
- tmp[i] = valueFactory.createValue(prop
- .getString());
- } else {
- tmp[i] = prop.getValue();
- }
- } else {
- // mvp values cannot be returned
- tmp[i] = null;
- }
- } else {
- // property not set or one of the following:
- // jcr:path / jcr:score / rep:excerpt / rep:spellcheck
- if (Constants.JCR_PATH.equals(properties[i])) {
- tmp[i] = valueFactory.createValue(node.getPath(),
- PropertyType.PATH);
- } else if (Constants.JCR_SCORE.equals(properties[i])) {
- tmp[i] = valueFactory.createValue(Math
- .round(score * 1000f));
- } else if (isExcerptFunction(properties[i])) {
- tmp[i] = getExcerpt();
- } else if (isSpellCheckFunction(properties[i])) {
- tmp[i] = getSpellCheckedStatement();
- } else {
- tmp[i] = null;
- }
- }
- }
- values = tmp;
- }
- // return a copy of the array
- Value[] ret = new Value[values.length];
- System.arraycopy(values, 0, ret, 0, values.length);
- return ret;
- }
+ /**
+ * Returns an array of all the values in the same order as the property
+ * names (column names) returned by
+ * {@link javax.jcr.query.QueryResult#getColumnNames()}.
+ *
+ * @return a <code>Value</code> array.
+ * @throws RepositoryException
+ * if an error occurs while retrieving the values from the
+ * <code>Node</code>.
+ */
+ public Value[] getValues() throws RepositoryException
+ {
+ if (values == null)
+ {
+ Value[] tmp = new Value[properties.length];
+ for (int i = 0; i < properties.length; i++)
+ {
+ String propertyName =
resolver.createJCRName(properties[i]).getAsString();
+ node = (NodeImpl)getNode();
+ if (node.hasProperty(propertyName))
+ {
+ PropertyImpl prop = (PropertyImpl)node.getProperty(propertyName);
+ if (!prop.getDefinition().isMultiple())
+ {
+ if (prop.getDefinition().getRequiredType() ==
PropertyType.UNDEFINED)
+ {
+ tmp[i] = valueFactory.createValue(prop.getString());
+ }
+ else
+ {
+ tmp[i] = prop.getValue();
+ }
+ }
+ else
+ {
+ // mvp values cannot be returned
+ tmp[i] = null;
+ }
+ }
+ else
+ {
+ // property not set or one of the following:
+ // jcr:path / jcr:score / rep:excerpt / rep:spellcheck
+ if (Constants.JCR_PATH.equals(properties[i]))
+ {
+ tmp[i] = valueFactory.createValue(node.getPath(),
PropertyType.PATH);
+ }
+ else if (Constants.JCR_SCORE.equals(properties[i]))
+ {
+ tmp[i] = valueFactory.createValue(Math.round(score * 1000f));
+ }
+ else if (isExcerptFunction(properties[i]))
+ {
+ tmp[i] = getExcerpt();
+ }
+ else if (isSpellCheckFunction(properties[i]))
+ {
+ tmp[i] = getSpellCheckedStatement();
+ }
+ else
+ {
+ tmp[i] = null;
+ }
+ }
+ }
+ values = tmp;
+ }
+ // return a copy of the array
+ Value[] ret = new Value[values.length];
+ System.arraycopy(values, 0, ret, 0, values.length);
+ return ret;
+ }
- /**
- * Returns the value of the indicated property in this <code>Row</code>.
- * <p/>
- * If <code>propertyName</code> is not among the column names of the
- * query result table, an <code>ItemNotFoundException</code> is thrown.
- *
- * @return a <code>Value</code>
- * @throws ItemNotFoundException
- * if <code>propertyName</code> is not among the column
- * names of the query result table.
- * @throws RepositoryException
- * if <code>propertyName</code> is not a valid property
- * name.
- */
- public Value getValue(String propertyName)
- throws ItemNotFoundException, RepositoryException {
- if (propertySet == null) {
- // create the set first
- Set tmp = new HashSet();
- tmp.addAll(Arrays.asList(properties));
- propertySet = tmp;
- }
- try {
- InternalQName prop = resolver.parseJCRName(propertyName)
- .getInternalName();
- if (!propertySet.contains(prop)) {
- if (isExcerptFunction(propertyName)) {
- // excerpt function with parameter
- return getExcerpt(propertyName);
- } else {
- throw new ItemNotFoundException(propertyName);
- }
- }
- if (Constants.JCR_PATH.equals(prop)) {
- // QValue p =
- // QVALUE_FACTORY.create(hmgr.getPath(sn[0].getNodeId()));
- ItemData item = itemMgr.getItemData(sn[0].getNodeId());
- if (item == null)
- throw new ItemNotFoundException(sn[0].getNodeId());
- String path = resolver.createJCRPath(item.getQPath())
- .getAsString(false);
- return valueFactory.createValue(path, PropertyType.PATH);
- } else if (getNodeImpl().hasProperty(propertyName)) {
- Property p = getNodeImpl().getProperty(propertyName);
- if (p.getDefinition().getRequiredType() == PropertyType.UNDEFINED) {
- return valueFactory.createValue(p.getString());
- } else {
- return p.getValue();
- }
- } else {
+ /**
+ * Returns the value of the indicated property in this
<code>Row</code>.
+ * <p/>
+ * If <code>propertyName</code> is not among the column names of the
+ * query result table, an <code>ItemNotFoundException</code> is
thrown.
+ *
+ * @return a <code>Value</code>
+ * @throws ItemNotFoundException
+ * if <code>propertyName</code> is not among the column
+ * names of the query result table.
+ * @throws RepositoryException
+ * if <code>propertyName</code> is not a valid property
+ * name.
+ */
+ public Value getValue(String propertyName) throws ItemNotFoundException,
RepositoryException
+ {
+ if (propertySet == null)
+ {
+ // create the set first
+ Set tmp = new HashSet();
+ tmp.addAll(Arrays.asList(properties));
+ propertySet = tmp;
+ }
+ try
+ {
+ InternalQName prop = resolver.parseJCRName(propertyName).getInternalName();
+ if (!propertySet.contains(prop))
+ {
+ if (isExcerptFunction(propertyName))
+ {
+ // excerpt function with parameter
+ return getExcerpt(propertyName);
+ }
+ else
+ {
+ throw new ItemNotFoundException(propertyName);
+ }
+ }
+ if (Constants.JCR_PATH.equals(prop))
+ {
+ // QValue p =
+ // QVALUE_FACTORY.create(hmgr.getPath(sn[0].getNodeId()));
+ ItemData item = itemMgr.getItemData(sn[0].getNodeId());
+ if (item == null)
+ throw new ItemNotFoundException(sn[0].getNodeId());
+ String path = resolver.createJCRPath(item.getQPath()).getAsString(false);
+ return valueFactory.createValue(path, PropertyType.PATH);
+ }
+ else if (getNodeImpl().hasProperty(propertyName))
+ {
+ Property p = getNodeImpl().getProperty(propertyName);
+ if (p.getDefinition().getRequiredType() == PropertyType.UNDEFINED)
+ {
+ return valueFactory.createValue(p.getString());
+ }
+ else
+ {
+ return p.getValue();
+ }
+ }
+ else
+ {
- // either jcr:score, rep:excerpt,
- // rep:spellcheck or not set
- if (Constants.JCR_SCORE.equals(prop)) {
- return valueFactory.createValue(Math
- .round(score * 1000f));
- } else if (isExcerptFunction(prop)) {
- return getExcerpt();
- } else if (isSpellCheckFunction(prop)) {
- return getSpellCheckedStatement();
- } else {
- return null;
- }
- }
- } catch (RepositoryException e) {
- if (isExcerptFunction(propertyName)) {
- // excerpt function with parameter
- return getExcerpt(propertyName);
- } else {
- throw new RepositoryException(e.getMessage(), e);
- }
- }
- }
+ // either jcr:score, rep:excerpt,
+ // rep:spellcheck or not set
+ if (Constants.JCR_SCORE.equals(prop))
+ {
+ return valueFactory.createValue(Math.round(score * 1000f));
+ }
+ else if (isExcerptFunction(prop))
+ {
+ return getExcerpt();
+ }
+ else if (isSpellCheckFunction(prop))
+ {
+ return getSpellCheckedStatement();
+ }
+ else
+ {
+ return null;
+ }
+ }
+ }
+ catch (RepositoryException e)
+ {
+ if (isExcerptFunction(propertyName))
+ {
+ // excerpt function with parameter
+ return getExcerpt(propertyName);
+ }
+ else
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ }
+ }
- /**
- * Returns the <code>Node</code> corresponding to this
<code>Row</code>.
- * <p/>
- * A <code>RepositoryException</code> is thrown if this
<code>Row</code>
- * contains values from more than one node. This will be the case when
- * more than one selector is included among the columns specified for
- * the query.
- *
- * @return a <code>Node</code>
- * @throws RepositoryException
- * if this query has more than one selector (and therefore,
- * this <code>Row</code> corresponds to more than one
- * <code>Node</code>) or if another error occurs.
- * @since JCR 2.0
- */
- public Node getNode() throws RepositoryException {
- checkSingleSelector("Use getNode(String) instead.");
- return getNodeImpl();
- }
+ /**
+ * Returns the <code>Node</code> corresponding to this
<code>Row</code>.
+ * <p/>
+ * A <code>RepositoryException</code> is thrown if this
<code>Row</code>
+ * contains values from more than one node. This will be the case when
+ * more than one selector is included among the columns specified for
+ * the query.
+ *
+ * @return a <code>Node</code>
+ * @throws RepositoryException
+ * if this query has more than one selector (and therefore,
+ * this <code>Row</code> corresponds to more than one
+ * <code>Node</code>) or if another error occurs.
+ * @since JCR 2.0
+ */
+ public Node getNode() throws RepositoryException
+ {
+ checkSingleSelector("Use getNode(String) instead.");
+ return getNodeImpl();
+ }
- /**
- * Returns the <code>Node</code> corresponding to this
<code>Row</code>
- * and the specified selector.
- *
- * @param selectorName
- * a <code>String</code>
- * @return a <code>Node</code>
- * @throws RepositoryException
- * if <code>selectorName</code> is not the alias of a
- * selector in this query or if another error occurs.
- * @since JCR 2.0
- */
- public Node getNode(String selectorName) throws RepositoryException {
- ScoreNode s = sn[getSelectorIndex(selectorName)];
- if (s == null) {
- // TODO correct?
- return null;
- }
- return (Node) itemMgr.getItemByIdentifier(s.getNodeId(), true);
- }
+ /**
+ * Returns the <code>Node</code> corresponding to this
<code>Row</code>
+ * and the specified selector.
+ *
+ * @param selectorName
+ * a <code>String</code>
+ * @return a <code>Node</code>
+ * @throws RepositoryException
+ * if <code>selectorName</code> is not the alias of a
+ * selector in this query or if another error occurs.
+ * @since JCR 2.0
+ */
+ public Node getNode(String selectorName) throws RepositoryException
+ {
+ ScoreNode s = sn[getSelectorIndex(selectorName)];
+ if (s == null)
+ {
+ // TODO correct?
+ return null;
+ }
+ return (Node)itemMgr.getItemByIdentifier(s.getNodeId(), true);
+ }
- /**
- * Equivalent to <code>Row.getNode().getPath()</code>. However, some
- * implementations may be able gain efficiency by not resolving the
- * actual <code>Node</code>.
- *
- * @return a <code>String</code>
- * @throws RepositoryException
- * if this query has more than one selector (and therefore,
- * this <code>Row</code> corresponds to more than one
- * <code>Node</code>) or if another error occurs.
- * @since JCR 2.0
- */
- public String getPath() throws RepositoryException {
- checkSingleSelector("Use getPath(String) instead.");
- ItemData item = itemMgr.getItemData(sn[0].getNodeId());
- if (item == null)
- throw new ItemNotFoundException("Item not found "
- + sn[0].getNodeId());
- return resolver.createJCRPath(item.getQPath()).getAsString(false);
- }
+ /**
+ * Equivalent to <code>Row.getNode().getPath()</code>. However, some
+      * implementations may be able to gain efficiency by not resolving the
+ * actual <code>Node</code>.
+ *
+ * @return a <code>String</code>
+ * @throws RepositoryException
+ * if this query has more than one selector (and therefore,
+ * this <code>Row</code> corresponds to more than one
+ * <code>Node</code>) or if another error occurs.
+ * @since JCR 2.0
+ */
+ public String getPath() throws RepositoryException
+ {
+ checkSingleSelector("Use getPath(String) instead.");
+ ItemData item = itemMgr.getItemData(sn[0].getNodeId());
+ if (item == null)
+ throw new ItemNotFoundException("Item not found " +
sn[0].getNodeId());
+ return resolver.createJCRPath(item.getQPath()).getAsString(false);
+ }
- /**
- * Equivalent to <code>Row.getNode(selectorName).getPath()</code>.
- * However, some implementations may be able gain efficiency by not
- * resolving the actual <code>Node</code>.
- *
- * @param selectorName
- * a <code>String</code>
- * @return a <code>String</code>
- * @throws RepositoryException
- * if <code>selectorName</code> is not the alias of a
- * selector in this query or if another error occurs.
- * @since JCR 2.0
- */
- public String getPath(String selectorName) throws RepositoryException {
- Node n = getNode(selectorName);
- if (n != null) {
- return n.getPath();
- } else {
- return null;
- }
- }
+ /**
+ * Equivalent to <code>Row.getNode(selectorName).getPath()</code>.
+      * However, some implementations may be able to gain efficiency by not
+ * resolving the actual <code>Node</code>.
+ *
+ * @param selectorName
+ * a <code>String</code>
+ * @return a <code>String</code>
+ * @throws RepositoryException
+ * if <code>selectorName</code> is not the alias of a
+ * selector in this query or if another error occurs.
+ * @since JCR 2.0
+ */
+ public String getPath(String selectorName) throws RepositoryException
+ {
+ Node n = getNode(selectorName);
+ if (n != null)
+ {
+ return n.getPath();
+ }
+ else
+ {
+ return null;
+ }
+ }
- /**
- * Returns the full text search score for this row associated with the
- * default selector. This corresponds to the score of a particular node.
- * <p/>
- * If no <code>FullTextSearchScore</code> AQM object is associated with
- * the default selector this method will still return a value. However,
- * in that case the returned value may not be meaningful or may simply
- * reflect the minimum possible relevance level (for example, in some
- * systems this might be a score of 0).
- * <p/>
- * Note, in JCR-SQL2 a <code>FullTextSearchScore</code> AQM object is
- * represented by a <code>SCORE()</code> function. In JCR-JQOM it is
- * represented by a Java object of type
- * <code>javax.jcr.query.qom.FullTextSearchScore</code>.
- *
- * @return a <code>double</code>
- * @throws RepositoryException
- * if this query has more than one selector (and therefore,
- * this <code>Row</code> corresponds to more than one
- * <code>Node</code>) or if another error occurs.
- * @since JCR 2.0
- */
- public double getScore() throws RepositoryException {
- checkSingleSelector("Use getScore(String) instead.");
- return score;
- }
+ /**
+ * Returns the full text search score for this row associated with the
+ * default selector. This corresponds to the score of a particular node.
+ * <p/>
+ * If no <code>FullTextSearchScore</code> AQM object is associated
with
+ * the default selector this method will still return a value. However,
+ * in that case the returned value may not be meaningful or may simply
+ * reflect the minimum possible relevance level (for example, in some
+ * systems this might be a score of 0).
+ * <p/>
+ * Note, in JCR-SQL2 a <code>FullTextSearchScore</code> AQM object is
+ * represented by a <code>SCORE()</code> function. In JCR-JQOM it is
+ * represented by a Java object of type
+ * <code>javax.jcr.query.qom.FullTextSearchScore</code>.
+ *
+ * @return a <code>double</code>
+ * @throws RepositoryException
+ * if this query has more than one selector (and therefore,
+ * this <code>Row</code> corresponds to more than one
+ * <code>Node</code>) or if another error occurs.
+ * @since JCR 2.0
+ */
+ public double getScore() throws RepositoryException
+ {
+ checkSingleSelector("Use getScore(String) instead.");
+ return score;
+ }
- /**
- * Returns the full text search score for this row associated with the
- * specified selector. This corresponds to the score of a particular
- * node.
- * <p/>
- * If no <code>FullTextSearchScore</code> AQM object is associated with
- * the selector <code>selectorName</code> this method will still return
- * a value. However, in that case the returned value may not be
- * meaningful or may simply reflect the minimum possible relevance level
- * (for example, in some systems this might be a score of 0).
- * <p/>
- * Note, in JCR-SQL2 a <code>FullTextSearchScore</code> AQM object is
- * represented by a <code>SCORE()</code> function. In JCR-JQOM it is
- * represented by a Java object of type
- * <code>javax.jcr.query.qom.FullTextSearchScore</code>.
- *
- * @param selectorName
- * a <code>String</code>
- * @return a <code>String</code>
- * @throws RepositoryException
- * if <code>selectorName</code> is not the alias of a
- * selector in this query or if another error occurs.
- * @since JCR 2.0
- */
- public double getScore(String selectorName) throws RepositoryException {
- ScoreNode s = sn[getSelectorIndex(selectorName)];
- if (s == null) {
- // TODO correct?
- return Double.NaN;
- }
- return s.getScore();
- }
+ /**
+ * Returns the full text search score for this row associated with the
+ * specified selector. This corresponds to the score of a particular
+ * node.
+ * <p/>
+ * If no <code>FullTextSearchScore</code> AQM object is associated
with
+ * the selector <code>selectorName</code> this method will still
return
+ * a value. However, in that case the returned value may not be
+ * meaningful or may simply reflect the minimum possible relevance level
+ * (for example, in some systems this might be a score of 0).
+ * <p/>
+ * Note, in JCR-SQL2 a <code>FullTextSearchScore</code> AQM object is
+ * represented by a <code>SCORE()</code> function. In JCR-JQOM it is
+ * represented by a Java object of type
+ * <code>javax.jcr.query.qom.FullTextSearchScore</code>.
+ *
+ * @param selectorName
+ * a <code>String</code>
+ * @return a <code>String</code>
+ * @throws RepositoryException
+ * if <code>selectorName</code> is not the alias of a
+ * selector in this query or if another error occurs.
+ * @since JCR 2.0
+ */
+ public double getScore(String selectorName) throws RepositoryException
+ {
+ ScoreNode s = sn[getSelectorIndex(selectorName)];
+ if (s == null)
+ {
+ // TODO correct?
+ return Double.NaN;
+ }
+ return s.getScore();
+ }
- // -----------------------------< internal
- // >-----------------------------
+ // -----------------------------< internal
+ // >-----------------------------
- /**
- * Returns the node corresponding to this row.
- *
- * @return the node.
- * @throws RepositoryException
- * if an error occurs while retrieving the node. e.g. node
- * does not exist anymore.
- */
- private NodeImpl getNodeImpl() throws RepositoryException {
- if (node == null) {
- node = (NodeImpl) itemMgr.getItemByIdentifier(
- sn[0].getNodeId(), true);
- }
- return node;
- }
+ /**
+ * Returns the node corresponding to this row.
+ *
+ * @return the node.
+ * @throws RepositoryException
+ * if an error occurs while retrieving the node. e.g. node
+ * does not exist anymore.
+ */
+ private NodeImpl getNodeImpl() throws RepositoryException
+ {
+ if (node == null)
+ {
+ node = (NodeImpl)itemMgr.getItemByIdentifier(sn[0].getNodeId(), true);
+ }
+ return node;
+ }
- /**
- * Checks if there is a single selector and otherwise throws a
- * RepositoryException.
- *
- * @param useInstead
- * message telling, which method to use instead.
- * @throws RepositoryException
- * if there is more than one selector.
- */
- private void checkSingleSelector(String useInstead)
- throws RepositoryException {
- if (sn.length > 1) {
- String msg = "More than one selector. " + useInstead;
- throw new RepositoryException(msg);
- }
- }
+ /**
+ * Checks if there is a single selector and otherwise throws a
+ * RepositoryException.
+ *
+ * @param useInstead
+ * message telling, which method to use instead.
+ * @throws RepositoryException
+ * if there is more than one selector.
+ */
+ private void checkSingleSelector(String useInstead) throws RepositoryException
+ {
+ if (sn.length > 1)
+ {
+ String msg = "More than one selector. " + useInstead;
+ throw new RepositoryException(msg);
+ }
+ }
- /**
- * Gets the selector index for the given <code>selectorName</code>.
- *
- * @param selectorName
- * the selector name.
- * @return the selector index.
- * @throws RepositoryException
- * if the selector name is not a valid JCR name or the
- * selector name is not the alias of a selector in this
- * query.
- */
- private int getSelectorIndex(String selectorName)
- throws RepositoryException {
- int idx = selectorNames
- .indexOf(resolver.parseJCRName(selectorName));
- if (idx == -1) {
- throw new RepositoryException("Unknown selector name: "
- + selectorName);
- }
- return idx;
- }
+ /**
+ * Gets the selector index for the given <code>selectorName</code>.
+ *
+ * @param selectorName
+ * the selector name.
+ * @return the selector index.
+ * @throws RepositoryException
+ * if the selector name is not a valid JCR name or the
+ * selector name is not the alias of a selector in this
+ * query.
+ */
+ private int getSelectorIndex(String selectorName) throws RepositoryException
+ {
+ int idx = selectorNames.indexOf(resolver.parseJCRName(selectorName));
+ if (idx == -1)
+ {
+ throw new RepositoryException("Unknown selector name: " +
selectorName);
+ }
+ return idx;
+ }
- /**
- * @param name
- * a Name.
- * @return <code>true</code> if <code>name</code> is the
rep:excerpt
- * function, <code>false</code> otherwise.
- */
- private boolean isExcerptFunction(InternalQName name) {
- return name.getNamespace().equals(Constants.NS_REP_URI)
- && name.getName().startsWith(EXCERPT_FUNC_LPAR);
- }
+ /**
+ * @param name
+ * a Name.
+ * @return <code>true</code> if <code>name</code> is the
rep:excerpt
+ * function, <code>false</code> otherwise.
+ */
+ private boolean isExcerptFunction(InternalQName name)
+ {
+ return name.getNamespace().equals(Constants.NS_REP_URI) &&
name.getName().startsWith(EXCERPT_FUNC_LPAR);
+ }
- /**
- * @param name
- * a String.
- * @return <code>true</code> if <code>name</code> is the
rep:excerpt
- * function, <code>false</code> otherwise.
- */
- private boolean isExcerptFunction(String name) {
- try {
- return name.startsWith(resolver.createJCRName(REP_EXCERPT_LPAR)
- .getAsString());
- } catch (NamespaceException e) {
- // will never happen
- return false;
- } catch (RepositoryException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- return false;
- }
- }
+ /**
+ * @param name
+ * a String.
+ * @return <code>true</code> if <code>name</code> is the
rep:excerpt
+ * function, <code>false</code> otherwise.
+ */
+ private boolean isExcerptFunction(String name)
+ {
+ try
+ {
+ return
name.startsWith(resolver.createJCRName(REP_EXCERPT_LPAR).getAsString());
+ }
+ catch (NamespaceException e)
+ {
+ // will never happen
+ return false;
+ }
+ catch (RepositoryException e)
+ {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ return false;
+ }
+ }
- /**
- * Returns an excerpt for the node associated with this row.
- *
- * @return a StringValue or <code>null</code> if the excerpt cannot be
- * created or an error occurs.
- */
- private Value getExcerpt() {
- return createExcerpt(sn[0].getNodeId());
- }
+ /**
+ * Returns an excerpt for the node associated with this row.
+ *
+ * @return a StringValue or <code>null</code> if the excerpt cannot be
+ * created or an error occurs.
+ */
+ private Value getExcerpt()
+ {
+ return createExcerpt(sn[0].getNodeId());
+ }
- /**
- * Returns an excerpt for the node indicated by the relative path
- * parameter of the rep:excerpt function. The relative path is resolved
- * against the node associated with this row.
- *
- * @param excerptCall
- * the rep:excerpt function with the parameter as string.
- * @return a StringValue or <code>null</code> if the excerpt cannot be
- * created or an error occurs.
- * @throws RepositoryException
- * if the function call is not well-formed.
- */
- private Value getExcerpt(String excerptCall) throws RepositoryException {
- int idx = excerptCall.indexOf(EXCERPT_FUNC_LPAR);
- int end = excerptCall.lastIndexOf(')');
- if (end == -1) {
- throw new RepositoryException("Missing right parenthesis");
- }
- String pathStr = excerptCall.substring(
- idx + EXCERPT_FUNC_LPAR.length(), end).trim();
- String decodedPath = ISO9075.decode(pathStr);
- try {
- NodeImpl n = (NodeImpl) getNodeImpl().getNode(decodedPath);
- return createExcerpt(n.getData().getIdentifier());
- } catch (PathNotFoundException e) {
- // does not exist or references a property
- try {
- Property p = getNode().getProperty(decodedPath);
- return highlight(p.getValue().getString());
- } catch (PathNotFoundException e1) {
- // does not exist
- return null;
- }
- }
- }
+ /**
+ * Returns an excerpt for the node indicated by the relative path
+ * parameter of the rep:excerpt function. The relative path is resolved
+ * against the node associated with this row.
+ *
+ * @param excerptCall
+ * the rep:excerpt function with the parameter as string.
+ * @return a StringValue or <code>null</code> if the excerpt cannot be
+ * created or an error occurs.
+ * @throws RepositoryException
+ * if the function call is not well-formed.
+ */
+ private Value getExcerpt(String excerptCall) throws RepositoryException
+ {
+ int idx = excerptCall.indexOf(EXCERPT_FUNC_LPAR);
+ int end = excerptCall.lastIndexOf(')');
+ if (end == -1)
+ {
+ throw new RepositoryException("Missing right parenthesis");
+ }
+ String pathStr = excerptCall.substring(idx + EXCERPT_FUNC_LPAR.length(),
end).trim();
+ String decodedPath = ISO9075.decode(pathStr);
+ try
+ {
+ NodeImpl n = (NodeImpl)getNodeImpl().getNode(decodedPath);
+ return createExcerpt(n.getData().getIdentifier());
+ }
+ catch (PathNotFoundException e)
+ {
+ // does not exist or references a property
+ try
+ {
+ Property p = getNode().getProperty(decodedPath);
+ return highlight(p.getValue().getString());
+ }
+ catch (PathNotFoundException e1)
+ {
+ // does not exist
+ return null;
+ }
+ }
+ }
- /**
- * Creates an excerpt for node with the given <code>id</code>.
- *
- * @return a StringValue or <code>null</code> if the excerpt cannot be
- * created or an error occurs.
- */
- private Value createExcerpt(String id) {
- if (excerptProvider == null) {
- return null;
- }
- try {
- long time = System.currentTimeMillis();
- String excerpt = excerptProvider.getExcerpt(id, 3, 150);
- time = System.currentTimeMillis() - time;
- log.debug("Created excerpt in {} ms.", new Long(time));
- if (excerpt != null) {
- return valueFactory.createValue(excerpt);
- } else {
- return null;
- }
- } catch (IOException e) {
- return null;
- }
- }
+ /**
+ * Creates an excerpt for node with the given <code>id</code>.
+ *
+ * @return a StringValue or <code>null</code> if the excerpt cannot be
+ * created or an error occurs.
+ */
+ private Value createExcerpt(String id)
+ {
+ if (excerptProvider == null)
+ {
+ return null;
+ }
+ try
+ {
+ long time = System.currentTimeMillis();
+ String excerpt = excerptProvider.getExcerpt(id, 3, 150);
- /**
- * Highlights the matching terms in the passed <code>text</code>.
- *
- * @return a StringValue or <code>null</code> if highlighting fails.
- */
- private Value highlight(String text) {
- if (!(excerptProvider instanceof HighlightingExcerptProvider)) {
- return null;
- }
- HighlightingExcerptProvider hep = (HighlightingExcerptProvider) excerptProvider;
- try {
- long time = System.currentTimeMillis();
- text = hep.highlight(text);
- time = System.currentTimeMillis() - time;
- log.debug("Highlighted text in {} ms.", new Long(time));
- return valueFactory.createValue(text);
- } catch (IOException e) {
- return null;
- }
- }
+ time = System.currentTimeMillis() - time;
+ log.debug("Created excerpt in {} ms.", new Long(time));
+ if (excerpt != null)
+ {
+ return valueFactory.createValue(excerpt);
+ }
+ else
+ {
+ return null;
+ }
+ }
+ catch (IOException e)
+ {
+ return null;
+ }
+ }
- /**
- * @param name
- * a Name.
- * @return <code>true</code> if <code>name</code> is the
rep:spellcheck
- * function, <code>false</code> otherwise.
- */
- private boolean isSpellCheckFunction(InternalQName name) {
- return name.getNamespace().equals(Constants.NS_REP_URI)
- && name.getName().startsWith(SPELLCHECK_FUNC_LPAR);
- }
+ /**
+ * Highlights the matching terms in the passed <code>text</code>.
+ *
+ * @return a StringValue or <code>null</code> if highlighting fails.
+ */
+ private Value highlight(String text)
+ {
+ if (!(excerptProvider instanceof HighlightingExcerptProvider))
+ {
+ return null;
+ }
+ HighlightingExcerptProvider hep = (HighlightingExcerptProvider)excerptProvider;
+ try
+ {
+ long time = System.currentTimeMillis();
+ text = hep.highlight(text);
+ time = System.currentTimeMillis() - time;
+ log.debug("Highlighted text in {} ms.", new Long(time));
+ return valueFactory.createValue(text);
+ }
+ catch (IOException e)
+ {
+ return null;
+ }
+ }
- /**
- * Returns the spell checked string of the first relation query node
- * with a spellcheck operation.
- *
- * @return a StringValue or <code>null</code> if the spell checker
- * thinks the words are spelled correctly. This method also
- * returns <code>null</code> if no spell checker is configured.
- */
- private Value getSpellCheckedStatement() {
- String v = null;
- if (spellSuggestion != null) {
- try {
- v = spellSuggestion.getSuggestion();
- } catch (IOException e) {
- log.warn("Spell checking failed", e);
- } catch (RepositoryException e) {
- log.warn("Spell checking failed", e);
- }
- }
- if (v != null) {
- return valueFactory.createValue(v);
- } else {
- return null;
- }
- }
- }
+ /**
+ * @param name
+ * a Name.
+ * @return <code>true</code> if <code>name</code> is the
rep:spellcheck
+ * function, <code>false</code> otherwise.
+ */
+ private boolean isSpellCheckFunction(InternalQName name)
+ {
+ return name.getNamespace().equals(Constants.NS_REP_URI) &&
name.getName().startsWith(SPELLCHECK_FUNC_LPAR);
+ }
+
+ /**
+ * Returns the spell checked string of the first relation query node
+ * with a spellcheck operation.
+ *
+ * @return a StringValue or <code>null</code> if the spell checker
+ * thinks the words are spelled correctly. This method also
+ * returns <code>null</code> if no spell checker is
configured.
+ */
+ private Value getSpellCheckedStatement()
+ {
+ String v = null;
+ if (spellSuggestion != null)
+ {
+ try
+ {
+ v = spellSuggestion.getSuggestion();
+ }
+ catch (IOException e)
+ {
+ log.warn("Spell checking failed", e);
+ }
+ catch (RepositoryException e)
+ {
+ log.warn("Spell checking failed", e);
+ }
+ }
+ if (v != null)
+ {
+ return valueFactory.createValue(v);
+ }
+ else
+ {
+ return null;
+ }
+ }
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -62,7 +62,9 @@
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
+import java.io.BufferedInputStream;
import java.io.File;
+import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
@@ -85,2354 +87,2556 @@
* Implements a {@link org.apache.jackrabbit.core.query.QueryHandler} using
* Lucene.
*/
-public class SearchIndex extends AbstractQueryHandler {
+public class SearchIndex extends AbstractQueryHandler
+{
- private static final DefaultQueryNodeFactory DEFAULT_QUERY_NODE_FACTORY = new
DefaultQueryNodeFactory();
+ private static final DefaultQueryNodeFactory DEFAULT_QUERY_NODE_FACTORY = new
DefaultQueryNodeFactory();
- /** The logger instance for this class */
- private static final Logger log = LoggerFactory
- .getLogger(SearchIndex.class);
+ /** The logger instance for this class */
+ private static final Logger log = LoggerFactory.getLogger(SearchIndex.class);
- /**
- * Name of the file to persist search internal namespace mappings.
- */
- private static final String NS_MAPPING_FILE = "ns_mappings.properties";
+ /**
+ * Name of the file to persist search internal namespace mappings.
+ */
+ private static final String NS_MAPPING_FILE = "ns_mappings.properties";
- /**
- * The default value for property {@link #minMergeDocs}.
- */
- public static final int DEFAULT_MIN_MERGE_DOCS = 100;
+ /**
+ * The default value for property {@link #minMergeDocs}.
+ */
+ public static final int DEFAULT_MIN_MERGE_DOCS = 100;
- /**
- * The default value for property {@link #maxMergeDocs}.
- */
- public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;
+ /**
+ * The default value for property {@link #maxMergeDocs}.
+ */
+ public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;
- /**
- * the default value for property {@link #mergeFactor}.
- */
- public static final int DEFAULT_MERGE_FACTOR = 10;
+ /**
+ * the default value for property {@link #mergeFactor}.
+ */
+ public static final int DEFAULT_MERGE_FACTOR = 10;
- /**
- * the default value for property {@link #maxFieldLength}.
- */
- public static final int DEFAULT_MAX_FIELD_LENGTH = 10000;
+ /**
+ * the default value for property {@link #maxFieldLength}.
+ */
+ public static final int DEFAULT_MAX_FIELD_LENGTH = 10000;
- /**
- * The default value for property {@link #extractorPoolSize}.
- *
- * @deprecated this value is not used anymore. Instead the default value is
- * calculated as follows: 2 *
- * Runtime.getRuntime().availableProcessors().
- */
- public static final int DEFAULT_EXTRACTOR_POOL_SIZE = 0;
+ /**
+ * The default value for property {@link #extractorPoolSize}.
+ *
+ * @deprecated this value is not used anymore. Instead the default value is
+ * calculated as follows: 2 *
+ * Runtime.getRuntime().availableProcessors().
+ */
+ public static final int DEFAULT_EXTRACTOR_POOL_SIZE = 0;
- /**
- * The default value for property {@link #extractorBackLog}.
- */
- public static final int DEFAULT_EXTRACTOR_BACK_LOG = Integer.MAX_VALUE;
+ /**
+ * The default value for property {@link #extractorBackLog}.
+ */
+ public static final int DEFAULT_EXTRACTOR_BACK_LOG = Integer.MAX_VALUE;
- /**
- * The default timeout in milliseconds which is granted to the text
- * extraction process until fulltext indexing is deferred to a background
- * thread.
- */
- public static final long DEFAULT_EXTRACTOR_TIMEOUT = 100;
+ /**
+ * The default timeout in milliseconds which is granted to the text
+ * extraction process until fulltext indexing is deferred to a background
+ * thread.
+ */
+ public static final long DEFAULT_EXTRACTOR_TIMEOUT = 100;
- /**
- * The default value for {@link #termInfosIndexDivisor}.
- */
- public static final int DEFAULT_TERM_INFOS_INDEX_DIVISOR = 1;
+ /**
+ * The default value for {@link #termInfosIndexDivisor}.
+ */
+ public static final int DEFAULT_TERM_INFOS_INDEX_DIVISOR = 1;
- /**
- * Default name of the error log file
- */
- private static final String ERROR_LOG = "error.log";
+ /**
+ * Default name of the error log file
+ */
+ private static final String ERROR_LOG = "error.log";
- /**
- * The actual index
- */
- private MultiIndex index;
+ /**
+ * The actual index
+ */
+ private MultiIndex index;
- /**
- * The analyzer we use for indexing.
- */
- private JcrStandartAnalyzer analyzer;
+ /**
+ * The analyzer we use for indexing.
+ */
+ private JcrStandartAnalyzer analyzer;
- /**
- * The namespace mappings used internally.
- */
- private NamespaceMappings nsMappings;
+ /**
+ * The namespace mappings used internally.
+ */
+ private NamespaceMappings nsMappings;
- /**
- * The location of the search index.
- * <p/>
- * Note: This is a <b>mandatory</b> parameter!
- */
- private String path;
+ /**
+ * The location of the search index.
+ * <p/>
+ * Note: This is a <b>mandatory</b> parameter!
+ */
+ private String path;
- /**
- * minMergeDocs config parameter.
- */
- private int minMergeDocs = DEFAULT_MIN_MERGE_DOCS;
+ /**
+ * minMergeDocs config parameter.
+ */
+ private int minMergeDocs = DEFAULT_MIN_MERGE_DOCS;
- /**
- * The maximum volatile index size in bytes until it is written to disk. The
- * default value is 1048576 (1MB).
- */
- private long maxVolatileIndexSize = 1024 * 1024;
+ /**
+ * The maximum volatile index size in bytes until it is written to disk. The
+ * default value is 1048576 (1MB).
+ */
+ private long maxVolatileIndexSize = 1024 * 1024;
- /**
- * volatileIdleTime config parameter.
- */
- private int volatileIdleTime = 3;
+ /**
+ * volatileIdleTime config parameter.
+ */
+ private int volatileIdleTime = 3;
- /**
- * maxMergeDocs config parameter
- */
- private int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
+ /**
+ * maxMergeDocs config parameter
+ */
+ private int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
- /**
- * mergeFactor config parameter
- */
- private int mergeFactor = DEFAULT_MERGE_FACTOR;
+ /**
+ * mergeFactor config parameter
+ */
+ private int mergeFactor = DEFAULT_MERGE_FACTOR;
- /**
- * maxFieldLength config parameter
- */
- private int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;
+ /**
+ * maxFieldLength config parameter
+ */
+ private int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;
- /**
- * extractorPoolSize config parameter
- */
- private int extractorPoolSize = 2 * Runtime.getRuntime()
- .availableProcessors();
+ /**
+ * extractorPoolSize config parameter
+ */
+ private int extractorPoolSize = 2 * Runtime.getRuntime().availableProcessors();
- /**
- * extractorBackLog config parameter
- */
- private int extractorBackLog = DEFAULT_EXTRACTOR_BACK_LOG;
+ /**
+ * extractorBackLog config parameter
+ */
+ private int extractorBackLog = DEFAULT_EXTRACTOR_BACK_LOG;
- /**
- * extractorTimeout config parameter
- */
- private long extractorTimeout = DEFAULT_EXTRACTOR_TIMEOUT;
+ /**
+ * extractorTimeout config parameter
+ */
+ private long extractorTimeout = DEFAULT_EXTRACTOR_TIMEOUT;
- /**
- * Number of documents that are buffered before they are added to the index.
- */
- private int bufferSize = 10;
+ /**
+ * Number of documents that are buffered before they are added to the index.
+ */
+ private int bufferSize = 10;
- /**
- * Compound file flag
- */
- private boolean useCompoundFile = true;
+ /**
+ * Compound file flag
+ */
+ private boolean useCompoundFile = true;
- /**
- * Flag indicating whether document order is enabled as the default
- * ordering.
- * <p/>
- * Default value is: <code>false</code>.
- */
- private boolean documentOrder = true;
+ /**
+ * Flag indicating whether document order is enabled as the default
+ * ordering.
+ * <p/>
+ * Default value is: <code>false</code>.
+ */
+ private boolean documentOrder = true;
- /**
- * If set <code>true</code> the index is checked for consistency on
startup.
- * If <code>false</code> a consistency check is only performed when
there
- * are entries in the redo log on startup.
- * <p/>
- * Default value is: <code>false</code>.
- */
- private boolean forceConsistencyCheck = false;
+ /**
+ * If set <code>true</code> the index is checked for consistency on
startup.
+ * If <code>false</code> a consistency check is only performed when there
+ * are entries in the redo log on startup.
+ * <p/>
+ * Default value is: <code>false</code>.
+ */
+ private boolean forceConsistencyCheck = false;
- /**
- * If set <code>true</code> the index is checked for consistency
depending
- * on the {@link #forceConsistencyCheck} parameter. If set to
- * <code>false</code>, no consistency check is performed, even if the
redo
- * log had been applied on startup.
- * <p/>
- * Default value is: <code>false</code>.
- */
- private boolean consistencyCheckEnabled = false;
+ /**
+ * If set <code>true</code> the index is checked for consistency
depending
+ * on the {@link #forceConsistencyCheck} parameter. If set to
+ * <code>false</code>, no consistency check is performed, even if the
redo
+ * log had been applied on startup.
+ * <p/>
+ * Default value is: <code>false</code>.
+ */
+ private boolean consistencyCheckEnabled = false;
- /**
- * If set <code>true</code> errors detected by the consistency check are
- * repaired. If <code>false</code> the errors are only reported in the
log.
- * <p/>
- * Default value is: <code>true</code>.
- */
- private boolean autoRepair = true;
+ /**
+ * If set <code>true</code> errors detected by the consistency check are
+ * repaired. If <code>false</code> the errors are only reported in the
log.
+ * <p/>
+ * Default value is: <code>true</code>.
+ */
+ private boolean autoRepair = true;
- /**
- * The uuid resolver cache size.
- * <p/>
- * Default value is: <code>1000</code>.
- */
- private int cacheSize = 1000;
+ /**
+ * The uuid resolver cache size.
+ * <p/>
+ * Default value is: <code>1000</code>.
+ */
+ private int cacheSize = 1000;
- /**
- * The number of documents that are pre fetched when a query is executed.
- * <p/>
- * Default value is: {@link Integer#MAX_VALUE}.
- */
- private int resultFetchSize = Integer.MAX_VALUE;
+ /**
+ * The number of documents that are pre fetched when a query is executed.
+ * <p/>
+ * Default value is: {@link Integer#MAX_VALUE}.
+ */
+ private int resultFetchSize = Integer.MAX_VALUE;
- /**
- * If set to <code>true</code> the fulltext field is stored and and a
term
- * vector is created with offset information.
- * <p/>
- * Default value is: <code>false</code>.
- */
- private boolean supportHighlighting = false;
+ /**
+ * If set to <code>true</code> the fulltext field is stored and and a
term
+ * vector is created with offset information.
+ * <p/>
+ * Default value is: <code>false</code>.
+ */
+ private boolean supportHighlighting = false;
- /**
- * The excerpt provider class. Implements {@link ExcerptProvider}.
- */
- private Class excerptProviderClass = DefaultHTMLExcerpt.class;
+ /**
+ * The excerpt provider class. Implements {@link ExcerptProvider}.
+ */
+ private Class<? extends ExcerptProvider> excerptProviderClass =
DefaultHTMLExcerpt.class;
- /**
- * The path to the indexing configuration file.
- */
- private String indexingConfigPath;
+ /**
+ * The path to the indexing configuration file.
+ */
+ private String indexingConfigPath;
- /**
- * The DOM with the indexing configuration or <code>null</code> if there
is
- * no such configuration.
- */
- private Element indexingConfiguration;
+ /**
+ * The DOM with the indexing configuration or <code>null</code> if there
is
+ * no such configuration.
+ */
+ private Element indexingConfiguration;
- /**
- * The indexing configuration.
- */
- private IndexingConfiguration indexingConfig;
+ /**
+ * The indexing configuration.
+ */
+ private IndexingConfiguration indexingConfig;
- /**
- * The name and path resolver used internally.
- */
- private LocationFactory npResolver;
+ /**
+ * The name and path resolver used internally.
+ */
+ private LocationFactory npResolver;
- /**
- * The indexing configuration class. Implements
- * {@link IndexingConfiguration}.
- */
- private Class indexingConfigurationClass = IndexingConfigurationImpl.class;
+ /**
+ * The indexing configuration class. Implements
+ * {@link IndexingConfiguration}.
+ */
+ private Class<? extends IndexingConfiguration> indexingConfigurationClass =
IndexingConfigurationImpl.class;
- /**
- * The class that implements {@link SynonymProvider}.
- */
- private Class synonymProviderClass;
+ /**
+ * The class that implements {@link SynonymProvider}.
+ */
+ private Class<? extends SynonymProvider> synonymProviderClass;
- /**
- * The currently set synonym provider.
- */
- private SynonymProvider synProvider;
+ /**
+ * The currently set synonym provider.
+ */
+ private SynonymProvider synProvider;
- /**
- * The configuration path for the synonym provider.
- */
- private String synonymProviderConfigPath;
+ /**
+ * The configuration path for the synonym provider.
+ */
+ private String synonymProviderConfigPath;
- /**
- * The FileSystem for the synonym if the query handler context does not
- * provide one.
- */
- private InputStream synonymProviderConfigFs;
+ /**
+ * The FileSystem for the synonym if the query handler context does not
+ * provide one.
+ */
+ private InputStream synonymProviderConfigFs;
- /**
- * Indicates the index format version which is relevant to a
<b>query</b>.
- * This value may be different from what
- * {@link MultiIndex#getIndexFormatVersion()} returns because queries may be
- * executed on two physical indexes with different formats. Index format
- * versions are considered backward compatible. That is, the lower version
- * of the two physical indexes is used for querying.
- */
- private IndexFormatVersion indexFormatVersion;
+ /**
+ * Indicates the index format version which is relevant to a
<b>query</b>.
+ * This value may be different from what
+ * {@link MultiIndex#getIndexFormatVersion()} returns because queries may be
+ * executed on two physical indexes with different formats. Index format
+ * versions are considered backward compatible. That is, the lower version
+ * of the two physical indexes is used for querying.
+ */
+ private IndexFormatVersion indexFormatVersion;
- /**
- * The class that implements {@link SpellChecker}.
- */
- private Class spellCheckerClass;
+ /**
+ * The class that implements {@link SpellChecker}.
+ */
+ private Class<? extends SpellChecker> spellCheckerClass;
- /**
- * The spell checker for this query handler or <code>null</code> if none
is
- * configured.
- */
- private SpellChecker spellChecker;
+ /**
+ * The spell checker for this query handler or <code>null</code> if none
is
+ * configured.
+ */
+ private SpellChecker spellChecker;
- /**
- * The similarity in use for indexing and searching.
- */
- private Similarity similarity = Similarity.getDefault();
+ /**
+ * The similarity in use for indexing and searching.
+ */
+ private Similarity similarity = Similarity.getDefault();
- /**
- * The name of the directory manager class implementation.
- */
- private String directoryManagerClass = FSDirectoryManager.class.getName();
+ /**
+ * The name of the directory manager class implementation.
+ */
+ private String directoryManagerClass = FSDirectoryManager.class.getName();
- /**
- * The directory manager.
- */
- private DirectoryManager directoryManager;
+ /**
+ * The directory manager.
+ */
+ private DirectoryManager directoryManager;
- /**
- * The termInfosIndexDivisor.
- */
- private int termInfosIndexDivisor = DEFAULT_TERM_INFOS_INDEX_DIVISOR;
+ /**
+ * The termInfosIndexDivisor.
+ */
+ private int termInfosIndexDivisor = DEFAULT_TERM_INFOS_INDEX_DIVISOR;
- /**
- * The sort comparator source for indexed properties.
- */
- private SortComparatorSource scs;
+ /**
+ * The sort comparator source for indexed properties.
+ */
+ private SortComparatorSource scs;
- /**
- * Flag that indicates whether the hierarchy cache should be initialized
- * immediately on startup.
- */
- private boolean initializeHierarchyCache = true;
+ /**
+ * Flag that indicates whether the hierarchy cache should be initialized
+ * immediately on startup.
+ */
+ private boolean initializeHierarchyCache = true;
- /**
- * Indicates if this <code>SearchIndex</code> is closed and cannot be
used
- * anymore.
- */
- private boolean closed = false;
+ /**
+ * Indicates if this <code>SearchIndex</code> is closed and cannot be
used
+ * anymore.
+ */
+ private boolean closed = false;
- /**
- * Text extractor for extracting text content of binary properties.
- */
- private DocumentReaderService extractor;
+ /**
+ * Text extractor for extracting text content of binary properties.
+ */
+ private DocumentReaderService extractor;
- public static final int DEFAULT_ERRORLOG_FILE_SIZE = 50; // Kb
+ public static final int DEFAULT_ERRORLOG_FILE_SIZE = 50; // Kb
- private int errorLogfileSize = DEFAULT_ERRORLOG_FILE_SIZE;
- /**
- * The ErrorLog of this <code>MultiIndex</code>. All changes that must be
in
- * index but interrupted by IOException are here.
- */
- private ErrorLog errorLog;
+ private int errorLogfileSize = DEFAULT_ERRORLOG_FILE_SIZE;
- private final ConfigurationManager cfm;
+ /**
+ * The ErrorLog of this <code>MultiIndex</code>. All changes that must be
in
+ * index but interrupted by IOException are here.
+ */
+ private ErrorLog errorLog;
- /**
- * Working constructor.
- *
- * @throws RepositoryConfigurationException
- * @throws IOException
- */
- public SearchIndex(QueryHandlerEntry queryHandlerConfig,
- ConfigurationManager cfm) throws IOException,
- RepositoryConfigurationException {
- this.analyzer = new JcrStandartAnalyzer();
- // this.queryHandlerConfig = new QueryHandlerEntryWrapper(
- // queryHandlerConfig);
- this.cfm = cfm;
- SearchIndexConfigurationHelper searchIndexConfigurationHelper = new
SearchIndexConfigurationHelper(
- this);
- searchIndexConfigurationHelper.init(queryHandlerConfig);
- }
+ private final ConfigurationManager cfm;
- /**
- * For test constructor.
- */
- public SearchIndex() {
- this.analyzer = new JcrStandartAnalyzer();
- // this.queryHandlerConfig = null;
- this.cfm = null;
- }
+ /**
+ * Working constructor.
+ *
+ * @throws RepositoryConfigurationException
+ * @throws IOException
+ */
+ public SearchIndex(QueryHandlerEntry queryHandlerConfig, ConfigurationManager cfm)
throws IOException,
+ RepositoryConfigurationException
+ {
+ this.analyzer = new JcrStandartAnalyzer();
+ // this.queryHandlerConfig = new QueryHandlerEntryWrapper(
+ // queryHandlerConfig);
+ this.cfm = cfm;
+ SearchIndexConfigurationHelper searchIndexConfigurationHelper = new
SearchIndexConfigurationHelper(this);
+ searchIndexConfigurationHelper.init(queryHandlerConfig);
+ }
- /**
- * Initializes this <code>QueryHandler</code>. This implementation
requires
- * that a path parameter is set in the configuration. If this condition is
- * not met, a <code>IOException</code> is thrown.
- *
- * @throws IOException
- * if an error occurs while initializing this handler.
- * @throws RepositoryException
- */
- public void doInit() throws IOException, RepositoryException {
- QueryHandlerContext context = getContext();
- setPath(context.getIndexDirectory());
- if (path == null) {
- throw new IOException(
- "SearchIndex requires 'path' parameter in configuration!");
- }
+ /**
+ * For test constructor.
+ */
+ public SearchIndex()
+ {
+ this.analyzer = new JcrStandartAnalyzer();
+ // this.queryHandlerConfig = null;
+ this.cfm = null;
+ }
- File indexDirectory;
- if (path != null) {
+ /**
+ * Initializes this <code>QueryHandler</code>. This implementation
requires
+ * that a path parameter is set in the configuration. If this condition is
+ * not met, a <code>IOException</code> is thrown.
+ *
+ * @throws IOException
+ * if an error occurs while initializing this handler.
+ * @throws RepositoryException
+ */
+ public void doInit() throws IOException, RepositoryException
+ {
+ QueryHandlerContext context = getContext();
+ setPath(context.getIndexDirectory());
+ if (path == null)
+ {
+ throw new IOException("SearchIndex requires 'path' parameter in
configuration!");
+ }
- indexDirectory = new File(path);
- if (!indexDirectory.exists())
- if (!indexDirectory.mkdirs())
- throw new RepositoryException("fail to create index dir "
- + path);
- } else {
- throw new IOException(
- "SearchIndex requires 'path' parameter in configuration!");
- }
- log.info("path=" + path);
+ File indexDirectory;
+ if (path != null)
+ {
- // Set excludedIDs = new HashSet();
- // if (context.getExcludedNodeId() != null)
- // {
- // excludedIDs.add(context.getExcludedNodeId());
- // }
+ indexDirectory = new File(path);
+ if (!indexDirectory.exists())
+ if (!indexDirectory.mkdirs())
+ throw new RepositoryException("fail to create index dir " +
path);
+ }
+ else
+ {
+ throw new IOException("SearchIndex requires 'path' parameter in
configuration!");
+ }
+ log.info("path=" + path);
- extractor = context.getExtractor();
- // synProvider = queryHandlerConfig.createSynonymProvider(cfm);
- directoryManager = createDirectoryManager();
+ // Set excludedIDs = new HashSet();
+ // if (context.getExcludedNodeId() != null)
+ // {
+ // excludedIDs.add(context.getExcludedNodeId());
+ // }
- if (context.getParentHandler() instanceof SearchIndex) {
- // use system namespace mappings
- SearchIndex sysIndex = (SearchIndex) context.getParentHandler();
- nsMappings = sysIndex.getNamespaceMappings();
- } else {
- // read local namespace mappings
- File mapFile = new File(indexDirectory, NS_MAPPING_FILE);
- if (mapFile.exists()) {
- // be backward compatible and use ns_mappings.properties from
- // index folder
- nsMappings = new FileBasedNamespaceMappings(mapFile);
- } else {
- // otherwise use repository wide stable index prefix from
- // namespace registry
- nsMappings = new NSRegistryBasedNamespaceMappings(context
- .getNamespaceRegistry());
- }
- }
+ extractor = context.getExtractor();
+ synProvider =
createSynonymProvider();//queryHandlerConfig.createSynonymProvider(cfm);
+ directoryManager = createDirectoryManager();
- scs = new SharedFieldSortComparator(FieldNames.PROPERTIES, context
- .getItemStateManager(), nsMappings);
- npResolver = new LocationFactory(nsMappings);
+ if (context.getParentHandler() instanceof SearchIndex)
+ {
+ // use system namespace mappings
+ SearchIndex sysIndex = (SearchIndex)context.getParentHandler();
+ nsMappings = sysIndex.getNamespaceMappings();
+ }
+ else
+ {
+ // read local namespace mappings
+ File mapFile = new File(indexDirectory, NS_MAPPING_FILE);
+ if (mapFile.exists())
+ {
+ // be backward compatible and use ns_mappings.properties from
+ // index folder
+ nsMappings = new FileBasedNamespaceMappings(mapFile);
+ }
+ else
+ {
+ // otherwise use repository wide stable index prefix from
+ // namespace registry
+ nsMappings = new
NSRegistryBasedNamespaceMappings(context.getNamespaceRegistry());
+ }
+ }
- indexingConfig = createIndexingConfiguration(nsMappings);
- analyzer.setIndexingConfig(indexingConfig);
+ scs = new SharedFieldSortComparator(FieldNames.PROPERTIES,
context.getItemStateManager(), nsMappings);
+ npResolver = new LocationFactory(nsMappings);
- index = new MultiIndex(this, context.getIndexingTree());
- if (index.numDocs() == 0 && context.isCreateInitialIndex()) {
+ indexingConfig = createIndexingConfiguration(nsMappings);
+ analyzer.setIndexingConfig(indexingConfig);
- index.createInitialIndex(context.getItemStateManager());
- }
- if (consistencyCheckEnabled
- && (index.getRedoLogApplied() || forceConsistencyCheck)) {
- log.info("Running consistency check...");
- try {
- ConsistencyCheck check = ConsistencyCheck.run(index, context
- .getItemStateManager());
- if (autoRepair) {
- check.repair(true);
- } else {
- List errors = check.getErrors();
- if (errors.size() == 0) {
- log.info("No errors detected.");
- }
- for (Iterator it = errors.iterator(); it.hasNext();) {
- ConsistencyCheckError err = (ConsistencyCheckError) it
- .next();
- log.info(err.toString());
- }
- }
- } catch (Exception e) {
- log.warn("Failed to run consistency check on index: " + e);
- }
- }
+ index = new MultiIndex(this, context.getIndexingTree());
+ if (index.numDocs() == 0 && context.isCreateInitialIndex())
+ {
- // initialize spell checker
- spellChecker = createSpellChecker();
+ index.createInitialIndex(context.getItemStateManager());
+ }
+ if (consistencyCheckEnabled && (index.getRedoLogApplied() ||
forceConsistencyCheck))
+ {
+ log.info("Running consistency check...");
+ try
+ {
+ ConsistencyCheck check = ConsistencyCheck.run(index,
context.getItemStateManager());
+ if (autoRepair)
+ {
+ check.repair(true);
+ }
+ else
+ {
+ List<ConsistencyCheckError> errors = check.getErrors();
+ if (errors.size() == 0)
+ {
+ log.info("No errors detected.");
+ }
+ for (Iterator<ConsistencyCheckError> it = errors.iterator();
it.hasNext();)
+ {
+ ConsistencyCheckError err = it.next();
+ log.info(err.toString());
+ }
+ }
+ }
+ catch (Exception e)
+ {
+ log.warn("Failed to run consistency check on index: " + e);
+ }
+ }
- log.info("Index initialized: {} Version: {}", new Object[] { path,
- index.getIndexFormatVersion() });
- if (!index.getIndexFormatVersion().equals(getIndexFormatVersion())) {
- log.warn("Using Version {} for reading. Please re-index version "
- + "storage for optimal performance.", new Integer(
- getIndexFormatVersion().getVersion()));
- }
+ // initialize spell checker
+ spellChecker = createSpellChecker();
- File file = new File(indexDirectory, ERROR_LOG);
- errorLog = new ErrorLog(file, errorLogfileSize);
- // reprocess any notfinished notifies;
- recoverErrorLog(errorLog);
+ log.info("Index initialized: {} Version: {}", new Object[]{path,
index.getIndexFormatVersion()});
+ if (!index.getIndexFormatVersion().equals(getIndexFormatVersion()))
+ {
+ log.warn("Using Version {} for reading. Please re-index version " +
"storage for optimal performance.",
+ new Integer(getIndexFormatVersion().getVersion()));
+ }
- }
+ File file = new File(indexDirectory, ERROR_LOG);
+ errorLog = new ErrorLog(file, errorLogfileSize);
+ // reprocess any notfinished notifies;
+ recoverErrorLog(errorLog);
- /**
- * @return the errorLogfileSize
- */
- public int getErrorLogfileSize() {
- return errorLogfileSize;
- }
+ }
- /**
- * @param errorLogfileSize
- * the errorLogfileSize to set
- */
- public void setErrorLogfileSize(int errorLogfileSize) {
- this.errorLogfileSize = errorLogfileSize;
- }
+ /**
+ * @return the errorLogfileSize
+ */
+ public int getErrorLogfileSize()
+ {
+ return errorLogfileSize;
+ }
- /**
- * Adds the <code>node</code> to the search index.
- *
- * @param node
- * the node to add.
- * @throws RepositoryException
- * if an error occurs while indexing the node.
- * @throws IOException
- * if an error occurs while adding the node to the index.
- */
- public void addNode(NodeData node) throws RepositoryException, IOException {
- throw new UnsupportedOperationException("addNode");
- }
+ /**
+ * @param errorLogfileSize
+ * the errorLogfileSize to set
+ */
+ public void setErrorLogfileSize(int errorLogfileSize)
+ {
+ this.errorLogfileSize = errorLogfileSize;
+ }
- /**
- * Removes the node with <code>uuid</code> from the search index.
- *
- * @param id
- * the id of the node to remove from the index.
- * @throws IOException
- * if an error occurs while removing the node from the index.
- */
- public void deleteNode(String id) throws IOException {
- throw new UnsupportedOperationException("deleteNode");
- }
+ /**
+ * Adds the <code>node</code> to the search index.
+ *
+ * @param node
+ * the node to add.
+ * @throws RepositoryException
+ * if an error occurs while indexing the node.
+ * @throws IOException
+ * if an error occurs while adding the node to the index.
+ */
+ public void addNode(NodeData node) throws RepositoryException, IOException
+ {
+ throw new UnsupportedOperationException("addNode");
+ }
- /**
- * This implementation forwards the call to
- * {@link MultiIndex#update(Collection, Collection)} and transforms the two
- * iterators to the required types.
- *
- * @param remove
- * uuids of nodes to remove.
- * @param add
- * NodeStates to add. Calls to <code>next()</code> on this
- * iterator may return <code>null</code>, to indicate that a
node
- * could not be indexed successfully.
- * @throws RepositoryException
- * if an error occurs while indexing a node.
- * @throws IOException
- * if an error occurs while updating the index.
- */
- @Override
- public void updateNodes(final Iterator<String> remove,
- final Iterator<NodeData> add) throws RepositoryException,
- IOException {
- checkOpen();
- final Map<String, NodeData> aggregateRoots = new HashMap<String,
NodeData>();
- final Set<String> removedNodeIds = new HashSet<String>();
- final Set<String> addedNodeIds = new HashSet<String>();
+ /**
+ * Removes the node with <code>uuid</code> from the search index.
+ *
+ * @param id
+ * the id of the node to remove from the index.
+ * @throws IOException
+ * if an error occurs while removing the node from the index.
+ */
+ public void deleteNode(String id) throws IOException
+ {
+ throw new UnsupportedOperationException("deleteNode");
+ }
- index.update(IteratorUtils.toList(new TransformIterator(remove,
- new Transformer() {
- public Object transform(Object input) {
- String uuid = ((String) input);
- removedNodeIds.add(uuid);
- return uuid;
- }
- })), IteratorUtils.toList(new TransformIterator(add,
- new Transformer() {
- public Object transform(Object input) {
- NodeData state = (NodeData) input;
- if (state == null) {
- return null;
- }
- String uuid = state.getIdentifier();
- addedNodeIds.add(uuid);
- removedNodeIds.remove(uuid);
- Document doc = null;
- try {
- doc = createDocument(state, getNamespaceMappings(),
- index.getIndexFormatVersion());
- retrieveAggregateRoot(state, aggregateRoots);
- } catch (RepositoryException e) {
- log.warn(
- "Exception while creating document for node: "
- + state.getIdentifier() + ": "
- + e.toString(), e);
- }
- return doc;
- }
- })));
+ /**
+ * This implementation forwards the call to
+ * {@link MultiIndex#update(Collection, Collection)} and transforms the two
+ * iterators to the required types.
+ *
+ * @param remove
+ * uuids of nodes to remove.
+ * @param add
+ * NodeStates to add. Calls to <code>next()</code> on this
+ * iterator may return <code>null</code>, to indicate that a
node
+ * could not be indexed successfully.
+ * @throws RepositoryException
+ * if an error occurs while indexing a node.
+ * @throws IOException
+ * if an error occurs while updating the index.
+ */
+ @Override
+ public void updateNodes(final Iterator<String> remove, final
Iterator<NodeData> add) throws RepositoryException,
+ IOException
+ {
+ checkOpen();
+ final Map<String, NodeData> aggregateRoots = new HashMap<String,
NodeData>();
+ final Set<String> removedNodeIds = new HashSet<String>();
+ final Set<String> addedNodeIds = new HashSet<String>();
- // remove any aggregateRoot nodes that are new
- // and therefore already up-to-date
- aggregateRoots.keySet().removeAll(addedNodeIds);
+ index.update(IteratorUtils.toList(new TransformIterator(remove, new Transformer()
+ {
+ public Object transform(Object input)
+ {
+ String uuid = ((String)input);
+ removedNodeIds.add(uuid);
+ return uuid;
+ }
+ })), IteratorUtils.toList(new TransformIterator(add, new Transformer()
+ {
+ public Object transform(Object input)
+ {
+ NodeData state = (NodeData)input;
+ if (state == null)
+ {
+ return null;
+ }
+ String uuid = state.getIdentifier();
+ addedNodeIds.add(uuid);
+ removedNodeIds.remove(uuid);
+ Document doc = null;
+ try
+ {
+ doc = createDocument(state, getNamespaceMappings(),
index.getIndexFormatVersion());
+ retrieveAggregateRoot(state, aggregateRoots);
+ }
+ catch (RepositoryException e)
+ {
+ log
+ .warn("Exception while creating document for node: " +
state.getIdentifier() + ": " + e.toString(), e);
+ }
+ return doc;
+ }
+ })));
- // based on removed UUIDs get affected aggregate root nodes
- retrieveAggregateRoot(removedNodeIds, aggregateRoots);
+ // remove any aggregateRoot nodes that are new
+ // and therefore already up-to-date
+ aggregateRoots.keySet().removeAll(addedNodeIds);
- // update aggregates if there are any affected
- if (aggregateRoots.size() > 0) {
- Collection modified = TransformedCollection.decorate(
- new ArrayList(), new Transformer() {
- public Object transform(Object input) {
- NodeData state = (NodeData) input;
- try {
- return createDocument(state,
- getNamespaceMappings(), index
- .getIndexFormatVersion());
- } catch (RepositoryException e) {
- log
- .warn("Exception while creating document for node: "
- + state.getIdentifier()
- + ": "
- + e.toString());
- }
- return null;
- }
- });
- modified.addAll(aggregateRoots.values());
- index.update(aggregateRoots.keySet(), modified);
- }
- }
+ // based on removed UUIDs get affected aggregate root nodes
+ retrieveAggregateRoot(removedNodeIds, aggregateRoots);
- /**
- * Creates a new query by specifying the query statement itself and the
- * language in which the query is stated. If the query statement is
- * syntactically invalid, given the language specified, an
- * InvalidQueryException is thrown. <code>language</code> must specify a
- * query language string from among those returned by
- * QueryManager.getSupportedQueryLanguages(); if it is not then an
- * <code>InvalidQueryException</code> is thrown.
- *
- * @param session
- * the session of the current user creating the query object.
- * @param itemMgr
- * the item manager of the current user.
- * @param statement
- * the query statement.
- * @param language
- * the syntax of the query statement.
- * @throws InvalidQueryException
- * if statement is invalid or language is unsupported.
- * @return A <code>Query</code> object.
- */
- public ExecutableQuery createExecutableQuery(SessionImpl session,
- SessionDataManager itemMgr, String statement, String language)
- throws InvalidQueryException {
- QueryImpl query = new QueryImpl(session, itemMgr, this, getContext()
- .getPropertyTypeRegistry(), statement, language,
- getQueryNodeFactory());
- query.setRespectDocumentOrder(documentOrder);
- return query;
- }
+ // update aggregates if there are any affected
+ if (aggregateRoots.size() > 0)
+ {
+ Collection modified = TransformedCollection.decorate(new ArrayList(), new
Transformer()
+ {
+ public Object transform(Object input)
+ {
+ NodeData state = (NodeData)input;
+ try
+ {
+ return createDocument(state, getNamespaceMappings(),
index.getIndexFormatVersion());
+ }
+ catch (RepositoryException e)
+ {
+ log
+ .warn("Exception while creating document for node: " +
state.getIdentifier() + ": " + e.toString());
+ }
+ return null;
+ }
+ });
+ modified.addAll(aggregateRoots.values());
+ index.update(aggregateRoots.keySet(), modified);
+ }
+ }
- // /**
- // * Creates a new query by specifying the query object model. If the query
- // * object model is considered invalid for the implementing class, an
- // * InvalidQueryException is thrown.
- // *
- // * @param session the session of the current user creating the query
- // * object.
- // * @param itemMgr the item manager of the current user.
- // * @param qomTree query query object model tree.
- // * @return A <code>Query</code> object.
- // * @throws javax.jcr.query.InvalidQueryException
- // * if the query object model tree is invalid.
- // * @see QueryHandler#createExecutableQuery(SessionImpl, ItemManager,
- // QueryObjectModelTree)
- // */
- // public ExecutableQuery createExecutableQuery(SessionImpl session,
- // SessionDataManager itemMgr, QueryObjectModelTree qomTree)
- // throws InvalidQueryException
- // {
- // QueryObjectModelImpl query =
- // new QueryObjectModelImpl(session, itemMgr, this,
- // getContext().getPropertyTypeRegistry(), qomTree);
- // query.setRespectDocumentOrder(documentOrder);
- // return query;
- // }
+ /**
+ * Creates a new query by specifying the query statement itself and the
+ * language in which the query is stated. If the query statement is
+ * syntactically invalid, given the language specified, an
+ * InvalidQueryException is thrown. <code>language</code> must specify a
+ * query language string from among those returned by
+ * QueryManager.getSupportedQueryLanguages(); if it is not then an
+ * <code>InvalidQueryException</code> is thrown.
+ *
+ * @param session
+ * the session of the current user creating the query object.
+ * @param itemMgr
+ * the item manager of the current user.
+ * @param statement
+ * the query statement.
+ * @param language
+ * the syntax of the query statement.
+ * @throws InvalidQueryException
+ * if statement is invalid or language is unsupported.
+ * @return A <code>Query</code> object.
+ */
+ public ExecutableQuery createExecutableQuery(SessionImpl session, SessionDataManager
itemMgr, String statement,
+ String language) throws InvalidQueryException
+ {
+ QueryImpl query =
+ new QueryImpl(session, itemMgr, this, getContext().getPropertyTypeRegistry(),
statement, language,
+ getQueryNodeFactory());
+ query.setRespectDocumentOrder(documentOrder);
+ return query;
+ }
- /**
- * This method returns the QueryNodeFactory used to parse Queries. This
- * method may be overridden to provide a customized QueryNodeFactory
- */
- protected DefaultQueryNodeFactory getQueryNodeFactory() {
- return DEFAULT_QUERY_NODE_FACTORY;
- }
+ // /**
+ // * Creates a new query by specifying the query object model. If the query
+ // * object model is considered invalid for the implementing class, an
+ // * InvalidQueryException is thrown.
+ // *
+ // * @param session the session of the current user creating the query
+ // * object.
+ // * @param itemMgr the item manager of the current user.
+ // * @param qomTree query query object model tree.
+ // * @return A <code>Query</code> object.
+ // * @throws javax.jcr.query.InvalidQueryException
+ // * if the query object model tree is invalid.
+ // * @see QueryHandler#createExecutableQuery(SessionImpl, ItemManager,
+ // QueryObjectModelTree)
+ // */
+ // public ExecutableQuery createExecutableQuery(SessionImpl session,
+ // SessionDataManager itemMgr, QueryObjectModelTree qomTree)
+ // throws InvalidQueryException
+ // {
+ // QueryObjectModelImpl query =
+ // new QueryObjectModelImpl(session, itemMgr, this,
+ // getContext().getPropertyTypeRegistry(), qomTree);
+ // query.setRespectDocumentOrder(documentOrder);
+ // return query;
+ // }
- /**
- * Closes this <code>QueryHandler</code> and frees resources attached to
- * this handler.
- */
- public void close() {
- if (synonymProviderConfigFs != null) {
- try {
- synonymProviderConfigFs.close();
- } catch (IOException e) {
- log.warn("Exception while closing FileSystem", e);
- }
- }
- // shutdown extractor
- if (extractor instanceof PooledTextExtractor) {
- // ((PooledTextExtractor)extractor).shutdown();
- }
- if (spellChecker != null) {
- spellChecker.close();
- }
- index.close();
- getContext().destroy();
- closed = true;
- log.info("Index closed: " + path);
- }
+ /**
+ * This method returns the QueryNodeFactory used to parse Queries. This
+ * method may be overridden to provide a customized QueryNodeFactory
+ */
+ protected DefaultQueryNodeFactory getQueryNodeFactory()
+ {
+ return DEFAULT_QUERY_NODE_FACTORY;
+ }
- /**
- * Executes the query on the search index.
- *
- * @param session
- * the session that executes the query.
- * @param queryImpl
- * the query impl.
- * @param query
- * the lucene query.
- * @param orderProps
- * name of the properties for sort order.
- * @param orderSpecs
- * the order specs for the sort order properties.
- * <code>true</code> indicates ascending order,
- * <code>false</code> indicates descending.
- * @param resultFetchHint
- * a hint on how many results should be fetched.
- * @return the query hits.
- * @throws IOException
- * if an error occurs while searching the index.
- * @throws RepositoryException
- */
- public MultiColumnQueryHits executeQuery(SessionImpl session,
- AbstractQueryImpl queryImpl, Query query, QPath[] orderProps,
- boolean[] orderSpecs, long resultFetchHint) throws IOException,
- RepositoryException {
- checkOpen();
+ /**
+ * Closes this <code>QueryHandler</code> and frees resources attached to
+ * this handler.
+ */
+ public void close()
+ {
+ if (synonymProviderConfigFs != null)
+ {
+ try
+ {
+ synonymProviderConfigFs.close();
+ }
+ catch (IOException e)
+ {
+ log.warn("Exception while closing FileSystem", e);
+ }
+ }
+ // shutdown extractor
+ if (extractor instanceof PooledTextExtractor)
+ {
+ // ((PooledTextExtractor)extractor).shutdown();
+ }
+ if (spellChecker != null)
+ {
+ spellChecker.close();
+ }
+ index.close();
+ getContext().destroy();
+ closed = true;
+ log.info("Index closed: " + path);
+ }
- Sort sort = new Sort(createSortFields(orderProps, orderSpecs));
+ /**
+ * Executes the query on the search index.
+ *
+ * @param session
+ * the session that executes the query.
+ * @param queryImpl
+ * the query impl.
+ * @param query
+ * the lucene query.
+ * @param orderProps
+ * name of the properties for sort order.
+ * @param orderSpecs
+ * the order specs for the sort order properties.
+ * <code>true</code> indicates ascending order,
+ * <code>false</code> indicates descending.
+ * @param resultFetchHint
+ * a hint on how many results should be fetched.
+ * @return the query hits.
+ * @throws IOException
+ * if an error occurs while searching the index.
+ * @throws RepositoryException
+ */
+ public MultiColumnQueryHits executeQuery(SessionImpl session, AbstractQueryImpl
queryImpl, Query query,
+ QPath[] orderProps, boolean[] orderSpecs, long resultFetchHint) throws IOException,
RepositoryException
+ {
+ checkOpen();
- final IndexReader reader = getIndexReader(queryImpl.needsSystemTree());
- JcrIndexSearcher searcher = new JcrIndexSearcher(session,
- reader, getContext().getItemStateManager());
- searcher.setSimilarity(getSimilarity());
- return new FilterMultiColumnQueryHits(searcher.execute(query, sort,
- resultFetchHint, QueryImpl.DEFAULT_SELECTOR_NAME)) {
- public void close() throws IOException {
- try {
- super.close();
- } finally {
- PerQueryCache.getInstance().dispose();
- Util.closeOrRelease(reader);
- }
- }
- };
- }
+ Sort sort = new Sort(createSortFields(orderProps, orderSpecs));
- /**
- * Executes the query on the search index.
- *
- * @param session
- * the session that executes the query.
- * @param query
- * the query.
- * @param orderProps
- * name of the properties for sort order.
- * @param orderSpecs
- * the order specs for the sort order properties.
- * <code>true</code> indicates ascending order,
- * <code>false</code> indicates descending.
- * @param resultFetchHint
- * a hint on how many results should be fetched.
- * @return the query hits.
- * @throws IOException
- * if an error occurs while searching the index.
- * @throws RepositoryException
- */
- public MultiColumnQueryHits executeQuery(SessionImpl session,
- MultiColumnQuery query, QPath[] orderProps, boolean[] orderSpecs,
- long resultFetchHint) throws IOException, RepositoryException {
- checkOpen();
+ final IndexReader reader = getIndexReader(queryImpl.needsSystemTree());
+ JcrIndexSearcher searcher = new JcrIndexSearcher(session, reader,
getContext().getItemStateManager());
+ searcher.setSimilarity(getSimilarity());
+ return new FilterMultiColumnQueryHits(searcher.execute(query, sort,
resultFetchHint,
+ QueryImpl.DEFAULT_SELECTOR_NAME))
+ {
+ public void close() throws IOException
+ {
+ try
+ {
+ super.close();
+ }
+ finally
+ {
+ PerQueryCache.getInstance().dispose();
+ Util.closeOrRelease(reader);
+ }
+ }
+ };
+ }
- Sort sort = new Sort(createSortFields(orderProps, orderSpecs));
+ /**
+ * Executes the query on the search index.
+ *
+ * @param session
+ * the session that executes the query.
+ * @param query
+ * the query.
+ * @param orderProps
+ * name of the properties for sort order.
+ * @param orderSpecs
+ * the order specs for the sort order properties.
+ * <code>true</code> indicates ascending order,
+ * <code>false</code> indicates descending.
+ * @param resultFetchHint
+ * a hint on how many results should be fetched.
+ * @return the query hits.
+ * @throws IOException
+ * if an error occurs while searching the index.
+ * @throws RepositoryException
+ */
+ public MultiColumnQueryHits executeQuery(SessionImpl session, MultiColumnQuery query,
QPath[] orderProps,
+ boolean[] orderSpecs, long resultFetchHint) throws IOException,
RepositoryException
+ {
+ checkOpen();
- final IndexReader reader = getIndexReader();
- JcrIndexSearcher searcher = new JcrIndexSearcher(session,
- reader, getContext().getItemStateManager());
- searcher.setSimilarity(getSimilarity());
- return new FilterMultiColumnQueryHits(query.execute(searcher, sort,
- resultFetchHint)) {
- public void close() throws IOException {
- try {
- super.close();
- } finally {
- PerQueryCache.getInstance().dispose();
- Util.closeOrRelease(reader);
- }
- }
- };
- }
+ Sort sort = new Sort(createSortFields(orderProps, orderSpecs));
- /**
- * Creates an excerpt provider for the given <code>query</code>.
- *
- * @param query
- * the query.
- * @return an excerpt provider for the given <code>query</code>.
- * @throws IOException
- * if the provider cannot be created.
- */
- public ExcerptProvider createExcerptProvider(Query query)
- throws IOException {
- ExcerptProvider ep;
- try {
- ep = (ExcerptProvider) excerptProviderClass.newInstance();
- } catch (Exception e) {
- throw Util.createIOException(e);
- }
- ep.init(query, this);
- return ep;
- }
+ final IndexReader reader = getIndexReader();
+ JcrIndexSearcher searcher = new JcrIndexSearcher(session, reader,
getContext().getItemStateManager());
+ searcher.setSimilarity(getSimilarity());
+ return new FilterMultiColumnQueryHits(query.execute(searcher, sort,
resultFetchHint))
+ {
+ public void close() throws IOException
+ {
+ try
+ {
+ super.close();
+ }
+ finally
+ {
+ PerQueryCache.getInstance().dispose();
+ Util.closeOrRelease(reader);
+ }
+ }
+ };
+ }
- /**
- * Returns the analyzer in use for indexing.
- *
- * @return the analyzer in use for indexing.
- */
- public Analyzer getTextAnalyzer() {
- return analyzer;
- }
+ /**
+ * Creates an excerpt provider for the given <code>query</code>.
+ *
+ * @param query
+ * the query.
+ * @return an excerpt provider for the given <code>query</code>.
+ * @throws IOException
+ * if the provider cannot be created.
+ */
+ public ExcerptProvider createExcerptProvider(Query query) throws IOException
+ {
+ ExcerptProvider ep;
+ try
+ {
+ ep = excerptProviderClass.newInstance();
+ }
+ catch (Exception e)
+ {
+ throw Util.createIOException(e);
+ }
+ ep.init(query, this);
+ return ep;
+ }
- // /**
- // * Returns the text extractor in use for indexing.
- // *
- // * @return the text extractor in use for indexing.
- // */
- // public TextExtractor getTextExtractor()
- // {
- // return extractor;
- // }
+ /**
+ * Returns the analyzer in use for indexing.
+ *
+ * @return the analyzer in use for indexing.
+ */
+ public Analyzer getTextAnalyzer()
+ {
+ return analyzer;
+ }
- /**
- * Returns the namespace mappings for the internal representation.
- *
- * @return the namespace mappings for the internal representation.
- */
- public NamespaceMappings getNamespaceMappings() {
- return nsMappings;
- }
+ // /**
+ // * Returns the text extractor in use for indexing.
+ // *
+ // * @return the text extractor in use for indexing.
+ // */
+ // public TextExtractor getTextExtractor()
+ // {
+ // return extractor;
+ // }
- /**
- * @return the indexing configuration or <code>null</code> if there is
none.
- */
- public IndexingConfiguration getIndexingConfig() {
- return indexingConfig;
- }
+ /**
+ * Returns the namespace mappings for the internal representation.
+ *
+ * @return the namespace mappings for the internal representation.
+ */
+ public NamespaceMappings getNamespaceMappings()
+ {
+ return nsMappings;
+ }
- /**
- * @return the synonym provider of this search index. If none is set for
- * this search index the synonym provider of the parent handler is
- * returned if there is any.
- */
- public SynonymProvider getSynonymProvider() {
- if (synProvider != null) {
- return synProvider;
- } else {
- QueryHandler handler = getContext().getParentHandler();
- if (handler instanceof SearchIndex) {
- return ((SearchIndex) handler).getSynonymProvider();
- } else {
- return null;
- }
- }
- }
+ /**
+ * @return the indexing configuration or <code>null</code> if there is
none.
+ */
+ public IndexingConfiguration getIndexingConfig()
+ {
+ return indexingConfig;
+ }
- /**
- * @return the spell checker of this search index. If none is configured
- * this method returns <code>null</code>.
- */
- public SpellChecker getSpellChecker() {
- return spellChecker;
- }
+ /**
+ * @return the synonym provider of this search index. If none is set for
+ * this search index the synonym provider of the parent handler is
+ * returned if there is any.
+ */
+ public SynonymProvider getSynonymProvider()
+ {
+ if (synProvider != null)
+ {
+ return synProvider;
+ }
+ else
+ {
+ QueryHandler handler = getContext().getParentHandler();
+ if (handler instanceof SearchIndex)
+ {
+ return ((SearchIndex)handler).getSynonymProvider();
+ }
+ else
+ {
+ return null;
+ }
+ }
+ }
- /**
- * @return the similarity, which should be used for indexing and searching.
- */
- public Similarity getSimilarity() {
- return similarity;
- }
+ /**
+ * @return the spell checker of this search index. If none is configured
+ * this method returns <code>null</code>.
+ */
+ public SpellChecker getSpellChecker()
+ {
+ return spellChecker;
+ }
- /**
- * Returns an index reader for this search index. The caller of this method
- * is responsible for closing the index reader when he is finished using it.
- *
- * @return an index reader for this search index.
- * @throws IOException
- * the index reader cannot be obtained.
- */
- public IndexReader getIndexReader() throws IOException {
- return getIndexReader(true);
- }
+ /**
+ * @return the similarity, which should be used for indexing and searching.
+ */
+ public Similarity getSimilarity()
+ {
+ return similarity;
+ }
- /**
- * Returns the index format version that this search index is able to
- * support when a query is executed on this index.
- *
- * @return the index format version for this search index.
- */
- public IndexFormatVersion getIndexFormatVersion() {
- if (indexFormatVersion == null) {
- if (getContext().getParentHandler() instanceof SearchIndex) {
- SearchIndex parent = (SearchIndex) getContext()
- .getParentHandler();
- if (parent.getIndexFormatVersion().getVersion() < index
- .getIndexFormatVersion().getVersion()) {
- indexFormatVersion = parent.getIndexFormatVersion();
- } else {
- indexFormatVersion = index.getIndexFormatVersion();
- }
- } else {
- indexFormatVersion = index.getIndexFormatVersion();
- }
- }
- return indexFormatVersion;
- }
+ /**
+ * Returns an index reader for this search index. The caller of this method
+ * is responsible for closing the index reader when he is finished using it.
+ *
+ * @return an index reader for this search index.
+ * @throws IOException
+ * the index reader cannot be obtained.
+ */
+ public IndexReader getIndexReader() throws IOException
+ {
+ return getIndexReader(true);
+ }
- /**
- * @return the directory manager for this search index.
- */
- public DirectoryManager getDirectoryManager() {
- return directoryManager;
- }
+ /**
+ * Returns the index format version that this search index is able to
+ * support when a query is executed on this index.
+ *
+ * @return the index format version for this search index.
+ */
+ public IndexFormatVersion getIndexFormatVersion()
+ {
+ if (indexFormatVersion == null)
+ {
+ if (getContext().getParentHandler() instanceof SearchIndex)
+ {
+ SearchIndex parent = (SearchIndex)getContext().getParentHandler();
+ if (parent.getIndexFormatVersion().getVersion() <
index.getIndexFormatVersion().getVersion())
+ {
+ indexFormatVersion = parent.getIndexFormatVersion();
+ }
+ else
+ {
+ indexFormatVersion = index.getIndexFormatVersion();
+ }
+ }
+ else
+ {
+ indexFormatVersion = index.getIndexFormatVersion();
+ }
+ }
+ return indexFormatVersion;
+ }
- /**
- * Returns an index reader for this search index. The caller of this method
- * is responsible for closing the index reader when he is finished using it.
- *
- * @param includeSystemIndex
- * if <code>true</code> the index reader will cover the
complete
- * workspace. If <code>false</code> the returned index reader
- * will not contains any nodes under /jcr:system.
- * @return an index reader for this search index.
- * @throws IOException
- * the index reader cannot be obtained.
- */
- protected IndexReader getIndexReader(boolean includeSystemIndex)
- throws IOException {
- QueryHandler parentHandler = getContext().getParentHandler();
- CachingMultiIndexReader parentReader = null;
- if (parentHandler instanceof SearchIndex && includeSystemIndex) {
- parentReader = ((SearchIndex) parentHandler).index.getIndexReader();
- }
+ /**
+ * @return the directory manager for this search index.
+ */
+ public DirectoryManager getDirectoryManager()
+ {
+ return directoryManager;
+ }
- IndexReader reader;
- if (parentReader != null) {
- CachingMultiIndexReader[] readers = { index.getIndexReader(),
- parentReader };
- reader = new CombinedIndexReader(readers);
- } else {
- reader = index.getIndexReader();
- }
- return new JcrIndexReader(reader);
- }
+ /**
+ * Returns an index reader for this search index. The caller of this method
+ * is responsible for closing the index reader when he is finished using it.
+ *
+ * @param includeSystemIndex
+ * if <code>true</code> the index reader will cover the
complete
+ * workspace. If <code>false</code> the returned index reader
+ * will not contains any nodes under /jcr:system.
+ * @return an index reader for this search index.
+ * @throws IOException
+ * the index reader cannot be obtained.
+ */
+ protected IndexReader getIndexReader(boolean includeSystemIndex) throws IOException
+ {
+ QueryHandler parentHandler = getContext().getParentHandler();
+ CachingMultiIndexReader parentReader = null;
+ if (parentHandler instanceof SearchIndex && includeSystemIndex)
+ {
+ parentReader = ((SearchIndex)parentHandler).index.getIndexReader();
+ }
- /**
- * Creates the SortFields for the order properties.
- *
- * @param orderProps
- * the order properties.
- * @param orderSpecs
- * the order specs for the properties.
- * @return an array of sort fields
- * @throws RepositoryException
- */
- protected SortField[] createSortFields(QPath[] orderProps,
- boolean[] orderSpecs) throws RepositoryException {
- List sortFields = new ArrayList();
- for (int i = 0; i < orderProps.length; i++) {
- if (orderProps[i].getDepth() == 1
- && Constants.JCR_SCORE.equals(orderProps[i].getName())) {
- // order on jcr:score does not use the natural order as
- // implemented in lucene. score ascending in lucene means that
- // higher scores are first. JCR specs that lower score values
- // are first.
- sortFields.add(new SortField(null, SortField.SCORE,
- orderSpecs[i]));
- } else {
- path = npResolver.createJCRPath(orderProps[i]).getAsString(
- false);
- sortFields.add(new SortField(path, scs, !orderSpecs[i]));
- }
- }
- return (SortField[]) sortFields
- .toArray(new SortField[sortFields.size()]);
- }
+ IndexReader reader;
+ if (parentReader != null)
+ {
+ CachingMultiIndexReader[] readers = {index.getIndexReader(), parentReader};
+ reader = new CombinedIndexReader(readers);
+ }
+ else
+ {
+ reader = index.getIndexReader();
+ }
+ return new JcrIndexReader(reader);
+ }
- /**
- * Creates a lucene <code>Document</code> for a node state using the
- * namespace mappings <code>nsMappings</code>.
- *
- * @param node
- * the node state to index.
- * @param nsMappings
- * the namespace mappings of the search index.
- * @param indexFormatVersion
- * the index format version that should be used to index the
- * passed node state.
- * @return a lucene <code>Document</code> that contains all properties
of
- * <code>node</code>.
- * @throws RepositoryException
- * if an error occurs while indexing the <code>node</code>.
- */
- protected Document createDocument(NodeData node,
- NamespaceMappings nsMappings, IndexFormatVersion indexFormatVersion)
- throws RepositoryException {
- NodeIndexer indexer = new NodeIndexer(node, getContext()
- .getItemStateManager(), nsMappings, extractor);
- indexer.setSupportHighlighting(supportHighlighting);
- indexer.setIndexingConfiguration(indexingConfig);
- indexer.setIndexFormatVersion(indexFormatVersion);
- Document doc = indexer.createDoc();
- mergeAggregatedNodeIndexes(node, doc);
- return doc;
- }
+ /**
+ * Creates the SortFields for the order properties.
+ *
+ * @param orderProps
+ * the order properties.
+ * @param orderSpecs
+ * the order specs for the properties.
+ * @return an array of sort fields
+ * @throws RepositoryException
+ */
+ protected SortField[] createSortFields(QPath[] orderProps, boolean[] orderSpecs)
throws RepositoryException
+ {
+ List<SortField> sortFields = new ArrayList<SortField>();
+ for (int i = 0; i < orderProps.length; i++)
+ {
+ if (orderProps[i].getEntries().length == 1 &&
Constants.JCR_SCORE.equals(orderProps[i].getName()))
+ {
+ // order on jcr:score does not use the natural order as
+ // implemented in lucene. score ascending in lucene means that
+ // higher scores are first. JCR specs that lower score values
+ // are first.
+ sortFields.add(new SortField(null, SortField.SCORE, orderSpecs[i]));
+ }
+ else
+ {
+ path = npResolver.createJCRPath(orderProps[i]).getAsString(false);
+ sortFields.add(new SortField(path, scs, !orderSpecs[i]));
+ }
+ }
+ return sortFields.toArray(new SortField[sortFields.size()]);
+ }
- /**
- * Returns the actual index.
- *
- * @return the actual index.
- */
- public MultiIndex getIndex() {
- return index;
- }
+ /**
+ * Creates a lucene <code>Document</code> for a node state using the
+ * namespace mappings <code>nsMappings</code>.
+ *
+ * @param node
+ * the node state to index.
+ * @param nsMappings
+ * the namespace mappings of the search index.
+ * @param indexFormatVersion
+ * the index format version that should be used to index the
+ * passed node state.
+ * @return a lucene <code>Document</code> that contains all properties of
+ * <code>node</code>.
+ * @throws RepositoryException
+ * if an error occurs while indexing the <code>node</code>.
+ */
+ protected Document createDocument(NodeData node, NamespaceMappings nsMappings,
IndexFormatVersion indexFormatVersion)
+ throws RepositoryException
+ {
+ NodeIndexer indexer = new NodeIndexer(node, getContext().getItemStateManager(),
nsMappings, extractor);
+ indexer.setSupportHighlighting(supportHighlighting);
+ indexer.setIndexingConfiguration(indexingConfig);
+ indexer.setIndexFormatVersion(indexFormatVersion);
+ Document doc = indexer.createDoc();
+ mergeAggregatedNodeIndexes(node, doc);
+ return doc;
+ }
- /**
- * @return the sort comparator source for this index.
- */
- protected SortComparatorSource getSortComparatorSource() {
- return scs;
- }
+ /**
+ * Returns the actual index.
+ *
+ * @return the actual index.
+ */
+ public MultiIndex getIndex()
+ {
+ return index;
+ }
- // /**
- // * Factory method to create the <code>TextExtractor</code> instance.
- // *
- // * @return the <code>TextExtractor</code> instance this index should
use.
- // */
- // protected TextExtractor createTextExtractor()
- // {
- // TextExtractor txtExtr = new JackrabbitTextExtractor(textFilterClasses);
- // if (extractorPoolSize > 0)
- // {
- // // wrap with pool
- // txtExtr = new PooledTextExtractor(txtExtr, extractorPoolSize,
- // extractorBackLog, extractorTimeout);
- // }
- // return txtExtr;
- // }
+ /**
+ * @return the sort comparator source for this index.
+ */
+ protected SortComparatorSource getSortComparatorSource()
+ {
+ return scs;
+ }
- /**
- * @param namespaceMappings
- * The namespace mappings
- * @return the fulltext indexing configuration or <code>null</code> if
there
- * is no configuration.
- */
- protected IndexingConfiguration createIndexingConfiguration(
- NamespaceMappings namespaceMappings) {
- Element docElement = getIndexingConfigurationDOM();
- if (docElement == null) {
- return null;
- }
- try {
- IndexingConfiguration idxCfg = (IndexingConfiguration) indexingConfigurationClass
- .newInstance();
- idxCfg.init(docElement, getContext(), namespaceMappings);
- return idxCfg;
- } catch (Exception e) {
- log.warn("Exception initializing indexing configuration from: "
- + indexingConfigPath, e);
- }
- log.warn(indexingConfigPath + " ignored.");
- return null;
- }
+ // /**
+ // * Factory method to create the <code>TextExtractor</code> instance.
+ // *
+ // * @return the <code>TextExtractor</code> instance this index should
use.
+ // */
+ // protected TextExtractor createTextExtractor()
+ // {
+ // TextExtractor txtExtr = new JackrabbitTextExtractor(textFilterClasses);
+ // if (extractorPoolSize > 0)
+ // {
+ // // wrap with pool
+ // txtExtr = new PooledTextExtractor(txtExtr, extractorPoolSize,
+ // extractorBackLog, extractorTimeout);
+ // }
+ // return txtExtr;
+ // }
- /**
- * @return the configured synonym provider or <code>null</code> if none
is
- * configured or an error occurs.
- */
- protected SynonymProvider createSynonymProvider() {
- SynonymProvider sp = null;
- if (synonymProviderClass != null) {
- try {
- sp = (SynonymProvider) synonymProviderClass.newInstance();
- // sp.initialize(createSynonymProviderConfigResource());
- } catch (Exception e) {
- log.warn("Exception initializing synonym provider: "
- + synonymProviderClass, e);
- sp = null;
- }
- }
- return sp;
- }
+ /**
+ * @param namespaceMappings
+ * The namespace mappings
+ * @return the fulltext indexing configuration or <code>null</code> if
there
+ * is no configuration.
+ */
+ protected IndexingConfiguration createIndexingConfiguration(NamespaceMappings
namespaceMappings)
+ {
+ Element docElement = getIndexingConfigurationDOM();
+ if (docElement == null)
+ {
+ return null;
+ }
+ try
+ {
+ IndexingConfiguration idxCfg = indexingConfigurationClass.newInstance();
+ idxCfg.init(docElement, getContext(), namespaceMappings);
+ return idxCfg;
+ }
+ catch (Exception e)
+ {
+ log.warn("Exception initializing indexing configuration from: " +
indexingConfigPath, e);
+ }
+ log.warn(indexingConfigPath + " ignored.");
+ return null;
+ }
- /**
- * @return an initialized {@link DirectoryManager}.
- * @throws IOException
- * if the directory manager cannot be instantiated or an
- * exception occurs while initializing the manager.
- */
- protected DirectoryManager createDirectoryManager() throws IOException {
- try {
- Class clazz = Class.forName(directoryManagerClass);
- if (!DirectoryManager.class.isAssignableFrom(clazz)) {
- throw new IOException(directoryManagerClass
- + " is not a DirectoryManager implementation");
- }
- DirectoryManager df = (DirectoryManager) clazz.newInstance();
- df.init(this);
- return df;
- } catch (IOException e) {
- throw e;
- } catch (Exception e) {
- IOException ex = new IOException();
- ex.initCause(e);
- throw ex;
- }
- }
+ /**
+ * @return the configured synonym provider or <code>null</code> if none
is
+ * configured or an error occurs.
+ */
+ protected SynonymProvider createSynonymProvider()
+ {
+ SynonymProvider sp = null;
+ if (synonymProviderClass != null)
+ {
+ try
+ {
+ sp = synonymProviderClass.newInstance();
+ sp.initialize(createSynonymProviderConfigResource());
+ }
+ catch (Exception e)
+ {
+ log.warn("Exception initializing synonym provider: " +
synonymProviderClass, e);
+ sp = null;
+ }
+ }
+ return sp;
+ }
- // /**
- // * Creates a file system resource to the synonym provider configuration.
- // *
- // * @return a file system resource or <code>null</code> if no path was
- // * configured.
- // * @throws FileSystemException if an exception occurs accessing the file
- // * system.
- // */
- // protected FileSystemResource createSynonymProviderConfigResource() throws
- // FileSystemException, IOException
- // {
- // if (synonymProviderConfigPath != null)
- // {
- // FileSystemResource fsr;
- // // simple sanity check
- // if (synonymProviderConfigPath.endsWith(FileSystem.SEPARATOR))
- // {
- // throw new FileSystemException("Invalid synonymProviderConfigPath: " +
- // synonymProviderConfigPath);
- // }
- // FileSystem fs = getContext().getFileSystem();
- // if (fs == null)
- // {
- // fs = new LocalFileSystem();
- // int lastSeparator =
- // synonymProviderConfigPath.lastIndexOf(FileSystem.SEPARATOR_CHAR);
- // if (lastSeparator != -1)
- // {
- // File root = new File(path, synonymProviderConfigPath.substring(0,
- // lastSeparator));
- // ((LocalFileSystem)fs).setRoot(root.getCanonicalFile());
- // fs.init();
- // fsr = new FileSystemResource(fs,
- // synonymProviderConfigPath.substring(lastSeparator + 1));
- // }
- // else
- // {
- // ((LocalFileSystem)fs).setPath(path);
- // fs.init();
- // fsr = new FileSystemResource(fs, synonymProviderConfigPath);
- // }
- // synonymProviderConfigFs = fs;
- // }
- // else
- // {
- // fsr = new FileSystemResource(fs, synonymProviderConfigPath);
- // }
- // return fsr;
- // }
- // else
- // {
- // // path not configured
- // return null;
- // }
- // }
+ /**
+ * @return an initialized {@link DirectoryManager}.
+ * @throws IOException
+ * if the directory manager cannot be instantiated or an
+ * exception occurs while initializing the manager.
+ */
+ protected DirectoryManager createDirectoryManager() throws IOException
+ {
+ try
+ {
+ Class clazz = Class.forName(directoryManagerClass);
+ if (!DirectoryManager.class.isAssignableFrom(clazz))
+ {
+ throw new IOException(directoryManagerClass + " is not a
DirectoryManager implementation");
+ }
+ DirectoryManager df = (DirectoryManager)clazz.newInstance();
+ df.init(this);
+ return df;
+ }
+ catch (IOException e)
+ {
+ throw e;
+ }
+ catch (Exception e)
+ {
+ IOException ex = new IOException();
+ ex.initCause(e);
+ throw ex;
+ }
+ }
- /**
- * Creates a spell checker for this query handler.
- *
- * @return the spell checker or <code>null</code> if none is configured
or
- * an error occurs.
- */
- protected SpellChecker createSpellChecker() {
- SpellChecker spCheck = null;
- if (spellCheckerClass != null) {
- try {
- spCheck = (SpellChecker) spellCheckerClass.newInstance();
- spCheck.init(this);
- } catch (Exception e) {
- log.warn("Exception initializing spell checker: "
- + spellCheckerClass, e);
- }
- }
- return spCheck;
- }
+ /**
+ * Creates a file system resource to the synonym provider configuration.
+ *
+ * @return a file system resource or <code>null</code> if no path was
+ * configured.
+ * @throws FileSystemException if an exception occurs accessing the file
+ * system.
+ */
+ protected InputStream createSynonymProviderConfigResource() throws IOException
+ {
+ if (synonymProviderConfigPath != null)
+ {
+ InputStream fsr;
+ // simple sanity check
+ String separator = System.getProperty("file.separator");
+ if
(synonymProviderConfigPath.endsWith(System.getProperty("file.separator")))
+ {
+ throw new IOException("Invalid synonymProviderConfigPath: " +
synonymProviderConfigPath);
+ }
- /**
- * Returns the document element of the indexing configuration or
- * <code>null</code> if there is no indexing configuration.
- *
- * @return the indexing configuration or <code>null</code> if there is
none.
- */
- protected Element getIndexingConfigurationDOM() {
- if (indexingConfiguration == null) {
- if (indexingConfigPath != null) {
+ if (cfm == null)
+ {
+ int lastSeparator = synonymProviderConfigPath.lastIndexOf(separator);
+ if (lastSeparator != -1)
+ {
+ File root = new File(path, synonymProviderConfigPath.substring(0,
lastSeparator));
+ fsr =
+ new BufferedInputStream(new FileInputStream(new File(root,
synonymProviderConfigPath
+ .substring(lastSeparator + 1))));
+ }
+ else
+ {
+ fsr = new BufferedInputStream(new FileInputStream(new
File(synonymProviderConfigPath)));
- // File config = new File(indexingConfigPath);
+ }
+ synonymProviderConfigFs = fsr;
+ }
+ else
+ {
+ try
+ {
+ fsr = cfm.getInputStream(synonymProviderConfigPath);
+ }
+ catch (Exception e)
+ {
+ throw new IOException(e.getLocalizedMessage());
+ }
+ }
+ return fsr;
+ }
+ else
+ {
+ // path not configured
+ return null;
+ }
+ }
- InputStream is = SearchIndex.class
- .getResourceAsStream(indexingConfigPath);
- if (is == null) {
- try {
- is = cfm.getInputStream(indexingConfigPath);
- } catch (Exception e1) {
- log.warn("Unable to load configuration "
- + indexingConfigPath);
- }
- }
+ /**
+ * Creates a spell checker for this query handler.
+ *
+ * @return the spell checker or <code>null</code> if none is configured
or
+ * an error occurs.
+ */
+ protected SpellChecker createSpellChecker()
+ {
+ SpellChecker spCheck = null;
+ if (spellCheckerClass != null)
+ {
+ try
+ {
+ spCheck = spellCheckerClass.newInstance();
+ spCheck.init(this);
+ }
+ catch (Exception e)
+ {
+ log.warn("Exception initializing spell checker: " +
spellCheckerClass, e);
+ }
+ }
+ return spCheck;
+ }
- try {
- DocumentBuilderFactory factory = DocumentBuilderFactory
- .newInstance();
- DocumentBuilder builder = factory.newDocumentBuilder();
- builder
- .setEntityResolver(new IndexingConfigurationEntityResolver());
- indexingConfiguration = builder.parse(is)
- .getDocumentElement();
- } catch (ParserConfigurationException e) {
- log.warn("Unable to create XML parser", e);
- } catch (IOException e) {
- log.warn("Exception parsing " + indexingConfigPath, e);
- } catch (SAXException e) {
- log.warn("Exception parsing " + indexingConfigPath, e);
- }
- }
- }
- return indexingConfiguration;
- }
+ /**
+ * Returns the document element of the indexing configuration or
+ * <code>null</code> if there is no indexing configuration.
+ *
+ * @return the indexing configuration or <code>null</code> if there is
none.
+ */
+ protected Element getIndexingConfigurationDOM()
+ {
+ if (indexingConfiguration == null)
+ {
+ if (indexingConfigPath != null)
+ {
- /**
- * Merges the fulltext indexed fields of the aggregated node states into
- * <code>doc</code>.
- *
- * @param state
- * the node state on which <code>doc</code> was created.
- * @param doc
- * the lucene document with index fields from
<code>state</code>.
- */
- protected void mergeAggregatedNodeIndexes(NodeData state, Document doc) {
- if (indexingConfig != null) {
- AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
- if (aggregateRules == null) {
- return;
- }
- try {
- ItemDataConsumer ism = getContext().getItemStateManager();
- for (int i = 0; i < aggregateRules.length; i++) {
- boolean ruleMatched = false;
- // node includes
- NodeData[] aggregates = aggregateRules[i]
- .getAggregatedNodeStates(state);
- if (aggregates != null) {
- ruleMatched = true;
- for (int j = 0; j < aggregates.length; j++) {
- Document aDoc = createDocument(aggregates[j],
- getNamespaceMappings(), index
- .getIndexFormatVersion());
- // transfer fields to doc if there are any
- Fieldable[] fulltextFields = aDoc
- .getFieldables(FieldNames.FULLTEXT);
- if (fulltextFields != null) {
- for (int k = 0; k < fulltextFields.length; k++) {
- doc.add(fulltextFields[k]);
- }
- doc.add(new Field(
- FieldNames.AGGREGATED_NODE_UUID,
- aggregates[j].getIdentifier(),
- Field.Store.NO,
- Field.Index.NOT_ANALYZED_NO_NORMS));
- }
- }
- }
- // property includes
- PropertyData[] propStates = aggregateRules[i]
- .getAggregatedPropertyStates(state);
- if (propStates != null) {
- ruleMatched = true;
- for (int j = 0; j < propStates.length; j++) {
- PropertyData propState = propStates[j];
- String namePrefix = FieldNames
- .createNamedValue(getNamespaceMappings()
- .translateName(
- propState.getQPath()
- .getName()), "");
- NodeData parent = (NodeData) ism
- .getItemData(propState
- .getParentIdentifier());
- Document aDoc = createDocument(parent,
- getNamespaceMappings(), getIndex()
- .getIndexFormatVersion());
- try {
- // find the right fields to transfer
- Fieldable[] fields = aDoc
- .getFieldables(FieldNames.PROPERTIES);
- Token t = new Token();
- for (int k = 0; k < fields.length; k++) {
- Fieldable field = fields[k];
- // assume properties fields use
- // SingleTokenStream
- t = field.tokenStreamValue().next(t);
- String value = new String(t.termBuffer(),
- 0, t.termLength());
- if (value.startsWith(namePrefix)) {
- // extract value
- value = value.substring(namePrefix
- .length());
- // create new named value
- QPath p = getRelativePath(state,
- propState);
- String path = getNamespaceMappings()
- .translatePath(p);
- value = FieldNames.createNamedValue(
- path, value);
- t.setTermBuffer(value);
- doc.add(new Field(field.name(),
- new SingletonTokenStream(t)));
- doc
- .add(new Field(
- FieldNames.AGGREGATED_NODE_UUID,
- parent.getIdentifier(),
- Field.Store.NO,
- Field.Index.NOT_ANALYZED_NO_NORMS));
- }
- }
- } finally {
- Util.disposeDocument(aDoc);
- }
- }
- }
+ // File config = new File(indexingConfigPath);
- // only use first aggregate definition that matches
- if (ruleMatched) {
- break;
- }
- }
- } catch (Exception e) {
- // do not fail if aggregate cannot be created
- log.warn("Exception while building indexing aggregate for"
- + " node with UUID: " + state.getIdentifier(), e);
- }
- }
- }
+ InputStream is = SearchIndex.class.getResourceAsStream(indexingConfigPath);
+ if (is == null)
+ {
+ try
+ {
+ is = cfm.getInputStream(indexingConfigPath);
+ }
+ catch (Exception e1)
+ {
+ log.warn("Unable to load configuration " +
indexingConfigPath);
+ }
+ }
- /**
- * Returns the relative path from <code>nodeState</code> to
- * <code>propState</code>.
- *
- * @param nodeState
- * a node state.
- * @param propState
- * a property state.
- * @return the relative path.
- * @throws RepositoryException
- * if an error occurs while resolving paths.
- * @throws ItemStateException
- * if an error occurs while reading item states.
- */
- protected QPath getRelativePath(NodeData nodeState, PropertyData propState)
- throws RepositoryException
+ try
+ {
+ DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+ DocumentBuilder builder = factory.newDocumentBuilder();
+ builder.setEntityResolver(new IndexingConfigurationEntityResolver());
+ indexingConfiguration = builder.parse(is).getDocumentElement();
+ }
+ catch (ParserConfigurationException e)
+ {
+ log.warn("Unable to create XML parser", e);
+ }
+ catch (IOException e)
+ {
+ log.warn("Exception parsing " + indexingConfigPath, e);
+ }
+ catch (SAXException e)
+ {
+ log.warn("Exception parsing " + indexingConfigPath, e);
+ }
+ }
+ }
+ return indexingConfiguration;
+ }
- {
+ /**
+ * Merges the fulltext indexed fields of the aggregated node states into
+ * <code>doc</code>.
+ *
+ * @param state
+ * the node state on which <code>doc</code> was created.
+ * @param doc
+ * the lucene document with index fields from
<code>state</code>.
+ */
+ protected void mergeAggregatedNodeIndexes(NodeData state, Document doc)
+ {
+ if (indexingConfig != null)
+ {
+ AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
+ if (aggregateRules == null)
+ {
+ return;
+ }
+ try
+ {
+ ItemDataConsumer ism = getContext().getItemStateManager();
+ for (int i = 0; i < aggregateRules.length; i++)
+ {
+ boolean ruleMatched = false;
+ // node includes
+ NodeData[] aggregates = aggregateRules[i].getAggregatedNodeStates(state);
+ if (aggregates != null)
+ {
+ ruleMatched = true;
+ for (int j = 0; j < aggregates.length; j++)
+ {
+ Document aDoc =
+ createDocument(aggregates[j], getNamespaceMappings(),
index.getIndexFormatVersion());
+ // transfer fields to doc if there are any
+ Fieldable[] fulltextFields =
aDoc.getFieldables(FieldNames.FULLTEXT);
+ if (fulltextFields != null)
+ {
+ for (int k = 0; k < fulltextFields.length; k++)
+ {
+ doc.add(fulltextFields[k]);
+ }
+ doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID,
aggregates[j].getIdentifier(),
+ Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+ }
+ }
+ }
+ // property includes
+ PropertyData[] propStates =
aggregateRules[i].getAggregatedPropertyStates(state);
+ if (propStates != null)
+ {
+ ruleMatched = true;
+ for (int j = 0; j < propStates.length; j++)
+ {
+ PropertyData propState = propStates[j];
+ String namePrefix =
+ FieldNames.createNamedValue(getNamespaceMappings()
+ .translateName(propState.getQPath().getName()),
"");
+ NodeData parent =
(NodeData)ism.getItemData(propState.getParentIdentifier());
+ Document aDoc = createDocument(parent, getNamespaceMappings(),
getIndex().getIndexFormatVersion());
+ try
+ {
+ // find the right fields to transfer
+ Fieldable[] fields = aDoc.getFieldables(FieldNames.PROPERTIES);
+ Token t = new Token();
+ for (int k = 0; k < fields.length; k++)
+ {
+ Fieldable field = fields[k];
+ // assume properties fields use
+ // SingleTokenStream
+ t = field.tokenStreamValue().next(t);
+ String value = new String(t.termBuffer(), 0, t.termLength());
+ if (value.startsWith(namePrefix))
+ {
+ // extract value
+ value = value.substring(namePrefix.length());
+ // create new named value
+ QPath p = getRelativePath(state, propState);
+ String path = getNamespaceMappings().translatePath(p);
+ value = FieldNames.createNamedValue(path, value);
+ t.setTermBuffer(value);
+ doc.add(new Field(field.name(), new
SingletonTokenStream(t)));
+ doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID,
parent.getIdentifier(),
+ Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+ }
+ }
+ }
+ finally
+ {
+ Util.disposeDocument(aDoc);
+ }
+ }
+ }
- QPath nodePath = nodeState.getQPath();
- QPath propPath = propState.getQPath();
- throw new RepositoryException();
- // Path p = nodePath.computeRelativePath(propPath);
- // // make sure it does not contain indexes
- // boolean clean = true;
- // Path.Element[] elements = p.getElements();
- // for (int i = 0; i < elements.length; i++)
- // {
- // if (elements[i].getIndex() != 0)
- // {
- // elements[i] = PATH_FACTORY.createElement(elements[i].getName());
- // clean = false;
- // }
- // }
- // if (!clean)
- // {
- // p = PATH_FACTORY.create(elements);
- // }
+ // only use first aggregate definition that matches
+ if (ruleMatched)
+ {
+ break;
+ }
+ }
+ }
+ catch (Exception e)
+ {
+ // do not fail if aggregate cannot be created
+ log
+ .warn("Exception while building indexing aggregate for" + "
node with UUID: " + state.getIdentifier(), e);
+ }
+ }
+ }
- // return p;
- }
+ /**
+ * Returns the relative path from <code>nodeState</code> to
+ * <code>propState</code>.
+ *
+ * @param nodeState
+ * a node state.
+ * @param propState
+ * a property state.
+ * @return the relative path.
+ * @throws RepositoryException
+ * if an error occurs while resolving paths.
+ * @throws ItemStateException
+ * if an error occurs while reading item states.
+ */
+ protected QPath getRelativePath(NodeData nodeState, PropertyData propState) throws
RepositoryException
- /**
- * Retrieves the root of the indexing aggregate for <code>state</code>
and
- * puts it into <code>map</code>.
- *
- * @param state
- * the node state for which we want to retrieve the aggregate
- * root.
- * @param map
- * aggregate roots are collected in this map. Key=UUID,
- * value=NodeState.
- */
- protected void retrieveAggregateRoot(NodeData state,
- Map<String, NodeData> map) {
- if (indexingConfig != null) {
- AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
- if (aggregateRules == null) {
- return;
- }
- try {
- for (int i = 0; i < aggregateRules.length; i++) {
- NodeData root = aggregateRules[i].getAggregateRoot(state);
- if (root != null) {
- map.put(root.getIdentifier(), root);
- }
- }
- } catch (Exception e) {
- log.warn("Unable to get aggregate root for "
- + state.getIdentifier(), e);
- }
- }
- }
+ {
- /**
- * Retrieves the root of the indexing aggregate for
- * <code>removedUUIDs</code> and puts it into
<code>map</code>.
- *
- * @param removedUUIDs
- * the UUIDs of removed nodes.
- * @param map
- * aggregate roots are collected in this map. Key=UUID,
- * value=NodeState.
- */
- protected void retrieveAggregateRoot(Set<String> removedNodeIds,
- Map<String, NodeData> map)
+ QPath nodePath = nodeState.getQPath();
+ QPath propPath = propState.getQPath();
+ throw new RepositoryException();
+ // Path p = nodePath.computeRelativePath(propPath);
+ // // make sure it does not contain indexes
+ // boolean clean = true;
+ // Path.Element[] elements = p.getElements();
+ // for (int i = 0; i < elements.length; i++)
+ // {
+ // if (elements[i].getIndex() != 0)
+ // {
+ // elements[i] = PATH_FACTORY.createElement(elements[i].getName());
+ // clean = false;
+ // }
+ // }
+ // if (!clean)
+ // {
+ // p = PATH_FACTORY.create(elements);
+ // }
- {
+ // return p;
+ }
- if (indexingConfig != null) {
- AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
- if (aggregateRules == null) {
- return;
- }
- int found = 0;
- long time = System.currentTimeMillis();
- try {
- CachingMultiIndexReader reader = index.getIndexReader();
- try {
- Term aggregateUUIDs = new Term(
- FieldNames.AGGREGATED_NODE_UUID, "");
- TermDocs tDocs = reader.termDocs();
- try {
- ItemDataConsumer ism = getContext()
- .getItemStateManager();
- for (Iterator<String> it = removedNodeIds.iterator(); it
- .hasNext();) {
- String id = it.next();
- aggregateUUIDs = aggregateUUIDs.createTerm(id);
- tDocs.seek(aggregateUUIDs);
- while (tDocs.next()) {
- Document doc = reader.document(tDocs.doc(),
- FieldSelectors.UUID);
- String uuid = doc.get(FieldNames.UUID);
- ItemData itd = ism.getItemData(uuid);
- if (itd == null)
- continue;
- if (!itd.isNode())
- throw new RepositoryException(
- "Item with id:" + uuid
- + " is not a node");
- map.put(uuid, (NodeData) itd);
- found++;
- }
- }
- } finally {
- tDocs.close();
- }
- } finally {
- reader.release();
- }
- } catch (Exception e) {
- log.warn("Exception while retrieving aggregate roots", e);
- }
- time = System.currentTimeMillis() - time;
- log.debug("Retrieved {} aggregate roots in {} ms.", new Integer(
- found), new Long(time));
- }
- }
+ /**
+ * Retrieves the root of the indexing aggregate for <code>state</code>
and
+ * puts it into <code>map</code>.
+ *
+ * @param state
+ * the node state for which we want to retrieve the aggregate
+ * root.
+ * @param map
+ * aggregate roots are collected in this map. Key=UUID,
+ * value=NodeState.
+ */
+ protected void retrieveAggregateRoot(NodeData state, Map<String, NodeData> map)
+ {
+ if (indexingConfig != null)
+ {
+ AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
+ if (aggregateRules == null)
+ {
+ return;
+ }
+ try
+ {
+ for (int i = 0; i < aggregateRules.length; i++)
+ {
+ NodeData root = aggregateRules[i].getAggregateRoot(state);
+ if (root != null)
+ {
+ map.put(root.getIdentifier(), root);
+ }
+ }
+ }
+ catch (Exception e)
+ {
+ log.warn("Unable to get aggregate root for " +
state.getIdentifier(), e);
+ }
+ }
+ }
- // ----------------------------< internal
- // >----------------------------------
+ /**
+ * Retrieves the root of the indexing aggregate for
+ * <code>removedUUIDs</code> and puts it into
<code>map</code>.
+ *
+ * @param removedUUIDs
+ * the UUIDs of removed nodes.
+ * @param map
+ * aggregate roots are collected in this map. Key=UUID,
+ * value=NodeState.
+ */
+ protected void retrieveAggregateRoot(Set<String> removedNodeIds, Map<String,
NodeData> map)
- /**
- * Combines multiple {@link CachingMultiIndexReader} into a
- * <code>MultiReader</code> with {@link HierarchyResolver} support.
- */
- protected static final class CombinedIndexReader extends MultiReader
- implements HierarchyResolver, MultiIndexReader {
+ {
- /**
- * The sub readers.
- */
- private final CachingMultiIndexReader[] subReaders;
+ if (indexingConfig != null)
+ {
+ AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
+ if (aggregateRules == null)
+ {
+ return;
+ }
+ int found = 0;
+ long time = System.currentTimeMillis();
+ try
+ {
+ CachingMultiIndexReader reader = index.getIndexReader();
+ try
+ {
+ Term aggregateUUIDs = new Term(FieldNames.AGGREGATED_NODE_UUID,
"");
+ TermDocs tDocs = reader.termDocs();
+ try
+ {
+ ItemDataConsumer ism = getContext().getItemStateManager();
+ for (Iterator<String> it = removedNodeIds.iterator();
it.hasNext();)
+ {
+ String id = it.next();
+ aggregateUUIDs = aggregateUUIDs.createTerm(id);
+ tDocs.seek(aggregateUUIDs);
+ while (tDocs.next())
+ {
+ Document doc = reader.document(tDocs.doc(),
FieldSelectors.UUID);
+ String uuid = doc.get(FieldNames.UUID);
+ ItemData itd = ism.getItemData(uuid);
+ if (itd == null)
+ continue;
+ if (!itd.isNode())
+ throw new RepositoryException("Item with id:" + uuid
+ " is not a node");
+ map.put(uuid, (NodeData)itd);
+ found++;
+ }
+ }
+ }
+ finally
+ {
+ tDocs.close();
+ }
+ }
+ finally
+ {
+ reader.release();
+ }
+ }
+ catch (Exception e)
+ {
+ log.warn("Exception while retrieving aggregate roots", e);
+ }
+ time = System.currentTimeMillis() - time;
+ log.debug("Retrieved {} aggregate roots in {} ms.", new
Integer(found), new Long(time));
+ }
+ }
- /**
- * Doc number starts for each sub reader
- */
- private int[] starts;
+ // ----------------------------< internal
+ // >----------------------------------
- public CombinedIndexReader(CachingMultiIndexReader[] indexReaders) {
- super(indexReaders);
- this.subReaders = indexReaders;
- this.starts = new int[subReaders.length + 1];
+ /**
+ * Combines multiple {@link CachingMultiIndexReader} into a
+ * <code>MultiReader</code> with {@link HierarchyResolver} support.
+ */
+ protected static final class CombinedIndexReader extends MultiReader implements
HierarchyResolver, MultiIndexReader
+ {
- int maxDoc = 0;
- for (int i = 0; i < subReaders.length; i++) {
- starts[i] = maxDoc;
- maxDoc += subReaders[i].maxDoc();
- }
- starts[subReaders.length] = maxDoc;
- }
+ /**
+ * The sub readers.
+ */
+ private final CachingMultiIndexReader[] subReaders;
- /**
- * @inheritDoc
- */
- public int[] getParents(int n, int[] docNumbers) throws IOException {
- int i = readerIndex(n);
- DocId id = subReaders[i].getParentDocId(n - starts[i]);
- id = id.applyOffset(starts[i]);
- return id.getDocumentNumbers(this, docNumbers);
- }
+ /**
+ * Doc number starts for each sub reader
+ */
+ private int[] starts;
- // -------------------------< MultiIndexReader
- // >-------------------------
+ public CombinedIndexReader(CachingMultiIndexReader[] indexReaders)
+ {
+ super(indexReaders);
+ this.subReaders = indexReaders;
+ this.starts = new int[subReaders.length + 1];
- /**
- * {@inheritDoc}
- */
- public IndexReader[] getIndexReaders() {
- IndexReader[] readers = new IndexReader[subReaders.length];
- System.arraycopy(subReaders, 0, readers, 0, subReaders.length);
- return readers;
- }
+ int maxDoc = 0;
+ for (int i = 0; i < subReaders.length; i++)
+ {
+ starts[i] = maxDoc;
+ maxDoc += subReaders[i].maxDoc();
+ }
+ starts[subReaders.length] = maxDoc;
+ }
- /**
- * {@inheritDoc}
- */
- public void release() throws IOException {
- for (int i = 0; i < subReaders.length; i++) {
- subReaders[i].release();
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public int[] getParents(int n, int[] docNumbers) throws IOException
+ {
+ int i = readerIndex(n);
+ DocId id = subReaders[i].getParentDocId(n - starts[i]);
+ id = id.applyOffset(starts[i]);
+ return id.getDocumentNumbers(this, docNumbers);
+ }
- // ---------------------------< internal
- // >-------------------------------
+ // -------------------------< MultiIndexReader
+ // >-------------------------
- /**
- * Returns the reader index for document <code>n</code>. Implementation
- * copied from lucene MultiReader class.
- *
- * @param n
- * document number.
- * @return the reader index.
- */
- private int readerIndex(int n) {
- int lo = 0; // search starts array
- int hi = subReaders.length - 1; // for first element less
+ /**
+ * {@inheritDoc}
+ */
+ public IndexReader[] getIndexReaders()
+ {
+ IndexReader[] readers = new IndexReader[subReaders.length];
+ System.arraycopy(subReaders, 0, readers, 0, subReaders.length);
+ return readers;
+ }
- while (hi >= lo) {
- int mid = (lo + hi) >> 1;
- int midValue = starts[mid];
- if (n < midValue) {
- hi = mid - 1;
- } else if (n > midValue) {
- lo = mid + 1;
- } else { // found a match
- while (mid + 1 < subReaders.length
- && starts[mid + 1] == midValue) {
- mid++; // scan to last match
- }
- return mid;
- }
- }
- return hi;
- }
+ /**
+ * {@inheritDoc}
+ */
+ public void release() throws IOException
+ {
+ for (int i = 0; i < subReaders.length; i++)
+ {
+ subReaders[i].release();
+ }
+ }
- public boolean equals(Object obj) {
- if (obj instanceof CombinedIndexReader) {
- CombinedIndexReader other = (CombinedIndexReader) obj;
- return Arrays.equals(subReaders, other.subReaders);
- }
- return false;
- }
+ // ---------------------------< internal
+ // >-------------------------------
- public int hashCode() {
- int hash = 0;
- for (int i = 0; i < subReaders.length; i++) {
- hash = 31 * hash + subReaders[i].hashCode();
- }
- return hash;
- }
+ /**
+ * Returns the reader index for document <code>n</code>.
Implementation
+ * copied from lucene MultiReader class.
+ *
+ * @param n
+ * document number.
+ * @return the reader index.
+ */
+ private int readerIndex(int n)
+ {
+ int lo = 0; // search starts array
+ int hi = subReaders.length - 1; // for first element less
- /**
- * {@inheritDoc}
- */
- public ForeignSegmentDocId createDocId(String uuid) throws IOException {
- for (int i = 0; i < subReaders.length; i++) {
- CachingMultiIndexReader subReader = subReaders[i];
- ForeignSegmentDocId doc = subReader.createDocId(uuid);
- if (doc != null) {
- return doc;
- }
- }
- return null;
- }
+ while (hi >= lo)
+ {
+ int mid = (lo + hi) >> 1;
+ int midValue = starts[mid];
+ if (n < midValue)
+ {
+ hi = mid - 1;
+ }
+ else if (n > midValue)
+ {
+ lo = mid + 1;
+ }
+ else
+ { // found a match
+ while (mid + 1 < subReaders.length && starts[mid + 1] ==
midValue)
+ {
+ mid++; // scan to last match
+ }
+ return mid;
+ }
+ }
+ return hi;
+ }
- /**
- * {@inheritDoc}
- */
- public int getDocumentNumber(ForeignSegmentDocId docId) {
- for (int i = 0; i < subReaders.length; i++) {
- CachingMultiIndexReader subReader = subReaders[i];
- int realDoc = subReader.getDocumentNumber(docId);
- if (realDoc >= 0) {
- return realDoc + starts[i];
- }
- }
- return -1;
- }
- }
+ public boolean equals(Object obj)
+ {
+ if (obj instanceof CombinedIndexReader)
+ {
+ CombinedIndexReader other = (CombinedIndexReader)obj;
+ return Arrays.equals(subReaders, other.subReaders);
+ }
+ return false;
+ }
- // --------------------------< properties
- // >----------------------------------
+ public int hashCode()
+ {
+ int hash = 0;
+ for (int i = 0; i < subReaders.length; i++)
+ {
+ hash = 31 * hash + subReaders[i].hashCode();
+ }
+ return hash;
+ }
- /**
- * Sets the analyzer in use for indexing. The given analyzer class name must
- * satisfy the following conditions:
- * <ul>
- * <li>the class must exist in the class path</li>
- * <li>the class must have a public default constructor</li>
- * <li>the class must be a Lucene Analyzer</li>
- * </ul>
- * <p>
- * If the above conditions are met, then a new instance of the class is set
- * as the analyzer. Otherwise a warning is logged and the current analyzer
- * is not changed.
- * <p>
- * This property setter method is normally invoked by the Jackrabbit
- * configuration mechanism if the "analyzer" parameter is set in the
search
- * configuration.
- *
- * @param analyzerClassName
- * the analyzer class name
- */
- public void setAnalyzer(String analyzerClassName) {
- try {
- Class analyzerClass = Class.forName(analyzerClassName);
- analyzer.setDefaultAnalyzer((Analyzer) analyzerClass.newInstance());
- } catch (Exception e) {
- log.warn("Invalid Analyzer class: " + analyzerClassName, e);
- }
- }
+ /**
+ * {@inheritDoc}
+ */
+ public ForeignSegmentDocId createDocId(String uuid) throws IOException
+ {
+ for (int i = 0; i < subReaders.length; i++)
+ {
+ CachingMultiIndexReader subReader = subReaders[i];
+ ForeignSegmentDocId doc = subReader.createDocId(uuid);
+ if (doc != null)
+ {
+ return doc;
+ }
+ }
+ return null;
+ }
- /**
- * Returns the class name of the analyzer that is currently in use.
- *
- * @return class name of analyzer in use.
- */
- public String getAnalyzer() {
- return analyzer.getClass().getName();
- }
+ /**
+ * {@inheritDoc}
+ */
+ public int getDocumentNumber(ForeignSegmentDocId docId)
+ {
+ for (int i = 0; i < subReaders.length; i++)
+ {
+ CachingMultiIndexReader subReader = subReaders[i];
+ int realDoc = subReader.getDocumentNumber(docId);
+ if (realDoc >= 0)
+ {
+ return realDoc + starts[i];
+ }
+ }
+ return -1;
+ }
+ }
- /**
- * Sets the location of the search index.
- *
- * @param path
- * the location of the search index.
- * @throws IOException
- */
- public void setPath(String path) {
+ // --------------------------< properties
+ // >----------------------------------
- this.path = path.replace("${java.io.tmpdir}", System
- .getProperty("java.io.tmpdir"));
+ /**
+ * Sets the analyzer in use for indexing. The given analyzer class name must
+ * satisfy the following conditions:
+ * <ul>
+ * <li>the class must exist in the class path</li>
+ * <li>the class must have a public default constructor</li>
+ * <li>the class must be a Lucene Analyzer</li>
+ * </ul>
+ * <p>
+ * If the above conditions are met, then a new instance of the class is set
+ * as the analyzer. Otherwise a warning is logged and the current analyzer
+ * is not changed.
+ * <p>
+ * This property setter method is normally invoked by the Jackrabbit
+ * configuration mechanism if the "analyzer" parameter is set in the search
+ * configuration.
+ *
+ * @param analyzerClassName
+ * the analyzer class name
+ */
+ public void setAnalyzer(String analyzerClassName)
+ {
+ try
+ {
+ Class analyzerClass = Class.forName(analyzerClassName);
+ analyzer.setDefaultAnalyzer((Analyzer)analyzerClass.newInstance());
+ }
+ catch (Exception e)
+ {
+ log.warn("Invalid Analyzer class: " + analyzerClassName, e);
+ }
+ }
- }
+ /**
+ * Returns the class name of the analyzer that is currently in use.
+ *
+ * @return class name of analyzer in use.
+ */
+ public String getAnalyzer()
+ {
+ return analyzer.getClass().getName();
+ }
- /**
- * Returns the location of the search index. Returns <code>null</code>
if
- * not set.
- *
- * @return the location of the search index.
- */
- public String getPath() {
- return path;
- }
+ /**
+ * Sets the location of the search index.
+ *
+ * @param path
+ * the location of the search index.
+ * @throws IOException
+ */
+ public void setPath(String path)
+ {
- /**
- * The lucene index writer property: useCompoundFile
- */
- public void setUseCompoundFile(boolean b) {
- useCompoundFile = b;
- }
+ this.path = path.replace("${java.io.tmpdir}",
System.getProperty("java.io.tmpdir"));
- /**
- * Returns the current value for useCompoundFile.
- *
- * @return the current value for useCompoundFile.
- */
- public boolean getUseCompoundFile() {
- return useCompoundFile;
- }
+ }
- /**
- * The lucene index writer property: minMergeDocs
- */
- public void setMinMergeDocs(int minMergeDocs) {
- this.minMergeDocs = minMergeDocs;
- }
+ /**
+ * Returns the location of the search index. Returns <code>null</code> if
+ * not set.
+ *
+ * @return the location of the search index.
+ */
+ public String getPath()
+ {
+ return path;
+ }
- /**
- * Returns the current value for minMergeDocs.
- *
- * @return the current value for minMergeDocs.
- */
- public int getMinMergeDocs() {
- return minMergeDocs;
- }
+ /**
+ * The lucene index writer property: useCompoundFile
+ */
+ public void setUseCompoundFile(boolean b)
+ {
+ useCompoundFile = b;
+ }
- /**
- * Sets the property: volatileIdleTime
- *
- * @param volatileIdleTime
- * idle time in seconds
- */
- public void setVolatileIdleTime(int volatileIdleTime) {
- this.volatileIdleTime = volatileIdleTime;
- }
+ /**
+ * Returns the current value for useCompoundFile.
+ *
+ * @return the current value for useCompoundFile.
+ */
+ public boolean getUseCompoundFile()
+ {
+ return useCompoundFile;
+ }
- /**
- * Returns the current value for volatileIdleTime.
- *
- * @return the current value for volatileIdleTime.
- */
- public int getVolatileIdleTime() {
- return volatileIdleTime;
- }
+ /**
+ * The lucene index writer property: minMergeDocs
+ */
+ public void setMinMergeDocs(int minMergeDocs)
+ {
+ this.minMergeDocs = minMergeDocs;
+ }
- /**
- * The lucene index writer property: maxMergeDocs
- */
- public void setMaxMergeDocs(int maxMergeDocs) {
- this.maxMergeDocs = maxMergeDocs;
- }
+ /**
+ * Returns the current value for minMergeDocs.
+ *
+ * @return the current value for minMergeDocs.
+ */
+ public int getMinMergeDocs()
+ {
+ return minMergeDocs;
+ }
- /**
- * Returns the current value for maxMergeDocs.
- *
- * @return the current value for maxMergeDocs.
- */
- public int getMaxMergeDocs() {
- return maxMergeDocs;
- }
+ /**
+ * Sets the property: volatileIdleTime
+ *
+ * @param volatileIdleTime
+ * idle time in seconds
+ */
+ public void setVolatileIdleTime(int volatileIdleTime)
+ {
+ this.volatileIdleTime = volatileIdleTime;
+ }
- /**
- * The lucene index writer property: mergeFactor
- */
- public void setMergeFactor(int mergeFactor) {
- this.mergeFactor = mergeFactor;
- }
+ /**
+ * Returns the current value for volatileIdleTime.
+ *
+ * @return the current value for volatileIdleTime.
+ */
+ public int getVolatileIdleTime()
+ {
+ return volatileIdleTime;
+ }
- /**
- * Returns the current value for the merge factor.
- *
- * @return the current value for the merge factor.
- */
- public int getMergeFactor() {
- return mergeFactor;
- }
+ /**
+ * The lucene index writer property: maxMergeDocs
+ */
+ public void setMaxMergeDocs(int maxMergeDocs)
+ {
+ this.maxMergeDocs = maxMergeDocs;
+ }
- /**
- * @see VolatileIndex#setBufferSize(int)
- */
- public void setBufferSize(int size) {
- bufferSize = size;
- }
+ /**
+ * Returns the current value for maxMergeDocs.
+ *
+ * @return the current value for maxMergeDocs.
+ */
+ public int getMaxMergeDocs()
+ {
+ return maxMergeDocs;
+ }
- /**
- * Returns the current value for the buffer size.
- *
- * @return the current value for the buffer size.
- */
- public int getBufferSize() {
- return bufferSize;
- }
+ /**
+ * The lucene index writer property: mergeFactor
+ */
+ public void setMergeFactor(int mergeFactor)
+ {
+ this.mergeFactor = mergeFactor;
+ }
- public void setRespectDocumentOrder(boolean docOrder) {
- documentOrder = docOrder;
- }
+ /**
+ * Returns the current value for the merge factor.
+ *
+ * @return the current value for the merge factor.
+ */
+ public int getMergeFactor()
+ {
+ return mergeFactor;
+ }
- public boolean getRespectDocumentOrder() {
- return documentOrder;
- }
+ /**
+ * @see VolatileIndex#setBufferSize(int)
+ */
+ public void setBufferSize(int size)
+ {
+ bufferSize = size;
+ }
- public void setForceConsistencyCheck(boolean b) {
- forceConsistencyCheck = b;
- }
+ /**
+ * Returns the current value for the buffer size.
+ *
+ * @return the current value for the buffer size.
+ */
+ public int getBufferSize()
+ {
+ return bufferSize;
+ }
- public boolean getForceConsistencyCheck() {
- return forceConsistencyCheck;
- }
+ public void setRespectDocumentOrder(boolean docOrder)
+ {
+ documentOrder = docOrder;
+ }
- public void setAutoRepair(boolean b) {
- autoRepair = b;
- }
+ public boolean getRespectDocumentOrder()
+ {
+ return documentOrder;
+ }
- public boolean getAutoRepair() {
- return autoRepair;
- }
+ public void setForceConsistencyCheck(boolean b)
+ {
+ forceConsistencyCheck = b;
+ }
- public void setCacheSize(int size) {
- cacheSize = size;
- }
+ public boolean getForceConsistencyCheck()
+ {
+ return forceConsistencyCheck;
+ }
- public int getCacheSize() {
- return cacheSize;
- }
+ public void setAutoRepair(boolean b)
+ {
+ autoRepair = b;
+ }
- public void setMaxFieldLength(int length) {
- maxFieldLength = length;
- }
+ public boolean getAutoRepair()
+ {
+ return autoRepair;
+ }
- public int getMaxFieldLength() {
- return maxFieldLength;
- }
+ public void setCacheSize(int size)
+ {
+ cacheSize = size;
+ }
- //
- // /**
- // * Sets the list of text extractors (and text filters) to use for
- // * extracting text content from binary properties. The list must be
- // * comma (or whitespace) separated, and contain fully qualified class
- // * names of the {@link TextExtractor} (and {@link
- // org.apache.jackrabbit.core.query.TextFilter}) classes
- // * to be used. The configured classes must all have a public default
- // * constructor.
- // *
- // * @param filterClasses comma separated list of class names
- // */
- // public void setTextFilterClasses(String filterClasses)
- // {
- // this.textFilterClasses = filterClasses;
- // }
+ public int getCacheSize()
+ {
+ return cacheSize;
+ }
- // /**
- // * Returns the fully qualified class names of the text filter instances
- // * currently in use. The names are comma separated.
- // *
- // * @return class names of the text filters in use.
- // */
- // public String getTextFilterClasses()
- // {
- // return textFilterClasses;
- // }
+ public void setMaxFieldLength(int length)
+ {
+ maxFieldLength = length;
+ }
- /**
- * Tells the query handler how many result should be fetched initially when
- * a query is executed.
- *
- * @param size
- * the number of results to fetch initially.
- */
- public void setResultFetchSize(int size) {
- resultFetchSize = size;
- }
+ public int getMaxFieldLength()
+ {
+ return maxFieldLength;
+ }
- /**
- * @return the number of results the query handler will fetch initially when
- * a query is executed.
- */
- public int getResultFetchSize() {
- return resultFetchSize;
- }
+ //
+ // /**
+ // * Sets the list of text extractors (and text filters) to use for
+ // * extracting text content from binary properties. The list must be
+ // * comma (or whitespace) separated, and contain fully qualified class
+ // * names of the {@link TextExtractor} (and {@link
+ // org.apache.jackrabbit.core.query.TextFilter}) classes
+ // * to be used. The configured classes must all have a public default
+ // * constructor.
+ // *
+ // * @param filterClasses comma separated list of class names
+ // */
+ // public void setTextFilterClasses(String filterClasses)
+ // {
+ // this.textFilterClasses = filterClasses;
+ // }
- /**
- * The number of background threads for the extractor pool.
- *
- * @param numThreads
- * the number of threads.
- */
- public void setExtractorPoolSize(int numThreads) {
- if (numThreads < 0) {
- numThreads = 0;
- }
- extractorPoolSize = numThreads;
- }
+ // /**
+ // * Returns the fully qualified class names of the text filter instances
+ // * currently in use. The names are comma separated.
+ // *
+ // * @return class names of the text filters in use.
+ // */
+ // public String getTextFilterClasses()
+ // {
+ // return textFilterClasses;
+ // }
- /**
- * @return the size of the thread pool which is used to run the text
- * extractors when binary content is indexed.
- */
- public int getExtractorPoolSize() {
- return extractorPoolSize;
- }
+ /**
+ * Tells the query handler how many result should be fetched initially when
+ * a query is executed.
+ *
+ * @param size
+ * the number of results to fetch initially.
+ */
+ public void setResultFetchSize(int size)
+ {
+ resultFetchSize = size;
+ }
- /**
- * The number of extractor jobs that are queued until a new job is executed
- * with the current thread instead of using the thread pool.
- *
- * @param backLog
- * size of the extractor job queue.
- */
- public void setExtractorBackLogSize(int backLog) {
- extractorBackLog = backLog;
- }
+ /**
+ * @return the number of results the query handler will fetch initially when
+ * a query is executed.
+ */
+ public int getResultFetchSize()
+ {
+ return resultFetchSize;
+ }
- /**
- * @return the size of the extractor queue back log.
- */
- public int getExtractorBackLogSize() {
- return extractorBackLog;
- }
+ /**
+ * The number of background threads for the extractor pool.
+ *
+ * @param numThreads
+ * the number of threads.
+ */
+ public void setExtractorPoolSize(int numThreads)
+ {
+ if (numThreads < 0)
+ {
+ numThreads = 0;
+ }
+ extractorPoolSize = numThreads;
+ }
- /**
- * The timeout in milliseconds which is granted to the text extraction
- * process until fulltext indexing is deferred to a background thread.
- *
- * @param timeout
- * the timeout in milliseconds.
- */
- public void setExtractorTimeout(long timeout) {
- extractorTimeout = timeout;
- }
+ /**
+ * @return the size of the thread pool which is used to run the text
+ * extractors when binary content is indexed.
+ */
+ public int getExtractorPoolSize()
+ {
+ return extractorPoolSize;
+ }
- /**
- * @return the extractor timeout in milliseconds.
- */
- public long getExtractorTimeout() {
- return extractorTimeout;
- }
+ /**
+ * The number of extractor jobs that are queued until a new job is executed
+ * with the current thread instead of using the thread pool.
+ *
+ * @param backLog
+ * size of the extractor job queue.
+ */
+ public void setExtractorBackLogSize(int backLog)
+ {
+ extractorBackLog = backLog;
+ }
- /**
- * If set to <code>true</code> additional information is stored in the
index
- * to support highlighting using the rep:excerpt pseudo property.
- *
- * @param b
- * <code>true</code> to enable highlighting support.
- */
- public void setSupportHighlighting(boolean b) {
- supportHighlighting = b;
- }
+ /**
+ * @return the size of the extractor queue back log.
+ */
+ public int getExtractorBackLogSize()
+ {
+ return extractorBackLog;
+ }
- /**
- * @return <code>true</code> if highlighting support is enabled.
- */
- public boolean getSupportHighlighting() {
- return supportHighlighting;
- }
+ /**
+ * The timeout in milliseconds which is granted to the text extraction
+ * process until fulltext indexing is deferred to a background thread.
+ *
+ * @param timeout
+ * the timeout in milliseconds.
+ */
+ public void setExtractorTimeout(long timeout)
+ {
+ extractorTimeout = timeout;
+ }
- /**
- * Sets the class name for the {@link ExcerptProvider} that should be used
- * for the rep:excerpt pseudo property in a query.
- *
- * @param className
- * the name of a class that implements {@link ExcerptProvider}.
- */
- public void setExcerptProviderClass(String className) {
- try {
- Class clazz = Class.forName(className);
- if (ExcerptProvider.class.isAssignableFrom(clazz)) {
- excerptProviderClass = clazz;
- } else {
- log
- .warn(
- "Invalid value for excerptProviderClass, {} does "
- + "not implement ExcerptProvider interface.",
- className);
- }
- } catch (ClassNotFoundException e) {
- log
- .warn(
- "Invalid value for excerptProviderClass, class {} not found.",
- className);
- }
- }
+ /**
+ * @return the extractor timeout in milliseconds.
+ */
+ public long getExtractorTimeout()
+ {
+ return extractorTimeout;
+ }
- /**
- * @return the class name of the excerpt provider implementation.
- */
- public String getExcerptProviderClass() {
- return excerptProviderClass.getName();
- }
+ /**
+ * If set to <code>true</code> additional information is stored in the
index
+ * to support highlighting using the rep:excerpt pseudo property.
+ *
+ * @param b
+ * <code>true</code> to enable highlighting support.
+ */
+ public void setSupportHighlighting(boolean b)
+ {
+ supportHighlighting = b;
+ }
- /**
- * Sets the path to the indexing configuration file.
- *
- * @param path
- * the path to the configuration file.
- */
- public void setIndexingConfiguration(String path) {
- indexingConfigPath = path;
- }
+ /**
+ * @return <code>true</code> if highlighting support is enabled.
+ */
+ public boolean getSupportHighlighting()
+ {
+ return supportHighlighting;
+ }
- /**
- * @return the path to the indexing configuration file.
- */
- public String getIndexingConfiguration() {
- return indexingConfigPath;
- }
+ /**
+ * Sets the class name for the {@link ExcerptProvider} that should be used
+ * for the rep:excerpt pseudo property in a query.
+ *
+ * @param className
+ * the name of a class that implements {@link ExcerptProvider}.
+ */
+ public void setExcerptProviderClass(String className)
+ {
+ try
+ {
+ Class clazz = Class.forName(className);
+ if (ExcerptProvider.class.isAssignableFrom(clazz))
+ {
+ excerptProviderClass = clazz;
+ }
+ else
+ {
+ log.warn("Invalid value for excerptProviderClass, {} does " +
"not implement ExcerptProvider interface.",
+ className);
+ }
+ }
+ catch (ClassNotFoundException e)
+ {
+ log.warn("Invalid value for excerptProviderClass, class {} not
found.", className);
+ }
+ }
- /**
- * Sets the name of the class that implements {@link IndexingConfiguration}.
- * The default value is
- *
<code>org.apache.jackrabbit.core.query.lucene.IndexingConfigurationImpl</code>
- * .
- *
- * @param className
- * the name of the class that implements
- * {@link IndexingConfiguration}.
- */
- public void setIndexingConfigurationClass(String className) {
- try {
- Class clazz = Class.forName(className);
- if (IndexingConfiguration.class.isAssignableFrom(clazz)) {
- indexingConfigurationClass = clazz;
- } else {
- log
- .warn(
- "Invalid value for indexingConfigurationClass, {} "
- + "does not implement IndexingConfiguration interface.",
- className);
- }
- } catch (ClassNotFoundException e) {
- log
- .warn(
- "Invalid value for indexingConfigurationClass, class {} not found.",
- className);
- }
- }
+ /**
+ * @return the class name of the excerpt provider implementation.
+ */
+ public String getExcerptProviderClass()
+ {
+ return excerptProviderClass.getName();
+ }
- /**
- * @return the class name of the indexing configuration implementation.
- */
- public String getIndexingConfigurationClass() {
- return indexingConfigurationClass.getName();
- }
+ /**
+ * Sets the path to the indexing configuration file.
+ *
+ * @param path
+ * the path to the configuration file.
+ */
+ public void setIndexingConfiguration(String path)
+ {
+ indexingConfigPath = path;
+ }
- /**
- * Sets the name of the class that implements {@link SynonymProvider}. The
- * default value is <code>null</code> (none set).
- *
- * @param className
- * name of the class that implements {@link SynonymProvider}.
- */
- public void setSynonymProviderClass(String className) {
- try {
- Class clazz = Class.forName(className);
- if (SynonymProvider.class.isAssignableFrom(clazz)) {
- synonymProviderClass = clazz;
- } else {
- log.warn("Invalid value for synonymProviderClass, {} "
- + "does not implement SynonymProvider interface.",
- className);
- }
- } catch (ClassNotFoundException e) {
- log
- .warn(
- "Invalid value for synonymProviderClass, class {} not found.",
- className);
- }
- }
+ /**
+ * @return the path to the indexing configuration file.
+ */
+ public String getIndexingConfiguration()
+ {
+ return indexingConfigPath;
+ }
- /**
- * @return the class name of the synonym provider implementation or
- * <code>null</code> if none is set.
- */
- public String getSynonymProviderClass() {
- if (synonymProviderClass != null) {
- return synonymProviderClass.getName();
- } else {
- return null;
- }
- }
+ /**
+ * Sets the name of the class that implements {@link IndexingConfiguration}.
+ * The default value is
+ *
<code>org.apache.jackrabbit.core.query.lucene.IndexingConfigurationImpl</code>
+ * .
+ *
+ * @param className
+ * the name of the class that implements
+ * {@link IndexingConfiguration}.
+ */
+ public void setIndexingConfigurationClass(String className)
+ {
+ try
+ {
+ Class clazz = Class.forName(className);
+ if (IndexingConfiguration.class.isAssignableFrom(clazz))
+ {
+ indexingConfigurationClass = clazz;
+ }
+ else
+ {
+ log.warn("Invalid value for indexingConfigurationClass, {} "
+ + "does not implement IndexingConfiguration interface.",
className);
+ }
+ }
+ catch (ClassNotFoundException e)
+ {
+ log.warn("Invalid value for indexingConfigurationClass, class {} not
found.", className);
+ }
+ }
- /**
- * Sets the name of the class that implements {@link SpellChecker}. The
- * default value is <code>null</code> (none set).
- *
- * @param className
- * name of the class that implements {@link SpellChecker}.
- */
- public void setSpellCheckerClass(String className) {
- try {
- Class clazz = Class.forName(className);
- if (SpellChecker.class.isAssignableFrom(clazz)) {
- spellCheckerClass = clazz;
- } else {
- log.warn("Invalid value for spellCheckerClass, {} "
- + "does not implement SpellChecker interface.",
- className);
- }
- } catch (ClassNotFoundException e) {
- log.warn("Invalid value for spellCheckerClass,"
- + " class {} not found.", className);
- }
- }
+ /**
+ * @return the class name of the indexing configuration implementation.
+ */
+ public String getIndexingConfigurationClass()
+ {
+ return indexingConfigurationClass.getName();
+ }
- /**
- * @return the class name of the spell checker implementation or
- * <code>null</code> if none is set.
- */
- public String getSpellCheckerClass() {
- if (spellCheckerClass != null) {
- return spellCheckerClass.getName();
- } else {
- return null;
- }
- }
+ /**
+ * Sets the name of the class that implements {@link SynonymProvider}. The
+ * default value is <code>null</code> (none set).
+ *
+ * @param className
+ * name of the class that implements {@link SynonymProvider}.
+ */
+ public void setSynonymProviderClass(String className)
+ {
+ try
+ {
+ Class clazz = Class.forName(className);
+ if (SynonymProvider.class.isAssignableFrom(clazz))
+ {
+ synonymProviderClass = clazz;
+ }
+ else
+ {
+ log.warn("Invalid value for synonymProviderClass, {} " + "does
not implement SynonymProvider interface.",
+ className);
+ }
+ }
+ catch (ClassNotFoundException e)
+ {
+ log.warn("Invalid value for synonymProviderClass, class {} not
found.", className);
+ }
+ }
- /**
- * Enables or disables the consistency check on startup. Consistency checks
- * are disabled per default.
- *
- * @param b
- * <code>true</code> enables consistency checks.
- * @see #setForceConsistencyCheck(boolean)
- */
- public void setEnableConsistencyCheck(boolean b) {
- this.consistencyCheckEnabled = b;
- }
+ /**
+ * @return the class name of the synonym provider implementation or
+ * <code>null</code> if none is set.
+ */
+ public String getSynonymProviderClass()
+ {
+ if (synonymProviderClass != null)
+ {
+ return synonymProviderClass.getName();
+ }
+ else
+ {
+ return null;
+ }
+ }
- /**
- * @return <code>true</code> if consistency checks are enabled.
- */
- public boolean getEnableConsistencyCheck() {
- return consistencyCheckEnabled;
- }
+ /**
+ * Sets the name of the class that implements {@link SpellChecker}. The
+ * default value is <code>null</code> (none set).
+ *
+ * @param className
+ * name of the class that implements {@link SpellChecker}.
+ */
+ public void setSpellCheckerClass(String className)
+ {
+ try
+ {
+ Class clazz = Class.forName(className);
+ if (SpellChecker.class.isAssignableFrom(clazz))
+ {
+ spellCheckerClass = clazz;
+ }
+ else
+ {
+ log.warn("Invalid value for spellCheckerClass, {} " + "does
not implement SpellChecker interface.",
+ className);
+ }
+ }
+ catch (ClassNotFoundException e)
+ {
+ log.warn("Invalid value for spellCheckerClass," + " class {} not
found.", className);
+ }
+ }
- /**
- * Sets the configuration path for the synonym provider.
- *
- * @param path
- * the configuration path for the synonym provider.
- */
- public void setSynonymProviderConfigPath(String path) {
- synonymProviderConfigPath = path;
- }
+ /**
+ * @return the class name of the spell checker implementation or
+ * <code>null</code> if none is set.
+ */
+ public String getSpellCheckerClass()
+ {
+ if (spellCheckerClass != null)
+ {
+ return spellCheckerClass.getName();
+ }
+ else
+ {
+ return null;
+ }
+ }
- /**
- * @return the configuration path for the synonym provider. If none is set
- * this method returns <code>null</code>.
- */
- public String getSynonymProviderConfigPath() {
- return synonymProviderConfigPath;
- }
+ /**
+ * Enables or disables the consistency check on startup. Consistency checks
+ * are disabled per default.
+ *
+ * @param b
+ * <code>true</code> enables consistency checks.
+ * @see #setForceConsistencyCheck(boolean)
+ */
+ public void setEnableConsistencyCheck(boolean b)
+ {
+ this.consistencyCheckEnabled = b;
+ }
- /**
- * Sets the similarity implementation, which will be used for indexing and
- * searching. The implementation must extend {@link Similarity}.
- *
- * @param className
- * a {@link Similarity} implementation.
- */
- public void setSimilarityClass(String className) {
- try {
- Class similarityClass = Class.forName(className);
- similarity = (Similarity) similarityClass.newInstance();
- } catch (Exception e) {
- log.warn("Invalid Similarity class: " + className, e);
- }
- }
+ /**
+ * @return <code>true</code> if consistency checks are enabled.
+ */
+ public boolean getEnableConsistencyCheck()
+ {
+ return consistencyCheckEnabled;
+ }
- /**
- * @return the name of the similarity class.
- */
- public String getSimilarityClass() {
- return similarity.getClass().getName();
- }
+ /**
+ * Sets the configuration path for the synonym provider.
+ *
+ * @param path
+ * the configuration path for the synonym provider.
+ */
+ public void setSynonymProviderConfigPath(String path)
+ {
+ synonymProviderConfigPath = path;
+ }
- /**
- * Sets a new maxVolatileIndexSize value.
- *
- * @param maxVolatileIndexSize
- * the new value.
- */
- public void setMaxVolatileIndexSize(long maxVolatileIndexSize) {
- this.maxVolatileIndexSize = maxVolatileIndexSize;
- }
+ /**
+ * @return the configuration path for the synonym provider. If none is set
+ * this method returns <code>null</code>.
+ */
+ public String getSynonymProviderConfigPath()
+ {
+ return synonymProviderConfigPath;
+ }
- /**
- * @return the maxVolatileIndexSize in bytes.
- */
- public long getMaxVolatileIndexSize() {
- return maxVolatileIndexSize;
- }
+ /**
+ * Sets the similarity implementation, which will be used for indexing and
+ * searching. The implementation must extend {@link Similarity}.
+ *
+ * @param className
+ * a {@link Similarity} implementation.
+ */
+ public void setSimilarityClass(String className)
+ {
+ try
+ {
+ Class similarityClass = Class.forName(className);
+ similarity = (Similarity)similarityClass.newInstance();
+ }
+ catch (Exception e)
+ {
+ log.warn("Invalid Similarity class: " + className, e);
+ }
+ }
- /**
- * @return the name of the directory manager class.
- */
- public String getDirectoryManagerClass() {
- return directoryManagerClass;
- }
+ /**
+ * @return the name of the similarity class.
+ */
+ public String getSimilarityClass()
+ {
+ return similarity.getClass().getName();
+ }
- /**
- * Sets name of the directory manager class. The class must implement
- * {@link DirectoryManager}.
- *
- * @param className
- * the name of the class that implements directory manager.
- */
- public void setDirectoryManagerClass(String className) {
- this.directoryManagerClass = className;
- }
+ /**
+ * Sets a new maxVolatileIndexSize value.
+ *
+ * @param maxVolatileIndexSize
+ * the new value.
+ */
+ public void setMaxVolatileIndexSize(long maxVolatileIndexSize)
+ {
+ this.maxVolatileIndexSize = maxVolatileIndexSize;
+ }
- /**
- * @return the current value for termInfosIndexDivisor.
- */
- public int getTermInfosIndexDivisor() {
- return termInfosIndexDivisor;
- }
+ /**
+ * @return the maxVolatileIndexSize in bytes.
+ */
+ public long getMaxVolatileIndexSize()
+ {
+ return maxVolatileIndexSize;
+ }
- /**
- * Sets a new value for termInfosIndexDivisor.
- *
- * @param termInfosIndexDivisor
- * the new value.
- */
- public void setTermInfosIndexDivisor(int termInfosIndexDivisor) {
- this.termInfosIndexDivisor = termInfosIndexDivisor;
- }
+ /**
+ * @return the name of the directory manager class.
+ */
+ public String getDirectoryManagerClass()
+ {
+ return directoryManagerClass;
+ }
- /**
- * @return <code>true</code> if the hierarchy cache should be
initialized
- * immediately on startup.
- */
- public boolean isInitializeHierarchyCache() {
- return initializeHierarchyCache;
- }
+ /**
+ * Sets name of the directory manager class. The class must implement
+ * {@link DirectoryManager}.
+ *
+ * @param className
+ * the name of the class that implements directory manager.
+ */
+ public void setDirectoryManagerClass(String className)
+ {
+ this.directoryManagerClass = className;
+ }
- /**
- * Whether the hierarchy cache should be initialized immediately on startup.
- *
- * @param initializeHierarchyCache
- * <code>true</code> if the cache should be initialized
- * immediately.
- */
- public void setInitializeHierarchyCache(boolean initializeHierarchyCache) {
- this.initializeHierarchyCache = initializeHierarchyCache;
- }
+ /**
+ * @return the current value for termInfosIndexDivisor.
+ */
+ public int getTermInfosIndexDivisor()
+ {
+ return termInfosIndexDivisor;
+ }
- // ----------------------------< internal
- // >----------------------------------
+ /**
+ * Sets a new value for termInfosIndexDivisor.
+ *
+ * @param termInfosIndexDivisor
+ * the new value.
+ */
+ public void setTermInfosIndexDivisor(int termInfosIndexDivisor)
+ {
+ this.termInfosIndexDivisor = termInfosIndexDivisor;
+ }
- /**
- * Checks if this <code>SearchIndex</code> is open, otherwise throws an
- * <code>IOException</code>.
- *
- * @throws IOException
- * if this <code>SearchIndex</code> had been closed.
- */
- private void checkOpen() throws IOException {
- if (closed) {
- throw new IOException(
- "query handler closed and cannot be used anymore.");
- }
- }
+ /**
+ * @return <code>true</code> if the hierarchy cache should be initialized
+ * immediately on startup.
+ */
+ public boolean isInitializeHierarchyCache()
+ {
+ return initializeHierarchyCache;
+ }
- /**
- * Log unindexed changes into error.log
- *
- * @param removed
- * set of removed node uuids
- * @param added
- * map of added node states and uuids
- * @throws IOException
- */
- public void logErrorChanges(Set<String> removed, Set<String> added)
- throws IOException {
- // backup the remove and add iterators
- errorLog.writeChanges(removed, added);
- }
+ /**
+ * Whether the hierarchy cache should be initialized immediately on startup.
+ *
+ * @param initializeHierarchyCache
+ * <code>true</code> if the cache should be initialized
+ * immediately.
+ */
+ public void setInitializeHierarchyCache(boolean initializeHierarchyCache)
+ {
+ this.initializeHierarchyCache = initializeHierarchyCache;
+ }
- private void recoverErrorLog(ErrorLog errlog) throws IOException,
- RepositoryException {
- final Set<String> rem = new HashSet<String>();
- final Set<String> add = new HashSet<String>();
+ // ----------------------------< internal
+ // >----------------------------------
- errlog.readChanges(rem, add);
+ /**
+ * Checks if this <code>SearchIndex</code> is open, otherwise throws an
+ * <code>IOException</code>.
+ *
+ * @throws IOException
+ * if this <code>SearchIndex</code> had been closed.
+ */
+ private void checkOpen() throws IOException
+ {
+ if (closed)
+ {
+ throw new IOException("query handler closed and cannot be used
anymore.");
+ }
+ }
- // check is any notifies in log
- if (rem.isEmpty() && add.isEmpty()) {
- // there is no sense to continue
- return;
- }
+ /**
+ * Log unindexed changes into error.log
+ *
+ * @param removed
+ * set of removed node uuids
+ * @param added
+ * set of added node uuids
+ * @throws IOException
+ */
+ public void logErrorChanges(Set<String> removed, Set<String> added) throws
IOException
+ {
+ // backup the remove and add iterators
+ errorLog.writeChanges(removed, added);
+ }
- Iterator<String> removedStates = rem.iterator();
+ private void recoverErrorLog(ErrorLog errlog) throws IOException, RepositoryException
+ {
+ final Set<String> rem = new HashSet<String>();
+ final Set<String> add = new HashSet<String>();
- // make a new iterator;
- Iterator<NodeData> addedStates = new Iterator<NodeData>() {
- private final Iterator<String> iter = add.iterator();
+ errlog.readChanges(rem, add);
- public boolean hasNext() {
- return iter.hasNext();
- }
+ // check if there are any notifications in the log
+ if (rem.isEmpty() && add.isEmpty())
+ {
+ // there is no sense to continue
+ return;
+ }
- public NodeData next() {
- String id;
- // we have to iterrate through items till will meet ones
- // existing in
- // workspace
- while (iter.hasNext()) {
- id = iter.next();
+ Iterator<String> removedStates = rem.iterator();
- try {
- ItemData item = getContext().getItemStateManager()
- .getItemData(id);
- if (item != null) {
- if (item.isNode()) {
- return (NodeData) item; // return node here
- } else
- log
- .warn("Node expected but property found with id "
- + id
- + ". Skipping "
- + item.getQPath().getAsString());
- } else {
- log.warn("Unable to recovery node index " + id
- + ". Node not found.");
- }
- } catch (RepositoryException e) {
- log.error("ErrorLog recovery error. Item id " + id
- + ". " + e, e);
- }
- }
+ // make a new iterator;
+ Iterator<NodeData> addedStates = new Iterator<NodeData>()
+ {
+ private final Iterator<String> iter = add.iterator();
- return null;
- }
+ public boolean hasNext()
+ {
+ return iter.hasNext();
+ }
- public void remove() {
- throw new UnsupportedOperationException();
- }
- };
+ public NodeData next()
+ {
+ String id;
+ // we have to iterate through the items until we meet ones
+ // existing in
+ // the workspace
+ while (iter.hasNext())
+ {
+ id = iter.next();
- updateNodes(removedStates, addedStates);
+ try
+ {
+ ItemData item = getContext().getItemStateManager().getItemData(id);
+ if (item != null)
+ {
+ if (item.isNode())
+ {
+ return (NodeData)item; // return node here
+ }
+ else
+ log.warn("Node expected but property found with id " +
id + ". Skipping "
+ + item.getQPath().getAsString());
+ }
+ else
+ {
+ log.warn("Unable to recovery node index " + id + ".
Node not found.");
+ }
+ }
+ catch (RepositoryException e)
+ {
+ log.error("ErrorLog recovery error. Item id " + id + ".
" + e, e);
+ }
+ }
- errlog.clear();
- }
+ return null;
+ }
- /**
- * @see
org.exoplatform.services.jcr.impl.core.query.QueryHandler#executeQuery(org.apache.lucene.search.Query,
- * boolean, org.exoplatform.services.jcr.datamodel.InternalQName[],
- * boolean[])
- */
- public QueryHits executeQuery(Query query) throws IOException {
- checkOpen();
+ public void remove()
+ {
+ throw new UnsupportedOperationException();
+ }
+ };
- IndexReader reader = getIndexReader(true);
- IndexSearcher searcher = new IndexSearcher(reader);
- searcher.setSimilarity(getSimilarity());
+ updateNodes(removedStates, addedStates);
- return new LuceneQueryHits(reader, searcher, query);
- }
+ errlog.clear();
+ }
+ /**
+ * @see
org.exoplatform.services.jcr.impl.core.query.QueryHandler#executeQuery(org.apache.lucene.search.Query,
+ * boolean, org.exoplatform.services.jcr.datamodel.InternalQName[],
+ * boolean[])
+ */
+ public QueryHits executeQuery(Query query) throws IOException
+ {
+ checkOpen();
+
+ IndexReader reader = getIndexReader(true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ searcher.setSimilarity(getSimilarity());
+
+ return new LuceneQueryHits(reader, searcher, query);
+ }
+
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,6 +16,12 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
+import org.apache.lucene.index.TermPositionVector;
+import org.apache.lucene.index.TermVectorOffsetInfo;
+import org.apache.lucene.util.PriorityQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.BitSet;
@@ -26,331 +32,368 @@
import java.util.Map;
import java.util.Set;
-import org.apache.lucene.index.TermPositionVector;
-import org.apache.lucene.index.TermVectorOffsetInfo;
-import org.apache.lucene.util.PriorityQueue;
-
/**
* <code>WeightedHighlighter</code> implements a highlighter that weights
the
* fragments based on the proximity of the highlighted terms to each other. The
* returned fragments are not necessarily in sequence as the text occurs in the
* content.
*/
-public class WeightedHighlighter extends DefaultHighlighter {
+public class WeightedHighlighter extends DefaultHighlighter
+{
- /**
- * Punctuation characters that mark the end of a sentence.
- */
- private static final BitSet PUNCTUATION = new BitSet();
+ /**
+ * The logger instance for this class.
+ */
+ private static final Logger log = LoggerFactory.getLogger(WeightedHighlighter.class);
- static {
- PUNCTUATION.set('.');
- PUNCTUATION.set('!');
- PUNCTUATION.set(0xa1); // inverted exclamation mark
- PUNCTUATION.set('?');
- PUNCTUATION.set(0xbf); // inverted question mark
- // todo add more
- }
+ /**
+ * Punctuation characters that mark the end of a sentence.
+ */
+ private static final BitSet PUNCTUATION = new BitSet();
- protected WeightedHighlighter() {
- }
+ static
+ {
+ PUNCTUATION.set('.');
+ PUNCTUATION.set('!');
+ PUNCTUATION.set(0xa1); // inverted exclamation mark
+ PUNCTUATION.set('?');
+ PUNCTUATION.set(0xbf); // inverted question mark
+ // todo add more
+ }
- /**
- * @param tvec the term position vector for this hit
- * @param queryTerms the query terms.
- * @param text the original text that was used to create the
- * tokens.
- * @param excerptStart this string is prepended to the excerpt
- * @param excerptEnd this string is appended to the excerpt
- * @param fragmentStart this string is prepended to every fragment
- * @param fragmentEnd this string is appended to the end of every
- * fragement.
- * @param hlStart the string used to prepend a highlighted token, for
- * example
<tt>"<b>"</tt>
- * @param hlEnd the string used to append a highlighted token, for
- * example
<tt>"</b>"</tt>
- * @param maxFragments the maximum number of fragments
- * @param surround the maximum number of chars surrounding a
- * highlighted token
- * @return a String with text fragments where tokens from the query are
- * highlighted
- */
- public static String highlight(TermPositionVector tvec,
- Set queryTerms,
- String text,
- String excerptStart,
- String excerptEnd,
- String fragmentStart,
- String fragmentEnd,
- String hlStart,
- String hlEnd,
- int maxFragments,
- int surround) throws IOException {
- return new WeightedHighlighter().doHighlight(tvec, queryTerms, text,
- excerptStart, excerptEnd, fragmentStart, fragmentEnd, hlStart,
- hlEnd, maxFragments, surround);
- }
+ protected WeightedHighlighter()
+ {
+ }
- /**
- * @param tvec the term position vector for this hit
- * @param queryTerms the query terms.
- * @param text the original text that was used to create the tokens.
- * @param maxFragments the maximum number of fragments
- * @param surround the maximum number of chars surrounding a highlighted
- * token
- * @return a String with text fragments where tokens from the query are
- * highlighted
- */
- public static String highlight(TermPositionVector tvec,
- Set queryTerms,
- String text,
- int maxFragments,
- int surround) throws IOException {
- return highlight(tvec, queryTerms, text, START_EXCERPT, END_EXCERPT,
- START_FRAGMENT_SEPARATOR, END_FRAGMENT_SEPARATOR,
- START_HIGHLIGHT, END_HIGHLIGHT, maxFragments, surround);
- }
+ /**
+ * @param tvec the term position vector for this hit
+ * @param queryTerms the query terms.
+ * @param text the original text that was used to create the
+ * tokens.
+ * @param excerptStart this string is prepended to the excerpt
+ * @param excerptEnd this string is appended to the excerpt
+ * @param fragmentStart this string is prepended to every fragment
+ * @param fragmentEnd this string is appended to the end of every
+ * fragment.
+ * @param hlStart the string used to prepend a highlighted token, for
+ * example
<tt>"<b>"</tt>
+ * @param hlEnd the string used to append a highlighted token, for
+ * example
<tt>"</b>"</tt>
+ * @param maxFragments the maximum number of fragments
+ * @param surround the maximum number of chars surrounding a
+ * highlighted token
+ * @return a String with text fragments where tokens from the query are
+ * highlighted
+ */
+ public static String highlight(TermPositionVector tvec, Set queryTerms, String text,
String excerptStart,
+ String excerptEnd, String fragmentStart, String fragmentEnd, String hlStart, String
hlEnd, int maxFragments,
+ int surround) throws IOException
+ {
+ return new WeightedHighlighter().doHighlight(tvec, queryTerms, text, excerptStart,
excerptEnd, fragmentStart,
+ fragmentEnd, hlStart, hlEnd, maxFragments, surround);
+ }
- protected String mergeFragments(TermVectorOffsetInfo[] offsets,
- String text,
- String excerptStart,
- String excerptEnd,
- String fragmentStart,
- String fragmentEnd,
- String hlStart,
- String hlEnd,
- int maxFragments,
- int surround) throws IOException {
- if (offsets == null || offsets.length == 0) {
- // nothing to highlight
- return createDefaultExcerpt(text, excerptStart, excerptEnd,
- fragmentStart, fragmentEnd, surround * 2);
- }
+ /**
+ * @param tvec the term position vector for this hit
+ * @param queryTerms the query terms.
+ * @param text the original text that was used to create the tokens.
+ * @param maxFragments the maximum number of fragments
+ * @param surround the maximum number of chars surrounding a highlighted
+ * token
+ * @return a String with text fragments where tokens from the query are
+ * highlighted
+ */
+ public static String highlight(TermPositionVector tvec, Set queryTerms, String text,
int maxFragments, int surround)
+ throws IOException
+ {
+ return highlight(tvec, queryTerms, text, START_EXCERPT, END_EXCERPT,
START_FRAGMENT_SEPARATOR,
+ END_FRAGMENT_SEPARATOR, START_HIGHLIGHT, END_HIGHLIGHT, maxFragments,
surround);
+ }
- PriorityQueue bestFragments = new FragmentInfoPriorityQueue(maxFragments);
- for (int i = 0; i < offsets.length; i++) {
- if (offsets[i].getEndOffset() <= text.length()) {
- FragmentInfo fi = new FragmentInfo(offsets[i], surround * 2);
- for (int j = i + 1; j < offsets.length; j++) {
- if (offsets[j].getEndOffset() > text.length()) {
- break;
- }
- if (!fi.add(offsets[j], text)) {
- break;
- }
- }
- bestFragments.insert(fi);
+ protected String mergeFragments(TermVectorOffsetInfo[] offsets, String text, String
excerptStart, String excerptEnd,
+ String fragmentStart, String fragmentEnd, String hlStart, String hlEnd, int
maxFragments, int surround)
+ throws IOException
+ {
+
+ if (offsets == null || offsets.length == 0)
+ {
+ // nothing to highlight
+ return createDefaultExcerpt(text, excerptStart, excerptEnd, fragmentStart,
fragmentEnd, surround * 2);
+ }
+
+ PriorityQueue bestFragments = new FragmentInfoPriorityQueue(maxFragments);
+ for (int i = 0; i < offsets.length; i++)
+ {
+ if (offsets[i].getEndOffset() <= text.length())
+ {
+ FragmentInfo fi = new FragmentInfo(offsets[i], surround * 2);
+ for (int j = i + 1; j < offsets.length; j++)
+ {
+ if (offsets[j].getEndOffset() > text.length())
+ {
+ break;
+ }
+ if (!fi.add(offsets[j], text))
+ {
+ break;
+ }
}
- }
+ bestFragments.insert(fi);
+ }
+ }
- if (bestFragments.size() == 0) {
- return createDefaultExcerpt(text, excerptStart, excerptEnd,
- fragmentStart, fragmentEnd, surround * 2);
- }
+ if (bestFragments.size() == 0)
+ {
+ return createDefaultExcerpt(text, excerptStart, excerptEnd, fragmentStart,
fragmentEnd, surround * 2);
+ }
- // retrieve fragment infos from queue and fill into list, least
- // fragment comes out first
- List infos = new LinkedList();
- while (bestFragments.size() > 0) {
- FragmentInfo fi = (FragmentInfo) bestFragments.pop();
- infos.add(0, fi);
- }
+ // retrieve fragment infos from queue and fill into list, least
+ // fragment comes out first
+ List infos = new LinkedList();
+ while (bestFragments.size() > 0)
+ {
+ FragmentInfo fi = (FragmentInfo)bestFragments.pop();
+ infos.add(0, fi);
+ }
- Map offsetInfos = new IdentityHashMap();
- // remove overlapping fragment infos
- Iterator it = infos.iterator();
- while (it.hasNext()) {
- FragmentInfo fi = (FragmentInfo) it.next();
- boolean overlap = false;
- Iterator fit = fi.iterator();
- while (fit.hasNext() && !overlap) {
- TermVectorOffsetInfo oi = (TermVectorOffsetInfo) fit.next();
- if (offsetInfos.containsKey(oi)) {
- overlap = true;
- }
+ Map offsetInfos = new IdentityHashMap();
+ // remove overlapping fragment infos
+ Iterator it = infos.iterator();
+ while (it.hasNext())
+ {
+ FragmentInfo fi = (FragmentInfo)it.next();
+ boolean overlap = false;
+ Iterator fit = fi.iterator();
+ while (fit.hasNext() && !overlap)
+ {
+ TermVectorOffsetInfo oi = (TermVectorOffsetInfo)fit.next();
+ if (offsetInfos.containsKey(oi))
+ {
+ overlap = true;
}
- if (overlap) {
- it.remove();
- } else {
- Iterator oit = fi.iterator();
- while (oit.hasNext()) {
- offsetInfos.put(oit.next(), null);
- }
+ }
+ if (overlap)
+ {
+ it.remove();
+ }
+ else
+ {
+ Iterator oit = fi.iterator();
+ while (oit.hasNext())
+ {
+ offsetInfos.put(oit.next(), null);
}
- }
+ }
+ }
- // create excerpts
- StringBuffer sb = new StringBuffer(excerptStart);
- it = infos.iterator();
- while (it.hasNext()) {
- FragmentInfo fi = (FragmentInfo) it.next();
- sb.append(fragmentStart);
- int limit = Math.max(0, fi.getStartOffset() / 2 + fi.getEndOffset() / 2 -
surround);
- int len = startFragment(sb, text, fi.getStartOffset(), limit);
- TermVectorOffsetInfo lastOffsetInfo = null;
- Iterator fIt = fi.iterator();
- while (fIt.hasNext()) {
- TermVectorOffsetInfo oi = (TermVectorOffsetInfo) fIt.next();
- if (lastOffsetInfo != null) {
- // fill in text between terms
- sb.append(text.substring(lastOffsetInfo.getEndOffset(),
oi.getStartOffset()));
- }
- sb.append(hlStart);
- sb.append(text.substring(oi.getStartOffset(), oi.getEndOffset()));
- sb.append(hlEnd);
- lastOffsetInfo = oi;
+ // create excerpts
+ StringBuffer sb = new StringBuffer(excerptStart);
+ it = infos.iterator();
+ while (it.hasNext())
+ {
+ FragmentInfo fi = (FragmentInfo)it.next();
+ sb.append(fragmentStart);
+ int limit = Math.max(0, fi.getStartOffset() / 2 + fi.getEndOffset() / 2 -
surround);
+ int len = startFragment(sb, text, fi.getStartOffset(), limit);
+ TermVectorOffsetInfo lastOffsetInfo = null;
+ Iterator fIt = fi.iterator();
+ while (fIt.hasNext())
+ {
+ TermVectorOffsetInfo oi = (TermVectorOffsetInfo)fIt.next();
+ if (lastOffsetInfo != null)
+ {
+ // fill in text between terms
+ sb.append(text.substring(lastOffsetInfo.getEndOffset(),
oi.getStartOffset()));
}
- limit = Math.min(text.length(), fi.getStartOffset() - len + (surround * 2));
- endFragment(sb, text, fi.getEndOffset(), limit);
- sb.append(fragmentEnd);
- }
- sb.append(excerptEnd);
- return sb.toString();
- }
+ sb.append(hlStart);
+ sb.append(text.substring(oi.getStartOffset(), oi.getEndOffset()));
+ sb.append(hlEnd);
+ lastOffsetInfo = oi;
+ }
+ limit = Math.min(text.length(), fi.getStartOffset() - len + (surround * 2));
+ endFragment(sb, text, fi.getEndOffset(), limit);
+ sb.append(fragmentEnd);
+ }
+ sb.append(excerptEnd);
+ return sb.toString();
+ }
- /**
- * Writes the start of a fragment to the string buffer <code>sb</code>.
The
- * first occurrence of a matching term is indicated by the
- * <code>offset</code> into the <code>text</code>.
- *
- * @param sb where to append the start of the fragment.
- * @param text the original text.
- * @param offset the start offset of the first matching term in the
- * fragment.
- * @param limit do not go back further than <code>limit</code>.
- * @return the length of the start fragment that was appended to
- * <code>sb</code>.
- */
- private static int startFragment(StringBuffer sb, String text, int offset, int limit)
{
- if (limit == 0) {
- // append all
- sb.append(text.substring(0, offset));
- return offset;
- }
- String intro = "... ";
- int start = offset;
- for (int i = offset - 1; i >= limit; i--) {
- if (Character.isWhitespace(text.charAt(i))) {
- // potential start
- start = i + 1;
- if (i - 1 >= limit && PUNCTUATION.get(text.charAt(i - 1))) {
- // start of sentence found
- intro = "";
- break;
- }
+ /**
+ * Writes the start of a fragment to the string buffer <code>sb</code>.
The
+ * first occurrence of a matching term is indicated by the
+ * <code>offset</code> into the <code>text</code>.
+ *
+ * @param sb where to append the start of the fragment.
+ * @param text the original text.
+ * @param offset the start offset of the first matching term in the
+ * fragment.
+ * @param limit do not go back further than <code>limit</code>.
+ * @return the length of the start fragment that was appended to
+ * <code>sb</code>.
+ */
+ private static int startFragment(StringBuffer sb, String text, int offset, int limit)
+ {
+ if (limit == 0)
+ {
+ // append all
+ sb.append(text.substring(0, offset));
+ return offset;
+ }
+ String intro = "... ";
+ int start = offset;
+ for (int i = offset - 1; i >= limit; i--)
+ {
+ if (Character.isWhitespace(text.charAt(i)))
+ {
+ // potential start
+ start = i + 1;
+ if (i - 1 >= limit && PUNCTUATION.get(text.charAt(i - 1)))
+ {
+ // start of sentence found
+ intro = "";
+ break;
}
- }
- sb.append(intro).append(text.substring(start, offset));
- return offset - start;
- }
+ }
+ }
+ sb.append(intro).append(text.substring(start, offset));
+ return offset - start;
+ }
- /**
- * Writes the end of a fragment to the string buffer <code>sb</code>.
The
- * last occurrence of a matching term is indicated by the
- * <code>offset</code> into the <code>text</code>.
- *
- * @param sb where to append the start of the fragment.
- * @param text the original text.
- * @param offset the end offset of the last matching term in the fragment.
- * @param limit do not go further than <code>limit</code>.
- */
- private static void endFragment(StringBuffer sb, String text, int offset, int limit)
{
- if (limit == text.length()) {
- // append all
- sb.append(text.substring(offset));
- return;
- }
- int end = offset;
- for (int i = end; i < limit; i++) {
- if (Character.isWhitespace(text.charAt(i))) {
- // potential end
- end = i;
- }
- }
- sb.append(text.substring(offset, end)).append(" ...");
- }
+ /**
+ * Writes the end of a fragment to the string buffer <code>sb</code>. The
+ * last occurrence of a matching term is indicated by the
+ * <code>offset</code> into the <code>text</code>.
+ *
+ * @param sb where to append the start of the fragment.
+ * @param text the original text.
+ * @param offset the end offset of the last matching term in the fragment.
+ * @param limit do not go further than <code>limit</code>.
+ */
+ private static void endFragment(StringBuffer sb, String text, int offset, int limit)
+ {
+ if (limit == text.length())
+ {
+ // append all
+ sb.append(text.substring(offset));
+ return;
+ }
+ int end = offset;
+ for (int i = end; i < limit; i++)
+ {
+ if (Character.isWhitespace(text.charAt(i)))
+ {
+ // potential end
+ end = i;
+ }
+ }
+ sb.append(text.substring(offset, end)).append(" ...");
+ }
- private static class FragmentInfo {
- ArrayList offsetInfosList;
- int startOffset;
- int endOffset;
- int maxFragmentSize;
- int quality;
+ private static class FragmentInfo
+ {
+ ArrayList offsetInfosList;
- public FragmentInfo(TermVectorOffsetInfo offsetinfo, int maxFragmentSize) {
- offsetInfosList = new ArrayList();
- offsetInfosList.add(offsetinfo);
- startOffset = offsetinfo.getStartOffset();
- endOffset = offsetinfo.getEndOffset();
- this.maxFragmentSize = maxFragmentSize;
- quality = 0;
- }
+ int startOffset;
- public boolean add(TermVectorOffsetInfo offsetinfo, String text) {
- if (offsetinfo.getEndOffset() > (startOffset + maxFragmentSize)) {
- return false;
+ int endOffset;
+
+ int maxFragmentSize;
+
+ int quality;
+
+ public FragmentInfo(TermVectorOffsetInfo offsetinfo, int maxFragmentSize)
+ {
+ offsetInfosList = new ArrayList();
+ offsetInfosList.add(offsetinfo);
+ startOffset = offsetinfo.getStartOffset();
+ endOffset = offsetinfo.getEndOffset();
+ this.maxFragmentSize = maxFragmentSize;
+ quality = 0;
+ }
+
+ public boolean add(TermVectorOffsetInfo offsetinfo, String text)
+ {
+ if (offsetinfo.getEndOffset() > (startOffset + maxFragmentSize))
+ {
+ return false;
+ }
+ offsetInfosList.add(offsetinfo);
+ if (offsetinfo.getStartOffset() - endOffset <= 3)
+ {
+ // boost quality when terms are adjacent
+ // and only separated by whitespace character
+ boolean boost = true;
+ for (int i = endOffset; i < offsetinfo.getStartOffset(); i++)
+ {
+ if (!Character.isWhitespace(text.charAt(i)))
+ {
+ boost = false;
+ break;
+ }
}
- offsetInfosList.add(offsetinfo);
- if (offsetinfo.getStartOffset() - endOffset <= 3) {
- // boost quality when terms are adjacent
- // and only separated by whitespace character
- boolean boost = true;
- for (int i = endOffset; i < offsetinfo.getStartOffset(); i++) {
- if (!Character.isWhitespace(text.charAt(i))) {
- boost = false;
- break;
- }
- }
- if (boost) {
- quality += 10;
- } else {
- quality++;
- }
- } else {
- quality++;
+ if (boost)
+ {
+ quality += 10;
}
- endOffset = offsetinfo.getEndOffset();
- return true;
- }
+ else
+ {
+ quality++;
+ }
+ }
+ else
+ {
+ quality++;
+ }
+ endOffset = offsetinfo.getEndOffset();
+ return true;
+ }
- public Iterator iterator() {
- return offsetInfosList.iterator();
- }
+ public Iterator iterator()
+ {
+ return offsetInfosList.iterator();
+ }
- public int getStartOffset() {
- return startOffset;
- }
+ public int getStartOffset()
+ {
+ return startOffset;
+ }
- public int getEndOffset() {
- return endOffset;
- }
+ public int getEndOffset()
+ {
+ return endOffset;
+ }
- public int getQuality() {
- return quality;
- }
+ public int getQuality()
+ {
+ return quality;
+ }
- }
+ }
- private static class FragmentInfoPriorityQueue extends PriorityQueue {
+ private static class FragmentInfoPriorityQueue extends PriorityQueue
+ {
- public FragmentInfoPriorityQueue(int size) {
- initialize(size);
- }
+ public FragmentInfoPriorityQueue(int size)
+ {
+ initialize(size);
+ }
- /**
- * Checks the quality of two {@link FragmentInfo} objects. The one with
- * the lower quality is considered less than the other. If both
- * fragments have the same quality, the one with the higher start offset
- * is considered the lesser. This will result in a queue that keeps the
- * {@link FragmentInfo} with the best quality.
- */
- protected boolean lessThan(Object a, Object b) {
- FragmentInfo infoA = (FragmentInfo) a;
- FragmentInfo infoB = (FragmentInfo) b;
- if (infoA.getQuality() == infoB.getQuality()) {
- return infoA.getStartOffset() > infoB.getStartOffset();
- }
- return infoA.getQuality() < infoB.getQuality();
- }
- }
+ /**
+ * Checks the quality of two {@link FragmentInfo} objects. The one with
+ * the lower quality is considered less than the other. If both
+ * fragments have the same quality, the one with the higher start offset
+ * is considered the lesser. This will result in a queue that keeps the
+ * {@link FragmentInfo} with the best quality.
+ */
+ protected boolean lessThan(Object a, Object b)
+ {
+ FragmentInfo infoA = (FragmentInfo)a;
+ FragmentInfo infoB = (FragmentInfo)b;
+ if (infoA.getQuality() == infoB.getQuality())
+ {
+ return infoA.getStartOffset() > infoB.getStartOffset();
+ }
+ return infoA.getQuality() < infoB.getQuality();
+ }
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/MatchResult.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/MatchResult.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/MatchResult.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,117 +16,134 @@
*/
package org.exoplatform.services.jcr.impl.core.query.misc;
-import javax.jcr.RepositoryException;
-
import org.exoplatform.services.jcr.datamodel.QPath;
+import javax.jcr.RepositoryException;
-
/**
* A MatchResult instance represents the result of matching a {@link Pattern} against
* a {@link Path}.
*/
-public class MatchResult {
- private final QPath path;
- private final int pathLength;
- private int matchPos;
- private final int matchLength;
+public class MatchResult
+{
+ private final QPath path;
- MatchResult(QPath path, int length) {
- this(path, 0, length);
- }
+ private final int pathLength;
- MatchResult(QPath path, int pos, int length) {
- super();
-// if (!path.isNormalized()) {
-// throw new IllegalArgumentException("Path not normalized");
-// }
- this.path = path;
- this.matchPos = pos;
- this.matchLength = length;
- this.pathLength = path.getDepth();
- }
+ private int matchPos;
- /**
- * Returns the remaining path after the matching part.
- * @return The remaining path after the matching part such that the path constructed
from
- * {@link #getMatch()} followed by {@link #getRemainder()} is the original path or
- * <code>null</code> if {@link #isFullMatch()} is
<code>true</code>.
- */
- public QPath getRemainder() {
- if (matchPos + matchLength >= pathLength) {
- return null;
- }
- else {
- try {
- throw new RepositoryException("Not implemented");
- //return path.subPath(matchPos + matchLength, pathLength);
- }
- catch (RepositoryException e) {
- throw (IllegalStateException) new IllegalStateException("Path not
normalized")
- .initCause(e);
- }
- }
- }
+ private final int matchLength;
- /**
- * Returns the path which was matched by the {@link Pattern}.
- * @return The path which was matched such that the path constructed from
- * {@link #getMatch()} followed by {@link #getRemainder()} is the original path or
- * <code>null</code> if {@link #getMatchLength()} is
<code>0</code>.
- */
- public QPath getMatch() {
- if (matchLength == 0) {
- return null;
- }
- else {
- try {
- //return path.subPath(matchPos, matchPos + matchLength);
- throw new RepositoryException("Not implemented");
- }
- catch (RepositoryException e) {
- throw (IllegalStateException) new IllegalStateException("Path not
normalized")
- .initCause(e);
- }
- }
+ MatchResult(QPath path, int length)
+ {
+ this(path, 0, length);
+ }
- }
+ MatchResult(QPath path, int pos, int length)
+ {
+ super();
+ // if (!path.isNormalized()) {
+ // throw new IllegalArgumentException("Path not normalized");
+ // }
+ this.path = path;
+ this.matchPos = pos;
+ this.matchLength = length;
+ this.pathLength = path.getEntries().length;
+ }
- /**
- * Returns the position of the match
- * @return
- */
- public int getMatchPos() {
- return matchPos;
- }
+ /**
+ * Returns the remaining path after the matching part.
+ * @return The remaining path after the matching part such that the path constructed
from
+ * {@link #getMatch()} followed by {@link #getRemainder()} is the original path or
+ * <code>null</code> if {@link #isFullMatch()} is
<code>true</code>.
+ */
+ public QPath getRemainder()
+ {
+ if (matchPos + matchLength >= pathLength)
+ {
+ return null;
+ }
+ else
+ {
+ try
+ {
+ throw new RepositoryException("Not implemented");
+ //return path.subPath(matchPos + matchLength, pathLength);
+ }
+ catch (RepositoryException e)
+ {
+ throw (IllegalStateException)new IllegalStateException("Path not
normalized").initCause(e);
+ }
+ }
+ }
- /**
- * Returns the number of elements which where matched by the {@link Pattern}.
- * @return
- */
- public int getMatchLength() {
- return matchLength;
- }
+ /**
+ * Returns the path which was matched by the {@link Pattern}.
+ * @return The path which was matched such that the path constructed from
+ * {@link #getMatch()} followed by {@link #getRemainder()} is the original path or
+ * <code>null</code> if {@link #getMatchLength()} is
<code>0</code>.
+ */
+ public QPath getMatch()
+ {
+ if (matchLength == 0)
+ {
+ return null;
+ }
+ else
+ {
+ try
+ {
+ //return path.subPath(matchPos, matchPos + matchLength);
+ throw new RepositoryException("Not implemented");
+ }
+ catch (RepositoryException e)
+ {
+ throw (IllegalStateException)new IllegalStateException("Path not
normalized").initCause(e);
+ }
+ }
- /**
- * Returns true if the {@link Pattern} matched anything or false otherwise.
- * @return
- */
- public boolean isMatch() {
- return matchLength > 0;
- }
+ }
- /**
- * Returns true if the {@link Pattern} matched the whole {@link Path}.
- * @return
- */
- public boolean isFullMatch() {
- return pathLength == matchLength;
- }
+ /**
+ * Returns the position of the match
+ * @return
+ */
+ public int getMatchPos()
+ {
+ return matchPos;
+ }
- MatchResult setPos(int matchPos) {
- this.matchPos = matchPos;
- return this;
- }
+ /**
+ * Returns the number of elements which where matched by the {@link Pattern}.
+ * @return
+ */
+ public int getMatchLength()
+ {
+ return matchLength;
+ }
+ /**
+ * Returns true if the {@link Pattern} matched anything or false otherwise.
+ * @return
+ */
+ public boolean isMatch()
+ {
+ return matchLength > 0;
+ }
+
+ /**
+ * Returns true if the {@link Pattern} matched the whole {@link Path}.
+ * @return
+ */
+ public boolean isFullMatch()
+ {
+ return pathLength == matchLength;
+ }
+
+ MatchResult setPos(int matchPos)
+ {
+ this.matchPos = matchPos;
+ return this;
+ }
+
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/Pattern.java
===================================================================
---
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/Pattern.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/misc/Pattern.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,14 +16,12 @@
*/
package org.exoplatform.services.jcr.impl.core.query.misc;
-import javax.jcr.RepositoryException;
-
import org.exoplatform.services.jcr.datamodel.InternalQName;
import org.exoplatform.services.jcr.datamodel.QPath;
import org.exoplatform.services.jcr.datamodel.QPathEntry;
+import javax.jcr.RepositoryException;
-
/**
* Pattern to match normalized {@link QPath}s.
* A pattern matches either a constant path, a name of a path element, a selection of
@@ -33,476 +31,557 @@
* returns what's left as an instance of {@link MatchResult}.
* Use the {@link Matcher} class for matching a whole path or finding matches inside a
path.
*/
-public abstract class Pattern {
+public abstract class Pattern
+{
- /**
- * Matches this pattern against the input.
- * @param input path to match with this pattern
- * @return result from the matching <code>pattern</code> against
<code>input</code>
- * @throws IllegalArgumentException if <code>input</code> is not
normalized
- */
- public MatchResult match(QPath input) {
- try {
- return match(new Context(input)).getMatchResult();
- }
- catch (RepositoryException e) {
- throw (IllegalArgumentException) new IllegalArgumentException("QPath not
normalized")
- .initCause(e);
- }
- }
+ /**
+ * Matches this pattern against the input.
+ * @param input path to match with this pattern
+ * @return result from the matching <code>pattern</code> against
<code>input</code>
+ * @throws IllegalArgumentException if <code>input</code> is not
normalized
+ */
+ public MatchResult match(QPath input)
+ {
+ try
+ {
+ return match(new Context(input)).getMatchResult();
+ }
+ catch (RepositoryException e)
+ {
+ throw (IllegalArgumentException)new IllegalArgumentException("QPath not
normalized").initCause(e);
+ }
+ }
- protected abstract Context match(Context input) throws RepositoryException;
+ protected abstract Context match(Context input) throws RepositoryException;
- /**
- * Construct a new pattern which matches an exact path
- * @param path
- * @return A pattern which matches <code>path</code> and nothing else
- * @throws IllegalArgumentException if <code>path</code> is
<code>null</code>
- */
- public static Pattern path(QPath path) {
- if (path == null) {
- throw new IllegalArgumentException("path cannot be null");
- }
- return new PathPattern(path);
- }
+ /**
+ * Construct a new pattern which matches an exact path
+ * @param path
+ * @return A pattern which matches <code>path</code> and nothing else
+ * @throws IllegalArgumentException if <code>path</code> is
<code>null</code>
+ */
+ public static Pattern path(QPath path)
+ {
+ if (path == null)
+ {
+ throw new IllegalArgumentException("path cannot be null");
+ }
+ return new PathPattern(path);
+ }
- /**
- * Construct a new pattern which matches a path element of a given name
- * @param name
- * @return A pattern which matches a path element with name
<code>name</code>
- * @throws IllegalArgumentException if <code>name</code> is
<code>null</code>
- */
- public static Pattern name(QPathEntry name) {
- if (name == null) {
- throw new IllegalArgumentException("name cannot be null");
- }
- return new NamePattern(name);
- }
+ /**
+ * Construct a new pattern which matches a path element of a given name
+ * @param name
+ * @return A pattern which matches a path element with name
<code>name</code>
+ * @throws IllegalArgumentException if <code>name</code> is
<code>null</code>
+ */
+ public static Pattern name(QPathEntry name)
+ {
+ if (name == null)
+ {
+ throw new IllegalArgumentException("name cannot be null");
+ }
+ return new NamePattern(name);
+ }
- /**
- * Constructs a pattern which matches a path elements against regular expressions.
- * @param namespaceUri A regular expression used for matching the name space URI of
- * a path element.
- * @param localName A regular expression used for matching the local name of a path
- * element
- * @return A pattern which matches a path element if namespaceUri matches the
- * name space URI of the path element and localName matches the local name of the
- * path element.
- * @throws IllegalArgumentException if either <code>namespaceUri</code>
or
- * <code>localName</code> is <code>null</code>
- *
- * @see java.util.regex.Pattern
- */
- public static Pattern name(String namespaceUri, String localName) {
- if (namespaceUri == null || localName == null) {
- throw new IllegalArgumentException("neither namespaceUri nor localName
can be null");
- }
- return new RegexPattern(namespaceUri, localName);
- }
+ /**
+ * Constructs a pattern which matches a path elements against regular expressions.
+ * @param namespaceUri A regular expression used for matching the name space URI of
+ * a path element.
+ * @param localName A regular expression used for matching the local name of a path
+ * element
+ * @return A pattern which matches a path element if namespaceUri matches the
+ * name space URI of the path element and localName matches the local name of the
+ * path element.
+ * @throws IllegalArgumentException if either <code>namespaceUri</code>
or
+ * <code>localName</code> is <code>null</code>
+ *
+ * @see java.util.regex.Pattern
+ */
+ public static Pattern name(String namespaceUri, String localName)
+ {
+ if (namespaceUri == null || localName == null)
+ {
+ throw new IllegalArgumentException("neither namespaceUri nor localName can
be null");
+ }
+ return new RegexPattern(namespaceUri, localName);
+ }
- private static final Pattern ALL_PATTERN = new Pattern() {
- protected Context match(Context input) {
- return input.matchToEnd();
- }
+ private static final Pattern ALL_PATTERN = new Pattern()
+ {
+ protected Context match(Context input)
+ {
+ return input.matchToEnd();
+ }
- public String toString() {
- return "[ALL]";
- }
+ public String toString()
+ {
+ return "[ALL]";
+ }
- };
+ };
- /**
- * A pattern which matches all input.
- * @return
- */
- public static Pattern all() {
- return ALL_PATTERN;
- }
+ /**
+ * A pattern which matches all input.
+ * @return
+ */
+ public static Pattern all()
+ {
+ return ALL_PATTERN;
+ }
- private static final Pattern NOTHING_PATTERN = new Pattern() {
- protected Context match(Context input) {
- return input.match(0);
- }
+ private static final Pattern NOTHING_PATTERN = new Pattern()
+ {
+ protected Context match(Context input)
+ {
+ return input.match(0);
+ }
- public String toString() {
- return "[NOTHING]";
- }
- };
+ public String toString()
+ {
+ return "[NOTHING]";
+ }
+ };
- /**
- * A pattern which matches nothing.
- * @return
- */
- public static Pattern nothing() {
- return NOTHING_PATTERN;
- }
+ /**
+ * A pattern which matches nothing.
+ * @return
+ */
+ public static Pattern nothing()
+ {
+ return NOTHING_PATTERN;
+ }
- /**
- * A pattern which matches <code>pattern1</code> followed by
<code>pattern2</code> and
- * returns the longer of the two matches.
- * @param pattern1
- * @param pattern2
- * @return
- * @throws IllegalArgumentException if either argument is
<code>null</code>
- */
- public static Pattern selection(Pattern pattern1, Pattern pattern2) {
- if (pattern1 == null || pattern2 == null) {
- throw new IllegalArgumentException("Neither pattern can be null");
- }
- return new SelectPattern(pattern1, pattern2);
- }
+ /**
+ * A pattern which matches <code>pattern1</code> followed by
<code>pattern2</code> and
+ * returns the longer of the two matches.
+ * @param pattern1
+ * @param pattern2
+ * @return
+ * @throws IllegalArgumentException if either argument is
<code>null</code>
+ */
+ public static Pattern selection(Pattern pattern1, Pattern pattern2)
+ {
+ if (pattern1 == null || pattern2 == null)
+ {
+ throw new IllegalArgumentException("Neither pattern can be null");
+ }
+ return new SelectPattern(pattern1, pattern2);
+ }
- /**
- * A pattern which matches <code>pattern1</code> followed by
<code>pattern2</code>.
- * @param pattern1
- * @param pattern2
- * @return
- */
- public static Pattern sequence(Pattern pattern1, Pattern pattern2) {
- if (pattern1 == null || pattern2 == null) {
- throw new IllegalArgumentException("Neither pattern can be null");
- }
- return new SequencePattern(pattern1, pattern2);
- }
+ /**
+ * A pattern which matches <code>pattern1</code> followed by
<code>pattern2</code>.
+ * @param pattern1
+ * @param pattern2
+ * @return
+ */
+ public static Pattern sequence(Pattern pattern1, Pattern pattern2)
+ {
+ if (pattern1 == null || pattern2 == null)
+ {
+ throw new IllegalArgumentException("Neither pattern can be null");
+ }
+ return new SequencePattern(pattern1, pattern2);
+ }
- /**
- * A pattern which matches <code>pattern</code> as many times as
possible
- * @param pattern
- * @return
- */
- public static Pattern repeat(Pattern pattern) {
- if (pattern == null) {
- throw new IllegalArgumentException("Pattern can not be null");
- }
- return new RepeatPattern(pattern);
- }
+ /**
+ * A pattern which matches <code>pattern</code> as many times as possible
+ * @param pattern
+ * @return
+ */
+ public static Pattern repeat(Pattern pattern)
+ {
+ if (pattern == null)
+ {
+ throw new IllegalArgumentException("Pattern can not be null");
+ }
+ return new RepeatPattern(pattern);
+ }
- /**
- * A pattern which matches <code>pattern</code> as many times as
possible
- * but at least <code>min</code> times and at most
<code>max</code> times.
- * @param pattern
- * @param min
- * @param max
- * @return
- */
- public static Pattern repeat(Pattern pattern, int min, int max) {
- if (pattern == null) {
- throw new IllegalArgumentException("Pattern can not be null");
- }
- return new RepeatPattern(pattern, min, max);
- }
+ /**
+ * A pattern which matches <code>pattern</code> as many times as possible
+ * but at least <code>min</code> times and at most
<code>max</code> times.
+ * @param pattern
+ * @param min
+ * @param max
+ * @return
+ */
+ public static Pattern repeat(Pattern pattern, int min, int max)
+ {
+ if (pattern == null)
+ {
+ throw new IllegalArgumentException("Pattern can not be null");
+ }
+ return new RepeatPattern(pattern, min, max);
+ }
- // -----------------------------------------------------< Context >---
+ // -----------------------------------------------------< Context >---
- private static class Context {
- private final QPath path;
- private final int length;
- private final int pos;
- private final boolean isMatch;
+ private static class Context
+ {
+ private final QPath path;
- public Context(QPath path) {
- super();
- this.path = path;
- length = path.getDepth();
- isMatch = false;
- pos = 0;
- }
+ private final int length;
- public Context(Context context, int pos, boolean matched) {
- path = context.path;
- length = context.length;
- this.pos = pos;
- this.isMatch = matched;
- if (pos > length) {
- throw new IllegalArgumentException("Cannot match beyond end of
input");
- }
- }
+ private final int pos;
- public Context matchToEnd() {
- return new Context(this, length, true);
- }
+ private final boolean isMatch;
- public Context match(int count) {
- return new Context(this, pos + count, true);
- }
+ public Context(QPath path)
+ {
+ super();
+ this.path = path;
+ length = path.getEntries().length;
+ isMatch = false;
+ pos = 0;
+ }
- public Context noMatch() {
- return new Context(this, this.pos, false);
- }
+ public Context(Context context, int pos, boolean matched)
+ {
+ path = context.path;
+ length = context.length;
+ this.pos = pos;
+ this.isMatch = matched;
+ if (pos > length)
+ {
+ throw new IllegalArgumentException("Cannot match beyond end of
input");
+ }
+ }
- public boolean isMatch() {
- return isMatch;
- }
+ public Context matchToEnd()
+ {
+ return new Context(this, length, true);
+ }
- public QPath getRemainder() throws RepositoryException {
- if (pos >= length) {
- return null;
- }
- else {
- throw new RepositoryException("not implemented");
- // return path.subPath(pos, length);
- }
- }
+ public Context match(int count)
+ {
+ return new Context(this, pos + count, true);
+ }
- public boolean isExhausted() {
- return pos == length;
- }
+ public Context noMatch()
+ {
+ return new Context(this, this.pos, false);
+ }
- public MatchResult getMatchResult() {
- return new MatchResult(path, isMatch? pos : 0);
- }
+ public boolean isMatch()
+ {
+ return isMatch;
+ }
- public String toString() {
- return pos + " @ " + path;
- }
+ public QPath getRemainder() throws RepositoryException
+ {
+ if (pos >= length)
+ {
+ return null;
+ }
+ else
+ {
- }
+ //throw new RepositoryException("not implemented");
+ return subPath(path, pos, length);
+ }
+ }
- // -----------------------------------------------------< SelectPattern >---
+ /**
+ * @see Path#subPath(int, int)
+ */
+ public QPath subPath(QPath source, int from, int to) throws
IllegalArgumentException, RepositoryException
+ {
+ QPathEntry[] elements = source.getEntries();
+ if (from < 0 || to > elements.length || from >= to)
+ {
+ throw new IllegalArgumentException();
+ }
+ // if (!isNormalized())
+ // {
+ // throw new RepositoryException("Cannot extract sub-Path from a
non-normalized Path: " + this);
+ // }
+ QPathEntry[] dest = new QPathEntry[to - from];
+ System.arraycopy(elements, from, dest, 0, dest.length);
+ //Builder pb = new Builder(dest);
+ return new QPath(dest);
+ }
- private static class SelectPattern extends Pattern {
- private final Pattern pattern1;
- private final Pattern pattern2;
+ public boolean isExhausted()
+ {
+ return pos == length;
+ }
- public SelectPattern(Pattern pattern1, Pattern pattern2) {
- super();
- this.pattern1 = pattern1;
- this.pattern2 = pattern2;
- }
+ public MatchResult getMatchResult()
+ {
+ return new MatchResult(path, isMatch ? pos : 0);
+ }
- protected Context match(Context input) throws RepositoryException {
- Context remainder1 = pattern1.match(input);
- Context remainder2 = pattern2.match(input);
- return remainder1.pos > remainder2.pos ?
- remainder1 : remainder2;
- }
+ public String toString()
+ {
+ return pos + " @ " + path;
+ }
- public String toString() {
- return new StringBuffer()
- .append("(")
- .append(pattern1)
- .append("|")
- .append(pattern2)
- .append(")")
- .toString();
- }
- }
+ }
- // -----------------------------------------------------< SequencePattern >---
+ // -----------------------------------------------------< SelectPattern >---
- private static class SequencePattern extends Pattern {
- private final Pattern pattern1;
- private final Pattern pattern2;
+ private static class SelectPattern extends Pattern
+ {
+ private final Pattern pattern1;
- public SequencePattern(Pattern pattern1, Pattern pattern2) {
- super();
- this.pattern1 = pattern1;
- this.pattern2 = pattern2;
- }
+ private final Pattern pattern2;
- protected Context match(Context input) throws RepositoryException {
- Context context1 = pattern1.match(input);
- if (context1.isMatch()) {
- return pattern2.match(context1);
- }
- else {
- return input.noMatch();
- }
- }
+ public SelectPattern(Pattern pattern1, Pattern pattern2)
+ {
+ super();
+ this.pattern1 = pattern1;
+ this.pattern2 = pattern2;
+ }
- public String toString() {
- return new StringBuffer()
- .append("(")
- .append(pattern1)
- .append(", ")
- .append(pattern2)
- .append(")")
- .toString();
- }
- }
+ protected Context match(Context input) throws RepositoryException
+ {
+ Context remainder1 = pattern1.match(input);
+ Context remainder2 = pattern2.match(input);
+ return remainder1.pos > remainder2.pos ? remainder1 : remainder2;
+ }
- // -----------------------------------------------------< RepeatPattern >---
+ public String toString()
+ {
+ return new
StringBuffer().append("(").append(pattern1).append("|").append(pattern2).append(")").toString();
+ }
+ }
- private static class RepeatPattern extends Pattern {
- private final Pattern pattern;
- private final int min;
- private final int max;
- private boolean hasBounds;
+ // -----------------------------------------------------< SequencePattern >---
- public RepeatPattern(Pattern pattern) {
- this(pattern, 0, 0);
- this.hasBounds = false;
- }
+ private static class SequencePattern extends Pattern
+ {
+ private final Pattern pattern1;
- public RepeatPattern(Pattern pattern, int min, int max) {
- super();
- this.pattern = pattern;
- this.min = min;
- this.max = max;
- this.hasBounds = true;
- }
+ private final Pattern pattern2;
- protected Context match(Context input) throws RepositoryException {
- Context nextInput;
- Context output = input.match(0);
- int matchCount = -1;
- do {
- nextInput = output;
- output = pattern.match(nextInput);
- matchCount++;
- } while (output.isMatch() && (output.pos > nextInput.pos));
+ public SequencePattern(Pattern pattern1, Pattern pattern2)
+ {
+ super();
+ this.pattern1 = pattern1;
+ this.pattern2 = pattern2;
+ }
- if (!hasBounds() || (min <= matchCount && matchCount <= max))
{
- return nextInput;
- }
- else {
- return input.noMatch();
- }
- }
+ protected Context match(Context input) throws RepositoryException
+ {
+ Context context1 = pattern1.match(input);
+ if (context1.isMatch())
+ {
+ return pattern2.match(context1);
+ }
+ else
+ {
+ return input.noMatch();
+ }
+ }
- private boolean hasBounds() {
- return hasBounds;
- }
+ public String toString()
+ {
+ return new StringBuffer().append("(").append(pattern1).append(",
").append(pattern2).append(")").toString();
+ }
+ }
- public String toString() {
- return new StringBuffer()
- .append("(")
- .append(pattern)
- .append(")*")
- .toString();
- }
+ // -----------------------------------------------------< RepeatPattern >---
- }
+ private static class RepeatPattern extends Pattern
+ {
+ private final Pattern pattern;
- // -----------------------------------------------------< PathPattern >---
+ private final int min;
- private static class PathPattern extends Pattern {
- private final QPath path;
- private final QPathEntry[] patternElements;
+ private final int max;
- public PathPattern(QPath path) {
- super();
- this.path = path;
- patternElements = path.getEntries();
- }
+ private boolean hasBounds;
- protected Context match(Context input) throws RepositoryException {
- if (input.isExhausted()) {
- return input;
- }
+ public RepeatPattern(Pattern pattern)
+ {
+ this(pattern, 0, 0);
+ this.hasBounds = false;
+ }
- QPath inputPath = input.getRemainder();
-// if (!inputPath.isNormalized()) {
-// throw new IllegalArgumentException("Not normalized");
-// }
+ public RepeatPattern(Pattern pattern, int min, int max)
+ {
+ super();
+ this.pattern = pattern;
+ this.min = min;
+ this.max = max;
+ this.hasBounds = true;
+ }
- QPathEntry[] inputElements = inputPath.getEntries();
- int inputLength = inputElements.length;
- int patternLength = patternElements.length;
- if (patternLength > inputLength) {
- return input.noMatch();
- }
+ protected Context match(Context input) throws RepositoryException
+ {
+ Context nextInput;
+ Context output = input.match(0);
+ int matchCount = -1;
+ do
+ {
+ nextInput = output;
+ output = pattern.match(nextInput);
+ matchCount++;
+ }
+ while (output.isMatch() && (output.pos > nextInput.pos));
- for (int k = 0; k < patternLength; k++) {
- if (!patternElements[k].equals(inputElements[k])) {
- return input.noMatch();
- }
- }
+ if (!hasBounds() || (min <= matchCount && matchCount <= max))
+ {
+ return nextInput;
+ }
+ else
+ {
+ return input.noMatch();
+ }
+ }
- return input.match(patternLength);
- }
+ private boolean hasBounds()
+ {
+ return hasBounds;
+ }
- public String toString() {
- return new StringBuffer()
- .append("\"")
- .append(path)
- .append("\"")
- .toString();
- }
- }
+ public String toString()
+ {
+ return new
StringBuffer().append("(").append(pattern).append(")*").toString();
+ }
- // -----------------------------------------------------< AbstractNamePattern
>---
+ }
- private static abstract class AbstractNamePattern extends Pattern {
- protected abstract boolean matches(QPathEntry element);
+ // -----------------------------------------------------< PathPattern >---
- protected Context match(Context input) throws RepositoryException {
- if (input.isExhausted()) {
- return input.noMatch();
- }
+ private static class PathPattern extends Pattern
+ {
+ private final QPath path;
- QPath inputPath = input.getRemainder();
-// if (!inputPath.isNormalized()) {
-// throw new IllegalArgumentException("Not normalized");
-// }
+ private final QPathEntry[] patternElements;
- QPathEntry[] inputElements = inputPath.getEntries();
- if (inputElements.length < 1 || !matches(inputElements[0])) {
- return input.noMatch();
+ public PathPattern(QPath path)
+ {
+ super();
+ this.path = path;
+ patternElements = path.getEntries();
+ }
+
+ protected Context match(Context input) throws RepositoryException
+ {
+ if (input.isExhausted())
+ {
+ return input;
+ }
+
+ QPath inputPath = input.getRemainder();
+ // if (!inputPath.isNormalized()) {
+ // throw new IllegalArgumentException("Not
normalized");
+ // }
+
+ QPathEntry[] inputElements = inputPath.getEntries();
+ int inputLength = inputElements.length;
+ int patternLength = patternElements.length;
+ if (patternLength > inputLength)
+ {
+ return input.noMatch();
+ }
+
+ for (int k = 0; k < patternLength; k++)
+ {
+ if (!patternElements[k].equals(inputElements[k]))
+ {
+ return input.noMatch();
}
+ }
- return input.match(1);
- }
+ return input.match(patternLength);
+ }
- }
+ public String toString()
+ {
+ return new
StringBuffer().append("\"").append(path).append("\"").toString();
+ }
+ }
- // -----------------------------------------------------< NameNamePattern >---
+ // -----------------------------------------------------< AbstractNamePattern
>---
- private static class NamePattern extends AbstractNamePattern {
- private final InternalQName name;
+ private static abstract class AbstractNamePattern extends Pattern
+ {
+ protected abstract boolean matches(QPathEntry element);
- public NamePattern(InternalQName name) {
- super();
- this.name = name;
- }
+ protected Context match(Context input) throws RepositoryException
+ {
+ if (input.isExhausted())
+ {
+ return input.noMatch();
+ }
- protected boolean matches(QPathEntry element) {
- return name.equals(element);
- }
+ QPath inputPath = input.getRemainder();
+ // if (!inputPath.isNormalized()) {
+ // throw new IllegalArgumentException("Not
normalized");
+ // }
- public String toString() {
- return new StringBuffer()
- .append("\"")
- .append(name)
- .append("\"")
- .toString();
- }
- }
+ QPathEntry[] inputElements = inputPath.getEntries();
+ if (inputElements.length < 1 || !matches(inputElements[0]))
+ {
+ return input.noMatch();
+ }
- // -----------------------------------------------------< StringNamePattern
>---
+ return input.match(1);
+ }
- private static class RegexPattern extends AbstractNamePattern {
- private final java.util.regex.Pattern namespaceUri;
- private final java.util.regex.Pattern localName;
- private final String localNameStr;
- private final String namespaceUriStr;
+ }
- public RegexPattern(String namespaceUri, String localName) {
- super();
+ // -----------------------------------------------------< NameNamePattern >---
- this.namespaceUri = java.util.regex.Pattern.compile(namespaceUri);
- this.localName = java.util.regex.Pattern.compile(localName);
- this.namespaceUriStr = namespaceUri;
- this.localNameStr = localName;
- }
+ private static class NamePattern extends AbstractNamePattern
+ {
+ private final InternalQName name;
- protected boolean matches(QPathEntry element) {
- InternalQName name = element;
- boolean nsMatches = namespaceUri.matcher(name.getNamespace()).matches();
- boolean localMatches = localName.matcher(name.getName()).matches();
- return nsMatches && localMatches;
- }
+ public NamePattern(InternalQName name)
+ {
+ super();
+ this.name = name;
+ }
- public String toString() {
- return new StringBuffer()
- .append("\"{")
- .append(namespaceUriStr)
- .append("}")
- .append(localNameStr)
- .append("\"")
+ protected boolean matches(QPathEntry element)
+ {
+ return name.equals(element);
+ }
+
+ public String toString()
+ {
+ return new
StringBuffer().append("\"").append(name).append("\"").toString();
+ }
+ }
+
+ // -----------------------------------------------------< StringNamePattern
>---
+
+ private static class RegexPattern extends AbstractNamePattern
+ {
+ private final java.util.regex.Pattern namespaceUri;
+
+ private final java.util.regex.Pattern localName;
+
+ private final String localNameStr;
+
+ private final String namespaceUriStr;
+
+ public RegexPattern(String namespaceUri, String localName)
+ {
+ super();
+
+ this.namespaceUri = java.util.regex.Pattern.compile(namespaceUri);
+ this.localName = java.util.regex.Pattern.compile(localName);
+ this.namespaceUriStr = namespaceUri;
+ this.localNameStr = localName;
+ }
+
+ protected boolean matches(QPathEntry element)
+ {
+ InternalQName name = element;
+ boolean nsMatches = namespaceUri.matcher(name.getNamespace()).matches();
+ boolean localMatches = localName.matcher(name.getName()).matches();
+ return nsMatches && localMatches;
+ }
+
+ public String toString()
+ {
+ return new
StringBuffer().append("\"{").append(namespaceUriStr).append("}").append(localNameStr).append("\"")
.toString();
- }
- }
+ }
+ }
}
-
Modified: jcr/trunk/component/core/src/main/resources/binding.xml
===================================================================
--- jcr/trunk/component/core/src/main/resources/binding.xml 2009-10-08 09:29:58 UTC (rev
262)
+++ jcr/trunk/component/core/src/main/resources/binding.xml 2009-10-09 14:02:16 UTC (rev
263)
@@ -49,7 +49,6 @@
</structure>
<structure name="query-handler" field="queryHandler"
-
factory="org.exoplatform.services.jcr.config.QueryHandlerEntryWrapper.queryHandlerEntryFactory"
usage="optional">
<value name="class" field="type" style="attribute"
/>
<collection name="properties" field="parameters"
usage="optional"
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/IndexingRuleTest.java
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/IndexingRuleTest.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/IndexingRuleTest.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,110 +16,111 @@
*/
package org.exoplatform.services.jcr.api.core.query;
-import javax.jcr.RepositoryException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
import javax.jcr.Node;
import javax.jcr.NodeIterator;
+import javax.jcr.RepositoryException;
import javax.jcr.Value;
import javax.jcr.query.RowIterator;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Arrays;
/**
* <code>IndexingRuleTest</code> performs indexing rule tests.
*/
-public class IndexingRuleTest extends AbstractIndexingTest {
+public class IndexingRuleTest extends AbstractIndexingTest
+{
- private static final String NT_UNSTRUCTURED = "nt:unstructured";
+ private static final String NT_UNSTRUCTURED = "nt:unstructured";
- private static final String TEXT = "the quick brown fox jumps over the lazy
dog";
+ private static final String TEXT = "the quick brown fox jumps over the lazy
dog";
- public void testRegexp() throws RepositoryException {
- Node node1 = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
- node1.setProperty("rule", "regexp");
- node1.setProperty("Text", "foo");
- Node node2 = testRootNode.addNode(nodeName2, NT_UNSTRUCTURED);
- node2.setProperty("rule", "regexp");
- node2.setProperty("OtherText", "foo");
- Node node3 = testRootNode.addNode(nodeName3, NT_UNSTRUCTURED);
- node3.setProperty("rule", "regexp");
- node3.setProperty("Textle", "foo");
- testRootNode.save();
- String stmt = "/jcr:root" + testRootNode.getPath() +
- "/*[jcr:contains(., 'foo')]";
- checkResult(executeQuery(stmt), new Node[]{node1, node2});
- }
+ public void testRegexp() throws RepositoryException
+ {
+ Node node1 = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
+ node1.setProperty("rule", "regexp");
+ node1.setProperty("Text", "foo");
+ Node node2 = testRootNode.addNode(nodeName2, NT_UNSTRUCTURED);
+ node2.setProperty("rule", "regexp");
+ node2.setProperty("OtherText", "foo");
+ Node node3 = testRootNode.addNode(nodeName3, NT_UNSTRUCTURED);
+ node3.setProperty("rule", "regexp");
+ node3.setProperty("Textle", "foo");
+ testRootNode.save();
+ String stmt = "/jcr:root" + testRootNode.getPath() +
"/*[jcr:contains(., 'foo')]";
+ checkResult(executeQuery(stmt), new Node[]{node1, node2});
+ }
- public void testBoost() throws RepositoryException {
- Node node1 = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
- node1.setProperty("rule", "boost1");
- node1.setProperty("text", TEXT);
- Node node2 = testRootNode.addNode(nodeName2, NT_UNSTRUCTURED);
- node2.setProperty("rule", "boost2");
- node2.setProperty("text", TEXT);
- Node node3 = testRootNode.addNode(nodeName3, NT_UNSTRUCTURED);
- node3.setProperty("rule", "boost3");
- node3.setProperty("text", TEXT);
- testRootNode.save();
- String stmt = "/jcr:root" + testRootNode.getPath() +
- "/*[jcr:contains(@text, 'quick')] order by @jcr:score
descending";
- List names = new ArrayList();
- for (NodeIterator it = executeQuery(stmt).getNodes(); it.hasNext(); ) {
- names.add(it.nextNode().getName());
- }
- assertEquals("Wrong sequence or number of results.",
- Arrays.asList(new String[]{nodeName3, nodeName2, nodeName1}),
- names);
- }
+ public void testBoost() throws RepositoryException
+ {
+ Node node1 = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
+ node1.setProperty("rule", "boost1");
+ node1.setProperty("text", TEXT);
+ Node node2 = testRootNode.addNode(nodeName2, NT_UNSTRUCTURED);
+ node2.setProperty("rule", "boost2");
+ node2.setProperty("text", TEXT);
+ Node node3 = testRootNode.addNode(nodeName3, NT_UNSTRUCTURED);
+ node3.setProperty("rule", "boost3");
+ node3.setProperty("text", TEXT);
+ testRootNode.save();
+ String stmt =
+ "/jcr:root" + testRootNode.getPath() + "/*[jcr:contains(@text,
'quick')] order by @jcr:score descending";
+ List<String> names = new ArrayList<String>();
+ for (NodeIterator it = executeQuery(stmt).getNodes(); it.hasNext();)
+ {
+ names.add(it.nextNode().getName());
+ }
+ assertEquals("Wrong sequence or number of results.",
+ Arrays.asList(new String[]{nodeName3, nodeName2, nodeName1}), names);
+ }
- public void testNodeScopeIndex() throws RepositoryException {
- Node node1 = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
- node1.setProperty("rule", "nsiTrue");
- node1.setProperty("text", TEXT);
- Node node2 = testRootNode.addNode(nodeName2, NT_UNSTRUCTURED);
- node2.setProperty("rule", "nsiFalse");
- node2.setProperty("text", TEXT);
- testRootNode.save();
- String stmt = "/jcr:root" + testRootNode.getPath() +
- "/*[jcr:contains(., 'quick')]";
- checkResult(executeQuery(stmt), new Node[]{node1});
- }
+ public void testNodeScopeIndex() throws RepositoryException
+ {
+ Node node1 = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
+ node1.setProperty("rule", "nsiTrue");
+ node1.setProperty("text", TEXT);
+ Node node2 = testRootNode.addNode(nodeName2, NT_UNSTRUCTURED);
+ node2.setProperty("rule", "nsiFalse");
+ node2.setProperty("text", TEXT);
+ testRootNode.save();
+ String stmt = "/jcr:root" + testRootNode.getPath() +
"/*[jcr:contains(., 'quick')]";
+ checkResult(executeQuery(stmt), new Node[]{node1});
+ }
- public void testNodeType() throws RepositoryException {
- // assumes there is an index-rule for nt:hierarchyNode that
- // does not include the property jcr:created
- Node node1 = testRootNode.addNode(nodeName1, "nt:folder");
- testRootNode.save();
- String stmt = "/jcr:root" + testRootNode.getPath() +
- "/*[@" + jcrCreated + " = xs:dateTime('" +
- node1.getProperty(jcrCreated).getString() + "')]";
- checkResult(executeQuery(stmt), new Node[]{});
- }
+ public void testNodeType() throws RepositoryException
+ {
+ // assumes there is an index-rule for nt:hierarchyNode that
+ // does not include the property jcr:created
+ Node node1 = testRootNode.addNode(nodeName1, "nt:folder");
+ testRootNode.save();
+ String stmt =
+ "/jcr:root" + testRootNode.getPath() + "/*[@" + jcrCreated +
" = xs:dateTime('"
+ + node1.getProperty(jcrCreated).getString() + "')]";
+ checkResult(executeQuery(stmt), new Node[]{});
+ }
- public void testUseInExcerpt() throws RepositoryException {
- Node node = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
- node.setProperty("rule", "excerpt");
- node.setProperty("title", "Apache Jackrabbit");
- node.setProperty("text", "Jackrabbit is a JCR
implementation");
- testRootNode.save();
- String stmt = "/jcr:root" + testRootNode.getPath() +
- "/*[jcr:contains(., 'jackrabbit
implementation')]/rep:excerpt(.)";
- RowIterator rows = executeQuery(stmt).getRows();
- assertTrue("No results returned", rows.hasNext());
- Value excerpt = rows.nextRow().getValue("rep:excerpt(.)");
- assertNotNull("No excerpt created", excerpt);
- assertTrue("Title must not be present in excerpt",
- excerpt.getString().indexOf("Apache") == -1);
- assertTrue("Missing highlight",
-
excerpt.getString().indexOf("<strong>implementation</strong>") !=
-1);
+ public void testUseInExcerpt() throws RepositoryException
+ {
+ Node node = testRootNode.addNode(nodeName1, NT_UNSTRUCTURED);
+ node.setProperty("rule", "excerpt");
+ node.setProperty("title", "Apache Jackrabbit");
+ node.setProperty("text", "Jackrabbit is a JCR
implementation");
+ testRootNode.save();
+ String stmt =
+ "/jcr:root" + testRootNode.getPath() + "/*[jcr:contains(.,
'jackrabbit implementation')]/rep:excerpt(.)";
+ RowIterator rows = executeQuery(stmt).getRows();
+ assertTrue("No results returned", rows.hasNext());
+ Value excerpt = rows.nextRow().getValue("rep:excerpt(.)");
+ assertNotNull("No excerpt created", excerpt);
+ assertTrue("Title must not be present in excerpt",
excerpt.getString().indexOf("Apache") == -1);
+ assertTrue("Missing highlight",
excerpt.getString().indexOf("<strong>implementation</strong>") !=
-1);
- stmt = "/jcr:root" + testRootNode.getPath() +
- "/*[jcr:contains(., 'apache')]/rep:excerpt(.)";
- rows = executeQuery(stmt).getRows();
- assertTrue("No results returned", rows.hasNext());
- excerpt = rows.nextRow().getValue("rep:excerpt(.)");
- assertNotNull("No excerpt created", excerpt);
- assertTrue("Title must not be present in excerpt",
- excerpt.getString().indexOf("Apache") == -1);
- }
+ stmt = "/jcr:root" + testRootNode.getPath() + "/*[jcr:contains(.,
'apache')]/rep:excerpt(.)";
+ rows = executeQuery(stmt).getRows();
+ assertTrue("No results returned", rows.hasNext());
+ excerpt = rows.nextRow().getValue("rep:excerpt(.)");
+ assertNotNull("No excerpt created", excerpt);
+ assertTrue("Title must not be present in excerpt",
excerpt.getString().indexOf("Apache") == -1);
+ }
}
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/SynonymProviderTest.java
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/SynonymProviderTest.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/SynonymProviderTest.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -16,8 +16,8 @@
*/
package org.exoplatform.services.jcr.api.core.query;
+import javax.jcr.Node;
import javax.jcr.RepositoryException;
-import javax.jcr.Node;
/**
* <code>SynonymProviderTest</code> contains test cases for the
@@ -29,35 +29,40 @@
* <li>ASF <-> Apache Software Foundation</li>
* </ul>
*/
-public class SynonymProviderTest extends AbstractQueryTest {
+public class SynonymProviderTest extends AbstractQueryTest
+{
- public void testSynonyms() throws RepositoryException {
- Node n = testRootNode.addNode(nodeName1);
- n.setProperty(propertyName1, "The quick brown fox jumps over the lazy
dog.");
- testRootNode.save();
- executeXPathQuery(testPath + "//*[jcr:contains(., '~fast')]",
new Node[]{n});
- executeXPathQuery(testPath + "//*[jcr:contains(., '~Fast')]",
new Node[]{n});
- executeXPathQuery(testPath + "//*[jcr:contains(., '~quick')]",
new Node[]{n});
- executeXPathQuery(testPath + "//*[jcr:contains(.,
'~sluggish')]", new Node[]{n});
- executeXPathQuery(testPath + "//*[jcr:contains(.,
'~sluGGish')]", new Node[]{n});
- executeXPathQuery(testPath + "//*[jcr:contains(., '~lazy')]",
new Node[]{n});
- // check term which is not in the synonym provider
- executeXPathQuery(testPath + "//*[jcr:contains(., '~brown')]",
new Node[]{n});
- }
+ public void testSynonyms() throws RepositoryException
+ {
+ Node n = testRootNode.addNode(nodeName1);
+ n.setProperty(propertyName1, "The quick brown fox jumps over the lazy
dog.");
+ testRootNode.save();
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~fast')]", new
Node[]{n});
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~Fast')]", new
Node[]{n});
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~quick')]",
new Node[]{n});
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~sluggish')]",
new Node[]{n});
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~sluGGish')]",
new Node[]{n});
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~lazy')]", new
Node[]{n});
+ // check term which is not in the synonym provider
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~brown')]",
new Node[]{n});
+ }
- public void testPhrase() throws RepositoryException {
- Node n = testRootNode.addNode(nodeName1);
- n.setProperty(propertyName1, "Licensed to the Apache Software Foundation
...");
- testRootNode.save();
- executeXPathQuery(testPath + "//*[jcr:contains(., '~ASF')]",
new Node[]{n});
- executeXPathQuery(testPath + "//*[jcr:contains(., '~asf')]",
new Node[]{n});
- executeXPathQuery(testPath + "//*[jcr:contains(., 'asf')]", new
Node[]{});
- }
+ public void testPhrase() throws RepositoryException
+ {
+ Node n = testRootNode.addNode(nodeName1);
+ n.setProperty(propertyName1, "Licensed to the Apache Software Foundation
...");
+ testRootNode.save();
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~ASF')]", new
Node[]{n});
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~asf')]", new
Node[]{n});
+ executeXPathQuery(testPath + "//*[jcr:contains(., 'asf')]", new
Node[]{});
+ }
- public void disabled_testReload() throws RepositoryException, InterruptedException {
- for (int i = 0; i < 60; i++) {
- Thread.sleep(1 * 1000);
- executeXPathQuery(testPath + "//*[jcr:contains(.,
'~asf')]", new Node[]{});
- }
- }
+ public void disabled_testReload() throws RepositoryException, InterruptedException
+ {
+ for (int i = 0; i < 60; i++)
+ {
+ Thread.sleep(1 * 1000);
+ executeXPathQuery(testPath + "//*[jcr:contains(., '~asf')]",
new Node[]{});
+ }
+ }
}
Deleted:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestAll.java
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestAll.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestAll.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *
http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.exoplatform.services.jcr.api.core.query;
-
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
-import org.exoplatform.services.jcr.api.core.query.lucene.hits.ArrayHitsTest;
-
-/**
- * Test suite that includes all testcases for the Search module.
- */
-public class TestAll extends TestCase {
-
- /**
- * Returns a <code>Test</code> suite that executes all tests inside this
- * package.
- *
- * @return a <code>Test</code> suite that executes all tests inside this
- * package.
- */
- public static Test suite() {
- TestSuite suite = new TestSuite("Search tests");
-
- suite.addTestSuite(SimpleQueryTest.class);
- suite.addTestSuite(FulltextQueryTest.class);
- suite.addTestSuite(SelectClauseTest.class);
- suite.addTestSuite(SQLTest.class);
- suite.addTestSuite(OrderByTest.class);
- suite.addTestSuite(XPathAxisTest.class);
- suite.addTestSuite(SkipDeletedNodesTest.class);
- suite.addTestSuite(SkipDeniedNodesTest.class);
- suite.addTestSuite(MixinTest.class);
- suite.addTestSuite(DerefTest.class);
- suite.addTestSuite(VersionStoreQueryTest.class);
- suite.addTestSuite(UpperLowerCaseQueryTest.class);
- suite.addTestSuite(ChildAxisQueryTest.class);
- suite.addTestSuite(QueryResultTest.class);
- suite.addTestSuite(FnNameQueryTest.class);
- suite.addTestSuite(PathQueryNodeTest.class);
- // suite.addTestSuite(SynonymProviderTest.class);
- suite.addTestSuite(ArrayHitsTest.class);
- // suite.addTestSuite(ExcerptTest.class);
- suite.addTestSuite(IndexFormatVersionTest.class);
- // suite.addTestSuite(IndexingRuleTest.class);
- suite.addTestSuite(ShareableNodeTest.class);
-
- return suite;
- }
-}
Copied:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestApiQueryAll.java
(from rev 262,
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestAll.java)
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestApiQueryAll.java
(rev 0)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/TestApiQueryAll.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *
http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.api.core.query;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.exoplatform.services.jcr.api.core.query.lucene.hits.ArrayHitsTest;
+
+/**
+ * Test suite that includes all testcases for the Search module.
+ */
+public class TestApiQueryAll extends TestCase
+{
+
+ /**
+ * Returns a <code>Test</code> suite that executes all tests inside this
+ * package.
+ *
+ * @return a <code>Test</code> suite that executes all tests inside this
+ * package.
+ */
+ public static Test suite()
+ {
+ TestSuite suite = new TestSuite("Search tests");
+
+ suite.addTestSuite(SimpleQueryTest.class);
+ suite.addTestSuite(FulltextQueryTest.class);
+ suite.addTestSuite(SelectClauseTest.class);
+ suite.addTestSuite(SQLTest.class);
+ suite.addTestSuite(OrderByTest.class);
+ suite.addTestSuite(XPathAxisTest.class);
+ suite.addTestSuite(SkipDeletedNodesTest.class);
+ suite.addTestSuite(SkipDeniedNodesTest.class);
+ suite.addTestSuite(MixinTest.class);
+ suite.addTestSuite(DerefTest.class);
+ suite.addTestSuite(VersionStoreQueryTest.class);
+ suite.addTestSuite(UpperLowerCaseQueryTest.class);
+ suite.addTestSuite(ChildAxisQueryTest.class);
+ suite.addTestSuite(QueryResultTest.class);
+ suite.addTestSuite(FnNameQueryTest.class);
+ suite.addTestSuite(PathQueryNodeTest.class);
+ suite.addTestSuite(SynonymProviderTest.class);
+ suite.addTestSuite(ArrayHitsTest.class);
+ suite.addTestSuite(ExcerptTest.class);
+ suite.addTestSuite(IndexFormatVersionTest.class);
+ suite.addTestSuite(IndexingRuleTest.class);
+ suite.addTestSuite(ShareableNodeTest.class);
+
+ return suite;
+ }
+}
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/IndexingAggregateTest.java
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/IndexingAggregateTest.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/IndexingAggregateTest.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -34,151 +34,158 @@
import java.util.Collections;
import java.util.Iterator;
-
-
/**
* <code>IndexingAggregateTest</code> checks if the nt:file nt:resource
* aggregate defined in workspace indexing-test works properly.
*/
-public class IndexingAggregateTest extends AbstractIndexingTest {
+public class IndexingAggregateTest extends AbstractIndexingTest
+{
- public void testNtFileAggregate() throws Exception {
- String sqlBase = "SELECT * FROM nt:file"
- + " WHERE jcr:path LIKE '" + testRoot + "/%"
- + "' AND CONTAINS";
- String sqlCat = sqlBase + "(., 'cat')";
- String sqlDog = sqlBase + "(., 'dog')";
+ public void testNtFileAggregate() throws Exception
+ {
+ String sqlBase = "SELECT * FROM nt:file" + " WHERE jcr:path LIKE
'" + testRoot + "/%" + "' AND CONTAINS";
+ String sqlCat = sqlBase + "(., 'cat')";
+ String sqlDog = sqlBase + "(., 'dog')";
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- Writer writer = new OutputStreamWriter(out, "UTF-8");
- writer.write("the quick brown fox jumps over the lazy dog.");
- writer.flush();
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ Writer writer = new OutputStreamWriter(out, "UTF-8");
+ writer.write("the quick brown fox jumps over the lazy dog.");
+ writer.flush();
- Node file = testRootNode.addNode("myFile", "nt:file");
- Node resource = file.addNode("jcr:content", "nt:resource");
- resource.setProperty("jcr:lastModified", Calendar.getInstance());
- resource.setProperty("jcr:encoding", "UTF-8");
- resource.setProperty("jcr:mimeType", "text/plain");
- resource.setProperty("jcr:data", new
ByteArrayInputStream(out.toByteArray()));
+ Node file = testRootNode.addNode("myFile", "nt:file");
+ Node resource = file.addNode("jcr:content", "nt:resource");
+ resource.setProperty("jcr:lastModified", Calendar.getInstance());
+ resource.setProperty("jcr:encoding", "UTF-8");
+ resource.setProperty("jcr:mimeType", "text/plain");
+ resource.setProperty("jcr:data", new
ByteArrayInputStream(out.toByteArray()));
- testRootNode.save();
- waitUntilQueueEmpty();
+ testRootNode.save();
+ waitUntilQueueEmpty();
- executeSQLQuery(sqlDog, new Node[]{file});
+ executeSQLQuery(sqlDog, new Node[]{file});
- // update jcr:data
- out.reset();
- writer.write("the quick brown fox jumps over the lazy cat.");
- writer.flush();
- resource.setProperty("jcr:data", new
ByteArrayInputStream(out.toByteArray()));
- testRootNode.save();
- waitUntilQueueEmpty();
+ // update jcr:data
+ out.reset();
+ writer.write("the quick brown fox jumps over the lazy cat.");
+ writer.flush();
+ resource.setProperty("jcr:data", new
ByteArrayInputStream(out.toByteArray()));
+ testRootNode.save();
+ waitUntilQueueEmpty();
- executeSQLQuery(sqlCat, new Node[]{file});
+ executeSQLQuery(sqlCat, new Node[]{file});
- // replace jcr:content with unstructured
- resource.remove();
- Node unstrContent = file.addNode("jcr:content",
"nt:unstructured");
- Node foo = unstrContent.addNode("foo");
- foo.setProperty("text", "the quick brown fox jumps over the lazy
dog.");
- testRootNode.save();
+ // // replace jcr:content with unstructured
+ // resource.remove();
+ // Node unstrContent = file.addNode("jcr:content",
"nt:unstructured");
+ // Node foo = unstrContent.addNode("foo");
+ // foo.setProperty("text", "the quick brown fox jumps over
the lazy dog.");
+ // testRootNode.save();
+ //
+ // executeSQLQuery(sqlDog, new Node[]{file});
+ //
+ // // remove foo
+ // foo.remove();
+ // testRootNode.save();
+ //
+ // executeSQLQuery(sqlDog, new Node[]{});
+ //
+ // // replace jcr:content again with resource
+ // unstrContent.remove();
+ // resource = file.addNode("jcr:content",
"nt:resource");
+ // resource.setProperty("jcr:lastModified",
Calendar.getInstance());
+ // resource.setProperty("jcr:encoding", "UTF-8");
+ // resource.setProperty("jcr:mimeType", "text/plain");
+ // resource.setProperty("jcr:data", new
ByteArrayInputStream(out.toByteArray()));
+ // testRootNode.save();
+ // waitUntilQueueEmpty();
+ //
+ // executeSQLQuery(sqlCat, new Node[]{file});
+ }
- executeSQLQuery(sqlDog, new Node[]{file});
+ protected void waitUntilQueueEmpty() throws Exception
+ {
+ SearchIndex index = (SearchIndex)getQueryHandler();
+ IndexingQueue queue = index.getIndex().getIndexingQueue();
+ index.getIndex().flush();
+ synchronized (index.getIndex())
+ {
+ while (queue.getNumPendingDocuments() > 0)
+ {
+ index.getIndex().wait(50);
+ }
+ }
+ }
- // remove foo
- foo.remove();
- testRootNode.save();
+ public void testContentLastModified() throws RepositoryException
+ {
+ List expected = new ArrayList();
+ long time = System.currentTimeMillis();
+ for (int i = 0; i < 10; i++)
+ {
+ expected.add(addFile(testRootNode, "file" + i, time));
+ time += 1000;
+ }
+ testRootNode.save();
- executeSQLQuery(sqlDog, new Node[]{});
+ String stmt = testPath + "/* order by jcr:content/@jcr:lastModified";
+ Query q = qm.createQuery(stmt, Query.XPATH);
+ checkResultSequence(q.execute().getRows(), (Node[])expected.toArray(new
Node[expected.size()]));
- // replace jcr:content again with resource
- unstrContent.remove();
- resource = file.addNode("jcr:content", "nt:resource");
- resource.setProperty("jcr:lastModified", Calendar.getInstance());
- resource.setProperty("jcr:encoding", "UTF-8");
- resource.setProperty("jcr:mimeType", "text/plain");
- resource.setProperty("jcr:data", new
ByteArrayInputStream(out.toByteArray()));
- testRootNode.save();
- waitUntilQueueEmpty();
+ // descending
+ stmt = testPath + "/* order by jcr:content/@jcr:lastModified
descending";
+ q = qm.createQuery(stmt, Query.XPATH);
+ Collections.reverse(expected);
+ checkResultSequence(q.execute().getRows(), (Node[])expected.toArray(new
Node[expected.size()]));
- executeSQLQuery(sqlCat, new Node[]{file});
- }
+ // reverse order in content
+ for (Iterator it = expected.iterator(); it.hasNext();)
+ {
+ Node file = (Node)it.next();
+ Calendar cal = Calendar.getInstance();
+ cal.setTimeInMillis(time);
+ file.getNode("jcr:content").setProperty("jcr:lastModified",
cal);
+ time -= 1000;
+ }
+ testRootNode.save();
- protected void waitUntilQueueEmpty() throws Exception {
- SearchIndex index = (SearchIndex) getQueryHandler();
- IndexingQueue queue = index.getIndex().getIndexingQueue();
- index.getIndex().flush();
- synchronized (index.getIndex()) {
- while (queue.getNumPendingDocuments() > 0) {
- index.getIndex().wait(50);
- }
- }
- }
+ stmt = testPath + "/* order by jcr:content/@jcr:lastModified
descending";
+ q = qm.createQuery(stmt, Query.XPATH);
+ checkResultSequence(q.execute().getRows(), (Node[])expected.toArray(new
Node[expected.size()]));
+ }
- public void testContentLastModified() throws RepositoryException {
- List expected = new ArrayList();
- long time = System.currentTimeMillis();
- for (int i = 0; i < 10; i++) {
- expected.add(addFile(testRootNode, "file" + i, time));
- time += 1000;
- }
- testRootNode.save();
+ private static Node addFile(Node folder, String name, long lastModified) throws
RepositoryException
+ {
+ Node file = folder.addNode(name, "nt:file");
+ Node resource = file.addNode("jcr:content", "nt:resource");
+ Calendar cal = Calendar.getInstance();
+ cal.setTimeInMillis(lastModified);
+ resource.setProperty("jcr:lastModified", cal);
+ resource.setProperty("jcr:encoding", "UTF-8");
+ resource.setProperty("jcr:mimeType", "text/plain");
+ resource.setProperty("jcr:data", new
ByteArrayInputStream("test".getBytes()));
+ return file;
+ }
- String stmt = testPath + "/* order by jcr:content/@jcr:lastModified";
- Query q = qm.createQuery(stmt, Query.XPATH);
- checkResultSequence(q.execute().getRows(), (Node[]) expected.toArray(new
Node[expected.size()]));
+ private int createNodes(Node n, int nodesPerLevel, int levels, int count,
NodeCreationCallback callback)
+ throws RepositoryException
+ {
+ levels--;
+ for (int i = 0; i < nodesPerLevel; i++)
+ {
+ Node child = n.addNode("node" + i);
+ count++;
+ callback.nodeCreated(child, count);
+ if (levels > 0)
+ {
+ count = createNodes(child, nodesPerLevel, levels, count, callback);
+ }
+ }
+ return count;
+ }
- // descending
- stmt = testPath + "/* order by jcr:content/@jcr:lastModified
descending";
- q = qm.createQuery(stmt, Query.XPATH);
- Collections.reverse(expected);
- checkResultSequence(q.execute().getRows(), (Node[]) expected.toArray(new
Node[expected.size()]));
+ private static interface NodeCreationCallback
+ {
- // reverse order in content
- for (Iterator it = expected.iterator(); it.hasNext(); ) {
- Node file = (Node) it.next();
- Calendar cal = Calendar.getInstance();
- cal.setTimeInMillis(time);
-
file.getNode("jcr:content").setProperty("jcr:lastModified", cal);
- time -= 1000;
- }
- testRootNode.save();
-
- stmt = testPath + "/* order by jcr:content/@jcr:lastModified
descending";
- q = qm.createQuery(stmt, Query.XPATH);
- checkResultSequence(q.execute().getRows(), (Node[]) expected.toArray(new
Node[expected.size()]));
- }
-
- private static Node addFile(Node folder, String name, long lastModified)
- throws RepositoryException {
- Node file = folder.addNode(name, "nt:file");
- Node resource = file.addNode("jcr:content", "nt:resource");
- Calendar cal = Calendar.getInstance();
- cal.setTimeInMillis(lastModified);
- resource.setProperty("jcr:lastModified", cal);
- resource.setProperty("jcr:encoding", "UTF-8");
- resource.setProperty("jcr:mimeType", "text/plain");
- resource.setProperty("jcr:data", new
ByteArrayInputStream("test".getBytes()));
- return file;
- }
-
- private int createNodes(Node n, int nodesPerLevel, int levels,
- int count, NodeCreationCallback callback)
- throws RepositoryException {
- levels--;
- for (int i = 0; i < nodesPerLevel; i++) {
- Node child = n.addNode("node" + i);
- count++;
- callback.nodeCreated(child, count);
- if (levels > 0) {
- count = createNodes(child, nodesPerLevel, levels, count, callback);
- }
- }
- return count;
- }
-
- private static interface NodeCreationCallback {
-
- public void nodeCreated(Node node, int count) throws RepositoryException;
- }
+ public void nodeCreated(Node node, int count) throws RepositoryException;
+ }
}
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/TestAll.java
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/TestAll.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/TestAll.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -23,21 +23,23 @@
/**
* Test suite that includes all testcases for the Search module.
*/
-public class TestAll extends TestCase {
+public class TestAll extends TestCase
+{
- /**
- * Returns a <code>Test</code> suite that executes all tests inside this
- * package.
- *
- * @return a <code>Test</code> suite that executes all tests inside this
- * package.
- */
- public static Test suite() {
- TestSuite suite = new TestSuite("Search tests");
+ /**
+ * Returns a <code>Test</code> suite that executes all tests inside this
+ * package.
+ *
+ * @return a <code>Test</code> suite that executes all tests inside this
+ * package.
+ */
+ public static Test suite()
+ {
+ TestSuite suite = new TestSuite("Search tests");
- suite.addTestSuite(IndexingQueueTest.class);
- // suite.addTestSuite(IndexingAggregateTest.class);
+ suite.addTestSuite(IndexingQueueTest.class);
+ suite.addTestSuite(IndexingAggregateTest.class);
- return suite;
- }
+ return suite;
+ }
}
\ No newline at end of file
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/TestExcerpt.java
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/TestExcerpt.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/TestExcerpt.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -46,7 +46,7 @@
+ "the configuration parameter for this setting is:" + "This is
the test for Excerpt query";
private String string1_excerpt =
- "<div><span>Node1 Additionally there is a parameter that controls
the format of the "
+ "<div><span>Additionally there is a parameter that controls the
format of the "
+ "<strong>excerpt</strong> created. In JCR 1.9 the default is
set ...</span><span>"
+ "the configuration parameter for this setting is:This is the test for
"
+ "<strong>Excerpt</strong>
query</span></div>";
@@ -58,7 +58,7 @@
private String string2_excerpt =
// "<div><span>It is a test for
<strong>excerpt</strong> query.Searching with synonyms is integrated in the
jcr:contains() function and uses the same syntax like synonym searches
...</span></div>";
- "<div><span>Node2 It is a test for
<strong>excerpt</strong> query.Searching with synonyms is integrated in the
jcr:contains() function and uses the same syntax like synonym
...</span></div>";
+ "<div><span>It is a test for <strong>excerpt</strong>
query.Searching with synonyms is integrated in the jcr:contains() function and uses the
same syntax like synonym searches ...</span></div>";
private String s3 = "JCR supports such features as Lucene Fuzzy Searches";
@@ -104,40 +104,44 @@
public void testExcerpt() throws Exception
{
+ for (int z = 0; z < 1; z++)
+ {
- Node excerptTest = testRoot.addNode("testExcerpt");
+ Node excerptTest = testRoot.addNode("testExcerpt");
- Node node1 = excerptTest.addNode("Node1", "exo:article");
- node1.setProperty("exo:title", "Node1");
- node1.setProperty("exo:text", s1);
+ Node node1 = excerptTest.addNode("Node1", "exo:article");
+ node1.setProperty("exo:title", "");
+ node1.setProperty("exo:text", s1);
- Node node2 = excerptTest.addNode("Node2", "exo:article");
- node2.setProperty("exo:title", "Node2");
- node2.setProperty("exo:text", s2);
+ Node node2 = excerptTest.addNode("Node2", "exo:article");
+ node2.setProperty("exo:title", "");
+ node2.setProperty("exo:text", s2);
- Node node3 = excerptTest.addNode("Node3", "exo:article");
- node3.setProperty("exo:title", "Node3");
- node3.setProperty("exo:text", s3);
+ Node node3 = excerptTest.addNode("Node3", "exo:article");
+ node3.setProperty("exo:title", "");
+ node3.setProperty("exo:text", s3);
- testSession.save();
+ testSession.save();
- QueryManager queryManager = testSession.getWorkspace().getQueryManager();
- Query q1 =
- queryManager.createQuery("select exo:text, excerpt(.) from exo:article
where jcr:path LIKE '"
- + excerptTest.getPath() + "/%' and contains(., 'excerpt')
ORDER BY exo:title", Query.SQL);
- for (int i = 0; i < 10; i++)
- {
- checkResult(q1);
- }
+ QueryManager queryManager = testSession.getWorkspace().getQueryManager();
+ Query q1 =
+ queryManager.createQuery("select exo:text, excerpt(.) from exo:article
where jcr:path LIKE '"
+ + excerptTest.getPath() + "/%' and contains(., 'excerpt')
ORDER BY exo:title", Query.SQL);
+ for (int i = 0; i < 1; i++)
+ {
+ checkResult(q1);
+ }
- Query q2 =
- queryManager.createQuery("/jcr:root/" + excerptTest.getPath()
- + "//*[jcr:contains(., 'excerpt')]/(@exo:text|rep:excerpt(.))
order by @exo:title", Query.XPATH);
- for (int i = 0; i < 10; i++)
- {
- checkResult(q2);
+ Query q2 =
+ queryManager.createQuery("/jcr:root/" + excerptTest.getPath()
+ + "//*[jcr:contains(., 'excerpt')]/(@exo:text|rep:excerpt(.))
order by @exo:title", Query.XPATH);
+ for (int i = 0; i < 1; i++)
+ {
+ checkResult(q2);
+ }
+ excerptTest.remove();
+ testSession.save();
}
-
}
private void checkResult(Query query) throws RepositoryException
@@ -151,19 +155,15 @@
Row r = it.nextRow();
Value excerpt = r.getValue("rep:excerpt(.)");
Value text = r.getValue("exo:text");
- System.out.println(excerpt.getString());
- System.out.println(text.getString());
if (text.getString().equals(s1))
{
- System.out.println("s1" +
string1_excerpt.equals(excerpt.getString()));
- //assertEquals(string1_excerpt, excerpt.getString());
+ assertEquals(string1_excerpt, excerpt.getString());
}
else if (text.getString().equals(s2))
{
- System.out.println("s2" +
string2_excerpt.equals(excerpt.getString()));
- //assertEquals(string2_excerpt, excerpt.getString());
+ assertEquals(string2_excerpt, excerpt.getString());
}
}
}
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/export/ExportWorkspaceSystemViewTest.java
===================================================================
---
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/export/ExportWorkspaceSystemViewTest.java 2009-10-08
09:29:58 UTC (rev 262)
+++
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/usecases/export/ExportWorkspaceSystemViewTest.java 2009-10-09
14:02:16 UTC (rev 263)
@@ -20,6 +20,7 @@
import org.exoplatform.services.jcr.config.ContainerEntry;
import org.exoplatform.services.jcr.config.QueryHandlerEntry;
+import org.exoplatform.services.jcr.config.QueryHandlerParams;
import org.exoplatform.services.jcr.config.SimpleParameterEntry;
import org.exoplatform.services.jcr.config.WorkspaceEntry;
import org.exoplatform.services.jcr.config.WorkspaceInitializerEntry;
@@ -119,7 +120,7 @@
// Indexer
ArrayList qParams = new ArrayList();
- qParams.add(new SimpleParameterEntry("indexDir", "target" +
File.separator + name));
+ qParams.add(new SimpleParameterEntry(QueryHandlerParams.PARAM_INDEX_DIR,
"target" + File.separator + name));
QueryHandlerEntry qEntry =
new
QueryHandlerEntry("org.exoplatform.services.jcr.impl.core.query.lucene.SearchIndex",
qParams);